[Link]-key 1.
pem
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA26BSa1vnrVYSenCUb1v4BGbNE8kMYwP+wvyIwv4v0PReM9M
g
0Zbhn91CvGmpXCv8Mb7a+3W102lzsw1NX6aFZ/RiQFKiz33O6pNBoh45szIZ0L2r
USTCJJt4puoqH2PlTAik9ZoniMkEY8YlpzDRiZL9jNiRxmqjPi6N240JLQHKPbCh
qVMN/j3lVr8smxDDx6ZJQy9OZH9XUb82dDT8150gY/FEmhjuR6GSpL4a762TkRY8
8xfFKJ3+cFXFHYYCiDCncVchra1fn3h4VC49gezCmf5OjfDONTEdpcNueZy28kuR
avCdPyt2My2TRZ0bmcXqJifw1pwEsYdC4RUPGQIDAQABAoIBAQCjQLHjIca9vuwv
XT3I5Rkf1HZ+TYTKmS6nK+ZQ43cRoIPhi/+qK7h9B5BS00vTutJU++R05JOKmakq
ICoyfmpN0oex/nsNNxw2m80+v67uN8CevE37HBK/+V0e3ZMAEFZiRU6xTawzbhC4
RZK5i31GASA9sMHzNlMvppVF7Lk0XO47jW5mRbxpAWPj3QEDHlypbdoXJh0b53FR
jUhAqAGC26ealaGEwxiLSNT+LjX+OiJ7xkCYnByaAUEUS/kU5FB12/2T+j9+y+W9
jaUVB82guCxa0D0boHVT8S+BlIR3A58zTYsFG5dWJ/R67Z/0xFczYgr1Axoihb1u
Rix8W3GxAoGBAPpOsOCGxB+HZiSjo8HRRY5fpIpR0GaQy0Wrf6BiintB8u11bg7z
Uk61UWwycUggn3UgAxqYXL+ruV06GbzCYHiijmY4iJGUNVVpS92njlzqqMaBtSiu
VkYXTUwHHaHzgKJJcCtce2QQFwcVfT94vv57gVyM/bvTb2uX7cmn0ThVAoGBAOCf
AMuRXZ6m3VGBsGoUA6N4IGq5r+Um7L+xDLPZh7o0C8scj7SfPf2Dn4wfCF1t+lR0
IRr+KLbfIxib62cAEw9ehiNPb6ISUJD8UxbAzJ2J9J9dPqS8h0cFMZdeYNIiKyld
arPKhHCOyImF8tH2KQkE48zukzEZb26c4Mvgrk+1AoGAIJYmCNUCq2ghMBvtscng
2EwzS56jFvc6v3M4RlUN2kYXrjpffarHyF9qnj9i6BT3LXRhcxDq4jklBLIPr7dO
f+YJy90ISouU9e2jZtPtgMM7/bvdLdKkgLN/6Kn0faUJ1hbhd4OuvLQri0yM5bp+
gvrpOHdSgSVhmAxLdLlYT5ECgYBeGV5c6X3928u1vI3qMo4/qjByZW3RotflikdX
BNc+Y1VZdpKhSWDP1pc9PTbk5RDM1oWT6EuoKJxsT81zLe3OL3wkSL/sdp23z20D
iCtwB/
4p0QdQPgax8vQOhKQtA9FvF8p+nWWUPcy3CB1sZiJUu+6uyHdzR0OOZDws
TOjZTQKBgG1qEeiydQswAKA27nfnM/8jCqSfiP4MaYH5jGxaeziq65aB2LiyWQua
cBcboQaKgwei0WqD+o1mmH7ZlYiNrxAK1hxGDFoKQeuBWD04sXlouXsuCSZGvy8P
5/VJt3X5K7uYjkh0qq0JWv+YrsZtEpCLZ6TG9CZ/P19UrU9i8nHh
-----END RSA PRIVATE KEY-----
maji file
#
# Script @test from content/installation/[Link]
#
# Blocks 1-3 run in the *outer* shell so that JIRI_ROOT and VANADIUM_RELEASE
# are exported into the environment before the handled sub-script below runs.
#----------------------------------------------------------------------# Start 1
echo "Block 'checkForBash' (1/9 in test) of content/installation/[Link]"
####
# Bash-only installation: the BASH* variables are only set by bash itself.
set | grep BASH > /dev/null || echo "Vanadium installation requires Bash."
#----------------------------------------------------------------------# End 1
#----------------------------------------------------------------------# Start 2
echo "Block 'define_JIRI_ROOT' (2/9 in test) of content/installation/step-by-[Link]"
####
# Uses existing $JIRI_ROOT environment variable, defaults to ${HOME}/vanadium if
# $JIRI_ROOT is not set.
export JIRI_ROOT=${JIRI_ROOT:=${HOME}/vanadium}
#----------------------------------------------------------------------# End 2
#----------------------------------------------------------------------# Start 3
echo "Block 'define_VANADIUM_RELEASE' (3/9 in test) of content/installation/step-by-[Link]"
####
# Needed for tutorials only.
export VANADIUM_RELEASE=${JIRI_ROOT}/release/go
#----------------------------------------------------------------------# End 3
# Run the full 9-block installation inside a strict-mode child bash
# (-e exit on error, -u error on unset vars, -o pipefail) so a failure in
# any step — including the curl|bash bootstrap — aborts cleanly.
# The quoted delimiter ('HANDLED_SCRIPT') prevents expansion in THIS shell;
# everything expands inside the child.
bash -euo pipefail <<'HANDLED_SCRIPT'
function handledTrouble() {
echo " "
echo "Unable to continue!"
exit 1
}
# Report a friendly message on interrupt/termination instead of dying silently.
trap handledTrouble INT TERM
#
# Script @test from content/installation/[Link]
#
#----------------------------------------------------------------------# Start 1
echo "Block 'checkForBash' (1/9 in test) of content/installation/[Link]"
####
set | grep BASH > /dev/null || echo "Vanadium installation requires Bash."
#----------------------------------------------------------------------# End 1
#----------------------------------------------------------------------# Start 2
echo "Block 'define_JIRI_ROOT' (2/9 in test) of content/installation/step-by-[Link]"
####
# Uses existing $JIRI_ROOT environment variable, defaults to ${HOME}/vanadium if
# $JIRI_ROOT is not set.
export JIRI_ROOT=${JIRI_ROOT:=${HOME}/vanadium}
#----------------------------------------------------------------------# End 2
#----------------------------------------------------------------------# Start 3
echo "Block 'define_VANADIUM_RELEASE' (3/9 in test) of content/installation/step-by-[Link]"
####
# Needed for tutorials only.
export VANADIUM_RELEASE=${JIRI_ROOT}/release/go
#----------------------------------------------------------------------# End 3
#----------------------------------------------------------------------# Start 4
echo "Block 'check_JIRI_ROOT' (4/9 in test) of content/installation/step-by-[Link]"
####
# Check that the JIRI_ROOT path does not exist: this is a fresh-install test,
# so a pre-existing tree must be removed (or JIRI_ROOT redirected) first.
if [[ -e "${JIRI_ROOT}" ]]; then
echo ""
echo "ERROR: The JIRI_ROOT path already exists: ${JIRI_ROOT}"
echo "To proceed with a fresh install remove the directory and re-run:"
echo ""
echo " rm -rf ${JIRI_ROOT}"
echo ""
echo "Or set JIRI_ROOT to a different path."
exit 1
fi
#----------------------------------------------------------------------# End 4
#----------------------------------------------------------------------# Start 5
echo "Block 'runBootstrapScript' (5/9 in test) of content/installation/step-by-[Link]"
####
# This can take several minutes.
curl -f [Link] | bash
#----------------------------------------------------------------------# End 5
#----------------------------------------------------------------------# Start 6
echo "Block 'addDevtoolsToPath' (6/9 in test) of content/installation/step-by-[Link]"
####
export PATH=$JIRI_ROOT/.jiri_root/scripts:$PATH
#----------------------------------------------------------------------# End 6
#----------------------------------------------------------------------# Start 7
echo "Block 'packagesBaseProfile' (7/9 in test) of content/installation/step-by-[Link]"
####
# Print the package installation command.
jiri profile os-packages v23:base
# Run the package installation command as root (only if any packages listed).
if [ -n "$(jiri profile os-packages v23:base)" ]; then
sudo $(jiri profile os-packages v23:base)
fi
#----------------------------------------------------------------------# End 7
#----------------------------------------------------------------------# Start 8
echo "Block 'installBaseProfile' (8/9 in test) of content/installation/step-by-[Link]"
####
jiri profile install v23:base
#----------------------------------------------------------------------# End 8
#----------------------------------------------------------------------# Start 9
echo "Block 'installVanadiumBinaries' (9/9 in test) of content/installation/step-by-[Link]"
####
# Install specific tools needed for the tutorials.
jiri go install [Link]/x/ref/cmd/... [Link]/x/ref/services/agent/... [Link]/x/ref/services/mounttable/... [Link]/x/ref/services/syncbase/...
#----------------------------------------------------------------------# End 9
echo " "
echo "All done. No errors."
HANDLED_SCRIPT
"ociVersion": "0.1.0"
"root": {
"path": "rootfs",
"readonly": true
}
Example (Windows)
"root": {
"path": "\\\\?\\Volume{ec84d99e-3f02-11e7-ac6c-00155d7682cf}\\"
}
Mounts
mounts (array of objects, OPTIONAL) specifies additional mounts beyond root. The
runtime MUST mount entries in the listed order. For Linux, the parameters are as
documented in mount(2) system call man page. For Solaris, the mount entry
corresponds to the 'fs' resource in the zonecfg(1M) man page.
destination (string, REQUIRED) Destination of mount point: path inside container.
Linux: This value SHOULD be an absolute path. For compatibility with old tools and
configurations, it MAY be a relative path, in which case it MUST be interpreted as
relative to "/". Relative paths are deprecated.
Windows: This value MUST be an absolute path. One mount destination MUST NOT
be nested within another mount (e.g., c:\foo and c:\foo\bar).
Solaris: This value MUST be an absolute path. Corresponds to "dir" of the fs
resource in zonecfg(1M).
For all other platforms: This value MUST be an absolute path.
source (string, OPTIONAL) A device name, but can also be a file or directory name
for bind mounts or a dummy. Path values for bind mounts are either absolute or
relative to the bundle. A mount is a bind mount if it has either bind or rbind in the
options.
Windows: a local directory on the filesystem of the container host. UNC paths and
mapped drives are not supported.
Solaris: corresponds to "special" of the fs resource in zonecfg(1M).
options (array of strings, OPTIONAL) Mount options of the filesystem to be used.
Linux: See Linux mount options below.
Solaris: corresponds to "options" of the fs resource in zonecfg(1M).
Windows: runtimes MUST support ro, mounting the filesystem read-only when ro is
given.
Linux mount options
Runtimes MUST/SHOULD/MAY implement the following option strings for Linux:
Option name Requirement Description
async MUST 1
atime MUST 1
bind MUST Bind mount 2
defaults MUST 1
dev MUST 1
diratime MUST 1
dirsync MUST 1
exec MUST 1
iversion MUST 1
lazytime MUST 1
loud MUST 1
mand MAY 1 (Deprecated in kernel 5.15, util-linux 2.38)
noatime MUST 1
nodev MUST 1
nodiratime MUST 1
noexec MUST 1
noiversion MUST 1
nolazytime MUST 1
nomand MAY 1
norelatime MUST 1
nostrictatime MUST 1
nosuid MUST 1
nosymfollow SHOULD 1 (Introduced in kernel 5.10, util-linux 2.38)
private MUST Bind mount propagation 2
ratime SHOULD Recursive atime 3
rbind MUST Recursive bind mount 2
rdev SHOULD Recursive dev 3
rdiratime SHOULD Recursive diratime 3
relatime MUST 1
remount MUST 1
rexec SHOULD Recursive exec 3
rnoatime SHOULD Recursive noatime 3
rnodiratime SHOULD Recursive nodiratime 3
rnoexec SHOULD Recursive noexec 3
rnorelatime SHOULD Recursive norelatime 3
rnostrictatime SHOULD Recursive nostrictatime 3
rnosuid SHOULD Recursive nosuid 3
rnosymfollow SHOULD Recursive nosymfollow 3
ro MUST 1
rprivate MUST Bind mount propagation 2
rrelatime SHOULD Recursive relatime 3
rro SHOULD Recursive ro 3
rrw SHOULD Recursive rw 3
rshared MUST Bind mount propagation 2
rslave MUST Bind mount propagation 2
rstrictatime SHOULD Recursive strictatime 3
rsuid SHOULD Recursive suid 3
rsymfollow SHOULD Recursive symfollow 3
runbindable MUST Bind mount propagation 2
rw MUST 1
shared MUST 1
silent MUST 1
slave MUST Bind mount propagation 2
strictatime MUST 1
suid MUST 1
symfollow SHOULD Opposite of nosymfollow
sync MUST 1
tmpcopyup MAY copy up the contents to a tmpfs
unbindable MUST Bind mount propagation 2
idmap SHOULD Indicates that the mount MUST have an idmapping applied.
This option SHOULD NOT be passed to the underlying mount(2) call. If uidMappings
or gidMappings are specified for the mount, the runtime MUST use those values for
the mount's mapping. If they are not specified, the runtime MAY use the container's
user namespace mapping, otherwise an error MUST be returned. If there are no
uidMappings and gidMappings specified and the container isn't using user
namespaces, an error MUST be returned. This SHOULD be implemented using
mount_setattr(MOUNT_ATTR_IDMAP), available since Linux 5.12.
ridmap SHOULD Indicates that the mount MUST have an idmapping applied, and
the mapping is applied recursively 3. This option SHOULD NOT be passed to the
underlying mount(2) call. If uidMappings or gidMappings are specified for the
mount, the runtime MUST use those values for the mount's mapping. If they are not
specified, the runtime MAY use the container's user namespace mapping, otherwise
an error MUST be returned. If there are no uidMappings and gidMappings specified
and the container isn't using user namespaces, an error MUST be returned. This
SHOULD be implemented using mount_setattr(MOUNT_ATTR_IDMAP), available
since Linux 5.12.
The "MUST" options correspond to mount(8).
Runtimes MAY also implement custom option strings that are not listed in the table
above. If a custom option string is already recognized by mount(8), the runtime
SHOULD follow the behavior of mount(8).
Runtimes SHOULD treat unknown options as filesystem-specific ones and pass
those as a comma-separated string to the fifth (const void *data) argument of
mount(2).
Example (Windows)
"mounts": [
{
"destination": "C:\\folder-inside-container",
"source": "C:\\folder-on-host",
"options": ["ro"]
}
]
POSIX-platform Mounts
For POSIX platforms the mounts structure has the following fields:
type (string, OPTIONAL) The type of the filesystem to be mounted.
Linux: filesystem types supported by the kernel as listed in /proc/filesystems (e.g.,
"minix", "ext2", "ext3", "jfs", "xfs", "reiserfs", "msdos", "proc", "nfs", "iso9660").
For bind mounts (when options include either bind or rbind), the type is a dummy,
often "none" (not listed in /proc/filesystems).
Solaris: corresponds to "type" of the fs resource in zonecfg(1M).
uidMappings (array of type LinuxIDMapping, OPTIONAL) The mapping to convert
UIDs from the source file system to the destination mount point. This SHOULD be
implemented using mount_setattr(MOUNT_ATTR_IDMAP), available since Linux
5.12. If specified, the options field of the mounts structure SHOULD contain either
idmap or ridmap to specify whether the mapping should be applied recursively for
rbind mounts, as well as to ensure that older runtimes will not silently ignore this
field. The format is the same as user namespace mappings. If specified, it MUST be
specified along with gidMappings.
gidMappings (array of type LinuxIDMapping, OPTIONAL) The mapping to convert
GIDs from the source file system to the destination mount point. This SHOULD be
implemented using mount_setattr(MOUNT_ATTR_IDMAP), available since Linux
5.12. If specified, the options field of the mounts structure SHOULD contain either
idmap or ridmap to specify whether the mapping should be applied recursively for
rbind mounts, as well as to ensure that older runtimes will not silently ignore this
field. For more details see uidMappings. If specified, it MUST be specified along
with uidMappings.
Example (Linux)
"mounts": [
{
"destination": "/tmp",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid","strictatime","mode=755","size=65536k"]
},
{
"destination": "/data",
"type": "none",
"source": "/volumes/testing",
"options": ["rbind","rw"]
}
]
Example (Solaris)
"mounts": [
{
"destination": "/opt/local",
"type": "lofs",
"source": "/usr/local",
"options": ["ro","nodevices"]
},
{
"destination": "/opt/sfw",
"type": "lofs",
"source": "/opt/sfw"
}
]
Process
process (object, OPTIONAL) specifies the container process. This property is
REQUIRED when start is called.
terminal (bool, OPTIONAL) specifies whether a terminal is attached to the process,
defaults to false. As an example, if set to true on Linux a pseudoterminal pair is
allocated for the process and the pseudoterminal pty is duplicated on the process's
standard streams.
consoleSize (object, OPTIONAL) specifies the console size in characters of the
terminal. Runtimes MUST ignore consoleSize if terminal is false or unset.
height (uint, REQUIRED)
width (uint, REQUIRED)
cwd (string, REQUIRED) is the working directory that will be set for the executable.
This value MUST be an absolute path.
env (array of strings, OPTIONAL) with the same semantics as IEEE Std
1003.1-2008's environ.
args (array of strings, OPTIONAL) with similar semantics to IEEE Std 1003.1-2008
execvp's argv. This specification extends the IEEE standard in that at least one entry
is REQUIRED (non-Windows), and that entry is used with the same semantics as
execvp's file. This field is OPTIONAL on Windows, and commandLine is REQUIRED if
this field is omitted.
commandLine (string, OPTIONAL) specifies the full command line to be executed on
Windows. This is the preferred means of supplying the command line on Windows.
If omitted, the runtime will fall back to escaping and concatenating fields from args
before making the system call into Windows.
POSIX process
For systems that support POSIX rlimits (for example Linux and Solaris), the process
object supports the following process-specific properties:
rlimits (array of objects, OPTIONAL) allows setting resource limits for the process.
Each entry has the following structure:
type (string, REQUIRED) the platform resource being limited.
Linux: valid values are defined in the getrlimit(2) man page, such as
RLIMIT_MSGQUEUE.
Solaris: valid values are defined in the getrlimit(3) man page, such as RLIMIT_CORE.
The runtime MUST generate an error for any values which cannot be mapped to a
relevant kernel interface. For each entry in rlimits, a getrlimit(3) on type MUST
succeed. For the following properties, rlim refers to the status returned by the
getrlimit(3) call.
soft (uint64, REQUIRED) the value of the limit enforced for the corresponding
resource. rlim.rlim_cur MUST match the configured value.
hard (uint64, REQUIRED) the ceiling for the soft limit that could be set by an
unprivileged process. rlim.rlim_max MUST match the configured value. Only a
privileged process (e.g. one with the CAP_SYS_RESOURCE capability) can raise a
hard limit.
If rlimits contains duplicated entries with same type, the runtime MUST generate an
error.
Linux Process
For Linux-based systems, the process object supports the following process-
specific properties.
apparmorProfile (string, OPTIONAL) specifies the name of the AppArmor profile for
the process. For more information about AppArmor, see AppArmor documentation.
capabilities (object, OPTIONAL) is an object containing arrays that specifies the
sets of capabilities for the process. Valid values are defined in the capabilities(7)
man page, such as CAP_CHOWN. Any value which cannot be mapped to a relevant
kernel interface, or cannot be granted otherwise MUST be logged as a warning by
the runtime. Runtimes SHOULD NOT fail if the container configuration requests
capabilities that cannot be granted, for example, if the runtime operates in a
restricted environment with a limited set of capabilities. capabilities contains the
following properties:
effective (array of strings, OPTIONAL) the effective field is an array of effective
capabilities that are kept for the process.
bounding (array of strings, OPTIONAL) the bounding field is an array of bounding
capabilities that are kept for the process.
inheritable (array of strings, OPTIONAL) the inheritable field is an array of
inheritable capabilities that are kept for the process.
permitted (array of strings, OPTIONAL) the permitted field is an array of permitted
capabilities that are kept for the process.
ambient (array of strings, OPTIONAL) the ambient field is an array of ambient
capabilities that are kept for the process.
noNewPrivileges (bool, OPTIONAL) setting noNewPrivileges to true prevents the
process from gaining additional privileges. As an example, the no_new_privs article
in the kernel documentation has information on how this is achieved using a prctl
system call on Linux.
oomScoreAdj (int, OPTIONAL) adjusts the oom-killer score in [pid]/oom_score_adj
for the process's [pid] in a proc pseudo-filesystem. If oomScoreAdj is set, the
runtime MUST set oom_score_adj to the given value. If oomScoreAdj is not set, the
runtime MUST NOT change the value of oom_score_adj.
This is a per-process setting, whereas disableOOMKiller is scoped for a memory
cgroup. For more information on how these two settings work together, see the
memory cgroup documentation section 10. OOM Contol.
scheduler (object, OPTIONAL) is an object describing the scheduler properties for
the process. The scheduler contains the following properties:
policy (string, REQUIRED) represents the scheduling policy. A valid list of values is:
SCHED_OTHER
SCHED_FIFO
SCHED_RR
SCHED_BATCH
SCHED_ISO
SCHED_IDLE
SCHED_DEADLINE
nice (int32, OPTIONAL) is the nice value for the process, affecting its priority. A
lower nice value corresponds to a higher priority. If not set, the runtime must use
the value 0.
priority (int32, OPTIONAL) represents the static priority of the process, used by
real-time policies like SCHED_FIFO and SCHED_RR. If not set, the runtime must use
the value 0.
flags (array of strings, OPTIONAL) is an array of strings representing scheduling
flags. A valid list of values is:
SCHED_FLAG_RESET_ON_FORK
SCHED_FLAG_RECLAIM
SCHED_FLAG_DL_OVERRUN
SCHED_FLAG_KEEP_POLICY
SCHED_FLAG_KEEP_PARAMS
SCHED_FLAG_UTIL_CLAMP_MIN
SCHED_FLAG_UTIL_CLAMP_MAX
runtime (uint64, OPTIONAL) represents the amount of time in nanoseconds during
which the process is allowed to run in a given period, used by the deadline
scheduler. If not set, the runtime must use the value 0.
deadline (uint64, OPTIONAL) represents the absolute deadline for the process to
complete its execution, used by the deadline scheduler. If not set, the runtime must
use the value 0.
period (uint64, OPTIONAL) represents the length of the period in nanoseconds used
for determining the process runtime, used by the deadline scheduler. If not set, the
runtime must use the value 0.
selinuxLabel (string, OPTIONAL) specifies the SELinux label for the process. For
more information about SELinux, see SELinux documentation.
ioPriority (object, OPTIONAL) configures the I/O priority settings for the container's
processes within the process group. The I/O priority settings will be automatically
applied to the entire process group, affecting all processes within the container. The
following properties are available:
class (string, REQUIRED) specifies the I/O scheduling class. Possible values are
IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, and IOPRIO_CLASS_IDLE.
priority (int, REQUIRED) specifies the priority level within the class. The value
should be an integer ranging from 0 (highest) to 7 (lowest).
User
The user for the process is a platform-specific structure that allows specific control
over which user the process runs as.
POSIX-platform User
For POSIX platforms the user structure has the following fields:
uid (int, REQUIRED) specifies the user ID in the container namespace.
gid (int, REQUIRED) specifies the group ID in the container namespace.
umask (int, OPTIONAL) specifies the [umask][umask_2] of the user. If unspecified,
the umask should not be changed from the calling process' umask.
additionalGids (array of ints, OPTIONAL) specifies additional group IDs in the
container namespace to be added to the process.
Note: symbolic name for uid and gid, such as uname and gname respectively, are
left to upper levels to derive (i.e. /etc/passwd parsing, NSS, etc)
Example (Linux)
"process": {
"terminal": true,
"consoleSize": {
"height": 25,
"width": 80
},
"user": {
"uid": 1,
"gid": 1,
"umask": 63,
"additionalGids": [5, 6]
},
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/root",
"args": [
"sh"
],
"apparmorProfile": "acme_secure_profile",
"selinuxLabel": "system_u:system_r:svirt_lxc_net_t:s0:c124,c675",
"ioPriority": {
"class": "IOPRIO_CLASS_IDLE",
"priority": 4
},
"noNewPrivileges": true,
"capabilities": {
"bounding": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"permitted": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"inheritable": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"effective": [
"CAP_AUDIT_WRITE",
"CAP_KILL"
],
"ambient": [
"CAP_NET_BIND_SERVICE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
]
}
Example (Solaris)
"process": {
"terminal": true,
"consoleSize": {
"height": 25,
"width": 80
},
"user": {
"uid": 1,
"gid": 1,
"umask": 7,
"additionalGids": [2, 8]
},
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/root",
"args": [
"/usr/bin/bash"
]
}
Windows User
For Windows based systems the user structure has the following fields:
username (string, OPTIONAL) specifies the user name for the process.
Example (Windows)
"process": {
"terminal": true,
"user": {
"username": "containeradministrator"
},
"env": [
"VARIABLE=1"
],
"cwd": "c:\\foo",
"args": [
"[Link]"
]
}
Hostname
hostname (string, OPTIONAL) specifies the container's hostname as seen by
processes running inside the container. On Linux, for example, this will change the
hostname in the container UTS namespace. Depending on your namespace
configuration, the container UTS namespace may be the runtime UTS namespace.
Example
"hostname": "mrsdalloway"
Domainname
domainname (string, OPTIONAL) specifies the container's domainname as seen by
processes running inside the container. On Linux, for example, this will change the
domainname in the container UTS namespace. Depending on your namespace
configuration, the container UTS namespace may be the runtime UTS namespace.
Example
"domainname": "[Link]"
Platform-specific configuration
linux (object, OPTIONAL) Linux-specific configuration. This MAY be set if the target
platform of this spec is linux.
windows (object, OPTIONAL) Windows-specific configuration. This MUST be set if
the target platform of this spec is windows.
solaris (object, OPTIONAL) Solaris-specific configuration. This MAY be set if the
target platform of this spec is solaris.
vm (object, OPTIONAL) Virtual-machine-specific configuration. This MAY be set if
the target platform and architecture of this spec support hardware virtualization.
zos (object, OPTIONAL) z/OS-specific configuration. This MAY be set if the target
platform of this spec is zos.
Example (Linux)
{
"linux": {
"namespaces": [
{
"type": "pid"
}
]
}
}
POSIX-platform Hooks
For POSIX platforms, the configuration structure supports hooks for configuring
custom actions related to the lifecycle of the container.
hooks (object, OPTIONAL) MAY contain any of the following properties:
prestart (array of objects, OPTIONAL, DEPRECATED) is an array of prestart hooks.
Entries in the array contain the following properties:
path (string, REQUIRED) with similar semantics to IEEE Std 1003.1-2008 execv's
path. This specification extends the IEEE standard in that path MUST be absolute.
args (array of strings, OPTIONAL) with the same semantics as IEEE Std
1003.1-2008 execv's argv.
env (array of strings, OPTIONAL) with the same semantics as IEEE Std
1003.1-2008's environ.
timeout (int, OPTIONAL) is the number of seconds before aborting the hook. If set,
timeout MUST be greater than zero.
The value of path MUST resolve in the runtime namespace.
The prestart hooks MUST be executed in the runtime namespace.
createRuntime (array of objects, OPTIONAL) is an array of createRuntime hooks.
Entries in the array contain the following properties (the entries are identical to the
entries in the deprecated prestart hooks):
path (string, REQUIRED) with similar semantics to IEEE Std 1003.1-2008 execv's
path. This specification extends the IEEE standard in that path MUST be absolute.
args (array of strings, OPTIONAL) with the same semantics as IEEE Std
1003.1-2008 execv's argv.
env (array of strings, OPTIONAL) with the same semantics as IEEE Std
1003.1-2008's environ.
timeout (int, OPTIONAL) is the number of seconds before aborting the hook. If set,
timeout MUST be greater than zero.
The value of path MUST resolve in the runtime namespace.
The createRuntime hooks MUST be executed in the runtime namespace.
createContainer (array of objects, OPTIONAL) is an array of createContainer hooks.
Entries in the array have the same schema as createRuntime entries.
The value of path MUST resolve in the runtime namespace.
The createContainer hooks MUST be executed in the container namespace.
startContainer (array of objects, OPTIONAL) is an array of startContainer hooks.
Entries in the array have the same schema as createRuntime entries.
The value of path MUST resolve in the container namespace.
The startContainer hooks MUST be executed in the container namespace.
poststart (array of objects, OPTIONAL) is an array of poststart hooks.
Entries in the array have the same schema as createRuntime entries.
The value of path MUST resolve in the runtime namespace.
The poststart hooks MUST be executed in the runtime namespace.
poststop (array of objects, OPTIONAL) is an array of poststop hooks.
Entries in the array have the same schema as createRuntime entries.
The value of path MUST resolve in the runtime namespace.
The poststop hooks MUST be executed in the runtime namespace.
Hooks allow users to specify programs to run before or after various lifecycle
events. Hooks MUST be called in the listed order. The state of the container MUST
be passed to hooks over stdin so that they may do work appropriate to the current
state of the container.
Prestart
The prestart hooks MUST be called as part of the create operation after the runtime
environment has been created (according to the configuration in [Link]) but
before the pivot_root or any equivalent operation has been executed. On Linux, for
example, they are called after the container namespaces are created, so they
provide an opportunity to customize the container (e.g. the network namespace
could be specified in this hook). The prestart hooks MUST be called before the
createRuntime hooks.
Note: prestart hooks were deprecated in favor of createRuntime, createContainer
and startContainer hooks, which allow more granular hook control during the create
and start phase.
The prestart hooks' path MUST resolve in the runtime namespace. The prestart
hooks MUST be executed in the runtime namespace.
CreateRuntime Hooks
The createRuntime hooks MUST be called as part of the create operation after the
runtime environment has been created (according to the configuration in
[Link]) but before the pivot_root or any equivalent operation has been
executed.
The createRuntime hooks' path MUST resolve in the runtime namespace. The
createRuntime hooks MUST be executed in the runtime namespace.
On Linux, for example, they are called after the container namespaces are created,
so they provide an opportunity to customize the container (e.g. the network
namespace could be specified in this hook).
The definition of createRuntime hooks is currently underspecified, and hook
authors should only expect from the runtime that the mount namespace has been
created and the mount operations performed. Other operations such as cgroups
and SELinux/AppArmor labels might not have been performed by the runtime.
CreateContainer Hooks
The createContainer hooks MUST be called as part of the create operation after the
runtime environment has been created (according to the configuration in
[Link]) but before the pivot_root or any equivalent operation has been
executed. The createContainer hooks MUST be called after the createRuntime
hooks.
The createContainer hooks' path MUST resolve in the runtime namespace. The
createContainer hooks MUST be executed in the container namespace.
For example, on Linux this would happen before the pivot_root operation is
executed but after the mount namespace was created and setup.
The definition of createContainer hooks is currently underspecified, and hook
authors should only expect from the runtime that the mount namespace and
different mounts will be set up. Other operations such as cgroups and SELinux/
AppArmor labels might not have been performed by the runtime.
StartContainer Hooks
The startContainer hooks MUST be called before the user-specified process is
executed as part of the start operation. This hook can be used to execute some
operations in the container, for example running the ldconfig binary on linux before
the container process is spawned.
The startContainer hooks' path MUST resolve in the container namespace. The
startContainer hooks MUST be executed in the container namespace.
Poststart
The poststart hooks MUST be called after the user-specified process is executed
but before the start operation returns. For example, this hook can notify the user
that the container process is spawned.
The poststart hooks' path MUST resolve in the runtime namespace. The poststart
hooks MUST be executed in the runtime namespace.
Poststop
The poststop hooks MUST be called after the container is deleted but before the
delete operation returns. Cleanup or debugging functions are examples of such a
hook.
The poststop hooks' path MUST resolve in the runtime namespace. The poststop
hooks MUST be executed in the runtime namespace.
Summary
See the below table for a summary of hooks and when they are called:
Name Namespace When
prestart (Deprecated) runtime After the start operation is called but before the
user-specified program command is executed.
createRuntime runtime During the create operation, after the runtime environment
has been created and before the pivot root or any equivalent operation.
createContainer container During the create operation, after the runtime
environment has been created and before the pivot root or any equivalent operation.
startContainer container After the start operation is called but before the user-
specified program command is executed.
poststart runtime After the user-specified process is executed but before the
start operation returns.
poststop runtime After the container is deleted but before the delete operation
returns.
Example
"hooks": {
"prestart": [
{
"path": "/usr/bin/fix-mounts",
"args": ["fix-mounts", "arg1", "arg2"],
"env": [ "key1=value1"]
},
{
"path": "/usr/bin/setup-network"
}
],
"createRuntime": [
{
"path": "/usr/bin/fix-mounts",
"args": ["fix-mounts", "arg1", "arg2"],
"env": [ "key1=value1"]
},
{
"path": "/usr/bin/setup-network"
}
],
"createContainer": [
{
"path": "/usr/bin/mount-hook",
"args": ["-mount", "arg1", "arg2"],
"env": [ "key1=value1"]
}
],
"startContainer": [
{
"path": "/usr/bin/refresh-ldcache"
}
],
"poststart": [
{
"path": "/usr/bin/notify-start",
"timeout": 5
}
],
"poststop": [
{
"path": "/usr/sbin/[Link]",
"args": ["[Link]", "-f"]
}
]
}
Annotations
annotations (object, OPTIONAL) contains arbitrary metadata for the container. This
information MAY be structured or unstructured. Annotations MUST be a key-value
map. If there are no annotations then this property MAY either be absent or an
empty map.
Keys MUST be strings. Keys MUST NOT be an empty string. Keys SHOULD be
named using a reverse domain notation - e.g. [Link].
The [Link] namespace for keys is reserved for use by this
specification, annotations using keys in this namespace MUST be as described in
this section. The following keys in the [Link] namespaces MAY be
used:
Key Definition
[Link] Indicates the operating system the container
image was built to run on. The annotation value MUST have a valid value for the os
property as defined in the OCI image specification. This annotation SHOULD only be
used in accordance with the OCI image specification's runtime conversion
specification.
[Link] Indicates the operating system version
targeted by the container image. The annotation value MUST have a valid value for
the [Link] property as defined in the OCI image specification. This annotation
SHOULD only be used in accordance with the OCI image specification's runtime
conversion specification.
[Link] Indicates mandatory operating system
features required by the container image. The annotation value MUST have a valid
value for the [Link] property as defined in the OCI image specification. This
annotation SHOULD only be used in accordance with the OCI image specification's
runtime conversion specification.
[Link] Indicates the architecture that binaries in
the container image are built to run on. The annotation value MUST have a valid
value for the architecture property as defined in the OCI image specification. This
annotation SHOULD only be used in accordance with the OCI image specification's
runtime conversion specification.
[Link] Indicates the variant of the architecture that
binaries in the container image are built to run on. The annotation value MUST have
a valid value for the variant property as defined in the OCI image specification. This
annotation SHOULD only be used in accordance with the OCI image specification's
runtime conversion specification.
[Link] Indicates the author of the container image.
The annotation value MUST have a valid value for the author property as defined in
the OCI image specification. This annotation SHOULD only be used in accordance
with the OCI image specification's runtime conversion specification.
[Link] Indicates the date and time when the
container image was created. The annotation value MUST have a valid value for the
created property as defined in the OCI image specification. This annotation SHOULD
only be used in accordance with the OCI image specification's runtime conversion
specification.
[Link] Indicates the signal that SHOULD be sent by
the container runtimes to kill the container. The annotation value MUST have a valid
value for the [Link] property as defined in the OCI image specification.
This annotation SHOULD only be used in accordance with the OCI image
specification's runtime conversion specification.
All other keys in the [Link] namespace not specified in the above table
are reserved and MUST NOT be used by subsequent specifications. Runtimes MUST
handle unknown annotation keys like any other unknown property.
Values MUST be strings. Values MAY be an empty string.
"annotations": {
"[Link]-cores": "2"
}
Extensibility
Runtimes MAY log unknown properties but MUST otherwise ignore them. That
includes not generating errors if they encounter an unknown property.
Valid values
Runtimes MUST generate an error when invalid or unsupported values are
encountered. Unless support for a valid value is explicitly required, runtimes MAY
choose which subset of the valid values they will support.
Configuration Schema Example
Here is a full example [Link] for reference.
{
"ociVersion": "1.0.1",
"process": {
"terminal": true,
"user": {
"uid": 1,
"gid": 1,
"additionalGids": [
5,
6
]
},
"args": [
"sh"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"permitted": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"inheritable": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"effective": [
"CAP_AUDIT_WRITE",
"CAP_KILL"
],
"ambient": [
"CAP_NET_BIND_SERVICE"
]
},
"rlimits": [
{
"type": "RLIMIT_CORE",
"hard": 1024,
"soft": 1024
},
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"apparmorProfile": "acme_secure_profile",
"oomScoreAdj": 100,
"selinuxLabel": "system_u:system_r:svirt_lxc_net_t:s0:c124,c675",
"ioPriority": {
"class": "IOPRIO_CLASS_IDLE",
"priority": 4
},
"noNewPrivileges": true
},
"root": {
"path": "rootfs",
"readonly": true
},
"hostname": "slartibartfast",
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
"options": [
"nosuid",
"noexec",
"nodev",
"relatime",
"ro"
]
}
],
"hooks": {
"prestart": [
{
"path": "/usr/bin/fix-mounts",
"args": [
"fix-mounts",
"arg1",
"arg2"
],
"env": [
"key1=value1"
]
},
{
"path": "/usr/bin/setup-network"
}
],
"poststart": [
{
"path": "/usr/bin/notify-start",
"timeout": 5
}
],
"poststop": [
{
"path": "/usr/sbin/[Link]",
"args": [
"[Link]",
"-f"
]
}
]
},
"linux": {
"devices": [
{
"path": "/dev/fuse",
"type": "c",
"major": 10,
"minor": 229,
"fileMode": 438,
"uid": 0,
"gid": 0
},
{
"path": "/dev/sda",
"type": "b",
"major": 8,
"minor": 0,
"fileMode": 432,
"uid": 0,
"gid": 0
}
],
"uidMappings": [
{
"containerID": 0,
"hostID": 1000,
"size": 32000
}
],
"gidMappings": [
{
"containerID": 0,
"hostID": 1000,
"size": 32000
}
],
"sysctl": {
"net.ipv4.ip_forward": "1",
"[Link]": "256"
},
"cgroupsPath": "/myRuntime/myContainer",
"resources": {
"network": {
"classID": 1048577,
"priorities": [
{
"name": "eth0",
"priority": 500
},
{
"name": "eth1",
"priority": 1000
}
]
},
"pids": {
"limit": 32771
},
"hugepageLimits": [
{
"pageSize": "2MB",
"limit": 9223372036854772000
},
{
"pageSize": "64KB",
"limit": 1000000
}
],
"memory": {
"limit": 536870912,
"reservation": 536870912,
"swap": 536870912,
"kernel": -1,
"kernelTCP": -1,
"swappiness": 0,
"disableOOMKiller": false
},
"cpu": {
"shares": 1024,
"quota": 1000000,
"period": 500000,
"realtimeRuntime": 950000,
"realtimePeriod": 1000000,
"cpus": "2-3",
"idle": 1,
"mems": "0-7"
},
"devices": [
{
"allow": false,
"access": "rwm"
},
{
"allow": true,
"type": "c",
"major": 10,
"minor": 229,
"access": "rw"
},
{
"allow": true,
"type": "b",
"major": 8,
"minor": 0,
"access": "r"
}
],
"blockIO": {
"weight": 10,
"leafWeight": 10,
"weightDevice": [
{
"major": 8,
"minor": 0,
"weight": 500,
"leafWeight": 300
},
{
"major": 8,
"minor": 16,
"weight": 500
}
],
"throttleReadBpsDevice": [
{
"major": 8,
"minor": 0,
"rate": 600
}
],
"throttleWriteIOPSDevice": [
{
"major": 8,
"minor": 16,
"rate": 300
}
]
}
},
"rootfsPropagation": "slave",
"seccomp": {
"defaultAction": "SCMP_ACT_ALLOW",
"architectures": [
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
],
"syscalls": [
{
"names": [
"getcwd",
"chmod"
],
"action": "SCMP_ACT_ERRNO"
}
]
},
"timeOffsets": {
"monotonic": {
"secs": 172800,
"nanosecs": 0
},
"boottime": {
"secs": 604800,
"nanosecs": 0
}
},
"namespaces": [
{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
},
{
"type": "user"
},
{
"type": "cgroup"
},
{
"type": "time"
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_stats",
"/proc/sched_debug"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
],
"mountLabel": "system_u:object_r:svirt_sandbox_file_t:s0:c715,c811"
},
"annotations": {
"[Link].key1": "value1",
"[Link].key2": "value2"
}
}
#
# Script @test from content/installation/[Link]
#
#----------------------------------------------------------------------# Start 1
echo "Block 'checkForBash' (1/9 in test) of content/installation/[Link]"
####
set | grep BASH > /dev/null || echo "Vanadium installation requires Bash."
#----------------------------------------------------------------------# End 1
#----------------------------------------------------------------------# Start 2
echo "Block 'define_JIRI_ROOT' (2/9 in test) of content/installation/step-by-
[Link]"
####
# Uses existing $JIRI_ROOT environment variable, defaults to ${HOME}/vanadium if
# $JIRI_ROOT is not set.
export JIRI_ROOT=${JIRI_ROOT:=${HOME}/vanadium}
#----------------------------------------------------------------------# End 2
#----------------------------------------------------------------------# Start 3
echo "Block 'define_VANADIUM_RELEASE' (3/9 in test) of content/installation/step-
[Link]"
####
# Needed for tutorials only.
export VANADIUM_RELEASE=${JIRI_ROOT}/release/go
#----------------------------------------------------------------------# End 3
bash -euo pipefail <<'HANDLED_SCRIPT'
function handledTrouble() {
echo " "
echo "Unable to continue!"
exit 1
}
trap handledTrouble INT TERM
#
# Script @test from content/installation/[Link]
#
#----------------------------------------------------------------------# Start 1
echo "Block 'checkForBash' (1/9 in test) of content/installation/[Link]"
####
set | grep BASH > /dev/null || echo "Vanadium installation requires Bash."
#----------------------------------------------------------------------# End 1
#----------------------------------------------------------------------# Start 2
echo "Block 'define_JIRI_ROOT' (2/9 in test) of content/installation/step-by-
[Link]"
####
# Uses existing $JIRI_ROOT environment variable, defaults to ${HOME}/vanadium if
# $JIRI_ROOT is not set.
export JIRI_ROOT=${JIRI_ROOT:=${HOME}/vanadium}
#----------------------------------------------------------------------# End 2
#----------------------------------------------------------------------# Start 3
echo "Block 'define_VANADIUM_RELEASE' (3/9 in test) of content/installation/step-
[Link]"
####
# Needed for tutorials only.
export VANADIUM_RELEASE=${JIRI_ROOT}/release/go
#----------------------------------------------------------------------# End 3
#----------------------------------------------------------------------# Start 4
echo "Block 'check_JIRI_ROOT' (4/9 in test) of content/installation/step-by-
[Link]"
####
# Check that the JIRI_ROOT path does not exist.
if [[ -e "${JIRI_ROOT}" ]]; then
echo ""
echo "ERROR: The JIRI_ROOT path already exists: ${JIRI_ROOT}"
echo "To proceed with a fresh install remove the directory and re-run:"
echo ""
echo " rm -rf ${JIRI_ROOT}"
echo ""
echo "Or set JIRI_ROOT to a different path."
exit 1
fi
#----------------------------------------------------------------------# End 4
#----------------------------------------------------------------------# Start 5
echo "Block 'runBootstrapScript' (5/9 in test) of content/installation/step-by-
[Link]"
####
# This can take several minutes.
curl -f [Link] | bash
#----------------------------------------------------------------------# End 5
#----------------------------------------------------------------------# Start 6
echo "Block 'addDevtoolsToPath' (6/9 in test) of content/installation/step-by-
[Link]"
####
export PATH=$JIRI_ROOT/.jiri_root/scripts:$PATH
#----------------------------------------------------------------------# End 6
#----------------------------------------------------------------------# Start 7
echo "Block 'packagesBaseProfile' (7/9 in test) of content/installation/step-by-
[Link]"
####
# Print the package installation command.
jiri profile os-packages v23:base
# Run the package installation command as root.
if [ -n "$(jiri profile os-packages v23:base)" ]; then
sudo $(jiri profile os-packages v23:base)
fi
#----------------------------------------------------------------------# End 7
#----------------------------------------------------------------------# Start 8
echo "Block 'installBaseProfile' (8/9 in test) of content/installation/step-by-
[Link]"
####
jiri profile install v23:base
#----------------------------------------------------------------------# End 8
#----------------------------------------------------------------------# Start 9
echo "Block 'installVanadiumBinaries' (9/9 in test) of content/installation/step-by-
[Link]"
####
# Install specific tools needed for the tutorials.
jiri go install [Link]/x/ref/cmd/... [Link]/x/ref/services/agent/... [Link]/x/ref/services/
mounttable/... [Link]/x/ref/services/syncbase/...
#----------------------------------------------------------------------# End 9
echo " "
echo "All done. No errors."
HANDLED_SCRIPT// Install the C# / .NET helper library from [Link]/docs/csharp/
install
using System;
using Twilio;
using [Link];
class Program
{
static void Main(string[] args)
{
// Find your Account SID and Auth Token at [Link]/console
// and set the environment variables. See [Link]
string accountSid =
[Link]("TWILIO_ACCOUNT_SID");
string authToken =
[Link]("TWILIO_AUTH_TOKEN");
[Link](accountSid, authToken);
var message = [Link](
body: "Hello there!",
from: new [Link]("whatsapp:+14155238886"),
to: new [Link]("whatsapp:+15005550006")
);
[Link]([Link]);
}
}
Install-Package Twilio -ProjectName MyProject
// Install the C# / .NET helper library from [Link]/docs/csharp/install
using System;
using [Link];
using Twilio;
using [Link];
class Program
{
static void Main(string[] args)
{
// Find your Account SID and Auth Token at [Link]/console
// and set the environment variables. See [Link]
string accountSid =
[Link]("TWILIO_ACCOUNT_SID");
string authToken =
[Link]("TWILIO_AUTH_TOKEN");
[Link](accountSid, authToken);
var mediaUrl = new[] {
new Uri("[Link]
ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=668&q=80
")
}.ToList();
var message = [Link](
mediaUrl: mediaUrl,
from: new [Link]("whatsapp:+14155238886"),
to: new [Link]("whatsapp:+15017122661")
);
[Link]([Link]);
}
}
{
"account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"api_version": "2010-04-01",
"body": "Hello! ! ",
"date_created": "Thu, 24 Aug 2023 [Link] +0000",
"date_sent": "Thu, 24 Aug 2023 [Link] +0000",
"date_updated": "Thu, 24 Aug 2023 [Link] +0000",
"direction": "outbound-api",
"error_code": null,
"error_message": null,
"from": "whatsapp:+14155238886",
"num_media": "0",
"num_segments": "1",
"price": null,
"price_unit": null,
"messaging_service_sid": "MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"sid": "SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"status": "queued",
"subresource_uris": {
"media": "/2010-04-01/Accounts/
ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages/
SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/[Link]"
},
"to": "whatsapp:+15017122661",
"uri": "/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/
Messages/[Link]"
}
csc [Link]
program
"subresource_uris": {"media": "/2010-04-01/Accounts/ACxxxxxxxx/Messages/
SMxxxxxxxxxxxxx/[Link]"}
<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Message>Thanks for the message!</Message>
</Response>
using [Link];
using [Link];
using [Link];
using [Link];
using [Link];
using [Link];
using HttpPostAttribute = [Link];
using static [Link];
using Microsoft.Win32;
using [Link];
using System;
using [Link];
using [Link];
namespace [Link]
{
public class WhatsAppMediaController : TwilioController
{
public static Uri GOOD_BOY_URL = new Uri("[Link]
photo-1518717758536-85ae29035b6d?ixlib=" +
"rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1350&q=80");
public virtual SystemWebClient webClient()
{
return new SystemWebClient();
}
[HttpPostAttribute]
public TwiMLResult Create(FormCollection formCollection)
{
var numMedia = [Link](formCollection["NumMedia"]);
var response = new MessagingResponse();
if (numMedia > 0)
{
var message = new Message();
[Link]("Thanks for the image! Here's one for you!");
[Link](GOOD_BOY_URL);
[Link](message);
} else
{
[Link]("Send us an image!");
}
return TwiML(response);
}
private string AppDataDirectory
{
get {
var dataDirectory = [Link](
[Link],
@"..\..\..\WhatsAppMediaTutorial\App_Data\"
);
return [Link](dataDirectory);
}
}
public static string GetDefaultExtension(string mimeType)
{
var key = [Link](@"MIME\Database\Content
Type\" + mimeType, false);
var value = key != null ? [Link]("Extension", null) : null;
var result = value != null ? [Link]() : [Link];
return result;
}
private void LogParamsAsJson() =>
[Link]([Link](
[Link](r => r, r => [Link][r]),
[Link]
).ToString());
}
}
You
sudo unzip ~/Downloads/[Link] -d /usr/local/bin
ngrok config add-authtoken
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF
ngrok http [Link]
ngrok http 80 --domain *.[Link]
ngrok tcp 22 --remote-addr [Link]
import ngrok
listener = [Link]("localhost:8080", authtoken_from_env=True)
print(f"Ingress established at: {[Link]()}");
curl [Link] -H "authorization: Bearer {API Key}" -H "ngrok-version:
2"
ngrok config add-api-key "{API Key}"
ngrok api endpoints list
ngrok config check
api_key: 24yRd5U3DestCQapJrrVHLOqiAC_7RviwRqpd3wc9dKLujQZN
authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
log: /var/log/[Link]
metadata: bad8c1c0-8fce-11e4-b4a9-0800200c9a66
curl \
-X GET \
-H "Authorization: Bearer
{2g5KrllbQjlrDn7is1sl2SVkViy_6GVDo516KduTjwWWps71K}" \
-H "Ngrok-Version: 2" \
[Link]
{
"tunnel_sessions": [
{
"agent_version": "3.1000.0-development",
"credential": {
"id": "cr_2NTVGrph0tDzS8vZOKkyy5TCcgm",
"uri": "[Link]
cr_2NTVGrph0tDzS8vZOKkyy5TCcgm"
},
"id": "ts_2NTVGvz42oUMzkeQYYkikVVavdh",
"ip": "[Link]",
"os": "linux",
"region": "us",
"started_at": "2023-03-24T[Link]Z",
"transport": "ngrok/2",
"uri": "[Link]
ts_2NTVGvz42oUMzkeQYYkikVVavdh"
}
],
"uri": "[Link]
"next_page_uri": null
}
curl \
-X GET \
-H "Authorization: Bearer
{2g5KrllbQjlrDn7is1sl2SVkViy_6GVDo516KduTjwWWps71K}" \
-H "Ngrok-Version: 2" \
[Link]
{
"agent_version": "3.1000.0-development",
"credential": {
"id": "cr_2NTVH4t8mesGa7guptJ9k61r3ee",
"uri": "[Link]
},
"id": "ts_2NTVGziuaThwbjlN9eh32Y9xkJP",
"ip": "[Link]",
"os": "linux",
"region": "us",
"started_at": "2023-03-24T[Link]Z",
"transport": "ngrok/2",
"uri": "[Link]
ts_2NTVGziuaThwbjlN9eh32Y9xkJP"
}
curl \
-X POST \
-H "Authorization: Bearer
{2g5KrllbQjlrDn7is1sl2SVkViy_6GVDo516KduTjwWWps71K}" \
-H "Content-Type: application/json" \
-H "Ngrok-Version: 2" \
-d '{}' \
[Link]
authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
api_key: 24yRd5U3DestCQapJrrVHLOqiAC_7RviwRqpd3wc9dKLujQZN
connect_timeout: 30s
console_ui: true
console_ui_color: transparent
dns_resolver_ips:
- [Link]
- [Link]
heartbeat_interval: 1m
heartbeat_tolerance: 5s
inspect_db_size: 104857600 # 100mb
inspect_db_size: 50000000
log_level: info
log_format: json
log: /var/log/[Link]
metadata: '{"serial": "00012xa-33rUtz9", "comment": "For customer
alan@[Link]"}'
proxy_url: socks5://localhost:9150
remote_management: false
root_cas: trusted
update_channel: stable
update_check: false
version: 2
web_addr: localhost:4040
tunnels:
website:
addr: 8888
basic_auth:
- "bob:bobpassword"
schemes:
- https
host_header: "[Link]"
inspect: false
proto: http
domain: [Link]
e2etls:
addr: 9000
proto: tls
domain: [Link]
crt: [Link]
key: [Link]
policyenforced:
policy:
inbound:
- name: LimitIPs
expressions:
- "[Link] != '[Link]'"
actions:
- type: deny
addr: 8000
proto: tcp
ssh-access:
addr: 22
proto: tcp
remote_addr: [Link]
my-load-balanced-website:
labels:
- env=prod
- team=infra
addr: 8000
tunnels:
httpbin:
proto: http
addr: 8000
domain: [Link]
demo:
proto: http
addr: 9090
domain: [Link]
inspect: false
ngrok start httpbin
tunnels:
my-cool-website:
labels:
- env=prod
- team=infra
addr: 8000
inspect: false
ssh-tunnel:
labels:
- hostname=my-hostname
- service=ssh
- team=development
addr: 22
pip install ngrok-api
import ngrok
# construct the api client
client =
[Link]("<2g5KrllbQjlrDn7is1sl2SVkViy_6GVDo516KduTjwWWps71K>")
# list all online tunnels
for t in [Link]():
print(t)
# create an ip policy the allows traffic from some subnets
policy = client.ip_policies.create()
for cidr in ["[Link]/8", "[Link]/8"]:
client.ip_policy_rules.create(cidr=cidr, ip_policy_id=[Link], action="allow")
Automatic
ak_2dvZEkjdgaLw0K0a5G0RX80fTjJ
ngrok http --domain=[Link] 80
edge=edghts_2d7h0YrdnMObWZ8UvuA5kKFYMIh
ngrok tunnel --label edge=edghts_2d7h0YrdnMObWZ8UvuA5kKFYMIh http://
localhost:80
ngrok config edit
version: 2
authtoken: 2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF
tunnels:
my_tunnel_name:
labels:
- edge=edghts_2d7h0YrdnMObWZ8UvuA5kKFYMIh
addr: [Link]
ngrok start bkdtg_2d7h0WShluW1A1A7HEBrT9sfThd
ngrok start --all
brew install ngrok/ngrok/ngrok
ngrok config add-authtoken
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF
ngrok tunnel --label edge=edghts_2d7h0YrdnMObWZ8UvuA5kKFYMIh http://
localhost:80
ID of tunnel:
bkdtg_2d7h0WShluW1A1A7HEBrT9sfThd
endpoint : edghts_2d7h0YrdnMObWZ8UvuA5kKFYMIh
edghts_2d7h0YrdnMObWZ8UvuA5kKFYMIh
Status
edge=edghts_2d7h0YrdnMObWZ8UvuA5kKFYMIh
npx make-magic
npm install magic-sdk
const magic = new Magic([Link].NEXT_PUBLIC_MAGIC_API_KEY, {
network: {
rpcUrl: "<[Link]
chainId: 11155111,
},
})
const web3 = new Web3((magic as any).rpcProvider))
function sendEth(amount: number, recipientAddress: string) {
const senderAddress = [Link]().[0]
const txnParams = {
from: senderAddress,
to: recipientAddress,
value: [Link](amount, "ether"),
gas: 21000,
}
[Link]
.sendTransaction(txnParams as any)
.on("transactionHash", (txHash: string) => {
[Link]("Transaction hash:", txHash)
})
.then((receipt: any) => {
[Link]("Transaction receipt:", receipt)
})
}
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
try {
await [Link]();
} catch {
// Handle errors if required!
}
npm install magic-sdk
import { Magic } from 'magic-sdk';
let magic;
// Construct with an API key:
magic = new Magic('pk_live_1BE0FA6CAAF15473');
// Construct with an API key and locale:
magic = new Magic('pk_live_1BE0FA6CAAF15473', { locale: 'es' });
// Construct with an API key and testMode enabled:
magic = new Magic('pk_live_1BE0FA6CAAF15473', { testMode: true });
// Construct with an API key and defer the loading of the Magic Iframe's assets
magic = new Magic('pk_live_1BE0FA6CAAF15473', { deferPreload: true });
// Construct with an API key plus options:
magic = new Magic('pk_live_1BE0FA6CAAF15473', { network: 'goerli', endpoint:
'...' });
// Construct with API key and custom node options:
const customNodeOptions = {
rpcUrl: '[Link] // your ethereum, polygon, or optimism mainnet/
testnet rpc URL
chainId: 137 // corresponding chainId for your rpc url
}
magic = new Magic('pk_live_1BE0FA6CAAF15473', {
network: customNodeOptions, // connected to Polygon Mainnet!
});
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473', { deferPreload: true });
// ...
[Link];
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
[Link]().then(() => [Link]('Magic <iframe> loaded.'));
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
[Link];
[Link];
[Link];
[Link];
[Link];
[Link];
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
try {
await [Link]({
email: 'hello@[Link]',
redirectURI: '[Link]
});
// When the user clicks their magic link, they will be logged-in here
// and in the "callback" context.
} catch {
// Handle errors if required!
}
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
try {
await [Link]();
} catch {
// Handle errors if required!
}
// You can also provide the credential yourself
try {
await [Link]('iamacredentialtoken');
} catch {
// Handle errors if required!
}
// You can also provide the credential as a query string
try {
await [Link]([Link]);
} catch {
// Handle errors if required!
}
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
// Initiates the flow to update a user's current email to a new one.
try {
...
/* Assuming user is logged in */
await [Link]({ email: 'grateful345ii@[Link]' });
} catch {
// Handle errors if required!
}
/**
* Initiates the flow to update a user's current email to a new one,
* without showing an out-of-the box UI.
*/
try {
/* Assuming user is logged in */
await [Link]({ email: 'grateful345ii@[Link]', showUI:
false });
} catch {
// Handle errors if required!
}
import {
Magic,
RecencyCheckEventOnReceived,
RecencyCheckEventEmit,
UpdateEmailEventOnReceived,
UpdateEmailEventEmit,
} from 'magic-sdk';
const magic = new Magic('API_KEY');
try {
/* Initiates update email flow to update a user's current email to a new one */
const handle = await [Link]({
email: 'grateful345ii@[Link]',
showUI: false,
});
/*
Recency Check Events & Emit
*/
let recencyCheckRetries = 5;
handle
.on([Link], () => {
// Email OTP has been sent to the user's primary email
// Prompt the user for the OTP
const otp = [Link]('Primary Email OTP');
// Send the OTP for verification
[Link]([Link], otp);
})
.on([Link], () => {
[Link]('Primary Factor has been verified');
})
.on([Link], () => {
// Email OTP was undeliverable to user's primary email
// Cancel update email request
[Link]([Link]);
[Link]('Email Not Deliverable');
})
.on([Link], () => {
// User entered expired OTP
[Link]([Link]);
[Link]('Expired OTP');
})
.on([Link], () => {
// User entered invalid OTP; you may limit retries and cancel the request
if (!recencyCheckRetries) {
// Cancel update email request
alert('Too many attempts');
[Link]([Link]);
} else {
const otp = [Link](
`Invalid code, Please enter OTP again. Retries left: ${recencyCheckRetries}`,
);
recencyCheckRetries--;
// Send the OTP for verification
[Link]([Link], otp);
}
});
/*
Update Email Events & Emit
*/
let updateEmailRetries = 5;
handle
.on([Link], () => {
// Email OTP has been sent to the user's secondary email
// Prompt the user for the OTP
const otp = [Link]('Enter new Email OTP');
// Send the OTP for verification
[Link]([Link], otp);
})
.on([Link], () => {
// Email OTP was undeliverable to user's secondary email
const newEmail = [Link]('Invalid Email, Enter a new Email');
// Try same or new email address
[Link](
[Link],
newEmail || email,
);
})
.on([Link], () => {
// Account already exists for new email address
const newEmail = [Link]('Email address already in use, Enter a different
Email');
// Try same or new email address
[Link](
[Link],
newEmail || email,
);
})
.on([Link], () => {
// User entered invalid OTP; you may limit retries and cancel the request
if (!updateEmailRetries) {
// Cancel update email request
alert('Too many attempts');
[Link]([Link]);
} else {
const otp = [Link](
`Invalid code, Please enter OTP again. Retries left: ${updateEmailRetries}`,
);
updateEmailRetries--;
// Send the OTP for verification
[Link]([Link], otp);
}
})
.on([Link], () => {
// Update email successful
alert('Email Updated');
})
handle
.on('error', () => {
// is called if the Promise rejects
alert('Error occurred');
});
const res = await handle;
[Link](res);
// Can also handle successful email update here
alert('Email Updated');
} catch {
// Handle errors if required!
}
import { Magic, RPCError, RPCErrorCode } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
try {
await [Link]({ email: 'hello@[Link]', showUI:
false });
} catch (err) {
if (err instanceof RPCError) {
switch ([Link]) {
case [Link]:
// Handle errors accordingly :)
break;
}
}
}
import { Magic } from 'magic-sdk';
const magic = new Magic('pk_live_1BE0FA6CAAF15473');
[Link];
[Link];
[Link];
[Link];
[Link];
[Link];
[Link];
[Link]; // enterprise only
[Link];
const claim = [Link]({ ... }); // Data representing the user's access
const proof = sign(claim); // Sign data with Ethereum's `personal_sign` method
const DIDToken = btoa([Link]([proof, claim]));
// Construct the user's claim
const claim = [Link]({
iat: [Link]([Link]() / 1000),
ext: [Link]([Link]() / 1000) + lifespan,
iss: `did:ethr:${user_public_address}`,
sub: subject,
aud: audience,
nbf: [Link]([Link]() / 1000),
tid: uuid(),
});
// Sign the claim with the user's private key
// (this way the claim is verifiable and impossible to forge).
const proof = sign(claim);
// Encode the DIDToken so it can be transported over HTTP.
const DIDToken = btoa([Link]([proof, claim]));
pip install magic-admin
publishable key : pk_live_4ECCAEA9476D332A
from magic_admin import Magic
magic = Magic(
api_secret_key='<sk_live_4CDFABE4C43580D6>',
retries=5,
timeout=5,
backoff_factor=0.01,
)
npx make-magic \
--template nextjs-dedicated-wallet \
--network polygon-amoy \
--publishable-api-key pk_live_4ECCAEA9476D332A \
--login-methods EmailOTP --login-methods SMSOTP
from magic_admin import Magic
magic = Magic(api_secret_key='<sk_live_A2ED5BC3F492BBC0>')
[Link]
[Link].get_issuer
[Link].get_public_address
[Link]
[Link]
Token.get_issuer(did_token)
Token.get_public_address(did_token)
[Link](did_token)
[Link](did_token)
from magic_admin import Magic
magic = Magic(api_secret_key='<sk_live_A2ED5BC3F492BBC0>')
[Link]
[Link].get_metadata_by_issuer
[Link].get_metadata_by_public_address
[Link].get_metadata_by_token
[Link].logout_by_issuer
[Link].logout_by_public_address
[Link].logout_by_token
User.get_metadata_by_issuer(issuer)
User.get_metadata_by_public_address(public_address)
User.get_metadata_by_token(did_token)
User.logout_by_issuer(issuer)
User.logout_by_public_address(public_address)
User.logout_by_token(did_token)
from magic_admin.response import MagicResponse
[Link]
[Link]
[Link]
MagicError
|
|------- RequestError
|
| ------- RateLimitingError
| ------- BadRequestError
| ------- AuthenticationError
| ------- ForbiddenError
| ------- APIError
| ------- APIConnectionError
|
| ------- DIDTokenInvalid
|
| ------- DIDTokenMalformed
|
| ------- DIDTokenExpired
MagicError(message=None)
RequestError(
message=None, http_status=None, http_code=None, http_resp_data=None,
http_message=None, http_error_code=None, http_request_params=None,
http_request_data=None, http_method=None,
)
try:
# Make requests to Magic server.
except RateLimitingError as e:
pass
except BadRequestError as e:
pass
except AuthenticationError as e:
pass
except ForbiddenError as e:
pass
except APIError as e:
pass
except APIConnectionError as e:
pass
except DIDTokenInvalid as e:
pass
except DIDTokenMalformed as e:
pass
except DIDTokenExpired as e:
pass
const claim = [Link]({ ... }); // Data representing the user's access
const proof = sign(claim); // Sign data with Ethereum's `personal_sign` method
const DIDToken = btoa([Link]([proof, claim]));
const claim = [Link]({ ... }); // Data representing the user's access
const proof = sign(claim); // Sign data with Ethereum's `personal_sign` method
const DIDToken = btoa([Link]([proof, claim]));
// Construct the user's claim
const claim = [Link]({
iat: [Link]([Link]() / 1000),
ext: [Link]([Link]() / 1000) + lifespan,
iss: `did:ethr:${user_public_address}`,
sub: subject,
aud: audience,
nbf: [Link]([Link]() / 1000),
tid: uuid(),
});
// Sign the claim with the user's private key
// (this way the claim is verifiable and impossible to forge).
const proof = sign(claim);
// Encode the DIDToken so it can be transported over HTTP.
const DIDToken = btoa([Link]([proof, claim]));
// Construct the user's claim
const claim = [Link]({
iat: [Link]([Link]() / 1000),
ext: [Link]([Link]() / 1000) + lifespan,
iss: `did:ethr:${user_public_address}`,
sub: subject,
aud: audience,
nbf: [Link]([Link]() / 1000),
tid: uuid(),
});
// Sign the claim with the user's private key
// (this way the claim is verifiable and impossible to forge).
const proof = sign(claim);
// Encode the DIDToken so it can be transported over HTTP.
const DIDToken = btoa([Link]([proof, claim]));
import { Magic } from 'magic-sdk';
// must use a Dedicated Wallet API Key
const magic = new Magic('API_KEY');
// log in a user by their email
try {
await [Link]({ email: 'grateful345ii@[Link]' });
} catch {
// Handle errors if required!
}
// log in a user by their email, without showing an out-of-the box UI.
try {
await [Link]({ email: 'grateful345ii@[Link]', showUI:
false });
} catch {
// Handle errors if required!
}
pip install magic-admin
const customNodeOptions = {
rpcUrl: '[Link] // Your own node URL
chainId: 1011, // Your own node's chainId
};
// Setting network to localhost blockchain
const magic = new Magic(' pk_live_4ECCAEA9476D332A', {
network: customNodeOptions,
});
<!DOCTYPE html>
<html>
<head>
<title>Magic Hello World ! </title>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link rel="stylesheet" type="text/css" href="[Link]" />
<!-- " Install Magic SDK -->
<script src="[Link]
<script>
/* # Initialize Magic Instance */
let magic = new Magic("pk_live_D5EA1E346A791DB7");
/* $ Implement Render Function */
const render = async () => {
const isLoggedIn = await [Link]();
/* Show login form if user is not logged in */
let html = `
<h1>Please sign up or login</h1>
<form onsubmit="handleLogin(event)">
<input type="email" name="email" required="required" placeholder="Enter
your email" />
<button type="submit">Send</button>
</form>
`;
if (isLoggedIn) {
/* Get user metadata including email */
const userMetadata = await [Link]();
html = `
<h1>Current user: ${[Link]}</h1>
<button onclick="handleLogout()">Logout</button>
`;
}
[Link]("app").innerHTML = html;
};
/* ! Implement Login Handler */
const handleLogin = async (e) => {
[Link]();
const email = new FormData([Link]).get("email");
if (email) {
/* One-liner login with email OTP " */
await [Link]({ email });
render();
}
};
/* # Implement Logout Handler */
const handleLogout = async () => {
await [Link]();
render();
};
</script>
</head>
<body onload="render()">
<div id="app">Loading...</div>
</body>
yarn add magic-sdk
npx make-magic \
--template nextjs-dedicated-wallet \
--network etherlink-ghostnet \
--publishable-api-key pk_live_1BE0FA6CAAF15473 \
--login-methods EmailOTP --login-methods SMSOTP
npm install magic-sdk @magic-ext/webauthn
import { Magic } from 'magic-sdk';
import { WebAuthnExtension } from '@magic-ext/webauthn';
const magic = new Magic('YOUR_API_KEY', {
extensions: [new WebAuthnExtension()],
});
// register a user by their username
try {
const token = await [Link]({ username: 'username' });
} catch (e) {
// Handle errors if required!
}
// login a user by their username
try {
const token = await [Link]({ username: 'username' });
} catch (e) {
// Handle errors if required!
}
// Initiates the flow to get webauthn metadata for current account.
try {
const metadata = await [Link]();
/* webauthn metadata shape
{
"devicesInfo": [
{
"id": "EjI_EFJhB6cdCj6rHPRHUcFCn6NnywALuWjQyPe0_dI=",
"nickname": "",
"transport": "internal",
"user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3)
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"
}
],
"username": "username"
}
*/
} catch (e) {
// Handle errors if required!
}
const claim = [Link]({ ... }); // Data representing the user's access
const proof = sign(claim); // Sign data with Ethereum's `personal_sign` method
const DIDToken = btoa([Link]([proof, claim]));
{
"embeds": [
{
"type": "article",
"url": "[Link]
"title": "O5 Council",
"description": "The O5 Council is one of the two legislative bodies of the SCP
Foundation and is the most senior of the two. The Council is the highest rank
possible in the Foundation, consisting of O5...",
"provider": {
"name": "SCPF Foundation Roblox Wikia"
},
"thumbnail": {
"url": "[Link]
Christ_Arms_coat_of_.jpeg/revision/latest?cb=20240102051436",
"proxy_url": "[Link]
o6kjq8k12Exa5jJbD1IYPky8IDybS3SPsIAUlW2NtEA/%3Fcb%3D20240102051436/
https/[Link]/scpf-foundation-roblox/images/5/5a/
Christ_Arms_coat_of_.jpeg/revision/latest",
"width": 871,
"height": 1018,
"placeholder": "qDkGFgK2FQmBq7ljitiFp3h3mkf/6Ok=",
"placeholder_version": 1
}
}
]
}
import os
from sendgrid import SendGridAPIClient
sg = SendGridAPIClient([Link]('SENDGRID_API_KEY'))
headers = {'Accept': 'application/json'}
response = [Link](
request_headers=headers
)
print(response.status_code)
print([Link])
print([Link])
Example Incoming Webhook
{
"name": "test webhook",
"type": 1,
"channel_id": "199737254929760256",
"token":
"3d89bb7572e0fb30d8128367b3b1b44fecd1726de135cbe28a41f8b2f777c372ba2
939e72279b94526ff5d1bd4358d65cf11",
"avatar": null,
"guild_id": "199737254929760256",
"id": "223704706495545344",
"application_id": null,
"user": {
"username": "test",
"discriminator": "7479",
"id": "190320984123768832",
"avatar": "b004ec1740a63ca06ae2e14c5cee11f3",
"public_flags": 131328
}
}
Example Channel Follower Webhook
{
"type": 2,
"id": "752831914402115456",
"name": "Guildy name",
"avatar": "bb71f469c158984e265093a81b3397fb",
"channel_id": "561885260615255432",
"guild_id": "56188498421443265",
"application_id": null,
"source_guild": {
"id": "56188498421476534",
"name": "Guildy name",
"icon": "bb71f469c158984e265093a81b3397fb"
},
"source_channel": {
"id": "5618852344134324",
"name": "announcements"
},
"user": {
"username": "test",
"discriminator": "7479",
"id": "190320984123768832",
"avatar": "b004ec1740a63ca06ae2e14c5cee11f3",
"public_flags": 131328
}
}
Example Application Webhook
{
"type": 3,
"id": "658822586720976555",
"name": "Clyde",
"avatar": "689161dc90ac261d00f1608694ac6bfd",
"channel_id": null,
"guild_id": null,
"application_id": "658822586720976555"
}
{
"embeds": [
{
"type": "article",
"url": "[Link]
"title": "SCP Foundation",
"description": "==(~Foundation ~ Men In Black Agency) == ==*Foundation
Administrator== The powers of the Administrator, include, but are not limited to:
•Swearing in and validating the O5 Overseers •Making...",
"provider": {
"name": "SCPF Foundation Roblox Wikia"
},
"thumbnail": {
"url": "[Link]
Christ_Arms_coat_of_.jpeg/revision/latest?cb=20240102051436",
"proxy_url": "[Link]
o6kjq8k12Exa5jJbD1IYPky8IDybS3SPsIAUlW2NtEA/%3Fcb%3D20240102051436/
https/[Link]/scpf-foundation-roblox/images/5/5a/
Christ_Arms_coat_of_.jpeg/revision/latest",
"width": 871,
"height": 1018,
"placeholder": "qDkGFgK2FQmBq7ljitiFp3h3mkf/6Ok=",
"placeholder_version": 1
}
}
]
}
var kLSDataErr: OSStatus { get }[Link](
# Remove the payment_method_types parameter
# to manage payment methods in the Dashboard
payment_method_types=['card'],
line_items=[{
'price_data': {
# The currency parameter determines which
# payment methods are used in the Checkout Session.
'currency': 'eur',
'product_data': {
'name': 'T-shirt',
},
'unit_amount': 2000,
},
'quantity': 1,
}],
mode='payment',
static let three: [Float] = [0, 1, 1, 1, 1, 0, // ⚪ ⚫ ⚫ ⚫ ⚫ ⚪
0, 0, 0, 0, 1, 0, // ⚪ ⚪ ⚪ ⚪ ⚫ ⚪
0, 1, 1, 1, 1, 0, // ⚪ ⚫ ⚫ ⚫ ⚫ ⚪
0, 0, 0, 0, 1, 0, // ⚪ ⚪ ⚪ ⚪ ⚫ ⚪
0, 1, 1, 1, 1, 0, // ⚪ ⚫ ⚫ ⚫ ⚫ ⚪
0, 0, 0, 0, 0, 0] // ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
The network consists of the three layers success_url='[Link]
success',
cancel_url='[Link]
)
Enable shipping address collection in
static var filterParameters = BNNSFilterParameters(
flags: [Link],
n_threads: 1,
alloc_memory: nil,
free_memory: nil)
# Install through pip
pip3 install --upgrade stripe
PyPi
Python
var kLSDataUnavailableErr: OSStatus { get }
static let convolutionWeights: BNNSNDArrayDescriptor = {
let convolutionKernelSize = 3
let convolutionWeightsShape = [Link](
convolutionKernelSize,
convolutionKernelSize,
convolutionInputImageChannels,
convolutionOutputImageChannels)
guard let desc = [Link](
randomUniformUsing: randomGenerator,
range: Float(-0.5)...Float(0.5),
shape: convolutionWeightsShape) else {
fatalError("Unable to create `convolutionWeightsArray`.")
}
return desc
}()
The app initializes the convolution bias and the batch normalization beta and
gamma arrays with a repeated scalar value.
static let convolutionBias = [Link](
repeating: Float(0),
shape: .vector(convolutionOutputImageChannels))
static let featureMaps = convolutionOutputImageChannels
static let batchNormBeta = [Link](
repeating: Float(0),
shape: .vector(featureMaps),
batchSize: batchSize)
static let batchNormGamma = [Link](
repeating: Float(1),
shape: .vector(featureMaps),
batchSize: batchSize)
The code below creates the fused layer that applies convolution and normalization
to the input:
static let fusedConvBatchNormLayer: [Link] = {
let convolutionParameters = [Link](
type: .standard,
weights: convolutionWeights,
bias: convolutionBias,
stride: (1, 1),
dilationStride: (1, 1),
groupSize: 1,
padding: .symmetric(x: convolutionPadding,
y: convolutionPadding))
let normalizationParameters = [Link](
type: .batch(movingMean: batchNormMovingMean,
movingVariance: batchNormMovingVariance),
beta: batchNormBeta,
gamma: batchNormGamma,
momentum: 0.9,
epsilon: 1e-07,
activation: .rectifiedLinear)
guard let layer = [Link](
input: input,
output: batchNormOutput,
fusedLayerParameters: [convolutionParameters, normalizationParameters],
filterParameters: filterParameters) else {
fatalError("unable to create fusedConvBatchnormLayer")
}
return layer
}()
Create the pooling layer
Pooling layers downscale their input while preserving the most important
information and produce an output that, in the case of this sample code project,
consists of the maximum value in each input pixel’s local neighborhood.
The following code creates the pooling layer:
static var poolingLayer: [Link] = {
guard let poolingLayer = [Link](
type: .max(xDilationStride: 1, yDilationStride: 1),
input: batchNormOutput,
output: poolingOutput,
bias: nil,
activation: .identity,
kernelSize: (2, 2),
stride: (2, 2),
padding: .zero,
filterParameters: filterParameters) else {
fatalError("Unable to create `poolingLayer`.")
}
return poolingLayer
}()
Create the fully connected layer
Fully connected layers compute the matrix-vector product of a weights matrix and
its input, and flatten the data to predict the correct label.
The app initializes the fully connected weights array with random values.
static let fullyConnectedWeights: BNNSNDArrayDescriptor = {
guard let desc = [Link](
randomUniformUsing: randomGenerator,
range: Float(-0.5)...Float(0.5),
shape: .matrixRowMajor(poolingOutputSize,
fullyConnectedOutputWidth)) else {
fatalError("Unable to create `fullyConnectedWeightsArray`.")
}
return desc
}()
The code below creates the fully connected layer:
static var fullyConnectedLayer: [Link] = {
let desc = BNNSNDArrayDescriptor(dataType: .float,
shape: .vector(poolingOutputSize))
guard let fullyConnectedLayer = [Link](
input: desc,
output: fullyConnectedOutput,
weights: fullyConnectedWeights,
bias: nil,
activation: .identity,
filterParameters: filterParameters) else {
fatalError("Unable to create `fullyConnectedLayer`.")
}
return fullyConnectedLayer
}()
Create the loss layer
The loss layer is responsible for quantifying a score that indicates how the
predicted values deviate from the labels.
The code below creates the loss layer:
static var lossLayer: [Link] = {
guard let lossLayer = [Link](input: fullyConnectedOutput,
output: lossOutput,
lossFunction: .softmaxCrossEntropy(labelSmoothing: 0),
lossReduction: .reductionMean,
filterParameters: filterParameters) else {
fatalError("Unable to create `lossLayer`.")
}
return lossLayer
}()
Create the candidate input
For each iteration of the training phase, the sample creates a matrix that represents
a random digit, and a one-hot encoded tensor of the same digit. The sample places
digits randomly in a 20 x 20 matrix, so a 3 might appear in the matrix as the image
below. This example renders 0 as ⚪ , and 1 as ⚫ .
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚫ ⚫ ⚫ ⚫ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚫ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚫ ⚫ ⚫ ⚫ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚫ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚫ ⚫ ⚫ ⚫ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪ ⚪
The one-hot encoded tensor contains a 1 at the zero-based index of 3.
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
The sample code project uses a batch size of 32, so each iteration generates 32
random digits in random positions in the 20 x 20 grid.
Perform the forward pass
To perform the forward pass, the sample code calls apply on the fused, pooling, and
fully connected layers.
try [Link](batchSize: batchSize,
input: input,
output: batchNormOutput,
for: .training)
try [Link](batchSize: batchSize,
input: batchNormOutput,
output: poolingOutput)
try [Link](batchSize: batchSize,
input: poolingOutput,
output: fullyConnectedOutput)
Calculate the loss and loss gradient
Calculating the loss evaluates the efficacy of the neural network. The loss layer
generates its output, lossOutput, which contains a score that indicates how the
predicted values deviate from the labels, and lossInputGradient, which is the output
gradient parameter to the backward application of the fully connected layer.
try [Link](batchSize: batchSize,
input: fullyConnectedOutput,
labels: oneHotLabels,
output: lossOutput,
generatingInputGradient: lossInputGradient)
Create the optimizer
The optimizer is responsible for updating the weights, biases, beta, and gamma. In
the code below, the sample code project creates an optimizer using the Adam
algorithm:
static var adam = [Link](learningRate: 0.01,
timeStep: 1,
gradientScale: 1,
regularizationScale: 0.01,
gradientClipping: .byValue(bounds: -0.5 ... 0.5),
regularizationFunction: BNNSOptimizerRegularizationL2)
Perform a backward pass and optimization step on the fully connected layer
The sample code project performs the backward pass in reverse order to the
forward pass. Therefore, the sample’s first step is to call applyBackward on the fully
connected layer, and perform an optimization step on its weights.
The applyBackward call on the fully connected layer generates an input gradient
that acts as the output gradient for the pooling layer’s backward apply, and a
weights gradient that passes to the fully connected optimizer step.
try [Link](
batchSize: batchSize,
input: poolingOutput,
output: fullyConnectedOutput,
outputGradient: lossInputGradient,
generatingInputGradient: fullyConnectedInputGradient,
generatingWeightsGradient: fullyConnectedWeightGradient)
Perform a backward pass on the pooling layer
The backward pass on the pooling layer generates an input gradient that’s the
output gradient to the backward apply of the fused layer.
try [Link](
batchSize: batchSize,
input: batchNormOutput,
output: poolingOutput,
outputGradient: fullyConnectedInputGradient,
generatingInputGradient: poolingInputGradientArray)
Perform a backward pass and optimization step on the fused layer
The sample calls applyBackward on the fused layer. This performs an optimization
step on the convolution layer’s weights and bias, and the normalization layer’s beta
and gamma.
let gradientParameters = [convolutionWeightGradient,
convolutionBiasGradient,
batchNormBetaGradient,
batchNormGammaGradient]
try [Link](
batchSize: batchSize,
input: input,
output: batchNormOutput,
outputGradient: poolingInputGradientArray,
generatingInputGradient: convolutionInputGradient,
generatingParameterGradients: gradientParameters)
The code below performs the optimization step:
try [Link](
parameters: [fullyConnectedWeights,
convolutionWeights, convolutionBias,
batchNormBeta, batchNormGamma],
gradients: [fullyConnectedWeightGradient,
convolutionWeightGradient, convolutionBiasGradient,
batchNormBetaGradient, batchNormGammaGradient],
accumulators: [fullyConnectedWeightAccumulator1,
convolutionWeightAccumulator1, convolutionBiasAccumulator1,
batchNormBetaAccumulator1, batchNormGammaAccumulator1,
fullyConnectedWeightAccumulator2,
convolutionWeightAccumulator2, convolutionBiasAccumulator2,
batchNormBetaAccumulator2, batchNormGammaAccumulator2],
filterParameters: filterParameters)
After the app completes all the optimization steps for this iteration, it increments
the optimizer time step.
[Link] += 1
Evaluate the neural network
The sample iterates over the forward, loss, backward, and optimization steps, and
with each iteration, the trend of the loss is to reduce. The following graph shows the
loss, as a solid stroke, decreasing during training:
A graph with loss as the y-axis and training epochs as the x-axis that shows loss
decreasing during training.
The code in the sample defines a maximum number of iterations. Additionally, it
calculates a moving average of recent loss values, which appear as a dashed stroke
in the graph above. At each iteration, the sample checks whether the recent average
loss is below that threshold, and, if it is, it breaks from the training phase early.
let maximumIterationCount = 1000
// The `recentLosses` array contains the last `recentLossesCount` losses.
let recentLossesCount = 20
var recentLosses = [Float]()
// The `averageRecentLossThreshold` constant defines the loss threshold
// at which to consider the training phase complete.
let averageRecentLossThreshold = Float(0.125)
for epoch in 0 ..< maximumIterationCount {
if epoch == 500 {
[Link] /= 10
}
generateInputAndLabels()
forwardPass()
computeLoss()
guard let loss = [Link](of: [Link],
batchSize: 1)?.first else {
print("Unable to calculate loss.")
return
}
if [Link] {
recentLosses = [Float](repeating: loss,
count: recentLossesCount)
}
recentLosses[epoch % recentLossesCount] = loss
let averageRecentLoss = [Link](recentLosses)
if epoch % 10 == 0 {
print("Epoch \(epoch): \(loss) : \(averageRecentLoss)")
}
if averageRecentLoss < averageRecentLossThreshold {
print("Recent average loss: \(averageRecentLoss), breaking at epoch \
(epoch).")
break
}
backwardPass()
}
After the training phase completes, the sample calculates the accuracy of the
network over a new dataset. It then creates a new batch of random digits and runs a
forward pass of the network.
try [Link](batchSize: batchSize,
input: input,
output: batchNormOutput,
for: .inference)
try [Link](batchSize: batchSize,
input: batchNormOutput,
output: poolingOutput)
try [Link](batchSize: batchSize,
input: poolingOutput,
output: fullyConnectedOutput)
Finally, the app evaluates the accuracy of the network by comparing the values in
the fully connected layer’s output to the one-hot labels. For example, when the
recognized digit is 3, one-hot labels contain the values [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
and values in the fully connected layer’s output might be as follows:
[-2.51, -3.62, -0.10, 8.52, -0.42, 5.11, -1.65, 1.34, 0.82, -2.77]
[-3.94, -2.74, -0.30, 8.39, -1.45, 6.02, -0.66, 3.25, 0.49, -3.19]
[-2.51, -2.77, -0.77, 8.41, -0.82, 4.87, -0.37, 2.32, -0.49, -3.05]
[-3.01, -2.79, 0.48, 7.95, -2.57, 4.55, -1.05, 1.67, 1.38, -1.43]
[-2.48, -1.59, -0.97, 7.59, -2.52, 4.00, 0.95, 4.02, -2.10, -1.62]
Note that in each case, the highest value in the fully connected layer’s output is at
index 3.
The following code performs that evaluation for each digit in the batch:
guard
let fullyConnected = [Link](
of: [Link],
batchSize: batchSize),
let labels = [Link](
of: [Link],
batchSize: batchSize) else {
fatalError("Unable to create arrays for evaluation.")
}
var correctCount = 0
for sample in 0 ..< batchSize {
let offset = fullyConnectedOutputWidth * sample
let fullyConnectedBatch = fullyConnected[offset ..< offset +
fullyConnectedOutputWidth]
let predictedDigit = [Link](fullyConnectedBatch).0
let oneHotLabelsBatch = labels[offset ..< offset + fullyConnectedOutputWidth]
let label = [Link](oneHotLabelsBatch).0
if label == predictedDigit {
correctCount += 1
}
print("Sample \(sample) — digit: \(label) | prediction: \(predictedDigit)")
}
The evaluation function prints out something like the following:
Sample 0 — digit: 7 | prediction: 7
Sample 1 — digit: 5 | prediction: 5
Sample 2 — digit: 7 | prediction: 7
Sample 3 — digit: 7 | prediction: 7
Sample 4 — digit: 0 | prediction: 0
Sample 5 — digit: 8 | prediction: 8
Sample 6 — digit: 3 | prediction: 3
Sample 7 — digit: 6 | prediction: 6
Sample 8 — digit: 2 | prediction: 2
Sample 9 — digit: 7 | prediction: 7
[ ... ]
In this case, the neural network accurately
# Or find the Stripe package on [Link]
[Link]
Python
var kLSServerCommunicationErr: OSStatus { get }
var kLSAppInTrashErr: OSStatus { get }
# Find the version you want to pin:
# [Link]
# Specify that version in your [Link] file
stripe>=5.0.0
func LSCanURLAcceptURL(
_ inItemURL: CFURL,
_ inTargetURL: CFURL,
_ inRoleMask: LSRolesMask,
_ inFlags: LSAcceptanceFlags,
_ outAcceptsItem: UnsafeMutablePointer<DarwinBoolean>
) -> OSStatus
{
  "id": "evt_1Ox4QrGF83d3fsgW1Y8BYaCl",
  "object": "event",
  "api_version": "2023-10-16",
  "created": 1711099837,
  "data": {
    "object": {… 11 items}
  },
  "livemode": true,
  "pending_webhooks": 6,
  "request": {
    "id": null,
    "idempotency_key": null
  },
  "type": "[Link]"
}
Shell
Read only
owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "public_key": "ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]", "uri": "https://
[Link]/ssh_credentials/sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh" } Fields
Name Type { "acl": ["bind:[Link]",
"bind:[Link]"], "created_at": "2024-02-16T[Link]Z",
"description": "for device #132", "id": "sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh",
"owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "public_key": "ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]", "uri": "https://
[Link]/ssh_credentials/sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh" } curl \ -X
POST \ -H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H
"Content-Type: application/json" \ -H "Ngrok-Version: 2" \ -d '{"acl":
["bind:[Link]","bind:[Link]"],"description":"for
device #132","public_key":"ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]"}' \ https://
[Link]/ssh_credentials curl \ -X GET \ -H "Authorization: Bearer
{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-Version: 2" \ https://
[Link]/credentials { "acl": [], "created_at": "2024-02-16T[Link]Z",
"description": "device alpha-2", "id": "cr_2cSjwF80LQJdOIaFZDbLWO9pMxd",
"metadata": "{\"device_id\": \"d5111ba7-0cc5-4ba3-8398-e6c79e4e89c2\"}",
"owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwF80LQJdOIaFZDbLWO9pMxd" } curl \ -X PATCH
\ -H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Content-
Type: application/json" \ -H "Ngrok-Version: 2" \ -d '{"description":"device
alpha-2","metadata":"{\"device_id\": \"d5111ba7-0cc5-4ba3-8398-
e6c79e4e89c2\"}"}' \ [Link]
cr_2cSjwF80LQJdOIaFZDbLWO9pMxd { "credentials": [ { "acl": [], "created_at":
"2024-02-16T[Link]Z", "description": "credential for \"api-examples-
c954256d6ada8b72@[Link]\"", "id": "cr_2cSjwL9yhXwTeYnIOSxZlvZ8S8y",
"owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwL9yhXwTeYnIOSxZlvZ8S8y" }, { "acl":
["bind:[Link]", "bind:[Link]"], "created_at":
"2024-02-16T[Link]Z", "description": "for device #132", "id":
"cr_2cSjwHg6uyMl2h31jEgXZOHI77u", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwHg6uyMl2h31jEgXZOHI77u" }, { "acl": [],
"created_at": "2024-02-16T[Link]Z", "description": "development cred for
alan@[Link]", "id": "cr_2cSjwF80LQJdOIaFZDbLWO9pMxd", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwF80LQJdOIaFZDbLWO9pMxd" } ],
"next_page_uri": null, "uri": "[Link] } { "keys":
[ { "created_at": "2024-02-16T[Link]Z", "description": "api key for example
generation", "id": "ak_2cSjwEVodV0jYT8IMC2gIZFyotg", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/api_keys/ak_2cSjwEVodV0jYT8IMC2gIZFyotg" }, { "created_at":
"2024-02-16T[Link]Z", "description": "ad-hoc dev testing", "id":
"ak_2cSjwFetnZnRcG0B7U5fEYBX2V3", "metadata": "{\"environment\":\"dev\"}",
"owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/api_keys/ak_2cSjwFetnZnRcG0B7U5fEYBX2V3" } ],
"next_page_uri": null, "uri": "[Link] } curl \ -X GET \ -H
"Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-
Version: 2" \ [Link] curl \ -X PATCH \ -H "Authorization:
Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Content-Type: application/
json" \ -H "Ngrok-Version: 2" \ -d '{"metadata":"{\"environment\":\"dev\",
\"owner_id\": 123}"}' \ [Link]
ak_2cSjwFetnZnRcG0B7U5fEYBX2V3 { "created_at": "2024-02-16T[Link]Z",
"description": "ad-hoc dev testing", "id": "ak_2cSjwFetnZnRcG0B7U5fEYBX2V3",
"metadata": "{\"environment\":\"dev\", \"owner_id\": 123}", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/api_keys/ak_2cSjwFetnZnRcG0B7U5fEYBX2V3" } curl \ -X POST \ -H
"Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Content-Type:
application/json" \ -H "Ngrok-Version: 2" \ -d '{"acl":
["bind:[Link]","bind:[Link]"],"description":"for
device #132","public_key":"ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]"}' \ https://
[Link]/ssh_credentials { "acl": ["bind:[Link]",
"bind:[Link]"], "created_at": "2024-02-16T[Link]Z",
"description": "for device #132", "id": "sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh",
"owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "public_key": "ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]", "uri": "https://
[Link]/ssh_credentials/sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh" } curl \ -X
GET \ -H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H
"Ngrok-Version: 2" \ [Link]
sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh { "acl": ["bind:[Link]",
"bind:[Link]"], "created_at": "2024-02-16T[Link]Z",
"description": "my dev machine", "id": "sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh",
"metadata": "{\"hostname\": \"[Link]\"}", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "public_key": "ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]", "uri": "https://
[Link]/ssh_credentials/sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh" } curl \ -X
GET \ -H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H
"Ngrok-Version: 2" \ [Link] { "next_page_uri": null,
"ssh_credentials": [ { "acl": ["bind:[Link]",
"bind:[Link]"], "created_at": "2024-02-16T[Link]Z",
"description": "for device #132", "id": "sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh",
"owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "public_key": "ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]", "uri": "https://
[Link]/ssh_credentials/sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh" } ], "uri":
"[Link] } curl \ -X PATCH \ -H "Authorization:
Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Content-Type: application/
json" \ -H "Ngrok-Version: 2" \ -d '{"description":"my dev
machine","metadata":"{\"hostname\": \"[Link]\"}"}' \ https://
[Link]/ssh_credentials/sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh { "acl":
["bind:[Link]", "bind:[Link]"], "created_at":
"2024-02-16T[Link]Z", "description": "my dev machine", "id":
"sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh", "metadata": "{\"hostname\":
\"[Link]\"}", "owner_id": "usr_2cSjwF6w6AynjfPtm4Ww5xTdkId",
"public_key": "ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDmGS49FkSODAcKhn3+/
47DW2zEn19BZvzRQ8RZjL3v6hCIX2qXfsFK35EGxNI0wV23H4xXC2gVRPHKU71Yn
Cb50tad3yMBTM6+2yfGsEDasEH/anmBLclChKvuGiT547RskZlpbAbdq3GvbzmY+R/
2EBRMOiObpc8XmSzKAd05j28kqN0+rZO65SWId0MXdvJdSCSAnuRqBNd/
aXKlu8hBPDcgwbT2lMkuR+ApoBS2FLRBOiQyt2Ol0T7Uuf7lTLlazpGB3uTw5zFYUN
XkuuI6cAP8QYuY1Bne/hNrG8t3Aw9a1yc2C4Fz1hJ/
4OMRxTQ8SUQf+Rmxs8DryMlMFJ8r device132@[Link]", "uri": "https://
[Link]/ssh_credentials/sshcr_2cSjyydertQFE3OLV3NIEJIp6Kh" } curl \ -X
POST \ -H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H
"Content-Type: application/json" \ -H "Ngrok-Version: 2" \ -d
'{"description":"development cred for alan@[Link]"}' \ https://
[Link]/credentials { "acl": [], "created_at": "2024-02-16T[Link]Z",
"description": "development cred for alan@[Link]", "id":
"cr_2cSjwF80LQJdOIaFZDbLWO9pMxd", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token":
"2cSjwF80LQJdOIaFZDbLWO9pMxd_3qYXgk4mMNpksRrJdRt6Z", "uri": "https://
[Link]/credentials/cr_2cSjwF80LQJdOIaFZDbLWO9pMxd" } curl \ -X GET \
-H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-
Version: 2" \ [Link]
cr_2cSjwF80LQJdOIaFZDbLWO9pMxd { "acl": [], "created_at":
"2024-02-16T[Link]Z", "description": "device alpha-2", "id":
"cr_2cSjwF80LQJdOIaFZDbLWO9pMxd", "metadata": "{\"device_id\":
\"d5111ba7-0cc5-4ba3-8398-e6c79e4e89c2\"}", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwF80LQJdOIaFZDbLWO9pMxd" } curl \ -X GET \
-H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-
Version: 2" \ [Link] { "credentials": [ { "acl": [],
"created_at": "2024-02-16T[Link]Z", "description": "credential for \"api-
examples-c954256d6ada8b72@[Link]\"", "id":
"cr_2cSjwL9yhXwTeYnIOSxZlvZ8S8y", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwL9yhXwTeYnIOSxZlvZ8S8y" }, { "acl":
["bind:[Link]", "bind:[Link]"], "created_at":
"2024-02-16T[Link]Z", "description": "for device #132", "id":
"cr_2cSjwHg6uyMl2h31jEgXZOHI77u", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwHg6uyMl2h31jEgXZOHI77u" }, { "acl": [],
"created_at": "2024-02-16T[Link]Z", "description": "development cred for
alan@[Link]", "id": "cr_2cSjwF80LQJdOIaFZDbLWO9pMxd", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwF80LQJdOIaFZDbLWO9pMxd" } ],
"next_page_uri": null, "uri": "[Link] } curl \ -X PATCH \
-H "Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Content-
Type: application/json" \ -H "Ngrok-Version: 2" \ -d '{"description":"device
alpha-2","metadata":"{\"device_id\": \"d5111ba7-0cc5-4ba3-8398-
e6c79e4e89c2\"}"}' \ [Link]
cr_2cSjwF80LQJdOIaFZDbLWO9pMxd { "acl": [], "created_at":
"2024-02-16T[Link]Z", "description": "device alpha-2", "id":
"cr_2cSjwF80LQJdOIaFZDbLWO9pMxd", "metadata": "{\"device_id\":
\"d5111ba7-0cc5-4ba3-8398-e6c79e4e89c2\"}", "owner_id":
"usr_2cSjwF6w6AynjfPtm4Ww5xTdkId", "token": null, "uri": "https://
[Link]/credentials/cr_2cSjwF80LQJdOIaFZDbLWO9pMxd" } ngrok tcp 22
ngrok tcp 3389 --remote-addr [Link] ngrok tcp [Link]:5432
ngrok tcp 22 --proxy-proto=2 curl \ -X GET \ -H "Authorization: Bearer
{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-Version: 2" \ https://
[Link]/endpoints { "endpoints": [ { "created_at": "2024-02-16T[Link]Z",
"hostport": "[Link]", "id":
"ep_2cSjylRoERIy4iWK6WvyM8iGBJI", "proto": "https", "public_url": "https://
[Link]", "tunnel": { "id": "tn_2cSjylRoERIy4iWK6WvyM8iGBJI",
"uri": "[Link] },
"type": "ephemeral", "updated_at": "2024-02-16T[Link]Z" }, { "created_at":
"2024-02-16T[Link]Z", "domain": { "id":
"rd_2cSjyFcMgouXhorVWV04TEc6ymx", "uri": "[Link]
reserved_domains/rd_2cSjyFcMgouXhorVWV04TEc6ymx" }, "edge": { "id":
"edgtls_2cSjyGeNROACDCqWqgVKmVny8Qj", "uri": "[Link]
tls/edgtls_2cSjyGeNROACDCqWqgVKmVny8Qj" }, "hostport": "endpoint-
[Link]", "id": "ep_2cSjyTNy61ZcuweildgxZyhgk9F", "proto": "tls",
"public_url": "tls://[Link]", "type": "edge", "updated_at":
"2024-02-16T[Link]Z" } ], "next_page_uri": null, "uri": "[Link]
endpoints" } curl \ -X GET \ -H "Authorization: Bearer
{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-Version: 2" \ https://
[Link]/endpoints/ep_2cSjylRoERIy4iWK6WvyM8iGBJI { "created_at":
"2024-02-16T[Link]Z", "hostport": "[Link]", "id":
"ep_2cSjylRoERIy4iWK6WvyM8iGBJI", "proto": "https", "public_url": "https://
[Link]", "tunnel": { "id": "tn_2cSjylRoERIy4iWK6WvyM8iGBJI",
"uri": "[Link] },
"type": "ephemeral", "updated_at": "2024-02-16T[Link]Z" } helm repo add
ngrok [Link] export
NAMESPACE=[keith Bieszczat] export NGROK_AUTHTOKEN=[AUTHTOKEN] export
NGROK_API_KEY=[ak_2cSjwFetnZnRcG0B7U5fEYBX2V3] helm install ngrok-
ingress-controller ngrok/kubernetes-ingress-controller \ --namespace
$NAMESPACE \ --create-namespace \ --set [Link]=$NGROK_API_KEY \
--set [Link]=$NGROK_AUTHTOKEN kubectl apply -n ngrok-
ingress-controller \ -f [Link]
ingress-controller/main/[Link] curl -s [Link]
[Link]/[Link] | \ sudo tee /etc/apt/[Link].d/[Link] >/
dev/null && \ echo "deb [Link] buster main" | \
sudo tee /etc/apt/[Link].d/[Link] && \ sudo apt update && sudo apt install
ngrok ngrok config add-authtoken <
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF > ngrok http http://
localhost:8080 ngrok (Ctrl+C to quit) Session Status online Account
inconshreveable (Plan: Free) Version 3.0.0 Region United States (us) Latency 78ms
Web Interface [Link] Forwarding [Link]
-> [Link] Connections ttl opn rt1 rt5 p50 p90 0 0 0.00 0.00 0.00
0.00 ngrok http 8080 --domain [Link] ngrok http
[Link] --oauth=google --oauth-allow-email=alan@[Link]
ngrok http [Link] --basic-auth 'username:a-very-secure-password'
curl [Link] -H "authorization: Bearer {API Key}" -H "ngrok-version:
2" ngrok config add-api-key "{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" ngrok api
endpoints list # Configure the ngrok provider provider "ngrok" { api_key =
"{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" } # Provision an ngrok domain resource
"ngrok_reserved_domain" "my_domain" { name = "[Link]"
region = "us" certificate_management_policy { authority = "letsencrypt"
private_key_type = "ecdsa" } } { "error_code": "ERR_NGROK_218", "status_code":
400, "msg": "Your request has not specified an API version. Please include the
version you wish to use in the Ngrok-Version header. Supported versions: 2.",
"details": { "operation_id": "op_2RfSBcv0nsy71XCHWOegjx1OoKh" } }
(cr_2coSXkUPJHKNejdfgFKZaF0CjKd ) tunnel credential ngrok ok paid
(usr_2coSXng7X2Ax9cfBzu5E26b6SDR ) user id Keith Bieszczat Ngrok
god964v@[Link] (2coazssVIEva7q1Fw2nwytLIq2N_61a9jMzvJb96ByjXJAH6H )
Bot user Auth token (cr_2coazssVIEva7q1Fw2nwytLIq2N ) tunnel credential bot user
ascended master (bot_2coayXt1oJzWKxgTYyanAjQ3KPV ) bot user ascended
master id {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3} API key god964v@[Link]
Keith Bieszczat NGROK
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF ngrok paid auth.
token $ ngrok config add-authtoken
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF file. # in [Link]
authtoken: 2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF ngrok paid auth.
token $ ngrok config add-authtoken
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF file. # in [Link]
authtoken: 2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF ngrok tcp
22 --proxy-proto=2 ngrok tcp [Link]:5432 ngrok tcp 22 ngrok tcp 3389 --
remote-addr [Link] ngrok tcp [Link]:5432 curl \ -X GET \ -H
"Authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-
Version: 2" \ [Link] { "endpoints": [ { "created_at":
"2024-02-16T[Link]Z", "hostport": "[Link]", "id":
"ep_2cSjylRoERIy4iWK6WvyM8iGBJI", "proto": "https", "public_url": "https://
[Link]", "tunnel": { "id": "tn_2cSjylRoERIy4iWK6WvyM8iGBJI",
"uri": "[Link] },
"type": "ephemeral", "updated_at": "2024-02-16T[Link]Z" }, { "created_at":
"2024-02-16T[Link]Z", "domain": { "id":
"rd_2cSjyFcMgouXhorVWV04TEc6ymx", "uri": "[Link]
reserved_domains/rd_2cSjyFcMgouXhorVWV04TEc6ymx" }, "edge": { "id":
"edgtls_2cSjyGeNROACDCqWqgVKmVny8Qj", "uri": "[Link]
tls/edgtls_2cSjyGeNROACDCqWqgVKmVny8Qj" }, "hostport": "endpoint-
[Link]", "id": "ep_2cSjyTNy61ZcuweildgxZyhgk9F", "proto": "tls",
"public_url": "tls://[Link]", "type": "edge", "updated_at":
"2024-02-16T[Link]Z" } ], "next_page_uri": null, "uri": "[Link]
endpoints" } curl \ -X GET \ -H "Authorization: Bearer
{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" \ -H "Ngrok-Version: 2" \ https://
[Link]/endpoints/ep_2cSjylRoERIy4iWK6WvyM8iGBJI { "created_at":
"2024-02-16T[Link]Z", "hostport": "[Link]", "id":
"ep_2cSjylRoERIy4iWK6WvyM8iGBJI", "proto": "https", "public_url": "https://
[Link]", "tunnel": { "id": "tn_2cSjylRoERIy4iWK6WvyM8iGBJI",
"uri": "[Link] },
"type": "ephemeral", "updated_at": "2024-02-16T[Link]Z" } curl https://
[Link] -H "authorization: Bearer {ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}"
-H "ngrok-version: 2" ngrok config add-api-key
"{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" ngrok api endpoints list # Configure the
ngrok provider provider "ngrok" { api_key =
"{ak_2cSjwFetnZnRcG0B7U5fEYBX2V3}" } # Provision an ngrok domain resource
"ngrok_reserved_domain" "my_domain" { name = "[Link]"
region = "us" certificate_management_policy { authority = "letsencrypt"
private_key_type = "ecdsa" } } { "error_code": "ERR_NGROK_218", "status_code":
400, "msg": "Your request has not specified an API version. Please include the
version you wish to use in the Ngrok-Version header. Supported versions: 2.",
"details": { "operation_id": "op_2RfSBcv0nsy71XCHWOegjx1OoKh" } } ngrok tcp 22
ngrok tcp 3389 --remote-addr [Link] ngrok tcp [Link]:5432
ngrok tcp 22 --proxy-proto=2 curl \ -X GET \ -H "Authorization: Bearer {API_KEY}" \
-H "Ngrok-Version: 2" \ [Link] { "endpoints":
[ { "created_at": "2024-02-16T[Link]Z", "hostport":
"[Link]", "id": "ep_2cSjylRoERIy4iWK6WvyM8iGBJI",
"proto": "https", "public_url": "[Link] "tunnel": { "id":
"tn_2cSjylRoERIy4iWK6WvyM8iGBJI", "uri": "[Link]
tn_2cSjylRoERIy4iWK6WvyM8iGBJI" }, "type": "ephemeral", "updated_at":
"2024-02-16T[Link]Z" }, { "created_at": "2024-02-16T[Link]Z", "domain":
{ "id": "rd_2cSjyFcMgouXhorVWV04TEc6ymx", "uri": "[Link]
reserved_domains/rd_2cSjyFcMgouXhorVWV04TEc6ymx" }, "edge": { "id":
"edgtls_2cSjyGeNROACDCqWqgVKmVny8Qj", "uri": "[Link]
tls/edgtls_2cSjyGeNROACDCqWqgVKmVny8Qj" }, "hostport": "endpoint-
[Link]", "id": "ep_2cSjyTNy61ZcuweildgxZyhgk9F", "proto": "tls",
"public_url": "tls://[Link]", "type": "edge", "updated_at":
"2024-02-16T[Link]Z" } ], "next_page_uri": null, "uri": "[Link]
endpoints" } curl \ -X GET \ -H "Authorization: Bearer {API_KEY}" \ -H "Ngrok-
Version: 2" \ [Link]
{ "created_at": "2024-02-16T[Link]Z", "hostport":
"[Link]", "id": "ep_2cSjylRoERIy4iWK6WvyM8iGBJI",
"proto": "https", "public_url": "[Link] "tunnel": { "id":
"tn_2cSjylRoERIy4iWK6WvyM8iGBJI", "uri": "[Link]
tn_2cSjylRoERIy4iWK6WvyM8iGBJI" }, "type": "ephemeral", "updated_at":
"2024-02-16T[Link]Z" } helm repo add ngrok [Link]
kubernetes-ingress-controller export NAMESPACE=[keith bieszczat/
god964v@[Link]] export
NGROK_AUTHTOKEN=[2coazssVIEva7q1Fw2nwytLIq2N_61a9jMzvJb96ByjXJAH6H
] export NGROK_API_KEY=[ak_2cSjwFetnZnRcG0B7U5fEYBX2V3] helm install
ngrok-ingress-controller ngrok/kubernetes-ingress-controller \ --namespace
$NAMESPACE \keith bieszczat --create-namespace \keith bieszczat --set
[Link]=$NGROK_API_KEY \ --set
[Link]=$2coazssVIEva7q1Fw2nwytLIq2N_61a9jMzvJb96ByjXJAH6
H kubectl apply -n ngrok-ingress-controller \ -f [Link]
ngrok/kubernetes-ingress-controller/main/[Link] curl -s https://
[Link]/[Link] | \ sudo tee /etc/apt/[Link].d/
[Link] >/dev/null && \ echo "deb [Link] buster
main" | \ sudo tee /etc/apt/[Link].d/[Link] && \ sudo apt update && sudo
apt install ngrok ngrok config add-authtoken
<2coazssVIEva7q1Fw2nwytLIq2N_61a9jMzvJb96ByjXJAH6H> ngrok http http://
localhost:8080 ngrok (Ctrl+C to quit) Session Status online Account
inconshreveable (Plan: Free) Version 3.0.0 Region United States (us) Latency 78ms
Web Interface [Link] Forwarding [Link]
-> [Link] Connections ttl opn rt1 rt5 p50 p90 0 0 0.00 0.00 0.00
0.00 ngrok http 8080 --domain [Link] ngrok http
[Link] --oauth=google --oauth-allow-email=alan@[Link]
ngrok http [Link] --basic-auth 'username:a-very-secure-password'
curl [Link] -H "authorization: Bearer {API Key}" -H "ngrok-version:
2" ngrok config add-api-key "{API Key}" ngrok api endpoints list # Configure the
ngrok provider provider "ngrok" { api_key = "{API_KEY}" } # Provision an ngrok
domain resource "ngrok_reserved_domain" "my_domain" { name = "my-
[Link]" region = "us" certificate_management_policy { authority =
"letsencrypt" private_key_type = "ecdsa" } } { "error_code": "ERR_NGROK_218",
"status_code": 400, "msg": "Your request has not specified an API version. Please
include the version you wish to use in the Ngrok-Version header. Supported
versions: 2.", "details": { "operation_id": "op_2RfSBcv0nsy71XCHWOegjx1OoKh" } }
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF ngrok paid auth.
token $ ngrok config add-authtoken
2coSXkUPJHKNejdfgFKZaF0CjKd_6NvztF58L172P1MHzwAHF ngrok tcp 22 --
proxy-proto=2 ngrok tcp [Link]:5432 ngrok tcp 22 ngrok tcp 3389 --remote-
addr [Link] ngrok tcp [Link]:5432 curl \ -X GET \ -H
"Authorization: Bearer {API_KEY}" \ -H "Ngrok-Version: 2" \ [Link]
endpoints { "endpoints": [ { "created_at": "2024-02-16T[Link]Z", "hostport":
"[Link]", "id": "ep_2cSjylRoERIy4iWK6WvyM8iGBJI",
"proto": "https", "public_url": "[Link] "tunnel": { "id":
"tn_2cSjylRoERIy4iWK6WvyM8iGBJI", "uri": "[Link]
tn_2cSjylRoERIy4iWK6WvyM8iGBJI" }, "type": "ephemeral", "updated_at":
"2024-02-16T[Link]Z" }, { "created_at": "2024-02-16T[Link]Z", "domain":
{ "id": "rd_2cSjyFcMgouXhorVWV04TEc6ymx", "uri": "[Link]
reserved
[Link]('cat', 'cat') #=> true # match entire string
[Link]('cat', 'category') #=> false # only match partial string
[Link]('c{at,ub}s', 'cats') #=> false # { } isn't supported by default
[Link]('c{at,ub}s', 'cats', File::FNM_EXTGLOB) #=> true # { } is supported on
FNM_EXTGLOB
[Link]('c?t', 'cat') #=> true # '?' match only 1 character
[Link]('c??t', 'cat') #=> false # ditto
[Link]('c*', 'cats') #=> true # '*' match 0 or more characters
[Link]('c*t', 'c/a/b/t') #=> true # ditto
[Link]('ca[a-z]', 'cat') #=> true # inclusive bracket expression
[Link]('ca[^t]', 'cat') #=> false # exclusive bracket expression ('^' or '!')
[Link]('cat', 'CAT') #=> false # case sensitive
[Link]('cat', 'CAT', File::FNM_CASEFOLD) #=> true # case insensitive
[Link]('?', '/', File::FNM_PATHNAME) #=> false # wildcard doesn't match '/'
on FNM_PATHNAME
[Link]('*', '/', File::FNM_PATHNAME) #=> false # ditto
[Link]('[/]', '/', File::FNM_PATHNAME) #=> false # ditto
[Link]('\?', '?') #=> true # escaped wildcard becomes ordinary
[Link]('\a', 'a') #=> true # escaped ordinary remains ordinary
[Link]('\a', '\a', File::FNM_NOESCAPE) #=> true # FNM_NOESCAPE makes
'\' ordinary
[Link]('[\?]', '?') #=> true # can escape inside bracket expression
[Link]('*', '.profile') #=> false # wildcard doesn't match leading
[Link]('*', '.profile', File::FNM_DOTMATCH) #=> true # period by default.
[Link]('.*', '.profile') #=> true
rbfiles = '**' '/' '*.rb' # you don't have to do like this. just write in single string.
[Link](rbfiles, '[Link]') #=> false
[Link](rbfiles, './[Link]') #=> false
[Link](rbfiles, 'lib/[Link]') #=> true
[Link]('**.rb', '[Link]') #=> true
[Link]('**.rb', './[Link]') #=> false
[Link]('**.rb', 'lib/[Link]') #=> true
[Link]('*', 'dave/.profile') #=> true
pattern = '*' '/' '*'
[Link](pattern, 'dave/.profile', File::FNM_PATHNAME) #=> false
[Link](pattern, 'dave/.profile', File::FNM_PATHNAME | File::FNM_DOTMATCH)
#=> true
pattern = '**' '/' 'foo'
[Link](pattern, 'a/b/c/foo', File::FNM_PATHNAME) #=> true
[Link](pattern, '/a/b/c/foo', File::FNM_PATHNAME) #=> true
[Link](pattern, 'c:/a/b/c/foo', File::FNM_PATHNAME) #=> true
[Link](pattern, 'a/.b/c/foo', File::FNM_PATHNAME) #=> false
[Link](pattern, 'a/.b/c/foo', File::FNM_PATHNAME | File::FNM_DOTMATCH)
#=> true
[Link]('cat', 'cat') #=> true # match entire string
[Link]('cat', 'category') #=> false # only match partial string
[Link]('c{at,ub}s', 'cats') #=> false # { } isn't supported by default
[Link]('c{at,ub}s', 'cats', File::FNM_EXTGLOB) #=> true # { } is supported on
FNM_EXTGLOB
[Link]('c?t', 'cat') #=> true # '?' match only 1 character
[Link]('c??t', 'cat') #=> false # ditto
[Link]('c*', 'cats') #=> true # '*' match 0 or more characters
[Link]('c*t', 'c/a/b/t') #=> true # ditto
[Link]('ca[a-z]', 'cat') #=> true # inclusive bracket expression
[Link]('ca[^t]', 'cat') #=> false # exclusive bracket expression ('^' or '!')
[Link]('cat', 'CAT') #=> false # case sensitive
[Link]('cat', 'CAT', File::FNM_CASEFOLD) #=> true # case insensitive
[Link]('?', '/', File::FNM_PATHNAME) #=> false # wildcard doesn't match '/'
on FNM_PATHNAME
[Link]('*', '/', File::FNM_PATHNAME) #=> false # ditto
[Link]('[/]', '/', File::FNM_PATHNAME) #=> false # ditto
[Link]('\?', '?') #=> true # escaped wildcard becomes ordinary
[Link]('\a', 'a') #=> true # escaped ordinary remains ordinary
[Link]('\a', '\a', File::FNM_NOESCAPE) #=> true # FNM_NOESCAPE makes
'\' ordinary
[Link]('[\?]', '?') #=> true # can escape inside bracket expression
[Link]('*', '.profile') #=> false # wildcard doesn't match leading
[Link]('*', '.profile', File::FNM_DOTMATCH) #=> true # period by default.
[Link]('.*', '.profile') #=> true
rbfiles = '**' '/' '*.rb' # you don't have to do like this. just write in single string.
[Link](rbfiles, '[Link]') #=> false
[Link](rbfiles, './[Link]') #=> false
[Link](rbfiles, 'lib/[Link]') #=> true
[Link]('**.rb', '[Link]') #=> true
[Link]('**.rb', './[Link]') #=> false
[Link]('**.rb', 'lib/[Link]') #=> true
[Link]('*', 'dave/.profile') #=> true
pattern = '*' '/' '*'
[Link](pattern, 'dave/.profile', File::FNM_PATHNAME) #=> false
[Link](pattern, 'dave/.profile', File::FNM_PATHNAME | File::FNM_DOTMATCH)
#=> true
pattern = '**' '/' 'foo'
[Link](pattern, 'a/b/c/foo', File::FNM_PATHNAME) #=> true
[Link](pattern, '/a/b/c/foo', File::FNM_PATHNAME) #=> true
[Link](pattern, 'c:/a/b/c/foo', File::FNM_PATHNAME) #=> true
[Link](pattern, 'a/.b/c/foo', File::FNM_PATHNAME) #=> false
[Link](pattern, 'a/.b/c/foo', File::FNM_PATHNAME | File::FNM_DOTMATCH)
#=> true
ftype(file_name) → string
Identifies the type of the named file; the return string is one of "file", "directory",
"characterSpecial", "blockSpecial", "fifo", "link", "socket", or "unknown".
[Link]("testfile") #=> "file"
[Link]("/dev/tty") #=> "characterSpecial"
[Link]("/tmp/.X11-unix/X0") #=> "socket"
grpowned?(file_name) → true or false
Returns true if the named file exists and the effective group id of the calling process
is the owner of the file. Returns false on Windows.
file_name can be an IO object.
identical?(file_1, file_2) → true or false
Returns true if the named files are identical.
file_1 and file_2 can be an IO object.
open("a", "w") {}
p [Link]?("a", "a") #=> true
p [Link]?("a", "./a") #=> true
[Link]("a", "b")
p [Link]?("a", "b") #=> true
[Link]("a", "c")
p [Link]?("a", "c") #=> true
open("d", "w") {}
p [Link]?("a", "d") #=> false
join(string, ...) → string
Returns a new string formed by joining the strings using "/".
[Link]("usr", "mail", "gumby") #=> "usr/mail/gumby"
lchmod(mode_int, file_name, ...) → integer
Equivalent to File::chmod, but does not follow symbolic links (so it will change the
permissions associated with the link, not the file referenced by the link). Often not
available.
lchown(owner_int, group_int, file_name,..) → integer
Equivalent to File::chown, but does not follow symbolic links (so it will change the
owner associated with the link, not the file referenced by the link). Often not
available. Returns number of files in the argument list.
link(old_name, new_name) → 0
Creates a new name for an existing file using a hard link. Will not overwrite
new_name if it already exists (raising a subclass of SystemCallError). Not available
on all platforms.
[Link]("testfile", ".testfile") #=> 0
[Link](".testfile")[0] #=> "This is line one\n"
lstat(file_name) → stat
Same as File::stat, but does not follow the last symbolic link. Instead, reports on the
link itself.
[Link]("testfile", "link2test") #=> 0
[Link]("testfile").size #=> 66
[Link]("link2test").size #=> 8
[Link]("link2test").size #=> 66
lutime(atime, mtime, file_name,...) → integer
Sets the access and modification times of each named file to the first two
arguments. If a file is a symlink, this method acts upon the link itself as opposed to
its referent; for the inverse behavior, see [Link]. Returns the number of file
names in the argument list.
mkfifo(file_name, mode=0666) => 0
Creates a FIFO special file with name file_name. mode specifies the FIFO's
permissions. It is modified by the process's umask in the usual way: the
permissions of the created file are (mode & ~umask).
mtime(file_name) → time
Returns the modification time for the named file as a Time object.
file_name can be an IO object.
[Link]("testfile") #=> Tue Apr 08 [Link] CDT 2003
new(filename, mode="r" [, opt]) → file
new(filename [, mode [, perm]] [, opt]) → file
Opens the file named by filename according to the given mode and returns a new
File object.
See [Link] for a description of mode and opt.
If a file is being created, permission bits may be given in perm. These mode and
permission bits are platform dependent; on Unix systems, see open(2) and
chmod(2) man pages for details.
The new File object is in buffered mode (or non-sync mode), unless filename is a tty.
See IO#flush, IO#fsync, IO#fdatasync, and IO#sync= about sync mode.
Examples¶ ↑
f = [Link]("testfile", "r")
f = [Link]("newfile", "w+")
f = [Link]("newfile", File::CREAT|File::TRUNC|File::RDWR, 0644)
open(filename, mode="r" [, opt]) → file
open(filename [, mode [, perm]] [, opt]) → file
open(filename, mode="r" [, opt]) {|file| block } → obj
open(filename [, mode [, perm]] [, opt]) {|file| block } → obj
With no associated block, [Link] is a synonym for [Link]. If the optional code
block is given, it will be passed the opened file as an argument and the File object
will automatically be closed when the block terminates. The value of the block will
be returned from [Link].
If a file is being created, its initial permissions may be set using the perm parameter.
See [Link] for further discussion.
See [Link] for a description of the mode and opt parameters.
owned?(file_name) → true or false
Returns true if the named file exists and the effective user id of the calling process
is the owner of the file.
file_name can be an IO object.
path(path) → string
Returns the string representation of the path
[Link]("/dev/null") #=> "/dev/null"
[Link]([Link]("/tmp")) #=> "/tmp"
pipe?(file_name) → true or false
Returns true if the named file is a pipe.
file_name can be an IO object.
readable?(file_name) → true or false
Returns true if the named file is readable by the effective user and group id of this
process. See eaccess(3).
readable_real?(file_name) → true or false
Returns true if the named file is readable by the real user and group id of this
process. See access(3).
readlink(link_name) → file_name
Returns the name of the file referenced by the given link. Not available on all
platforms.
[Link]("testfile", "link2test") #=> 0
[Link]("link2test") #=> "testfile"
realdirpath(pathname [, dir_string]) → real_pathname
Returns the real (absolute) pathname of pathname in the actual filesystem. The real
pathname doesn't contain symlinks or useless dots.
If dir_string is given, it is used as a base directory for interpreting relative pathname
instead of the current directory.
The last component of the real pathname can be nonexistent.
realpath(pathname [, dir_string]) → real_pathname
Returns the real (absolute) pathname of pathname in the actual filesystem not
containing symlinks or useless dots.
If dir_string is given, it is used as a base directory for interpreting relative pathname
instead of the current directory.
All components of the pathname must exist when this method is called.
rename(old_name, new_name) → 0
Renames the given file to the new name. Raises a SystemCallError if the file cannot
be renamed.
[Link]("afile", "[Link]") #=> 0
setgid?(file_name) → true or false
Returns true if the named file has the setgid bit set.
setuid?(file_name) → true or false
Returns true if the named file has the setuid bit set.
size(file_name) → integer
Returns the size of file_name.
file_name can be an IO object.
size?(file_name) → Integer or nil
Returns nil if file_name doesn't exist or has zero size, the size of the file otherwise.
file_name can be an IO object.
socket?(file_name) → true or false
Returns true if the named file is a socket.
file_name can be an IO object.
split(file_name) → array
Splits the given string into a directory and a file component and returns them in a
two-element array. See also File::dirname and File::basename.
[Link]("/home/gumby/.profile") #=> ["/home/gumby", ".profile"]
stat(file_name) → stat
Returns a File::Stat object for the named file (see File::Stat).
[Link]("testfile").mtime #=> Tue Apr 08 [Link] CDT 2003
sticky?(file_name) → true or false
Returns true if the named file has the sticky bit set.
symlink(old_name, new_name) → 0
Creates a symbolic link called new_name for the existing file old_name. Raises a
NotImplementedError exception on platforms that do not support symbolic links.
[Link]("testfile", "link2test") #=> 0
symlink?(file_name) → true or false
Returns true if the named file is a symbolic link.
truncate(file_name, integer) → 0
Truncates the file file_name to be at most integer bytes long. Not available on all
platforms.
f = [Link]("out", "w")
[Link]("1234567890") #=> 10
[Link] #=> nil
[Link]("out", 5) #=> 0
[Link]("out") #=> 5
umask() → integer
umask(integer) → integer
Returns the current umask value for this process. If the optional argument is given,
set the umask to that value and return the previous value. Umask values are
subtracted from the default permissions, so a umask of 0222 would make a file
read-only for everyone.
[Link](0006) #=> 18
[Link] #=> 6
unlink(file_name, ...) → integer
Deletes the named files, returning the number of names passed as arguments.
Raises an exception on any error. Since the underlying implementation relies on the
unlink(2) system call, the type of exception raised depends on its error type (see
[Link]/man/2/unlink) and has the form of e.g. Errno::ENOENT.
See also Dir::rmdir.
utime(atime, mtime, file_name,...) → integer
Sets the access and modification times of each named file to the first two
arguments. If a file is a symlink, this method acts upon its referent rather than the
link itself; for the inverse behavior see [Link]. Returns the number of file names
in the argument list.
world_readable?(file_name) → integer or nil
If file_name is readable by others, returns an integer representing the file
permission bits of file_name. Returns nil otherwise. The meaning of the bits is
platform dependent; on Unix systems, see stat(2).
file_name can be an IO object.
File.world_readable?("/etc/passwd") #=> 420
m = File.world_readable?("/etc/passwd")
sprintf("%o", m) #=> "644"
world_writable?(file_name) → integer or nil
If file_name is writable by others, returns an integer representing the file permission
bits of file_name. Returns nil otherwise. The meaning of the bits is platform
dependent; on Unix systems, see stat(2).
file_name can be an IO object.
File.world_writable?("/tmp") #=> 511
m = File.world_writable?("/tmp")
sprintf("%o", m) #=> "777"
writable?(file_name) → true or false
Returns true if the named file is writable by the effective user and group id of this
process. See eaccess(3).
writable_real?(file_name) → true or false
Returns true if the named file is writable by the real user and group id of this
process. See access(3).
zero?(file_name) → true or false
Returns true if the named file exists and has a zero size.
file_name can be an IO object.
Public Instance Methods
atime → time
Returns the last access time (a Time object) for file, or epoch if file has not been
accessed.
[Link]("testfile").atime #=> Wed Dec 31 [Link] CST 1969
birthtime → time
Returns the birth time for file.
[Link]("testfile").birthtime #=> Wed Apr 09 [Link] CDT 2003
If the platform doesn't have birthtime, raises NotImplementedError.
chmod(mode_int) → 0
Changes permission bits on file to the bit pattern represented by mode_int. Actual
effects are platform dependent; on Unix systems, see chmod(2) for details. Follows
symbolic links. Also see File#lchmod.
f = [Link]("out", "w");
[Link](0644) #=> 0
chown(owner_int, group_int ) → 0
Changes the owner and group of file to the given numeric owner and group id's.
Only a process with superuser privileges may change the owner of a file. The
current owner of a file may change the file's group to any group to which the owner
belongs. A nil or -1 owner or group id is ignored. Follows symbolic links. See also
File#lchown.
[Link]("testfile").chown(502, 1000)
ctime → time
Returns the change time for file (that is, the time directory information about the file
was changed, not the file itself).
Note that on Windows (NTFS), returns creation time (birth time).
[Link]("testfile").ctime #=> Wed Apr 09 [Link] CDT 2003
flock(locking_constant) → 0 or false
Locks or unlocks a file according to locking_constant (a logical or of the values in
the table below). Returns false if File::LOCK_NB is specified and the operation
would otherwise have blocked. Not available on all platforms.
Locking constants (in class File):
LOCK_EX | Exclusive lock. Only one process may hold an
| exclusive lock for a given file at a time.
----------+------------------------------------------------
LOCK_NB | Don't block when locking. May be combined
| with other lock options using logical or.
----------+------------------------------------------------
LOCK_SH | Shared lock. Multiple processes may each hold a
| shared lock for a given file at the same time.
----------+------------------------------------------------
LOCK_UN | Unlock.
Example:
# update a counter using write lock
# don't use "w" because it truncates the file before lock.
[Link]("counter", File::RDWR|File::CREAT, 0644) {|f|
[Link](File::LOCK_EX)
value = [Link].to_i + 1
[Link]
[Link]("#{value}\n")
[Link]
[Link]([Link])
}
# read the counter using read lock
[Link]("counter", "r") {|f|
[Link](File::LOCK_SH)
p [Link]
}
lstat → stat
Same as IO#stat, but does not follow the last symbolic link. Instead, reports on the
link itself.
[Link]("testfile", "link2test") #=> 0
[Link]("testfile").size #=> 66
f = [Link]("link2test")
[Link] #=> 8
[Link] #=> 66
mtime → time
Returns the modification time for file.
[Link]("testfile").mtime #=> Wed Apr 09 [Link] CDT 2003
path → filename
to_path → filename
Returns the pathname used to create file as a string. Does not normalize the name.
The pathname may not point to the file corresponding to file. For instance, the
pathname becomes void when the file has been moved or deleted.
This method raises IOError for a file created using File::Constants::TMPFILE
because they don't have a pathname.
[Link]("testfile").path #=> "testfile"
[Link]("/tmp/../tmp/xxx", "w").path #=> "/tmp/../tmp/xxx"
size → integer
Returns the size of file in bytes.
[Link]("testfile").size #=> 66
to_path → filename
Returns the pathname used to create file as a string. Does not normalize the name.
The pathname may not point to the file corresponding to file. For instance, the
pathname becomes void when the file has been moved or deleted.
This method raises IOError for a file created using File::Constants::TMPFILE
because they don't have a pathname.
[Link]("testfile").path #=> "testfile"
[Link]("/tmp/../tmp/xxx", "w").path #=> "/tmp/../tmp/xxx"
truncate(integer) → 0
Truncates file to at most integer bytes. The file must be opened for writing. Not
available on all platforms.
f = [Link]("out", "w")
[Link]("1234567890") #=> 10
[Link](5) #=> 0
[Link]() #=> nil
[Link]("out") #=> 5
set SENDGRID_API_KEY=YOUR_API_KEY
Permanently set the environment variable(accessible in all subsequent cli
sessions):
setx SENDGRID_API_KEY "YOUR_API_KEY"
Install Package
pip install sendgrid
Dependencies
Python-HTTP-Client
ECDSA-Python
Quick Start
Hello Email
The following is the minimum needed code to send an email with the /mail/send
Helper (here is a full example):
With Mail Helper Class
import sendgrid
import os
from [Link] import *
sg = [Link](api_key=[Link]('SENDGRID_API_KEY'))
from_email = Email("test@[Link]")
to_email = To("test@[Link]")
subject = "Sending with SendGrid is Fun"
content = Content("text/plain", "and easy to do anywhere, even with Python")
mail = Mail(from_email, to_email, subject, content)
response = [Link](request_body=[Link]())
print(response.status_code)
print([Link])
print([Link])
The Mail constructor creates a personalization object for you. Here is an example of
how to add it.
Without Mail Helper Class
The following is the minimum needed code to send an email without the /mail/send
Helper (here is a full example):
import sendgrid
import os
sg = [Link](api_key=[Link]('SENDGRID_API_KEY'))
data = {
"personalizations": [
{
"to": [
{
"email": "test@[Link]"
}
],
"subject": "Sending with SendGrid is Fun"
}
],
"from": {
"email": "test@[Link]"
},
"content": [
{
"type": "text/plain",
"value": "and easy to do anywhere, even with Python"
}
]
}
response = [Link](request_body=data)
print(response.status_code)
print([Link])
print([Link])
General v3 Web API Usage (With Fluent Interface)
import sendgrid
import os
sg = [Link](api_key=[Link]('SENDGRID_API_KEY'))
response = [Link]()
print(response.status_code)
print([Link])
print([Link])
General v3 Web API Usage (Without Fluent Interface)
import sendgrid
import os
sg = [Link](api_key=[Link]('SENDGRID_API_KEY'))
response = [Link]._("suppression/bounces").get()
print(response.status_code)
print([Link])
print([Link])
[Link]
curl -G [Link] \
-u '[YOUR ACCOUNT SID]:[YOUR AUTH TOKEN]'
/Credentials/PublicKeys/
POST [Link]
# Download the helper library from [Link]
import os
from [Link] import Client
# Find your Account SID and Auth Token at [Link]/console
# and set the environment variables. See [Link]
account_sid = [Link]['TWILIO_ACCOUNT_SID']
auth_token = [Link]['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
public_key = [Link] \
.v1 \
.credentials \
.public_key \
.create(public_key='publickey')
{
"account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"date_created": "2015-07-31T[Link]Z",
"date_updated": "2015-07-31T[Link]Z",
"friendly_name": "friendly_name",
"sid": "CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"url": "[Link]
CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
}
Fetch a CredentialPublicKey resource
GET [Link]
# Download the helper library from [Link]
import os
from [Link] import Client
# Find your Account SID and Auth Token at [Link]/console
# and set the environment variables. See [Link]
account_sid = [Link]['TWILIO_ACCOUNT_SID']
auth_token = [Link]['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
public_key = [Link] \
.v1 \
.credentials \
.public_key('CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.fetch()
print(public_key.friendly_name)
Output
{
"account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"date_created": "2015-07-31T[Link]Z",
"date_updated": "2015-07-31T[Link]Z",
"friendly_name": "friendly_name",
"sid": "CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"url": "[Link]
CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
}
Read multiple CredentialPublicKey resources
GET [Link]
# Download the helper library from [Link]
import os
from [Link] import Client
# Find your Account SID and Auth Token at [Link]/console
# and set the environment variables. See [Link]
account_sid = [Link]['TWILIO_ACCOUNT_SID']
auth_token = [Link]['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
public_key = [Link].public_key.list(limit=20)
for record in public_key:
print([Link])
{
"credentials": [
{
"account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"date_created": "2015-07-31T[Link]Z",
"date_updated": "2015-07-31T[Link]Z",
"friendly_name": "friendly_name",
"sid": "CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"url": "[Link]
CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
}
],
"meta": {
"first_page_url": "[Link]
PageSize=50&Page=0",
"key": "credentials",
"next_page_url": "[Link]
PageSize=50&Page=1",
"page": 0,
"page_size": 50,
"previous_page_url": "[Link]
PageSize=50&Page=0",
"url": "[Link]
PageSize=50&Page=0"
}
}
import [Link];
import [Link];
import [Link];
import [Link];
import [Link];
import [Link];
public class NewSubAccount {
private static final String ACCOUNT_SID =
[Link]("TWILIO_ACCOUNT_SID");
private static final String API_KEY = [Link]("TWILIO_MAIN_KEY");
private static final String API_SECRET =
[Link]("TWILIO_MAIN_SECRET");
private static final String PUBLIC_KEY_SID =
[Link]("TWILIO_PUBLIC_KEY_SID");
private static final PrivateKey PRIVATE_KEY = [Link]();
private static final String PUBLIC_KEY = [Link]();
public static void main(String[] args) {
//Create client with Main Account Credentials
TwilioRestClient client = new [Link](API_KEY, API_SECRET)
.accountSid(ACCOUNT_SID)
.httpClient(new ValidationClient(ACCOUNT_SID, PUBLIC_KEY_SID,
API_KEY, PRIVATE_KEY))
.build();
//Create new Subaccount
Account myAccount = [Link]().setFriendlyName("PKCV
Account").create(client);
String myAccountSid = [Link]();
//Seed API Key
NewKey myKey = [Link](myAccountSid).setFriendlyName("PKCV
Key").create(client);
//Seed Public Key
PublicKey myPubKey = [Link](PUBLIC_KEY)
.setAccountSid(myAccountSid)
.setFriendlyName("Seed PK")
.create(client);
//Create a client for new Subaccount
TwilioRestClient newClient = new [Link]([Link](),
[Link]())
.accountSid(myAccountSid)
.httpClient(new ValidationClient(myAccountSid, [Link](),
[Link](), PRIVATE_KEY))
.build();
//Make API call with new account and list public key sid(s) assigned to account
Iterable pks = [Link]().read(newClient);
for (PublicKey pk : pks) {
[Link]("key: " + [Link]() + " - friendlyName: " +
[Link]());
}
//Clean up
[Link](myAccountSid).setStatus([Link]).update(client)
;
}
}
The
[Link]
{
"anonymousId": "507f191e810c19729de860ea",
"context": {
"locale": "en-US",
"page": {
"title": "Analytics Academy",
"url": "[Link]
}
},
"integrations": {
"All": true,
"Mixpanel": false,
"Salesforce": false,
"My Destination Function (My Workspace)": true
}
}
integrations: {
Appsflyer: {
appsflyerId: 'xxxxxx'
}
}
For
pod "Segment-Adjust"
#import <Segment-Adjust/SEGAdjustIntegrationFactory.h>
NSString *const SEGMENT_WRITE_KEY = @"
xDfE5GeTMyukogiMks5Rgd1jDCIzDeK2 ";
SEGAnalyticsConfiguration *config = [SEGAnalyticsConfiguration
configurationWithWriteKey:xDfE5GeTMyukogiMks5Rgd1jDCIzDeK2];
[config use:[SEGAdjustIntegrationFactory instance]];
[SEGAnalytics setupWithConfiguration:config];
[Link]().then((status) {
switch (status) {
case 0:
// ATTrackingManagerAuthorizationStatusNotDetermined case
break;
case 1:
// ATTrackingManagerAuthorizationStatusRestricted case
break;
case 2:
// ATTrackingManagerAuthorizationStatusDenied case
break;
case 3:
// ATTrackingManagerAuthorizationStatusAuthorized case
break;
}
});
WWWWWW||WWWWWW
W W W||W W W
||
( OO )__________
/ | \
/o o| MIT \
\___/||_||__||_|| *
|| || || ||
_||_|| _||_||
(__|__|(__|__|
The MIT License (MIT)
Copyright (c) 2016 Segment, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE
SOFTWARE.
Add the Analytics dependency to your [Link].
Initialize and configure the client.
repositories {
mavenCentral()
}
dependencies {
implementation '[Link]:core:<latest_version>'
}
Initialize and configure the client.
Segment recommends you to initialize the client in your main function.
Analytics("xDfE5GeTMyukogiMks5Rgd1jDCIzDeK2") {
application = "MainApp"
flushAt = 3
flushInterval = 10
}
fun identify(userId: String, traits: JsonObject = emptyJsonObject)
// If <T> is annotated with @Serializable you will not need to provide a
serializationStrategy
fun <T> identify(userId: String, traits: T, serializationStrategy: KSerializer<T>)
fun track(name: String, properties: JsonObject = emptyJsonObject)
// If <T> is annotated with @Serializable you will not need to provide a
serializationStrategy
fun <T> track(name: String, properties: T, serializationStrategy: KSerializer<T>)
[Link]("View Product", buildJsonObject {
put("productId", 123)
put("productName", "Striped trousers")
});
fun screen(screenTitle: String, properties: JsonObject = emptyJsonObject,
category: String = "")
// If <T> is annotated with @Serializable you will not need to provide a
serializationStrategy
fun <T> screen(screenTitle: String, properties: T, category: String = "",
serializationStrategy: KSerializer<T>)
fun group(groupId: String, traits: JsonObject = emptyJsonObject)
// If <T> is annotated with @Serializable you will not need to provide a
serializationStrategy
fun <T> group(groupId: String, traits: T, serializationStrategy: KSerializer<T>)
class SomePlugin: Plugin {
override val type = [Link]
override val name = "SomePlugin"
override lateinit var analytics: Analytics
override fun execute(event: BaseEvent): BaseEvent? {
[Link]("foo", "bar")
return event
}
}
class SomePlugin: EventPlugin {
override fun track(event: TrackEvent): BaseEvent? {
// code to modify track event
return event
}
override fun identify(event: TrackEvent): BaseEvent? {
// code to modify identify event
return event
}
}
class AmplitudePlugin: DestinationPlugin() {
override val key = "Amplitude" // This is the name of the destination plugin, it is
used to retrieve settings internally
val amplitudeSDK: Amplitude // This is an instance of the partner SDK
init { // Initializing the partner SDK and setting things up
amplitudeSDK = [Link]
[Link](applicationContext, "API_KEY");
}
/*
* Implementing this function allows this plugin to hook into any track events
* coming into the analytics timeline
*/
override fun track(event: TrackEvent): BaseEvent? {
[Link]([Link])
return event
}
}
With Analytics-Kotlin, you can send data using Kotlin applications to any analytics
or marketing tool without having to learn, test, or implement a new API every time.
Analytics-Kotlin enables you to process and track the history of a payload, while
Segment controls the API and prevents unintended operations.
val amplitudePlugin = AmplitudePlugin()
[Link](amplitudePlugin) // add amplitudePlugin to the analytics client
val amplitudeEnrichment = object: Plugin {
override val type = [Link]
override val name = "SomePlugin"
override lateinit var analytics: Analytics
override fun execute(event: BaseEvent): BaseEvent? {
[Link]("foo", "bar")
return event
}
}
[Link](amplitudeEnrichment) // add enrichment plugin to amplitude
timeline
Adding a plugin
Adding plugins enable you to modify your analytics implementation to best fit your
needs. You can add a plugin using this:
val yourPlugin = SomePlugin()
[Link](yourPlugin)
class SomePlugin: Plugin {
override val type = [Link]
override val name = "SomePlugin"
override lateinit var analytics: Analytics
override fun execute(event: BaseEvent): BaseEvent? {
[Link]("foo", "bar")
return event
}
}
val yourPlugin = SomePlugin()
[Link](yourPlugin)
fun add(plugin: Plugin): Analytics
val plugin = object: Plugin {
override val type = [Link]
override val name = "SomePlugin"
override lateinit var analytics: Analytics
}
[Link](plugin)
fun find(pluginName: String): Plugin
val plugin = [Link]("SomePlugin")
public fun flush()
[Link]("SomePlugin")
- Workspace Owner, or
- Source Admin and Warehouse Admin
[Link]
connected-warehouses
{
"warehouse_id": "{{warehouse_id}}",
"enabled": false
}
curl --location --request PATCH '[Link]
workspaces/myworkspace/sources/js/connected-warehouses' \
--header 'Content-Type: application/json' \
--data '{
"warehouse_id": "{{warehouse_id}}",
"enabled": false
}'
[Link]
sfn_{{source_id}}/deploy
curl --location -g '[Link]
workspace_id/functions/sfn_{{source_id}}/deploy' \
--header 'Authorization: Bearer ...' \
--header 'Content-Type: application/json'
[Link]
destinations/
{
"destination": {
"name": "workspaces/myworkspace/sources/js/destinations/google-analytics",
"config": [
{
"name": "workspaces/myworkspace/sources/js/destinations/google-analytics/
config/trackingId",
"type": "string",
"value": "UA-970334309-1"
}
],
"enabled": false,
"connection_mode": "UNSPECIFIED"
}
}
curl --location '[Link]
myworkspace/sources/js/destinations/' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer ...' \
--data '{
"destination": {
"name": "workspaces/myworkspace/sources/js/destinations/google-analytics",
"config": [
{
"name": "workspaces/myworkspace/sources/js/destinations/google-analytics/
config/trackingId",
"type": "string",
"value": "UA-970334309-1"
},
{
"name": "workspaces/myworkspace/sources/js/destinations/google-analytics/
config/dimensions",
"type": "map",
"value": {
"User Type": "dimension1",
"Gender": "dimension2"
}
},
{
"name": "workspaces/myworkspace/sources/js/destinations/google-analytics/
config/enableServerIdentify",
"type": "boolean",
"value": true
}
],
"enabled": false
}
}'
[Link]
destinations/google-analytics
curl --location '[Link]
myworkspace/sources/js/destinations/google-analytics' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer ...'
curl --location -g '[Link]
{{destination_config_name}}' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer ...'
[Link]
plans/
{
"tracking_plan": {
"display_name": "Kicks App",
"rules": {
"global": {
"$schema": "[Link]
"type": "object",
"properties": {
"context": {
"type": "object",
"properties": {
"library": {
"type": [ "object" ]
}
},
* Other community or team contact