- 1. adoc
- 2. amavis
- 3. ansible
- 4. autoyast
- 5. awk
- 6. azure client
- 7. bash
- 8. Bash scripting
- 9. calico
- 10. cgroups
- 11. containerd
- 12. csync2
- 13. curl
- 14. diff
- 15. docker
- 16. du
- 17. elasticsearch
- 18. etcd
- 19. ffmpeg
- 20. find
- 21. FluentD
- 22. git
- 23. gopass
- 24. gpg
- 25. HAproxy
- 26. HAR - HTTP Archive format
- 27. helm
- 28. hpacucli, hpssacli
- 29. inotifywait
- 30. ipsec
- 31. jq
- 32. k3d
- 33. Troubleshooting Kubernetes commands
- 34. Deleting the Kubernetes namespace stuck in terminating state
- 35. kscreen-doctor
- 36. bash completion
- 37. kubectl
- 37.1. merge multiple kubeconfig
- 37.2. list all pods and its nodes
- 37.3. list all container images running in a cluster
- 37.4. create,get secret
- 37.5. object check (yaml lint, api version check)
- 37.6. using config map for config files
- 37.7. object relation graph generating
- 37.8. delete evicted pod
- 37.9. node conditions
- 37.10. display which pods have the PVC in use
- 37.11. create pod
- 37.12. cheatsheet
# footnotes
footnote:[URL]
footnote:id[URL]
# example
Ported from upstreamsfootnote:[https://git.home.lan/app1],footnote:[https://git.home.lan/app2]

image::sunset.jpg[alt text]
.Title of image
[#imgID link=https://home.lan/img1.jpg]
image::img1.jpg[alt text,width,height]

[%linenums,bash]
----
echo "users"
getent passwd
echo "groups"
getent
----
# or
[,bash,linenums]
----
echo "users"
getent passwd
echo "groups"
getent
----
- asciidoc, adoc
- awk, mawk, nawk, gawk
- bash, sh, zsh (shell-vs-console)
- crmsh, crm, pcmk
- curl
- dns, zone, bind
- dockerfile, docker
- xml, html, xhtml, rss, atom, xjb, xsd, xsl, plist, svg
- http, https
- ini, toml
- json
- less
- ldif
- markdown, md, mkdown, mkd
- nginx, nginxconf
- plaintext, txt, text
- pgsql, postgres, postgresql
- python, py, gyp
- rpm-specfile, rpm, spec, rpm-spec, specfile
- shell, console (shell-vs-console)
- sql
- terraform, tf, hcl
- vim
- yml, yaml
=== title
[source,adoc]
----
----

steps: asciidoc → XML → markdown
# single file
asciidoc -b docbook README.adoc
pandoc -f docbook -t markdown_strict file.xml -o README.md
podman run --rm -v $PWD:/documents/ --entrypoint '["/usr/bin/asciidoctor", "-w", "--trace", "-b", "docbook", "-t", "/documents/README.adoc"]' docker.io/asciidoctor/docker-asciidoctor
podman run --rm -v $PWD:/data pandoc/core -f docbook -t markdown README.xml -o README.md
# all adoc files
for i in *.adoc; do asciidoc -b docbook $i; done
for i in *.xml; do pandoc -f docbook -t markdown_gfm $i -o $i.md; done
podman run --rm -v $PWD:/documents/ --entrypoint '["/usr/bin/asciidoctor", "-w", "--trace", "-b", "docbook", "-t", "/documents/*.adoc"]' docker.io/asciidoctor/docker-asciidoctor
for i in *.xml; do podman run --rm -v $PWD:/data pandoc/core -f docbook -t markdown $i -o ${i%.*}.md ; done

- name: copy 1 file
  copy:
    src: files/motd
    dest: /etc/motd
    owner: root
    group: root
    mode: 0644

- name: copy lot of files
  ansible.posix.synchronize:
    src: some/relative/path
    dest: /some/absolute/path

- name: copy content
  copy:
    content: "Welcome to this system."
    dest: /etc/motd
    owner: root
    group: root
    mode: 0644

- name: create user
  user:
    name: ricardo
    group: users
    groups: wheel
    uid: 2001
    password: "{{ 'mypassword' | password_hash('sha512') }}"
    state: present

- name: install package
  package:
    name: httpd
    state: present

- name: start service
  service:
    name: sshd
    state: started

# firewalld
- name: Ensure port 80 (http) is open
  firewalld:
    service: http
    state: enabled
    permanent: yes
    immediate: yes

# open port test
- name: Ensure port 3000/TCP is open
  firewalld:
    port: 3000/tcp
    state: enabled
    permanent: yes
    immediate: yes

# create dir
- name: Ensure directory /app exists
  file:
    path: /app
    state: directory
    owner: ricardo
    group: users
    mode: 0770

# lineinfile
- name: Ensure host rh8-vm03 in hosts file
  lineinfile:
    path: /etc/hosts
    line: 192.168.122.236 rh8-vm03
    state: present

# edit config
- name: Ensure root cannot login via ssh
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^PermitRootLogin'
    line: PermitRootLogin no
    state: present

# unarchive
- name: Extract contents of app.tar.gz
  unarchive:
    src: /tmp/app.tar.gz
    dest: /app
    remote_src: yes

# run command
- name: Run the app installer
  command: "/app/install.sh"

# templates/motd.j2
Welcome to {{ inventory_hostname }}.

- name: copy from template
  template:
    src: templates/motd.j2
    dest: /etc/motd
    owner: root
    group: root
    mode: 0644
    validate: cat /etc/motd

# zypper patch
ansible -e ZYPP_LOCK_TIMEOUT=60 -f 10 -i hosts -m zypper -a 'name="*" state="latest" type="patch"' all
ansible -f 10 -i hosts -m apt -a 'name="*" state="latest" update_cache="yes"' all
ansible -i hosts -m shell -a 'zypper ps -s' all -o |grep 'The following running processes use deleted files:' | awk '{print $1}'
# postgres
ansible --become --become-user=postgres -i hosts -m postgresql_query -a 'db=postgres query="GRANT pg_monitor TO zabbix_user;"' all
ansible -i hosts -m shell -a 'grep -i permission /var/lib/pgsql/log/postgresql-2023-04-*| head -1' -o all |sort
ansible --become --become-user=postgres -i hosts -m postgresql_db -a 'name=DBNAME state=absent' $DBHOST # delete DB
ansible --become --become-user=postgres -i hosts -m postgresql_db -a 'name=DBNAME state=present' $DBHOST # create DB
ansible --become --become-user=postgres -i hosts -m shell -a 'psql -c "select * from pg_stat_activity"' $DBHOST
# cron
ansible -i hosts -m cron -a 'name=pg_dump_global-only weekday=* minute=0 hour=23 user=postgres job="pg_dumpall --globals-only > ~/roles-$(date +%A).sql" cron_file=pg_dump_global-only' -CD all
# authorized_key
# if you have sudo rights (-bK)
ansible -bK all -m authorized_key -a "user=automation key={{ lookup('file\', '/home/automation/.ssh/id.pub\') }}" -CD
ansible -bK all -m authorized_key -a "user=automation key='{{ lookup(\"file\", \"/home/automation/.ssh/id.pub\") }}'" -CD
ansible -bK all -m authorized_key -a "user=automation key='{{ lookup(\\'file\\', \\'/home/automation/.ssh/id.pub\\') }}'" -CD
ansible -bK all -m authorized_key -a "user=automation key=\"{{ lookup('file', '/home/automation/.ssh/id.pub') }}\"" -CD
ansible -bK all -m authorized_key -a "user=automation key=\"{{ lookup(\\\"file\\\", \\\"/home/automation/.ssh/id.pub\\\") }}\"" -CD
# root, become, pass
# ssh root@
ansible-playbook -u root --ask-pass --ask-become-pass
# create file
ansible -i hosts -m file -a "path=/var/lib/pgsql/log state=directory mode=755 owner=postgres group=postgres"
# copy file
ansible -i hosts -m copy -a "src=motd-gen.sh dest=/usr/local/sbin/motd-gen.sh owner=root group=root mode=0755 state=present" -CD all
# without inventory
# append a comma after the host name(s)
ansible -k --user root --become all -i srv.example.com,srv1, -m setup
ansible-playbook -i example.com, playbook.yml

- https://sites.google.com/site/cloud1impulse/ansible-cheatsheet
- https://medium.com/edureka/ansible-cheat-sheet-guide-5fe615ad65c0
- https://lzone.de/cheat-sheet/Ansible
- https://github.com/germainlefebvre4/ansible-cheatsheet
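The tasks above are meant to be dropped into a playbook; a minimal wrapper sketch (the `all` host group, the `site.yml` file name, and the single task picked here are illustrative, not from the original notes):

[source,bash]
----
# hypothetical wrapper: wrap any of the tasks above in a playbook and run it
cat > site.yml <<'EOF'
- hosts: all
  become: yes
  tasks:
    - name: install package
      package:
        name: httpd
        state: present
EOF
ansible-playbook -i hosts site.yml
----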
awk '$1 ~ /pattern/ { ... }' infile # Match lines
awk '{if($1 ~ /pattern/) { ... }}' infile # Matching inside conditions
awk '{print $(NF - 1)}' # Negative indices

${parameter:-defaultValue} Use defaultValue if parameter is unset
${parameter:=defaultValue} Assign defaultValue if parameter is unset
${parameter:?"Error Message"} Display an error message if parameter is not set
${#var} Find the length of the string
${var%pattern} Remove from shortest rear (end) pattern
${var%%pattern} Remove from longest rear (end) pattern
${var:num1:num2} Substring
${var#pattern} Remove from shortest front pattern
${var##pattern} Remove from longest front pattern
${var/pattern/string} Find and replace (only replace first occurrence)
${var//pattern/string} Find and replace all occurrences
var="This is a test"
echo "${var~~}" # Reverse var #
echo "${var^^}" # UPPERCASE var #
echo "${var,,}" # lowercase var #file=/home/tux/book/book.tar.bz2
echo ${file#*/}
home/tux/book/book.tar.bz2file=/home/tux/book/book.tar.bz2
echo ${file##*/}
book.tar.bz2file=/home/tux/book/book.tar.bz2
echo ${file%.*}
/home/tux/book/book.tarfile=/home/tux/book/book.tar.bz2
echo ${file%%.*}
/home/tux/book/bookfile=/home/tux/book/book.tar.bz2
echo ${file/tux/wilber}
/home/wilber/book/book.tar.bz2file=/home/tux/book/book.tar.bz2
echo ${file//book/newspaper}
/home/tux/newspaper/newspaper.tar.bz2### functions
# usage: show_time $SECONDS
function show_time () {
    num=$1
    min=0
    hour=0
    day=0
    if((num>59));then
        ((sec=num%60))
        ((num=num/60))
        if((num>59));then
            ((min=num%60))
            ((num=num/60))
            if((num>23));then
                ((hour=num%24))
                ((day=num/24))
            else
                ((hour=num))
            fi
        else
            ((min=num))
        fi
    else
        ((sec=num))
    fi
    echo "$day"d "$hour"h "$min"m "$sec"s
}

read -s -p "pass: " SPASS; for i in srv1 srv2; do echo $SPASS | ssh $i sudo -S "cat /etc/sudoers | grep -v '#' | grep -v '^$'"; done
read -s -p "pass: " SPASS; for i in $(cat ~/serverek.txt); do echo $SPASS | ssh $i sudo -S "cat /etc/sudoers | grep -v '#' | grep -v '^$'"; done

.---------------------------------------------------------------------------.
| |
| Bash Redirections Cheat Sheet |
| |
+---------------------------------------------------------------------------+
| |
| Created by Peteris Krumins (peter@catonmat.net) |
| www.catonmat.net -- good coders code, great coders reuse |
| |
+-----------------------------.---------------------------------------------+
| Redirection | Description |
'-----------------------------'---------------------------------------------'
| cmd > file | Redirect the standard output (stdout) of |
| | `cmd` to a file. |
+-----------------------------'---------------------------------------------'
| cmd 1> file | Same as `cmd > file`. 1 is the default file |
| | descriptor for stdout. |
+-----------------------------'---------------------------------------------'
| cmd 2> file | Redirect the standard error (stderr) of |
| | `cmd` to a file. 2 is the default file |
| | descriptor for stderr. |
+-----------------------------'---------------------------------------------'
| cmd >> file | Append stdout of `cmd` to a file. |
+-----------------------------'---------------------------------------------'
| cmd 2>> file | Append stderr of `cmd` to a file. |
+-----------------------------'---------------------------------------------'
| cmd &> file | Redirect stdout and stderr to a file. |
+-----------------------------'---------------------------------------------'
| cmd > file 2>&1 | Another way to redirect both stdout and |
| | stderr of `cmd` to a file. This *is not* |
| | same as `cmd 2>&1 > file`. |
| | Redirection order matters! |
+-----------------------------'---------------------------------------------'
| cmd > /dev/null | Discard stdout of `cmd`. |
+-----------------------------'---------------------------------------------'
| cmd 2> /dev/null | Discard stderr of `cmd`. |
+-----------------------------'---------------------------------------------'
| cmd &> /dev/null | Discard stdout and stderr. |
+-----------------------------'---------------------------------------------'
| cmd < file | Redirect the contents of the file to the |
| | stdin of `cmd`. |
+-----------------------------'---------------------------------------------'
| cmd << EOL | |
| foo | Redirect a bunch of lines to the stdin. |
| bar | If 'EOL' is quoted, text is treated |
| baz | literally. This is called a here-document. |
| EOL | |
+-----------------------------'---------------------------------------------'
| cmd <<- EOL | |
| <tab>foo | Redirect a bunch of lines to the stdin. |
| <tab><tab>bar | The <tab>'s are ignored but not the |
| EOL | whitespace. Helpful for formatting. |
+-----------------------------'---------------------------------------------'
| cmd <<< "string" | Redirect a single line of text to stdin. |
| | This is called a here-string. |
+-----------------------------'---------------------------------------------'
| exec 2> file | Redirect stderr of all commands to a file |
| | forever. |
+-----------------------------'---------------------------------------------'
| exec 3< file | Open a file for reading using a custom fd. |
+-----------------------------'---------------------------------------------'
| exec 3> file | Open a file for writing using a custom fd. |
+-----------------------------'---------------------------------------------'
| exec 3<> file | Open a file for reading and writing using |
| | a custom file descriptor. |
+-----------------------------'---------------------------------------------'
| exec 3>&- | Close a file descriptor. |
+-----------------------------'---------------------------------------------'
| exec 4>&3 | Make file descriptor 4 to be a copy of file |
| | descriptor 3. (Copy fd 3 to 4.) |
+-----------------------------'---------------------------------------------'
| exec 4>&3- | Copy file descriptor 3 to 4 and close fd 3 |
+-----------------------------'---------------------------------------------'
| echo "foo" >&3 | Write to a custom file descriptor. |
+-----------------------------'---------------------------------------------'
| cat <&3 | Read from a custom file descriptor. |
+-----------------------------'---------------------------------------------'
| (cmd1; cmd2) > file | Redirect stdout from multiple commands to a |
| | file (using a sub-shell). |
+-----------------------------'---------------------------------------------'
| { cmd1; cmd2; } > file | Redirect stdout from multiple commands to a |
| | file (faster; not using a sub-shell). |
+-----------------------------'---------------------------------------------'
| exec 3<> /dev/tcp/host/port | Open a TCP connection to host:port. |
+-----------------------------'---------------------------------------------'
| exec 3<> /dev/udp/host/port | Open a UDP connection to host:port. |
+-----------------------------'---------------------------------------------'
| cmd <(cmd1) | Redirect stdout of `cmd1` to an anonymous |
| | fifo, then pass the fifo to `cmd` as an |
| | argument. Useful when `cmd` doesn't read |
| | from stdin directly. |
+-----------------------------'---------------------------------------------'
| cmd < <(cmd1) | Redirect stdout of `cmd1` to an anonymous |
| | fifo, then redirect the fifo to stdin of |
| | `cmd`. Best example: |
| | diff <(find /path1 | sort) <(find /path2 | sort) |
+-----------------------------'---------------------------------------------'
| cmd <(cmd1) <(cmd2) | Redirect stdout of `cmd1` `cmd2` to two |
| | anonymous fifos, then pass both fifos as |
| | arguments to `cmd`. |
+-----------------------------.---------------------------------------------'
| cmd1 >(cmd2) | Run `cmd2` with its stdin connected to an |
| | anonymous fifo, and pass the filename of |
| | the pipe as an argument to `cmd1`. |
+-----------------------------.---------------------------------------------'
| cmd1 | cmd2 | Redirect stdout of cmd1 to stdin of `cmd2`. |
| | Pro-tip: This is the same as |
| | `cmd1 > >(cmd2)`, same as `cmd2 < <(cmd1)`, |
| | same as `> >(cmd2) cmd1`, same as |
| | `< <(cmd1) cmd2`. |
+-----------------------------'---------------------------------------------'
| cmd1 |& cmd2 | Redirect stdout and stderr of `cmd1` to |
| | stdin of `cmd2` (bash 4.0+ only). |
| | Use `cmd1 2>&1 | cmd2` for older bashes. |
+-----------------------------'---------------------------------------------'
| cmd | tee file | Redirect stdout of `cmd` to a file and |
| | print it to screen. |
+-----------------------------'---------------------------------------------'
| exec {filew}> file | Open a file for writing using a named file |
| | descriptor called `{filew}` (bash 4.1+) |
+-----------------------------'---------------------------------------------'
| cmd 3>&1 1>&2 2>&3 | Swap stdout and stderr of `cmd`. |
+-----------------------------'---------------------------------------------'
| cmd > >(cmd1) 2> >(cmd2) | Send stdout of `cmd` to `cmd1` and stderr |
| | `cmd` to `cmd2`. |
+-----------------------------'---------------------------------------------'
| cmd1 | cmd2 | cmd3 | cmd4 | Find out the exit codes of all piped cmds. |
| echo ${PIPESTATUS[@]} | |
+-----------------------------'---------------------------------------------'

lsns # Show all namespaces
lsns -p <pid> # Show everything under namespace <pid>
nsenter -t <pid> # Enter namespace
nsenter -t <pid> -p -r # Enter pid namespace (-p) and set root dir (-r)
nsenter -t <pid> <cmd> # Run command in namespace

ssh srv1
# podman image save IMAGE-NAME > IMAGE-NAME.tar
podman image save docker.io/rancher/rancher-webhook:v0.5.1 > docker.io_rancher_rancher-webhook:v0.5.1.tar
ctr image pull docker.io/rancher/rancher-webhook:v0.5.1
ctr image export docker.io_rancher_rancher-webhook:v0.5.1.tar docker.io/rancher/rancher-webhook:v0.5.1
rsync -avz ./docker.io_rancher_rancher-webhook:v0.5.1.tar airgap:~/
ssh airgap
export CRI_CONFIG_FILE=/var/lib/rancher/rke2/agent/etc/crictl.yaml KUBECONFIG=/etc/rancher/rke2/rke2.yaml PATH=/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin:/var/lib/rancher/rke2/bin:/opt/rke2/bin:/var/lib/rancher/rke2/bin:/opt/rke2/bin
ctr --address=/run/k3s/containerd/containerd.sock -n k8s.io image import docker.io_rancher_rancher-webhook:v0.5.1.tar

ERROR from peer node1: Identification failed!
csync2 -Rv # Remove files from database which do not match config entries.

cat << EOF > curl-format.txt
time_namelookup: %{time_namelookup}s\n
time_connect: %{time_connect}s\n
time_appconnect: %{time_appconnect}s\n
time_pretransfer: %{time_pretransfer}s\n
time_redirect: %{time_redirect}s\n
time_starttransfer: %{time_starttransfer}s\n
----------\n
time_total: %{time_total}s\n
EOF
curl -w "@curl-format.txt" -o /dev/null -s "https://test.hu"
# post json with variable substitution
curl "http://localhost:8080" \
-H "Accept: application/json" \
-H "Content-Type:application/json" \
--data @<(cat <<EOF
{
"me": "$USER",
"something": $(date +%s)
}
EOF
)
# post json from file
curl -X POST -H "Content-Type: application/json" -d @FILENAME DESTINATION

vimdiff <(ssh srv1 'sudo cat /etc/kubernetes/manifests/kube-apiserver.yaml') <(ssh srv2 'sudo cat /etc/kubernetes/manifests/kube-apiserver.yaml')

add CA for private registry
mkdir -p /etc/docker/certs.d/harbor.kozut.local
cp ca.pem /etc/docker/certs.d/harbor.kozut.local/ca.crt
docker login -u USER -p PASS harbor.kozut.local

GET "/_cluster/health?pretty"

{
"cluster_name": "elk",
"status": "red", # <============
"timed_out": true,
"number_of_nodes": 1,
"number_of_data_nodes": 1,
"active_primary_shards": 0,
"active_shards": 0,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
"delayed_unassigned_shards": 0,
"number_of_pending_tasks": 0,
"number_of_in_flight_fetch": 0,
"task_max_waiting_in_queue_millis": 0,
"active_shards_percent_as_number": 100
}
GET /_cat/indices
GET /_cat/indices?v&health=red

red open local-k8s.202406 6eHfC-xASuiOOWwieuxvnQ 1 1

GET "_cat/shards?v&h=n,index,shard,prirep,state,sto,sc,unassigned.reason,unassigned.details&s=sto,index" | grep UNASSIGNED | grep local-k8s.202406

local-k8s.202406 0 p UNASSIGNED INDEX_CREATED
local-k8s.202406 0 r UNASSIGNED INDEX_CREATED

curl -X GET "localhost:9200/_cluster/allocation/explain?pretty" -H 'Content-Type: application/json' -d'
{
"index": "local-k8s.202406",
"shard": 0,
"primary": true
}
'

{
"index" : "local-k8s.202406",
"shard" : 0,
"primary" : true,
"current_state" : "unassigned",
"unassigned_info" : {
"reason" : "INDEX_CREATED",
"at" : "2024-06-01T10:41:03.304Z",
"last_allocation_status" : "no"
},
"can_allocate" : "no",
"allocate_explanation" : "Elasticsearch isn't allowed to allocate this shard to any of the nodes in the cluster. Choose a node to which you expect this shard to be allocated, find this node in the node-by-node explanation, and address the reasons which prevent Elasticsearch from allocating this shard there.",
"node_allocation_decisions" : [
{
"node_id" : "ODSeJLJYQFiU6Au87J6ttw",
"node_name" : "node-1",
"transport_address" : "10.11.12.13:9300",
"node_attributes" : {
"ml.machine_memory" : "16680517632",
"ml.allocated_processors" : "4",
"ml.allocated_processors_double" : "4.0",
"ml.max_jvm_size" : "2147483648",
"ml.config_version" : "12.0.0",
"xpack.installed" : "true",
"transform.config_version" : "10.0.0"
},
"roles" : [
"data",
"data_cold",
"data_content",
"data_frozen",
"data_hot",
"data_warm",
"ingest",
"master",
"ml",
"remote_cluster_client",
"transform"
],
"node_decision" : "no",
"weight_ranking" : 1,
"deciders" : [
{
"decider" : "disk_threshold",
"decision" : "NO",
"explanation" : "the node is above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=90%], having less than the minimum required [19.9gb] free space, actual free: [16.7gb], actual used: [91.6%]"
}
]
}
]
}

Some common issues include:
- Low disk space: no room to allocate.
- Shard count limits: too many shards per node; common when new indexes are created or nodes are removed and the system cannot find a place for them.
- JVM or heap limits: some versions limit allocations when they are low on RAM.
- Routing or allocation rules: common in HA, cloud, or large complex setups.
- Corruption or other serious problems: many more issues can arise, each needing special attention, or, in many cases, just removing the old shards and adding new replicas or primaries.
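Once the underlying cause is fixed (for example after freeing disk space), Elasticsearch can be asked to retry allocations that previously gave up; a short sketch using the same credential and endpoint placeholders as the commands below:

[source,bash]
----
# retry shard allocations that hit the allocation-retry limit
curl -k -u 'USER:PASS' -X POST "https://local:9200/_cluster/reroute?retry_failed=true"
----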
references:
curl -k -u 'USER:PASS' -X GET "https://local:9200/_nodes/stats/fs"| jq
curl -k -u 'USER:PASS' -X GET "https://local:9200/_cluster/health/"| jq
curl -k -u 'USER:PASS' -X GET "https://local:9200/_cat/shards"| jq
curl -k -u 'USER:PASS' -X GET "https://local:9200/_cat/indices/?v=true"| jq

delete unnecessary index

curl -k -u 'USER:PASS' -X DELETE "https://local:9200/indexname"

change setting from percentage to absolute value
PUT _cluster/settings
{
"persistent": {
"cluster.routing.allocation.disk.threshold_enabled": true,
"cluster.routing.allocation.disk.watermark.low": "1g",
"cluster.routing.allocation.disk.watermark.high": "500m",
"cluster.info.update.interval": "5m"
}
}

kubectl drain node1 --delete-emptydir-data --ignore-daemonsets
kubectl get no
rke2-killall.sh
fdisk -l
parted /dev/sdX
mklabel msdos
mkpart primary 0% 100%
mkfs.xfs -L etcd /dev/sdX1
cd /var/lib/rancher/rke2/server/db/
mv etcd etcd_
mkdir etcd
blkid | grep etcd
blkid | grep etcd | cut -d' ' -f3
echo 'UUID="4c392b90-b2f3-48c1-a055-45ac1" /var/lib/rancher/rke2/server/db/etcd xfs defaults 0 0' >> /etc/fstab
mount -a
chown etcd:etcd /var/lib/rancher/rke2/server/db/etcd
ls -lad /var/lib/rancher/rke2/server/db/etcd
rsync -avz etcd_/ etcd/
find etcd_
find etcd
diff <(find etcd -printf '%f\n'|sort) <(find etcd_ -printf '%f\n'|sort)
systemctl start rke2-server
kubectl uncordon node1

# Multi Bitrate HLS VOD encrypted
ffmpeg -re -i {$pathFileName} -c:a aac -strict -2 -b:a 128k -c:v libx264 -vf scale=-2:360 -g 48 -keyint_min 48 -sc_threshold 0 -bf 3 -b_strategy 2 -b:v 800k -maxrate 856k -bufsize 1200k -f hls -hls_time 6 -hls_list_size 0 -hls_key_info_file {$destinationFile}keyinfo {$destinationFile}low/index.m3u8 -c:a aac -strict -2 -b:a 128k -c:v libx264 -vf scale=-2:540 -g 48 -keyint_min 48 -sc_threshold 0 -bf 3 -b_strategy 2 -b:v 1400k -maxrate 1498k -bufsize 2100k -f hls -hls_time 6 -hls_list_size 0 -hls_key_info_file {$destinationFile}keyinfo {$destinationFile}sd/index.m3u8 -c:a aac -strict -2 -b:a 128k -c:v libx264 -vf scale=-2:720 -g 48 -keyint_min 48 -sc_threshold 0 -bf 3 -b_strategy 2 -b:v 2800k -maxrate 2996k -bufsize 4200k -f hls -hls_time 6 -hls_list_size 0 -hls_key_info_file {$destinationFile}keyinfo {$destinationFile}hd/index.m3u8
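The `-hls_key_info_file` referenced above points at a small text file describing the AES-128 key; a minimal sketch of producing it (the file names and the key URL are placeholders):

[source,bash]
----
# key-info file format expected by ffmpeg:
#   line 1: key URI written into the playlist
#   line 2: local path to the key file
#   line 3: optional IV in hex
openssl rand 16 > enc.key
printf '%s\n%s\n%s\n' "https://example.com/keys/enc.key" "enc.key" "$(openssl rand -hex 16)" > keyinfo
----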
# MP4 Low
ffmpeg -i {$pathFileName} -vf scale=-2:360 -movflags +faststart -preset veryfast -vcodec h264 -acodec aac -strict -2 -max_muxing_queue_size 1024 -y {$destinationFile}
# MP4 SD
ffmpeg -i {$pathFileName} -vf scale=-2:540 -movflags +faststart -preset veryfast -vcodec h264 -acodec aac -strict -2 -max_muxing_queue_size 1024 -y {$destinationFile}
# MP4 HD
ffmpeg -i {$pathFileName} -vf scale=-2:720 -movflags +faststart -preset veryfast -vcodec h264 -acodec aac -strict -2 -max_muxing_queue_size 1024 -y {$destinationFile}
# WEBM Low
ffmpeg -i {$pathFileName} -vf scale=-2:360 -movflags +faststart -preset veryfast -f webm -c:v libvpx -b:v 1M -acodec libvorbis -y {$destinationFile}
# WEBM SD
ffmpeg -i {$pathFileName} -vf scale=-2:540 -movflags +faststart -preset veryfast -f webm -c:v libvpx -b:v 1M -acodec libvorbis -y {$destinationFile}
# WEBM HD
ffmpeg -i {$pathFileName} -vf scale=-2:720 -movflags +faststart -preset veryfast -f webm -c:v libvpx -b:v 1M -acodec libvorbis -y {$destinationFile}
# MP3
ffmpeg -i {$pathFileName} -acodec libmp3lame -y {$destinationFile}
# OGG
ffmpeg -i {$pathFileName} -acodec libvorbis -y {$destinationFile}
# MP3 to Spectrum.MP4
ffmpeg -i {$pathFileName} -filter_complex '[0:a]showwaves=s=640x360:mode=line,format=yuv420p[v]' -map '[v]' -map 0:a -c:v libx264 -c:a copy {$destinationFile}
# Video.MP4 to Audio.MP3
ffmpeg -i {$pathFileName} -y {$destinationFile}

git log -p --follow -- filename # generate patches for each log entry

cat << EOF >> ~/.bashrc
function gi() { curl -sL https://www.gitignore.io/api/$@ ;}
function gignore() { curl -sL https://www.gitignore.io/api/$@ ;}
function gistatus() { git status ; }
function gilog() { git log ; }
function gipush() { git push ; }
function gipull() { git pull ; }
function giaddall() { git add -A ; }
function giadd() { git add $1 ; }
function gicommitall() { git commit -a -m "$1" ; }
EOF
source ~/.bashrc

git branch -d localbranch # delete branch locally
git push origin --delete remotebranch # delete branch remotely

cd repo
git init
touch README.adoc
git add -A
git commit -a -m "add readme"
git status
On branch main    <----- local branch name
nothing to commit, working tree clean
git push --set-upstream git@doma.in:project/repo.git main # <---- same as local branch name
add otp
zbarimg qrcode.png
QR-Code:otpauth://totp/username@example.com?secret=123456789
gopass insert secretname otpauth
secretname:otpauth []: //totp/username@example.com?secret=123456789
# oneliner
echo otpauth://totp/username@example.com?secret=123456789 | gopass insert -a account/secret

generate token
gopass otp secretname
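For completeness, creating and reading an ordinary (non-OTP) entry; `account/newsecret` is just an example name:

[source,bash]
----
gopass generate account/newsecret 20   # create a new 20-character password
gopass show -c account/newsecret       # copy it to the clipboard
----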
renew key
gpg --list-keys
gpg --edit-key (key id)
# By default, you're working on the primary key. If you need to update a sub-key:
# gpg> key 1
gpg> expire
gpg> save

cat /etc/haproxy/haproxy/cert.pem
-----BEGIN CERTIFICATE-----
server cert
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
server private key
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
subCA cert
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
CA cert
-----END CERTIFICATE-----

check order:
cd /tmp
mkdir cert
cd cert
echo |openssl s_client -connect <server_name>:<port> -servername <server_name> -showcerts 2>&1 |csplit - '/-----BEGIN CERTIFICATE-----/' '{*}'
rm xx00; for i in `ls xx*`; do openssl x509 -in $i -noout -subject -hash -issuer_hash; done
podman run --rm -it -v /tmp/har:/tmp/har registry.suse.com/bci/python
pip install harview
harview -vv --filter-all /tmp/har/site.har

shortnames:
- chassisname = ch
- controller = ctrl
- logicaldrive = ld
- physicaldrive = pd
- drivewritecache = dwc
- licensekey = lk
### Specify drives:
- A range of drives (one to three): 1E:1:1-1E:1:3
- Drives that are unassigned: allunassigned
# Show - Controller Slot 1 Controller configuration basic
./ssacli ctrl slot=1 show config
# Show - Controller Slot 1 Controller configuration detailed
./ssacli ctrl slot=1 show detail
# Show - Controller Slot 1 full configuration
./ssacli ctrl slot=1 show config detail
# Show - Controller Slot 1 Status
./ssacli ctrl slot=1 show status
# Show - All Controllers Configuration
./ssacli ctrl all show config
# Show - Controller slot 1 logical drive 1 status
./ssacli ctrl slot=1 ld 1 show status
# Show - Physical Disks status basic
./ssacli ctrl slot=1 pd all show status
# Show - Physical Disk status detailed
./ssacli ctrl slot=1 pd all show detail
# Show - Logical Disk status basic
./ssacli ctrl slot=1 ld all show status
# Show - Logical Disk status detailed
./ssacli ctrl slot=1 ld all show detail
# Create - New single disk volume
./ssacli ctrl slot=1 create type=ld drives=2I:0:8 raid=0 forced
# Create - New spare disk (two defined)
./ssacli ctrl slot=1 array all add spares=2I:1:6,2I:1:7
# Create - New RAID 1 volume
./ssacli ctrl slot=1 create type=ld drives=1I:0:1,1I:0:2 raid=1 forced
# Create - New RAID 5 volume
./ssacli ctrl slot=1 create type=ld drives=1I:0:1,1I:0:2,1I:0:3 raid=5 forced
# Add - All unassigned drives to logical drive 1
./ssacli ctrl slot=1 ld 1 add drives=allunassigned
# Modify - Extend logical drive 2 size to maximum (must be run with the "forced" flag)
./ssacli ctrl slot=1 ld 2 modify size=max forced
### Rescan all controllers
./ssacli rescan
# Led - Activate LEDs on logical drive 2 disks
./ssacli ctrl slot=1 ld 2 modify led=on
# Led - Deactivate LEDs on logical drive 2 disks
./ssacli ctrl slot=1 ld 2 modify led=off
# Led - Activate LED on physical drive
./ssacli ctrl slot=0 pd 1I:0:1 modify led=on
# Led - Deactivate LED on physical drive
./ssacli ctrl slot=0 pd 1I:0:1 modify led=off
# Show - Cache Ratio Status
./ssacli ctrl slot=1 modify cacheratio=?
# Modify - Cache Ratio read: 25% / write: 75%
./ssacli ctrl slot=1 modify cacheratio=25/75
# Modify - Cache Ratio read: 50% / write: 50%
./ssacli ctrl slot=1 modify cacheratio=50/50
# Modify - Cache Ratio read: 0% / Write: 100%
./ssacli ctrl slot=1 modify cacheratio=0/100
# Show - Write Cache Status
./ssacli ctrl slot=1 modify dwc=?
# Modify - Enable Write Cache on controller
./ssacli ctrl slot=1 modify dwc=enable forced
# Modify - Disable Write Cache on controller
./ssacli ctrl slot=1 modify dwc=disable forced
# Show - Write Cache Logicaldrive Status
./ssacli ctrl slot=1 logicaldrive 1 modify aa=?
# Modify - Enable Write Cache on Logicaldrive 1
./ssacli ctrl slot=1 logicaldrive 1 modify aa=enable
# Modify - Disable Write Cache on Logicaldrive 1
./ssacli ctrl slot=1 logicaldrive 1 modify aa=disable
# Show - Rebuild Priority Status
./ssacli ctrl slot=1 modify rp=?
# Modify - Set rebuildpriority to Low
./ssacli ctrl slot=1 modify rebuildpriority=low
# Modify - Set rebuildpriority to Medium
./ssacli ctrl slot=1 modify rebuildpriority=medium
# Modify - Set rebuildpriority to High
./ssacli ctrl slot=1 modify rebuildpriority=high
# You can modify the HPE SDD Smart Path feature by disabling or enabling. To make clear what the HPE SDD Smart Path includes, here is a official statement by HPE:
# https://support.hpe.com/hpsc/doc/public/display?docId=emr_na-a00044117en_us&docLocale=en_US
“HP SmartCache feature is a controller-based read and write caching solution that caches the most frequently accessed data (“hot” data) onto lower latency SSDs to dynamically accelerate application workloads. This can be implemented on direct-attached storage and SAN storage.”
For example, when running VMware vSAN, SSD Smart Path must be disabled for better performance; in worse cases the entire vSAN disk group can fail.
# https://kb.vmware.com/s/article/2092190
# Note: This command requires the array naming type like A/B/C/D/E
# Modify - Enable SSD Smart Path
./ssacli ctrl slot=1 array a modify ssdsmartpath=enable
# Modify - Disable SSD Smart Path
./ssacli ctrl slot=1 array a modify ssdsmartpath=disable
# Delete - Logical Drive 1
./ssacli ctrl slot=1 ld 1 delete
# Delete - Logical Drive 2
./ssacli ctrl slot=1 ld 2 delete
# ssd info
/opt/smartstorageadmin/ssacli/bin/ssacli ctrl slot=0 ssdpd all show detail
/opt/smartstorageadmin/ssacli/bin/ssacli ctrl slot=0 show ssdinfo
/opt/smartstorageadmin/ssacli/bin/ssacli ctrl slot=0 show ssdinfo detail
/opt/smartstorageadmin/ssacli/bin/ssacli ctrl slot=0 show ssdinfo summary
# Erase physical drive with default erasepattern
./ssacli ctrl slot=1 pd 2I:1:1 modify erase
# Erase physical drive with zero erasepattern
./ssacli ctrl slot=1 pd 2I:1:1 modify erase erasepattern=zero
# Erase physical drive with random zero erasepattern
./ssacli ctrl slot=1 pd 1E:1:1-1E:1:3 modify erase erasepattern=random_zero
# Erase physical drive with random random zero erasepattern
./ssacli ctrl slot=1 pd 1E:1:1-1E:1:3 modify erase erasepattern=random_random_zero
# Stop the erasing process on physical drive 1E:1:1
./ssacli ctrl slot=1 pd 1E:1:1 modify stoperase
# License key installation
./ssacli ctrl slot=1 licensekey XXXXX-XXXXX-XXXXX-XXXXX-XXXXX
# License key removal
./ssacli ctrl slot=5 lk XXXXXXXXXXXXXXXXXXXXXXXXX delete

inotifywait -r -m -e modify vagrant/README.adoc adoc/README.adoc | while read file_path file_event file_name; do echo ${file_path}${file_name} event: ${file_event}; echo "generate pdf"; podman run --rm -v $PWD:/media registry.local/documentation:1.0 daps -d /media/MAIN pdf ; done

ipsec status ; iptables -L FORWARD | grep ipsec ; ip xfrm state ; ip xfrm policy ; ip route list table 220

lsblk --json | jq -r
lsblk --json -o name | jq -r '.blockdevices[]'
lsblk --json -o name | jq -r '.blockdevices[] | .name'
lsblk --json | jq -r '.blockdevices[] | .children[]'
lsblk --json | jq -r '.blockdevices[] | .children'
lsblk --json | jq -r '.blockdevices[] | .children[]? |select(.name=="sda6")'
lsblk --json | jq -r '.blockdevices[] | (.children[]?) | select(.mountpoint==null)'
lsblk --json | jq -r '.blockdevices[] | (.children[]?) | select(.mountpoint=="/" and .name=="sda2") '
lsblk --json | jq -r '.blockdevices[] | select(.children != null) | .children[]'
lsblk --json | jq -r '.blockdevices[] | select(.children != null) | .children[] | select(.size | contains("9"))'
lsblk --json | jq -r '.blockdevices[] | select(.children != null) | .children[] | select((.size | contains("9")) and (.name | contains("sda")))'
lsblk --json | jq -r '.blockdevices[] | (.children[]?) | select((.size | contains("9")) and (.name | contains("sda")))'
lsblk --json | python3 -c "import sys, json; print(json.load(sys.stdin)['blockdevices'][0].keys())"
lsblk --json | python3 -c "import sys, json; print(json.load(sys.stdin)['blockdevices'][0]['children'][0]['name'])"
jq -r '.|keys'
jq -r '.[]|keys'
jq -r '.[]|"\(.name) \(.id)"'
echo '{
"name": "R1",
"type": "robot",
"prop1": "a5482na",
"prop2": null,
"prop3": 55
}' |\
jq '. | to_entries[] | select( .key | contains("prop"))'
echo '{
"devDependencies": {
"@antora/cli": "3.1.3",
"@antora/site-generator": "3.1.3",
"@antora/site-generator-with-pdf-exporter": "gitlab:opendevise/oss/antora-site-generator-with-pdf-exporter#v2.3.0-alpha.2"
}
}' | jq '.devDependencies | to_entries[] | select (.key)|"\(.key)@\(.value)"'

k get ns -o json cattle-monitoring-system

{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"annotations": {
"cattle.io/status": "{\"Conditions\":[{\"Type\":\"ResourceQuotaInit\",\"Status\":\"True\",\"Message\":\"\",\"LastUpdateTime\":\"2024-03-20T11:27:27Z\"},{\"Type\":\"InitialRolesPopulated\",\"Status\":\"True\",\"Message\":\"\",\"LastUpdateTime\":\"2024-03-20T11:27:28Z\"}]}",
"field.cattle.io/projectId": "c-m-s2gjcrwx:p-m48vq",
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"name\":\"cattle-monitoring-system\"}}\n",
"lifecycle.cattle.io/create.namespace-auth": "true",
"management.cattle.io/no-default-sa-token": "true",
"meta.helm.sh/release-name": "rancher-monitoring",
"meta.helm.sh/release-namespace": "cattle-monitoring-system",
"objectset.rio.cattle.io/id": "default-smtp-alert-alertmanager-rancher-monitoring"
},
"creationTimestamp": "2024-03-20T11:27:26Z",
"deletionTimestamp": "2024-06-27T07:57:38Z",
"labels": {
"app.kubernetes.io/managed-by": "Helm",
"field.cattle.io/projectId": "p-m48vq",
"kubernetes.io/metadata.name": "cattle-monitoring-system",
"objectset.rio.cattle.io/hash": "db91c173a6cb6696f8334e6a0abd0fe4db2186dd"
},
"name": "cattle-monitoring-system",
"resourceVersion": "217675132",
"uid": "0c1e7839-1dbf-464b-b184-2894918896ec"
},
"spec": {
"finalizers": [
"kubernetes"
]
},
"status": {
"conditions": [
{
"lastTransitionTime": "2024-06-27T07:57:51Z",
"message": "Discovery failed for some groups, 1 failing: unable to retrieve the complete list of server APIs: custom.metrics.k8s.io/v1beta1: the server is currently unable to handle the request",
"reason": "DiscoveryFailed",
"status": "True",
"type": "NamespaceDeletionDiscoveryFailure"
},
{
"lastTransitionTime": "2024-06-27T07:57:45Z",
"message": "All legacy kube types successfully parsed",
"reason": "ParsedGroupVersions",
"status": "False",
"type": "NamespaceDeletionGroupVersionParsingFailure"
},
{
"lastTransitionTime": "2024-06-27T07:57:45Z",
"message": "All content successfully deleted, may be waiting on finalization",
"reason": "ContentDeleted",
"status": "False",
"type": "NamespaceDeletionContentFailure"
},
{
"lastTransitionTime": "2024-06-27T07:57:51Z",
"message": "All content successfully removed",
"reason": "ContentRemoved",
"status": "False",
"type": "NamespaceContentRemaining"
},
{
"lastTransitionTime": "2024-06-27T07:57:51Z",
"message": "All content-preserving finalizers finished",
"reason": "ContentHasNoFinalizers",
"status": "False",
"type": "NamespaceFinalizersRemaining"
}
],
"phase": "Terminating"
}
}

kubectl api-resources -o name --verbs=list --namespaced | xargs -n 1 kubectl get --show-kind --ignore-not-found -n cattle-monitoring-system

E0627 11:25:02.857331 28602 memcache.go:287] couldn't get resource list for custom.metrics.k8s.io/v1beta1: the server is currently unable to handle the request
E0627 11:25:03.066288 28602 memcache.go:121] couldn't get resource list for custom.metrics.k8s.io/v1beta1: the server is currently unable to handle the request
error: unable to retrieve the complete list of server APIs: custom.metrics.k8s.io/v1beta1: the server is currently unable to handle the request

kubectl get apiservices

v1beta1.custom.metrics.k8s.io   cattle-monitoring-system/rancher-monitoring-prometheus-adapter   False (ServiceNotFound)   98d

kubectl delete apiservices v1beta1.custom.metrics.k8s.io
kubectl get namespace cattle-monitoring-system -o json | tr -d "\n" | sed "s/\"finalizers\": \[[^]]\+\]/\"finalizers\": []/" | kubectl replace --raw /api/v1/namespaces/cattle-monitoring-system/finalize -f -
kubectl edit namespace cattle-monitoring-system
# delete finalizer

graph
DP-1["DP-1 <br /><br /> position: 0,0"]
eDP-1["eDP-1 <br /><br /> position: 1920,0"]
DP-3["DP-3<br /><br />position:3840,0"]
kscreen-doctor --json | jq -r '.outputs[]|"\(.name) \(.enabled) \(.pos) \(.connected)"'| grep true
kscreen-doctor output.DP-1.position.0,0 output.eDP-1.position.1920,0 output.DP-3.position.3840,0

source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
alias k=kubectl
complete -F __start_kubectl k
# or
alias k=kubectl
source <(kubectl completion bash | sed 's/kubectl/k/g')

mkdir ~/.kube/conf.d
cp cluster1-config ~/.kube/conf.d/
cp cluster2-config ~/.kube/conf.d/
cp cluster3-config ~/.kube/conf.d/
# the contexts must be different!
grep -rA5 context: ~/.kube/conf.d/
export KUBECONFIG=$(find ~/.kube/conf.d/ -maxdepth 1 -type f -printf "%p:" | sed 's/:$//g')
echo $KUBECONFIG
UMASK=0600 kubectl config view --flatten > ~/.kube/config
UMASK=0600 KUBECONFIG=$(find ~/.kube/conf.d/ -maxdepth 1 -type f -printf "%p:" | sed 's/:$//g') kubectl config view --flatten > ~/.kube/config
UMASK=0600 KUBECONFIG=$(find ~/.kube/conf.d/oracle/ -maxdepth 1 -type f -printf "%p:" | sed 's/:$//g') kubectl config view --flatten > ~/.kube/config

kubectl config get-clusters
NAME
cluster1
cluster2
cluster3

kubectl get pods -o wide --all-namespaces --sort-by="{.spec.nodeName}"
kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName --all-namespaces
kubectl get pod -o=custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --all-namespaces
kubectl get pod --all-namespaces -o json | jq '.items[] | .spec.nodeName + " " + .metadata.name + " " + .status.podIP'
kubectl get pods --all-namespaces --output 'jsonpath={range .items[*]}{.spec.nodeName}{" "}{.metadata.namespace}{" "}{.metadata.name}{"\n"}{end}'

kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' |\
sort |\
uniq -c

kubectl get pods --all-namespaces -o jsonpath='{..image}' | tr ' ' '\n' | sort -u
kubectl create secret generic wiki-postgresql --from-literal=psqlpassword=secretpassword123
kubectl get secrets wiki-postgresql --template='{{.data.psqlpassword}}' | base64 -d
kubectl get secrets wiki-postgresql -o go-template='{{.data.psqlpassword|base64decode}}{{ "\n" }}'
kubectl get secret -n cattle-system tls-rancher -o "jsonpath={.data['tls\.crt']}"| base64 -d | openssl x509 -noout -text

# /tmp/nginx.conf
user nginx;
worker_processes auto;
error_log /dev/stdout notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
server {
listen 8080;
# listen [::]:80;
server_name _;
location / {
root /srv/www/htdocs;
try_files $uri $uri/ /index.html;
}
}
}

kubectl create configmap nginx-configmap --from-file=/tmp/nginx.conf
kubectl get cm nginx-configmap -o json| jq -r '.data|keys' # use this key in volumes section of deployment

[
  "nginx.conf"
]

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: registry.suse.com/suse/nginx:1.21
        name: nginx
        ports:
        - containerPort: 8080
          name: nginx
        volumeMounts:
        - name: nginx-configmap-volume
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf
      volumes:
      - name: nginx-configmap-volume
        configMap:
          name: nginx-configmap
          items:
          - key: nginx.conf
            path: nginx.conf

kubectl create -f nginx-deployment.yaml

grep listen /tmp/nginx.conf
    listen 8080;
    # listen [::]:80;

kubectl get cm nginx-configmap -o json| jq -r '.data[]' | grep listen
    listen 8080;
    # listen [::]:80;

kubectl exec -it nginx-68c798d8f7-5crqc -- grep listen /etc/nginx/nginx.conf
    listen 8080;
    # listen [::]:80;

# 0. step: install krew https://krew.sigs.k8s.io/docs/user-guide/setup/install/
kubectl krew install graph
kubectl graph -t 100 ConfigMap,deployments,ingress,secret,service -n acltool -o mermaid
kubectl graph -t 100 $(kubectl api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl get --show-kind --ignore-not-found --no-headers=true -A | awk '{print $2}' | cut -d'/' -f1 | sort -u | tail -n +4 | xargs| tr ' ' ',') -A -o graphviz | dot -T svg -o context.svg

kubectl get pods --all-namespaces -o json | jq '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | "kubectl delete pods \(.metadata.name) -n \(.metadata.namespace)"' | xargs -n 1 bash -c
kubectl get pods | grep Evicted | awk '{print $1}' | xargs kubectl delete pods -n <namespace-name>
kubectl get nodes -o jsonpath='{range .items[*]}{@.metadata.name}:{" "}{range @.status.conditions[*]}{@.type}={@.status}; {end}{"\n"}{end}'

kubectl get pods --all-namespaces -o=json | jq -c '.items[] | {name: .metadata.name, namespace: .metadata.namespace, claimName:.spec.volumes[] | select( has ("persistentVolumeClaim") ).persistentVolumeClaim.claimName }'

podman run --rm -it --entrypoint=/bin/bash registry.suse.com/suse/git:2.43
kubectl run git-bash --rm -i -t --restart=Never --image=registry.suse.com/suse/git:2.43 --command -- /bin/bash
# Check kubectl version
kubectl version --client
# Get cluster info
kubectl cluster-info
# Get cluster info dump
kubectl cluster-info dump
# Get API versions
kubectl api-versions
# Get API resources
kubectl api-resources
# Get API resources with short names
kubectl api-resources --namespaced=false

# Get pod logs
kubectl logs <pod-name>
# Follow logs (tail -f equivalent)
kubectl logs <pod-name> -f
# Get logs from specific container in multi-container pod
kubectl logs <pod-name> -c <container-name>
# Get previous container logs (useful for crashed containers)
kubectl logs <pod-name> --previous
# Get logs with timestamps
kubectl logs <pod-name> --timestamps
# Get logs from last 1 hour
kubectl logs <pod-name> --since=1h
# Get last 100 lines of logs
kubectl logs <pod-name> --tail=100

# Execute command in pod (single container)
kubectl exec <pod-name> -- <command>
# Interactive shell into pod
kubectl exec -it <pod-name> -- /bin/bash
kubectl exec -it <pod-name> -- /bin/sh # if bash not available
# Execute command in specific container
kubectl exec -it <pod-name> -c <container-name> -- /bin/bash
# Copy files from/to pod
kubectl cp <pod-name>:/path/to/file /local/path
kubectl cp /local/path <pod-name>:/path/to/file
# Copy files from specific container
kubectl cp <pod-name>:/path/to/file /local/path -c <container-name>

# Forward local port to pod port
kubectl port-forward <pod-name> <local-port>:<pod-port>
# Forward to service
kubectl port-forward service/<service-name> <local-port>:<service-port>
# Forward with specific address binding
kubectl port-forward --address 0.0.0.0 <pod-name> <local-port>:<pod-port>
# Forward multiple ports
kubectl port-forward <pod-name> <local-port1>:<pod-port1> <local-port2>:<pod-port2>

# Watch pods in real-time
kubectl get pods -w
# Watch pods with output refreshing every 2 seconds
kubectl get pods -w --output-watch-events
# Get pod resource usage (requires metrics-server - Metrics API)
kubectl top pods
# Get pod resource usage for specific namespace
kubectl top pods -n <namespace>
# Get pod resource usage for all namespaces
kubectl top pods --all-namespaces

# List deployments
kubectl get deployments
kubectl get deploy # shorthand
# Get deployment details
kubectl describe deployment <deployment-name>
# Create deployment from image
kubectl create deployment <deployment-name> --image=<image-name>
# Scale deployment
kubectl scale deployment <deployment-name> --replicas=<number>
# Update deployment image
kubectl set image deployment/<deployment-name> <container-name>=<new-image>
# Rollout status
kubectl rollout status deployment/<deployment-name>
# Rollout history
kubectl rollout history deployment/<deployment-name>
# Rollback deployment
kubectl rollout undo deployment/<deployment-name>
# Rollback to specific revision
kubectl rollout undo deployment/<deployment-name> --to-revision=<revision-number>
# Restart deployment (rolling restart)
kubectl rollout restart deployment/<deployment-name>

# List services
kubectl get services
kubectl get svc # shorthand
# Get service details
kubectl describe service <service-name>
# Expose deployment as service
kubectl expose deployment <deployment-name> --port=<port> --type=<service-type>
# Create service from YAML
kubectl apply -f service.yaml
# Delete service
kubectl delete service <service-name>
# Get service endpoints
kubectl get endpoints <service-name>

# List configmaps
kubectl get configmaps
kubectl get cm # shorthand
# Create configmap from literal values
kubectl create configmap <configmap-name> --from-literal=<key>=<value>
# Create configmap from file
kubectl create configmap <configmap-name> --from-file=<file-path>
# Create configmap from directory
kubectl create configmap <configmap-name> --from-file=<directory-path>
# Get configmap data
kubectl get configmap <configmap-name> -o yaml
# Edit configmap
kubectl edit configmap <configmap-name>
# List secrets
kubectl get secrets
# Create secret from literal values
kubectl create secret generic <secret-name> --from-literal=<key>=<value>
# Create secret from file
kubectl create secret generic <secret-name> --from-file=<file-path>
# Create TLS secret
kubectl create secret tls <secret-name> --cert=<cert-file> --key=<key-file>
# Get secret data (base64 encoded)
kubectl get secret <secret-name> -o yaml
# Decode secret value
kubectl get secret <secret-name> -o jsonpath='{.data.<key>}' | base64 --decode
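A related, frequently needed variant not shown above is an image-pull secret for a private registry; a sketch with placeholder credentials (the registry host reuses the Harbor example from the docker section):

[source,bash]
----
# Create an image-pull secret and reference it from pods via spec.imagePullSecrets
kubectl create secret docker-registry regcred \
  --docker-server=harbor.kozut.local \
  --docker-username=USER \
  --docker-password=PASS
----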
# Get resource with custom columns
kubectl get pods -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName

# Get events in current namespace
kubectl get events
# Get events sorted by time
kubectl get events --sort-by=.metadata.creationTimestamp
# Get events for specific resource
kubectl get events --field-selector involvedObject.name=<resource-name>
# Watch events
kubectl get events -w

# Run temporary pod for debugging
kubectl run debug-pod --image=busybox --rm -it --restart=Never -- /bin/sh
# Run temporary pod in specific namespace
kubectl run debug-pod --image=busybox --rm -it --restart=Never -n <namespace> -- /bin/sh
# Run temporary pod with specific service account
kubectl run debug-pod --image=busybox --rm -it --restart=Never --serviceaccount=<sa-name> -- /bin/sh
==== Resource Management
===== Resource Quotas & Limits
# Get resource quotas
kubectl get resourcequotas
kubectl get quota # shorthand
# Get limit ranges
kubectl get limitranges
kubectl get limits # shorthand
# Get node resource usage (requires Metrics API)
kubectl top nodes
# Get pod resource requests and limits
kubectl describe pods <pod-name> | grep -A 5 "Requests\|Limits"
==== Horizontal Pod Autoscaler (HPA)
# List HPAs
kubectl get hpa
# Create HPA
kubectl autoscale deployment <deployment-name> --cpu-percent=80 --min=1 --max=10
# Get HPA details
kubectl describe hpa <hpa-name>
==== RBAC
# List roles
kubectl get roles
# List cluster roles
kubectl get clusterroles
# List role bindings
kubectl get rolebindings
# List cluster role bindings
kubectl get clusterrolebindings
# Check permissions for current user
kubectl auth can-i <verb> <resource>
# Check permissions for specific user
kubectl auth can-i <verb> <resource> --as=<user>
# Check permissions for service account
kubectl auth can-i <verb> <resource> --as=system:serviceaccount:<namespace>:<sa-name>
==== Working with YAML Manifests
===== Apply & Create
# Apply configuration from file
kubectl apply -f <file.yaml>
# Apply all YAML files in directory
kubectl apply -f <directory>/
# Apply from URL
kubectl apply -f https://example.com/manifest.yaml
# Create resource from file (fails if it already exists)
kubectl create -f <file.yaml>
# Dry run to validate YAML
kubectl apply -f <file.yaml> --dry-run=client
# Server-side dry run
kubectl apply -f <file.yaml> --dry-run=server
===== Generate & Export
# Generate YAML for deployment
kubectl create deployment <name> --image=<image> --dry-run=client -o yaml
# Generate YAML for service
kubectl expose deployment <deployment-name> --port=80 --dry-run=client -o yaml
# Export existing resource to YAML
kubectl get deployment <deployment-name> -o yaml --export > deployment.yaml
# Generate manifest template
kubectl run <pod-name> --image=<image> --dry-run=client -o yaml > pod.yaml
==== Labels & Selectors
# Add label to resource
kubectl label pods <pod-name> <label-key>=<label-value>
# Remove label from resource
kubectl label pods <pod-name> <label-key>-
# Select resources by label
kubectl get pods -l <label-key>=<label-value>
# Select resources by multiple labels
kubectl get pods -l <label-key1>=<label-value1>,<label-key2>=<label-value2>
# Select resources with label exists
kubectl get pods -l <label-key>
# Select resources with label not equal
kubectl get pods -l <label-key>!=<label-value>
==== Patch Operations
# Patch resource with strategic merge
kubectl patch deployment <deployment-name> -p '{"spec":{"replicas":5}}'
# Patch resource with JSON merge
kubectl patch deployment <deployment-name> --type merge -p '{"spec":{"replicas":5}}'
# Patch resource with JSON patch
kubectl patch deployment <deployment-name> --type json -p='[{"op": "replace", "path": "/spec/replicas", "value": 5}]'
==== Output Formats
# Wide output (more columns)
kubectl get pods -o wide
# JSON output
kubectl get pods -o json
# YAML output
kubectl get pods -o yaml
# Custom columns
kubectl get pods -o custom-columns=NAME:.metadata.name,STATUS:.status.phase
# JSONPath output
kubectl get pods -o jsonpath='{.items[*].metadata.name}'
# Go template output
kubectl get pods -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'
==== Resource Shortnames
po      # pods
svc     # services
deploy  # deployments
rs      # replicasets
ds      # daemonsets
sts     # statefulsets
cm      # configmaps
sa      # serviceaccounts
ns      # namespaces
no      # nodes
pv      # persistentvolumes
pvc     # persistentvolumeclaims
ing     # ingresses
netpol  # networkpolicies
==== Emergency Commands
===== Force Operations
# Force delete pod (when stuck in terminating)
kubectl delete pod <pod-name> --grace-period=0 --force
# Force delete namespace (when stuck in terminating)
kubectl delete namespace <namespace-name> --grace-period=0 --force
# Force delete all pods in namespace
kubectl delete pods --all --grace-period=0 --force -n <namespace>
===== Recovery Operations
# Cordon node (mark as unschedulable)
kubectl cordon <node-name>
# Uncordon node
kubectl uncordon <node-name>
# Drain node (evict all pods)
kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data
# Restart all pods in deployment (rolling restart)
kubectl rollout restart deployment/<deployment-name>
# Scale deployment to 0 and back
kubectl scale deployment <deployment-name> --replicas=0
kubectl scale deployment <deployment-name> --replicas=3
==== Additional Tips
- Use `kubectl explain <resource>` to get detailed information about resource fields.
- Use `kubectl diff -f <file.yaml>` to see what changes will be applied.
- Use `kubectl get events --sort-by='.lastTimestamp'` for chronological events.
- Use `kubectl logs -l <label-selector>` to get logs from multiple pods.
- Set `export KUBE_EDITOR=vim` (or nano) to use your favorite editor with `kubectl edit`.
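A quick illustration of the first two tips (the manifest name reuses the nginx deployment example above):

[source,bash]
----
# show the documented fields behind a spec key
kubectl explain deployment.spec.replicas
# preview the changes an edited manifest would apply
kubectl diff -f nginx-deployment.yaml
----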
==== aliases
alias k='kubectl'
alias kgp='kubectl get pods'
alias kgs='kubectl get svc'
alias kgd='kubectl get deployment'
alias kdp='kubectl describe pod'
alias kds='kubectl describe svc'
alias kdd='kubectl describe deployment'
alias kaf='kubectl apply -f'
alias kdel='kubectl delete'
alias klog='kubectl logs'
alias kex='kubectl exec -it'
alias kpf='kubectl port-forward'
== kubeval
podman run -it -v ${PWD}/k8s:/k8s docker.io/garethr/kubeval k8s/*
podman run -it -v ${PWD}/k8s:/k8s docker.io/garethr/kubeval --skip-kinds Kustomization -s https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master -v 1.25.11 k8s/*

== ldapsearch
ldapsearch -o ldif-wrap=no -LLL -h server.com -p 389/636 -x -D "binduser@server.com" -b "DC=server,DC=com" -w <PASS> -s sub "sAMAccountName=USERNAME"
# with base64 decode
-o ldif-wrap=no "$@" | perl -MMIME::Base64 -Mutf8 -pe 's/^([-a-zA-Z0-9;]+):(:\s+)(\S+)$/$1.$2.&decode_base64($3)/e'

== liquidprompt
curl --remote-name -L https://github.com/liquidprompt/liquidprompt/releases/download/v2.1.2/liquidprompt-v2.1.2.tar.gz
tar -C ~/bin/ -xzf liquidprompt-v2.1.2.tar.gz

# .config/liquidpromptrc
LP_TEMP=0
LP_ENABLE_KUBECONTEXT=1
LP_ENABLE_GIT=1
LP_ENABLE_CONTAINER=1

# .bashrc
# liquidprompt
[[ $- = *i* ]] && source ~/bin/liquidprompt/liquidprompt
export LP_PS1_POSTFIX='\n$ '

== locate
systemctl enable --now plocate-updatedb.timer

== Grafana Loki
=== API
k -n loki get ep
k -n loki get svc

=== delete logs
GET /compactor/ring
POST /loki/api/v1/delete
PUT /loki/api/v1/delete

== LVM
crm cluster run 'echo 1 > /sys/block/sdb/device/rescan'
crm cluster run 'pvresize /dev/sdb'
crm cluster run 'lvextend -rl +100%FREE /dev/pgsql_data/postgres'
crm cluster run 'df -h /var/lib/pgsql'

== markdown
=== convert markdown to asciidoc
podman run --rm -v $PWD:/media docker.io/naszuf/kramdoc:1.0 -o /media/README.adoc /media/README.md

== meld
meld
meld FILE1
meld DIR1
meld FILE1 FILE2
meld FILE1 FILE2 FILE3
meld DIR1 DIR2
meld DIR1 DIR2 DIR3
meld --diff FILE1 FILE2 --diff FILE3 FILE4

== netcat
nc example.org 8080
nc -l 8080
nc --sh-exec "ncat example.org 80" -l 8080 --keep-open
nc --exec "/bin/bash" -l 8081 --keep-open
nc --exec "/bin/bash" --max-conns 3 --allow 192.168.0.0/24 -l 8081 --keep-open
nc --proxy socks4host --proxy-type socks4 --proxy-auth user smtphost 25
nc -l --proxy-type http localhost 8888
# send data (-z: scan only, do not send data)
netcat -v -w 3 192.168.45.166 1556

== NFS
=== list connected clients
grep '^name:' /proc/fs/nfsd/clients/*/info
# or
nfsdclnts --hostname | cut -d '|' -f5 | sort -u

=== list opened files
nfsdclnts --hostname

== openssl
=== pfx
openssl pkcs12 -in file.pfx -nocerts -out priv.key
openssl pkcs12 -in file.pfx -clcerts -nokeys -out srv.crt
openssl rsa -in priv.key -out priv-nopass.key
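For the reverse direction, a sketch (file names are placeholders) that bundles the extracted key and certificate back into a PKCS#12 file:

[,bash]
----
# build a new .pfx from the extracted key and certificate
openssl pkcs12 -export -inkey priv.key -in srv.crt -out bundle.pfx
----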
=== show cert
echo | openssl s_client -connect sites.example.com:443 -servername site1.example.com -showcerts 2>/dev/null | sed --quiet '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout
openssl s_client -connect sites.example.com:443 2>/dev/null </dev/null | sed -n '/^-----BEGIN CERTIFICATE-----/,/^-----END CERTIFICATE-----/p' | openssl x509 -text -noout
== php
<?php
phpinfo();
?>
php -c /etc/php/apache2/php.ini -i
php -i

== podman
=== extract content from container
podman create --name tst registry.local/agent:f769de33
# list content
podman export tst| tar -t| less
# extract all content
podman export tst -o tst.tar
# extract 1 file
podman export tst| tar -t| grep kubernetes-rbac-agent
# determine file type
podman export tst | tar --extract usr/bin/kubernetes-rbac-agent -O | file -
#
podman export tst | tar --extract usr/bin/kubernetes-rbac-agent

== ponymail-foal
# count msgs
curl -u user:password "https://mailarchive.home.lan/api/stats.lua?list=listname&domain=lists.home.lan" | jq -r ".active_months[]" | xargs | sed 's/ / + /g' | bc

== postgresql
=== manual archive cleanup
# up to PostgreSQL 9.6 (pg_xlog)
WAL_FILE=$(pg_controldata -D /var/lib/pgsql/data/ | grep "Latest checkpoint's REDO WAL file:" | cut -d: -f2)
WAL_CLEAN=$(ls -rt /var/lib/pgsql/data/pg_xlog/ | grep $WAL_FILE -B3 | head -1)
#dry-run
pg_archivecleanup -nd /var/lib/pgsql/data/pg_xlog/ $WAL_CLEAN
# from PostgreSQL 10 (pg_wal)
WAL_FILE=$(pg_controldata -D /var/lib/pgsql/data/ | grep "Latest checkpoint's REDO WAL file:" | cut -d: -f2)
WAL_CLEAN=$(ls -rt /var/lib/pgsql/data/pg_wal/ | grep $WAL_FILE -B3 | head -1)
# dry-run
pg_archivecleanup -nd /var/lib/pgsql/data/pg_wal/ $WAL_CLEAN
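The `-n` flag above only lists candidates without deleting anything; a minimal sketch of the full cleanup on PostgreSQL 10+ (same paths as above, run as the postgres user) drops `-n` for the real pass:

[,bash]
----
# find the WAL file of the latest checkpoint's REDO location
WAL_FILE=$(pg_controldata -D /var/lib/pgsql/data/ | grep "Latest checkpoint's REDO WAL file:" | cut -d: -f2)
# keep a few segments before it
WAL_CLEAN=$(ls -rt /var/lib/pgsql/data/pg_wal/ | grep $WAL_FILE -B3 | head -1)

pg_archivecleanup -nd /var/lib/pgsql/data/pg_wal/ $WAL_CLEAN   # dry run, prints what would go
pg_archivecleanup -d  /var/lib/pgsql/data/pg_wal/ $WAL_CLEAN   # actually remove older WAL segments
----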
=== pgbadger
log_min_duration_statement = 0 # pgbadger
log_line_prefix = '%t [%p]: user=%u,db=%d,app=%a,client=%h ' # pgbadger
log_checkpoints = on # pgbadger
log_connections = on # pgbadger
log_disconnections = on # pgbadger
log_lock_waits = on # pgbadger
log_temp_files = 0 # pgbadger
log_autovacuum_min_duration = 0 # pgbadger
log_error_verbosity = default # pgbadger
lc_messages = 'en_US.UTF-8' # pgbadger
== prometheus
== ps
ps -e
ps -ef
ps -eF
ps -ely
ps ax
ps axu
ps -ejH
ps axjf
ps -eLf
ps axms
ps -eo euser,ruser,suser,fuser,f,comm,label
ps axZ
ps -eM
ps -U root -u root u
ps -eo pid,tid,class,rtprio,ni,pri,psr,pcpu,stat,wchan:14,comm
ps axo stat,euid,ruid,tty,tpgid,sess,pgrp,ppid,pid,pcpu,comm
ps -Ao pid,tt,user,fname,tmout,f,wchan
ps -C syslogd -o pid=
ps -p 42 -o comm=

== RabbitMQ
# test RabbitMQ AMQ Protocol
printf "HELO\n\n\n\n" | netcat pm-rabbitmq.hpo.hu 30100; echo== rancher
=== update
helm repo list
helm list -n cattle-system # show installed version
helm repo update
helm search repo rancher-stable # list versions in repo
helm get values rancher -n cattle-system -o yaml > rancher-values.yaml # export current value
helm upgrade rancher rancher-<chartrepo>/rancher -n cattle-system -f rancher-values.yaml --version=VERSION
vi rancher-values.yaml # change image tag

=== reset admin password
kubectl -n cattle-system exec $(kubectl -n cattle-system get pods | grep ^rancher | head -n 1 | awk '{ print $1 }') -- reset-password

=== delete rancher-agent
k get namespaces
NAME STATUS AGE
cattle-system Terminating 209d
kubectl get customresourcedefinitions |grep management.cattle.io
apiservices.management.cattle.io 2023-12-08T18:45:58Z
authconfigs.management.cattle.io 2023-12-08T18:46:04Z
clusterregistrationtokens.management.cattle.io 2023-12-08T18:45:58Z
clusters.management.cattle.io 2023-12-08T18:45:58Z
features.management.cattle.io 2023-12-08T18:45:54Z
groupmembers.management.cattle.io 2023-12-08T18:46:04Z
groups.management.cattle.io 2023-12-08T18:46:04Z
podsecurityadmissionconfigurationtemplates.management.cattle.io 2023-12-08T18:45:58Z
preferences.management.cattle.io 2023-12-08T18:45:59Z
settings.management.cattle.io 2023-12-08T18:45:58Z
tokens.management.cattle.io 2023-12-08T18:46:04Z
userattributes.management.cattle.io 2023-12-08T18:46:04Z
users.management.cattle.io 2023-12-08T18:46:04Z
kubectl get customresourcedefinitions |grep management.cattle.io |awk '{print $1}' | xargs kubectl delete customresourcedefinitions
kubectl get namespaces --field-selector status.phase=Terminating -o name | xargs -i kubectl patch {} -p '{"metadata":{"finalizers":[]}}' --type merge
Error from server (InternalError): Internal error occurred: failed calling webhook "rancher.cattle.io.namespaces": failed to call webhook: Post "https://rancher-webhook.cattle-system.svc:443/v1/webhook/validation/namespaces?timeout=10s": service "rancher-webhook" not found
Error from server (InternalError): Internal error occurred: failed calling webhook "rancher.cattle.io.namespaces": failed to call webhook: Post "https://rancher-webhook.cattle-system.svc:443/v1/webhook/validation/namespaces?timeout=10s": service "rancher-webhook" not found
kubectl create ns cattle-system
Error from server (InternalError): Internal error occurred: failed calling webhook "rancher.cattle.io.namespaces.create-non-kubesystem": failed to call webhook: Post "https://rancher-webhook.cattle-system.svc:443/v1/webhook/validation/namespaces?timeout=10s": service "rancher-webhook" not found
k get validatingwebhookconfigurations.admissionregistration.k8s.io | grep rancher.cattle.io
NAME WEBHOOKS AGE
rancher.cattle.io 7 208d
kubectl delete validatingwebhookconfigurations rancher.cattle.io
validatingwebhookconfiguration.admissionregistration.k8s.io "rancher.cattle.io" deleted
kubectl create ns cattle-system
Error from server (AlreadyExists): object is being deleted: namespaces "cattle-system" already exists
kubectl get namespaces --field-selector status.phase=Terminating -o name | xargs -i kubectl patch {} -p '{"metadata":{"finalizers":[]}}' --type merge
namespace/baikal patched
namespace/cattle-system patched
kubectl get customresourcedefinitions | grep management.cattle.io | awk '{print $1}' | xargs kubectl delete customresourcedefinitions
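A minimal sketch that chains the cleanup steps above (remove the validating webhook first so namespace operations stop failing, then clear finalizers on terminating namespaces, then drop the management.cattle.io CRDs); review before running against your cluster:

[,bash]
----
# remove the leftover Rancher webhook
kubectl delete validatingwebhookconfigurations rancher.cattle.io

# clear finalizers on namespaces stuck in Terminating
kubectl get namespaces --field-selector status.phase=Terminating -o name \
  | xargs -i kubectl patch {} -p '{"metadata":{"finalizers":[]}}' --type merge

# drop the Rancher management CRDs
kubectl get customresourcedefinitions \
  | grep management.cattle.io | awk '{print $1}' \
  | xargs kubectl delete customresourcedefinitions
----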
=== enable embedded registry mirror
Rancher / Cluster Management / select cluster / Edit YAML
spec:
rkeConfig:
machineGlobalConfig:
      embedded-registry: true

=== Rancher managed RKE2 clusters stuck in "Waiting for probes: kube-controller-manager, kube-scheduler"
(
curl --cacert /var/lib/rancher/rke2/server/tls/kube-controller-manager/kube-controller-manager.crt \
https://127.0.0.1:10257/healthz >/dev/null 2>&1 \
&& echo "[OK] Kube Controller probe" \
|| echo "[FAIL] Kube Controller probe";
curl --cacert /var/lib/rancher/rke2/server/tls/kube-scheduler/kube-scheduler.crt \
https://127.0.0.1:10259/healthz >/dev/null 2>&1 \
&& echo "[OK] Scheduler probe" \
|| echo "[FAIL] Scheduler probe";
)
[FAIL] Kube Controller probe
[FAIL] Scheduler probe
echo "Rotating kube-controller-manager certificate"
rm /var/lib/rancher/rke2/server/tls/kube-controller-manager/kube-controller-manager.{crt,key}
crictl rm -f $(crictl ps -q --name kube-controller-manager)
echo "Rotating kube-scheduler certificate"
rm /var/lib/rancher/rke2/server/tls/kube-scheduler/kube-scheduler.{crt,key}
crictl rm -f $(crictl ps -q --name kube-scheduler)
(
curl --cacert /var/lib/rancher/rke2/server/tls/kube-controller-manager/kube-controller-manager.crt \
https://127.0.0.1:10257/healthz >/dev/null 2>&1 \
&& echo "[OK] Kube Controller probe" \
|| echo "[FAIL] Kube Controller probe";
curl --cacert /var/lib/rancher/rke2/server/tls/kube-scheduler/kube-scheduler.crt \
https://127.0.0.1:10259/healthz >/dev/null 2>&1 \
&& echo "[OK] Scheduler probe" \
|| echo "[FAIL] Scheduler probe";
)
[OK] Kube Controller probe
[OK] Scheduler probe

=== get kubeconfig for all downstream clusters
- login rancher.example.com
- profile (on the right side)
- Account & API Keys
- Create API Key
- Scope: No Scope
mkdir -p ~/.kube/conf.d/rancher.example.com
export TOKEN=<TOKEN>
# with socks proxy
for CLUSTER in $(curl -k -x socks5h://127.111.30.111:1080 -X GET -LH "Authorization: Bearer ${TOKEN}" https://rancher.example.com/v3/clusters | jq -r '.data[].id') ; do curl -k -x socks5h://127.111.30.111:1080 -X POST -LH "Authorization: Bearer ${TOKEN}" https://rancher.example.com/v3/clusters/$CLUSTER?action=generateKubeconfig| jq -r ".config" | grep -v 'current-context:' |sed '0,/ server:/s// proxy-url: socks5:\/\/127.111.30.111:1080\n&/' > ~/.kube/conf.d/rancher.example.com/$CLUSTER.yaml; done
# without socks proxy
for CLUSTER in $(curl -k -X GET -LH "Authorization: Bearer ${TOKEN}" https://rancher.example.com/v3/clusters | jq -r '.data[].id') ; do curl -k -X POST -LH "Authorization: Bearer ${TOKEN}" https://rancher.example.com/v3/clusters/$CLUSTER?action=generateKubeconfig| jq -r ".config" | grep -v 'current-context:' > ~/.kube/conf.d/rancher.example.com/$CLUSTER.yaml; done
umask 077; KUBECONFIG=$(find ~/.kube/conf.d/rancher.example.com/ -maxdepth 1 -name '*.yaml' -type f -printf "%p:" | sed 's/:$//g') kubectl config view --flatten > ~/.kube/conf.d/rancher.example.com/config
export KUBECONFIG=~/.kube/conf.d/rancher.example.com/config
kubectl config get-contexts
kubectl config use-context <CLUSTER>

=== remove leftover objects of deleted nodes from a Rancher-deployed cluster
$ kubectl get nodes.management.cattle.io -A --show-labels
NAMESPACE NAME AGE LABELS
c-m-dsfbj2ln machine-2wfk8 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafmaster03
c-m-dsfbj2ln machine-7pc62 168d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafworker03
c-m-dsfbj2ln machine-k4fjs 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafmaster05
c-m-dsfbj2ln machine-n6c9c 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafworker01
c-m-dsfbj2ln machine-w7d57 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafmaster01
c-m-dsfbj2ln machine-zll2k 168d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafworker05
c-m-fkk65d24 machine-25drx 68d cattle.io/creator=norman,management.cattle.io/nodename=int-kiafworker09
c-m-fkk65d24 machine-cb8nh 68d cattle.io/creator=norman,management.cattle.io/nodename=int-kiafworker11
$ kubectl get nodes.management.cattle.io -A --show-labels | grep -E 'c-m-dsfbj2ln|test'
c-m-dsfbj2ln machine-2wfk8 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafmaster03
c-m-dsfbj2ln machine-7pc62 168d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafworker03
c-m-dsfbj2ln machine-k4fjs 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafmaster05
c-m-dsfbj2ln machine-n6c9c 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafworker01
c-m-dsfbj2ln machine-w7d57 223d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafmaster01
c-m-dsfbj2ln machine-zll2k 168d cattle.io/creator=norman,management.cattle.io/nodename=test-kiafworker05
$ kubectl -n c-m-t2rpkhbg get nodes.management.cattle.io -o json | jq -r '.items[] | [.metadata.name, (.spec.nodeName // ""), (.status.nodeIP // ""), ((.status.conditions[]? | select(.type=="Ready") | .status) // "n/a"), (.status.state // "n/a")] | @tsv'
machine-4qw7b True n/a
machine-hf2kh True n/a
machine-kc2xt True n/a
machine-vkkht True n/a
machine-x5zvg True n/a
machine-xlw5v True n/a
m-1e132d45b796 n/a n/a <==
m-50b9ad96996e n/a n/a <==
m-cb41c5436278 n/a n/a <==
kubectl -n c-m-t2rpkhbg delete nodes.management.cattle.io m-1e132d45b796 m-50b9ad96996e m-cb41c5436278

== regexp
Add quotation marks around the "word:" prefixes. Input:

one line: 1
two line: 2

VS Code search/replace with regex enabled: search (^.*:), replace "$1". Result:

"one line:" 1
"two line:" 2
== RKE2
=== RKE2 commands
=== Manual upgrade
curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=v1.28 sh -
rke2-killall.sh
systemctl status rke2-server.service
systemctl stop rke2-server.service
systemctl start rke2-server.service
journalctl -fu rke2-server.service

=== automated upgrade
kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.1/system-upgrade-controller.yaml

Create the upgrade plan:
# Server plan
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
name: server-plan
namespace: system-upgrade
labels:
rke2-upgrade: server
spec:
concurrency: 1
nodeSelector:
matchExpressions:
- {key: rke2-upgrade, operator: Exists}
- {key: rke2-upgrade, operator: NotIn, values: ["disabled", "false"]}
# When using k8s version 1.19 or older, swap control-plane with master
- {key: node-role.kubernetes.io/control-plane, operator: In, values: ["true"]}
tolerations:
- key: "CriticalAddonsOnly"
operator: "Equal"
value: "true"
effect: "NoExecute"
serviceAccountName: system-upgrade
cordon: true
# drain:
# force: true
upgrade:
image: rancher/rke2-upgrade
version: v1.23.1-rke2r2
---
# Agent plan
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
name: agent-plan
namespace: system-upgrade
labels:
rke2-upgrade: agent
spec:
concurrency: 2
nodeSelector:
matchExpressions:
- {key: rke2-upgrade, operator: Exists}
- {key: rke2-upgrade, operator: NotIn, values: ["disabled", "false"]}
# When using k8s version 1.19 or older, swap control-plane with master
- {key: node-role.kubernetes.io/control-plane, operator: NotIn, values: ["true"]}
prepare:
args:
- prepare
- server-plan
image: rancher/rke2-upgrade
serviceAccountName: system-upgrade
cordon: true
drain:
force: true
upgrade:
image: rancher/rke2-upgrade
    version: v1.23.1-rke2r2

# list channels
curl -sL https://update.rke2.io/v1-release/channels | jq -r '.data[].id'
# latest release in specific channel
rke2_ver=$(curl -sL https://update.rke2.io/v1-release/channels| jq -r '.data[]|select(.id=="v1.26").latest')
sed -i "s/version:.*/version: $rke2_ver/g" rke2-upgrade.yaml
kubectl -n system-upgrade apply -f rke2-upgrade.yaml
for node in $(kubectl get node -o name | awk -F '/' '{print $2}'); do kubectl label node ${node} rke2-upgrade=true --overwrite ; done
kubectl -n system-upgrade events -w
kubectl -n system-upgrade get plans -o yaml
kubectl -n system-upgrade get plans
kubectl -n system-upgrade get plans --selector='upgrade.cattle.io/plan=<PLAN>'
kubectl -n system-upgrade logs <PODNAME>
kubectl -n system-upgrade get jobs -o yaml

=== uninstall
rke2-uninstall.sh
reboot
rm -rf /etc/ceph /etc/cni /etc/kubernetes /etc/rancher /opt/cni /opt/rke /run/secrets/kubernetes.io /run/calico /run/flannel /var/lib/calico /var/lib/etcd /var/lib/cni /var/lib/kubelet /var/lib/rancher /var/log/containers /var/log/kube-audit /var/log/pods /var/run/calico
=== registry.yaml
mirrors:
'*':
docker.io:
endpoint:
- "mirror.gcr.io"
== rmt
zypper in -y suseconnect-ng awk
curl -kL --remote-name https://rmt-server.example.com/tools/rmt-client-setup
echo -e "y\ny\n"| sh rmt-client-setup https://rmt-server.example.comrmt-cli repos list --all | grep 15 |grep -e SP5 -e SP6 | grep -i -e legacy
rmt-cli repos enable
rmt-cli repos enable ID1 ID2
for i in ID1 ID2 ; do rmt-cli mirror repository $i ; done

== rocketchat
Ctrl+Shift+I
Console:
document.querySelectorAll('.rcx-banner').forEach(el => el.style.display = 'none');

== rsyslog
# /etc/systemd/journald.conf
# log messages received by the journal daemon shall be forwarded to a traditional syslog daemon
# https://www.freedesktop.org/software/systemd/man/journald.conf.html#ForwardToSyslog=
ForwardToSyslog=yes
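To apply the change, restart journald (assuming a systemd-based system):

[,bash]
----
systemctl restart systemd-journald
----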
== salt
salt '*' cmd.run 'ls -l | grep foo'

=== apply highstate
salt --state-output=mixed rancher[1,2,3] state.highstate

=== list, apply state
salt srv1 state.show_states
salt --state-output=mixed srv1 state.sls statename

=== call single state of sls file
bar:
file.managed:
    - source: salt://some/file

salt '*' state.sls_id bar foo

== SBOM
cat << EOF > /tmp/suse.pem
-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxfZssLE2jeY1swPb5WGe
8C/FWKmIxlGLm9amCNdgheAn8RzuM8slA+TJefAQnrUnC4Qn9ykjQZjH6o2e2ueA
KFdgOdHnlS2d6lETB8dd4O8HYDJx0CEk2SCbAKVuzLbcbP4ug/QDc+Bm8ldxfc+D
GnLVRAt85brSTnfgOHY1PbQ1JAV+ByibbjCZuFmw4gIkMzeiy3M4wJZwblFM4a3s
X2bW/6GWaGz6AMOjCyAPI6shyG5wHZM7OvJJ8lfhXRTZo4Cc5qC0Nyq9Xu3O6DmV
opIajhHc36kdcetmd7TB5OSbQZCLyReAF75LV74y8960+44NptR62hdw1ovCJMfV
mU6m+k/MsN8AIyRFR6dNF9wTOKi67OpPtybiRufCfMvD4VEeoINzEJytToq2XGSc
+hIxtmPOhqDKHH0As113sjTqqo20Ik233x9FFeTFD8Or7ahpqjiv5YCufk9AoQbC
xMIjrK9RkQYgW4RycgvXGASobwN8EE+OsMcyMUER/pdCtQhTQCc1jYLt85VhfEkC
4k9szMB8eZrdV9re/Ku6vnCeXRR5yn2NWKO88U4HfxEpJv5s2uFJi37+x/v9w7Uh
+864W/9NexXg/JFNsvh0Kmxsbi3ZegaouLyrMCHwSA3ByBZ2yCf2VuFPyUCNEZOH
Owi0oc9TgY1yopjsTneyGaMCAwEAAQ==
-----END PUBLIC KEY-----
EOF
podman run --rm gcr.io/go-containerregistry/crane digest --platform linux/amd64 --full-ref registry.suse.com/bci/openjdk:21
registry.suse.com/suse/sle15@sha256:fb0f9b6d3c5836af482ab2fddc5f69206ebf4b9189e8c6e1d4ab8efb944c8ddf
podman run --rm -v /tmp/suse.pem:/tmp/suse.pem docker.io/bitnami/cosign verify-attestation \
--key=/tmp/suse.pem --type spdxjson \
registry.suse.com/suse/sle15@sha256:fb0f9b6d3c5836af482ab2fddc5f69206ebf4b9189e8c6e1d4ab8efb944c8ddf \
| jq '.payload' | tr -d '"' | base64 -d | jq '.predicate'
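To list just the package names from the attested SBOM, a sketch assuming the predicate is an SPDX 2.x JSON document with a `packages` array:

[,bash]
----
podman run --rm -v /tmp/suse.pem:/tmp/suse.pem docker.io/bitnami/cosign verify-attestation \
  --key=/tmp/suse.pem --type spdxjson \
  registry.suse.com/suse/sle15@sha256:fb0f9b6d3c5836af482ab2fddc5f69206ebf4b9189e8c6e1d4ab8efb944c8ddf \
  | jq '.payload' | tr -d '"' | base64 -d \
  | jq -r '.predicate.packages[]?.name'
----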
== screen
caption always # activates window caption
caption string '%{= wk}[ %{k}%H %{k}][%= %{= wk}%?%-Lw%?%{r}(%{r}%n*%f%t%?(%u)%?%{r})%{k}%?%+Lw%?%?%= %{k}][%{b} %Y-%m-%d %{k}%c %{k}]'

# Set screen window title
case "$TERM" in
screen)
PROMPT_COMMAND='echo -ne "\033k$HOSTNAME\033\\"'
;;
esac
screen.xterm-256color)
PROMPT_COMMAND='echo -ne "\033k$HOSTNAME\033\\"'
;;
#== sed :toc: :source-highlighter: rouge
sed -i.bak 's/On/Off/' php.ini # Back up (php.ini.bak) and modify the file in place
sed -n '1,4 p' input.txt # Print lines 1-4
sed -n -e '1,4 p' -e '6,7 p' input.txt # Print lines 1-4 and 6-7
sed '1,4 d' input.txt # Print lines except 1-4
sed '2 a new-line' input.txt # Append line after
sed '2 i new-line' input.txt # Insert line before

== ssh
=== ssh-copy-id
for i in srv1 srv2 srv3 ; do sshpass -p server_password ssh-copy-id -oStrictHostKeyChecking=no root@$i -p 22; done

== Ubuntu
=== repo size
source ../scripts/ubuntu-repo-size.sh
ubuntu-repo-size -s noble,jammy,focal,bionic -a amd64 > result.txt
cat result.txt | sed 's/noble/24.04(noble)/I' | sed 's/jammy/22.04(jammy)/I' | sed 's/focal/20.04(focal)/I' | sed 's/bionic/18.04(bionic)/I'

=== repos description
- Main - Canonical-supported free and open-source software.
- Universe - Community-maintained free and open-source software.
- Restricted - Proprietary drivers for devices.
- Multiverse - Software restricted by copyright or legal issues.
|===
| | Free software | Non-free software

| Supported
| Main
| Restricted

| Unsupported
| Universe
| Multiverse
|===
== Vagrant
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
end

=== remove all vm
for i in $(vagrant global-status | grep virtualbox | awk '{print $5}') ; do cd $i ; vagrant destroy -f ; done

== vim
:g/^$/d # delete empty/blank lines

== vimdiff
ctrl + w # switch between windows
do # diff obtain, pull the change from the other window into the current file.
dp # diff put, push the change from the current file to the other window.
:diffupdate # rescan diff

== vsphere
govc vm.change -vm /Datacenter/vm/srv1 -e="disk.enableUUID=1"
govc vm.option.info -vm /Datacenter/vm/srv1 -json | jq -r '.GuestOSDescriptor.[].DiskUuidEnabled'

== Windows
=== Make bootable USB device from installer iso
zypper in WoeUSB
woeusb --workaround-bios-boot-flag --device Downloads/ISOs/Win11_25H2_Hungarian_x64.iso /dev/sdX
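Before writing, it is worth double-checking which block device `/dev/sdX` really is (a generic sketch, not specific to WoeUSB):

[,bash]
----
# list block devices with size, model and transport to identify the USB stick
lsblk -o NAME,SIZE,MODEL,TRAN
----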
== xz
xz foo
xz -dk bar.xz
xz -dcf a.txt b.txt.xz c.txt d.txt.lzma > abcd.txt
xz --robot --list *.xz | awk '/^totals/{print $5-$4}'
xz --lzma2=preset=1,dict=32MiB foo.tar
xz -vv --lzma2=dict=192MiB big_foo.tar

== yamllint
=== GitLab CI
cat << EOF > .gitlab-ci.yml
stages:
- linting
yamllint:
stage: linting
image: registry.gitlab.com/pipeline-components/yamllint:latest
script:
- yamllint --no-warnings -f colored -c ./.yamllint-rules .
EOF
cat << EOF > .yamllint-rules
extends: default
rules:
# 80 chars should be enough, but don't fail if a line is longer
line-length:
max: 80
level: warning
# empty lines
empty-lines:
max: 2
max-start: 0
max-end: 1
EOF
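The same rules file can be used for a local check before pushing (assuming yamllint is installed locally):

[,bash]
----
yamllint --no-warnings -f colored -c .yamllint-rules .
----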
== zypper
# remove all packages installed by a pattern
zypper rm $(zypper info --type pattern PATTERN_NAME |grep ' | package | ' |awk -F'|' '{print $2}' |xargs)
# remove pkgs installed by pattern
zypper info --requires PATTERN_NAME | grep '| package |' | awk '{print $3}' | xargs zypper rm