til

javascript

Write code like it’s synchronous

let get = async url => {
  const retval = await fetch(url);
  if (retval.ok) {
    // assumes an enclosing component that owns `playlists`
    this.playlists = await retval.json();
  } else {
    console.error("doh! network error");
  }
};
get(URL); // pass in the URL to fetch

Or use promise chaining

fetch(URL)
  .then(stream => stream.json())
  .then(data => (this.playlists = data))
  .catch(error => console.error(error));

bash

auto-reply "y" to prompts during installations or fsck repairs

yes | pacman -S <something>
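
the same trick should cover the fsck case; /dev/sdX1 below is a placeholder for the device to repair

yes | fsck /dev/sdX1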

convert all SVG files under the current path into PNGs

find . -name '*.svg' -exec mogrify -format png {} +

diff a local file against its copy on a remote host

FILE=/tmp/a
diff -ru <(sort -u "$FILE") <(ssh user@host "sort -u $FILE")

ffmpeg

extract the audio track from a video file and set ID3 tags

ffmpeg -i <input video> -metadata title="Title" -metadata artist="Artist" -ab 256k file.mp3
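
to double-check the written tags, ffprobe (shipped with ffmpeg) prints a file's metadata

ffprobe file.mp3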

record screen

ffmpeg -f x11grab -s 1366x768 -i :0.0 -r 25 -threads 2 -c:v libx264 -crf 0 -preset ultrafast output.mkv

html

avoid page breaks inside table rows and repeat the table header/footer on every printed page

<style type="text/css">
  table {
    page-break-inside: auto;
  }
  tr {
    page-break-inside: avoid;
    page-break-after: auto;
  }
  thead {
    display: table-header-group;
  }
  tfoot {
    display: table-footer-group;
  }
</style>

iptables

accept connections on a port from one IP and drop all others

iptables -A INPUT -p tcp --dport 8000 -s 1.2.3.4 -j ACCEPT
iptables -A INPUT -p tcp --dport 8000 -j DROP

drop all incoming ssh connections

iptables -A INPUT -i eth0 -p tcp --dport 22 -m state --state NEW,ESTABLISHED -j DROP
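
to review the rules just added, list the INPUT chain with rule numbers

iptables -L INPUT -n --line-numbers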

docker

Delete all containers

  • docker rm $(docker ps -a -q)

Delete all images

  • docker rmi $(docker images -q)

Delete all dangling images

  • docker rmi $(docker images -f dangling=true -q)

Create docker network

  • docker network create mynet

clean up

  • docker system prune
  • docker volume rm $(docker volume ls -q --filter dangling=true)

run two containers from the same image, named foo and bar; on the same user-defined network they can reach each other via the hostnames foo and bar (connectivity check after the list)

  • docker run --name foo --net mynet img
  • docker run --name bar --net mynet img
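
a quick connectivity check, assuming the image ships a ping binary

  • docker exec foo ping -c 1 bar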

copy a file out of an image to /tmp/some.file

docker cp $(docker create my/image:latest):/etc/some.file /tmp/some.file
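
note that the one-liner leaves the created container behind; a variant that cleans up after itself

CID=$(docker create my/image:latest)
docker cp "$CID":/etc/some.file /tmp/some.file
docker rm "$CID"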

git

Set git to use the credential memory cache

git config --global credential.helper cache

Set the cache to timeout after 1 hour (setting is in seconds)

git config --global credential.helper 'cache --timeout=3600'

Set default editor

git config --global core.editor "vim"

create a patch from a modified file

git diff <modified file>  > this.patch

apply a diff patch

git apply this.patch

checkout a pull request

add fetch = +refs/pull/*/head:refs/remotes/origin/pr/* to .git/config under the [remote "origin"] section,
then run git fetch origin. Now you can git checkout pr/999. The section ends up looking like the snippet below.
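
the URL here is a placeholder; the first fetch line is the stock refspec every clone already has

[remote "origin"]
    url = git@example.com:user/repo.git
    fetch = +refs/heads/*:refs/remotes/origin/*
    fetch = +refs/pull/*/head:refs/remotes/origin/pr/*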

youtube-dl

search and download first match

youtube-dl ytsearch:damniam

set ID3 tags automatically

youtube-dl --prefer-ffmpeg --embed-thumbnail --add-metadata --metadata-from-title "%(artist)s - %(title)s" --audio-quality 0 --audio-format mp3 --extract-audio https://www.youtube.com/watch?v=mvK_5nNPKr8

retropie

for ROM conversion

community/ecm-tools 1.03-1 [installed]
    Error Code Modeler

ecm2bin rom.img.ecm

ufw

list all rules

  • ufw status

enable/disable firewall

  • ufw enable
  • ufw disable

systemd

list all installed unit files

systemctl list-unit-files
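
the same command can filter, e.g. for enabled units only

systemctl list-unit-files --state=enabled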

vs code

pipe into vs-code

ps fax | grep code | code-oss -  # '-' reads from stdin; code-oss is the open-source build of vs code

compare open file to clipboard

workbench.files.action.compareWithClipboard

Nodejs

sleep()

function sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
}


sleep(1500).then(() => console.log("after 1.5 seconds"));

async function main() {
    await sleep(20000)
    console.log("do something after 20 seconds")
}

main()

nginx

proxy an api service and add cors headers and basic auth

load_module modules/ngx_http_headers_more_filter_module.so;

events {
    worker_connections 1024;
}

http {
  server {
    listen 80;
    server_name 127.0.0.1;


    location / {
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Authorization "Basic ZW5lcmdpY29zOmVuZXJnaWNvcw==";

        set $cors "1";

        if ($request_method = 'OPTIONS') {
            set $cors "${cors}o";
        }

        if ($cors = "1") {
            more_set_headers 'Access-Control-Allow-Origin: $http_origin';
            more_set_headers 'Access-Control-Allow-Credentials: true';
        }

        if ($cors = "1o") {
            more_set_headers 'Access-Control-Allow-Origin: $http_origin';
            more_set_headers 'Access-Control-Allow-Methods: GET, POST, OPTIONS, PUT, DELETE';
            more_set_headers 'Access-Control-Allow-Credentials: true';
            more_set_headers 'Access-Control-Allow-Headers: Origin,Content-Type,Accept';
            add_header Content-Length 0;
            add_header Content-Type text/plain;
            return 204;
        }

        proxy_pass      https://some.url;
    }
  }
}

A Dockerfile would look like this

FROM alpine:3.7

RUN apk --update --no-cache add nginx nginx-mod-http-headers-more

COPY nginx.conf /etc/nginx/nginx.conf
RUN mkdir /run/nginx

EXPOSE 80
CMD nginx -g 'daemon off;'
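
to try it out locally; the image tag is arbitrary

docker build -t cors-proxy .
docker run --rm -p 80:80 cors-proxy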

nginx dynamic reverse proxy for docker swarm mode

events {
        worker_connections 1024;
}

http {
    server {
        resolver 127.0.0.11;
        location ~ /(.*) {
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

            set $upstream $1;

            proxy_pass http://$upstream;
        }
    }
}
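
the resolver 127.0.0.11 is docker's embedded DNS, so the first path segment is looked up as a service name: a request to http://<proxy>/whoami lands on the service named whoami. A sketch with placeholder names (the whoami image is just a test HTTP server):

docker network create -d overlay mynet
docker service create --name whoami --network mynet containous/whoami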

knex

insert ... on duplicate key update for MariaDB/MySQL

async function insert_on_duplicated(table, data) {
  // build a plain INSERT statement
  let insert = knex(table)
    .insert(data)
    .toString();
  // build an UPDATE statement and strip everything up to SET,
  // keeping only the "col = value" assignments
  let update = knex(table)
    .update(data)
    .toString()
    .replace(/^update .* set /i, "");
  return await knex.raw(`${insert} on duplicate key update ${update}`);
}

k8s

list secrets

  • kubectl -n kube-system get secret
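
secret values come back base64 encoded; <secret-name> is a placeholder

  • kubectl -n kube-system get secret <secret-name> -o yaml
  • echo "<base64 value>" | base64 -d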

https://blog.alexellis.io/kubernetes-in-10-minutes/

vue

vuejs without shitty webpack

npm install -D  @vue/cli
npx vue init simple web

DigitalOcean Spaces with goofys

place key and secret into ~/.aws/credentials

[default]
aws_access_key_id = ...
aws_secret_access_key = ...

and do

./goofys --endpoint ams3.digitaloceanspaces.com melmac /home/ec2-user/t/

python

Python json dump with datetime values. Every time json doesn’t know how to serialize a value, it calls the function passed as default.

import datetime
import json

def dtconverter(o):
    # render datetimes as strings; other unknown types still fall through
    if isinstance(o, datetime.datetime):
        return str(o)

print(json.dumps(my_py_dict_var, default=dtconverter))

ML

Neural networks are universal approximators - meaning that for any function F and error E, there exists some neural network (needing only a single hidden layer) that can approximate F with error less than E.

Normalisation is required so that all inputs are in a comparable range.

Take two inputs x1 and x2, where x1 ranges from 0 to 0.5 and x2 from 0 to 1000: a change of 0.5 in x1 is a 100% change, while the same absolute change in x2 is only 0.05%.
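
one common way to get there is min-max scaling, which maps every input to the range [0, 1]:

x_scaled = (x - x_min) / (x_max - x_min)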

puppet

  • list all nodes

puppet cert list --all

  • remove a node

puppet cert clean <node name>

  • add / accept a node

puppet cert sign <node name>

MySQL / MariaDB

dump a live system without blocking

  • for MyISAM
    nice -n 19 ionice -c2 -n 7 mysqldump --lock-tables=false <dbname> > dump.sql

  • for InnoDB
    nice -n 19 ionice -c2 -n 7 mysqldump --single-transaction=TRUE <dbname> > dump.sql
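
to load a dump back in

mysql <dbname> < dump.sql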

allow a user to create databases with a prefix

GRANT ALL PRIVILEGES ON `dev\_%`.* TO 'dev'@'%';

NetBSD

PATH="/usr/pkg/sbin:$PATH"
PKG_PATH="ftp://ftp.NetBSD.org/pub/pkgsrc/packages/NetBSD/amd64/8.0_current/All/"
export PATH PKG_PATH
pkg_add bash nano

NetBSD luactl

modload lua
luactl create mycpu
luactl load mycpu ./cpu.lua

print to /var/log/messages using systm module

cat hw.lua
    systm.print("hello kernel!\n")

modload luasystm
modstat |grep lua
luactl require helo systm
luactl load helo ./hw.lua
cat /var/log/messages
    ...
    Oct  9 09:37:29 localhost /netbsd: hello kernel!

ssh

create pub key from private

ssh-keygen -y -f my.key > my.pub

create pem from public

ssh-keygen -f ~/.ssh/my.pub -e -m PKCS8 > my.pem

encrypt message with pem

echo "some secret" |openssl rsautl -encrypt -pubin -inkey my.pem -ssl
echo "some secret" |openssl rsautl -encrypt -pubin -inkey my.pem -ssl > encrypted_message
echo "some secret" |openssl rsautl -encrypt -pubin -inkey my.pem -ssl -out encrypted_message

decrypt message with private

openssl rsautl -decrypt -inkey ~/.ssh/my.key -in encrypted_message

nextcloud

call cron.php from nextcloud which is running in a docker container

docker exec -u www-data $(docker ps --filter "name=nextcloud" --format "{{.Names}}") php cron.php

mount with davfs2

sudo mount -t davfs https://nextcloud/remote.php/webdav /mount/point

xfs

grow the filesystem (xfs can grow online, but never shrink)

xfs_growfs /mount/point/

yubikey

In general you get:

  • 2 slots
  • storage for 32 OATH credentials

oath-hotp on the 2nd slot for ssh auth

  1. generate secret
    dd if=/dev/random bs=1k count=1 | sha1sum
  2. flash yubi key slot 2 with generated secret
    ykpersonalize -2 -o oath-hotp -o oath-hotp8 -o append-cr -a <SECRET>

oath totp for aws

  • set
    ykman oath add -t aws-username <YOUR_BASE_32_KEY>
  • get
ykman oath code aws-username -s
  • list
    ykman oath list

ansible

run tests in docker container

bin/ansible-test units -v --python 3.7 --docker default

proxysql

change admin credentials

Variables with an admin- prefix are ADMIN variables, so change them with the ADMIN statements:

UPDATE global_variables SET variable_value='admin:N3wP4ssw3rd!' WHERE variable_name='admin-admin_credentials';
LOAD ADMIN VARIABLES TO RUNTIME;
SAVE ADMIN VARIABLES TO DISK;

btrfs

on small devices

sudo mkfs.btrfs --mixed -f /dev/nvme1n1

mount with zstd compression

- name: mount with zstd compression
  mount:
    path: /mnt/
    src: /dev/nvme1n1
    fstype: btrfs
    opts: compress=zstd,discard,nofail,defaults
    state: present
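
the equivalent one-off mount, without ansible

sudo mount -o compress=zstd,discard /dev/nvme1n1 /mnt/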