You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

9.8 KiB



Write code as if it were synchronous

// Fetch JSON from URL and store it on `this.playlists`.
// NOTE(review): arrow functions capture `this` lexically — `this` here is
// whatever the enclosing scope provides (e.g. a component instance); confirm
// at the call site.
let get = async (URL) => {
    const retval = await fetch(URL)
    if (retval.ok) {
        this.playlists = await retval.json()
    } else {
        console.error("doh! network error")
    }
}

Or use promise chaining

    .then(stream => stream.json())
    .then(data => this.playlists = data)
    .catch(error => console.error(error))


auto-reply y on installations or fsck repairs

yes | pacman -S <something>

convert all svg files from current path into pngs

find . -name *.svg -exec mogrify -format png {} +

network diff

diff -ru <(sort -u "$FILE") <(ssh user@host "sort -u $FILE")


extract audio-only from video file with ID3 tags

ffmpeg -i <input video>  -metadata title="Title" -metadata artist="Artist" -ab 256k file.mp3

record screen

ffmpeg -f x11grab  -s 1366x768 -i :0.0 -r 25 -threads 2 -c:v libx264 -crf 0 -preset ultrafast  output.mkv


<!-- Keep table rows intact across printed pages and repeat head/foot rows. -->
<style type="text/css">
    table { page-break-inside:auto }
    tr    { page-break-inside:avoid; page-break-after:auto }
    thead { display:table-header-group }
    tfoot { display:table-footer-group }
</style>


drop all but accept from one ip

# -s needs a source address argument (lost in the original paste) — without it
# iptables rejects the rule.
iptables -A INPUT -p tcp --dport 8000 -s <ip-address> -j ACCEPT
iptables -A INPUT -p tcp --dport 8000 -j DROP

drop all incoming ssh connections

iptables -A INPUT -i eth0 -p tcp --dport 22 -m state --state NEW,ESTABLISHED -j DROP


Delete all containers

  • docker rm $(docker ps -a -q)

Delete all images

  • docker rmi $(docker images -q)

Delete all dangling images

  • docker rmi $(docker images -f dangling=true -q)

Create docker network

  • docker network create mynet

clean up

  • docker system prune
  • docker volume rm $(docker volume ls -q --filter dangling=true)

run two containers from the same image with the names foo and bar; they can then reach each other via the domain names foo and bar

  • docker run --name foo --net mynet img
  • docker run --name bar --net mynet img

copy files from an image to /tmp/some.file

docker cp $(docker create my/image:latest):/etc/some.file /tmp/some.file


Set git to use the credential memory cache

git config --global credential.helper cache

Set the cache to timeout after 1 hour (setting is in seconds)

git config --global credential.helper 'cache --timeout=3600'

Set default editor

git config --global core.editor "vim"

create a patch from a modified file

git diff <modified file>  > this.patch

apply a diff patch

git apply this.patch

checkout a pull request

add fetch = +refs/pull/*/head:refs/remotes/origin/pr/* to .git/config under the [remote "origin"] section.
do git fetch origin. Now you can git checkout pr/999.


search and download first match

youtube-dl ytsearch:damniam

set auto id3 tags

youtube-dl --prefer-ffmpeg --embed-thumbnail --add-metadata --metadata-from-title "%(artist)s - %(title)s" --audio-quality 0 --audio-format mp3 --extract-audio


for rom converting

community/ecm-tools 1.03-1 [Installiert]
    Error Code Modeler

ecm2bin rom.img.ecm


list all rules

  • ufw status

disable/enable firewall

  • ufw enable
  • ufw disable


systemctl list-unit-files

vs code

pipe into vs-code

ps fax | grep code | code-oss # for using open source version of vs code

compare open file to clipboard




/**
 * Resolve after `ms` milliseconds so callers can `await` a delay.
 * @param {number} ms - delay in milliseconds
 * @returns {Promise<void>}
 */
function sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
}
// Delay matches the logged message now (was sleep(1000) with an unterminated
// string literal claiming "1.5 seconds").
sleep(1500).then(() => console.log("after 1.5 seconds"))

// Demo of awaiting the same helper inside an async function.
async function main() {
    await sleep(20000)
    console.log("do something after 20 seconds")
}


proxy an api service and add cors headers and basic auth

# Module filename restored — the headers-more module ships in the
# nginx-mod-http-headers-more Alpine package and provides more_set_headers.
load_module modules/ngx_http_headers_more_filter_module.so;

events {
   worker_connections 1024;
}

http {
  server {
    listen 80;

    location / {
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # Inject fixed basic-auth credentials towards the upstream.
        proxy_set_header Authorization "Basic ZW5lcmdpY29zOmVuZXJnaWNvcw==";

        # nginx has no if/else — encode the state in a string instead:
        # "1"  = normal request, "1o" = CORS preflight (OPTIONS).
        set $cors "1";
        if ($request_method = 'OPTIONS') {
            set $cors "${cors}o";
        }
        if ($cors = "1") {
            more_set_headers 'Access-Control-Allow-Origin: $http_origin';
            more_set_headers 'Access-Control-Allow-Credentials: true';
        }
        if ($cors = "1o") {
            more_set_headers 'Access-Control-Allow-Origin: $http_origin';
            more_set_headers 'Access-Control-Allow-Methods: GET, POST, OPTIONS, PUT, DELETE';
            more_set_headers 'Access-Control-Allow-Credentials: true';
            more_set_headers 'Access-Control-Allow-Headers: Origin,Content-Type,Accept';
            add_header Content-Length 0;
            add_header Content-Type text/plain;
            return 204;
        }
        proxy_pass      https://some.url;
    }
  }
}

A Dockerfile would look like this

FROM alpine:3.7

# nginx plus the headers-more module (provides the more_set_headers directive)
RUN apk --update --no-cache add nginx nginx-mod-http-headers-more

COPY nginx.conf /etc/nginx/nginx.conf
# nginx on Alpine expects this runtime directory to exist
RUN mkdir /run/nginx

# Run in the foreground so the container stays alive
CMD nginx -g 'daemon off;'

nginx dynamic reverse proxy for docker swarm mode

events {
        worker_connections 1024;
}

http {
    server {
        location ~ /(.*) {
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

            # First path segment selects the upstream service name.
            set $upstream $1;

            # NOTE(review): proxy_pass with a variable resolves the name at
            # request time and needs a resolver directive (Docker's embedded
            # DNS is 127.0.0.11) — confirm for your deployment.
            proxy_pass http://$upstream;
        }
    }
}


insert on duplicated for MariaDB/MySQL

/**
 * MariaDB/MySQL upsert: build "INSERT ... ON DUPLICATE KEY UPDATE ..." from
 * two knex query strings.
 * @param {string} table - target table name
 * @param {Object} data - column/value map to insert or update
 * @returns {Promise} result of knex.raw
 */
async function insert_on_duplicated(table, data){
	let insert = knex(table).insert(data).toString();
	// Strip the leading "update <table> set " so only the assignment list remains.
	let update = knex(table).update(data).toString().replace(/^update .* set /i, '');
	return await knex.raw(`${insert} on duplicate key update ${update}`);
}


list secrets

  • kubectl -n kube-system get secret


vuejs without shitty webpack

npm install -D  @vue/cli
npx vue init simple web

DigitalOcean Spaces with goofys

place key and secret into .aws/credentials

aws_access_key_id = ...
aws_secret_access_key = ...

and do

./goofys --endpoint melmac /home/ec2-user/t/


Python json dump with datetime. every time JSON doesn't know how to convert a value, it calls the default() function.

def dtconverter(o):
    if isinstance(o, datetime.datetime):
        return o.__str__()

print(json.dumps(my_py_dict_var, default = dtconverter))


Neural networks are universal approximators - meaning that for any function F and error E, there exists some neural network (needing only a single hidden layer) that can approximate F with error less than E.

Normalisation is required so that all the inputs are at a comparable range.

With two inputs (x1 and x2), where x1 values are from range 0 to 0.5 and x2 values are from range 0 to 1000. When x1 is changing by 0.5, the change is 100%, and a change of x2 by 0.5 is only 0.05%.


  • list all nodes

    puppet cert list --all

  • remove a node

    puppet cert clean

  • add / accept a node

    puppet cert sign

MySQL / MariaDB

dump a live system without blocking

  • for MyISAM
    nice -n 19 ionice -c2 -n 7 mysqldump --lock-tables=false <dbname> > dump.sql

  • for InnoDB
    nice -n 19 ionice -c2 -n 7 mysqldump --single-transaction=TRUE <dbname> > dump.sql

  • allow user to create databases with prefix

GRANT ALL PRIVILEGES ON  `dev\_%` . * TO  'dev'@'%';


pkg_add bash nano

NetBSD luactl

modload lua
luactl create mycpu
luactl load mycpu ./cpu.lua 

print to /var/log/messages using systm module

cat hw.lua
    systm.print("hello kernel!\n")

modload luasystm
modstat |grep lua
luactl require helo systm
luactl load helo ./hw.lua
cat /var/log/messages
    Oct  9 09:37:29 localhost /netbsd: hello kernel!


create pub key from private

ssh-keygen -y -f my.key >

create pem from public

ssh-keygen -f ~/.ssh/ -e -m PKCS8 > my.pem

encrypt message with pem

echo "some secret" |openssl rsautl -encrypt -pubin -inkey my.pem -ssl
echo "some secret" |openssl rsautl -encrypt -pubin -inkey my.pem -ssl > encrypted_message
echo "some secret" |openssl rsautl -encrypt -pubin -inkey my.pem -ssl -out encrypted_message

decrypt message with private

openssl rsautl -decrypt -inkey ~/.ssh/my.key -in encrypted_message


call cron.php from nextcloud which is running in a docker container

docker exec -u www-data $(docker ps  --filter "Name=nextcloud" --format "{{.Names}}")  php cron.php

mount with davfs2

sudo mount -t davfs https://nextcloud/remote.php/webdav /mount/point


increase filesystem

xfs_growfs /mount/point/


In general you got * 2 Slots * 32 oath credentials storage

oath-hotp 2nd slot for ssh auth

  1. generate secret
    dd if=/dev/random bs=1k count=1 | sha1sum
  2. flash yubi key slot 2 with generated secret
    ykpersonalize -2 -o oath-hotp -o oath-hotp8 -o append-cr -a <SECRET>

oath totp for aws

  • set
    ykman oath add -t aws-username <YOUR_BASE_32_KEY>
  • get
    ykman oath code bergholm -s
  • list
    ykman oath list