
SHIFT-WIKI - Sjoerd Hooft's InFormation Technology

This WIKI is my personal documentation blog. Please enjoy it and feel free to reach out via Bluesky if you have a question, remark, improvement or observation.


Cheatsheet Node.js

Summary: Node.js hints, tips, one-liners and best practices.
Date: 8 December 2024

AWS Lambda Node.js

Get Dynamo DB Table as Log

const AWS = require("aws-sdk");
const dynamodb = new AWS.DynamoDB();
 
// Get Time in the Netherlands
var dutchTime = new Date().toLocaleString('en-US', {timeZone: "Europe/Amsterdam"});
var requestTime = new Date(dutchTime);
var year = requestTime.getFullYear();
var month = requestTime.getMonth() + 1; //Months go from 0 - 11
// Build zero-padded YYYY-MM strings for the current month and the previous month
if (month === 1){
    var fullMonth = year + "-0" + month;
    var lastMonth = (year - 1 ) + "-12";
}
if (month > 1 && month < 10){
    var fullMonth = year + "-0" + month;
    var lastMonth = year + "-0" + (month - 1);
}
if (month === 10){
    var fullMonth = year + "-" + month;
    var lastMonth = year + "-0" + (month - 1);
}
if (month > 10){
    var fullMonth = year + "-" + month;
    var lastMonth = year + "-" + (month - 1);
}
 
const params = {
  TableName: 'LogTable',
  FilterExpression: 'begins_with(RequestTime, :RequestTime) OR begins_with(RequestTime, :RequestTime2)',
  ExpressionAttributeValues: {
    ':RequestTime': {S: fullMonth},
    ':RequestTime2': {S: lastMonth},
  },
};
 
exports.handler = (event, context, callback) => {
    console.log(fullMonth + " " + lastMonth);
    // Note: scan returns at most 1 MB of data per call; paginate with LastEvaluatedKey for larger tables
    dynamodb.scan(params, (err, data) => {
        if (err) {
            console.log('Error on retrieving log data ' + err);
            return callback(err);
        }
        callback(null, data.Items);
    });
};

Send Email

//jshint esversion:6
 
/* IAM-Access
Description: Sends the end email to the customer with all required information.
 
Links:
- AWS Javascript SDK for SES: https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/SES.html
- Util.inspect and JSON.stringify: https://medium.com/@reginald.johnson/testing-aws-lambda-functions-the-easy-way-41cf1ed8c090
 
*/
 
// AWS Variables
var aws = require('aws-sdk');
aws.config.update({region: 'eu-west-1'});
// SES const & vars
const ses = new aws.SES();
var util = require('util');
var emailfrom = 'info@shiftwiki.nl';
 
// Workflow Variables
// Declaring the customerData variable
var customerData;
 
// Send email to customer
function sendMail(params){
  console.log("Start function sendMail. Email address: " + params.email );
  var emailto;
  if (customerData.discountCode === "testmodus"){
    emailto = 'sjoerd@shiftwiki.nl';
    emailfrom = 'info@shiftwiki.nl';
  } else {
    emailto = params.email;
  }
  console.log("Email address: " + emailto );
  //email
  var mailparams = {
      Destination: {
          ToAddresses: [emailto]
      },
      Message: {
          Body: {
              Html: {
                  Charset: "UTF-8",
                  Data: "Dear " + params.firstname + " " + params.lastname +
                  "<br><br>" +
                  "All is ready. " +
                  "<br><br>" +
                  "Kind regards, <br>" +
                  "Shift"
              }
          },
          Subject: { Data: "[SHIFT]Ready. "}
      },
      Source: emailfrom
  };
  return ses.sendEmail(mailparams).promise();
}
 
// Error Handling
function errorResponse(message) {
  this.name = "email-to-customer-end";
  this.message = message;
}
errorResponse.prototype = new Error();
 
exports.handler = (event, context, callback) => {
    console.log("Start email-to-customer-end");
    //console.log(event);
    //console.log(context);
    customerData = event;
 
    // Display available information
    console.log(customerData);
 
    // Invoke function sendmail
    sendMail(customerData).then(() => {
      customerData.status = customerData.status + "Customer end email sent out. ";
      callback(null, customerData);
    }).catch((err) => {
      // Error to CloudWatch
      console.error(err);
      // Add Error to Task Output
      const error = new errorResponse(err.message);
      callback(error);
    });
};

Create S3 Bucket

//jshint esversion:6
 
/* S3-Buckets-Add
Description: Creates two S3 buckets, configure access policy, website hosting and redirect.
 
Original PowerShell Script:
### S3 Bucket Creation as a Static Website ###
New-S3Bucket -BucketName $bucketname -PublicReadOnly
Write-S3BucketWebsite -BucketName $bucketname -WebsiteConfiguration_IndexDocumentSuffix index.html -WebsiteConfiguration_ErrorDocument error.html
$s3policy = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*","Action":"s3:GetObject","Resource":"arn:aws:s3:::bucketnameplaceholder/*"}]}'
$s3policy = $s3policy -replace "bucketnameplaceholder", "$bucketname"
Write-S3BucketPolicy -BucketName "$bucketname" -Policy $s3policy
write-host "S3 Bucket $bucketname was created on $((Get-S3Bucket -BucketName $bucketname).creationdate) "
### S3 Bucket Creation Redirect ###
New-S3Bucket -BucketName $wwwbucketname
Add-S3PublicAccessBlock -BucketName $wwwbucketname `
-PublicAccessBlockConfiguration_BlockPublicAcl $true `
-PublicAccessBlockConfiguration_BlockPublicPolicy $true `
-PublicAccessBlockConfiguration_IgnorePublicAcl $true `
-PublicAccessBlockConfiguration_RestrictPublicBucket $true
Write-S3BucketWebsite -BucketName $wwwbucketname -RedirectAllRequestsTo_HostName $bucketname
write-host "S3 Bucket $wwwbucketname was created on $((Get-S3Bucket -BucketName $wwwbucketname).creationdate) "
 
Links:
- AWS Javascript SDK for S3: https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html
 
*/
 
// AWS Variables
var aws = require('aws-sdk');
aws.config.update({region: 'eu-west-1'});
var s3 = new aws.S3();
 
// Workflow Variables
// Declaring the customerData variable
var customerData;
 
//function s3BucketCreate
function s3BucketCreate(params) {
  console.log("Start function s3BucketCreate. Input: " + params.domainname);
  return s3.createBucket({
    Bucket: params.domainname,
    CreateBucketConfiguration: {
     LocationConstraint: aws.config.region
    },
    ACL: "public-read"
  }).promise();
}
 
//function s3wwwBucketCreate
function s3wwwBucketCreate(params) {
  console.log("Start function s3wwwBucketCreate. Input: " + params.wwwdomainname);
  return s3.createBucket({
    Bucket: params.wwwdomainname,
    CreateBucketConfiguration: {
     LocationConstraint: aws.config.region
    }
  }).promise();
}
 
//function s3BucketWebsite
function s3BucketWebsite(params){
  console.log("Start function s3BucketWebsite. Input: " + params.domainname);
  return s3.putBucketWebsite({
    Bucket: params.domainname,
    WebsiteConfiguration: {
      ErrorDocument: {
        Key: 'error.html'
      },
      IndexDocument: {
        Suffix: 'index.html'
      }
    }
  }).promise();
}
 
//function s3BucketPolicy
function s3BucketPolicy(params){
  console.log("Start function s3BucketPolicy. Input: " + params.domainname);
  var s3policyplaceholder = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":\"s3:GetObject\",\"Resource\":\"arn:aws:s3:::domainnameplaceholder/*\"}]}";
  var s3policy = s3policyplaceholder.replace("domainnameplaceholder", params.domainname);
  return s3.putBucketPolicy({
    Bucket: params.domainname,
    Policy: s3policy,
  }).promise();
}
 
//function s3BucketPublicAccessBlock
function s3BucketPublicAccessBlock(params){
  console.log("Start function s3BucketPublicAccessBlock. Input: " + params.wwwdomainname);
  return s3.putPublicAccessBlock({
    Bucket: params.wwwdomainname,
    PublicAccessBlockConfiguration: {
      BlockPublicAcls: true,
      BlockPublicPolicy: true,
      IgnorePublicAcls: true,
      RestrictPublicBuckets: true
    }
  }).promise();
}
 
//function s3BucketWebsiteRedirect
function s3BucketWebsiteRedirect(params){
  console.log("Start function s3BucketWebsiteRedirect. Input: " + params.wwwdomainname + " and " + params.domainname);
  return s3.putBucketWebsite({
    Bucket: params.wwwdomainname,
    WebsiteConfiguration: {
      RedirectAllRequestsTo: {
        HostName: params.domainname
      }
    }
  }).promise();
}
 
// Error Handling
function errorResponse(message) {
  this.name = "s3-buckets-add";
  this.message = message;
}
errorResponse.prototype = new Error();
 
exports.handler = (event, context, callback) => {
    console.log("Start s3-buckets-add");
    // console.log(event);
    customerData = event;
 
    // Display available information
    console.log(customerData);
 
    // create params variable to pass to s3 functions
    var params = {
      domainname: customerData.domainname,
      wwwdomainname: customerData.wwwdomainname
    };
 
    // Invoke the S3 functions sequentially as a flat promise chain
    s3BucketCreate(params).then(() => {
      customerData.status = customerData.status + "S3 Bucket creation success. ";
      return s3BucketWebsite(params);
    }).then(() => {
      customerData.status = customerData.status + "S3 Bucket configured as website. ";
      return s3BucketPolicy(params);
    }).then(() => {
      customerData.status = customerData.status + "S3 Bucket Policy set. ";
      return s3wwwBucketCreate(params);
    }).then(() => {
      customerData.status = customerData.status + "S3 wwwBucket creation success. ";
      return s3BucketPublicAccessBlock(params);
    }).then(() => {
      customerData.status = customerData.status + "S3 wwwBucket public access blocked. ";
      return s3BucketWebsiteRedirect(params);
    }).then(() => {
      customerData.status = customerData.status + "S3 wwwBucket redirected to S3 Bucket. ";
      callback(null, customerData);
    }).catch((err) => {
      // Error to CloudWatch
      console.error(err);
      // Add Error to Task Output
      const error = new errorResponse(err.message);
      callback(error);
    });
};

Cheatsheet Networking

Summary: A cheatsheet to collect various information regarding networking.
Date: 3 January 2025

OSI Model

Layer Type   Data Unit  Layer            Function                                   Example
Host Layer   Data       7. Application   Network process to application             DNS, FTP, HTTP, NFS, NTP, SMTP, SNMP, Telnet
Host Layer   Data       6. Presentation  Data representation and encryption         MIME
Host Layer   Data       5. Session       Interhost communication                    NetBIOS
Host Layer   Segment    4. Transport     End-to-end connections and reliability     TCP, UDP, SSL, TLS
Media Layer  Packet     3. Network       Path determination and logical addressing  IP, ICMP, IPsec, IPX, AppleTalk
Media Layer  Frame      2. Data Link     Physical addressing                        ARP, Ethernet, PPP
Media Layer  Bit        1. Physical      Media, signal and binary transmission      T1, DSL, 802.11a/b/g/n PHY, Ethernet, USB, Bluetooth

Cisco

This is a notes page, extended with tips & tricks. This page is not really documentation, just stuff for me to remember. Sometimes things will get removed from these pages and turned into real documentation, sometimes not. You might find these notes to come in handy, maybe not. For me, it's just things I don't want to forget.

Log In

config t
int g0/12

enter your changes

end
show run (optional to check your changes)
write
Command    Description
conf t     configure terminal; configure via the terminal
int g0/12  interface g0/12; configure gigabit interface of stack 0, port 12
end        ready with changes
write      save the changes

Settings

interface GigabitEthernet0/12
 description server12
 switchport access vlan 10
 switchport mode access
 spanning-tree portfast
 spanning-tree bpdufilter enable
 no shutdown
Setting                          Description
description server12             Description given to the port
switchport access vlan 10        The VLAN the port has access to
switchport mode access           The port has access to just one VLAN
spanning-tree portfast           Fast availability despite spanning-tree
spanning-tree bpdufilter enable  Prevent broadcast packet storms
no shutdown                      Enable the port
interface GigabitEthernet0/4
 description blade4
 switchport trunk native vlan 3999
 switchport mode trunk
 spanning-tree portfast trunk
 spanning-tree bpdufilter enable
Setting                            Description
switchport trunk native vlan 3999  Devices without a VLAN tag will be placed in this VLAN
switchport mode trunk              The port carries all VLANs; VLAN tagging is done at the device level
spanning-tree portfast trunk       Fast availability despite spanning-tree

Fast Overview All Ports

The show interfaces status command will give you a quick overview of port configuration:

coreswitch#show interfaces status

Port      Name               Status       Vlan       Duplex  Speed Type
Gi1/0/1                      connected    trunk      a-full a-1000 10/100/1000BaseTX
Gi1/0/2                      connected    trunk      a-full a-1000 10/100/1000BaseTX
Gi1/0/3                      connected    trunk      a-full a-1000 10/100/1000BaseTX
Gi1/0/4                      connected    trunk      a-full a-1000 10/100/1000BaseTX
Gi1/0/5                      notconnect   18           auto   auto 10/100/1000BaseTX
Gi1/0/6                      notconnect   1            auto   auto 10/100/1000BaseTX
Gi1/0/7                      notconnect   1            auto   auto 10/100/1000BaseTX
Gi1/0/8                      notconnect   1            auto   auto 10/100/1000BaseTX

From Windows

Reverse lookup with ping
ping -a 10.10.10.10


NSlookup with default nameserver
nslookup servername


NSlookup with non-default nameserver ns01
nslookup servername ns01


Remote desktop console
mstsc.exe /v:servername /admin


Test connection
Test-NetConnection -InformationLevel "Detailed" -ComputerName servername

Windows TCP Dump

Note: This needs (portable) tcpdump; the created file can be opened with (portable) Wireshark for analysis. See here for more info on tcpdump.

dump traffic with host 10.10.10.10 on interface 1 to file
\tcpdump.exe -i 1 -vv host 10.10.10.10 -w tcpdumpfile.pcap


read the dump file and show traffic with host 10.10.10.10 with a readable timestamp format
\tcpdump.exe -vv host 10.10.10.10 -tttt -nr tcpdumpfile.pcap

Remote IPConfig

Renew IP config remotely without losing access (both commands on one line, so the renew still runs after the release)
ipconfig /release && ipconfig /renew

Active Directory 2008 R2 Basic Installation

Summary: How to install a Windows Server 2008 server as a domain controller.
Date: Around 2009
Refactor: 20 February 2025: Checked links and formatting.

This is an installation report of a basic installation of Microsoft's Active Directory through the tool dcpromo. The settings are based on a small AD domain design and should be adjusted to your own situation when installing in a production environment.

This page was created for a 2008 R2 AD; if you need an explanation for 2003, look here.

Installation

To start the installation go to Start → Run and type dcpromo. After pressing <Enter> the installation wizard first checks whether the required binaries are installed:

adinstall2008r2-01.jpg


After a while (shouldn't take more than a few minutes tops) you'll get the welcome screen of the installation wizard, where you'll just keep the default (do not select the advanced mode):

adinstall2008r2-02.jpg


Read the warning (it will just take a few seconds) and click next:

adinstall2008r2-03.jpg


Now select to create a new domain in a new forest:

adinstall2008r2-04.jpg


Enter the FQDN for your domain. I've found it good practice to set this to the same value as your internal DNS domain suffix. Click Next:

adinstall2008r2-05.jpg


Select the functional level you require:

adinstall2008r2-06.jpg


Also select to install the DNS server; this is the recommendation and AD is very DNS-dependent:

adinstall2008r2-07.jpg


Because you're installing a new DNS server, and there is none yet available, you'll get a warning, which can be ignored in most cases:

adinstall2008r2-08.jpg


Keep the defaults, both the database and the log folder won't grow out of proportion, so just click Next:

adinstall2008r2-09.jpg


Enter a password, which you'll need if you ever have to start AD in restore mode. Document this password properly:

adinstall2008r2-10.jpg


Check the installation summary and if everything is correct, click next:

adinstall2008r2-11.jpg


And the installation will start:

adinstall2008r2-12.jpg


When done click finish and REBOOT THE MACHINE. No seriously, please do:

adinstall2008r2-13.jpg


Check the AD installation

After the installation and the reboot of the server it's not so wise to rush into any other installation (like Exchange). Unfortunately, Microsoft does not have the best reputation on software installations, and that's not without reason. We're going to verify that our installation went well.

Check the DC

  • Start → Run → dsa.msc (starts AD Users and Computers)
  • Check to see if the DC is listed under the 'Domain Controllers' OU.

Check the site

  • Start → Run → dssite.msc (starts AD Sites and Services)
  • Check to see if you have a 'NTDS Settings' under your DC.

Check DNS

  • Start → Run → dnsmgmt.msc (starts the DNS Management MMC snap-in)
  • Check the DNS configuration and zones.

Create Reverse Lookup Zone

By default, dcpromo creates a forward lookup zone, but no reverse lookup zone. I recommend creating one right away; this will make some tools behave nicely, and it prevents you from having to create all kinds of records manually when you add one later on. Right-click the reverse lookup zones and select 'Add new zone'. This will give you the following wizard so you can select your options:

adinstall2008r2-14.jpg


Select primary zone and keep the zone stored in AD and click Next:

adinstall2008r2-15.jpg


Set the replication to all DNS servers in the domain:

adinstall2008r2-16.jpg


Select an IPv4 reverse lookup zone:

adinstall2008r2-17.jpg


Fill in the network ID:

adinstall2008r2-18.jpg


Allow only secure updates:

adinstall2008r2-19.jpg


Click finish to complete the wizard and start using your reverse lookup zone:

adinstall2008r2-20.jpg


Check folders

Check these folders to see if the content is correct:

  • C:\WINDOWS\NTDS
    • The AD database should be created (NTDS.DIT, which stores information about user accounts, groups, etc.)
    • The edb.chk file is a checkpoint file that points to the last committed checkpoint in the log file. The edb.log file is the current log file and holds the ntds.dit transactions.
    • edbres00001.jrs and edbres00002.jrs are reserve log files in case the drive runs out of disk space. These files are always 10 MB in size.
  • C:\WINDOWS\SYSVOL
    • In SYSVOL\domain\Policies there should be two directories containing the 'Default Domain Policy' and the 'Default Domain Controllers Policy'. You won't recognize them as such since they have unique (GUID) names, for example '6AC1786C-016F-11D2-945F-00C04fB984F9'.

Cheatsheet Linux and Bash

Summary: Linux management & Bash hints, tips, one-liners and best practices.
Date: 8 December 2024

OS

See release
cat /etc/os-release
cat /etc/SuSE-release


See kernel version
# Kernel version
uname -r
# Full version
uname -a

Runlevel

This is the runlevel config as defined on SuSE:

  • 0 System halt
  • 1 Single user mode
  • 2 Local multiuser mode without network
  • 3 Full multiuser mode with network
  • 4 Not used
  • 5 Full multiuser mode with network and GUI
  • 6 System Reboot

The default runlevel is set in /etc/inittab.

When the box is already running you can change the runlevel by issuing the command

"init x"

replacing the x with the appropriate runlevel number. If you want to change the runlevel while booting, pass the desired runlevel number as a kernel boot parameter. A minimal sketch follows below.
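A minimal sketch, assuming a SysV-init style system where the runlevel and init commands are available (on systemd-based distributions, targets replace runlevels):

# Show the previous and current runlevel
runlevel
# Switch the running system to full multiuser mode with network (runlevel 3)
init 3
# At boot time, append the desired runlevel to the kernel line in the boot loader, e.g. a trailing "3"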

Environment info

  • .profile
    • Set variables (must be exported), aliases and configure the shell (see the sketch after this list)
    • ENV: special variable, used to set a file that will be executed on starting the shell (usually .bashrc and .kshrc)
      • Defines extra variables and aliases.
  • .dtprofile
    • Set GUI, DTSOURCEPROFILE must be TRUE to also load .profile
  • .exrc
    • Loaded on starting vi
      • Allows for stuff like “set number” and “set showmode”
  • .hushlogin
    • Add an empty .hushlogin file in your HOME to discard the MOTD (Message Of The Day) when logging in.
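A minimal .profile sketch tying these together; the values and filenames are illustrative assumptions, not taken from this wiki:

# ~/.profile - read by login shells
export PATH=$PATH:$HOME/bin
export EDITOR=vi
# ENV points at a file that every new interactive shell will execute
export ENV=$HOME/.kshrc
# Extra variables and aliases for subshells then live in $ENV (e.g. ~/.kshrc)
alias ll='ls -l'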

Users

Check who is logged in:
who


Check who you are with session information:
who am i


Check your own userid and group information:
whoami
id


Check the user information of a specific user:
id userid
finger userid


Check the last logins:
# Show all recent logins
last
# Show the last 20 logins
last -20
# Show the last logins of a specific user
last userid

Vi

Configure vi

  • Turn off syntax :syntax off
  • Show line numbers :set number
  • Show mode (input or command) :set showmode

Vi commands

  • Insert text before the cursor i
  • Append text after the cursor a
  • Insert text at the beginning of the line I
  • Append text at the end of the line A
  • Replace character r
  • Replace text R
  • Delete character x
  • Delete 5 characters 5x
  • Delete current line dd
  • Delete the next word dw
  • Delete 10 lines 10dd
  • Delete lines 2 to 4 2,4dd
  • Delete all below lines, including current: dG
  • Copy 10 lines 10yy
  • Paste the copied lines p
  • Undo last change u
  • New line below the current line and insert o
  • New line above the current line and insert O
  • Search for a string downwards /"string"
  • Search for a string upwards ?"string"
  • Search and replace in the complete document :%s/stringold/stringnew/
  • Search and replace in the complete document, confirm each change :%s/stringold/stringnew/gc

Vi navigation

  • Go to the next word w
  • Go to the previous word b
  • Go to the end of the line $
  • Go to the beginning of the line 0
  • Go to the last line of the file G
  • Go to line 1 1G
  • Go to line 206 206G

Other Vi commands

  • Paste text from the Windows clipboard through PuTTY: SHIFT + INS

Certificate Management

Create csr with new private key (preferred)
openssl req -nodes -newkey rsa:2048 -keyout *.shiftwiki.nl.key -out *.shiftwiki.nl.key.csr


Create csr with existing key
openssl req -new -key /data/keys/httpd-wild.shiftwiki.nl/oeserver.pem  -out *.shiftwiki.nl.key.csr


View certificate
openssl x509 -text -noout -in certificate.crt


View csr

openssl req -text -in request.csr

Logfile Management

Truncate a logfile that has grown too big

sudo truncate -s 0 odoo.log


Show and follow a logfile
tail -f "logfile"


Show and follow a logfile with a specific number of lines
tail -f -n 100 "logfile"


Show and follow a logfile starting from a specific line (here: line 100)
tail -f -n +100 "logfile"


Show and follow a logfile starting from a specific line counted from the end
tail -f -100 "logfile"


Show the first 8 lines from a file
head -8 "logfile"

Kernel log

Kernel log file: /var/log/messages
dmesg

Disks

See the link between a device and its filesystem for AWS EBS disks. From the instance, go to the Storage tab to see the device name, then use these commands:

[sjoerd@server ~]$  df -hT /dev/sdf
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/nvme1n1   xfs    20G   14G  6.8G  67% /var/lib/docker
[sjoerd@server ~]$  df -hT /dev/sdg
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/nvme2n1   xfs    20G   15G  5.8G  72% /data

iNodes

Understanding UNIX / Linux filesystem inodes: the inode (index node) is a fundamental concept in the Linux filesystem. Each object in the filesystem is represented by an inode. But what are those objects? Let us try to understand it in simple words. Each and every file under Linux (and UNIX) has the following attributes:

  • File type (executable, block special etc)
  • Permissions (read, write etc)
  • Owner
  • Group
  • File Size
  • File access, change and modification time (remember: UNIX and Linux traditionally do not store a file creation time; this is a favorite question in UNIX/Linux sysadmin job interviews)
  • File deletion time
  • Number of links (soft/hard)
  • Extended attributes such as append-only, or immutable (no one, including the root user, can delete the file)
  • Access Control List (ACLs)

All of the above information is stored in the inode. In short, the inode identifies the file and its attributes (as above). Each inode is identified by a unique inode number within the file system. The inode number is also known as the index number.

How do I see a file's inode number?

$ ls -i /etc/passwd

You can also use the stat command to find the inode number and other attributes:

$ stat /etc/passwd

To find and remove a file by inode number with the find command (necessary when the filename contains special characters), type the command as follows:

$ find . -inum 782263 -exec rm -i {} \;

Additional info: a directory is nothing more than a table of the underlying files with a pointer to the inode information of each file. To "jump" to this inode table you need execute rights on the directory. So if you want to open a file you also need execute rights on the directory.
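A quick way to see this in action (a throwaway demo directory; the names are illustrative):

mkdir demo && echo "hello" > demo/file.txt
# Drop execute rights on the directory: the inode table can no longer be traversed
chmod a-x demo
cat demo/file.txt    # fails with "Permission denied", even though the file itself is readable
ls -l demo           # filenames can still be read, but their inode attributes cannot be stat'ed
# Restore access
chmod a+x demo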

Finding MD5 Hash on Linux

Find the MD5 hash of a file on Linux with md5sum; to hash a literal string instead, echo it without a trailing newline into md5sum:

md5sum "Your-File-Here"
echo -n "Your-String-Here" | md5sum

Network Commands

Show the hostname
hostname
hostname -s


Use scp to securely copy files between hosts
scp "source" "target"
scp host:/home/file /targetfile
# Copy a directory
scp -r host:/home/dir /targetdir


Check all open ports
netstat -a
# -n show numeric addresses and ports (no name resolution)
# -r show the routing table
# -p include the owning process
# Search for open port 389 with the corresponding process
netstat -nap | grep :389
# Show the routing table
netstat -rn

Processes

Using top to get a list of running processes and their resource usage
top
# c show the full command line
# 1 show each CPU core separately
# s set the refresh rate
# M sort by memory usage
# P sort by processor usage


Using ps to get a list of running processes
# all running processes
ps -e
# all running processes with full info
ps -ef
# all running processes with full info (-A is equivalent to -e)
ps -Af
# all running processes for a specific user
ps -u "userid"
# Start a process in the background and keep it running after the session is closed
nohup command &

Searching

Grep

grep file without empty lines and comments
grep -v '^\s*$\|^\s*\#' postgresql.conf


grep options
# -i : case insensitive
# -v : invert match
# -w : match whole word
# -r : recursive
# -l : only filenames
# -n : show line numbers
# -c : count matches
# Search for test in all files in the current directory and subdirectories and only show the filenames
grep -rlw test .


Find the Line Number of a Specific Match
grep -n <match> <file>


Find Specific String in Files - Recursive
grep -r '172.18' *

Find

Find is always recursive
# find file in current and subdirectories
sudo find . -name main.cf
# find all files starting with a and execute ls -l on them
find . -name 'a*' -exec ls -l {} \;
# find all files starting with a and execute ls -l on them, but ask for confirmation
find . -name 'a*' -ok ls -l {} \;
# Set read permissions on other for all files in /var/news
find /var/news -type f -exec chmod o+r {} \;
# Rename all files in /tmp to filename.old
find /tmp -type f -exec mv {} {}.old \;
# Find only files older than 31 days and remove them
find /tmp/archive/. -type f -mtime +31 -exec rm {} \;
Find and Disk Space Info
# Check disk usage
df -H
# Find largest directories
sudo du -Sh / | sort -rh | head -5
# Find largest files in /data
sudo find /data -type f -exec du -Sh {} + | sort -rh | head -n 10
# Find largest files in /var/lib/docker
sudo find /var/lib/docker -type f -exec du -Sh {} + | sort -rh | head -n 10
# Find largest files except in /data
sudo find / -type f -not -path "/data/*" -exec du -Sh {} + | sort -rh | head -n 10

Crontab

Display all crontabs for all users (run as root, e.g. after sudo su -):
for user in $(cut -f1 -d: /etc/passwd); do echo $user; crontab -u $user -l; done

Shell Management

Note: this is especially useful in pipelines.

set
# -e : exit the script if any command fails
# -u : treat unset variables as errors and exit
# -o pipefail : ensure the entire pipeline fails if any command within it fails
# -x : print all the commands to the terminal
# -v : print all lines without variable resolution
# - For all, to disable use +, for example `set +v`
set -euo pipefail

File Management

Long listing of files
ls -l


List files ordered by change date
ls -t


Reverse file listing order
ls -r


List files including hidden files (files starting with a dot)
ls -a


Count files in a directory
# wc: word count; -l: count lines
ls -l | wc -l


Compare two files
diff file file2


Compare two files next to each other, only differences
diff -y --suppress-common-lines file1 file2

File Management with Regex

Show file type
file "filename"


Show file type, inode number and all three dates (modify, access, change)
stat "filename"


List files with a, b or c in the name
ls [abc]


List files without a, b or c in the name
ls [!abc]


List files that start with a, b, c or d
ls [a-d]*


List files that start with a or d
ls [ad]*


List files that start with a or d and have a or e as second letter
ls [ad][ae]*


List files that start with a, A, b or B
ls [a-bA-B]*


List files that start with a or b and have at least one more character
ls [ab]?*


List files that don't end with a letter
ls *[!a-zA-Z]

Disk Management

List disk usage
# show diskspace of all directories 1 level deep in human readable format
du --max-depth=1 -h -c
# do 'du' for every existing directory
## ls -1 lists all files and directories each on a separate line
## [[ -d ]] is true if listed variable is a directory
for i in `ls -1`; do if [[ -d $i ]]; then sudo du -sm $i; fi; done

If File in Directory Then

for i in `ls -1 | grep was-`; do if [ -f $i/.run ]; then echo start slot $i; fi; done
  • ls -1 lists all files and directories each on a separate line
  • [ -f ] is true if listed file is a regular file

Variables

# Standard variables
# Prompt
PS1
# Home
HOME
# Path
PATH
# User
USER
# Add text directly after a variable (useful in scripts)
echo ${var}text
# Assign output of command to variable
x=$(command)
# Create a readonly variable
readonly var=value
# Remove / unset a variable
unset var
# Export a variable so it is also available in subshells
export var

Regular Expressions

.       Any single character
*       0 or more of the preceding character
.*      0 or more of any character
[aA]    a or A
[a-f]   a to and including f
^a      Starts with a
a$      Ends with a
^a.*b$  Everything starting with a and ending with b
^$      Empty line
Combined with grep you can use regular expressions to filter comments and empty lines out of config files. However, that takes two invocations: grep -v '^$' file | grep -v '^#'. Using egrep (or grep -E) allows you to do it in one go: egrep -v '^$|^#' file

Output Redirection

# Run a command in the background (output and errors still go to the screen)
'command' &
# Input redirection
< inputfile
# Output redirection
> outputfile
# Error redirection
2> errorfile
# Combine output and errors for redirection to a command or a file
2>&1
# Output redirection append
>> outputfile
# tee reads standard input and writes it to standard output while simultaneously copying it into the specified file or files. Example: the output of command is written to file1 and also used as input for command2:
command | tee file1 | command2

Sed

Basic sed search and replace line:
sed "s/$FIND/$REPLACE/g" $inputfile > /tmp/outputfile


Code line to make email addresses unusable by adding the extension .local to the complete email address:
sed "s/^mail.*$/&\.local/g" inputfile > outputfile

  • s : substitute
  • ^mail : all lines that start with mail
  • .*$ : and match everything after the initial match
  • & : everything that has been matched
  • \.local : Add .local (\ is for escaping the '.')
  • g : global

Running a script

There are three ways to run scripts (see the sketch after this list):

  • bash script
    • Script runs in subshell and read permissions are sufficient
  • (./)script
    • Script runs in subshell and execute permissions are required
  • . (./)script
    • Script runs in current shell and read permissions are sufficient (used to export variables)
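A minimal sketch of the difference between running in a subshell and sourcing in the current shell (demo.sh and VAR are illustrative names):

cat > demo.sh << 'EOF'
#!/bin/bash
export VAR=hello
EOF
chmod +x demo.sh
./demo.sh       # runs in a subshell: VAR disappears when the script exits
echo "$VAR"     # prints an empty line
. ./demo.sh     # runs in the current shell
echo "$VAR"     # prints hello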

Script Variables

  • $$ = The PID number of the process executing the shell.
  • $? = Exit status variable.
  • $0 = The name of the command you used to call a program.
  • $1 = The first argument on the command line.
  • $2 = The second argument on the command line.
  • $n = The nth argument on the command line.
  • $* = All the arguments on the command line.
  • $# = The number of command line arguments.
  • $@ = All arguments

The “shift” command can be used to shift command line arguments to the left, i.e. $1 takes the value of $2, $3 shifts into $2, and so on. The command “shift 2” shifts two places, meaning the new value of $1 will be the old value of $3, and so forth. See the sketch below.
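A small sketch of the positional parameters and shift in action (args.sh is an illustrative name):

#!/bin/bash
# args.sh - demonstrate positional parameters and shift
echo "Called as $0 with $# arguments: $*"
echo "First argument: $1"
shift                       # drop $1; $2 becomes $1, $3 becomes $2, ...
echo "After shift: first argument is $1, $# arguments remain"

Calling it as ./args.sh one two three prints "First argument: one" and then "After shift: first argument is two, 2 arguments remain".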

If Options

[ -a FILE ]  True if FILE exists.
[ -b FILE ]  True if FILE exists and is a block-special file.
[ -c FILE ]  True if FILE exists and is a character-special file.
[ -d FILE ]  True if FILE exists and is a directory.
[ -e FILE ]  True if FILE exists.
[ -f FILE ]  True if FILE exists and is a regular file.
[ -g FILE ]  True if FILE exists and its SGID bit is set.
[ -h FILE ]  True if FILE exists and is a symbolic link.
[ -k FILE ]  True if FILE exists and its sticky bit is set.
[ -p FILE ]  True if FILE exists and is a named pipe (FIFO).
[ -r FILE ]  True if FILE exists and is readable.
[ -s FILE ]  True if FILE exists and has a size greater than zero.
[ -t FD ]    True if file descriptor FD is open and refers to a terminal.
[ -u FILE ]  True if FILE exists and its SUID (set user ID) bit is set.
[ -w FILE ]  True if FILE exists and is writable.
[ -x FILE ]  True if FILE exists and is executable.
[ -O FILE ]  True if FILE exists and is owned by the effective user ID.
[ -G FILE ]  True if FILE exists and is owned by the effective group ID.
[ -L FILE ]  True if FILE exists and is a symbolic link.
[ -N FILE ]  True if FILE exists and has been modified since it was last read.
[ -S FILE ]  True if FILE exists and is a socket.
[ FILE1 -nt FILE2 ]  True if FILE1 has been changed more recently than FILE2, or if FILE1 exists and FILE2 does not.
[ FILE1 -ot FILE2 ]  True if FILE1 is older than FILE2, or if FILE2 exists and FILE1 does not.
[ FILE1 -ef FILE2 ]  True if FILE1 and FILE2 refer to the same device and inode numbers.
[ -o OPTIONNAME ]  True if shell option "OPTIONNAME" is enabled.
[ -z STRING ]  True if the length of "STRING" is zero.
[ -n STRING ] or [ STRING ]  True if the length of "STRING" is non-zero.
[ STRING1 == STRING2 ]  True if the strings are equal. "=" may be used instead of "==" for strict POSIX compliance.
[ STRING1 != STRING2 ]  True if the strings are not equal.
[ STRING1 < STRING2 ]  True if "STRING1" sorts before "STRING2" lexicographically in the current locale.
[ STRING1 > STRING2 ]  True if "STRING1" sorts after "STRING2" lexicographically in the current locale.
[ ARG1 OP ARG2 ]  "OP" is one of -eq, -ne, -lt, -le, -gt or -ge. These arithmetic binary operators return true if "ARG1" is equal to, not equal to, less than, less than or equal to, greater than, or greater than or equal to "ARG2", respectively. "ARG1" and "ARG2" are integers.
[ ! EXPR ]  True if EXPR is false.
[ ( EXPR ) ]  Returns the value of EXPR. This may be used to override the normal precedence of operators.
[ EXPR1 -a EXPR2 ]  True if both EXPR1 and EXPR2 are true.
[ EXPR1 -o EXPR2 ]  True if either EXPR1 or EXPR2 is true.
command  True if command returns a 0 (zero) exit status.
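A short sketch combining a few of these tests (the logfile path is just an example):

logfile=/var/log/messages
if [ -f "$logfile" ] && [ -s "$logfile" ]; then
    echo "$logfile exists and is not empty"
elif [ ! -e "$logfile" ]; then
    echo "$logfile does not exist"
fi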