Linux Password Recovery.

1) Reboot the machine, hold SHIFT while it boots to bring up the GRUB menu, and select recovery mode (then drop to a root shell).

2) Remount “/” as writable.

$ mount -o remount,rw /

3) Set a new password for the target user.

$ passwd root

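Putting the steps together, a minimal end-to-end sketch (assuming the account to reset is a hypothetical user "alice"; sync and reboot are just the usual tidy-up):

# Run from the recovery-mode root shell
$ mount -o remount,rw /
$ passwd alice
$ sync
$ reboot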

File Commands

View the End of a Log File Continually

$ tail -f /path/to/log/file.log

Control the Number of Lines Output

$ tail -n30 -f /path/to/log/file.log

View the Ends of Multiple Log files

$ tail -f /path/to/log/file.log /path/to/log/system.log

Convert Line Endings from DOS to Unix

$ sed -i 's/\r//g' file

HAProxy v1.6


HAProxy-HTTPS-Config_17061501.txt
global
        log 127.0.0.1 local0
        log 127.0.0.1 local1 notice
        user haproxy
        group haproxy
        daemon
        maxconn 2048
        tune.ssl.default-dh-param 2048
 
defaults
        log     global
        mode    http
        option forwardfor
        option http-server-close
        option  httplog
        option  dontlognull
        retries 3
        option  redispatch
        timeout connect 5000
        timeout client  10000
        timeout server  10000
        #errorfile 400 /etc/haproxy/errors/400.http
        #errorfile 403 /etc/haproxy/errors/403.http
        #errorfile 408 /etc/haproxy/errors/408.http
        #errorfile 500 /etc/haproxy/errors/500.http
        #errorfile 502 /etc/haproxy/errors/502.http
        #errorfile 503 /etc/haproxy/errors/503.http
        #errorfile 504 /etc/haproxy/errors/504.http
        stats enable
        stats uri /super-secret-haproxy-stats-url?with-extra-vars=foobar
        stats realm Strictly\ Private
        stats auth username:password
 
frontend http-domain.com
       bind *:80
       reqadd X-Forwarded-Proto:\ http
       default_backend backend-domain.com
 
frontend https-domain.com
       bind *:443 ssl crt /path/to/ssl/domain.com.pem
       reqadd X-Forwarded-Proto:\ https
       default_backend backend-domain.com
 
backend backend-domain.com
        redirect scheme https if !{ ssl_fc }
        balance leastconn
        option httpclose
        option forwardfor
        cookie PHPSESSID prefix
        server s1 server-domain-or-ip:port check cookie s1
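
Not part of the config above, but a common companion step: validate the file before reloading so a typo never takes the proxy down. Paths assume the default Ubuntu layout.

$ sudo haproxy -c -f /etc/haproxy/haproxy.cfg
$ sudo service haproxy reload
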
HAProxy-HTTPS-MultiDomain-Config_17061501.txt
global
        log 127.0.0.1 local0
        log 127.0.0.1 local1 notice
        user haproxy
        group haproxy
        daemon
        maxconn 2048
        tune.ssl.default-dh-param 2048
 
defaults
        log     global
        mode    http
        option forwardfor
        option http-server-close
        option  httplog
        option  dontlognull
        retries 3
        option  redispatch
        timeout connect 5000
        timeout client  10000
        timeout server  10000
        #errorfile 400 /etc/haproxy/errors/400.http
        #errorfile 403 /etc/haproxy/errors/403.http
        #errorfile 408 /etc/haproxy/errors/408.http
        #errorfile 500 /etc/haproxy/errors/500.http
        #errorfile 502 /etc/haproxy/errors/502.http
        #errorfile 503 /etc/haproxy/errors/503.http
        #errorfile 504 /etc/haproxy/errors/504.http
        stats enable
        stats uri /super-secret-haproxy-stats-url?with-extra-vars=foobar
        stats realm Strictly\ Private
        stats auth username:password
 
frontend http-in
	bind *:80
	reqadd X-Forwarded-Proto:\ http
	# Define hosts
	acl host_domainone_live hdr(host) -i domainone.com
	acl host_domaintwo_live hdr(host) -i domaintwo.com
 
	## figure out which one to use
	use_backend domainone_live_cluster if host_domainone_live
	use_backend domaintwo_live_cluster if host_domaintwo_live
 
frontend https-in
	# Todo - Conditional https crts
	# Solution - bind *:443 ssl crt /certs/haproxy1.pem crt /certs/haproxy2.pem 
	bind *:443 ssl crt /path/to/ssl/domainone.com.pem
	reqadd X-Forwarded-Proto:\ https
	# Define hosts
	acl host_domainone_live hdr(host) -i domainone.com
	acl host_domaintwo_live hdr(host) -i domaintwo.com
 
	## figure out which one to use
	use_backend domainone_live_cluster if host_domainone_live
	use_backend domaintwo_live_cluster if host_domaintwo_live
 
backend domainone_live_cluster
	redirect scheme https if !{ ssl_fc }
	balance leastconn
	option httpclose
	option forwardfor
	cookie PHPSESSID prefix
	server s1 domain-or-ip-server:port check cookie s1
 
backend domaintwo_live_cluster
	redirect scheme https if !{ ssl_fc }
	balance leastconn
	option httpclose
	option forwardfor
	cookie PHPSESSID prefix
	server s1 domain-or-ip-server:port check cookie s1
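
For the "conditional https crts" TODO above, one option in HAProxy 1.6 is simply listing several certificates on the bind line (or pointing crt at a directory of PEM files); HAProxy then picks the certificate by SNI. The paths below are placeholders.

	bind *:443 ssl crt /path/to/ssl/domainone.com.pem crt /path/to/ssl/domaintwo.com.pem
	# or load every .pem in a directory:
	bind *:443 ssl crt /etc/haproxy/ssl/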

Create an SSL Cert for HAProxy

# Create the store folder
mkdir /etc/haproxy/ssl
 
# Create the SSL CERT
sudo openssl req -x509 -nodes -days 3600 -newkey rsa:2048 -keyout /etc/haproxy/ssl/domain.com.key -out /etc/haproxy/ssl/domain.com.crt

Create a PEM file from SSL Cert

# Use cat to combine components of the SSL
cat /etc/haproxy/ssl/domain.com.key /etc/haproxy/ssl/domain.com.crt > /etc/haproxy/ssl/domain.com.pem
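
A quick sanity check on the combined PEM; this reads the first certificate in the file and prints its subject and validity dates.

$ openssl x509 -noout -subject -dates -in /etc/haproxy/ssl/domain.com.pem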

This is a collection of commonly used Unix commands.

Empty Log Files

$ truncate logfile --size 0
$ rm logfile
$ cat /dev/null > logfile

File Transfer

$ scp -P 55455 your_username@remotehost.edu:foobar.txt /local/dir

Complete Directory Transfer

$ scp -P PortNumber -r user@your.server.example.com:/path/to/foo /home/user/Desktop/

Compress & Decompress a File

# Ref: http://unix.stackexchange.com/questions/46969/compress-a-folder-with-tar#46971
$ tar -zcvf archive.tar.gz directory/
$ tar -zxvf archive.tar.gz 
$ cd /var/www && sudo tar czf ~/www_backups/$(date +%Y%m%d-%H%M%S).tar.gz .

Recursively Give Directories 755 Permissions

$ find /path/to/base/dir -type d -exec chmod 755 {} +

Recursively Give Files 644 Permissions

$ find /path/to/base/dir -type f -exec chmod 644 {} +

Listing USB Devices (Arduino)

$ ls /dev/tty*

Listing Files

$ ls -la 
$ ls -lsh
$ ls -lh    # Permissions and file size
$ ls -lah   # Permissions and file size on all files
$ ls -ltrh  # Permissions and file size, sorted by modification time (newest last)
$ ls -ld    # Permissions, only directories

Find Files by Name

$ find /path/to/search/dir -type f -print | grep 'SearchString'

Find Files by Extension

$ find $directory -type f -name "*.ext"

Drive Summary Commands

DF Free Space on the Filesystem.

$ df -h


Example:
Filesystem Size Used Avail Capacity Mounted on
/dev/disk0s2 148Gi 120Gi 28Gi 81% /
devfs 198Ki 198Ki 0Bi 100% /dev
map -hosts 0Bi 0Bi 0Bi 100% /net
map auto_home 0Bi 0Bi 0Bi 100% /home

Discus Free Space on the Filesystem.

# Install Discus
$ sudo apt-get install discus
# Use Discus
$ sudo discus

DU Space used by Folders under Target Path

$ du -sh /*
$ du -sh /root/*
$ du -sh /var/log/*


Example:
9.8M /var/backups
417M /var/cache
4.0K /var/crash
1.8G /var/lib
4.0K /var/local
0 /var/lock
57M /var/log
4.0K /var/mail
4.0K /var/opt
0 /var/run
9.4M /var/spool
4.0K /var/tmp
88K /var/www

Empty Large Mail System

Empty an Oversized Root Mail Dir

 # Delete all mail
$ postsuper -d ALL 
$ yes | rm -rf /root/Maildir/cur/*
# Deleting the new inbox is optional
$ yes | rm -rf /root/Maildir/new/*
 
# If the target dir is too large, rm will fail; use the command below.
$ cd /root/Maildir/cur && find . -maxdepth 1 -name "*" -print0 | xargs -0 rm
 
# CRON version
$ postsuper -d ALL && sudo yes | rm -rf /root/Maildir/cur/*

Log Rotate and Log File Commands

Empty a Log File

$ truncate -s 0 /var/log/mysql.log

Log Rotate logrotate.conf Log Size Limits

$ sudo nano /etc/logrotate.conf


minsize 100M
Log files are rotated when they grow bigger than size bytes, but not
before the additionally specified time interval (daily, weekly,
monthly, or yearly). The related size option is similar except that
it is mutually exclusive with the time interval options, and it
causes log files to be rotated without regard for the last rotation
time. When minsize is used, both the size and timestamp of a log
file are considered.

size 100M
Log files are rotated only if they grow bigger than size bytes. If
size is followed by k, the size is assumed to be in kilobytes. If
the M is used, the size is in megabytes, and if G is used, the size
is in gigabytes. So size 100, size 100k, size 100M and size 100G
are all valid.

maxsize 250M
Log files are rotated when they grow bigger than size bytes even
before the additionally specified time interval (daily, weekly,
monthly, or yearly). The related size option is similar except that
it is mutually exclusive with the time interval options, and it
causes log files to be rotated without regard for the last rotation
time. When maxsize is used, both the size and timestamp of a log
file are considered.
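
As a sketch, a per-log stanza combining a time interval with maxsize might look like the following (the path and retention values are placeholders; this would typically live in /etc/logrotate.d/).

/var/log/mysql.log {
        weekly
        maxsize 250M
        rotate 4
        compress
        missingok
        notifempty
}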

OS Commands

Unix OS Version

$ cat /etc/*release*

Restart

$ sudo reboot

Shutdown

$ sudo poweroff

Find a Service's Binary Path

$ which ServiceName

Find, Partition and Mount a New Drive

# Find the Disk Volume by Listing
fdisk -l
# Example Output:
Disk /dev/sdc: 120 GiB, 128849018880 bytes, 251658240 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
 
# Add Drive, Partition
sudo parted /dev/sdc mklabel gpt
sudo parted -a opt /dev/sdc mkpart primary ext4 0% 100%
sudo mkfs.ext4 /dev/sdc1   # format the partition just created, not the raw disk
sudo mkdir -p /mnt/volume-120gb
echo '/dev/sdc1 /mnt/volume-120gb ext4 defaults,nofail,discard 0 2' | sudo tee -a /etc/fstab
sudo mount -a
 
# Test
findmnt /mnt/volume-120gb
echo 'success' | sudo tee /mnt/volume-120gb/test_file
cat /mnt/volume-120gb/test_file
sudo rm /mnt/volume-120gb/test_file

Remove Drive, Partition

sudo umount /mnt/volume-nyc1-01-part1
sudo nano /etc/fstab
sudo rmdir /mnt/volume-nyc1-01-part1

PHP Commands

PHP Version and Info

$ php -i

Bash Script Executable by PHP

# Give the script execute permission so PHP's exec()/shell_exec() can run it
# (the setuid bit is ignored on shell scripts under Linux)
$ chmod +x file

PHP Location

$ which php

Globally Install Composer (Ubuntu 14.04)

$ cd /tmp
$ curl -sS https://getcomposer.org/installer | php
$ sudo mv composer.phar /usr/local/bin/composer

PHP Clear Cache From Console

$ php -r "apc_clear_cache(); apc_clear_cache('user'); apc_clear_cache('opcode');"


Allow or Block DNS Record Based Access With CSF Firewall, Good for WordPress.

/**
 * Custom Security
 */
function getClientIp() {
    $ip = false;
 
    if ( isset($_SERVER['HTTP_X_CLUSTER_CLIENT_IP']) && !empty($_SERVER['HTTP_X_CLUSTER_CLIENT_IP']) ) $ip = $_SERVER['HTTP_X_CLUSTER_CLIENT_IP'];
    else if ( isset($_SERVER['HTTP_X_FORWARDED_FOR']) && !empty($_SERVER['HTTP_X_FORWARDED_FOR']) ) $ip = $_SERVER['HTTP_X_FORWARDED_FOR'];
    else if ( isset($_SERVER['HTTP_CF_CONNECTING_IP']) && !empty($_SERVER['HTTP_CF_CONNECTING_IP']) ) $ip = $_SERVER['HTTP_CF_CONNECTING_IP'];
    else if ( isset($_SERVER['REMOTE_ADDR']) && !empty($_SERVER['REMOTE_ADDR']) ) $ip = $_SERVER['REMOTE_ADDR'];
 
    return $ip;
}
 
$clientIP = getClientIp();
//echo $clientIP . '<br>';
if (
    $clientIP != '*.*.*.*'
    && $clientIP != gethostbyname('example.com')
    && $clientIP != gethostbyname('example.co.uk')
) {
    echo 'Blocked...';
    $output = shell_exec('csf -td ' . $clientIP . ' 604800 -p22,80,443,10000,55455 -d inout bad');
 
    // Email notification
    $to = "admin@example.com"; // placeholder address
    $subject = "WordPress IP Blacklisted - " . $clientIP;
    $message = "Server just blacklisted {$clientIP} for accessing the login screen. Message: {$output}. Look this IP up here, http://www.infosniper.net/index.php?ip_address={$clientIP}";
    $from = "noreply@example.com"; // placeholder address
    $headers = "From:" . $from;
    mail($to,$subject,$message,$headers);
 
    die();
}

Composer Commands

Install Git Repo as a Dependency in Composer

{
    "repositories": [
        {
            "url": "https://github.com/target/repo.git",
            "type": "git"
        }
    ],
    "require": {
        "target/branch": "~2.3"
    }
}
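
With the repository block in place, the dependency is pulled in the usual way ("target/branch" above stands in for the package name declared in the repo's own composer.json):

$ composer update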

GIT Commands

Basic GIT

# GIT List Remotes
git remote -v
# GIT Remove Remotes
git remote rm origin

GIT Setup

# GIT Create New Repo (No Existing Files)
git clone http://git.url/to/repo
cd repo
touch README.md
git add README.md
git commit -m "add README"
git push -u origin master
# GIT Use Existing Repo (Existing Files)
cd existing_folder
git init
git remote add origin http://git.url/to/repo
git add .
git commit
git push -u origin master

Process Commands

Query a Process’s Status and Current Task

$ ps aux | grep process-name

Kill Process By Name

$ pkill soffice.bin

Show Process ID Attached to Port

$ fuser 80/tcp

Kill Process Attached to Port

$ fuser -k 80/tcp

List Running Processes by Memory Usage

$ top
Shift + m
# Docs, http://www.computerhope.com/unix/top.htm

TOP output to console for other apps to use.

# -b, output to console
# -o RES, sort by memory usage.
# -n 1, number of loops. Only 1 is needed.
$ top -bo RES -n1

TOP output to file.

$ top -bo RES -n1 > /tmp/top_output.txt

A better version of Top, Htop

# May require installing
$ apt-get install htop
$ htop
Shift + m

Upstart: Prevent Auto Start

# This will prevent Upstart from auto-starting the process
$ echo manual | sudo tee /etc/init/varnish.override
# Remove the override, init.d will continue working
$ rm /etc/init/varnish.override

Mail Commands

Postfix for Sending Externally (Don't Forget)

apt-get install postfix

Mail Install

$ apt-get update
$ apt-get install mailutils

Mail Test

#Then pipe message to the command:
echo testing | mail -s test_subject myemail@example.com

Mail Via MPack

apt-get install mpack
mpack -s "file you wanted" ./data.pdf example@example.com

Mail Via Sendgrid and CURL

curl --request POST \
  --url https://api.sendgrid.com/v3/mail/send \
  --header 'Authorization: Bearer *************THE_LARGE_API_KEY*************' \
  --header 'Content-Type: application/json' \
  --data '{"personalizations": [{"to": [{"email": "to@example.com"}]}],"from": {"email": "from@example.com"},"subject": "Hello, World!","content": [{"type": "text/plain", "value": "Heya!"}]}'

CentOS Commands

CentOS Shutdown / Restart

$ /sbin/shutdown -h now
$ /sbin/shutdown -r now

CentOS Restart/Clear Cache Apache

$ sudo service apache2 graceful
$ sudo service httpd graceful

MySQL Commands

MySQL Login

$ mysql -u UserName -p

Dump Database to File

$ mysqldump -u root -pRootPassword DatabaseName > dumpfilename.sql

Dump Database Schema to File

$ mysqldump -u root -pRootPassword DatabaseName --no-data > dumpfilename.sql

Import a File to a Database

$ mysql -u root -pRootPassword DatabaseName < dumpfilename.sql
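
Not in the original notes, but a common variant compresses the dump in one pipe and restores it the same way:

$ mysqldump -u root -pRootPassword DatabaseName | gzip > dumpfilename.sql.gz
$ gunzip < dumpfilename.sql.gz | mysql -u root -pRootPassword DatabaseName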

OPCache Config

Production
[opcache]
; Determines if Zend OPCache is enabled
opcache.enable=1
 
; Determines if Zend OPCache is enabled for the CLI version of PHP
opcache.enable_cli=1
 
; The OPcache shared memory storage size.
;opcache.memory_consumption=64 ; 96, 128, 256
 
; The amount of memory for interned strings in Mbytes.
opcache.interned_strings_buffer=8
 
; The maximum number of keys (scripts) in the OPcache hash table.
; Only numbers between 200 and 100000 are allowed.
opcache.max_accelerated_files=7000
 
; The maximum percentage of "wasted" memory until a restart is scheduled.
;opcache.max_wasted_percentage=5
 
; When this directive is enabled, the OPcache appends the current working
; directory to the script key, thus eliminating possible collisions between
; files with the same name (basename). Disabling the directive improves
; performance, but may break existing applications.
;opcache.use_cwd=1
 
; When disabled, you must reset the OPcache manually or restart the
; webserver for changes to the filesystem to take effect.
opcache.validate_timestamps=0
 
; How often (in seconds) to check file timestamps for changes to the shared
; memory storage allocation. ("1" means validate once per second, but only
; once per request. "0" means always validate)
opcache.revalidate_freq=21600
 
; Enables or disables file search in include_path optimization
;opcache.revalidate_path=0
 
; If disabled, all PHPDoc comments are dropped from the code to reduce the
; size of the optimized code.
opcache.save_comments=0
 
; If disabled, PHPDoc comments are not loaded from SHM, so "Doc Comments"
; may be always stored (save_comments=1), but not loaded by applications
; that don't need them anyway.
;opcache.load_comments=1
 
; If enabled, a fast shutdown sequence is used for the accelerated code
opcache.fast_shutdown=1
 
; Allow file existence override (file_exists, etc.) performance feature.
;opcache.enable_file_override=0
 
; A bitmask, where each bit enables or disables the appropriate OPcache
; passes
;opcache.optimization_level=0xffffffff
 
;opcache.inherited_hack=1
;opcache.dups_fix=0
 
; The location of the OPcache blacklist file (wildcards allowed).
; Each OPcache blacklist file is a text file that holds the names of files
; that should not be accelerated. The file format is to add each filename
; to a new line. The filename may be a full path or just a file prefix
; (i.e., /var/www/x  blacklists all the files and directories in /var/www
; that start with 'x'). Line starting with a ; are ignored (comments).
;opcache.blacklist_filename=
 
; Allows exclusion of large files from being cached. By default all files
; are cached.
;opcache.max_file_size=0
 
; Check the cache checksum each N requests.
; The default value of "0" means that the checks are disabled.
;opcache.consistency_checks=0
 
; How long to wait (in seconds) for a scheduled restart to begin if the cache
; is not being accessed.
opcache.force_restart_timeout=20
 
; OPcache error_log file name. Empty string assumes "stderr".
;opcache.error_log=
 
; All OPcache errors go to the Web server log.
; By default, only fatal errors (level 0) or errors (level 1) are logged.
; You can also enable warnings (level 2), info messages (level 3) or
; debug messages (level 4).
opcache.log_verbosity_level=1
 
; Preferred Shared Memory back-end. Leave empty and let the system decide.
;opcache.preferred_memory_model=
 
; Protect the shared memory from unexpected writing during script execution.
; Useful for internal debugging only.
;opcache.protect_memory=0
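
A quick way to confirm which of these settings the running PHP build actually picked up (this checks the CLI; the web SAPI may load a different ini):

$ php -i | grep -i opcache.enable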

User Commands

Create User Home DIR

$ sudo mkdir /home/username && sudo chown username:username /home/username

Create a User with an Existing Primary Group

$ useradd -g {group-name} username
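
To add an already-existing user to a supplementary group instead, usermod is the usual tool:

$ sudo usermod -aG {group-name} username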

Change User Password

$ passwd userName

Change User Terminal

$ sudo usermod -s /bin/zsh username

Add user to Sudoers

$ nano /etc/sudoers
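
A typical line to add ("username" is a placeholder); editing via sudo visudo is safer because it syntax-checks the file:

username ALL=(ALL:ALL) ALL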

Switch User

$ su - username

Delete User

$ sudo userdel username
$ sudo rm -rf /home/username
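
Alternatively, userdel can remove the home directory and mail spool in one step:

$ sudo userdel -r username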

Apt-Get Commands

Apt-Get Update Repos

$ apt-get update

Apt-Get Install Package

$ apt-get install [Package Name]

Apt-Get Upgrade Check Only

# -s simulates the upgrade; nothing is actually installed
$ apt-get -s upgrade

Apt-Get Search for Something.

$ apt-cache search phpforexample

Nmap Commands

Print Status of Port 80 for the IP Range

$ nmap -sS -sV -vv -n -Pn -T5 xxx.xxx.xxx.1-255 -p80 -oG -
Example Output :
Host: xxx.xxx.xxx.228 () Ports: 80/open/tcp//http//mini_httpd 1.17beta1 26may2002/
Host: xxx.xxx.xxx.229 () Status: Up
Host: xxx.xxx.xxx.229 () Ports: 80/filtered/tcp//http///
Host: xxx.xxx.xxx.230 () Status: Up

Screen Commands

Always use screen when working at the console; it lets you multitask and protects long-running jobs from connection interruptions.

Multitasking Console Controls

# New session with an id of sessionname
$ screen -S sessionname
# Detach the session: CTRL + A + D
# Reconnect to a session
$ screen -r sessionname
# List all screen sessions
$ screen -list

Key Shortcuts

CTRL + A + C, Create a new session.
CTRL + A + N, Next session.
CTRL + A + P, Previous session.
CTRL + A + D, Detach session (Exit but will remain open).
CTRL + A + K, Kill the session.

Serial monitor (USB Device)

$ screen /dev/ttyACM1 9600

Telnet/Pentest Commands

Telnet Connect HTTP(80)

$ telnet www.httpbin.org 80

HEAD Only

# Typed inside the telnet session; follow with a blank line to send
HEAD / HTTP/1.0

GET HTTP/1.0

# Typed inside the telnet session; follow with a blank line to send
GET /ip HTTP/1.0

Example Output:
HTTP/1.1 200 OK
Server: nginx
Date: Tue, 04 Oct 2016 11:31:45 GMT
Content-Type: application/json
Content-Length: 31
Connection: close
Access-Control-Allow-Origin: *
Access-Control-Allow-Credentials: true

{
"origin": "31.24.217.94"
}
Connection closed by foreign host.

Parse and Mongo Commands

Parse Server Start

$ parse-server --appId xxxx --masterKey xxxx

Mongo Admin Start

$ npm start

Mongo Start

$ service mongodb start

Update, install and start

$ git pull
$ npm install
$ npm start

CURL Commands

API Console command Examples

# Get the machines public ip address
curl -s http://icanhazip.com
# Download a file
curl -fsSL https://url.to/the/target.file
# Cloudflare, list subdomain records for a registered domain.
curl https://www.cloudflare.com/api_json.html \
    -d 'a=rec_load_all' \
    -d 'tkn=your-api-key' \
    -d 'email=your-account-email' \
    -d 'z=your-domain-name.com'
# Cloudflare, update the IP address for a registered subdomain.
curl https://www.cloudflare.com/api_json.html \
  -d a=rec_edit \
  -d tkn=$cfkey \
  -d email=$cfuser \
  -d z=$domain \
  -d id=$cfid \
  -d type=A \
  -d name=$cfhost \
  -d ttl=1 \
  -d "content=$WAN_IP"

OpenWrt Commands

OPKG Basics

opkg update
opkg install distribute

Fix for opkg "Collected errors: * parse_from_stream_nomalloc: Missing new line character at end of file!"

rm -rf /etc/opkg/*

PIP Install

opkg install python-openssl
opkg install python-bzip2
easy_install pip

Maintenance Snippets


Time Sync Server Addresses (full server listing at pool.ntp.org)

    # UK - GMT
    server 0.uk.pool.ntp.org
    server 1.uk.pool.ntp.org
    server 2.uk.pool.ntp.org
    server 3.uk.pool.ntp.org
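
To apply them, add the server lines to the NTP config and restart the service (assuming the standard ntp package on Ubuntu/Debian):

$ sudo nano /etc/ntp.conf
$ sudo service ntp restart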


Dirt Cleanup Script

#!/bin/bash
# Script to help with server cleanup
# Running the mail purge and log deletion from here
# prevents the "Y" confirmation prompt from blocking the process.
#rm -rf /var/log/syslog >/dev/null 2>&1
#rm -rf /var/log/syslog.1 >/dev/null 2>&1
#rm -rf /var/log/syslog.1.gz >/dev/null 2>&1
#rm -rf /var/log/mail.log >/dev/null 2>&1
#rm -rf /var/log/mail.log.1 >/dev/null 2>&1
#rm -rf /var/log/mail.log.2.gz >/dev/null 2>&1
postsuper -d ALL && yes | rm -rf /root/Maildir/cur/* >/dev/null 2>&1
printf "Cleaned"
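
To run the cleanup unattended, a crontab entry along these lines works (the script path is a placeholder; this runs it at 03:00 every Sunday):

0 3 * * 0 /root/scripts/cleanup.sh >/dev/null 2>&1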