Skip to content

Instantly share code, notes, and snippets.

@dca-b
Created January 9, 2023 03:12
Show Gist options
  • Select an option

  • Save dca-b/c09ac8ddc3369d76ff4be0a58263c3d4 to your computer and use it in GitHub Desktop.

Select an option

Save dca-b/c09ac8ddc3369d76ff4be0a58263c3d4 to your computer and use it in GitHub Desktop.
################################################################################
# Important commands
################################################################################
# Copy folder/file over SSH
# -a archive, -v verbose, -z compress; -e overrides the remote shell so rsync
# can reach a non-standard SSH port (2012). Uncomment the --exclude flags to
# skip logs/caches.
rsync -avz /home/domains/a.com -e "ssh -p 2012" root@a.b.c.d:/home/domains/ # --exclude logs/ --exclude cache/cache/ --exclude '*.log'
# Start screen after su to another user
# screen normally fails after `su` because the tty is still owned by the
# original user; `script /dev/null` allocates a fresh pty owned by the new user.
su - someuser
script /dev/null
screen
# Permissions
# Recursively reset standard perms: 755 for dirs, 644 for files.
# -print0 | xargs -0 is NUL-delimited, so paths with spaces/newlines are safe.
find . -type d -print0 | xargs -0 chmod 755 && find . -type f -print0 | xargs -0 chmod 644
# Password
# Feed the new password twice (entry + confirmation) non-interactively.
# printf produces exactly the same bytes as the old `echo -e -n`, but echo's
# -e/-n flags are non-portable across shells, so printf is preferred.
# NOTE(review): many passwd builds read from the tty, not stdin — where this
# fails, use `echo 'user:NewPass' | chpasswd` instead.
printf 'NewPass\nNewPass' | passwd
########################################
# MySQL
########################################
# Live view (every 4s) of non-idle queries, longest-running first; info is
# flattened (newlines removed) and truncated to 120 chars for readability.
watch -n 4 ' mysql -t -e "SELECT time, state, user, SUBSTRING(REPLACE(info,\"\n\",\"\"),1,120) info FROM information_schema.processlist WHERE command!=\"Sleep\" AND time>0 ORDER BY time DESC, id " '
# Per-schema size and reclaimable free space (paste into the mysql client):
SELECT
table_schema "DB",
sum(data_length + index_length) / 1024 / 1024 "DB size (MB)",
sum(data_free) / 1024 / 1024 "Free reclaimable space (MB)"
FROM information_schema.TABLES
GROUP BY table_schema;
SHOW ENGINE INNODB STATUS\G
# Optimize tables
mysql -e "optimize table dson.user_item" # Slower. MySQL 5.6.17 supports online optimize.
# pt-online-schema-change rebuilds the table online; --critical-load throttles
# when Threads_running exceeds the threshold.
pt-online-schema-change --critical-load="Threads_running=1000" --alter-foreign-keys-method=auto --set-vars="foreign_key_checks=0" --alter "ENGINE=InnoDB" D=dson,t=user_mail --execute ; noti # Recommend
# - Add noti() to ~/.bashrc
# - source ~/.bashrc
# - Add " ; noti" to the end of command to notify
# Add index
pt-online-schema-change --critical-load="Threads_running=1000" --alter-foreign-keys-method=auto --set-vars="foreign_key_checks=0" --alter "ADD INDEX(date_created)" D=dson,t=user_dungeon --dry-run # For mysql < 5.6 only
mysql -e "alter table dson.user_event_details add index idx_eventid (event_id)" ; noti # MySQL 5.6 has support for online index updates
# Partitioning
# The PK must include the partitioning column, hence the PK change first;
# --no-check-alter is required when redefining the primary key.
pt-online-schema-change --critical-load="Threads_running=1000" --alter "DROP PRIMARY KEY, ADD PRIMARY KEY(id,user_id)" D=dson,t=user_mail --no-check-alter --execute ; noti
pt-online-schema-change --critical-load="Threads_running=1000" --alter "PARTITION BY HASH(user_id) PARTITIONS 50" D=dson,t=user_mail --execute ; noti
##
# Update big string
##
# Dump the current value to a file (-N no column headers, -e execute).
mysql -Ne " select v from dson_main.config_api where k='test1' " > /tmp/a.sql
vi /tmp/a.sql
# Replace newline: :%s/\\n/\r/gc
# Then, still in vi, wrap the edited value in an UPDATE statement like this:
+++
# update dson_main.config_api set v='
# ..........................
# ... updated big string ...
# ..........................
#'
# where k='test1';
+++
# Apply the UPDATE and verify the new value.
mysql < /tmp/a.sql
mysql -e " select v from dson_main.config_api where k='test1' "
# Current dirty pages
mysql -e "show global status like '%dirty%';"
# One way to decrease the shutdown time is to pre-flush the dirty pages
# Setting the dirty-pages target to 0 forces flushing; raising io_capacity
# lets the background flusher use more IOPS to get there faster.
mysql -e "set global innodb_max_dirty_pages_pct = 0;"
mysql -e "set global innodb_io_capacity = 4000;"
# Watch the dirty-page gauge every 2 seconds ("ext" = extended-status).
mysqladmin ext -i 2 | grep Innodb_buffer_pool_pages_dirty
# And wait until it approaches zero.
# If the server is being actively used, it won’t get to zero.
# Once it’s pretty low, you can perform the shutdown.
mysqladmin status
# Uptime: 118290 Threads: 989 Questions: 1495072952 Slow queries: 0 Opens: 12708 Flush tables: 4 Open tables: 4778 Queries per second avg: 12639.047
mysqladmin extended-status
# crontab
# Every 5 minutes: log per-user connection counts from the processlist;
# every 2 minutes: log the InnoDB dirty-page gauge. Both append to
# mysql-debug.log with a timestamp. $(...) is used instead of backticks
# for consistency (second entry already used it) and readability.
*/5 * * * * echo $(date) $(mysql -e "select concat (user,':',C) as X from (select user, count(*) C from information_schema.processlist group by user order by C desc) S1") >> mysql-debug.log
*/2 * * * * echo $(date) $(mysqladmin extended-status | grep "Innodb_buffer_pool_pages_dirty") >> mysql-debug.log
# Clean data before big event [02] days at least
# --
##
# Restore data of one specified user
##
# OP Viet: dson.user_material moved to db2 below
####
# db
tar xf mydumper_22.04.09_04.tar
cd export-*
mkdir restore_20220409
mv -t restore_20220409 \
metadata \
dson.user_hero.sql.gz dson.user_hero-schema.sql.gz \
dson.user_artifact.sql.gz dson.user_artifact-schema.sql.gz \
dson.user_item.sql.gz dson.user_item-schema.sql.gz \
dson.user_pet.sql.gz dson.user_pet-schema.sql.gz \
dson.user_data.sql.gz dson.user_data-schema.sql.gz \
dson.user.sql.gz dson.user-schema.sql.gz
cd restore_*
gzip -d *.gz
ls -la
head -n 4 dson.user_hero.sql > dson.user_hero.0.sql
head -n 4 dson.user_artifact.sql > dson.user_artifact.0.sql
head -n 4 dson.user_item.sql > dson.user_item.0.sql
head -n 4 dson.user_pet.sql > dson.user_pet.0.sql
head -n 4 dson.user_data.sql > dson.user_data.0.sql
head -n 4 dson.user.sql > dson.user.0.sql
grep ",403752," dson.user_hero.sql >> dson.user_hero.0.sql
grep "(403752," dson.user_artifact.sql >> dson.user_artifact.0.sql
grep "(403752," dson.user_item.sql >> dson.user_item.0.sql
grep "(403752," dson.user_pet.sql >> dson.user_pet.0.sql
grep "(403752," dson.user_data.sql >> dson.user_data.0.sql
grep "(403752," dson.user.sql >> dson.user.0.sql
grep -E ",455260,|,314247," dson.user_hero.sql >> dson.user_hero.0.sql
grep -E "\(455260,|\(314247," dson.user_artifact.sql >> dson.user_artifact.0.sql
grep -E "\(455260,|\(314247," dson.user_item.sql >> dson.user_item.0.sql
grep -E "\(455260,|\(314247," dson.user_pet.sql >> dson.user_pet.0.sql
grep -E "\(455260,|\(314247," dson.user_data.sql >> dson.user_data.0.sql
grep -E "\(455260,|\(314247," dson.user.sql >> dson.user.0.sql
wc -l *.0.sql
rm dson.user_hero.sql \
dson.user_artifact.sql \
dson.user_item.sql \
dson.user_pet.sql \
dson.user_data.sql \
dson.user.sql
sed -i -E "s/\);$/\),/g" *.0.sql # replace end of lines: ');' -> '),'
sed -i '$ s/,$/;/' *.0.sql # replace last char: ',' -> ';'
myloader -u root -p "pass" -h "db" -P 3307 -d /home/backupdb/backup/export-20220409-040001/restore_20220409 -B dson_20220409 -o
#####
# db2
# Same selective-restore procedure as the "db" section above, but only for
# dson.user_material, which lives on db2.
tar xf mydumper_22.04.09_04.tar
cd export-*
mkdir restore_20220409
mv -t restore_20220409 \
metadata \
dson.user_material.sql.gz dson.user_material-schema.sql.gz
cd restore_*
gzip -d *.gz
ls -la
# Keep the 4-line dump preamble, then append only the target users' rows.
head -n 4 dson.user_material.sql > dson.user_material.0.sql
grep "(403752," dson.user_material.sql >> dson.user_material.0.sql
grep -E "\(455260,|\(314247," dson.user_material.sql >> dson.user_material.0.sql
wc -l *.0.sql
rm dson.user_material.sql
# Re-terminate the rows so the filtered file is one valid INSERT statement.
sed -i -E "s/\);$/\),/g" *.0.sql # replace end of lines: ');' -> '),'
sed -i '$ s/,$/;/' *.0.sql # replace last char: ',' -> ';'
# Load into scratch schema dson_20220409 on db2; -o overwrites existing tables.
myloader -u root -p "pass" -h "db2" -P 3307 -d /home/backupdb/backup_db2/export-20220409-040001/restore_20220409 -B dson_20220409 -o
##
# dson.user_hero id overflow (estimate: 2h)
##
# Rebuild user_hero with a fresh auto-increment id (old value preserved in
# old_id), then swap tables via RENAME. PK is (id, user_id) so the table can
# be partitioned by HASH(user_id).
# create table dson.user_hero_FOR_id_overflow ... (partition, indexes, grant)
mysql -e "create table if not exists dson.user_hero_FOR_id_overflow ( id int auto_increment, user_id int default 0 not null, hero_id int default 0 not null, level int default 0 not null, tier int default 0 not null, star int default 0 not null, item varchar(100) default '[0,0,0,0,0,0]' not null, skills varchar(100) default '[0,0,0,0,0,0,0,0,0,0]' null, gems varchar(100) default '[0,0,0,0,0,0,0,0,0,0]' null, treasure int default 0 not null, treasure_effect int default 0 not null, treasure_index int default 0 not null, special_item varchar(100) null, locked int default 0 not null, locked_crystal int default 0 not null, locked_trial int default 0 not null, locked_vodau int default 0 null, date_created timestamp default CURRENT_TIMESTAMP not null, hero_void varchar(255) charset utf8 default '' null, old_id int null, primary key (id, user_id) ) engine = InnoDB partition by hash (user_id) partitions 50"
mysql -e "create index idx_level on dson.user_hero_FOR_id_overflow (level)"
mysql -e "create index idx_userid on dson.user_hero_FOR_id_overflow (user_id)"
# Copy all rows; id is omitted from the column list so it is regenerated,
# and the original id is written into old_id.
mysql -e "INSERT into dson.user_hero_FOR_id_overflow (old_id, user_id, hero_id, level, tier, star, item, skills, treasure, treasure_effect, treasure_index, special_item, locked, locked_crystal, locked_trial, locked_vodau, date_created, hero_void) SELECT id, user_id, hero_id, level, tier, star, item, skills, treasure, treasure_effect, treasure_index, special_item, locked, locked_crystal, locked_trial, locked_vodau, date_created, hero_void from dson.user_hero" ; noti
# Swap: keep the old table under a dated name until the new one is verified.
mysql -e "rename table dson.user_hero to dson.user_hero_20230106"
mysql -e "rename table dson.user_hero_FOR_id_overflow to dson.user_hero"
# Sanity check: compare the top id/old_id of the new table vs. the backup.
mysql -e "select id, old_id from dson.user_hero order by id desc limit 1; select id, old_id from dson.user_hero_20230106 order by id desc limit 1;"
mysql -e "drop table dson.user_hero_20230106" ; noti
# NOTE(review): presumably user_interior_hero references the old hero ids and
# must be cleared after the rebuild — confirm with application owners.
mysql -e "truncate table dson.user_interior_hero"
# tail mysql for CRUD
# Sniff statements on the wire (port 3307), e.g. to confirm traffic resumed.
tcpdump -i any -s 0 -l -w - dst port 3307 | strings | grep -i -E "^(SELECT|UPDATE|DELETE|INSERT)"
##
# Manual truncation of undo tablespaces (undo files are too big)
##
# (Statements below run inside the mysql client.)
# NOTE(review): CREATE/DROP UNDO TABLESPACE requires MySQL 8.0.14+ — confirm.
# First list the undo files and their current state.
SELECT TABLESPACE_NAME, FILE_NAME FROM INFORMATION_SCHEMA.FILES WHERE FILE_TYPE LIKE 'UNDO LOG';
SELECT NAME, STATE FROM INFORMATION_SCHEMA.INNODB_TABLESPACES WHERE NAME LIKE '%undo%';
# Swap undo files (innodb_undo_[001,002] <-> undo[003,004])
# Create a temporary pair so at least two undo tablespaces stay active while
# the default pair is set INACTIVE (which truncates its files), then bring
# the default pair back.
CREATE UNDO TABLESPACE undo_003 ADD DATAFILE 'undo_003.ibu';
CREATE UNDO TABLESPACE undo_004 ADD DATAFILE 'undo_004.ibu';
ALTER UNDO TABLESPACE innodb_undo_001 SET INACTIVE;
ALTER UNDO TABLESPACE innodb_undo_002 SET INACTIVE;
ALTER UNDO TABLESPACE innodb_undo_001 SET ACTIVE;
ALTER UNDO TABLESPACE innodb_undo_002 SET ACTIVE;
# Retire the temporary pair; a tablespace must be INACTIVE (empty) to DROP it.
ALTER UNDO TABLESPACE undo_003 SET INACTIVE;
ALTER UNDO TABLESPACE undo_004 SET INACTIVE;
DROP UNDO TABLESPACE undo_003;
DROP UNDO TABLESPACE undo_004;
########################################
# sysdig
########################################
# https://github.com/draios/sysdig/wiki/Sysdig-Examples
# Memcached: monitor
sysdig -c memcachelog get
sysdig -c memcachelog 'set 0'
sysdig fd.sport=80 # dump events whose fd source port is 80
sysdig -cl # available chisels
sysdig -i spy_ip # chisel info
sysdig -c topprocs_file # See the top processes in terms of disk bandwidth usage
sysdig -c topfiles_bytes # See the top files in terms of read+write bytes
sysdig -c spy_file # show file I/O activity as it happens
sysdig -c topconns # top network connections
sysdig -c topprocs_net # top processes by network usage
sysdig -c httplog # log HTTP requests
sysdig -c httptop # top HTTP requests
# Dump full payloads for port 7001 (-A printable ASCII, -pc container-aware)
sysdig -A -pc -c echo_fds fd.port=7001
sysdig -pc -c httplog fd.port=7001
sysdig -c topprocs_cpu # top processes by CPU
sysdig -c netstat # netstat-like listing of connections
########################################
# tcpdump
########################################
tcpdump -c 10 -A -i lo 'port 8081' # debug http headers of a localhost backend port 8081
# The -c option captures X number of packets and then stops
# SQL logging
# Reassemble SQL statements from the packet stream: a line starting with a
# SQL verb begins a new statement (printing the previous one); other lines
# are treated as continuations and appended to the pending statement.
# (-s 0 full packets, -l line-buffered, -w - raw dump to stdout.)
tcpdump -i any -s 0 -l -w - dst port 3307 | strings | perl -e '
while(<>) { chomp; next if /^[^ ]+[ ]*$/;
if(/^(SELECT|UPDATE|DELETE|INSERT|SET|COMMIT|ROLLBACK|CREATE|DROP|ALTER)/i) {
if (defined $q) { print "$q\n"; }
$q=$_;
} else {
$_ =~ s/^[ \t]+//; $q.=" $_";
}
}'
########################################
# Redis
########################################
# dump & restore a key
# DUMP emits a binary payload; head -c-1 (GNU) drops the trailing newline
# added by --raw output, which would otherwise corrupt the payload.
redis-cli --raw dump event_master | head -c-1 > event_master.txt
# -x passes stdin as the last argument of RESTORE; trailing 0 = no TTL.
cat event_master.txt | redis-cli -h local.redis -x restore event_master_139 0
# Backup key T:event:1, clone key T:event:2 to T:event:1
redis-cli rename T:event:1 T:event:1:bak && redis-cli --raw dump T:event:2 | head -c-1 | redis-cli -x restore T:event:1 0
watch -n 10 "redis-cli -h redis2 llen event_master"
# Bulk-delete keys by pattern; UNLINK reclaims memory asynchronously.
redis-cli --scan --pattern "*EventDaoImpl.selectAllAvailable(*" | xargs -d '\n' redis-cli unlink
redis-cli monitor
redis-cli info
redis-cli info memory
redis-cli -n 0 dbsize
redis-cli --stat
redis-cli --bigkeys
# List connected clients, then count those whose address contains ".32.31:".
# The pattern must be quoted: unquoted, the shell strips the backslashes and
# grep receives ".32.31:" where each dot matches ANY character. grep -c also
# replaces the grep | wc -l pipeline.
redis-cli client list
redis-cli client list | grep -c '\.32\.31:'
########################################
# Network
########################################
# Sockets on port 9020: -p process, -a all, -n numeric, -t tcp, -4 IPv4 only.
ss -pant4 | grep ':9020 '
# Counts of established vs. time-wait TCP connections, refreshed by watch.
watch "ss -tan state established | wc -l && ss -tan state time-wait | wc -l"
# Per-connection bandwidth on ens4: -P show ports, -N/-n numeric, -f bpf filter.
iftop -PNn -i ens4 -f tcp
# Only traffic to/from 192.168.32.6.
iftop -P -i ens4 -F 192.168.32.6/32
pktstat -BFTt -w 6 -i ens4
pktstat -BTnt -w 2 -i eth0
########################################
# SSH
########################################
# Firewall
# host1/host2 and y1/y2 are placeholders. The body is single-quoted, so $y
# expands on the remote host, not locally.
for x in host1 host2; do \
echo $x; \
ssh $x 'for y in y1 y2; do echo $y; sudo ufw allow from 192.168.32.$y; done'; \
done
# Add hosts
# Remote-side redirection: the >> happens on each target host's /etc/hosts.
for x in host1 host2; do \
echo $x; \
ssh $x 'echo 192.168.32.2 cms >> /etc/hosts'; \
ssh $x 'echo 192.168.32.2 api >> /etc/hosts'; \
done
# Change a host
for x in host1 host2; do \
echo $x; \
ssh $x 'sed -i "s/192.168.32.4 mongo/192.168.32.24 mongo/" /etc/hosts'; \
done
# For JMX
# Double quotes here so $x (the hostname) expands LOCALLY before being sent;
# appends the hostname to the 127.0.0.1 line so it resolves to localhost.
for x in host1 host2; do \
echo $x; \
ssh $x " sed -i 's/^127.0.0.1\tlocalhost$/127.0.0.1\tlocalhost $x/' /etc/hosts "; \
done
########################################
# Memcached
########################################
# Protocol fields: <key> <flags> <exptime-seconds> <value-bytes>\r\n<value>\r\n
# "add" stores only if the key does not exist yet.
printf "add i 0 3600 1\r\n1\r\n" | nc memcached 11211
# -q 1: quit 1 second after stdin EOF so nc doesn't hang on the open socket.
echo "incr i 1" | nc -q 1 memcached 11211
echo "get s:sdk_123" | nc -q 1 memcached 11211
# "set" stores unconditionally; 5 = byte length of "Sess1".
printf "set s:sdk_123 0 86400 5\r\nSess1\r\n" | nc memcached 11211
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment