-- Which application IPs hold the most Sleep (idle) connections.
-- Original was whitespace-mangled ("ascountfrom", "groupby", "orderbycountdesc") and would not parse.
SELECT
    SUBSTRING_INDEX(host, ':', 1) AS appip,
    COUNT(*) AS cnt  -- renamed from "count" to avoid shadowing the COUNT() function name
FROM information_schema.PROCESSLIST
WHERE COMMAND = 'Sleep'
GROUP BY appip
ORDER BY cnt DESC;
分析哪些DB访问的比较多
1
-- Which databases have the most Sleep (idle) connections.
-- Original was whitespace-mangled ("ascountfrom", "groupby", "orderbycountdesc") and would not parse.
SELECT
    DB,
    COUNT(*) AS cnt  -- renamed from "count" to avoid shadowing the COUNT() function name
FROM information_schema.PROCESSLIST
WHERE COMMAND = 'Sleep'
GROUP BY DB
ORDER BY cnt DESC;
分析哪些用户访问的比较多
1
-- Which users hold the most Sleep (idle) connections.
-- Original was whitespace-mangled ("selectuser", "groupbyuserorderbycountdesc") and would not parse.
SELECT
    USER,
    COUNT(*) AS cnt  -- renamed from "count" to avoid shadowing the COUNT() function name
FROM information_schema.PROCESSLIST
WHERE COMMAND = 'Sleep'
GROUP BY USER
ORDER BY cnt DESC;
TABLES
列出大于10G以上的表
1
-- Tables larger than 10 GB (data + index + free space inside the tablespace).
-- Original was whitespace-mangled ("tableswhere", "10orderby") and would not parse.
-- NOTE: TABLE_ROWS and the LENGTH columns are estimates for InnoDB.
SELECT
    TABLE_SCHEMA,
    TABLE_NAME,
    TABLE_ROWS,
    ROUND((INDEX_LENGTH + DATA_FREE + DATA_LENGTH) / 1024 / 1024 / 1024) AS size_G
FROM information_schema.TABLES
WHERE ROUND((INDEX_LENGTH + DATA_FREE + DATA_LENGTH) / 1024 / 1024 / 1024) > 10
ORDER BY size_G DESC;
performance_schema相关
performance_schema占用多少内存
http://dev.mysql.com/doc/refman/5.7/en/show-engine.html SHOW ENGINE PERFORMANCE_SCHEMA STATUS; For the Performance Schema as a whole, performance_schema.memory is the sum of all the memory used (the sum of all other memory values).
performance_schema 瓶颈
1 2 3 4 5
1) SHOW VARIABLES LIKE 'perf%'; 2) SHOW STATUS LIKE 'perf%'; 3) SHOW ENGINE PERFORMANCE_SCHEMA STATUS\G
-- Per-connection value of sql_safe_updates, with the owning connection's identity.
-- Rewritten from an implicit comma join to an explicit INNER JOIN.
-- The original filtered with PROCESSLIST_USER <> 'NULL' (string literal); background
-- threads carry a real SQL NULL here, which that predicate only excluded by accident
-- of three-valued logic. IS NOT NULL states the intent directly.
SELECT
    a.*,
    b.PROCESSLIST_ID,
    b.PROCESSLIST_USER,
    b.PROCESSLIST_HOST,
    b.PROCESSLIST_COMMAND,
    b.PROCESSLIST_STATE
FROM performance_schema.variables_by_thread AS a
INNER JOIN performance_schema.threads AS b
    ON a.THREAD_ID = b.THREAD_ID
WHERE b.PROCESSLIST_USER IS NOT NULL
  AND a.VARIABLE_NAME = 'sql_safe_updates';
-- Top 10 most-executed SELECT digests in this instance (session-variable noise excluded).
-- Original was whitespace-mangled ("selectSCHEMA_NAME", "like'select%'and", "desclimit10") and would not parse.
SELECT
    SCHEMA_NAME,
    DIGEST_TEXT,
    COUNT_STAR,
    FIRST_SEEN,
    LAST_SEEN
FROM performance_schema.events_statements_summary_by_digest
WHERE DIGEST_TEXT LIKE 'select%'
  AND DIGEST_TEXT NOT LIKE '%SESSION%'
ORDER BY COUNT_STAR DESC
LIMIT 10\G
一个实例中写入最多的TopN SQL
1
-- Top 10 most-executed write digests (insert/update/delete/replace) in this instance.
-- Original was whitespace-mangled ("like'insert%'or", "desclimit10") and would not parse.
-- The OR chain is parenthesized defensively in case further AND conditions are added later.
SELECT
    SCHEMA_NAME,
    DIGEST_TEXT,
    COUNT_STAR,
    FIRST_SEEN,
    LAST_SEEN
FROM performance_schema.events_statements_summary_by_digest
WHERE (DIGEST_TEXT LIKE 'insert%'
    OR DIGEST_TEXT LIKE 'update%'
    OR DIGEST_TEXT LIKE 'delete%'
    OR DIGEST_TEXT LIKE 'replace%')
ORDER BY COUNT_STAR DESC
LIMIT 10\G
库中: 求SQL
一个库中查询最多的TopN SQL
1
同上 实例中: 求SQL
一个库中写入最多的TopN SQL
1
同上 实例中: 求SQL
实例中:求table
使用说明
1 2 3 4 5 6 7 8 9
usage: perl xx.pl -i 192.168.1.10 -p 3306 -e read|write|all 2>/dev/null ; opt e: read = get select count, write = get insert/update/delete count, all = get all sql count ; opt i: 192.xx.xx.xx ip address ; opt p: 3306 db port
-- Per-schema table I/O summary, ranked by total wait latency (which schema is busiest overall).
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT)  AS all_time,
    SUM(SUM_TIMER_READ)  AS all_read_time,
    SUM(SUM_TIMER_WRITE) AS all_write_time,
    SUM(COUNT_STAR)      AS all_star,
    SUM(COUNT_READ)      AS all_read,
    SUM(COUNT_WRITE)     AS all_write,
    SUM(COUNT_FETCH)     AS all_fetch,
    SUM(COUNT_INSERT)    AS all_insert,
    SUM(COUNT_UPDATE)    AS all_update,
    SUM(COUNT_DELETE)    AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_time DESC;
如何查看一个MySQL实例中哪个库的read latency时间最大
1
-- Per-schema table I/O summary, ranked by read latency.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT)  AS all_time,
    SUM(SUM_TIMER_READ)  AS all_read_time,
    SUM(SUM_TIMER_WRITE) AS all_write_time,
    SUM(COUNT_STAR)      AS all_star,
    SUM(COUNT_READ)      AS all_read,
    SUM(COUNT_WRITE)     AS all_write,
    SUM(COUNT_FETCH)     AS all_fetch,
    SUM(COUNT_INSERT)    AS all_insert,
    SUM(COUNT_UPDATE)    AS all_update,
    SUM(COUNT_DELETE)    AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_read_time DESC;
如何查看一个MySQL实例中哪个库的write latency时间最大
1
-- Per-schema table I/O summary, ranked by write latency.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT)  AS all_time,
    SUM(SUM_TIMER_READ)  AS all_read_time,
    SUM(SUM_TIMER_WRITE) AS all_write_time,
    SUM(COUNT_STAR)      AS all_star,
    SUM(COUNT_READ)      AS all_read,
    SUM(COUNT_WRITE)     AS all_write,
    SUM(COUNT_FETCH)     AS all_fetch,
    SUM(COUNT_INSERT)    AS all_insert,
    SUM(COUNT_UPDATE)    AS all_update,
    SUM(COUNT_DELETE)    AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_write_time DESC;
如何查看一个MySQL实例中哪个库的总访问量最大
1
-- Per-schema row access counts, ranked by total row operations.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT) AS all_time,
    SUM(COUNT_STAR)     AS all_star,
    SUM(COUNT_READ)     AS all_read,
    SUM(COUNT_WRITE)    AS all_write,
    SUM(COUNT_FETCH)    AS all_fetch,
    SUM(COUNT_INSERT)   AS all_insert,
    SUM(COUNT_UPDATE)   AS all_update,
    SUM(COUNT_DELETE)   AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_star DESC;
-- Per-schema row access counts, ranked by rows read.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT) AS all_time,
    SUM(COUNT_STAR)     AS all_star,
    SUM(COUNT_READ)     AS all_read,
    SUM(COUNT_WRITE)    AS all_write,
    SUM(COUNT_FETCH)    AS all_fetch,
    SUM(COUNT_INSERT)   AS all_insert,
    SUM(COUNT_UPDATE)   AS all_update,
    SUM(COUNT_DELETE)   AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_read DESC;
如何查看一个MySQL实例中哪个库的写入量最大
1
-- Per-schema row access counts, ranked by rows written.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT) AS all_time,
    SUM(COUNT_STAR)     AS all_star,
    SUM(COUNT_READ)     AS all_read,
    SUM(COUNT_WRITE)    AS all_write,
    SUM(COUNT_FETCH)    AS all_fetch,
    SUM(COUNT_INSERT)   AS all_insert,
    SUM(COUNT_UPDATE)   AS all_update,
    SUM(COUNT_DELETE)   AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_write DESC;
如何查看一个MySQL实例中哪个库的update量最大
1
-- Per-schema row access counts, ranked by rows updated.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT) AS all_time,
    SUM(COUNT_STAR)     AS all_star,
    SUM(COUNT_READ)     AS all_read,
    SUM(COUNT_WRITE)    AS all_write,
    SUM(COUNT_FETCH)    AS all_fetch,
    SUM(COUNT_INSERT)   AS all_insert,
    SUM(COUNT_UPDATE)   AS all_update,
    SUM(COUNT_DELETE)   AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_update DESC;
如何查看一个MySQL实例中哪个库的insert量最大
1
-- Per-schema row access counts, ranked by rows inserted.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT) AS all_time,
    SUM(COUNT_STAR)     AS all_star,
    SUM(COUNT_READ)     AS all_read,
    SUM(COUNT_WRITE)    AS all_write,
    SUM(COUNT_FETCH)    AS all_fetch,
    SUM(COUNT_INSERT)   AS all_insert,
    SUM(COUNT_UPDATE)   AS all_update,
    SUM(COUNT_DELETE)   AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_insert DESC;
如何查看一个MySQL实例中哪个库的delete量最大
1
-- Per-schema row access counts, ranked by rows deleted.
-- Original was whitespace-mangled ("groupby", "orderby") and would not parse.
SELECT
    OBJECT_SCHEMA,
    SUM(SUM_TIMER_WAIT) AS all_time,
    SUM(COUNT_STAR)     AS all_star,
    SUM(COUNT_READ)     AS all_read,
    SUM(COUNT_WRITE)    AS all_write,
    SUM(COUNT_FETCH)    AS all_fetch,
    SUM(COUNT_INSERT)   AS all_insert,
    SUM(COUNT_UPDATE)   AS all_update,
    SUM(COUNT_DELETE)   AS all_delete
FROM performance_schema.table_io_waits_summary_by_table
GROUP BY OBJECT_SCHEMA
ORDER BY all_delete DESC;
表级别
表的all latency时间(read + write)最大
1
-- Top 10 tables by total I/O latency (read + write).
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME,
    SUM_TIMER_WAIT, SUM_TIMER_READ, SUM_TIMER_WRITE,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 10;
表的read latency(fetch)时间最大
1
-- Top 10 tables by read (fetch) latency.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME,
    SUM_TIMER_WAIT, SUM_TIMER_READ, SUM_TIMER_WRITE,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY SUM_TIMER_READ DESC
LIMIT 10;
表的write latency 时间最大
1
-- Top 10 tables by write latency.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME,
    SUM_TIMER_WAIT, SUM_TIMER_READ, SUM_TIMER_WRITE,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY SUM_TIMER_WRITE DESC
LIMIT 10;
表的rows 总访问量最大
1
-- Top 10 tables by total row operations.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME, SUM_TIMER_WAIT,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY COUNT_STAR DESC
LIMIT 10;
表的rows 查询量最大
1
-- Top 10 tables by rows read.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME, SUM_TIMER_WAIT,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY COUNT_READ DESC
LIMIT 10;
表的rows 写入量最大
1
-- Top 10 tables by rows written.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME, SUM_TIMER_WAIT,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY COUNT_WRITE DESC
LIMIT 10;
表的rows update量最大
1
-- Top 10 tables by rows updated.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME, SUM_TIMER_WAIT,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY COUNT_UPDATE DESC
LIMIT 10;
表的rows insert量最大
1
-- Top 10 tables by rows inserted.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME, SUM_TIMER_WAIT,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY COUNT_INSERT DESC
LIMIT 10;
表的rows delete量最大
1
-- Top 10 tables by rows deleted.
-- Original was whitespace-mangled ("orderby", "desclimit10") and would not parse.
SELECT
    OBJECT_SCHEMA, OBJECT_NAME, SUM_TIMER_WAIT,
    COUNT_STAR, COUNT_READ, COUNT_WRITE, COUNT_UPDATE, COUNT_INSERT, COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_table
ORDER BY COUNT_DELETE DESC
LIMIT 10;
5) 找到最需要优化的SQL后,可以开始跟踪分析单条SQL来获得更加底层实际的东西,目前最好的三种方法是 a)show profile b)show status c)slow query条目 a)show profile SQL> set profiling=1; SQL> select * from <table>; SQL> show profiles; SQL> show profile for query 1; 格式化输出: SQL> set @query_id = 1; SQL> SELECT STATE, SUM(DURATION) AS Total_R, ROUND( 100*SUM(DURATION) / (SELECT SUM(DURATION) FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID = @query_id ), 2) AS Pct_R, COUNT(*) AS Calls, SUM(DURATION) / COUNT(*) AS "R/CALL" FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID = @query_id GROUP BY STATE ORDER BY Total_R DESC;
查看使用的是磁盘临时表还是内存临时表: flush status; <sql>; show status where variable_name like 'Handler%' or variable_name like 'Created%';
b) show status SQL> 句柄计数器 handler counter,临时文件,表计数器 SQL> flush status ; 刷新会话级别的状态值。 SQL> select * from <table>; SQL> show status where variable_name like 'Handler%' or Variable_name like 'Created%'; -- 可以看到是否利用了磁盘临时表,而explain是无法看到的。
8. 关于索引选择性: 字段1 building_id,字段2 status 单索引字段的索引选择性: select count(distinct building_id)/count(*) as selectivity from community_units; 组合索引的索引选择性: select count(distinct (concat(building_id,status)))/count(*) as selectivity from community_units; 组合前缀的索引选择性: select count(distinct (concat(building_id,left(status,2))))/count(*) as selectivity from community_units; 得到的结果越接近1,效果越好
配置:/etc/rsyncd.conf uid = root gid = root use chroot = no max connections = 64 pid file = /var/run/rsyncd.pid lock file = /var/run/rsync.lock log file = /var/log/rsyncd.log
[dbbak]
path = /data/dbbackup use chroot = no ignore errors read only = no list = no [Binlog] path = /data/BINLOG_BACKUP use chroot = no ignore errors read only = no list = no
[fullbak]
path = /data/FULL_BACKUP use chroot = no ignore errors read only = no list = no 启动: /usr/bin/rsync --daemon 限速100k/s传输 : /usr/bin/rsync -av --progress --update --bwlimit=100 --checksum --compress $file root@$ip::dbbak 正常传输: /usr/bin/rsync -av --progress $file root@$ip::dbbak
pigz使用
常用知识普及
1 2 3 4 5 6 7 8
错误的写法:nohup tar -cvf - xx_20151129 | pigz -p 24 > xx_20151129.tar.gz & --一定不能加nohup,因为中间有管道符,不能传递下去的 错误的代价: tar: This does not look like a tar archive tar: Skipping to next header tar: Exiting with failure status due to previous errors 以上错误的案例中,为此付出过很大的代价,哭晕在厕所N次了...
* axel 下载&安装 wget -c http://pkgs.repoforge.org/axel/axel-2.4-1.el5.rf.x86_64.rpm rpm -ivh axel-2.4-1.el5.rf.x86_64.rpm
* axel 核心参数 -n 指定线程数 -o 指定另存为目录
* httpd服务搭建与配置
yum install httpd
* httpd配置主目录 /etc/httpd/conf/httpd.conf
[xx html]
# cat /etc/httpd/conf/httpd.conf | grep DocumentRoot # DocumentRoot: The directory out of which you will serve your #DocumentRoot "/var/www/html" -- 注释掉原配置 DocumentRoot "/data/dbbackup/html" -- 配置成容量大的地址 * 开启httpd服务 service httpd restart * 下载数据 目的地ip shell> nohup axel -n 10 -v -o /data/dbbackup/ http://$数据源ip/xx_20151129.tar.gz &
# 1. Top 10 processes (PID > 100) by swap usage, summed from /proc/<pid>/smaps.
#    awk receives the PID via -v instead of fragile quote-splicing; stderr is
#    suppressed so PIDs that exit mid-scan don't pollute the output.
for pid in $(ls /proc | grep '^[0-9]' | awk '$0 > 100'); do awk -v p="$pid" '/Swap:/ {a += $2} END {print p, a/1024 "M"}' "/proc/$pid/smaps" 2>/dev/null; done | sort -k2nr | head
有些Linux发行版无法运行上面的命令,可参考下一条命令
# 2. Same report via a directory listing, for systems where variant 1 fails.
#    "ll" is an interactive alias (usually ls -l) that does not exist in scripts,
#    so ls -l is spelled out; stderr is suppressed for PIDs that exit mid-scan.
for pid in $(ls -l /proc | awk '{print $9}' | grep '^[0-9]' | awk '$0 > 100'); do awk -v p="$pid" '/Swap:/ {a += $2} END {print p, a/1024 "M"}' "/proc/$pid/smaps" 2>/dev/null; done | sort -k2nr | head