Overview
I stumbled across a handy command today: ps_mem.
ps_mem is a simple Python script that accurately reports the in-core (RAM-resident) memory usage of each program on Linux. There are plenty of tools for inspecting memory usage on Linux, such as free, vmstat, smem, top, htop, and atop, but what sets this one apart is that it reports each program's core memory usage precisely.
ps_mem computes a program's total private memory and total shared memory separately, then combines them into a more accurate overall figure, showing exactly how much RAM each program on the system is using.
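Under the hood, ps_mem gets these numbers from /proc/<pid>/smaps: private memory is roughly the sum of the Private_Clean and Private_Dirty fields across a process's mappings, and the shared portion is derived from the kernel's proportional set size (Pss), which divides each shared page fairly among the processes mapping it. A minimal awk sketch of the same idea for a single process (PID 1 is only an example; run as root):
# sum private pages and proportional set size (Pss) for one process
awk '/^Private_(Clean|Dirty):/ {priv += $2}
     /^Pss:/                   {pss  += $2}
     END {printf "private=%d KiB  shared=%d KiB  total=%d KiB\n",
          priv, pss - priv, pss}' /proc/1/smaps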
Usage examples
Help
[root@node161 ~]# ps_mem -h
ps_mem.py - Show process memory usage
-h Show this help
-w <N> Measure and show process memory every N seconds
-p <pid>[,pid2,...pidN] Only show memory usage PIDs in the specified list
-s, --show-cmdline Show complete program path with options
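If ps_mem is not already on the system, it can usually be installed from PyPI (this assumes pip and network access are available; the upstream project is pixelb/ps_mem on GitHub):
pip install ps_mem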
Example 1
[root@node161 ~]# ps_mem
Private + Shared = RAM used Program
4.0 KiB + 12.0 KiB = 16.0 KiB acpid
112.0 KiB + 13.0 KiB = 125.0 KiB agetty
124.0 KiB + 27.0 KiB = 151.0 KiB ipmievd
4.0 KiB + 164.5 KiB = 168.5 KiB lvmetad
184.0 KiB + 20.5 KiB = 204.5 KiB rpc.rquotad
204.0 KiB + 33.0 KiB = 237.0 KiB atd
184.0 KiB + 55.0 KiB = 239.0 KiB auditd
196.0 KiB + 49.0 KiB = 245.0 KiB rpc.idmapd
240.0 KiB + 31.5 KiB = 271.5 KiB crond
196.0 KiB + 127.5 KiB = 323.5 KiB rpcbind
328.0 KiB + 33.5 KiB = 361.5 KiB ntpd
352.0 KiB + 14.5 KiB = 366.5 KiB iscsi-scstd
316.0 KiB + 85.0 KiB = 401.0 KiB gssproxy
568.0 KiB + 69.5 KiB = 637.5 KiB zed
532.0 KiB + 105.5 KiB = 637.5 KiB smartd
412.0 KiB + 335.0 KiB = 747.0 KiB avahi-daemon (2)
692.0 KiB + 91.0 KiB = 783.0 KiB systemd-udevd
696.0 KiB + 87.5 KiB = 783.5 KiB nscd
660.0 KiB + 192.5 KiB = 852.5 KiB dbus-daemon
824.0 KiB + 52.5 KiB = 876.5 KiB irqbalance
924.0 KiB + 106.0 KiB = 1.0 MiB rpc.mountd
992.0 KiB + 106.5 KiB = 1.1 MiB rpc.statd
1.1 MiB + 86.5 KiB = 1.1 MiB master
1.1 MiB + 110.0 KiB = 1.2 MiB qmgr
1.2 MiB + 87.0 KiB = 1.3 MiB stunnel
128.0 KiB + 1.3 MiB = 1.4 MiB cleanupd
136.0 KiB + 1.3 MiB = 1.4 MiB smbd-notifyd
1.5 MiB + 46.5 KiB = 1.5 MiB rsyslogd
1.5 MiB + 111.0 KiB = 1.6 MiB pickup
1.9 MiB + 39.0 KiB = 2.0 MiB systemd-logind
2.1 MiB + 209.5 KiB = 2.3 MiB ctdb_eventd
2.3 MiB + 487.5 KiB = 2.8 MiB su (4)
2.3 MiB + 601.5 KiB = 2.8 MiB ctdb_recovered
2.6 MiB + 352.0 KiB = 3.0 MiB sudo (2)
2.8 MiB + 567.0 KiB = 3.4 MiB multipathd
2.4 MiB + 1.3 MiB = 3.7 MiB nmbd
3.6 MiB + 127.0 KiB = 3.7 MiB systemd
3.4 MiB + 316.0 KiB = 3.8 MiB polkitd
1.6 MiB + 3.1 MiB = 4.7 MiB smbd
4.4 MiB + 405.0 KiB = 4.8 MiB tmux (2)
6.1 MiB + 501.0 KiB = 6.6 MiB tuned
1.5 MiB + 6.7 MiB = 8.2 MiB winbindd (4)
8.4 MiB + 12.6 MiB = 21.0 MiB systemd-journald
22.4 MiB + 79.5 KiB = 22.5 MiB gmond
23.1 MiB + 78.5 KiB = 23.2 MiB memcached
24.4 MiB + 275.5 KiB = 24.7 MiB python2.7
30.1 MiB + 2.6 MiB = 32.7 MiB bash (89)
37.1 MiB + 1.7 MiB = 38.8 MiB ctdbd
12.2 MiB + 50.0 MiB = 62.2 MiB ezs3-agent.py
68.7 MiB + 306.0 KiB = 69.0 MiB davserver
69.9 MiB + 951.0 KiB = 70.8 MiB csmonitord
73.9 MiB + 314.0 KiB = 74.2 MiB ezfs-recycle-fl
72.7 MiB + 2.4 MiB = 75.2 MiB radosgw
74.3 MiB + 920.5 KiB = 75.2 MiB ezsnapsched.py
77.1 MiB + 921.5 KiB = 78.0 MiB bt-pool-reweigh
81.5 MiB + 964.5 KiB = 82.4 MiB bt-recovery-qos
84.7 MiB + 1.2 MiB = 85.9 MiB eziscsi-rbd-cle
85.4 MiB + 1.2 MiB = 86.6 MiB eziscsid.py
85.8 MiB + 975.0 KiB = 86.7 MiB bt-ipmi-agent.p
86.5 MiB + 1.0 MiB = 87.5 MiB ezfs-agent
87.1 MiB + 653.5 KiB = 87.7 MiB atop (2)
88.1 MiB + 918.0 KiB = 89.0 MiB ezqosd
120.0 MiB + 468.0 KiB = 120.5 MiB ceph-mgr
103.2 MiB + 19.4 MiB = 122.6 MiB sshd (86)
136.2 MiB + 1.1 MiB = 137.3 MiB ezs3-smart-moni
139.1 MiB + 1.6 MiB = 140.7 MiB ezrpcd
11.1 MiB + 156.6 MiB = 167.8 MiB ezmonitord
202.0 MiB + 17.6 MiB = 219.6 MiB ssh (213)
266.3 MiB + 8.5 MiB = 274.8 MiB httpd (12)
501.0 MiB + 35.4 MiB = 536.3 MiB ezs3-bucket-log (9)
544.1 MiB + 312.5 KiB = 544.4 MiB ceph-mon
75.7 GiB + 14.8 MiB = 75.7 GiB ceph-osd (12)
---------------------------------
79.3 GiB
=================================
[root@node161 ~]#
Note:
In the last column of the output, the number in parentheses is the count of processes running under that program name. Let's verify a few:
[root@node161 ~]# ps -ef |grep sshd | grep -v grep | wc -l
86
[root@node161 ~]# ps -ef |grep ceph-osd | grep -v grep | wc -l
12
[root@node161 ~]# ps -ef |grep bash | grep -v grep | wc -l
89
[root@node161 ~]#
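The counts line up. As an aside, pgrep -c should report the same counts without the grep -v grep dance:
# count processes by name directly
pgrep -c sshd
pgrep -c ceph-osd
pgrep -c bash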
Printing the full command line in the output
[root@node161 ~]# ps_mem -s
Private + Shared = RAM used Program
4.0 KiB + 12.0 KiB = 16.0 KiB /usr/sbin/acpid
4.0 KiB + 118.0 KiB = 122.0 KiB avahi-daemon: chroot helper
96.0 KiB + 27.0 KiB = 123.0 KiB /usr/sbin/ipmievd sel daemon pidfile=/var/run/ipmievd.pid
112.0 KiB + 13.0 KiB = 125.0 KiB /sbin/agetty --noclear tty1 linux
4.0 KiB + 164.5 KiB = 168.5 KiB /usr/sbin/lvmetad -f
184.0 KiB + 20.5 KiB = 204.5 KiB rpc.rquotad
204.0 KiB + 33.0 KiB = 237.0 KiB /usr/sbin/atd -f
...... middle of the output omitted ......
86.6 MiB + 1.0 MiB = 87.6 MiB /usr/bin/python2 /usr/local/bin/ezfs-agent start
88.2 MiB + 937.0 KiB = 89.1 MiB /usr/bin/python2 /usr/local/bin/ezqosd start
110.0 MiB + 330.0 KiB = 110.4 MiB /usr/bin/atop -a -R -w /var/log/atop/atop_20210219 600
109.7 MiB + 1.1 MiB = 110.8 MiB /usr/bin/python2 /usr/local/bin/ezs3-agent.py start
101.0 MiB + 12.7 MiB = 113.7 MiB sshd: root@notty (77)
121.8 MiB + 477.0 KiB = 122.2 MiB /usr/bin/ceph-mgr -f --cluster ceph --id ojzrm --setuser root --setgroup root
136.3 MiB + 1.1 MiB = 137.4 MiB /usr/bin/python2 /usr/local/bin/ezs3-smart-monitor.py start
139.1 MiB + 1.6 MiB = 140.7 MiB /usr/bin/python2 /usr/local/bin/ezrpcd start
230.2 MiB + 2.6 MiB = 232.8 MiB (wsgi:ezs3admin -DFOREGROUND
320.4 MiB + 1.9 MiB = 322.3 MiB /usr/bin/python2 /usr/local/bin/ezmonitord start
500.9 MiB + 33.3 MiB = 534.2 MiB /usr/bin/python2 /usr/local/bin/ezs3-bucket-logging-agent.py start (9)
562.3 MiB + 315.5 KiB = 562.6 MiB /usr/bin/ceph-mon -f --cluster ceph --id ojzrm --setuser root --setgroup root
4.4 GiB + 980.0 KiB = 4.4 GiB /usr/bin/ceph-osd -f --cluster ceph --id 2 --setuser root --setgroup root
4.5 GiB + 1.1 MiB = 4.5 GiB /usr/bin/ceph-osd -f --cluster ceph --id 10 --setuser root --setgroup root
4.5 GiB + 1.1 MiB = 4.5 GiB /usr/bin/ceph-osd -f --cluster ceph --id 4 --setuser root --setgroup root
4.7 GiB + 1.0 MiB = 4.7 GiB /usr/bin/ceph-osd -f --cluster ceph --id 6 --setuser root --setgroup root
4.9 GiB + 985.0 KiB = 4.9 GiB /usr/bin/ceph-osd -f --cluster ceph --id 8 --setuser root --setgroup root
5.4 GiB + 988.0 KiB = 5.4 GiB /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser root --setgroup root
5.7 GiB + 1.0 MiB = 5.7 GiB /usr/bin/ceph-osd -f --cluster ceph --id 11 --setuser root --setgroup root
6.4 GiB + 1.3 MiB = 6.4 GiB /usr/bin/ceph-osd -f --cluster ceph --id 9 --setuser root --setgroup root
7.5 GiB + 1.0 MiB = 7.5 GiB /usr/bin/ceph-osd -f --cluster ceph --id 7 --setuser root --setgroup root
7.7 GiB + 1.1 MiB = 7.7 GiB /usr/bin/ceph-osd -f --cluster ceph --id 5 --setuser root --setgroup root
8.7 GiB + 1.1 MiB = 8.7 GiB /usr/bin/ceph-osd -f --cluster ceph --id 1 --setuser root --setgroup root
9.0 GiB + 1.1 MiB = 9.0 GiB /usr/bin/ceph-osd -f --cluster ceph --id 3 --setuser root --setgroup root
---------------------------------
77.2 GiB
=================================
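The -s flag should also combine with -p (shown next) when you only want the full command line of one heavy process; a hypothetical example against the ceph-osd PID used below:
ps_mem -s -p 6646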
Showing memory usage for a specific list of PIDs only
[root@node161 ~]# ps_mem -p 6646
Private + Shared = RAM used Program
6.4 GiB + 1.2 MiB = 6.4 GiB ceph-osd
---------------------------------
6.4 GiB
=================================
[root@node161 ~]#
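The PID does not have to be looked up by hand; pgrep can supply it, and its -d, option joins multiple PIDs with commas to match the -p <pid>[,pid2,...pidN] syntax from the help text. A sketch:
# pass every ceph-osd PID to ps_mem, comma-separated
ps_mem -p $(pgrep -d, ceph-osd)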
Printing process memory every N seconds
The following command reports memory usage every 5 seconds:
[root@node161 ~]# ps_mem -p 6646 -w 5
Private + Shared = RAM used Program
6.4 GiB + 1.2 MiB = 6.4 GiB ceph-osd
---------------------------------
6.4 GiB
=================================
6.4 GiB + 1.2 MiB = 6.4 GiB ceph-osd
---------------------------------
6.4 GiB
=================================
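For unattended monitoring, the periodic output can simply be appended to a file (a sketch; the log path is arbitrary):
# sample PID 6646 every 5 seconds and keep a record
ps_mem -p 6646 -w 5 >> /tmp/ps_mem-6646.log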