Thursday, January 23, 2020
Sorted
> find . -name '*.sh' | xargs wc -l | sort -nr
Not sorted
> find . -name '*.sh' | xargs wc -l
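If any of the script paths contain spaces, a null-delimited variant is safer (a small sketch, not part of the original note):
> find . -name '*.sh' -print0 | xargs -0 wc -l | sort -nr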
Friday, January 17, 2020
When it becomes handy to disable NFS attribute caching
https://docstore.mik.ua/orelly/networking_2ndEd/nfs/ch18_06.htm
If changes made by one client need to be reflected on other clients with finer granularity, the attribute cache lifetime can be reduced to one second using the actimeo option, which sets both the regular file and directory minimum and maximum lifetimes to the same value:
> mount -t nfs -o actimeo=1 server:/export /mnt
This has the same effect as:
> mount -t nfs -o acregmin=1,acregmax=1,acdirmin=1,acdirmax=1 server:/export /mnt
To disable the caching completely:
> mount -t nfs -o actimeo=0 server:/export /mnt
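To verify which attribute-cache values are actually in effect after mounting, the NFS mount options can be inspected (a quick check, assuming the /mnt mount point from the examples above):
> nfsstat -m
> grep ' /mnt ' /proc/mounts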
Thursday, January 16, 2020
Docker on CentOS 8
> sudo dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
> dnf list docker-ce --showduplicates | sort -r
> sudo dnf install https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm
> sudo dnf install docker-ce
> systemctl enable docker
> systemctl start docker
> systemctl status docker
> cd ~
> mkdir docker-centos
> cd docker-centos
> echo "FROM centos" > Dockerfile
> docker build .
> docker run centos uname -a
> docker run centos cat "/etc/redhat-release"
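The image built above ends up untagged; a variant that names it during the build and runs that image directly (the tag mycentos is just an example):
> docker build -t mycentos .
> docker run mycentos cat /etc/redhat-release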
Sunday, November 17, 2019
Linux ZFS and disk configuration a collection of notes
#check stats on pool
zpool iostat -v 1
#mounting an encrypted filesystem
zfs mount -l -a
#adding slog to a pool
zpool add zfs log /dev/nvme0n1
#adding l2arc to a pool
zpool add zfs cache /dev/nvme0n2
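#quick check (not from the original notes) that the log and cache devices show up in the pool's vdev tree
zpool status zfs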
#checking all zfs module parameters
modinfo zfs
#check atime enabled?
zfs get all |grep atime
#disable atime
zfs set atime=off zfs
zfs set atime=off zfs_sata
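#verify the change (quick check, not from the original notes)
zfs get atime zfs zfs_sata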
#check serial number of disk
[root@node-2 ~]# sginfo -s /dev/sda
Serial Number 'PDNLH0BRH9F2DJ'
#check physical and logical block size of disks
lsblk -o NAME,PHY-SEC,LOG-SEC,SIZE,TYPE,ROTA
#physical vs. logical block size
- physical sector size -> the sector size the drive actually reads and writes internally
- logical sector size -> the smallest sector size the drive can be addressed with
Basically, ashift is the exponent to base 2 of the sector size ZFS should assume: a physical sector size of 512 bytes == 2^9 -> ashift=9, and 4k == 2^12 -> ashift=12.
#Example for 4x2TB rotational disks
[root@node-2 ~]# lsblk -o NAME,PHY-SEC,LOG-SEC,SIZE,TYPE,ROTA |grep 'disk 1'
sdk 4096 512 1.8T disk 1
sdi 4096 512 1.8T disk 1
sdl 4096 512 1.8T disk 1
sdj 4096 512 1.8T disk 1
[root@node-2 ~]# ls -l /dev/disk/by-id/
total 0
lrwxrwxrwx. 1 root root 9 Nov 17 23:14 ata-ST2000LM015-2E8174_WDZ3WZFN -> ../../sdj
lrwxrwxrwx. 1 root root 9 Nov 17 23:15 ata-ST2000LM015-2E8174_WDZAAC2H -> ../../sdi
lrwxrwxrwx. 1 root root 9 Nov 17 23:15 ata-ST2000LX001-1RG174_WDZASXRK -> ../../sdk
lrwxrwxrwx. 1 root root 9 Nov 17 23:15 ata-ST2000LX001-1RG174_ZDZ4TJK2 -> ../../sdl
#creating the pool
zpool create -o ashift=12 zfs_sata mirror ata-ST2000LM015-2E8174_WDZAAC2H ata-ST2000LM015-2E8174_WDZ3WZFN mirror ata-ST2000LX001-1RG174_WDZASXRK ata-ST2000LX001-1RG174_ZDZ4TJK2
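#double-check the ashift the pool was actually created with (quick check, not from the original notes)
zdb -C zfs_sata | grep ashift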
#creating two datasets with different record sizes
zfs create -o recordsize=16k zfs_sata/mfdatabase
zfs create -o recordsize=1024k zfs_sata/mfjournal
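#verify the recordsize set on each dataset (quick check, not from the original notes)
zfs get recordsize zfs_sata/mfdatabase zfs_sata/mfjournal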
#some sequential testing
dd if=/dev/zero of=/zfs_sata/mfdatabase/tempfile bs=1M count=1024; sync
dd if=/dev/zero of=/zfs_sata/mfdatabase/tempfile2 bs=16k count=65536; sync
/sbin/sysctl -w vm.drop_caches=3
dd if=/zfs_sata/mfdatabase/tempfile of=/dev/null bs=1M count=1024
dd if=/zfs_sata/mfdatabase/tempfile2 of=/dev/null bs=16k count=65536
rm -rf /zfs_sata/mfdatabase/*
writes
16.5013 s, 65.1 MB/s (expected slower)
7.26762 s, 148 MB/s (expected faster)
reads
54.6486 s, 19.6 MB/s (expected slower)
59.2402 s, 18.1 MB/s (expected slower)
dd if=/dev/zero of=/zfs_sata/mfjournal/tempfile bs=1M count=1024; sync
dd if=/dev/zero of=/zfs_sata/mfjournal/tempfile2 bs=16k count=65536; sync
/sbin/sysctl -w vm.drop_caches=3
dd if=/zfs_sata/mfjournal/tempfile of=/dev/null bs=1M count=1024
dd if=/zfs_sata/mfjournal/tempfile2 of=/dev/null bs=16k count=65536
rm -rf /zfs_sata/mfjournal/*
writes
12.1631 s, 88.3 MB/s (expected faster)
8.75189 s, 123 MB/s (expected slower)
reads
43.0267 s, 25.0 MB/s (expected faster)
23.1101 s, 46.5 MB/s (expected faster)
#zfs parameters
yum install sysfsutils -y
systool -vm zfs
#or
ls -l /sys/module/zfs/parameters/
#change zfs options for good
/etc/modprobe.d/zfs.conf
e.g. options zfs PARAMETER=VALUE
#change zfs option in flight
echo NEWVALUE > /sys/module/zfs/parameters/PARAMETER
#Example: 16GiB for zfs_arc_max
echo 17179869184 > /sys/module/zfs/parameters/zfs_arc_max
[root@node-2d current]# systool -vm zfs |grep zfs_arc_max
zfs_arc_max = "17179869184"
zfs_arc_max = "17179869184"
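#the currently effective ARC limit can also be read back from the kstats (quick check, not from the original notes)
grep c_max /proc/spl/kstat/zfs/arcstats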
https://forums.freebsd.org/threads/howto-tuning-l2arc-in-zfs.29907/
l2arc_write_max: 8388608 # maximum number of bytes written to L2ARC per feed
l2arc_write_boost: 8388608 # mostly only relevant in the first few hours after boot
l2arc_headroom: 2 # not sure
l2arc_feed_secs: 1 # L2ARC feeding period
l2arc_feed_min_ms: 200 # minimum L2ARC feeding period
l2arc_noprefetch: 1 # controls whether streaming data is cached or not
l2arc_feed_again: 1 # controls whether feed_min_ms is used or not
l2arc_norw: 1 # no read and write at the same time
/etc/modprobe.d/zfs.conf
#log
options zfs zfs_txg_timeout=30
#cache
options zfs zfs_arc_max=34359738368
options zfs l2arc_noprefetch=0
options zfs l2arc_write_max=1073741824
options zfs l2arc_write_boost=2147483648
options zfs zil_slog_limit=1073741824
#for all SSD pool logbias could be changed e.g.
zfs set logbias=throughput zfs/mydata
#look at performance
zpool iostat -v 1
#resources I read
- http://open-zfs.org/wiki/Performance_tuning
- https://github.com/zfsonlinux/zfs/wiki/ZFS-on-Linux-Module-Parameters
- https://martin.heiland.io/2018/02/23/zfs-tuning
- https://www.svennd.be/tuning-of-zfs-module
- https://utcc.utoronto.ca/~cks/space/blog/solaris/ZFSWritesAndZIL
- http://www.nanowolk.nl/ext/2013_02_zfs_sequential_read_write_performance
- http://www.nanowolk.nl/ext/2013_02_zfs_random_iops_read_write_performance
- https://sites.google.com/site/ryanbabchishin/home/publications/changing-a-zvol-block-size-while-making-it-sparse-and-compressed
- https://utcc.utoronto.ca/~cks/space/blog/tech/AdvancedFormatDrives
- https://docs.oracle.com/cd/E23823_01/html/819-5461/gazss.html#indexterm-425
- https://zfs.datto.com/2017_slides/pinchuk.pdf
Thursday, November 14, 2019
Need to know drive properties on Windows
Open the command line as Administrator
> fsutil fsinfo ntfsinfo [drive letter]
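For example, for the system drive (C: is just a placeholder here):
> fsutil fsinfo ntfsinfo C: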
Friday, September 13, 2019
Sort content of file and remove duplicates
I had to sort the contents of a list and remove duplicates:
> cat list.txt | sort -u > list-sorted-unique.txt
And then I found that the first column still has duplicates
> cut -d ':' -f1 list-sorted-unique.txt | sort -u | wc -l
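A variant that would keep just one line per first column (a sketch, assuming ':' is the field delimiter as in the cut command above):
> sort -t ':' -u -k1,1 list.txt > list-first-col-unique.txt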
Saturday, September 7, 2019
Reporting with lsblk and specific columns
lsblk -o NAME,FSTYPE,LABEL,MOUNTPOINT,SIZE,MODEL,SERIAL
NAME FSTYPE LABEL MOUNTPOINT SIZE MODEL SERIAL
sdf 465.8G WDC WDBNCE5000P 190476803028
├─sdf9 8M
└─sdf1 zfs_member zfs 465.8G
nvme0n1 931.5G Samsung SSD 970 EVO 1TB S467NX0KB02478V
├─nvme0n1p1 zfs_member 128G
└─nvme0n1p2 zfs_member 803.5G
sdo btrfs 931.5G ST1000LM048-2E71 WDEQTNE0
├─sdo1 zfs_member zfs_sata 931.5G
└─sdo9 btrfs 8M
sdd 465.8G WDC WDBNCE5000P 190476801481
├─sdd9 8M
└─sdd1 zfs_member zfs 465.8G
sdm btrfs 931.5G ST1000LM048-2E71 WDEN067Y
├─sdm1 zfs_member zfs_sata 931.5G
└─sdm9 btrfs 8M
sdb isw_raid_member 465.8G CT500MX500SSD1 1906E1E8FBC3
└─md126 465.8G
├─md126p2 LVM2_member 464.8G
│ ├─centos-swap swap 15.7G
│ ├─centos-home xfs /home 399.1G
│ └─centos-root xfs / 50G
└─md126p1 xfs /boot 1G
sdk 1.8T ST2000LX001-1RG1 WDZASXRK
├─sdk9 8M
└─sdk1 zfs_member zfs_sata 1.8T
sdi btrfs 1.8T ST2000LM015-2E81 WDZAAC2H
├─sdi9 btrfs 8M
└─sdi1 zfs_member zfs_sata 1.8T
sdq 465.8G Samsung SSD 850 S24CNXAGC07791V
├─sdq9 8M
└─sdq1 zfs_member zfs 465.8G
sdg 465.8G WDC WDBNCE5000P 190476800512
├─sdg9 8M
└─sdg1 zfs_member zfs 465.8G
sde 465.8G WDC WDBNCE5000P 190476800250
├─sde9 8M
└─sde1 zfs_member zfs 465.8G
sdn btrfs 931.5G ST1000LM048-2E71 WDEMXXPP
├─sdn1 zfs_member zfs_sata 931.5G
└─sdn9 btrfs 8M
sdc 465.8G WDC WDBNCE5000P 190476802105
├─sdc9 8M
└─sdc1 zfs_member zfs 465.8G
sdl 1.8T ST2000LX001-1RG1 ZDZ4TJK2
├─sdl1 zfs_member zfs_sata 1.8T
└─sdl9 8M
nvme1n1 477G PCIe SSD 19012351200132
├─nvme1n1p2 zfs_member zfs 221G
└─nvme1n1p1 swap [SWAP] 256G
sda isw_raid_member 465.8G CT500MX500SSD1 1906E1E8FE6C
└─md126 465.8G
├─md126p2 LVM2_member 464.8G
│ ├─centos-swap swap 15.7G
│ ├─centos-home xfs /home 399.1G
│ └─centos-root xfs / 50G
└─md126p1 xfs /boot 1G
sdj btrfs 1.8T ST2000LM015-2E81 WDZ3WZFN
├─sdj9 btrfs 8M
└─sdj1 zfs_member zfs_sata 1.8T
sdr 465.8G CT500MX500SSD4 1909E1ED9C29
├─sdr9 8M
└─sdr1 zfs_member zfs 465.8G
sdh 465.8G WDC WDBNCE5000P 190476800028
├─sdh9 8M
└─sdh1 zfs_member zfs 465.8G
sdp btrfs 931.5G ST1000LM048-2E71 WDEPK5PK
├─sdp9 btrfs 8M
└─sdp1 zfs_member zfs_sata 931.5G
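The full set of column names that can go into the -o list is printed by lsblk itself:
lsblk --help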
Friday, June 21, 2019
Temporarily remove and add dedicated SWAP to my Centos
Find out what swap is currently configured
> swapon --show
NAME TYPE SIZE USED PRIO
/dev/dm-1 partition 15.7G 66.9M -2
Turn off the current swap
> swapoff /dev/dm-1
Find partition for my new swap
> ls -lh /dev/disk/by-id/ |grep nvme-PCIe_SSD_19012351200132-part1
Make the swap
> mkswap /dev/nvme1n1p1
Turn on swap
> swapon /dev/nvme1n1p1
Verify
> swapon --show
NAME TYPE SIZE USED PRIO
/dev/nvme1n1p1 partition 256G 0B -2
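To keep the new swap across reboots, an /etc/fstab entry along these lines could be added (a sketch; using the partition's UUID instead of the device path would be more robust):
/dev/nvme1n1p1 none swap defaults 0 0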
Tuesday, May 7, 2019
Disable ext4 journaling and enable trim ext4 and SSD
Confirm that journaling is enabled and trim mount option is missing (default mount options).
> tune2fs -l /dev/nvme0n1
...
Filesystem features: has_journal ext_attr resize_inode dir_index filetype extent 64bit flex_bg sparse_super large_file huge_file uninit_bg dir_nlink extra_isize
Filesystem flags: signed_directory_hash
Default mount options: user_xattr acl
...
> tune2fs -o discard /dev/nvme0n1
> tune2fs -O ^has_journal /dev/nvme0n1
Note that the ^ prefix clears (disables) the feature; listing the feature without it would enable it instead.
> tune2fs -l /dev/nvme0n1
...
Filesystem features: ext_attr resize_inode dir_index filetype extent 64bit flex_bg sparse_super large_file huge_file uninit_bg dir_nlink extra_isize
Filesystem flags: signed_directory_hash
Default mount options: user_xattr acl discard
...
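Worth noting (not in the original post): tune2fs refuses to drop the journal while the filesystem is mounted read-write, and running a forced fsck afterwards is a good idea. A rough sketch:
> umount /dev/nvme0n1
> tune2fs -O ^has_journal /dev/nvme0n1
> e2fsck -f /dev/nvme0n1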
Wanted to install mainline kernel 5 on my CentOS
> yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
> rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
> yum --enablerepo=elrepo-kernel install kernel-ml
> yum -y --enablerepo=elrepo-kernel install kernel-ml-{devel,headers,perf}
After a reboot, I uninstalled the other kernel versions, e.g.:
> yum remove kernel
(if you wanted to install a kernel with long term support choose *-lt instead of *-ml)
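Before removing the old kernels, it can help to make the new mainline kernel the GRUB default; one common way on CentOS 7 (a sketch, not part of the original notes):
> grub2-set-default 0
> grub2-mkconfig -o /boot/grub2/grub.cfg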
Interesting M.2 to PCIe NVME adapter cards & Samsung 970 Evo Plus benchmark
I am actually using two of the EZDIY-FAB cards; the form factor really makes the difference for me. I also tried the Silverstone and Vantec cards:
- EZDIY-FAB Dual M.2 Adapter, M.2 PCIe NVMe and PCIe AHCI SSD to PCIe 3.0 x4 and M.2 SATA SSD to SATA III Adapter Card ($19 Amazon)
- Silverstone SST-ECM22 Dual M.2 to PCIe x4 NVMe SSD and SATA 6 G Adapter Card with Advanced Cooling ($33 Newegg)
- Vantec M.2 NVMe/M.2 SATA SSD PCIe x4 Adapter ($19 Microcenter)
- JEYI SK7 M.2 NVMe SSD NGFF To PCI-E X4 Adapter M Key B Key Dual Interface Card Support PCI-E3.0 Dual Voltage 12V+3.3V SATA3 ($9 AliExpress)
reclen KB | write | rewrite | read | reread | random read | random write |
4 | 942 | 915 | 2647 | 2849 | 73 | 843 |
16 | 947 | 918 | 3019 | 3029 | 231 | 871 |
128 | 913 | 905 | 3019 | 3024 | 838 | 859 |
I think these values are not far off from what public sources report for example: https://www.tomshardware.com/reviews/samsung-970-evo-plus-ssd,5608.html
Bunch of comparable cards..
- $17 RIITOP Dual M.2 to PCIe Adapter, M Key M.2 NVME AHCI SSD to PCI-e 3.0 x4 and B / B+M Key
- $35 SK16 PRO PCIE NVMe M-Key + AHCI B-Key + mSATA Adapter x16 PCI-E 3.0 Full Speed M.2 2280 aluminum
- $21 VANTEC UGT-M2PC200 M.2 NVMe + M.2 SATA SSD PCIe X4 Adapter
- $25 Rivo Dual M.2 SATA III and M2 to PCIe 3.0 X4 Adapter Card - Add M.2 SSD Devices to PC or Motherboard
- $19 Dual M.2 PCIe Adapter, M2 SSD NVME (m key) or SATA (b key) 22110 2280 2260 2242 2230 to PCI-e 3.0 x4 Host Controller Expansion Card
- $18 Dual M.2 PCIe Adapter M2 SSD NVME m key or SATA b key 22110 2280 2260 2242 2230 to PCI-e 3.0 x 4 Host Controller Expansion Card with
- $17 ALLOYSEED PCI-E 4X to NGFF SSD Adapter Card B+M-key M.2 NGFF sata base ssd +NVME pcie ssd
Bunch of specialty cards:
- $299 SuperMicro AOC-SLG3-2E4 NVme PCIe card
- $199 https://www.amazon.com/Supermicro-AOC-SHG3-4M2P-Add-on-Card/dp/B07FYSZR6L
- $89 https://www.delock.de/produkte/G_62704/merkmale.html?setLanguage=en
- ???$ http://amfeltec.com/products/pci-express-gen-3-carrier-board-for-m-2-ssd
- $399 HighPoint SSD7101A-1 4x dedicated 32Gbps M.2 Ports to PCIe 3.0 x16 RAID Controller
- $149 https://store.squirrelsresearch.com/sqrl/acorn-nest-x4 (not sure if compatible with SSDs)
EZDIY-FAB Dual M.2 Adapter
Sunday, May 5, 2019
Looking to test f2fs with my NVME Flash Drive
> yum install epel-release
> yum --enablerepo=epel-testing install f2fs-tools
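With the tools installed, formatting and mounting would look roughly like this (a sketch; /dev/nvme0n1p1 and /mnt/f2fs are placeholders):
> mkfs.f2fs /dev/nvme0n1p1
> mount -t f2fs /dev/nvme0n1p1 /mnt/f2fs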
Saturday, May 4, 2019
Install Glances on CentOS
I really like Glances as a resource monitor for my Linux installation. Here is how to install it:
> yum install -y epel-release
> yum install -y python-pip python-devel
> pip install --upgrade pip
> pip install glances
So that we can run Glances as a web service:
> pip install bottle
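With Bottle installed, the web UI can be started like this (if I remember correctly it listens on port 61208 by default):
> glances -w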
I think I am going to try using Glances with Grafana: https://www.tecmint.com/install-glances-influxdb-grafana-to-monitor-centos-7/