Thursday, May 28, 2020
Checking how many USB root hubs exist on the Nvidia Jetson Nano B01
root@jnode-1:~# lsusb -t
/: Bus 02.Port 1: Dev 1, Class=root_hub, Driver=tegra-xusb/4p, 5000M
|__ Port 1: Dev 2, If 0, Class=Hub, Driver=hub/4p, 5000M
|__ Port 1: Dev 3, If 0, Class=Mass Storage, Driver=usb-storage, 5000M
|__ Port 2: Dev 4, If 0, Class=Mass Storage, Driver=usb-storage, 5000M
|__ Port 4: Dev 5, If 0, Class=Mass Storage, Driver=uas, 5000M
/: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=tegra-xusb/5p, 480M
|__ Port 2: Dev 2, If 0, Class=Hub, Driver=hub/4p, 480M
So there are two root hubs, both on the tegra-xusb controller: Bus 02 (USB 3.0, 5000M) and Bus 01 (USB 2.0, 480M).
Monday, May 25, 2020
Python 3.8 on CentOS 7
> yum -y groupinstall "Development Tools"
> yum -y install openssl-devel bzip2-devel libffi-devel wget
> cd /opt
> wget https://www.python.org/ftp/python/3.8.3/Python-3.8.3.tgz
> tar xvf Python-3.8.3.tgz
> cd Python-3.8.3/
> ./configure --enable-optimizations
> make altinstall
> python3.8 --version
Python 3.8.3
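Since this is an altinstall, the system Python stays untouched; a virtual environment is a convenient way to actually use the new interpreter (a quick sketch, the /opt/venv38 path is just an example):
> python3.8 -m venv /opt/venv38
> /opt/venv38/bin/pip install --upgrade pip
> /opt/venv38/bin/python --version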
Thursday, May 21, 2020
Dependencies to compile a 32-bit app on 64-bit Linux
yum -y install glibc-devel.i686 glibc-devel libstdc++-devel.i686
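A quick way to check the 32-bit toolchain afterwards (hello.c is just a throwaway test file):
echo 'int main(void){return 0;}' > /tmp/hello.c
gcc -m32 /tmp/hello.c -o /tmp/hello32
file /tmp/hello32    # should report: ELF 32-bit LSB executable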
NTP service to keep time in sync
yum install ntp ntpdate ntp-doc -y
ntpdate europe.pool.ntp.org
systemctl start ntpd
systemctl enable ntpd
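To confirm ntpd is actually syncing once it is running:
ntpq -p
timedatectl    # should eventually show "NTP synchronized: yes"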
Writing to /dev/nullb0 using the null_blk block device driver
# modprobe null_blk
# ls /dev/null*
/dev/null  /dev/nullb0
# ls -l /dev/nullb0
brw-rw---- 1 root disk 251, 0 May 21 07:30 /dev/nullb0
# dd if=/dev/zero of=/dev/nullb0 count=1024 bs=1M
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB, 1.0 GiB) copied, 0.460911 s, 2.3 GB/s
# dd if=/dev/zero of=/dev/nullb0 count=1024 bs=1M conv=fsync
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB, 1.0 GiB) copied, 0.524639 s, 2.0 GB/s
# dd if=/dev/urandom of=/dev/nullb0 count=1024 bs=1M conv=fsync
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB, 1.0 GiB) copied, 17.4459 s, 61.5 MB/s
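The driver also accepts module parameters if you want more devices or a different emulated size and block size (a sketch based on the null_blk documentation linked below):
# modprobe -r null_blk
# modprobe null_blk nr_devices=2 gb=8 bs=4096
# ls /dev/nullb*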
Resources
https://www.kernel.org/doc/Documentation/block/null_blk.txt
https://www.kernel.org/doc/html/latest/block/null_blk.html
https://zonedstorage.io/linux/nullblk/
Wednesday, May 20, 2020
CentOS 7 mounting an SMB share with user and password
mount -t cifs -o domain=testing,username=test_user,dir_mode=0777,file_mode=0777 //server.some.net/sharename$ /mnt/mediaflux-storage
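This prompts for the password interactively; to keep it off the command line and out of shell history, mount.cifs can read a credentials file instead (the path and values below are just placeholders matching the example above):
cat > /root/.smb-cred <<'EOF'
username=test_user
password=secret
domain=testing
EOF
chmod 600 /root/.smb-cred
mount -t cifs -o credentials=/root/.smb-cred,dir_mode=0777,file_mode=0777 //server.some.net/sharename$ /mnt/mediaflux-storage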
Monday, May 11, 2020
LVM Create RAID0 for 2 NVMe SSD and 4 SATA HDD
My example application reads and writes data in 256-kilobyte blocks.
EXT4
stride = RAID stripe / filesystem block-size
stripe-width = stride * number of data bearing drives in RAID array
XFS
su = RAID stripe in kilobytes
sw = number of data bearing drives in RAID array
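Worked through for the setup below (256 KiB RAID stripe, 4 KiB filesystem block size):
stride = 256 KiB / 4 KiB = 64
stripe-width = 64 * 4 data drives = 256 (SATA array), or 64 * 2 = 128 (NVMe array)
For XFS the same numbers map directly to su=256k with sw=4 or sw=2.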
RAID0 over 4x HDD
pvcreate /dev/sdc
pvcreate /dev/sdd
pvcreate /dev/sde
pvcreate /dev/sdf
vgcreate sata /dev/sdc /dev/sdd /dev/sde /dev/sdf -y
lvcreate --type raid0 -l 100%free --stripes 4 --stripesize 256 -n scratch sata -y
> EXT4
mkfs.ext4 -b 4096 -E stride=64,stripe-width=256 /dev/mapper/sata-scratch
tune2fs -O ^has_journal /dev/mapper/sata-scratch
> XFS
mkfs.xfs -b size=4096 -d su=256k,sw=4 /dev/mapper/sata-scratch
RAID0 over 2x NVMe SSD
pvcreate /dev/nvme0n1
pvcreate /dev/nvme1n1
vgcreate nvme /dev/nvme0n1 /dev/nvme1n1 -y
lvcreate --type raid0 -l 100%free --stripes 2 --stripesize 256 -n scratch nvme -y
> EXT4
mkfs.ext4 -b 4096 -E stride=64,stripe-width=128 /dev/mapper/nvme-scratch
tune2fs -O ^has_journal /dev/mapper/nvme-scratch
tune2fs -o ^discard /dev/mapper/nvme-scratch
> XFS
mkfs.xfs -b size=4096 -d su=256k,sw=2 /dev/mapper/nvme-scratch
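To double-check that the striping ended up as intended (just a verification step, not part of the recipe; available field names are listed by lvs -o help):
lvs -a -o lv_name,segtype,stripes,stripesize,devices sata nvme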
Resources:
https://uclibc.org/~aldot/mkfs_stride.html
https://gryzli.info/2015/02/26/calculating-filesystem-stride_size-and-stripe_width-for-best-performance-under-raid/
https://erikugel.wordpress.com/tag/raid0/
https://xfs.org/index.php/XFS_FAQ#Q:_How_to_calculate_the_correct_sunit.2Cswidth_values_for_optimal_performance
sysfs interface
/sys/block/<disk>/alignment_offset
/sys/block/<disk>/<partition>/alignment_offset
/sys/block/<disk>/queue/physical_block_size
/sys/block/<disk>/queue/logical_block_size
/sys/block/<disk>/queue/minimum_io_size
/sys/block/<disk>/queue/optimal_io_size
https://people.redhat.com/msnitzer/docs/io-limits.txt
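These can be read directly to see what the kernel reports for a given disk (sdc is just an example device):
cat /sys/block/sdc/queue/optimal_io_size
cat /sys/block/sdc/queue/minimum_io_size
cat /sys/block/sdc/queue/physical_block_size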
LVM Create HDD volumes with NVMe caches
pvcreate /dev/sdc
pvcreate /dev/sdd
pvcreate /dev/sde
pvcreate /dev/sdf
vgcreate plot /dev/sdc /dev/sdd /dev/sde /dev/sdf
pvcreate /dev/nvme0n1
pvcreate /dev/nvme1n1
vgextend plot /dev/nvme0n1
vgextend plot /dev/nvme1n1
lvcreate -l 100%free -n disk1 plot /dev/sdc
lvcreate -l 100%free -n disk2 plot /dev/sdd
lvcreate -l 100%free -n disk3 plot /dev/sde
lvcreate -l 100%free -n disk4 plot /dev/sdf
lvcreate -L 450G -n disk1_c plot /dev/nvme0n1
lvcreate -L 450M -n disk1_cm plot /dev/nvme0n1
lvcreate -L 450G -n disk2_c plot /dev/nvme0n1
lvcreate -L 450M -n disk2_cm plot /dev/nvme0n1
lvcreate -L 450G -n disk3_c plot /dev/nvme1n1
lvcreate -L 450M -n disk3_cm plot /dev/nvme1n1
lvcreate -L 450G -n disk4_c plot /dev/nvme1n1
lvcreate -L 450M -n disk4_cm plot /dev/nvme1n1
lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk1_cm plot/disk1_c -y
lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk2_cm plot/disk2_c -y
lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk3_cm plot/disk3_c -y
lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk4_cm plot/disk4_c -y
lvconvert --type cache --cachepool plot/disk1_c plot/disk1 -y
lvconvert --type cache --cachepool plot/disk2_c plot/disk2 -y
lvconvert --type cache --cachepool plot/disk3_c plot/disk3 -y
lvconvert --type cache --cachepool plot/disk4_c plot/disk4 -y
mkfs.ext4 /dev/mapper/plot-disk1
mkfs.ext4 /dev/mapper/plot-disk2
mkfs.ext4 /dev/mapper/plot-disk3
mkfs.ext4 /dev/mapper/plot-disk4
# for my use case I didn't want the default discard mount option (journaling would be disabled with tune2fs -O ^has_journal, as in the RAID0 example above)
tune2fs -o ^discard /dev/mapper/plot-disk1
tune2fs -o ^discard /dev/mapper/plot-disk2
tune2fs -o ^discard /dev/mapper/plot-disk3
tune2fs -o ^discard /dev/mapper/plot-disk4
mount /dev/mapper/plot-disk1 /data/plots/disk1
mount /dev/mapper/plot-disk2 /data/plots/disk2
mount /dev/mapper/plot-disk3 /data/plots/disk3
mount /dev/mapper/plot-disk4 /data/plots/disk4
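To confirm each volume picked up its cache pool (a quick check, not part of the original steps):
lvs -a -o lv_name,segtype,pool_lv,devices plot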
Monday, May 4, 2020
What gcc knows about your CPU
> gcc -v -E -x c /dev/null -o /dev/null -march=native 2>&1 | grep /cc1
> lscpu
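Another way to see which architecture and tuning flags -march=native expands to (output format varies by gcc version):
> gcc -march=native -Q --help=target | grep -E 'march|mtune'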