Friday, October 30, 2020

Check the current number of open files and the open files limit for a process

> for p in $(pidof java); do printf "$p: "; ls /proc/$p/fd | wc -l; done
54055: 93
54008: 9


> for p in $(pidof java); do printf "$p: "; cat /proc/$p/limits |grep open; done
54055: Max open files            65536                65536                files
54008: Max open files            65536                65536                files
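
A variant that prints the count and the soft limit together (a sketch using the same /proc files):

> for p in $(pidof java); do printf "%s: %s open, soft limit %s\n" "$p" "$(ls /proc/$p/fd | wc -l)" "$(awk '/Max open files/ {print $4}' /proc/$p/limits)"; done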

Friday, October 9, 2020

Compiler flags

Want to find the compiler flags supported by your CPU?

gcc -v -E -x c /dev/null -o /dev/null -march=native 2>&1 | grep /cc1
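
What -march=native resolves to can also be printed by gcc itself (assuming a reasonably recent gcc):

gcc -march=native -Q --help=target | grep -- '-march='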

Saturday, August 8, 2020

LVM RAID10 on 4 HDDs with SSD cache

sdc/sdd are SSDs; sde through sdh are 3TB HDDs

vgcreate data_sata1 /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh


lvcreate --type raid10 -m 1 -i 2 --stripesize 1024k -l100%free -n plots data_sata1 /dev/sde /dev/sdf /dev/sdg /dev/sdh


lvcreate --type raid1 -m 1 -l 100%free -n cache data_sata1 /dev/sdc /dev/sdd

lvconvert --type cache --cachevol cache --cachemode writeback --chunksize 1024 data_sata1/plots

mkfs.xfs /dev/mapper/data_sata1-plots
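
A quick check that the stripes and the cache attached as expected (field names per lvs):

lvs -a -o name,segtype,devices data_sata1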

Wednesday, July 29, 2020

LVM Looking up stripes and stripesize of logical volumes

lvs -o+lv_layout,stripes,stripesize
  LV    VG            Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert Layout     #Str Stripe
  data  fedora_node-2 -wi-ao---- 400.00g                                                     linear        1     0
  data  fedora_node-2 -wi-ao---- 400.00g                                                     linear        1     0
  root  fedora_node-2 -wi-ao---- <33.45g                                                     linear        1     0
  root  fedora_node-2 -wi-ao---- <33.45g                                                     linear        1     0
  swap  fedora_node-2 -wi-ao----  15.73g                                                     linear        1     0
  plots scratch_nvme1 rwi-aor---  <1.82t                                                     raid,raid0    2  1.00m
  plots scratch_sata1 rwi-aor---   2.91t                                                     raid,raid0    4  1.00m
  plots scratch_sata2 rwi-aor---  <1.46t                                                     raid,raid0    4  1.00m

Tuesday, June 23, 2020

Test if a mounted drive works, using timeout

timeout -k 10s -s SIGKILL 10 touch /mnt/disk1/foo

> This worked well with a stale NFS mount; still need to test whether it also works with a "hung" SATA/USB drive etc.
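
A sketch of wrapping this into a health check (same mount point; timeout exits non-zero, e.g. 124 or 137, when the command hangs):

if timeout -k 10s -s SIGKILL 10 touch /mnt/disk1/foo; then
    echo "mount is responsive"
else
    echo "mount appears hung (exit code $?)"
fi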

Find out which HBA disks are connected to


[root@node-2d current]# udevadm info --query=path --name /dev/sdd
/devices/pci0000:00/0000:00:03.2/0000:10:00.0/host10/port-10:0/end_device-10:0/target10:0:0/10:0:0:0/block/sdd

[root@node-2d current]# lspci -v -s 10:00.0
10:00.0 Serial Attached SCSI controller: Broadcom / LSI SAS3008 PCI-Express Fusion-MPT SAS-3 (rev 02)
        Subsystem: Broadcom / LSI SAS9300-8i
        Flags: bus master, fast devsel, latency 0, IRQ 152
        I/O ports at d000 [size=256]
        Memory at f6e40000 (64-bit, non-prefetchable) [size=64K]
        Memory at f6e00000 (64-bit, non-prefetchable) [size=256K]
        Expansion ROM at f6d00000 [disabled] [size=1M]
        Capabilities: [50] Power Management version 3
        Capabilities: [68] Express Endpoint, MSI 00
        Capabilities: [a8] MSI: Enable- Count=1/1 Maskable+ 64bit+
        Capabilities: [c0] MSI-X: Enable+ Count=96 Masked-
        Capabilities: [100] Advanced Error Reporting
        Capabilities: [1e0] Secondary PCI Express
        Capabilities: [1c0] Power Budgeting <?>
        Capabilities: [190] Dynamic Power Allocation <?>
        Capabilities: [148] Alternative Routing-ID Interpretation (ARI)
        Kernel driver in use: mpt3sas
        Kernel modules: mpt3sas


[root@node-2d current]# udevadm info --query=path --name /dev/sdc
/devices/pci0000:00/0000:00:01.2/0000:02:00.0/0000:03:0a.0/0000:0e:00.0/ata8/host7/target7:0:0/7:0:0:0/block/sdc


[root@node-2d current]# lspci -v -s 02:00.0
02:00.0 PCI bridge: Advanced Micro Devices, Inc. [AMD] Matisse Switch Upstream (prog-if 00 [Normal decode])
        Flags: bus master, fast devsel, latency 0, IRQ 24
        Bus: primary=02, secondary=03, subordinate=0e, sec-latency=0
        I/O behind bridge: 0000f000-0000ffff [size=4K]
        Memory behind bridge: f6200000-f69fffff [size=8M]
        Prefetchable memory behind bridge: 00000000f2400000-00000000f2cfffff [size=9M]
        Capabilities: [50] Power Management version 3
        Capabilities: [58] Express Upstream Port, MSI 00
        Capabilities: [a0] MSI: Enable- Count=1/1 Maskable- 64bit+
        Capabilities: [100] Vendor Specific Information: ID=0001 Rev=1 Len=010 <?>
        Capabilities: [270] Secondary PCI Express
        Capabilities: [370] L1 PM Substates
        Capabilities: [400] Data Link Feature <?>
        Capabilities: [410] Physical Layer 16.0 GT/s <?>
        Capabilities: [440] Lane Margining at the Receiver <?>
        Kernel driver in use: pcieport
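
To map every disk to its controller in one pass, the deepest PCI address in the udevadm path can be pulled out (a sketch, assuming /dev/sd? covers the disks of interest):

for d in /dev/sd?; do
    addr=$(udevadm info --query=path --name "$d" | grep -oE '[0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}\.[0-9]' | tail -1)
    printf "%s -> " "$d"
    lspci -s "$addr"
done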



Thursday, May 28, 2020

Wipe all signatures from a device

wipefs -a /dev/sda

Checking how many USB root hubs exist on the Nvidia Jetson Nano B01

root@jnode-1:~# lsusb -t

/:  Bus 02.Port 1: Dev 1, Class=root_hub, Driver=tegra-xusb/4p, 5000M
    |__ Port 1: Dev 2, If 0, Class=Hub, Driver=hub/4p, 5000M
        |__ Port 1: Dev 3, If 0, Class=Mass Storage, Driver=usb-storage, 5000M
        |__ Port 2: Dev 4, If 0, Class=Mass Storage, Driver=usb-storage, 5000M
        |__ Port 4: Dev 5, If 0, Class=Mass Storage, Driver=uas, 5000M
/:  Bus 01.Port 1: Dev 1, Class=root_hub, Driver=tegra-xusb/5p, 480M
    |__ Port 2: Dev 2, If 0, Class=Hub, Driver=hub/4p, 480M

Monday, May 25, 2020

Python 3.8 on CentOS 7

> yum -y groupinstall "Development Tools"
> yum -y install openssl-devel bzip2-devel libffi-devel wget

> cd /opt
> wget https://www.python.org/ftp/python/3.8.3/Python-3.8.3.tgz

> tar xvf Python-3.8.3.tgz
> cd Python-3.8.3/

> ./configure --enable-optimizations
> make altinstall

> python3.8 --version
Python 3.8.3
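
make altinstall deliberately skips the python3 symlinks, so the system python that yum depends on stays untouched. A quick sanity check (the venv path is illustrative):

> python3.8 -m venv /opt/venv38
> /opt/venv38/bin/pip --version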

Thursday, May 21, 2020

Dependencies to compile a 32-bit app on 64-bit Linux

yum -y install glibc-devel.i686 glibc-devel libstdc++-devel.i686

NTP service to keep time in sync

yum install ntp ntpdate ntp-doc -y
ntpdate europe.pool.ntp.org
systemctl start ntpd
systemctl enable ntpd
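
To confirm the daemon is actually syncing, ntpq lists the configured peers:

ntpq -p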

Writing to /dev/nullb0 using the null_blk block device driver

# modprobe null_blk
# ls /dev/null*
/dev/null  /dev/nullb0

# ls -l /dev/nullb0
brw-rw---- 1 root disk 251, 0 May 21 07:30 /dev/nullb0

# dd if=/dev/zero of=/dev/nullb0 count=1024 bs=1M
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB, 1.0 GiB) copied, 0.460911 s, 2.3 GB/s

# dd if=/dev/zero of=/dev/nullb0 count=1024 bs=1M conv=fsync
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB, 1.0 GiB) copied, 0.524639 s, 2.0 GB/s

# dd if=/dev/urandom of=/dev/nullb0 count=1024 bs=1M conv=fsync
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB, 1.0 GiB) copied, 17.4459 s, 61.5 MB/s
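
null_blk also takes module parameters (documented in the links below), e.g. two 4 GB devices with a 4 kB block size:

# modprobe -r null_blk
# modprobe null_blk nr_devices=2 gb=4 bs=4096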


Resources:
https://www.kernel.org/doc/Documentation/block/null_blk.txt
https://www.kernel.org/doc/html/latest/block/null_blk.html
https://zonedstorage.io/linux/nullblk/

Wednesday, May 20, 2020

CentOS 7 mounting an SMB share with user and password

mount -t cifs -o domain=testing,username=test_user,dir_mode=0777,file_mode=0777 //server.some.net/sharename$ /mnt/mediaflux-storage
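
To keep the password out of the process list and shell history, the same mount can read a credentials file (file path and password are placeholders):

cat > /root/.smbcred <<EOF
domain=testing
username=test_user
password=secret
EOF
chmod 600 /root/.smbcred
mount -t cifs -o credentials=/root/.smbcred,dir_mode=0777,file_mode=0777 //server.some.net/sharename$ /mnt/mediaflux-storage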

Monday, May 11, 2020

LVM Create RAID0 for 2 NVMe SSDs and 4 SATA HDDs

My example application is reading and writing 256 kilobyte blocks of data.

EXT4
stride = RAID stripe / filesystem block-size
stripe-width = stride * number of data bearing drives in RAID array


XFS
su = RAID stripe in kilobytes
sw = number of data bearing drives in RAID array
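
Worked example with the numbers used below: the RAID stripe size is 256 kB and the ext4 block size 4 kB, so stride = 256 / 4 = 64; with 4 data drives, stripe-width = 64 * 4 = 256, and with 2 drives, 64 * 2 = 128.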


RAID0 over 4x HDD
pvcreate /dev/sdc
pvcreate /dev/sdd
pvcreate /dev/sde
pvcreate /dev/sdf
vgcreate sata /dev/sdc /dev/sdd /dev/sde /dev/sdf -y
lvcreate --type raid0 -l 100%free --stripes 4 --stripesize 256 -n scratch sata -y

> EXT4
mkfs.ext4 -b 4096 -E stride=64,stripe-width=256 /dev/mapper/sata-scratch
tune2fs -O ^has_journal /dev/mapper/sata-scratch

> XFS
mkfs.xfs -b size=4096 -d su=256k,sw=4 /dev/mapper/sata-scratch


RAID0 over 2x NVME SSD
pvcreate /dev/nvme0n1
pvcreate /dev/nvme1n1
vgcreate nvme /dev/nvme0n1 /dev/nvme1n1 -y
lvcreate --type raid0 -l 100%free --stripes 2 --stripesize 256 -n scratch nvme -y

> EXT4
mkfs.ext4 -b 4096 -E stride=64,stripe-width=128 /dev/mapper/nvme-scratch
tune2fs -O ^has_journal /dev/mapper/nvme-scratch
tune2fs -o ^discard /dev/mapper/nvme-scratch

> XFS
mkfs.xfs -b size=4096 -d su=256k,sw=2 /dev/mapper/nvme-scratch


Resources:
https://uclibc.org/~aldot/mkfs_stride.html
https://gryzli.info/2015/02/26/calculating-filesystem-stride_size-and-stripe_width-for-best-performance-under-raid/
https://erikugel.wordpress.com/tag/raid0/
https://xfs.org/index.php/XFS_FAQ#Q:_How_to_calculate_the_correct_sunit.2Cswidth_values_for_optimal_performance


sysfs interface
---------------
/sys/block/<disk>/alignment_offset
/sys/block/<disk>/<partition>/alignment_offset
/sys/block/<disk>/queue/physical_block_size
/sys/block/<disk>/queue/logical_block_size
/sys/block/<disk>/queue/minimum_io_size
/sys/block/<disk>/queue/optimal_io_size

https://people.redhat.com/msnitzer/docs/io-limits.txt

LVM Create HDD volumes with NVMe caches



pvcreate /dev/sdc
pvcreate /dev/sdd
pvcreate /dev/sde
pvcreate /dev/sdf
vgcreate plot /dev/sdc /dev/sdd /dev/sde /dev/sdf
pvcreate /dev/nvme0n1
pvcreate /dev/nvme1n1

vgextend plot /dev/nvme0n1
vgextend plot /dev/nvme1n1

lvcreate -l 100%free -n disk1 plot /dev/sdc
lvcreate -l 100%free -n disk2 plot /dev/sdd
lvcreate -l 100%free -n disk3 plot /dev/sde
lvcreate -l 100%free -n disk4 plot /dev/sdf

lvcreate -L 450G -n disk1_c plot /dev/nvme0n1
lvcreate -L 450M -n disk1_cm plot /dev/nvme0n1

lvcreate -L 450G -n disk2_c plot /dev/nvme0n1
lvcreate -L 450M -n disk2_cm plot /dev/nvme0n1

lvcreate -L 450G -n disk3_c plot /dev/nvme1n1
lvcreate -L 450M -n disk3_cm plot /dev/nvme1n1

lvcreate -L 450G -n disk4_c plot /dev/nvme1n1
lvcreate -L 450M -n disk4_cm plot /dev/nvme1n1

lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk1_cm plot/disk1_c -y
lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk2_cm plot/disk2_c -y
lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk3_cm plot/disk3_c -y
lvconvert --type cache-pool --cachemode writethrough --poolmetadata plot/disk4_cm plot/disk4_c -y

lvconvert --type cache --cachepool plot/disk1_c plot/disk1 -y
lvconvert --type cache --cachepool plot/disk2_c plot/disk2 -y
lvconvert --type cache --cachepool plot/disk3_c plot/disk3 -y
lvconvert --type cache --cachepool plot/disk4_c plot/disk4 -y

mkfs.ext4 /dev/mapper/plot-disk1
mkfs.ext4 /dev/mapper/plot-disk2
mkfs.ext4 /dev/mapper/plot-disk3
mkfs.ext4 /dev/mapper/plot-disk4

# for my use case I didn't want journaling
tune2fs -O ^has_journal /dev/mapper/plot-disk1
tune2fs -O ^has_journal /dev/mapper/plot-disk2
tune2fs -O ^has_journal /dev/mapper/plot-disk3
tune2fs -O ^has_journal /dev/mapper/plot-disk4

mount /dev/mapper/plot-disk1 /data/plots/disk1
mount /dev/mapper/plot-disk2 /data/plots/disk2
mount /dev/mapper/plot-disk3 /data/plots/disk3
mount /dev/mapper/plot-disk4 /data/plots/disk4
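
A quick check that each LV picked up its cache (field names per lvs):

lvs -a -o name,segtype,data_percent,devices plot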

Monday, May 4, 2020

What gcc knows about your CPU

> gcc -v -E -x c /dev/null -o /dev/null -march=native 2>&1 | grep /cc1
> lscpu

Wednesday, April 29, 2020

Fedora 32 change max open files

#soft limit
> ulimit -Sn
1024

#hard limit
> ulimit -Hn
524288

Add to /etc/sysctl.conf

fs.inotify.max_user_watches=524288
fs.file-max=100000


Add to /etc/security/limits.conf

*     soft  nofile 100000
*     hard  nofile 524288
root  soft  nofile 100000
root  hard  nofile 524288
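
The sysctl entries can also be applied immediately without a reboot; the limits.conf entries take effect on the next login session:

sysctl -p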


Or simply reboot.
https://www.99ideas.in/blog-post/increasing-file-descriptors-and-open-files-limit-in-centos-7/

Tuesday, April 28, 2020

Entware oPKG on Synology NAS

To install opkg:
  1. Open Package Center, then Settings
  2. Go to the General tab and, under Trust Level, click "Any Publisher"
  3. Go to the Package Sources tab, then add https://www.cphub.net/ to the package sources
  4. Search for "Easy Bootstrap Installer" and install it
  5. Choose "Entware oPKG"
  6. sudo -i
  7. opkg list

Thursday, April 16, 2020

Update the default Python on Debian/Ubuntu


Welcome to Ubuntu 18.04.4 LTS (GNU/Linux 4.14.173-173 armv7l)

> root@hc1-node-1:~# update-alternatives --list python
update-alternatives: error: no alternatives for python

> root@hc1-node-1:~# python -V
Python 2.7.17

> root@hc1-node-1:~# update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1
update-alternatives: using /usr/bin/python2.7 to provide /usr/bin/python (python) in auto mode

> root@hc1-node-1:~# update-alternatives --install /usr/bin/python python /usr/bin/python3.6 2
update-alternatives: using /usr/bin/python3.6 to provide /usr/bin/python (python) in auto mode

> root@hc1-node-1:~# update-alternatives --install /usr/bin/python python /usr/bin/python3.7 3
update-alternatives: using /usr/bin/python3.7 to provide /usr/bin/python (python) in auto mode

> root@hc1-node-1:~# update-alternatives --list python
/usr/bin/python2.7
/usr/bin/python3.6
/usr/bin/python3.7


> root@hc1-node-1:~# update-alternatives --config python
There are 3 choices for the alternative python (providing /usr/bin/python).

  Selection    Path                Priority   Status
------------------------------------------------------------
* 0            /usr/bin/python3.7   3         auto mode
  1            /usr/bin/python2.7   1         manual mode
  2            /usr/bin/python3.6   2         manual mode
  3            /usr/bin/python3.7   3         manual mode

Press <enter> to keep the current choice[*], or type selection number:

> root@hc1-node-1:~# python -V
Python 3.7.5
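
An entry can later be deregistered the same way (e.g. after uninstalling python2.7):

update-alternatives --remove python /usr/bin/python2.7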

Tuesday, March 10, 2020

Mounting an iso file in Linux

mount -o loop /tmp/dd.iso /mnt

How do I find all files containing specific text on Linux?


grep -rnw '/path/to/somewhere/' -e 'pattern'
-r or -R is recursive,
-n is line number, and
-w stands for match the whole word.
-l (lower-case L) can be added to just give the file name of matching files.
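
For example, listing only the files under a directory that contain a given word (path and pattern are illustrative):

grep -rlw '/etc/ssh/' -e 'PermitRootLogin'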

Sunday, March 8, 2020

Oracle JDK 8 on Fedora 31

The issue was that Oracle's rpm requires paths under /bin, which Fedora 31 packages no longer provide (the tools live under /usr/bin):
> dnf install  /tmp/jdk-11.0.6_linux-x64_bin.rpm
Last metadata expiration check: 0:24:25 ago on Sun 08 Mar 2020 11:43:22 PM EDT.
Error:
 Problem: conflicting requests
  - nothing provides /bin/basename needed by jdk-11.0.6-2000:11.0.6-ga.x86_64
  - nothing provides /bin/cp needed by jdk-11.0.6-2000:11.0.6-ga.x86_64
  - nothing provides /bin/ls needed by jdk-11.0.6-2000:11.0.6-ga.x86_64
  - nothing provides /bin/mkdir needed by jdk-11.0.6-2000:11.0.6-ga.x86_64
  - nothing provides /bin/mv needed by jdk-11.0.6-2000:11.0.6-ga.x86_64
  - nothing provides /bin/pwd needed by jdk-11.0.6-2000:11.0.6-ga.x86_64
  - nothing provides /bin/sort needed by jdk-11.0.6-2000:11.0.6-ga.x86_64
(try to add '--skip-broken' to skip uninstallable packages)


And here is the workaround: install with rpm --nodeps, since the required tools do exist under /usr/bin.

> rpm -Uvh --nodeps /tmp/jdk-8u241-linux-x64.rpm
warning: /tmp/jdk-8u241-linux-x64.rpm: Header V3 RSA/SHA256 Signature, key ID ec551f03: NOKEY
Verifying...                          ################################# [100%]
Preparing...                          ################################# [100%]
Updating / installing...
   1:jdk1.8-2000:1.8.0_241-fcs        ################################# [100%]
Unpacking JAR files...
        tools.jar...
        plugin.jar...
        javaws.jar...
        deploy.jar...
        rt.jar...
        jsse.jar...
        charsets.jar...
        localedata.jar...

> alternatives --config java

There are 2 programs which provide 'java'.

  Selection    Command
-----------------------------------------------
 + 1           java-11-openjdk.x86_64 (/usr/lib/jvm/java-11-openjdk-11.0.6.10-0.fc31.x86_64/bin/java)
*  2           /usr/java/jdk1.8.0_241-amd64/jre/bin/java

Enter to keep the current selection[+], or type selection number: 2

> java -version
java version "1.8.0_241"
Java(TM) SE Runtime Environment (build 1.8.0_241-b07)
Java HotSpot(TM) 64-Bit Server VM (build 25.241-b07, mixed mode)

Saturday, February 29, 2020

Centos NFS Firewalld rules

firewall-cmd --permanent --add-service=nfs
firewall-cmd --permanent --add-service=mountd
firewall-cmd --permanent --add-service=rpc-bind
firewall-cmd --reload
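
To verify after the reload:

firewall-cmd --list-services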

Thursday, February 13, 2020

Iozone & dd on the Gigabyte AORUS NVMe Gen4 1TB SSD

I am going to run a few tests on this drive. There aren't really big performance differences between using ext4 with or without a journal, and using kernel 4.18.0.x vs 5.5.3 doesn't make much of a difference either.

This runs on an Asus TUF Gaming X570-Plus motherboard (https://www.asus.com/us/Motherboards/TUF-GAMING-X570-PLUS/), an AMD Ryzen 5 3600 (https://www.amd.com/en/products/cpu/amd-ryzen-5-3600), and 64 GB of G.Skill RAM (F4-3200C16-32GTZN).

The test drive is from Gigabyte and 1TB large: https://www.gigabyte.com/us/Solid-State-Drive/AORUS-NVMe-Gen4-SSD-1TB


Iozone: Performance Test of File I/O
Version $Revision: 3.489 $
Compiled for 64 bit mode.
Build: linux-AMD64

Most importantly, I am re-mounting the drive for each test, which clears the buffer cache. Running watch -n 0.2 free -h I can observe the desired effect.
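
The remount between runs was along these lines (assuming the device and mount point used below):

umount /apps
mount /dev/nvme0n1 /apps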

The exact command used to run the tests is:
./iozone -e -r 4 -r 8 -r 16 -r 32 -r 64 -r 128 -r 512 -r 1024 -r 2048 -r 4096 -r 8192 -r 16384 -s 4g -i 0 -i 1 -i 2 -i 8 -f /apps/tfile -U /apps


CentOS Linux release 8.1.1911 (Core)
4.18.0-147.5.1.el8_1.x86_64

mkfs.ext4 /dev/nvme0n1

testfile MB  reclen kB    write  rewrite    read   reread   random read   random write
4096 4 1609 1892 2391 2291 98 1672
4096 8 1669 1972 2386 2289 167 1831
4096 16 1700 2049 2398 2299 256 1947
4096 32 1720 2075 2395 2302 314 1102
4096 64 1733 2079 2301 2303 503 2048
4096 128 1723 2061 2303 2298 728 2055
4096 512 1659 1953 2406 2309 1747 1969
4096 1024 1657 1975 3458 3405 2176 1968
4096 2048 1661 1972 2304 2307 2796 1961
4096 4096 1669 1964 3879 3876 3255 1955
4096 8192 1668 1952 4143 3866 3732 1948
4096 16384 1605 1898 4082 4079 3824 1901

mkfs.ext4 /dev/nvme0n1
tune2fs -O ^has_journal /dev/nvme0n1

testfile MB  reclen kB    write  rewrite    read   reread   random read   random write
4096 4 1674 1999 2300 2298 97 1734
4096 8 1746 2086 2302 2302 167 1911
4096 16 1787 2124 2302 2302 254 2018
4096 32 1808 2157 2302 2305 314 2105
4096 64 1812 2182 2304 2298 502 2144
4096 128 1801 2153 2305 2299 808 2122
4096 512 1735 2037 2315 2378 1819 2028
4096 1024 1735 2046 3500 3456 2157 2034
4096 2048 1748 2036 2310 2307 2778 2042
4096 4096 1745 2031 3885 3903 3249 2030
4096 8192 1731 2016 4008 3909 3741 2028
4096 16384 1674 1977 4072 4059 3997 1979

mkfs.ext4 /dev/nvme0n1
tune2fs -O ^has_journal /dev/nvme0n1
tune2fs -o ^discard /dev/nvme0n1

testfile MB  reclen kB    write  rewrite    read   reread   random read   random write
4096 4 1659 1980 2394 2261 96 1735
4096 8 1737 2082 2342 2291 117 1922
4096 16 1724 2133 2398 2393 252 2028
4096 32 1805 2157 2398 2298 310 2100
4096 64 1815 2175 2302 2297 493 2133
4096 128 1807 2087 2388 2295 838 2142
4096 512 1736 2048 2439 2313 1756 2038
4096 1024 1736 2035 3471 3325 2115 2047
4096 2048 1732 2043 2403 2404 2680 2050
4096 4096 1745 2046 3879 3896 3260 2045
4096 8192 1739 2027 3975 3884 3727 2032
4096 16384 1675 1978 4071 4055 3983 1985

CentOS Linux release 8.1.1911 (Core)
5.5.3-1.el8.elrepo.x86_64

mkfs.ext4 /dev/nvme0n1

testfile MB  reclen kB    write  rewrite    read   reread   random read   random write
4096 4 1583 1857 2397 2297 97 1647
4096 8 1639 1957 2399 2258 167 1819
4096 16 1687 1981 2386 2385 248 1920
4096 32 1697 2021 2393 2384 316 2000
4096 64 1722 2048 2395 2397 509 2031
4096 128 1711 2010 2397 2298 841 2016
4096 512 1644 1889 2713 2585 1934 1939
4096 1024 1659 1919 3490 3468 2258 1942
4096 2048 1649 1936 2402 2404 2766 1930
4096 4096 1649 1880 3920 3932 3320 1935
4096 8192 1642 1867 4035 3986 3727 1914
4096 16384 1598 1871 4124 4118 4064 1870

mkfs.ext4 /dev/nvme0n1
tune2fs -O ^has_journal /dev/nvme0n1

testfile MB  reclen kB    write  rewrite    read   reread   random read   random write
4096 4 1660 1938 2395 2392 99 1716
4096 8 1723 2025 2397 2389 166 1887
4096 16 1770 2028 2305 2393 255 1997
4096 32 1781 2067 2302 2390 313 2074
4096 64 1800 2143 2396 2395 518 2122
4096 128 1792 2123 2398 2393 800 2122
4096 512 1724 1974 2423 2399 1821 2009
4096 1024 1725 2019 3483 3465 2210 2012
4096 2048 1727 1964 2401 2404 2818 2006
4096 4096 1729 2023 3909 3931 3307 1997
4096 8192 1718 2005 4038 4040 3743 1971
4096 16384 1660 1912 4136 4111 4071 1938

The exact command used to run the tests this time is:
./iozone -r 4 -r 8 -r 16 -r 32 -r 64 -r 128 -r 512 -r 1024 -r 2048 -r 4096 -r 8192 -r 16384 -s 4g -i 0 -i 1 -i 2 -i 8 -f /apps/tfile -U /apps

The biggest difference is that the -e option is not specified here; without it, flush (fsync, fflush) is excluded from the timing calculations, which explains the much higher write numbers below.

mkfs.ext4 /dev/nvme0n1
tune2fs -O ^has_journal /dev/nvme0n1

testfile MB  reclen kB    write  rewrite    read   reread   random read   random write
4096 4 2782 3831 2397 2389 99 2939
4096 8 2997 4164 2397 2390 168 3507
4096 16 3124 4427 2398 2395 259 3974
4096 32 3211 4523 2400 2393 309 4314
4096 64 3296 4589 2398 2395 507 4467
4096 128 3217 4574 2398 2392 799 4537
4096 512 3037 4079 2487 2402 1808 4032
4096 1024 3022 4093 3499 3464 2292 4088
4096 2048 3025 4063 2406 2404 2857 4068
4096 4096 3023 4065 3917 3934 3282 4067
4096 8192 3003 4032 4029 4024 3708 4061
4096 16384 2836 3804 4120 4110 3885 3817


dd if=/dev/zero of=/apps/file1.txt count=50240 bs=1M conv=fsync
50240+0 records in
50240+0 records out
52680458240 bytes (53 GB, 49 GiB) copied, 26.8013 s, 2.0 GB/s

dd if=/dev/zero of=/apps/file2.txt count=50240 bs=1M
50240+0 records in
50240+0 records out
52680458240 bytes (53 GB, 49 GiB) copied, 26.3247 s, 2.0 GB/s