How to Set Up High-Performance Lustre Shared Storage for HPC
# For the lab, let's disable SELinux and the firewall.
sed -i '/^SELINUX=/c\SELINUX=disabled' /etc/sysconfig/selinux
systemctl disable firewalld
systemctl stop firewalld
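The sed edit above only takes effect on the next boot; to drop enforcement immediately on the running system:
setenforce 0   # permissive right away; the "disabled" setting applies after reboot
getenforce     # should now report Permissive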
# Add the Whamcloud Lustre and e2fsprogs repos.
[root@alma-lustre yum.repos.d]# yum update
Last metadata expiration check: 0:00:26 ago on Wed Feb 11 20:31:55 2026.
Dependencies resolved.
Nothing to do.
Complete!
[root@alma-lustre yum.repos.d]# cat lustre.repo
[lustre-server]
name=Lustre 2.17.0 Server
baseurl=https://downloads.whamcloud.com/public/lustre/lustre-2.17.0/el9.7/server
enabled=1
gpgcheck=0
#exclude=*debuginfo*
[lustre-client]
name=Lustre 2.17.0 Client
baseurl=https://downloads.whamcloud.com/public/lustre/lustre-2.17.0/el9.7/client
enabled=1
gpgcheck=0
#exclude=*debuginfo*
[e2fsprogs-wc]
name=e2fsprogs-1.47.3.wc2
baseurl=https://downloads.whamcloud.com/public/e2fsprogs/1.47.3.wc2/el9
enabled=1
gpgcheck=0
#exclude=*debuginfo*
# Check the installed e2fsprogs package, then install the Lustre server stack.
rpm -qa e2fsprogs
dnf install lustre-server -y
reboot  # to boot into the Lustre-patched kernel
[root@alma-lustre ~]# uname -a
Linux alma-lustre.tinihub.lan 5.14.0-611.13.1_lustre.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Tue Dec 30 01:49:33 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
# Check the Lustre version and that the modules load
[root@alma-lustre ~]# lctl get_param version
version=2.17.0
[root@alma-lustre ~]# modprobe lustre
[root@alma-lustre ~]# lsmod | grep lustre
lustre 1314816 0
mdc 339968 1 lustre
lov 389120 2 mdc,lustre
lmv 262144 1 lustre
fld 102400 4 fid,lov,lmv,lustre
ptlrpc 3235840 7 fld,osc,fid,lov,mdc,lmv,lustre
obdclass 3903488 8 fld,osc,fid,ptlrpc,lov,mdc,lmv,lustre
lnet 1114112 6 osc,obdclass,ptlrpc,ksocklnd,lmv,lustre
libcfs 176128 11 fld,lnet,osc,fid,obdclass,ptlrpc,ksocklnd,lov,mdc,lmv,lustre
[root@alma-lustre ~]#
# Check network
[root@alma-lustre ~]# lnetctl net show
net:
    - net type: lo
      local NI(s):
        - nid: 0@lo
          status: up
    - net type: tcp
      local NI(s):
        - nid: 192.168.1.103@tcp
          status: up
          interfaces:
              0: ens18
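If the tcp NI does not show up here, LNet can be started and the interface bound by hand; a minimal sketch, assuming ens18 is the data interface as above:
lnetctl lnet configure                   # start LNet if it is not already running
lnetctl net add --net tcp --if ens18     # bind the tcp network to the interface
lnetctl net show                         # re-check that the NID is up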
# Generate HostID
#!/bin/bash
# Generate a 4-byte binary hostid if /etc/hostid does not exist
HOSTID_FILE="/etc/hostid"
if [ ! -f "$HOSTID_FILE" ]; then
    # Generate a random 4-byte hostid
    head -c 4 /dev/urandom > "$HOSTID_FILE"
    chmod 644 "$HOSTID_FILE"
    echo "Generated new hostid: $(hostid)"
else
    echo "/etc/hostid already exists: $(hostid)"
fi
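A quick sanity check that the kernel and the file agree:
hostid                     # value the kernel derives from /etc/hostid
od -An -tx1 /etc/hostid    # the raw 4 bytes on disk (byte order may differ)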
# Create lustre.conf & verify
cat >/etc/modprobe.d/lustre.conf <<EOF
options lnet networks=tcp
EOF
# cat /etc/modprobe.d/lustre.conf
options lnet networks=tcp
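Because the modules were already loaded earlier, the new networks= option is only picked up after a module reload; a sketch that is safe only while no Lustre targets are mounted:
lustre_rmmod       # unload all Lustre and LNet modules
modprobe lustre
lctl network up    # bring LNet back up with the new option
lnetctl net show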
# Identify the available drives
[root@alma-lustre ~]# lsblk -o NAME,SIZE,TYPE,MOUNTPOINT
NAME     SIZE TYPE MOUNTPOINT
sda       32G disk
├─sda1   600M part /boot/efi
├─sda2     1G part /boot
└─sda3  27.2G part /
sdb        5G disk
sdc        5G disk
sdd        5G disk
# We will use:
sdb → MDT + MGS
sdc → OST0
sdd → OST1
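If these disks carry leftover signatures from earlier experiments, mkfs.lustre may refuse to format them; wiping them first avoids that (destructive, lab disks only):
# DESTRUCTIVE: erase any old filesystem signatures from the lab disks
for disk in sdb sdc sdd; do
    wipefs -a /dev/$disk
done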
# Create MDT (with MGS)
mkfs.lustre \
    --mdt \
    --mgs \
    --fsname=lustrefs \
    --index=0 \
    /dev/sdb
# Create OST0 & OST1 on the remaining drives.
FSNAME="lustrefs"
MGSNID="192.168.1.103@tcp"
INDEX=0
for disk in sdc sdd; do
    echo "Creating OST${INDEX} on /dev/${disk}..."
    mkfs.lustre \
        --ost \
        --reformat \
        --fsname=$FSNAME \
        --mgsnode=$MGSNID \
        --index=$INDEX \
        /dev/$disk
    INDEX=$((INDEX+1))
done
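To double-check what was written to each target without changing anything:
# Print each target's on-disk Lustre configuration (read-only dry run)
for dev in sdb sdc sdd; do
    tunefs.lustre --dryrun /dev/$dev
done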
# Create Mount Points
mkdir -p /mnt/mdt
mkdir -p /mnt/ost0
mkdir -p /mnt/ost1
# Mount the MDT first (it also hosts the MGS)
mount -t lustre /dev/sdb /mnt/mdt
# Mount OSTs
mount -t lustre /dev/sdc /mnt/ost0
mount -t lustre /dev/sdd /mnt/ost1
[root@alma-lustre ~]# mount |grep -i lustre
/dev/sdb on /mnt/mdt type lustre (rw,svname=lustrefs-MDT0000,mgs,mgsnode=192.168.1.103@tcp,osd=osd-ldiskfs)
/dev/sdd on /mnt/ost1 type lustre (rw,svname=lustrefs-OST0001,mgsnode=192.168.1.103@tcp:192.168.1.103@tcp,osd=osd-ldiskfs)
/dev/sdc on /mnt/ost0 type lustre (rw,svname=lustrefs-OST0000,mgsnode=192.168.1.103@tcp:192.168.1.103@tcp,osd=osd-ldiskfs)
[root@alma-lustre ~]# cat /etc/fstab |grep -i lustre
/dev/sdb /mnt/mdt lustre rw,svname=lustrefs-MDT0000,mgs,osd=osd-ldiskfs,user_xattr,errors=remount-ro 0 0
/dev/sdc /mnt/ost0 lustre rw,svname=lustrefs-OST0000,mgsnode=192.168.1.103@tcp:192.168.1.103@tcp,osd=osd-ldiskfs 0 0
/dev/sdd /mnt/ost1 lustre rw,svname=lustrefs-OST0001,mgsnode=192.168.1.103@tcp:192.168.1.103@tcp,osd=osd-ldiskfs 0 0
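At boot the network must be up before these targets can mount, so marking the entries _netdev is a common safeguard; a sketch using the same devices (mount.lustre reads the real target configuration from disk, so the extra options above are not strictly required):
/dev/sdb /mnt/mdt  lustre _netdev 0 0
/dev/sdc /mnt/ost0 lustre _netdev 0 0
/dev/sdd /mnt/ost1 lustre _netdev 0 0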
# Check Status
[root@alma-lustre ~]# lctl get_param obdfilter.*.stats
obdfilter.lustrefs-OST0000.stats=
snapshot_time 1771010248.380246721 secs.nsecs
start_time 1771005092.521764596 secs.nsecs
elapsed_time 5155.858482125 secs.nsecs
write_bytes 26 samples [bytes] 24576 4194304 104857600 439599700639744
write 26 samples [usecs] 79 9942 116171 604411327
statfs 1009 samples [usecs] 0 26 7812 65168
get_info 1 samples [usecs] 3502 3502 3502 12264004
obdfilter.lustrefs-OST0001.stats=
snapshot_time 1771010248.380300571 secs.nsecs
start_time 1771005092.616798783 secs.nsecs
elapsed_time 5155.763501788 secs.nsecs
statfs 1010 samples [usecs] 0 18 7553 58151
get_info 1 samples [usecs] 3042 3042 3042 9253764
[root@alma-lustre ~]# lctl get_param mdt.*.recovery_status
mdt.lustrefs-MDT0000.recovery_status=status: INACTIVE
[root@alma-lustre ~]#
[root@alma-lustre ~]# lctl get_param obdfilter.*.recovery_status
obdfilter.lustrefs-OST0000.recovery_status=
status: COMPLETE
recovery_start: 1771005098
recovery_duration: 0
completed_clients: 1/1
replayed_requests: 0
last_transno: 8589934593
VBR: DISABLED
IR: DISABLED
obdfilter.lustrefs-OST0001.recovery_status=
status: COMPLETE
recovery_start: 1771005094
recovery_duration: 0
completed_clients: 1/1
replayed_requests: 0
last_transno: 8589934595
VBR: DISABLED
IR: DISABLED
[root@alma-lustre ~]#
# Now verify the installation by mounting the filesystem as a client (here on the same node):
[root@alma-lustre ~]# mkdir -p /mnt/lustre
[root@alma-lustre ~]# mount -t lustre 192.168.1.103@tcp:/lustrefs /mnt/lustre
[root@alma-lustre ~]# df -h |grep -i lustrefs
192.168.1.103@tcp:/lustrefs 9.2G 2.8M 8.7G 1% /mnt/lustre
[root@alma-lustre ~]# lctl dl
0 UP osd-ldiskfs lustrefs-OST0000-osd lustrefs-OST0000-osd_UUID 5
1 UP mgc MGC192.168.1.103@tcp cfac27d4-c63d-4ce6-ab80-712206c27355 5
2 UP ost OSS OSS_uuid 3
3 UP obdfilter lustrefs-OST0000 lustrefs-OST0000_UUID 5
4 UP lwp lustrefs-MDT0000-lwp-OST0000 lustrefs-MDT0000-lwp-OST0000_UUID 5
5 UP osd-ldiskfs lustrefs-MDT0000-osd lustrefs-MDT0000-osd_UUID 10
6 UP mgs MGS MGS 5
7 UP mds MDS MDS_uuid 3
8 UP lod lustrefs-MDT0000-mdtlov lustrefs-MDT0000-mdtlov_UUID 4
9 UP mdt lustrefs-MDT0000 lustrefs-MDT0000_UUID 11
10 UP mdd lustrefs-MDD0000 lustrefs-MDD0000_UUID 4
11 UP qmt lustrefs-QMT0000 lustrefs-QMT0000_UUID 4
12 UP osp lustrefs-OST0000-osc-MDT0000 lustrefs-MDT0000-mdtlov_UUID 5
13 UP osp lustrefs-OST0001-osc-MDT0000 lustrefs-MDT0000-mdtlov_UUID 5
14 UP lwp lustrefs-MDT0000-lwp-MDT0000 lustrefs-MDT0000-lwp-MDT0000_UUID 5
15 UP osd-ldiskfs lustrefs-OST0001-osd lustrefs-OST0001-osd_UUID 5
16 UP obdfilter lustrefs-OST0001 lustrefs-OST0001_UUID 5
17 UP lwp lustrefs-MDT0000-lwp-OST0001 lustrefs-MDT0000-lwp-OST0001_UUID 5
18 UP lov lustrefs-clilov-ffff97ba86b69800 530d0b2d-046e-4c0e-b888-50b3a83589bc 4
19 UP lmv lustrefs-clilmv-ffff97ba86b69800 530d0b2d-046e-4c0e-b888-50b3a83589bc 5
20 UP mdc lustrefs-MDT0000-mdc-ffff97ba86b69800 530d0b2d-046e-4c0e-b888-50b3a83589bc 5
21 UP osc lustrefs-OST0000-osc-ffff97ba86b69800 530d0b2d-046e-4c0e-b888-50b3a83589bc 5
22 UP osc lustrefs-OST0001-osc-ffff97ba86b69800 530d0b2d-046e-4c0e-b888-50b3a83589bc 5
[root@alma-lustre ~]# lfs df
UUID 1K-blocks Used Available Use% Mounted on
lustrefs-MDT0000_UUID 2895200 1636 2616612 1% /mnt/lustre[MDT:0]
lustrefs-OST0000_UUID 4799268 1392 4519348 1% /mnt/lustre[OST:0]
lustrefs-OST0001_UUID 4799268 1392 4519348 1% /mnt/lustre[OST:1]
filesystem_summary: 9598536 2784 9038696 1% /mnt/lustre
# Default stripe layout before any writes:
[root@alma-lustre ~]# lfs getstripe /mnt/lustre
/mnt/lustre
stripe_count: 1 stripe_size: 4194304 pattern: 0 stripe_offset: -1
# Testing Writes & Stripe Size:
[root@alma-lustre ~]# dd if=/dev/zero of=/mnt/lustre/testfile bs=1M count=100
100+0 records in
100+0 records out
104857600 bytes (105 MB, 100 MiB) copied, 0.200842 s, 522 MB/s
[root@alma-lustre ~]# lfs getstripe /mnt/lustre/testfile
/mnt/lustre/testfile
lmm_stripe_count: 1
lmm_stripe_size: 4194304
lmm_pattern: raid0
lmm_layout_gen: 0
lmm_stripe_offset: 0
    obdidx       objid        objid         group
         0          34         0x22   0x240000400
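The test file landed on a single OST (obdidx 0). To spread large files across both OSTs, raise the stripe count on a directory; a short sketch (-1 means stripe over all available OSTs):
mkdir -p /mnt/lustre/striped
lfs setstripe -c -1 /mnt/lustre/striped   # new files in here stripe across all OSTs
lfs getstripe -d /mnt/lustre/striped      # confirm the directory's default layout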
[root@alma-lustre ~]# lfs quota
Disk quotas for usr root (uid 0):
Filesystem kbytes bquota blimit bgrace files iquota ilimit igrace
/mnt/lustre 106732 0 0 - 266 0 0 -
Disk quotas for grp root (gid 0):
Filesystem kbytes bquota blimit bgrace files iquota ilimit igrace
/mnt/lustre 106732 0 0 - 266 0 0 -
[root@alma-lustre ~]# lfs osts
OBDS:
0: lustrefs-OST0000_UUID ACTIVE
1: lustrefs-OST0001_UUID ACTIVE
##### Installing the Client:
cat /etc/yum.repos.d/lustre.repo
[lustre-client]
name=Lustre 2.17.0 Client
baseurl=https://downloads.whamcloud.com/public/lustre/lustre-2.17.0/el9.7/client
enabled=1
gpgcheck=0
#exclude=*debuginfo*
# Install lustre-client...
[root@alma-hpc2 ~]# yum install lustre-client
Last metadata expiration check: 0:48:05 ago on Fri Feb 13 19:05:05 2026.
Dependencies resolved.
===========================================================================================
Package Architecture Version Repository Size
===========================================================================================
Installing:
lustre-client x86_64 2.17.0-1.el9 lustre-client 851 k
Installing dependencies:
kmod-lustre-client x86_64 2.17.0-1.el9 lustre-client 2.9 M
Transaction Summary
===========================================================================================
Install 2 Packages
After installation, reboot so the client runs the kernel that the kmod-lustre-client modules were built against.
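Then mount the filesystem from the client, using the same MGS NID and fsname as on the server:
modprobe lustre
mkdir -p /mnt/lustre
mount -t lustre 192.168.1.103@tcp:/lustrefs /mnt/lustre
df -h /mnt/lustre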