Skip to Content

High Performance Storage

Lustre
February 14, 2026 by
Tinihub Inc.
How to Set Up High-Performance Lustre Shared Storage for HPC


Lustre is an open-source, parallel distributed file system designed to power the world’s most data-intensive high-performance computing (HPC) and AI environments.

By decoupling metadata management from data storage, Lustre allows thousands of client nodes to access petabytes of data simultaneously with aggregate throughput reaching terabytes per second. Its unique architecture uses Metadata Servers (MDS) to handle namespace operations and Object Storage Servers (OSS) to manage raw data "striping" across multiple targets, effectively eliminating the bottlenecks found in traditional NAS or SAN solutions. Whether it is accelerating large-scale genomic sequencing, climate modeling, or training massive generative AI models, Lustre remains the industry standard for organizations that require extreme scalability and POSIX-compliant, low-latency performance without vendor lock-in.


# For this lab only, disable SELinux and firewalld (do NOT do this in production).
# /etc/selinux/config is the canonical config file; /etc/sysconfig/selinux is
# merely a symlink to it on EL systems.
sed -i '/^SELINUX=/c\SELINUX=disabled' /etc/selinux/config
# The config change above only takes effect after a reboot — switch to
# permissive mode immediately as well (ignore failure if already disabled).
setenforce 0 || true
# --now combines "disable at boot" with "stop the running service".
systemctl disable --now firewalld

# Add repos..
[root@alma-lustre yum.repos.d]# yum update
Last metadata expiration check: 0:00:26 ago on Wed Feb 11 20:31:55 2026.
Dependencies resolved.
Nothing to do.
Complete!

[root@alma-lustre yum.repos.d]# cat lustre.repo
[lustre-server]
name=Lustre 2.17.0 Server
baseurl=https://downloads.whamcloud.com/public/lustre/lustre-2.17.0/el9.7/server
enabled=1
gpgcheck=0

[lustre-client]
name=Lustre 2.17.0 Client
baseurl=https://downloads.whamcloud.com/public/lustre/lustre-2.17.0/el9.7/client
enabled=1
gpgcheck=0

[e2fsprogs-wc]
name=e2fsprogs-1.47.3.wc2
baseurl=https://downloads.whamcloud.com/public/e2fsprogs/1.47.3.wc2/el9
enabled=1
gpgcheck=0

# Verify which e2fsprogs build is installed (should be the whamcloud .wc build
# from the e2fsprogs-wc repo, needed for ldiskfs support).
rpm -qa e2fsprogs
# Pull in the Lustre server stack: patched kernel, lustre kmods and utilities.
dnf install lustre-server -y
# Reboot so the Lustre-patched kernel becomes the running kernel.
# Note: "reboot now" is shutdown(8) syntax — plain "reboot" is correct here;
# a stray argument to reboot is passed through to the kernel/firmware.
reboot

[root@alma-lustre ~]# uname -a
Linux alma-lustre.tinihub.lan 5.14.0-611.13.1_lustre.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Tue Dec 30 01:49:33 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux

# Check modules are installed
[root@alma-lustre ~]# lctl get_param version
version=2.17.0
[root@alma-lustre ~]# modprobe lustre
[root@alma-lustre ~]# lsmod | grep lustre
lustre 1314816 0
mdc 339968 1 lustre
lov 389120 2 mdc,lustre
lmv 262144 1 lustre
fld 102400 4 fid,lov,lmv,lustre
ptlrpc 3235840 7 fld,osc,fid,lov,mdc,lmv,lustre
obdclass 3903488 8 fld,osc,fid,ptlrpc,lov,mdc,lmv,lustre
lnet 1114112 6 osc,obdclass,ptlrpc,ksocklnd,lmv,lustre
libcfs 176128 11 fld,lnet,osc,fid,obdclass,ptlrpc,ksocklnd,lov,mdc,lmv,lustre

# Check network
[root@alma-lustre ~]# lnetctl net show
net:
- net type: lo
local NI(s):
- nid: 0@lo
status: up
- net type: tcp
local NI(s):
- nid: 192.168.1.103@tcp
status: up
interfaces:
0: ens18

# Generate HostID (used by Lustre/ZFS multi-mount protection to identify the node)
#!/bin/bash
set -euo pipefail

HOSTID_FILE="/etc/hostid"

if [ ! -f "$HOSTID_FILE" ]; then
  # hostid is a 4-byte binary value; random bytes are sufficient for a lab node.
  head -c 4 /dev/urandom > "$HOSTID_FILE"
  chmod 644 "$HOSTID_FILE"
  echo "Generated new hostid: $(hostid)"
else
  echo "/etc/hostid already exists: $(hostid)"
fi

# Create lustre.conf & Verify
# Tells LNet to bring up the TCP network type on the default interface;
# read by modprobe when the lnet/lustre modules are loaded.
cat >/etc/modprobe.d/lustre.conf <<EOF
options lnet networks=tcp
EOF

# Find drives information
[root@alma-lustre ~]# lsblk -o NAME,SIZE,TYPE,MOUNTPOINT
NAME SIZE TYPE MOUNTPOINT
sda 32G disk
├─sda1 600M part /boot/efi
├─sda2 1G part /boot
└─sda3 27.2G part /
sdb 5G disk
sdc 5G disk
sdd 5G disk

# We will use:
# sdb → MDT + MGS
# sdc → OST0
# sdd → OST1

# Create the combined MGS+MDT target on /dev/sdb (index 0 => lustrefs-MDT0000).
# --reformat overwrites any stale Lustre/ldiskfs label on the disk so the
# command is repeatable, consistent with the flags used when formatting the OSTs.
mkfs.lustre \
  --mdt \
  --mgs \
  --reformat \
  --fsname=lustrefs \
  --index=0 \
  /dev/sdb

# Format the two object storage targets (OST0 on sdc, OST1 on sdd),
# pointing each at the MGS so they can register with the filesystem.
FSNAME="lustrefs"
MGSNID="192.168.1.103@tcp"
INDEX=0
for disk in sdc sdd; do
  echo "Creating OST${INDEX} on /dev/${disk}..."
  mkfs.lustre --ost --reformat \
    --fsname="$FSNAME" \
    --mgsnode="$MGSNID" \
    --index="$INDEX" \
    "/dev/$disk"
  INDEX=$((INDEX + 1))
done

# Create a mount point per target in one call
mkdir -p /mnt/mdt /mnt/ost0 /mnt/ost1

# The MGS/MDT must be mounted first so the OSTs can register with it
mount -t lustre /dev/sdb /mnt/mdt

# Then bring up both object storage targets
mount -t lustre /dev/sdc /mnt/ost0
mount -t lustre /dev/sdd /mnt/ost1

[root@alma-lustre ~]# mount |grep -i lustre
/dev/sdb on /mnt/mdt type lustre (rw,svname=lustrefs-MDT0000,mgs,mgsnode=192.168.1.103@tcp,osd=osd-ldiskfs)
/dev/sdd on /mnt/ost1 type lustre (rw,svname=lustrefs-OST0001,mgsnode=192.168.1.103@tcp,osd=osd-ldiskfs)
/dev/sdc on /mnt/ost0 type lustre (rw,svname=lustrefs-OST0000,mgsnode=192.168.1.103@tcp,osd=osd-ldiskfs)

[root@alma-lustre ~]# cat /etc/fstab |grep -i lustre
/dev/sdb /mnt/mdt lustre rw,svname=lustrefs-MDT0000,mgs,osd=osd-ldiskfs,user_xattr,errors=remount-ro 0 0
/dev/sdc /mnt/ost0 lustre rw,svname=lustrefs-OST0000,mgsnode=192.168.1.103@tcp,osd=osd-ldiskfs 0 0
/dev/sdd /mnt/ost1 lustre rw,svname=lustrefs-OST0001,mgsnode=192.168.1.103@tcp,osd=osd-ldiskfs 0 0

# Check Status
[root@alma-lustre ~]# lctl get_param obdfilter.*.stats
[root@alma-lustre ~]# lctl get_param mdt.*.recovery_status
[root@alma-lustre ~]# lctl get_param obdfilter.*.recovery_status

# Now Verify your Installation:
[root@alma-lustre ~]# mkdir -p /mnt/lustre
[root@alma-lustre ~]# mount -t lustre 192.168.1.103@tcp:/lustrefs /mnt/lustre
[root@alma-lustre ~]# df -h |grep -i lustrefs
192.168.1.103@tcp:/lustrefs 9.2G 2.8M 8.7G 1% /mnt/lustre

[root@alma-lustre ~]# lctl dl
[root@alma-lustre ~]# lfs df
[root@alma-lustre ~]# lfs osts

# Testing Writes & Stripe Size:
[root@alma-lustre ~]# dd if=/dev/zero of=/mnt/lustre/testfile bs=1M count=100
[root@alma-lustre ~]# lfs getstripe /mnt/lustre/testfile

##### Installing Client:
[root@alma-hpc2 ~]# yum install lustre-client

Tinihub Inc. February 14, 2026
Share this post
Tags
Archive