User Tools

Site Tools


Sidebar

cn:ccr:storage:s_g_c2014_soluzione3

Table of Contents

Esercitazione 3

Subnet

Controlla le interfacce di rete e verifica connettivita' su eth1 con gli altri nodi del cluster

# ip addr show
1: lo: mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:16:3e:02:01:2e brd ff:ff:ff:ff:ff:ff
inet 192.168.200.246/24 brd 192.168.200.255 scope global eth0
inet6 fe80::216:3eff:fe02:12e/64 scope link
valid_lft forever preferred_lft forever
3: eth1: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:9d:5b:c5 brd ff:ff:ff:ff:ff:ff
inet 10.168.200.246/24 brd 10.168.200.255 scope global eth1
inet6 fe80::5054:ff:fe9d:5bc5/64 scope link
valid_lft forever preferred_lft forever

# ping -c 2 10.168.200.247
PING 10.168.200.247 (10.168.200.247) 56(84) bytes of data.
64 bytes from 10.168.200.247: icmp_seq=1 ttl=64 time=1.77 ms
64 bytes from 10.168.200.247: icmp_seq=2 ttl=64 time=0.279 ms

--- 10.168.200.247 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 0.279/1.027/1.775/0.748 ms

# ping -c 2 10.168.200.248
PING 10.168.200.248 (10.168.200.248) 56(84) bytes of data.
64 bytes from 10.168.200.248: icmp_seq=1 ttl=64 time=1.02 ms
64 bytes from 10.168.200.248: icmp_seq=2 ttl=64 time=0.215 ms

--- 10.168.200.248 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.215/0.619/1.024/0.405 ms


Definisce la rete IP delle interfacce eth1 come preferenziale per il cluster

# mmchconfig subnets="10.168.200.0"
mmchconfig: Command successfully completed
mmchconfig: Propagating the cluster configuration data to all
affected nodes. This is an asynchronous process.


Verifica la attuale configurazione della connettivita' del nodo su cui operi

# mmdiag --network

=== mmdiag: network ===

Pending messages:
(none)
Inter-node communication configuration:
tscTcpPort 1191
my address 192.168.200.246/24 (eth0) <c0n0>
my addr list 192.168.200.246/24 (eth0)
my node number 1
TCP Connections between nodes:
Device eth0:
hostname node destination status err sock sent(MB) recvd(MB) ostype
c16-serv2 <c0n1> 192.168.200.247 connected 0 15 0 0 Linux/L
c16-serv3 <c0n2> 192.168.200.248 connected 0 23 0 0 Linux/L
diag verbs: VERBS RDMA class not initialized

Restart dei demoni e verifica della connettivita' dopo il boot

# mmshutdown -a
Wed Nov 26 13:30:22 CET 2014: mmshutdown: Starting force unmount of GPFS file systems
Wed Nov 26 13:30:27 CET 2014: mmshutdown: Shutting down GPFS daemons
c16-serv1.openlab.infn.it: Shutting down!
c16-serv3.openlab.infn.it: Shutting down!
c16-serv2.openlab.infn.it: Shutting down!
c16-serv1.openlab.infn.it: 'shutdown' command about to kill process 29670
c16-serv1.openlab.infn.it: Unloading modules from /lib/modules/2.6.32-358.el6.x86_64/extra
c16-serv3.openlab.infn.it: 'shutdown' command about to kill process 20207
c16-serv3.openlab.infn.it: Unloading modules from /lib/modules/2.6.32-358.el6.x86_64/extra
c16-serv1.openlab.infn.it: Unloading module mmfs26
c16-serv3.openlab.infn.it: Unloading module mmfs26
c16-serv2.openlab.infn.it: 'shutdown' command about to kill process 15607
c16-serv2.openlab.infn.it: Unloading modules from /lib/modules/2.6.32-358.el6.x86_64/extra
c16-serv2.openlab.infn.it: Unloading module mmfs26
c16-serv3.openlab.infn.it: Unloading module mmfslinux
c16-serv3.openlab.infn.it: Unloading module tracedev
c16-serv1.openlab.infn.it: Unloading module mmfslinux
c16-serv1.openlab.infn.it: Unloading module tracedev
c16-serv2.openlab.infn.it: Unloading module mmfslinux
c16-serv2.openlab.infn.it: Unloading module tracedev
Wed Nov 26 13:30:35 CET 2014: mmshutdown: Finished

# mmstartup -a
Wed Nov 26 13:30:39 CET 2014: mmstartup: Starting GPFS ...

# mmdiag --network

=== mmdiag: network ===

Pending messages:
(none)
Inter-node communication configuration:
tscTcpPort 1191
my address 192.168.200.246/24 (eth0)
my addr list 10.168.200.246/24 (eth1) 192.168.200.246/24 (eth0)
my node number 1
TCP Connections between nodes:
Device eth1:
hostname node destination status err sock sent(MB) recvd(MB) ostype
c16-serv2 10.168.200.247 connected 0 21 0 0 Linux/L
c16-serv3 10.168.200.248 connected 0 15 0 0 Linux/L
diag verbs: VERBS RDMA class not initialized

Snapshot

Crea una snapshot globale sul file system c16fs

# dd if=/dev/zero of=/gpfs/c16fs/datafile bs=1M count=1K
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 5.33226 s, 201 MB/s

# dd if=/dev/zero of=/gpfs/c16fs/fset1/datafile bs=1M count=1K
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 5.41672 s, 198 MB/s

# dd if=/dev/zero of=/gpfs/c16fs/fset2/datafile bs=1M count=1K
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 5.55623 s, 193 MB/s

# mkdir /gpfs/c16fs/testdir

# mkdir /gpfs/c16fs/fset1/testdir

# mkdir /gpfs/c16fs/fset2/testdir

# df -k /gpfs/c16fs/ /gpfs/c16fs/fset1 /gpfs/c16fs/fset2
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/c16fs 4194304000 3695616 4190608384 1% /gpfs/c16fs
/dev/c16fs 10485760 1048576 9437184 10% /gpfs/c16fs
/dev/c16fs 10485760 1048576 9437184 10% /gpfs/c16fs


# mmcrsnapshot c16fs global_snap1
Writing dirty data to disk
Quiescing all file system operations
Writing dirty data to disk again
Resuming operations.
Snapshot global_snap1 created with id 5.


Visualizza la snapshot (sia l'occupazione che l'accesso al contenuto)

# mmlssnapshot c16fs -d
Snapshots in file system c16fs: [data and metadata in KB]
Directory SnapId Status Created Fileset Data Metadata
global_snap1 5 Valid Wed Nov 26 16:10:30 2014 0 1024

# ls /gpfs/c16fs/.snapshots/
global_snap1

Abilita la visualizzazione delle snapshot all'interno di ogni directory

# ls /gpfs/c16fs/testdir/.snapshots/
ls: cannot access /gpfs/c16fs/testdir/.snapshots/: No such file or directory

# mmsnapdir /dev/c16fs -a

# ls /gpfs/c16fs/testdir/.snapshots/
global_snap1

Verifica occupazione in occasione di modifiche

# ls -l /gpfs/c16fs/datafile
-rw-r--r-- 1 root root 1073741824 Nov 26 13:51 /gpfs/c16fs/datafile

# mmlssnapshot /dev/c16fs -d
Snapshots in file system c16fs: [data and metadata in KB]
Directory SnapId Status Created Fileset Data Metadata
global_snap1 5 Valid Wed Nov 26 16:10:30 2014 0 1024

# rm -f /gpfs/c16fs/datafile

# mmlssnapshot /dev/c16fs -d
Snapshots in file system c16fs: [data and metadata in KB]
Directory SnapId Status Created Fileset Data Metadata
global_snap1 5 Valid Wed Nov 26 16:10:30 2014 1048608 2080



Crea snapshot su independent fileset e verifica che la visibilita' e' solo all'interno dello stesso

# mmcrsnapshot /dev/c16fs snap2 -j fset2
Writing dirty data to disk
Quiescing all file system operations
Writing dirty data to disk again
Resuming operations.
Snapshot snap2 created with id 6.

# mmlssnapshot /dev/c16fs -d
Snapshots in file system c16fs: [data and metadata in KB]
Directory SnapId Status Created Fileset Data Metadata
global_snap1 5 Valid Wed Nov 26 16:10:30 2014 1048608 2080
snap2 6 Valid Wed Nov 26 16:26:49 2014 fset2 0 0

# ls /gpfs/c16fs/.snapshots
global_snap1

# ls /gpfs/c16fs/fset2/.snapshots
snap2

Cloni

Creazione di un clone

# cd /gpfs/c16fs/

# mkdir cloni

# cd cloni

# dd if=/dev/zero of=./file.orig bs=1M count=1K
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 6.87812 s, 156 MB/s

# ls -lis
total 1048576
244741 1048576 -rw-r--r-- 1 root root 1073741824 Nov 27 07:05 file.orig

# mmclone snap file.orig file.parent_clone

# ls -lis
total 1048576
244741 0 -rw-r--r-- 1 root root 1073741824 Nov 27 07:05 file.orig
244742 1048576 -rw-r--r-- 2 root root 1073741824 Nov 27 07:06 file.parent_clone

# mmclone show *
Parent Depth Parent inode File name
------ ----- -------------- ---------
no 1 244742 file.orig
yes 0 file.parent_clone


# mmclone copy file.parent_clone file.clone1

# ls -ils
total 1048576
244743 0 -rw-r--r-- 1 root root 1073741824 Nov 27 07:08 file.clone1
244741 0 -rw-r--r-- 1 root root 1073741824 Nov 27 07:05 file.orig
244742 1048576 -rw-r--r-- 3 root root 1073741824 Nov 27 07:06 file.parent_clone

# mmclone show *
Parent Depth Parent inode File name
------ ----- -------------- ---------
no 1 244742 file.clone1
no 1 244742 file.orig
yes 0 file.parent_clone

# dd if=/dev/zero of=./file.clone1 bs=1M count=10
10+0 records in
10+0 records out
10485760 bytes (10 MB) copied, 0.00578463 s, 1.8 GB/s

# ls -ils
total 1058816
244743 10240 -rw-r--r-- 1 root root 10485760 Nov 27 07:09 file.clone1
244741 0 -rw-r--r-- 1 root root 1073741824 Nov 27 07:05 file.orig
244742 1048576 -rw-r--r-- 3 root root 1073741824 Nov 27 07:06 file.parent_clone

Creazione di 100 cloni di un file da 1 GB

# time for i in `seq 2 100`; do mmclone copy file.parent_clone file.clone${i}; done

real 0m0.233s
user 0m0.005s
sys 0m0.014s

# mmclone show *
Parent Depth Parent inode File name
------ ----- -------------- ---------
no 1 244742 file.clone1
no 1 244742 file.clone10
no 1 244742 file.clone100
no 1 244742 file.clone11

no 1 244742 file.clone97
no 1 244742 file.clone98
no 1 244742 file.clone99
no 1 244742 file.orig
yes 0 file.parent_clone

# du -sk ../cloni
1058848 ../cloni

Crea gerarchia di cloni

# mmclone snap file.clone1 file.parent_clone1
[root@c16-serv1 cloni]# mmcopy file.parent_clone1 file2

# mmclone copy file.parent_clone1 file2

# mmclone copy file.parent_clone1 file3

# ls -lis file.parent_clone file.parent_clone1 file.clone1 file2 file3
244843 0 -rw-r--r-- 1 root root 10485760 Nov 27 07:14 file2
244844 0 -rw-r--r-- 1 root root 10485760 Nov 27 07:16 file3
244743 0 -rw-r--r-- 1 root root 10485760 Nov 27 07:09 file.clone1
244742 1048576 -rw-r--r-- 102 root root 1073741824 Nov 27 07:06 file.parent_clone
244842 10240 -rw-r--r-- 4 root root 10485760 Nov 27 07:13 file.parent_clone1

# mmclone show file.parent_clone file.parent_clone1 file.clone1 file2 file3
Parent Depth Parent inode File name
------ ----- -------------- ---------
yes 0 file.parent_clone
yes 1 244742 file.parent_clone1
no 2 244842 file.clone1
no 2 244842 file2
no 2 244842 file3

Esegui uno split di file2 (diviene file ordinario) ed un redirect di file3 (diviene clone del parent dell'attuale parent). Verifica.

# mmclone split file2
[root@c16-serv1 cloni]# mmclone redirect file3
[root@c16-serv1 cloni]# mmclone show file.parent_clone file.parent_clone1 file.clone1 file2 file3
Parent Depth Parent inode File name
------ ----- -------------- ---------
yes 0 file.parent_clone
yes 1 244742 file.parent_clone1
no 2 244842 file.clone1
file2
no 1 244742 file3


Rimuovi i cloni

# rm -f file.parent_clone
rm: cannot remove `file.parent_clone': Read-only file system

# rm -f file.clone? file.clone?? file.clone??? file2

# mmclone show *
Parent Depth Parent inode File name
------ ----- -------------- ---------
file2
no 1 244742 file3
no 1 244742 file.orig
yes 0 file.parent_clone
yes 1 244742 file.parent_clone1

# rm -f file.parent_clone1

# rm -f file3 file.orig

# rm -f file.parent_clone

cn/ccr/storage/s_g_c2014_soluzione3.txt · Last modified: 2014/11/27 07:49 by brunengo@infn.it