[Level 3] Solaris 10 Technical Conference ( 2009/10/25,11/13,12/4 ) -- Advanced ZFS hands-on lab

The following is my lab file, please refer to it. 

Hope this helps.

regards,
Stanley Huang

****************************************************************************************************
The purpose of this lab is to give you advanced ZFS filesystem administration skills. After completing it, you will be able to do the following.
Lab 1:
* replace zpool disk.
Lab 2:
* take ZFS filesystem snapshot, rollback ZFS filesystem.
* clone ZFS filesystem.
Lab 3:
* use ZFS L2ARC
* use ZFS ZIL



Lab 1:
1. replace zpool disk.
# cd /lab/ZFS/files;
# zpool create mypool mirror `pwd`/f1 `pwd`/f2 spare `pwd`/f3;
# zpool replace mypool `pwd`/f2 `pwd`/f3;

# zpool status mypool;
-------------------------------------------------------------------------------
  pool: mypool
 state: ONLINE
 scrub: resilver completed after 0h0m with 0 errors on Sun Oct 18 11:13:15 2009
config:

    NAME            STATE     READ WRITE CKSUM
    mypool                 ONLINE       0     0     0
      /lab/ZFS/files/f1    ONLINE       0     0     0
      spare                ONLINE       0     0     0
        /lab/ZFS/files/f2  ONLINE       0     0     0
        /lab/ZFS/files/f3  ONLINE       0     0     0  47.5K resilvered
    spares
      /lab/ZFS/files/f3    INUSE     currently in use

errors: No known data errors
-------------------------------------------------------------------------------

# zpool replace mypool `pwd`/f2 `pwd`/f8;

# zpool status mypool;
-------------------------------------------------------------------------------
  pool: mypool
 state: ONLINE
 scrub: resilver completed after 0h0m with 0 errors on Sun Oct 18 11:20:55 2009
config:

    NAME          STATE     READ WRITE CKSUM
    mypool               ONLINE       0     0     0
      /lab/ZFS/files/f1  ONLINE       0     0     0
      /lab/ZFS/files/f8  ONLINE       0     0     0  57.5K resilvered
    spares
      /lab/ZFS/files/f3  AVAIL

errors: No known data errors
-------------------------------------------------------------------------------



Lab 2:
1. ZFS filesystem snapshot/rollback.
# zfs create mypool/myfs1;
# cp /etc/hosts /mypool/myfs1/hosts;
# ls -l /mypool/myfs1/hosts;
------------------------------------------------------------
-r--r--r-- 1 root root 4925 Oct 18 11:35 /mypool/myfs1/hosts
------------------------------------------------------------

# zfs snapshot mypool/myfs1@s1;
# cat /dev/null > /mypool/myfs1/hosts;
# ls -l /mypool/myfs1/hosts;
------------------------------------------------------------
-r--r--r-- 1 root root 0 Oct 18 11:36 /mypool/myfs1/hosts
------------------------------------------------------------

# zfs rollback mypool/myfs1@s1;
# ls -l /mypool/myfs1/hosts;
------------------------------------------------------------
-r--r--r-- 1 root root 4925 Oct 18 11:35 /mypool/myfs1/hosts
------------------------------------------------------------

2. clone ZFS filesystem, then promote it.
# zfs clone mypool/myfs1@s1 mypool/clonefs;
# zfs list -t all -r mypool;
-----------------------------------------------------
NAME              USED  AVAIL  REFER  MOUNTPOINT
mypool            218K  90.8M    24K  /mypool
mypool/clonefs     21K  90.8M    25K  /mypool/clonefs
mypool/myfs1       25K  90.8M    25K  /mypool/myfs1
mypool/myfs1@s1      0      -    25K  -
-----------------------------------------------------

# zfs get -r origin mypool;
--------------------------------------------------
NAME             PROPERTY  VALUE            SOURCE
mypool           origin    -                -
mypool/clonefs   origin    mypool/myfs1@s1  -
mypool/myfs1     origin    -                -
mypool/myfs1@s1  origin    -                -
--------------------------------------------------

# cd /mypool/clonefs/;
# ls -al;
----------------------------------------------
total 9
drwxr-xr-x 2 root root    3 Oct 18 11:35 .
drwxr-xr-x 6 root root    6 Oct 18 11:39 ..
-r--r--r-- 1 root root 4925 Oct 18 11:35 hosts
----------------------------------------------

# echo "192.168.100.1 host1" >> ./hosts;
# echo "192.168.100.2 host2" >> ./hosts;
# echo "192.168.100.3 host3" >> ./hosts;
# ls -l ./hosts;
------------------------------------------------
-r--r--r-- 1 root root 4985 Oct 18 11:44 ./hosts
------------------------------------------------

# tail -3 ./hosts;
-------------------
192.168.100.1 host1
192.168.100.2 host2
192.168.100.3 host3
-------------------

# cd /;
# zfs promote mypool/clonefs
# zfs get -r origin mypool;
------------------------------------------------------
NAME               PROPERTY  VALUE              SOURCE
mypool             origin    -                  -
mypool/clonefs     origin    -                  -
mypool/clonefs@s1  origin    -                  -
mypool/myfs1       origin    mypool/clonefs@s1  -
------------------------------------------------------

# zfs destroy -r mypool/clonefs@s1;
cannot destroy 'mypool/clonefs@s1': snapshot is cloned
no snapshots destroyed
# zfs destroy -R mypool/clonefs@s1;
# zfs rename mypool/clonefs mypool/fs1
root@Stanley-NB:/# zfs get -r origin mypool;
------------------------------------
NAME        PROPERTY  VALUE   SOURCE
mypool      origin    -       -
mypool/fs1  origin    -       -
------------------------------------

Lab 3:
# cd /lab/ZFS/files;
# zpool add mypool log `pwd`/f9;
# zpool status mypool;
-------------------------------------------------------------------------------
  pool: mypool
 state: ONLINE
 scrub: resilver completed after 0h0m with 0 errors on Wed Oct 18 11:22:13 2009
config:

    NAME                 STATE     READ WRITE CKSUM
    mypool               ONLINE       0     0     0
      /lab/ZFS/files/f1  ONLINE       0     0     0
      /lab/ZFS/files/f8  ONLINE       0     0     0  57.5K resilvered
    logs                 ONLINE       0     0     0
      /lab/ZFS/files/f9  ONLINE       0     0     0
    spares
      /lab/ZFS/files/f3  AVAIL  

errors: No known data errors
-------------------------------------------------------------------------------

# lofiadm -a `pwd`/f10; # a cache device must be a block device (vdev), not a plain file, so create one with lofiadm first.
/dev/lofi/1
# zpool add mypool cache /dev/lofi/1;
# zpool status mypool;
-------------------------------------------------------------------------------
  pool: mypool
 state: ONLINE
 scrub: resilver completed after 0h0m with 0 errors on Wed Oct 18 11:24:33 2009
config:

    NAME                 STATE     READ WRITE CKSUM
    mypool               ONLINE       0     0     0
      /lab/ZFS/files/f1  ONLINE       0     0     0
      /lab/ZFS/files/f8  ONLINE       0     0     0  57.5K resilvered
    logs                 ONLINE       0     0     0
      /lab/ZFS/files/f9  ONLINE       0     0     0
    cache
      /dev/lofi/1        ONLINE       0     0     0
    spares
      /lab/ZFS/files/f3  AVAIL  

errors: No known data errors
-------------------------------------------------------------------------------


Comments

Popular posts from this blog

[Level 1] Rar tool for Solaris.

[Level 2] iif in Python