ZFS utility / admin scripts

This commit is contained in:
kneutron 2021-04-12 14:40:45 -05:00 committed by GitHub
parent 59f802d805
commit 531e0e5926
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 2027 additions and 0 deletions

View File

@ -0,0 +1,279 @@
#!/bin/bash
#====================================================
# =LLC= © (C)opyright 2016 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
#====================================================
# make a dynamic-growing zfs pool from 1 disk to 6
# zfs pools are on loop files (virtual disks)
# trace on: set -x // off: set +x # REF: http://tldp.org/LDP/Bash-Beginners-Guide/html/sect_02_03.html
# PROTIP: Avoid having to turn off and avoid clutter by using subshell REF: http://serverfault.com/questions/16204/how-to-make-bash-scripts-print-out-every-command-before-it-executes
# TODO setme or pass "new" as arg1
newdisks=1 # set 1 to zero-out new disk files
# DONE redirect to non-ZFS spinning disk if avail, otherwise use RAMdisk
# Default backing-file location is a RAM-backed tmpfs path:
zdpath="/run/shm/zdisks"
# if milterausb3 is mounted, use it
usemil=`df |grep -c /mnt/milterausb3`
[ $usemil -gt 0 ] && zdpath="/mnt/milterausb3/zdisks"
mkdir -pv $zdpath
# Point the stable /zdisks symlink at whichever backing dir was chosen (-sfn replaces an existing link)
ln $zdpath /zdisks -sfn
cd /zdisks || exit 99
DS=400 # Disksize in MB
[ $usemil -gt 0 ] && DS=465 # sim 500GB "usable" # 500
let mkpop=$DS-100 # size of populate-data file in MB
# NOTE we need more loop devices; 1-time fix REF: http://askubuntu.com/questions/499131/how-to-use-more-than-255-loop-devices
logfile=~/mkdynamic-grow-pool.log
> $logfile # clearit
# Tear down the named zpool: force-unmount its dataset, show its status,
# then force-destroy it and print overall pool health.
# Arguments: $1 - zpool name
# Errors from umount/destroy are appended to $logfile (the pool may already
# be gone, which is fine for this demo's "clean slate" usage).
# FIX: quote "$1" so an empty/odd argument cannot word-split the commands.
function nomorepool () {
  zfs umount -f "$1" 2>> $logfile
# zpool export $1 2>> $logfile
  zpool status "$1"
  zpool destroy -f "$1" 2>> $logfile
  zpool status -L -P -x
}
pool1="zdynpool1"
# Start from a clean slate - destroy any leftover demo pool
nomorepool $pool1
df -h
zpool status $pool1
echo "POOL $pool1 SHOULD BE GONE -PK"
# if not exist, trip
[ -e zdyndisk1 ] || let newdisks=1
[ "$1" = "new" ] && let newdisks=1 # if arg passed
[ $newdisks -gt 0 ] && read -n 1
[ $newdisks -gt 0 ] && /root/bin/clearcache
# getting rid of sync for each dd should speed things up
echo "Preparing virtual disks... (Size: $DS MB)"
# Create/refresh 8 file-backed "disks" (6 pool members + 1 spare + 1 extra)
for i in {1..8};do
printf $i...
# NECESSARY if re-using disks that were previously in a pool!!
zpool labelclear -f /zdisks/zdyndisk$i
[ $newdisks -gt 0 ] && time dd if=/dev/zero of=zdyndisk$i bs=1M count=$DS 2>&1 |egrep 'copied|real' >> $logfile
done
echo 'Syncing...'
time sync
ls -alh |tee -a $logfile
#NOTE: ' zfs add ' = add a disk (expand, non-redundant); ' zfs attach ' = add mirror
# REF: http://zfsonlinux.org/faq.html#WhatDevNamesShouldIUseWhenCreatingMyPool
# REF: https://flux.org.uk/tech/2007/03/zfs_tutorial_1.html
############ create 1-disk NORAID
# (set -x ...) subshell = trace just this one command without a trailing set +x
(set -x
time zpool create -f -o ashift=12 -o autoexpand=on -O atime=off $pool1 \
/zdisks/zdyndisk1) #; set +x
echo 'o Populating pool with random,uncompressible data...'
# if file not there, create
[ -e /root/tmpfile ] && [ $newdisks -eq 0 ] || time dd if=/dev/urandom of=/root/tmpfile bs=1M count=$mkpop
time cp -v /root/tmpfile /$pool1
echo 'o Should now have a 1-disk, non-redundant pool with some data in it:'
zpool status $pool1; df -h /$pool1
echo '';printf 'PK to add mirror to single disk:';read -n 1
########### add mirror to 1-disk
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvl/index.html
(set -x
time zpool attach $pool1 \
/zdisks/zdyndisk1 /zdisks/zdyndisk2)
echo 'o Should now have a 2-disk, MIRRORED pool with RAID1:'
zpool status $pool1; df -h /$pool1
echo '';echo 'o NOTE that available pool space has not changed yet - we have only added Redundancy'
echo '! NORMALLY we would wait for the resilver to complete before proceeding! # zpool status # until resilvered'
printf 'PK to add another set of mirrored disks to the existing pool:';read -n 1
########### add 2-disk mirror to 2-disk
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvk/index.html
# ashift must be given again: it is a per-vdev property, not inherited on 'add'
(set -x
time zpool add -o ashift=12 $pool1 \
mirror /zdisks/zdyndisk3 /zdisks/zdyndisk4)
echo 'o Populating pool with more data...'
time cp -v /$pool1/tmpfile /$pool1/tmpfile2
echo 'o Should now have a 4-disk, redundant pool with RAID10:'
zpool status $pool1; df -h /$pool1
ls -lh /$pool1/*
echo '';echo 'o NOTE that the available pool space should be approximately 2x what we had before, minus a bit of overhead'
echo '! Again - NORMALLY we would wait for the resilver to complete before proceeding! # zpool status # until resilvered'
# DONE adapt scrubwatch to wait4resilver
printf 'PK to add final 2-disk mirror with 1 hotspare:';read -n 1
########### add 2-disk mirror to 4-disk, with 1 spare
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvk/index.html
(set -x
time zpool add -o ashift=12 $pool1 \
mirror /zdisks/zdyndisk5 /zdisks/zdyndisk6 \
spare /zdisks/zdyndisk7)
echo 'o Should now have a 6-disk, highly failure-resistant pool with RAID10:'
zpool status $pool1; df -h /$pool1; zpool list $pool1
echo ''
echo 'o NOTE that the available pool space should be approximately 1.5x what we had with the 4-disk mirror set,'
echo ' minus a bit of overhead'
echo '! Again - NORMALLY we would wait for the resilver to complete before proceeding! # zpool status # until resilvered'
printf 'PK to proceed with next phase (POOL WILL BE DESTROYED and rebuilt):';read -n 1
####################### start over and build pool from 2-disk RAID0
nomorepool $pool1
df -h
zpool status $pool1
echo "POOL $pool1 SHOULD BE GONE -PK";read -n 1
############ create 2-disk NORAID with max available space
(set -x
zpool create -f -o ashift=12 -o autoexpand=on -O atime=off $pool1 \
/zdisks/zdyndisk1 /zdisks/zdyndisk2)
echo 'o Populating pool with data...'
time cp -v /root/tmpfile /$pool1
time cp -v /$pool1/tmpfile /$pool1/tmpfile2
echo 'o OK, say our client has a limited budget and wants to start with 2 disks with maximum writable space, NO redundancy... (RAID0)'
zpool status $pool1; df -h /$pool1
echo '';echo 'o NOTE that the available pool space should be about the capacity of disk1+disk2 minus a bit of overhead;'
echo '+ it will be fast, but vulnerable to failure. NOTE that we will have done a *full burn-in test* on the drives 1st!'
echo 'o After a couple of weeks, client is able to buy 2 more drives -- we can both Expand the pool size AND add redundancy!'
printf 'PK to add a mirror to 1st disk:';read -n 1
########### add a mirror OTF to 1st disk of RAID0, 1st half
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvl/index.html
(set -x
time zpool attach $pool1 \
/zdisks/zdyndisk1 /zdisks/zdyndisk3)
echo 'o Should now have a 3-disk, UNBALANCED MIRRORED pool with half-RAID1:'
zpool status $pool1; df -h /$pool1
echo '';echo 'o NOTE that available pool space has **not changed** - we have only added Redundancy to HALF of the pool!'
echo '! NORMALLY we would NEED to wait for the resilver to complete before proceeding! # zpool status # until resilvered'
printf 'PK to add a mirror to the 2nd half (will end up being RAID10):';read -n 1
########### add mirror OTF to 2nd disk of original RAID0
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvl/index.html
(set -x
time zpool attach $pool1 \
/zdisks/zdyndisk2 /zdisks/zdyndisk4)
echo 'o Populating pool with more data...'
time cp -v /$pool1/tmpfile /$pool1/tmpfile2
echo 'o Should now have a 4-disk, redundant pool with RAID10 - but we built it differently since it started out as a RAID0:'
zpool status $pool1; df -h /$pool1
echo '';echo 'o NOTE that the available pool space is still the same as what we started with,'
echo '+ only now the pool has been upgraded in-place to be failure-resistant - with no downtime!'
echo '! Again - NORMALLY we would wait for the resilver to complete before proceeding! # zpool status # until resilvered'
printf 'PK to expand this now-RAID10 pool with the final 2-disk mirror, plus 1 hotspare:';read -n 1
########### add final 2-disk mirror to 4-disk RAID10, with 1 spare
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvk/index.html
(set -x
time zpool add -o ashift=12 $pool1 \
mirror /zdisks/zdyndisk5 /zdisks/zdyndisk6 \
spare /zdisks/zdyndisk7)
echo ''
echo 'o We should now have ended up with a 6-disk, mirrored, failure-resistant pool with RAID10,'
echo ' with some acceptable risks but built on a budget:'
###########
# ENDIT
zpool status -L $pool1
#df -h |head -n 1 # unnec; fortuitously, grep also grabs "size"
df -h |grep z # /$pool1 /$pool2 /$pool3a /$pool3b /$pool3c
(set -x; zfs list $pool1; zpool list $pool1)
echo ''
echo "Log: $logfile"
/bin/rm -v /zdisks/tmpfile
echo "! CLEANUP: PK to Destroy example pool $pool1 or ^C to keep it"; read -n 1
nomorepool $pool1
zpool status
exit;
====================================================
# =LLC= © (C)opyright 2016 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
====================================================
# df -h /zdynpool1
Filesystem Size Used Avail Use% Mounted on
zdynpool1 832M 0 832M 0% /zdynpool1
# zpool status
pool: zdynpool1
state: ONLINE
scan: resilvered 276K in 0h0m with 0 errors on Mon May 9 16:49:45 2016
config:
NAME STATE READ WRITE CKSUM
zdynpool1 ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
/zdisks/zdyndisk1 ONLINE 0 0 0
/zdisks/zdyndisk2 ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
/zdisks/zdyndisk3 ONLINE 0 0 0
/zdisks/zdyndisk4 ONLINE 0 0 0
mirror-2 ONLINE 0 0 0
/zdisks/zdyndisk5 ONLINE 0 0 0
/zdisks/zdyndisk6 ONLINE 0 0 0
spares
/zdisks/zdyndisk7 AVAIL
errors: No known data errors
========================================================
2016.0525 moving /zdisks to /run/shm due to usb3 thumbdrive heat / slowdowns
+ added "use milterausb3" capability, with bigger disk sizes
2016.0610 added "ashift=12" to zpool ADD commands (zfs quirk, ashift is not a global pool inherited property)

View File

@ -0,0 +1,286 @@
#!/bin/bash
# =LLC= © (C)opyright 2016 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
# REQUIRES zdynpool1 on mnt/milterausb3
# GOAL - replace all disks in zdynpool1 with larger disks ON THE FLY, no downtime;
# + also deal with if disk2 has already been replaced with spare disk8 (mkdynpoolFAIL ran before this)
# FACEPALM - uses different /zdisk path if pool was created (short /zdisks) or imported after reboot (long /mnt../zdisks)
# TODO - add check for already bigrdisk
# NOTE special handling for starting with RAID0 (D1+D2=NOMIR) then adding (1&3 + 2&4=RAID10)
# -- available space will only increase when an entire MIRROR COLUMN is done!
# assuming:
# zdynpool1 ONLINE 0 0 0
# 1 * mirror-0 ONLINE 0 0 0
# a /zdisks/zdyndisk1 ONLINE 0 0 0
# b /zdisks/zdyndisk3 ONLINE 0 0 0
# 2 * mirror-1 ONLINE 0 0 0
# c /zdisks/zdyndisk2 ONLINE 0 0 0
# d /zdisks/zdyndisk4 ONLINE 0 0 0
# To increase available space immediately, we would need to replace 1, then 3 // then 2... and finally 4
debugg=0
newdisks=1 # SHOULD NORMALLY BE 1 unless ^C before actually replacing ANY disks!
skipdisk=0 # Leave at 0 unless u know what u doing! for interrupt/resume AND requires manual number below!! xxxxx
DS=931 # Disksize in MB
let mkpop=$DS-100 # size of populate-data file in MB
logfile=~/mkpoolbigger-inplace.log
> $logfile # clearit
# TESTING virtual pool failure/hotspare + resilver
zp=zdynpool1
# TODO - can we make things easier by just adding a hotspare Xtimes and replacing with it??
# failexit.mrg
# Report a fatal error and terminate the script.
# Arguments: $1 - numeric exit code; $2 - optional human-readable description
function failexit () {
  printf '%s\n' "! Something failed! Code: $1 $2"
  exit $1
}
# Echo something to current console AND log
# Can also handle piped input ( cmd |logecho )
# Warning: Has trouble echoing '*' even when quoted.
# Globals: appends to $logfile (must be set by the sourcing script).
function logecho () {
args=$@
if [ -z "$args" ]; then
# No arguments: assume we are the tail of a pipe and drain stdin line by line.
args='tmp'
while [ 1 ]; do
# read -e -t2: 2-second timeout terminates the loop once the pipe runs dry
read -e -t2 args
if [ -n "$args" ]; then
echo $args |tee -a $logfile;
else
break;
fi
done
else
echo $args |tee -a $logfile;
fi
} # END FUNC
# Long-form vs short-form paths to the same backing files (see FACEPALM note above)
lpath=/mnt/milterausb3/zdisks
spath=/zdisks
chkpoolmount=`df |grep -c $zp`
[ $chkpoolmount -gt 0 ] || zpool import -d $lpath $zp
#[ $chkpoolmount -gt 0 ] || zpool import -d /zdisks zdynpool1
# NOTE for some rsn import doesnt use short /zdisks path!
chkpoolmount=`df |grep -c $zp`
[ $chkpoolmount -eq 0 ] && failexit 9999 "! $zp was not imported / is still not mounted!"
# assuming: (if mkdynpoolFAIL-boojum.sh has run, otherwise disk8 will be disk2)
# zdynpool1 ONLINE 0 0 0
# mirror-0 ONLINE 0 0 0
# /mnt/milterausb3/zdisks/zdyndisk1 ONLINE 0 0 0
# * /mnt/milterausb3/zdisks/zdyndisk8 ONLINE 0 0 0
# mirror-1 ONLINE 0 0 0
# /mnt/milterausb3/zdisks/zdyndisk3 ONLINE 0 0 0
# /mnt/milterausb3/zdisks/zdyndisk4 ONLINE 0 0 0
# mirror-2 ONLINE 0 0 0
# /mnt/milterausb3/zdisks/zdyndisk5 ONLINE 0 0 0
# /mnt/milterausb3/zdisks/zdyndisk6 ONLINE 0 0 0
declare -a pooldisks # regular indexed array
pooldisks[1]=zdyndisk1
pooldisks[2]=zdyndisk2
pooldisks[3]=zdyndisk3
pooldisks[4]=zdyndisk4
pooldisks[5]=zdyndisk5
pooldisks[6]=zdyndisk6
# If the hotspare (disk8) already replaced disk2, track disk8 instead
chkalreadyfailed=`zpool status -v $zp|grep -c disk8`
if [ $chkalreadyfailed -gt 0 ];then
#FAILD=8;REPW=2
pooldisks[2]=zdyndisk8
fi
[ $debugg -gt 0 ] && logecho "vdisk2: ${pooldisks[2]}"
# associative arrays REF: http://mywiki.wooledge.org/BashGuide/Arrays
# REF: http://www.artificialworlds.net/blog/2012/10/17/bash-associative-array-examples/
# NOTE CAPITAL A for assoc array!
declare -A ASrepdisks # associative array
# ASrepdisks == New disk name to replace original disk with
key=${pooldisks[1]} # zdyndisk1
ASrepdisks[$key]=zbigrdisk1 # ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1NL656R -make this whatever new disk is in dev/disk/by-id
key=${pooldisks[2]} # zdyndisk2, or 8 if detected
ASrepdisks[$key]=zbigrdisk2 # ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J6KTJC0J
key=${pooldisks[3]} # zdyndisk3
ASrepdisks[$key]=zbigrdisk3 # ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J4KD08T6
key=${pooldisks[4]} # whatever 4 is set to
ASrepdisks[$key]=zbigrdisk4 # ata-WDC_WD10EZEX-00KUWA0_WD-WCC1S5925723
key=${pooldisks[5]} # whatever 5 is set to
ASrepdisks[$key]=zbigrdisk5
key=${pooldisks[6]} # whatever 6 is set to
ASrepdisks[$key]=zbigrdisk6
# ^^ HOW THIS WORKS:
# key=${pooldisks[1]} # returns: LET key=zdyndisk1
# ASrepdisks[$key]=zbigrdisk1 # ASrepdisks["zdyndisk1"]="zbigrdisk1" # LOOKUP and set!
# key=${pooldisks[2]} # returns: LET key=zdyndisk8 , if it was manually set, or zdyndisk2 otherwise
# ASrepdisks[$key]=zbigrdisk2 # ASrepdisks["zdyndisk8"]="zbigrdisk2" or whatever you want NEW disk to be
if [ $debugg -gt 0 ]; then
# minor sanity chk
logecho "key:$key: ASrepdisks $key == ${ASrepdisks[$key]}"
# echo "PK to proceed if OK"
# read
fi
# Evidently we can only do 1 setting at a time... turn trace=on for this 1 command in subshell
(set -x
zpool set autoexpand=on $zp) || failexit 99 "! Something failed with $zp - Run mkdynamic-grow-pool-boojum.sh to Create $zp"
(set -x
zpool set autoreplace=on $zp) || failexit 992 "! Something failed with $zp - Run mkdynamic-grow-pool-boojum.sh to Create $zp"
cd /zdisks || failexit 101 "! Cannot cd /zdisks; does $zp exist?"
chkdisk=${pooldisks[2]}
[ $debugg -gt 0 ] && logecho "o Checking for existence of /zdisks/$chkdisk"
[ -e $chkdisk ] || failexit 105 "! $lpath/$chkdisk does not exist! Run mkdynamic-grow-pool-boojum.sh to Create $zp before running $0 !"
zdpath=/tmp/failsafe
# if milterausb3 is mounted, use it
usemil=`df |grep -c /mnt/milterausb3`
if [ $usemil -gt 0 ]; then
zdpath="/mnt/milterausb3/zdisks"
else
failexit 404 '/mnt/milterausb3 needs to be mounted!'
fi
mkdir -pv $zdpath
ln $zdpath /zdisks -sfn
cd /zdisks || failexit 1011 "! Still cant cd to /zdisks! Check $logfile"
# DONE move up
#DS=800 # Disksize in MB
#let mkpop=$DS-100 # size of populate-data file in MB
if [ $newdisks -gt 0 ]; then
logecho "`date` - Preparing NEW set of Larger ($DS)MB virtual disks, no matter if they exist or not..."
for i in {1..8};do
printf $i...
(time dd if=/dev/zero of=zbigrdisk$i bs=1M count=$DS 2>&1) >> $logfile
done
else
logecho "Skipping new bigger disk creation"
fi
logecho "`date` - Syncing..."
time sync
# should now have zdyndisk1-8 PLUS zbigrdisk1-8
#ls -alh |logecho
du -h z* |logecho # cleaner ;-)
zpool status -v $zp >> $logfile
logecho "Dumping assoc array to log HERE:"
for K in "${!ASrepdisks[@]}"; do
echo $K --- ${ASrepdisks[$K]} >> $logfile
echo "$zp :INTENT: ZPOOL DISK: $K WILL BE REPLACED WITH: ${ASrepdisks[$K]}"
done
# check if pool was imported after reboot, uses longer path!
chklongpath=`zpool status -v |grep -c milterausb3`
if [ $chklongpath -gt 0 ]; then
usepath=$lpath
logecho "Using longer path $usepath"
else
usepath=$spath
logecho "Using shorter path $usepath"
fi
if [ $debugg -gt 0 ]; then
echo "CHECK LOG $logfile and PK to proceed!"
read
fi
################################# TEH MAIN THING
zpool status -v $zp #|logecho
#ls -lh /zdisks/ |logecho
#logecho "`date` - Starting pool size: `df |grep $zp`"
startdata1="`date` - Starting pool size: "
startdata2="`df |grep $zp`"
logecho $startdata1
logecho $startdata2
let startdisk=$skipdisk+1 # FYI only
#printf "o Replacing disks in $zp -- starting with $startdisk -- will end up with bigger pool" # -- ^C to quit!"; #read -n 1
echo "o Replacing disks in $zp -- starting with $startdisk -- will end up with bigger pool" # -- ^C to quit!"
# xxxxx TODO modify 1st/last disk numbers MANUALLY if nec, does not support vars here
# Replace each pool member with its larger counterpart, waiting for resilver each time
for i in {1..6}; do
mykey=${pooldisks[$i]} # zdyndisk1
repdisk=${ASrepdisks[$mykey]} # zbigrdisk1
df -h |grep $zp
logecho "Replacing disk #$i -- $mykey -- OTF with Replacement disk: $repdisk - PK or ^C to quit!"
read -n 1
# NOTE subshell
# NOTE(review): failexit "32768 FML" passes ONE arg, so failexit's 'exit $1'
# would get a non-numeric two-word value - confirm intended codes if refactoring
(set -x
time zpool replace $zp $usepath/$mykey $usepath/$repdisk || failexit "32768 FML")
# END subshell
#ls -lh /zdisks/
zpool status -v $zp #|logecho
printf `date +%H:%M:%S`' ...waiting for resilver to complete...'
# Poll 'zpool status' every 2s until "resilvering" no longer appears
waitresilver=1
while [ $waitresilver -gt 0 ];do
waitresilver=`zpool status -v $zp |grep -c resilvering`
sleep 2
done
echo 'Syncing to be sure'; time sync;
date |logecho
logecho "o OK - we replaced $mykey with $repdisk ..."
logecho "+ check log and NOTE pool size has increased with every finished mirror column!"
zpool status -v $zp #|logecho
zpool status -v $zp >> $logfile
zfs list $zp >> $logfile # |logecho
zpool list $zp >> $logfile # |logecho
logecho "`date` - Disk $i = $mykey done - DF follows:"
df |grep $zp |logecho
done
#ls -lh $lpath # /zdisks/
#zpool status -v $zp
echo "REMEMBER we started with:"
echo "$startdata1"
echo "$startdata2"
echo "NOW we have a fully expanded pool with new larger disks:"
echo "`date` - Pool size after IN-PLACE expansion, NO DOWNTIME:"
echo "`df |grep $zp`"
echo 'o Complete!'
exit;

17
ZFS/mkgpt.sh Normal file
View File

@ -0,0 +1,17 @@
#!/bin/bash
# Destructively write a new (empty) GPT partition label onto /dev/<disk>.
# Usage: mkgpt.sh <short disk name, e.g. sdb>
# Shows SMART / partition / by-id info first, then requires a keypress
# before wiping. Requires failexit() from ~/bin/failexit.mrg.
argg=$1
source ~/bin/failexit.mrg
# FIX: refuse to run with no argument - otherwise we would blindly operate on "/dev/"
[ -n "$argg" ] || failexit 2 "! No disk name given - usage: $0 sdX"
smartctl -a "/dev/$argg" |head -n 16
fdisk -l "/dev/$argg"
ls -l /dev/disk/by-id|grep "$argg"
echo "THIS WILL DESTRUCTIVELY APPLY A GPT LABEL - ARE YOU SURE - PK OR ^C"
read -r
parted -s "/dev/$argg" mklabel gpt || failexit 99 "! Failed to apply GPT label to /dev/$argg"
fdisk -l "/dev/$argg"

View File

@ -0,0 +1,42 @@
#!/bin/bash
# Create per-pool "dv" datasets (plus lz4-compressed children) on three
# existing pools and hand ownership to user daveb.
df -h
zfs create -o atime=off zmirpool1/dv; chown daveb /zmirpool1/dv
zfs create -o compression=lz4 -o atime=off zmirpool1/dv/compr; chown daveb /zmirpool1/dv/compr
zfs create -o atime=off zpoolraidz2/dv; chown daveb /zpoolraidz2/dv
zfs create -o compression=lz4 -o atime=off zpoolraidz2/dv/compr; chown daveb /zpoolraidz2/dv/compr
zfs create -o atime=off zsepdata1/dv; chown daveb /zsepdata1/dv
zfs create -o compression=lz4 -o atime=off zsepdata1/dv/compr; chown daveb /zsepdata1/dv/compr
df -h
exit;
# zfs create -o mountpoint=/home -o atime=off bigvaiterazfs/home
# zfs create -o mountpoint=/mnt/bigvai500 -o atime=off bigvaiterazfs/dv/bigvai500
# zfs create -o compression=off -o atime=off \
-o mountpoint=/mnt/bluraytemp25 -o quota=25G bigvaiterazfs/bluraytemp; chown dave /mnt/bluraytemp25
localinfo.dat--b4-restore-2014-0710:bigvaiterazfs/bluraytemp 26214400 128 26214272 1% /mnt/bluraytemp25
localinfo.dat--b4-restore-2014-0710:# time (dd if=/dev/zero of=/mnt/bluraytemp25/bdiscimage.udf bs=2048 count=25025314814;sync)
localinfo.dat--b4-restore-2014-0710:dd: writing `/mnt/bluraytemp25/bdiscimage.udf': Disk quota exceeded
localinfo.dat--b4-restore-2014-0710:# zfs set quota=25.1G bigvaiterazfs/bluraytemp
localinfo.dat--b4-restore-2014-0710:# time (dd if=/dev/zero of=/mnt/bluraytemp25/bdiscimage.udf bs=2048 count=24220008448;sync)
localinfo.dat--b4-restore-2014-0710:dd: writing `/mnt/bluraytemp25/bdiscimage.udf': Disk quota exceeded
localinfo.dat--b4-restore-2014-0710:NOT: # truncate -s 25GB /mnt/bluraytemp25/bdiscimage.udf
localinfo.dat--b4-restore-2014-0710:# truncate -s 23.3GB /mnt/bluraytemp25/bdiscimage.udf
localinfo.dat--b4-restore-2014-0710:# zfs set quota=23.5G bigvaiterazfs/bluraytemp # DONE
localinfo.dat--b4-restore-2014-0710:# cd /mnt/bluraytemp25 && truncate -s 23652352K bdiscimage.udf
localinfo.dat--b4-restore-2014-0710:# cd /mnt/bluraytemp25 && mkudffs --vid="BRTESTBKP20131214" bdiscimage.udf
localinfo.dat--b4-restore-2014-0710:# mount -t udf -o loop /mnt/bluraytemp25/bdiscimage.udf /mnt/bluray-ondisk -onoatime

17
ZFS/mkzfsraid10.sh Normal file
View File

@ -0,0 +1,17 @@
#!/bin/bash
# Create a 4-disk RAID10 pool (two 2-way mirrors) with lz4 compression on
# real by-id disks, add an SMB-shared dataset for user tkita, then set the
# samba password for that user.
pname=zsgtera4compr
zpool create -f -o ashift=12 -o autoexpand=on -O atime=off -O compression=lz4 $pname \
mirror ata-ST4000VN000-1H4168_Z3073Z29 ata-ST4000VN000-1H4168_Z306G0K3 \
mirror ata-ST4000VN000-1H4168_Z3073ZAY ata-ST4000VN000-1H4168_Z306G7H8
pdir=tkita; zfs create -o atime=off -o sharesmb=on $pname/$pdir; chown tkita:dave /$pname/$pdir
ls -al /$pname/$pdir
zpool status
# BUGFIX: was "set -r", which turns on *restricted shell* mode - the clear
# intent (matching every sibling script) is "set -x" to trace the command.
(set -x
smbpasswd -a tkita
)

357
ZFS/move-home-to-zfs.sh Normal file
View File

@ -0,0 +1,357 @@
#!/bin/bash
# TODO - sep datasets for users
# DONE new disk needs to be at least = size of /home du -s -h
# DONE free snapshot
# GOAL: move existing /home to zfs
# $1 = disk name (long or short) OR existing zfs poolname
# =LLC= © (C)opyright 2016 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
logfile=~/boojum-mvhome2zfs.log
source ~/bin/logecho.mrg
> $logfile
# If set to 1, will interactively kill user processes
# EITHER of these options (if set) will destroy existing ZFS datasets!
debugg=0
RESETALL=0
# DANGEROUS - ONLY SET IF U KNOW WHAT U DOING AFTER RESTORING A SNAPSHOT!
# WILL DESTROY $zp
# TODO edit this to be the name of the ZFS home pool you want to be created, if needed
zp=zhome
[ $RESETALL -gt 0 ] && (set -x;zpool destroy $zp)
tmpfile1=/tmp/mvh2zTMP1.txt
source ~/bin/failexit.mrg
modprobe zfs # shouldnt hurt even if its already loaded
# is zfs installed?
# Sanity-check that ZFS works at all, and report whether pools already exist.
# 'zfs list' prints a MOUNTPOINT header when datasets exist, or
# "no datasets available" when ZFS works but is empty.
#zfsps=`zpool status |head -n 1`
zfsps=$(zfs list |head -n 1)
if [ `echo "$zfsps" |grep -c 'MOUNTPOINT'` -ge 1 ]; then
logecho 'Existing zfs pool(s) detected:'
zpool status |awk 'NF>0'
echo 'FYI: Pass a ZFS pool name to this script to move /home there, or pass a disk name to create a new pool'
# BUGFIX: was "[ `...grep -c...` ]" with no comparison - a bare non-empty
# string (even "0") is always true, so the broken-ZFS branch below was
# unreachable. Compare the count explicitly.
elif [ `echo "$zfsps" |grep -c 'no datasets available'` -ge 1 ]; then
logecho "NOTE: ZFS is installed and appears to be working - will create a pool ( $zp ) to hold /home"
else
logecho '! ZFS does not appear to be installed or is not working correctly'
# BUGFIX: show the diagnostic 'zpool status' BEFORE failexit - the original
# ran failexit first, so the status command could never execute.
(set -x
zpool status )
failexit 99 '! zpool status is not returning a valid result:'
fi
# TODO fix/re-enable
#[ `mount |grep /home |grep -c 'type zfs'` -ge 1 ] && failexit 109 "! Home already appears to be ON zfs!"
# bigvaiterazfsNB/home on /home type zfs (rw,noatime,xattr,noacl)
# Is /home a dir hanging off root or sep partn?
#sephome=`df /home |grep /home |awk '{ print $1 }'`
hmnt=$(mount |grep /home |awk '{ print $1 }' |head -n 1) # 1st line only
# bigvaiterazfsNB/home OR /dev/sdX9
roothome=0
homespc=0
if [ `echo $hmnt |grep -c '/dev'` -gt 0 ]; then
echo '';logecho "o Your /home appears to be on $hmnt"
df -h /home
elif [ -d /home ]; then
logecho "o Your /home does not appear to be on a separate partition, is a directory on the root filesystem"
echo "...Please wait while I determine how much space it is using..."
homespc=$(du -s -k /home |awk '{print $1}') # 431484266 /home
logecho $homespc
roothome=1
else
failexit 201 "! This fallthru should not happen, cannot determine /home!"
fi
# skip header line and grab 3rd field (Used)
#[ $debugg -gt 0 ] && homespc=16011904 # 16GB # TODO testing - only set this if there is nothing in /home
[ $homespc = 0 ] && homespc=$(df -k /home |tail -n +2 |awk '{ print $3 }')
let hsbytes=$homespc*1024
# REF: https://unix.stackexchange.com/questions/222121/how-to-remove-a-column-or-multiple-columns-from-file-using-shell-command
# get a list of long drive names with short; strip out blank lines and unnec fields
/bin/ls -go /dev/disk/by-id /dev/disk/by-path \
|egrep -v 'part|wwn|total |dev/' \
|awk 'NF>0' \
|awk '{$1=$2=$3=$4=$5=$6=""; print $0}' \
|column -t \
> $tmpfile1
echo '';echo "o These are the hard drives found on your system:"
# NOT an unnec use of cat - REF: https://unix.stackexchange.com/questions/16279/should-i-care-about-unnecessary-cats
cat $tmpfile1
echo ''
# did we get passed a disk or existing ZFS pool?
argg=$1
[ "$argg" = "" ] && failexit 199 "! Cannot proceed - pass at least a disk device name (long or short form) OR zfs pool to move /home to!"
usepool=""; usedisk=""
if [ `grep -c $argg $tmpfile1` -gt 0 ]; then
logecho "o You apparently want me to use this disk:"
bothforms=`grep $argg $tmpfile1`
echo "$bothforms"
getlongdisk=`grep $argg $tmpfile1 |awk '{ print $1 }' |head -n 1`
shortdisk=${bothforms##*/} # strip off all leading "/"
shortdev=/dev/$shortdisk
usedisk=$getlongdisk
echo ''; logecho "o Using long-form diskname: $usedisk - Short form: $shortdev"
echo "^^ If this is incorrect, then rerun this script and use a more specific device name!"
# test for cd = add all results (tmpusingcd)
ttlusecd=0 # TOTAL
#TMPusecd
tucd=`echo $argg |egrep -c 'sr0|sr1|scd0|scd1|cdrom|cdrw|dvdrw'`
let ttlusecd=$ttlusecd+$tucd
tucd=`echo $shortdisk |egrep -c 'sr0|sr1|scd0|scd1|cdrom|cdrw|dvdrw'`
let ttlusecd=$ttlusecd+$tucd
# [ `echo $argg |grep -c sr1` -gt 0 ] && failexit 401 "! I cant use a CDROM device, wiseguy!!"
[ $ttlusecd -gt 0 ] && failexit 401 "! I cant put /home on a CDROM device, wiseguy!! Try again with a hard drive!"
# test for existing filesystem on destination disk - especially if sda!
echo "...Checking blkid and zpools to see if the disk you specified is OK to use..."
[ `echo $hmnt |grep -c $shortdev` -gt 0 ] && failexit 32768 "! You CRAZY MANIAC - you cant re-use your existing home disk in-place for ZFS!!"
# Sum every way the target disk could already be in use (blkid + zpool status)
alreadyf=`blkid |grep -c $argg`
alreadyf2=`blkid |grep -c $shortdev`
let alreadyf=$alreadyf+$alreadyf2
#/dev/sde1: LABEL="zredpool2" UUID="17065421584496359800" UUID_SUB="1595728817173195411" TYPE="zfs_member" PARTLABEL="zfs"
#/dev/sda2: LABEL="xubuntu1404" UUID="103f019e-1275-4c27-a972-5b5d3874b863" TYPE="ext4" PARTUUID="b680669e-02"
# ISSUE - blkid is not always up to date, not detecting newly created test pools!
alreadyf2=`zpool status |grep -c $usedisk`
let alreadyf=$alreadyf+$alreadyf2
alreadyf2=`zpool status |grep -c $shortdisk`
let alreadyf=$alreadyf+$alreadyf2
alreadyf2=`zpool status |grep -c $argg`
let alreadyf=$alreadyf+$alreadyf2
# NOTE empty GPT label will not show on blkid!
[ $alreadyf -gt 0 ] && failexit 502 "! Disk is already formatted/IN USE and needs to either be blank or have an empty GPT label: $shortdev / $usedisk"
# Check disk capacity against existing
# fdisk -l /dev/sdb |grep Disk |grep -v 'identifier'
# 1 2 3 4 5
#Disk /dev/sdb: 1000 GB, 1000202273280 bytes
dcap=`fdisk -l $shortdev |grep Disk |grep -v 'identifier' |awk '{print $5}'`
[ $debugg -gt 0 ] && logecho "dcap: $dcap ^^ homespc: $hsbytes"
# comma-sep nums - REF: https://unix.stackexchange.com/questions/113795/add-thousands-separator-in-a-number
if [ $dcap -lt $hsbytes ]; then
dcapcma=`printf "%'d" $dcap`
hsbcma=`printf "%'d" $hsbytes`
logecho "! Disk capacity of $usedisk is less than home data usage!"
logecho "Home: $hsbcma"
logecho "Disk: $dcapcma"
failexit 999 "! Selected Disk capacity of $usedisk is less than home data usage - choose a larger disk or use a larger zpool!"
fi
################################# POINT OF NO RETURN - POSSIBLE DATA DESTRUCTION AFTER THIS!
fdisk -l $shortdev |tee -a $logfile 2>>$logfile
echo '';logecho "YOU ARE ABOUT TO DESTRUCTIVELY GPT LABEL DISK: $usedisk"
echo "ENTER ADMIN PASSWORD TO PROCEED OR ^C: "
read
( set -x
zpool labelclear $shortdev
parted -s $shortdev mklabel gpt
fdisk -l $shortdev |tee -a $logfile)
elif [ `zfs list -d0 |grep -c $argg` -gt 0 ]; then
logecho "o You apparently want me to use this pre-existing ZFS pool for /home:"
zfs list -d0 |grep $argg |head -n 1
usepool=$argg
zp=$argg # using pre-existing pool
else
failexit 404 "! Cannot proceed - $argg was not found on the system!"
fi # did we get passed a disk or existing ZFS pool?
# create the pool if needed
if [ "$usedisk" != "" ] && [ "$usepool" = "" ]; then
[ "$zp" = "" ] && zp=zhome
# set a default name
(set -x
zpool create -o ashift=12 -o autoexpand=on -o autoreplace=on \
-O atime=off -O compression=lz4 \
$zp \
$usedisk
zpool status |awk 'NF>0'
)
fi
# from now on, we are using pool!
(set -x
[ $debugg -gt 0 ] && zfs destroy $zp/home
zfs create -o sharesmb=off $zp/home )
zfs list -p $zp
# TODO check for zfs pool free space vs home use
zpcap=`zfs list -p $zp |awk '{ print $3 }' |tail -n +2` # skip header and get bytes
[ $debugg -gt 0 ] && logecho "zpcap: $zpcap ^^ homespc: $hsbytes"
if [ $zpcap -lt $hsbytes ]; then
zpcapcma=`printf "%'d" $zpcap`
hsbcma=`printf "%'d" $hsbytes`
logecho "! Usable ZFS pool capacity of $zp is less than home data usage!"
logecho "Home: $hsbcma"
logecho "Pool: $zpcapcma"
failexit 919 "! Selected ZFS pool $zp is smaller than home data usage - choose a larger disk or use a larger zpool!"
fi
# Permission was already given, but make sure it's OK to logoff all users
logecho "! NOTE: by proceeding from here, you will be shutting down the X window manager (GUI) and LOGGING OFF all non-root users!"
logecho ' /^^ MAKE SURE ALL YOUR DATA IS BACKED UP / SAVED BEFORE PROCEEDING ^^\'
logecho "You need to be DIRECTLY logged into tty1 or similar as the root userid!"
logecho "ENTER ADMIN PASSWORD TO PROCEED, OR ^C - you need to be running this script directly as root without using sudo!"
read
# Determine WM
xwm=`pstree -psu -A|grep Xorg`
# |-lightdm(1325)-+-Xorg(1388)
xwmedit=`echo $xwm |awk -F\( '{ print $1 }'`
#|-lightdm
xwmedit2=${xwmedit##*-} # strip off to "-" ${tmp2##*-}
#lightdm
[ "$xwmedit2" = "" ] || service $xwmedit2 stop
sleep 2
# OK so far, check if anything in /home is locked
# Refresh the global $flocks counter: number of lsof output lines that
# mention /home. Deliberately NOT local - the caller's wait-loop reads it.
function checkhomelock () {
  flocks=$(lsof | grep -c /home)
}
logecho "`date` ! Force-logging off anyone who is locking /home files..."
# xxxxx workaround, root getting fragged off
# NOTE(review): flocks=0 here makes the while-loop below a no-op (the fuser
# kill pass never runs) - intentional per the workaround comment above
flocks=0
while [ $flocks -gt 0 ]; do
[ $debugg -gt 0 ] && fopts="-i "
# for myuser in `w -h |grep -v root |awk '{print $1}' |sort |uniq`; do
# fuser $fopts -u -v $myuser
fuser $fopts -k -u -v -m /home
# done
checkhomelock
sleep 5;date
done
lsof |grep /home
logecho "o All /home files should be free!"
du -s -h /home
logecho "`date` - Copying /home data over to /$zp/home"
cd /$zp/home || failexit 405 "! FARK - I cant cd to /$zp/home !"
cd /home; df -hT /home /$zp
# had problems with ownership permissions
#time tar cpf - * |pv |(cd /$zp/home; tar xpf - ) || failexit 1000 "! Copying home data failed - check free space!"
# xxxxx 2024.0404 EXPERIMENTAL but appears to work
time rsync -r -t -p -o -g -v --delete -l -s \
--exclude=.thumbnails/* \
/home/ \
/$zp/home \
2>~/rsync-error.log \
|| failexit 1000 "! Copying home data failed - check free space!"
date
df -hT |grep /home
if [ $debugg -gt 0 ]; then
logecho "PK to unmount old /home or move it out of the way:"
read
else
logecho "Unmounting old /home / moving it out of the way:"
fi
if [ $roothome -gt 0 ]; then
mv -v /home /home--old
ls -l / |grep home
else
cd
umount /home
fi
# SKIP edit fstab for noauto - NO, too dangerous to risk
# Re-point the new dataset at /home and bring the window manager back up
zfs set mountpoint=/home $zp/home
df -h
[ "$xwmedit2" = "" ] || service $xwmedit2 start
logecho "`date` - Finished migrating /home"
zfs snapshot -r $zp@snapshot1
zfs list -r -t snapshot
logecho " -- Dont forget to restart the window manager if needed, and edit /etc/fstab - put /home as noauto!"
logecho "Example: # service lightdm restart"
logecho "EXAMPLE /etc/fstab:"
logecho "LABEL=home /home ext4 defaults,noauto,noatime,errors=remount-ro 0 1"
exit;
REQUIRES:
o Working 'zpool status' and 'zfs list'
o grep, awk, column
o parted, fdisk, blkid
o pstree, lsof, fuser
o tar, pv
lrwxrwxrwx 1 9 Apr 26 13:20 usb-VBOX_HARDDISK-0:0 -> ../../sde
lrwxrwxrwx 1 9 Apr 26 13:20 pci-0000:00:0b.0-usb-0:1:1.0-scsi-0:0:0:0 -> ../../sde
lrwxrwxrwx 1 9 Apr 26 13:20 pci-0000:00:14.0-scsi-0:0:0:0 -> ../../sdb
lrwxrwxrwx 1 9 Apr 26 13:20 pci-0000:00:16.0-sas-0x00060504030201a0-lun-0 -> ../../sdc
# column omits leading spaces :)
$ ls -go /dev/disk/by-id /dev/disk/by-path |egrep -v 'part|wwn|total |dev/' |awk 'NF>0' |awk '{$1=$2=$3=$4=$5=$6=""; print $0}'|column -t
ata-VBOX_CD-ROM_VB2-01700376 -> ../../sr0
ata-VBOX_HARDDISK_VB2cf5f3dc-6b93417b -> ../../sdg
ata-VBOX_HARDDISK_VB3c729abf-3f210fcb -> ../../sdd
ata-VBOX_HARDDISK_VB409c8b16-836d593c -> ../../sdf
ata-VBOX_HARDDISK_VBb85ec192-1f9a60c7 -> ../../sda
usb-VBOX_HARDDISK-0:0 -> ../../sde
pci-0000:00:0b.0-usb-0:1:1.0-scsi-0:0:0:0 -> ../../sde
pci-0000:00:14.0-scsi-0:0:0:0 -> ../../sdb
pci-0000:00:16.0-sas-0x00060504030201a0-lun-0 -> ../../sdc
alreadyf=`blkid |grep -c $argg`
#/dev/sde1: LABEL="zredpool2" UUID="17065421584496359800" UUID_SUB="1595728817173195411" TYPE="zfs_member" PARTLABEL="zfs" PARTUUID="05762e8f-a4e7-fe42-9b1b-3b2431b1f967"
#/dev/sde9: PARTUUID="9a508052-75aa-3047-a9ef-38d8c2d14649"
#/dev/sda2: LABEL="xubuntu1404" UUID="103f019e-1275-4c27-a972-5b5d3874b863" TYPE="ext4" PARTUUID="b680669e-02"
#/dev/sda3: LABEL="rootantiX-16" UUID="264d2bdc-d4df-4d76-a1dc-3096a5e68bb1" TYPE="ext4" PARTUUID="b680669e-03"
#/dev/sda1: PARTUUID="b680669e-01"
alreadyf2=`blkid |grep -c $shortdev`
let alreadyf=$alreadyf+$alreadyf2
#/dev/sde1: LABEL="zredpool2" UUID="17065421584496359800" UUID_SUB="1595728817173195411" TYPE="zfs_member" PARTLABEL="zfs"
#/dev/sda2: LABEL="xubuntu1404" UUID="103f019e-1275-4c27-a972-5b5d3874b863" TYPE="ext4" PARTUUID="b680669e-02"
#/dev/sda1: PARTUUID="b680669e-01"

View File

@ -0,0 +1,261 @@
#!/bin/bash
# TODO test with 320GB -> 500GB
# HOWTO - edit, search this file for TODO and replace things where necessary before running!
# NOTE this script will auto-GPT-label new disks!!!
# =LLC= © (C)opyright 2016 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
# GOAL - replace all disks in pool1 with larger disks ON THE FLY, no downtime;
# Adapted from mkdynpoolbigger-inplace--boojum
# NOTE special handling for starting with RAID0 (D1+D2=NOMIR) then adding (1&3 + 2&4=RAID10)
# -- available space will only increase when an entire MIRROR COLUMN is done!
# assuming:
# zdynpool1 ONLINE 0 0 0
# 1 * mirror-0 ONLINE 0 0 0
# a /zdisks/zdyndisk1 ONLINE 0 0 0
# b /zdisks/zdyndisk3 ONLINE 0 0 0
# 2 * mirror-1 ONLINE 0 0 0
# c /zdisks/zdyndisk2 ONLINE 0 0 0
# d /zdisks/zdyndisk4 ONLINE 0 0 0
# To increase available space immediately, we would need to replace 1, then 3 // then 2... and finally 4
# 1 = extra debug output via logecho
debugg=1
skipdisk=0 # Leave at 0 unless u know what u doing! for interrupt/resume AND requires manual number below!! xxxxx
# Run log; truncated fresh on every invocation.
logfile=~/replacedrives-withbigger.log
> $logfile # clearit
# TODO xxxxx change this to the zfs pool you are working on!
zp=zmir320comp
# TODO - can we make things easier by just adding a hotspare Xtimes and replacing with it??
# failexit.mrg
# Print a failure message containing the numeric code (and optional
# description), then terminate the whole script with that code.
function failexit () {
  printf '%s\n' "! Something failed! Code: $1 $2"
  exit $1
}
# Echo something to current console AND log
# Can also handle piped input ( cmd |logecho )
# Warning: Has trouble echoing '*' even when quoted.
# Appends everything to the global $logfile via tee -a.
function logecho () {
args=$@
if [ -z "$args" ]; then
# No arguments: consume piped stdin line-by-line instead.
args='tmp'
while [ 1 ]; do
# -t2: give up after 2 seconds if no more input arrives (EOF breaks immediately).
read -e -t2 args
if [ -n "$args" ]; then
# Unquoted $args word-splits and globs - this is the source of the '*' caveat above.
echo $args |tee -a $logfile;
else
break;
fi
done
else
echo $args |tee -a $logfile;
fi
} # END FUNC
# This also includes WWN
dpath=/dev/disk/by-id
#dpath=/dev/disk/by-path
# If doing SCSI drives, use this
# Import the pool by-id if it isn't already mounted; bail out if that fails too.
chkpoolmount=`df |grep -c $zp`
[ $chkpoolmount -gt 0 ] || zpool import -d $dpath $zp
chkpoolmount=`df |grep -c $zp`
[ $chkpoolmount -eq 0 ] && failexit 9999 "! $zp was not imported / is still not mounted!"
# assuming: ( TODO paste relevant part of "zpool status" here as map before running )
# NAME STATE READ WRITE CKSUM
# zredtera1 ONLINE 0 0 0
# mirror-0 ONLINE 0 0 0
# ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1NL656R ONLINE 0 0 0
# ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J6KTJC0J ONLINE 0 0 0
# mirror-1 ONLINE 0 0 0
# ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J4KD08T6 ONLINE 0 0 0
# ata-WDC_WD10EZEX-00KUWA0_WD-WCC1S5925723 ONLINE 0 0 0
# xxxxx TODO change disks here!
# pooldisks[i] = by-id names of the CURRENT disks to be replaced, in order.
declare -a pooldisks # regular indexed array
pooldisks[1]=ata-SAMSUNG_HD322HJ_S17AJB0SA23730
pooldisks[2]=ata-ST3320620AS_9QF4BMH8
#pooldisks[3]=ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J4KD08T6
#pooldisks[4]=ata-WDC_WD10EZEX-00KUWA0_WD-WCC1S5925723
#pooldisks[5]=zdyndisk5
#pooldisks[6]=zdyndisk6
# associative arrays REF: http://mywiki.wooledge.org/BashGuide/Arrays
# REF: http://www.artificialworlds.net/blog/2012/10/17/bash-associative-array-examples/
# NOTE CAPITAL A for assoc array!
declare -A ASrepdisks # associative array
# xxxxx TODO put new disk names / WWN IDs here before running!
# ASrepdisks == New disk name to replace original disk with
key=${pooldisks[1]} # zdyndisk1
ASrepdisks[$key]=ata-ST3500641AS_3PM1523A
# ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1NL656R -make this whatever new disk is in dev/disk/by-id
key=${pooldisks[2]} # zdyndisk2, or 8 if detected
ASrepdisks[$key]=ata-ST3500641AS_3PM14C8B
# ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J6KTJC0J
#key=${pooldisks[3]} # zdyndisk3
#ASrepdisks[$key]=zbigrdisk3
# ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J4KD08T6
#key=${pooldisks[4]} # whatever 4 is set to
#ASrepdisks[$key]=zbigrdisk4
# ata-WDC_WD10EZEX-00KUWA0_WD-WCC1S5925723
#key=${pooldisks[5]} # whatever 5 is set to
#ASrepdisks[$key]=zbigrdisk5
#key=${pooldisks[6]} # whatever 6 is set to
#ASrepdisks[$key]=zbigrdisk6
# ^^ HOW THIS WORKS:
# key=${pooldisks[1]} # returns: LET key=zdyndisk1
# ASrepdisks[$key]=zbigrdisk1 # ASrepdisks["zdyndisk1"]="zbigrdisk1" # LOOKUP and set!
# key=${pooldisks[2]} # returns: LET key=zdyndisk8 , if it was manually set, or zdyndisk2 otherwise
# ASrepdisks[$key]=zbigrdisk2 # ASrepdisks["zdyndisk8"]="zbigrdisk2" or whatever you want NEW disk to be
if [ $debugg -gt 0 ]; then
# minor sanity chk
logecho "key:$key: ASrepdisks $key == ${ASrepdisks[$key]}"
# echo "PK to proceed if OK"
# read
fi
# Evidently we can only do 1 setting at a time... turn trace=on for this 1 command in subshell
(set -x
zpool set autoexpand=on $zp) || failexit 99 "! Something failed with $zp - Run mkdynamic-grow-pool-boojum.sh to Create $zp"
(set -x
zpool set autoreplace=on $zp) || failexit 992 "! Something failed with $zp - Run mkdynamic-grow-pool-boojum.sh to Create $zp"
zpool status -v $zp >> $logfile
# Record the planned old-disk -> new-disk mapping before doing anything destructive.
logecho "Dumping assoc array to log HERE:"
for K in "${!ASrepdisks[@]}"; do
echo $K --- ${ASrepdisks[$K]} >> $logfile
echo "$zp :INTENT: ZPOOL DISK: $K WILL BE REPLACED WITH: ${ASrepdisks[$K]}"
done
#if [ $debugg -gt 0 ]; then
# echo "CHECK LOG $logfile and PK to proceed!"
# read
#fi
################################# TEH MAIN THING
zpool status -v $zp #|logecho
#logecho "`date` - Starting pool size: `df |grep $zp`"
startdata1="`date` - Starting pool size: "
#startdata2="`df |grep $zp`"
# Capture the df header plus the pool's rows; the literal '\n' between them
# is expanded later by 'echo -e' to reproduce a multi-line report.
startdata2=`df|head -n 1`
startdata2=$startdata2'\n'`df|grep $zp`
echo -e "$startdata2" >> $logfile
#Filesystem 1K-blocks Used Available Use% Mounted on
#zredpool2 722824320 33628416 689195904 5% /zredpool2
#zredpool2/bigvai750 1061294592 372098688 689195904 36% /zredpool2/bigvai750
#zredpool2/dvcompr 898452224 209256320 689195904 24% /zredpool2/dvcompr
#zredpool2/dvds 1270349696 581153792 689195904 46% /zredpool2/dvds
logecho $startdata1
#logecho $startdata2
echo -e "$startdata2"
let startdisk=$skipdisk+1 # FYI only
#printf "o Replacing disks in $zp -- starting with $startdisk -- will end up with bigger pool" # -- ^C to quit!"; #read -n 1
echo "o Replacing disks in $zp -- starting with $startdisk -- will end up with bigger pool" # -- ^C to quit!"
# xxxxx TODO modify 1st/last disk numbers MANUALLY if nec, does not support vars here
# Replace each listed disk in turn: clear old label, GPT-label, zpool replace,
# then block until the resilver finishes before moving to the next disk.
for i in {1..2}; do
mykey=${pooldisks[$i]} # zdyndisk1
repdisk=${ASrepdisks[$mykey]} # zbigrdisk1
df -h |grep $zp
logecho "Replacing disk #$i -- $mykey -- Insert Replacement disk: $repdisk into a free slot -- PK or ^C to quit!"
read -n 1
# NOTE(review): failexit inside the ( ) subshells below only exits the
# subshell, not this script; also 'exit 32768' wraps to status 0 - confirm intended.
(set -x
zpool labelclear $dpath/$mykey #|| failexit 1000 "! Failed to zpool labelclear $dpath/$mykey"
parted -s $dpath/$mykey mklabel gpt || failexit 1234 "! Failed to apply GPT label to disk $mykey")
# xxxxx TODO parted on this path MAY NOT WORK, needs to be /dev/sdX ?
# NOTE subshell
# xxxxx THIS IS BEING TESTED!
(set -x
time zpool replace $zp $dpath/$mykey $dpath/$repdisk || failexit 32768 "! FML, failed to replace disk $dpath/$mykey ")
# END subshell
zpool status -v $zp >> $logfile
zpool status -v $zp
printf `date +%H:%M:%S`' ...waiting for resilver to complete...'
# Poll every 2s until 'resilvering' disappears from the status output.
waitresilver=1
while [ $waitresilver -gt 0 ];do
waitresilver=`zpool status -v $zp |grep -c resilvering`
sleep 2
done
echo 'Syncing to be sure'; time sync;
date |logecho
logecho "o OK - we replaced $mykey with $repdisk ... Remove disk $mykey"
logecho "+ check log and NOTE pool size has increased with every finished mirror column!"
zpool status -v $zp >> $logfile
zpool status -v $zp
zfs list $zp >> $logfile
zpool list $zp >> $logfile
logecho "`date` - Disk $i = $mykey done - DF follows, moving on..."
df |grep $zp |logecho
done
#ls -lh $lpath # /zdisks/
#zpool status -v $zp
# Final before/after size comparison for the operator.
echo "REMEMBER we started with:"
echo "$startdata1"
echo -e "$startdata2"
echo "NOW we have a fully expanded pool with new larger disks:"
echo "`date` - Pool size after IN-PLACE expansion, NO DOWNTIME:"
echo "`df |grep $zp`"
echo 'o Complete!'
exit;
2016.0615 SUCCESSFULLY TESTED 320GB > 500GB DISKS :)
DONE startdata2:
sd2=`df|head -n 1`
sd2=$sd2'\n'`df|grep red`
echo -e "$sd2"
Filesystem 1K-blocks Used Available Use% Mounted on
zredpool2 722824320 33628416 689195904 5% /zredpool2
zredpool2/bigvai750 1061294592 372098688 689195904 36% /zredpool2/bigvai750
zredpool2/dvcompr 898452224 209256320 689195904 24% /zredpool2/dvcompr
zredpool2/dvds 1270349696 581153792 689195904 46% /zredpool2/dvds

View File

@ -0,0 +1,4 @@
#!/bin/bash
# Show current Samba shares, connected users and file locks (verbose).
# http://askubuntu.com/questions/102924/list-samba-shares-and-current-users
smbstatus -v

28
ZFS/zfs-killsnaps.sh Normal file
View File

@ -0,0 +1,28 @@
#!/bin/bash
# REF: https://sysadminman.net/blog/2008/remove-all-zfs-snapshots-50
# destroy all snapshots on blue pool (to free space) and track what got killed
#zp=zblue500compr0
#zp=zredtera1
#crit=daily
# Default snapshot-selection pattern; a first argument overrides it.
crit=weekly
#crit=$zp
[ "$1" = "" ] || crit="$1"
# Every destroyed snapshot is appended here with a timestamp.
logfile=/root/zfs-killsnaps.log
#for snapshot in `zfs list -H -t snapshot |grep hourly | cut -f 1`
#for snapshot in `zfs list -H -t snapshot |grep $zp | cut -f 1`
# Destroy every snapshot whose 'zfs list' line matches the given pattern,
# logging each kill with a timestamp to $logfile.
# Arguments: $1 - grep pattern selecting snapshots (e.g. "weekly", "hourly")
# Globals:   logfile (appended); destroys matching snapshots in ALL pools!
# Fixes vs original: pattern and snapshot name are quoted (names can contain
# spaces; an unquoted empty pattern broke grep), the global 'crit' is no
# longer clobbered, and an empty pattern is refused instead of (after
# quoting) matching - and destroying - every snapshot.
function dokill () {
  local crit=$1
  local snapshot
  [ -n "$crit" ] || return 1   # refuse an empty pattern - it would match everything
  # -H: no header, tab-separated; field 1 is the snapshot name.
  # while-read instead of for-in so names survive word-splitting.
  zfs list -H -t snapshot | grep -- "$crit" | cut -f 1 | \
  while IFS= read -r snapshot; do
    echo "`date` - Killing $snapshot" | tee -a "$logfile"
    time zfs destroy "$snapshot"
  done
}
# Kill snapshots matching the requested pattern, then always the hourly ones too.
dokill $crit
dokill hourly
#dokill weekly

View File

@ -0,0 +1,4 @@
#!/bin/bash
# Recursively list every snapshot with size, referenced space, mountpoint and creation time.
# REF: https://pthree.org/2012/12/19/zfs-administration-part-xii-snapshots-and-clones/
zfs list -r -t snapshot -o name,used,refer,mountpoint,creation

View File

@ -0,0 +1,36 @@
#!/bin/bash
# Show which datasets' snapshots hold the most space (sorted by the USEDSNAP
# column), then per-snapshot accounting; field meanings are in the notes after 'exit'.
zfs list -r -o name,used,usedsnap |sort -h -k 3
zfs list -t snapshot -o name,refer,used,written,creation
# REF: https://www.reddit.com/r/zfs/comments/i0lx98/why_isnt_snapshot_used_value_0/
exit;
Note - use -p flag for parsable (exact) numbers (for spreadsheet) - date will be in seconds from epoch
Fields:
Used is the amount of space reclaimed by deleting only this snapshot.
Refer is the size of the tarball that would be created from this snapshot's contents (give/take compression).
Written is the amount of data added or modified in the snapshot between the previous snapshot and this one
in particular written == refer for the first snapshot in the timeline
NAME USED USEDSNAP
zsgtera4/tmpdel-xattrsa 16.7G 120K
zsam52/notshrcompr-zsam52 680K 200K
zsam52 176G 268K
zsgtera4 1.66T 272K
zsam52/imac513-installed-old 869M 396K
zsam52/shrcompr-zsam52 94.7G 1.31M
zsgtera4/virtbox-virtmachines-linux 47.1G 1.64M
zsgtera4/dvdrips-shr 38.4G 4.35G
zsam52/dvdrips-shr-zsam52 25.6G 6.35G
zsgtera4/virtbox-virtmachines/zfsubuntu2FIXEDClone 33.4G 9.31G
zsam52/tmpdel-zsam52 55.0G 16.2G
zsgtera4/virtbox-virtmachines 429G 23.9G
zsgtera4/notshrcompr-zsgt2B 375G 32.2G
zsgtera4/notshrcompr-zsgt2B/bkp-bookmarks 68.9G 36.3G
zsgtera4/shrcompr-zsgt2B 795G 47.4G

74
ZFS/zfs-newds.sh Normal file
View File

@ -0,0 +1,74 @@
#!/bin/bash
# =LLC= © (C)opyright 2017 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
# cre8 a new ZFS dataset with options
# Usage: $1 = two-char flag string "CS" (C=compression, S=sharesmb; 1=on, 0=off),
#        $2 = zpool name, $3 = dataset path within the pool.
# FIX: usage text printed "opt1" twice; the second option is opt2 (sharesmb).
echo "$0 opt1=(1)compression opt2=(1)sharesmb, 0 == OFF zpool dirname"
# TODO -e /tmp/infile read it and process it
source ~/bin/failexit.mrg
logfile=/var/root/boojum-zfs-newds.log
# TODO editme
#zp=zredpool2; myds=home/vmtmpdir/vmware-virtmachines
zp="$2"; myds="$3"
# Ownership of the new dataset mountpoint is granted to this user after creation.
user=dave
#user=nerdz
# defaults
compr=lz4
shrwin=off
# opt1=compression, opt2=sharesmb
# $1 is a two-character flag string: first char = compression (1=lz4, 0=off),
# second char = sharesmb (1=on, 0=off). Anything unrecognized warns and
# keeps the defaults set above (lz4, sharing off).
case "$1" in
"10" )
# use defaults
compr=lz4; shrwin=off
;;
"11" )
compr=lz4; shrwin=on
;;
"01" )
compr=off; shrwin=on
;;
"00" )
compr=off; shrwin=off
;;
"" )
# no arg passed; bash NOP ref: https://stackoverflow.com/questions/17583578/what-command-means-do-nothing-in-a-conditional-in-bash
:
;;
* )
echo "WNG: Invalid arg passed, +$1+ not recognized"
;;
esac
# trace on
(set -x
zfs create -o \
atime=off -o compression=$compr -o sharesmb=$shrwin -o recordsize=1024k \
$zp/$myds || failexit 99 "! Failed to create ZFS $zp/$myds"
)
# NOTE(review): failexit above runs inside the (set -x) subshell, so it only
# exits the subshell - the script continues even if the create failed; confirm intended.
echo "`date` + $zp/$myds + compr=$compr:shr=$shrwin + owner:$user" >> $logfile
# NOTE does not take into account alt.mountpoints like /home!
chown -v $user /$zp/$myds; ls -al /$zp/$myds
#df -h /$zp/$myds
df -h |head -n 1
df -h |grep $myds
exit;
# MAC mods
/var/root/bin/boojum/zfs-newds.sh: line 57: /root/boojum-zfs-newds.log: No such file or directory
chown: /zwdgreentera/dvnotshrcompr: No such file or directory
ls: /zwdgreentera/dvnotshrcompr: No such file or directory
Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
zwdgreentera/dvnotshrcompr 449Gi 324Ki 449Gi 1% 10 942667768 0% /Volumes/zwdgreentera/dvnotshrcompr
40 root ~ # pwd
/var/root

View File

@ -0,0 +1,2 @@
#!/bin/bash
# Anonymously (-N = no password prompt) list the shares offered by the local Samba server.
smbclient -N -L localhost

View File

@ -0,0 +1,72 @@
#!/bin/bash
# =LLC= © (C)opyright 2016 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
# arg1 is poolname
# Watch a resilver: loop until 'zpool status' stops reporting
# "resilver in progress", showing background iostat samples along the way.
sdate=`date`
#mv ~/scrublog.log ~/scrublog-prev.log
#> ~/scrublog.log
# do forever
while :; do
clear
echo "Pool: $1 - NOW: `date` -- Watchresilver started: $sdate"
# E WORKY! - note, egrep 4 canceled not breakloop
# zpool status $1 |tee -a ~/scrublog.log |grep -A 2 'resilver in progress' || break 2
# FIX: was 'break 2' - there is only ONE enclosing loop here. Bash silently
# clamps break N to the outermost loop, but plain 'break' is what is meant
# (and is portable); also quote "$1" against empty/odd arguments.
zpool status "$1" |grep -A 2 'resilver in progress' || break
# background iostat: 3 samples, 2 seconds apart, while we sleep below
zpool iostat -v "$1" 2 3 &
# zpool iostat -T d -v $1 2 3 & # with timestamp
sleep 9
date
done
ndate=`date`
zpool status "$1"
echo "o Resilver watch $1 start: $sdate // Completed: $ndate"
hd-power-status
exit;
# zpool status |egrep -B 2 -A 2 "scrub in progress|bigvaiterazfs" # $1
zpool status
pool: tank0
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE
CKSUM
tank0 ONLINE 0 0
0
gptid/8194f816-80cd-11e1-8a71-00221516e8b8 ONLINE 0 0
0
errors: No known data errors
pool: tank1
state: ONLINE
scan: scrub in progress since Tue May 1 23:28:07 2012
146G scanned out of 1.24T at 177M/s, 1h47m to go
0 repaired, 11.56% done
config:
NAME STATE READ WRITE CKSUM
tank1 ONLINE 0 0 0
raidz1-0 ONLINE 0 0 0
label/zdisk1 ONLINE 0 0 0
label/zdisk2 ONLINE 0 0 0
label/zdisk3 ONLINE 0 0 0
label/zdisk4 ONLINE 0 0 0
errors: No known data errors

View File

@ -0,0 +1,320 @@
#!/bin/bash
# =LLC= © (C)opyright 2017 Boojum Consulting LLC / Dave Bechtel, All rights reserved.
## NOTICE: Only Boojum Consulting LLC personnel may use or redistribute this code,
## Unless given explicit permission by the author - see http://www.boojumconsultingsa.com
#
# TODO color print banner
# Intent - start with (1) 1TB RED disk in Probox 4-bay,
# + Add mirror to 1TB (RAID1)
# + Add another 2-disk mirror set to make RAID10
#usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:0 -> sdb START WITH THIS pooldisks[1]
# + Add 100MB file(s) of random data
#usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:1 -> sdc +Add mirror pooldisks[2] = RAID1
#usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:2 -> sdd +ADD RAID10A pooldisks[3] = RAID10
#usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:3 -> sde +ADD RAID10B pooldisks[4] = RAID10
# HOWTO - edit, search this file for TODO and replace things where necessary before running!
# NOTE this script will auto-GPT-label new disks and destroy existing pool+data!!!
# GOAL - create an expandable ZFS pool in real-time
# NOTE special handling for starting with RAID0 (D1+D2=NOMIR) then adding (1&3 + 2&4=RAID10)
# -- available space will only increase when an entire MIRROR COLUMN is done!
# assuming:
# zdynpool1 ONLINE 0 0 0
# 1 * mirror-0 ONLINE 0 0 0
# a /zdisks/zdyndisk1 ONLINE 0 0 0
# b /zdisks/zdyndisk3 ONLINE 0 0 0
# 2 * mirror-1 ONLINE 0 0 0
# c /zdisks/zdyndisk2 ONLINE 0 0 0
# d /zdisks/zdyndisk4 ONLINE 0 0 0
# To increase available space immediately, we would need to replace 1, then 3 // then 2... and finally 4
# enables sound; set to 2 for extra debug
debugg=1
# TODO xxxxx change this to the zfs pool you are working on!
zp=zredpool1
logfile=~/zfsdemo-$zp-expand.log
> $logfile # clearit
# TODO update this 1st if disks change!
# getdrive-byids defines the $Dzredpool1A..D device-name variables used below.
source /root/bin/getdrive-byids
# This also includes WWN
dpath=/dev/disk/by-id
#dpath=/dev/disk/by-path
# If doing SCSI drives, use this
# failexit.mrg
# Abort the whole script: report the numeric failure code plus an optional
# human-readable description, then exit with that code.
function failexit () {
  printf '%s\n' "! Something failed! Code: $1 $2"
  exit $1
}
# Echo something to current console AND log
# Can also handle piped input ( cmd |logecho )
# Warning: Has trouble echoing '*' even when quoted.
# Appends everything to the global $logfile via tee -a.
function logecho () {
args=$@
if [ -z "$args" ]; then
# No arguments: consume piped stdin line-by-line instead.
args='tmp'
while [ 1 ]; do
# -t2: give up after 2 seconds if no more input arrives (EOF breaks immediately).
read -e -t2 args
if [ -n "$args" ]; then
# Unquoted $args word-splits and globs - this is the source of the '*' caveat above.
echo $args |tee -a $logfile;
else
break;
fi
done
else
echo $args |tee -a $logfile;
fi
} # END FUNC
# xxxxx TODO change disks here!
# The $D... variables come from the sourced /root/bin/getdrive-byids above.
declare -a pooldisks # regular indexed array
pooldisks[1]=$Dzredpool1A # usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:0
pooldisks[2]=$Dzredpool1B # usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:1
pooldisks[3]=$Dzredpool1C # usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:2
pooldisks[4]=$Dzredpool1D # usb-WDC_WD10_EFRX-68FYTN0_152D00539000-0:3
#pooldisks[5]=
# Poll 'zpool status' every 5 seconds until no device reports "resilvering",
# then force a sync and log the completion timestamp.
# Globals: zp (pool name, read); waitresilver (poll counter, written);
#          uses logecho/$logfile for the completion entry.
function waitforresilver () {
  printf '%s' "$(date +%H:%M:%S)"' ...waiting for resilver to complete...'
  waitresilver=1
  until [ "$waitresilver" -eq 0 ]; do
    waitresilver=$(zpool status -v $zp | grep -c resilvering)
    sleep 5
  done
  echo 'Syncing to be sure...'
  time sync
  date | logecho
}
# Wipe any previous ZFS label from disk $1 and stamp a fresh GPT label.
# Runs in a subshell so the debug-level 'set -x' trace doesn't leak out.
# NOTE(review): failexit here executes INSIDE the subshell, so it terminates
# only the subshell - the outer script keeps running after a failure
# (labelclear legitimately fails on never-pooled disks); confirm intended.
function initdisk () {
([ "$debugg" -gt 1 ] && set -x
logecho "FYI GPT initdisk $1"
# using getdrive-byids doesnt need dpath
# zpool labelclear $dpath/$1 || failexit 1000 "! Failed to zpool labelclear $1"
zpool labelclear $1 || failexit 1000 "! Failed to zpool labelclear $1"
# parted -s $dpath/$1 mklabel gpt || failexit 1234 "! Failed to apply GPT label to disk $1"
parted -s $1 mklabel gpt || failexit 1234 "! Failed to apply GPT label to disk $1"
)
}
# Snapshot the pool's current state: print capacity and status on the
# console, and mirror df/status/list details into $logfile, finishing
# with a '=====' divider line.
function logzpstatus () {
  df "/$zp"
  df "/$zp" >> "$logfile"
  zpool status -v "$zp" | awk 'NF > 0'   # console copy, blank lines stripped
  zpool status -v "$zp" >> "$logfile"
  zfs list "$zp" >> "$logfile"
  zpool list "$zp" >> "$logfile"
  echo '=====' >> "$logfile"
}
# Tear down pool $1 after interactive confirmation: unmount, export, destroy.
# The "password" prompt is only a bare 'read' - pressing Enter confirms, ^C aborts.
# NOTE(review): after a successful 'zpool export', 'zpool destroy' will no
# longer find the pool by name - destroy likely only takes effect when the
# export failed; confirm intended ordering.
function nomorepool () {
zpool status -v $1 |awk 'NF > 0'
df -h /$1
# play a warning sound when sound/debug is enabled
[ "$debugg" -gt 0 ] && \
AUDIODRIVER=alsa /usr/bin/play -q /home/dave/wavs/destruct.wav 2>/dev/null
logecho "!!! I AM ABOUT TO DESTROY ZFS POOL $1 !!!"
logecho "-- ENTER BOOJUM ADMIN PASSWORD TO CONFIRM OR Press ^C to abort!"
read
zfs umount -f $1 2>> $logfile
zpool export $1 2>> $logfile
# zpool status $1
zpool destroy -f $1 2>> $logfile
zpool status -L -P -x
}
################################# TEH MAIN THING
clear
logecho `date`
#df -h
# TODO random color print
echo "*********************************************************************"
echo "* ** Welcome to the Boojum Consulting LLC ZFS Demo! ** *"
echo "* We will start with creating a 1-disk ZFS pool with no redundancy, *"
echo "* Add a mirror disk on-the-fly to make it RAID1, *"
echo "* And then dynamically grow the pool to a RAID10, all in real-time! *"
echo "*********************************************************************"
# DESTROY!!
umount -f /mnt/demoshare # samba
nomorepool $zp
df -h
zpool status $zp
echo "POOL $zp SHOULD BE GONE -Press Enter TO PROCEED, or ^C"
read
# double check!
[ `df |grep $zp |wc -l` -gt 0 ] && failexit 999 "! Cannot proceed - $zp still exists!"
# getting rid of sync for each dd should speed things up
# xxx TODO alter number for however many disks
# Label-clear + GPT-label each demo disk before (re)use.
logecho "o Preparing disks..."
for i in {1..4};do
printf $i...
# NECESSARY if re-using disks that were previously in a pool!!
initdisk ${pooldisks[$i]}
done
############ create 1-disk NORAID
(set -x
time zpool create -f -o ashift=12 -O compression=off -O atime=off $zp \
${pooldisks[1]} )
# Evidently we can only do 1 setting at a time... turn trace=on for this 1 command in subshell
(set -x
zpool set autoexpand=on $zp) || failexit 99 "! Autoexpand failed with $zp"
(set -x
zpool set autoreplace=on $zp) || failexit 992 "! Autoreplace failed with $zp"
echo ''
logecho 'o Initial state of new 1-disk pool:'
df /$zp
# Capture the starting size; the literal '\n' is expanded later via 'echo -e'.
startdata1="NOTE Starting pool size: `date`"
startdata2=`df|head -n 1`
startdata2=$startdata2'\n'`df |grep $zp`
#Filesystem 1K-blocks Used Available Use% Mounted on
#zredpool2 722824320 33628416 689195904 5% /zredpool2
#zredpool2/dvcompr 898452224 209256320 689195904 24% /zredpool2/dvcompr
echo $startdata1 >> $logfile
echo -e "$startdata2" >> $logfile
logecho 'o Populating pool with random,uncompressible data...'
# if file not there, create
[ -e /root/tmpfile ] || time dd if=/dev/urandom of=/root/tmpfile bs=1M count=100
# Make 9 copies of random data
for i in $(seq 9);do
cp -v /root/tmpfile /$zp/tmpfile$i
done
sync
ls -lh /$zp >> $logfile
echo ''
logecho 'o We should now have a 1-disk, non-redundant ZFS pool with some data in it:'
logzpstatus
echo '';printf 'Press Enter to add a Mirror disk to the single-disk pool, or ^C:';read -n 1
########### add mirror to 1-disk
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvl/index.html
echo 'o Adding mirror to single-disk pool for RAID1...' >> $logfile
(set -x
time zpool attach $zp \
${pooldisks[1]} ${pooldisks[2]} )
echo ''
logecho 'o We should now have a 2-disk, MIRRORED ZFS pool with RAID1 redundancy:'
logzpstatus
echo ''
echo '! We need to wait for the resilver to complete before proceeding! # zpool status ## until resilvered'
waitforresilver
logzpstatus
logecho 'o NOTE that available pool space has not increased yet - we have only added a "failsafe" mirror drive!'
echo '';printf 'Press Enter to add another set of mirrored disks to the existing pool to make the pool RAID10:';read -n 1
########### add 2-disk mirror to 2-disk for RAID10
# REF: http://docs.oracle.com/cd/E19253-01/819-5461/6n7ht6qvk/index.html
(set -x
time zpool add -o ashift=12 $zp \
mirror ${pooldisks[3]} ${pooldisks[4]} )
waitforresilver
logecho 'o Populating pool with a bit more data... Watch the blinkenlights!'
for i in $(seq 4);do
cp -v /root/tmpfile /$zp/tmpfileTWO$i
done
sync
echo ''
logecho 'o We should now have a 4-disk, redundant pool with RAID10:'
logzpstatus
echo '';logecho 'o NOTE that the available pool space should be approximately 2x what we had before, minus a bit of overhead...'
echo ''
# Final before/after comparison for the audience.
logecho "REMEMBER we started with:"
logecho "$startdata1"
echo -e "$startdata2"
echo -e "$startdata2" >> $logfile
echo ''
logecho "NOW we have a fully expanded-in-place RAID10 ZFS pool with more free space..."
logecho "+ Pool size after in-situ expansion, with NO DOWNTIME:"
df /$zp
df /$zp >> $logfile
echo ''
# Make some datasets
# TODO make func.mrg
# zfs create -o compression=lz4 -o atime=off -o sharesmb=on zredpool2/0DISPOSABLE-VEEAM-P3300-BKP ;chown dave /zredpool2/0DISPOSABLE-VEEAM-P3300-BKP
# TODO change if needed
myuser=dave; carg=""
myds=sambasharecompr
[ "$debugg" -gt 1 ] && carg="-v"
(set -x
zfs create -o compression=lz4 -o atime=off -o sharesmb=on $zp/$myds ; chown $carg $myuser /$zp/$myds ) # chown -v
myds=notsharedcompr
(set -x
zfs create -o compression=lz4 -o atime=off -o sharesmb=off $zp/$myds ; chown $carg $myuser /$zp/$myds )
# mount samba share locally
mount /mnt/demoshare
logecho "o Taking demo snapshot of $zp and all datasets..."
# we dont need this, pool is brand new
#zfs destroy -R -v $zp@demosnapshot 2>&1 >>$logfile
zfs snapshot -r $zp@demosnapshot
zfs-list-snaps--boojum.sh #|logecho
zfs-list-snaps--boojum.sh >> $logfile
logecho "# time zfs rollback $zp@demosnapshot ## after deleting data"
echo "# mount //court2130antix/zredpool1_sambasharecompr /mnt/tmp -ouid=dave,credentials=/root/.smb-court" >> $logfile
df -h -T >> $logfile
logecho 'o Complete!'
logecho `date`
exit;
2017.0323 SUCCESSFULLY TESTED 4X1TB WD RED DISKS with Probox 4-bay!

4
ZFS/ziostatt.sh Normal file
View File

@ -0,0 +1,4 @@
#!/bin/bash
# Continuous per-vdev iostat for the pool given as $1 (or all pools if omitted):
# -y skips the since-boot summary line, -T d adds a date stamp, 5-second interval.
date
zpool iostat $1 -y -T d -v 5

View File

@ -0,0 +1,25 @@
#!/bin/bash
# create ZFS pool that should be cross-boot compatible and import OK
# EDITME!
# REF: https://www.reddit.com/r/zfs/comments/b092at/cant_import_pool_from_zol_to_bsd/
# REF: https://zgrep.org/zfs.html # zfs by-OS compat. chart
# REF ' man zpool-features ' - large_blocks needed for recordsize=1024k
zp=zmac320
diskk=/dev/disk0s8 # Linux - use dev/disk/by-id
# '-d' disables every feature flag; each '-o feature@...=enabled' line then
# re-enables only the listed subset so other OSes can still import the pool.
zpool create -f -d \
-o feature@async_destroy=enabled \
-o feature@bookmarks=enabled \
-o feature@embedded_data=enabled \
-o feature@empty_bpobj=enabled \
-o feature@enabled_txg=enabled \
-o feature@spacemap_histogram=enabled \
-o feature@filesystem_limits=enabled \
-o feature@lz4_compress=enabled \
-o feature@large_blocks=enabled \
-o ashift=12 -o autoexpand=off \
-O atime=off -O compression=lz4 $zp "$diskk"

3
ZFS/zpool-import-ro.sh Normal file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Force-import pool(s) read-only, then show verbose status.
# FIX: was "$*", which joins ALL arguments into a single word; "$@" passes
# each argument through separately so e.g. '-d /dev/disk/by-id poolname' works.
zpool import -f -o readonly=on "$@"
zpool status -v

View File

@ -0,0 +1,115 @@
#!/bin/bash
# 2020.0620
# ADAPTED FROM # zpool-resizeup-mirror--no-degradation--raid10.sh
# NOTE - SCRUB 1st!!
# Grow mirror-0 in place: attach a bigger disk alongside each existing one
# (so redundancy is never lost), wait out each resilver, then detach the
# smaller originals and let autoexpand claim the new capacity.
source ~/bin/boojum/wait4resilver.mrg
source ~/bin/failexit.mrg
zp=zseatera4
# disk1/disk2 = existing ST4000 disks being retired; disk3/disk4 = new HGST 6TB disks
disk1=ata-ST4000VN000-2AH166_WDH0SB5N
disk2=ata-ST4000VN000-1H4168_Z3076XVL
disk3=ata-HGST_HUS726060ALE614_K8HU3M7N # sdh
disk4=ata-HGST_HUS726060ALE614_K8HUH6YN # sdf
zpool set autoexpand=on $zp
# speed up resilver I/O for zfs 0.8.x
#echo 0 > /sys/module/zfs/parameters/zfs_resilver_delay
echo 8000 > /sys/module/zfs/parameters/zfs_resilver_min_time_ms
# original value: 3000
echo "o Attach disk1=disk3 - $(date)"
time zpool attach $zp $disk1 $disk3 || failexit 103 "zpool attach disk1=disk3 $disk1 = $disk3 failed `date`"
waitforresilver $zp
echo "o Attach disk2=disk4 - $(date)"
time zpool attach $zp $disk2 $disk4 || failexit 104 "zpool attach disk2=disk4 $disk2 = $disk4 failed `date`"
waitforresilver $zp
zpool status -v
df -hT
echo "`date` - PK to detach smaller mirror disks and increase pool size"
read -n 1
time zpool detach $zp $disk1
time zpool detach $zp $disk2
zpool status -v
df -hT
exit;
GIVEN @ START:
( zfs list )
NAME USED AVAIL REFER MOUNTPOINT
zseatera4 5.60T 1.43T 340K /zseatera4
( zpool list )
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
zseatera4 7.25T 5.60T 1.65T - - 22% 77% 1.00x ONLINE -
capacity
pool alloc free
----------------------------------- ----- -----
zseatera4 5.60T 1.65T
mirror 2.79T 850G
ata-ST4000VN000-2AH166_WDH0SB5N - -
ata-ST4000VN000-1H4168_Z3076XVL - -
mirror 2.80T 844G
ata-ST4000VN000-1H4168_Z3073Z7X - -
ata-ST4000VN008-2DR166_ZGY005C6 - -
+ replace 1st 2 Mirror disks with HGST 6TB to increase pool size
-----
Afterward:
Sun 21 Jun 2020 09:07:43 AM CDT - PK to detach smaller mirror disks and increase pool size
real 0m0.559s
real 0m0.367s
pool: zseatera4
state: ONLINE
status: Some supported features are not enabled on the pool. The pool can
still be used, but some features are unavailable.
action: Enable all features using 'zpool upgrade'. Once this is done,
the pool may no longer be accessible by software that does not support
the features. See zpool-features(5) for details.
scan: resilvered 2.80T in 0 days 04:49:39 with 0 errors on Sun Jun 21 09:07:36 2020
config:
NAME STATE READ WRITE CKSUM
zseatera4 ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
ata-HGST_HUS726060ALE614_K8HU3M7N ONLINE 0 0 0
ata-HGST_HUS726060ALE614_K8HUH6YN ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
ata-ST4000VN000-1H4168_Z3073Z7X ONLINE 0 0 0
ata-ST4000VN008-2DR166_ZGY005C6 ONLINE 0 0 0
errors: No known data errors
Filesystem Type Size Used Avail Use% Mounted on
zseatera4 zfs 3.2T 384K 3.2T 1% /zseatera4
zseatera4/from-imacdual-zredtera1 zfs 3.2T 384K 3.2T 1% /zseatera4/from-imacdual-zredtera1
zseatera4/dvdshare zfs 6.3T 3.1T 3.2T 49% /zseatera4/dvdshare
zseatera4/dvshr zfs 3.8T 541G 3.2T 15% /zseatera4/dvshr
zseatera4/notshrcompr zfs 3.7T 474G 3.2T 13% /zseatera4/notshrcompr
zseatera4/virtbox-virtmachines zfs 3.2T 9.2G 3.2T 1% /zseatera4/virtbox-virtmachines
zseatera4/from-imacdual-zredtera1/notshrcompr-zrt1 zfs 3.3T 40G 3.2T 2% /zseatera4/from-imacdual-zredtera1/notshrcompr-zrt1
zseatera4/dvdshare/0MKV zfs 3.4T 217G 3.2T 7% /zseatera4/dvdshare/0MKV
zseatera4/dvdshare/0BLURAY zfs 4.3T 1.1T 3.2T 26% /zseatera4/dvdshare/0BLURAY
zseatera4/from-imacdual-zredtera1/virtbox-virtmachines zfs 3.3T 72G 3.2T 3% /zseatera4/from-imacdual-zredtera1/virtbox-virtmachines
zseatera4/from-imacdual-zredtera1/shrcompr-zrt1 zfs 3.3T 41G 3.2T 2% /zseatera4/from-imacdual-zredtera1/shrcompr-zrt1
zseatera4/dvdshare/DMZ-W zfs 3.3T 96G 3.2T 3% /zseatera4/dvdshare/DMZ-W
zseatera4/virtbox-virtmachines/OSXELCAPTEST zfs 3.3T 40G 3.2T 2% /zseatera4/virtbox-virtmachines/OSXELCAPTEST
# zpool list zseatera4
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
zseatera4 9.06T 5.60T 3.47T - - 17% 61% 1.00x ONLINE -

View File

@ -0,0 +1,81 @@
#!/bin/bash
# OSX - gdd
# Mod for RAID10 test (replace 4TB with 6TB HGST) == OK
# REF: https://www.reddit.com/r/zfs/comments/fwv7ky/help_expanding_zfs_mirror/
# make sure mirror is not degraded when replacing disks with larger sizes
source ~/bin/failexit.mrg
logfile=~/zpool-resizeup-mirror.log
cd /Users/dave
# Test pool is built from file-backed "disks" in the current directory.
zp=ztestpool
disk1=zdisk1
disk2=zdisk2
disk3=zdisk3
disk4=zdisk4
# The 'L' disks are the Larger replacements (2GB vs 1GB - see mkdisks).
disk5=zdisk5L
disk6=zdisk6L
# Prefer GNU dd ('gdd' from coreutils) when it is installed.
ddp=dd
[ -e /usr/local/bin/gdd ] && ddp=gdd
# Note hfs+ DOES NOT support sparse files
# Create six file-backed disks (4 x 1GB pool disks + 2 x 2GB replacements),
# build a 2x2 mirrored test pool from the first four if it doesn't already
# exist, and seed it with an ISO so there is real data to resilver.
# Globals: zp, disk1..disk6, ddp; calls failexit on create/copy errors.
function mkdisks () {
echo "Checking exist / creating pool $zp disks"
[ -e $disk1 ] || time $ddp if=/dev/zero of=$disk1 bs=1M count=1024
[ -e $disk2 ] || time $ddp if=/dev/zero of=$disk2 bs=1M count=1024
[ -e $disk3 ] || time $ddp if=/dev/zero of=$disk3 bs=1M count=1024
[ -e $disk4 ] || time $ddp if=/dev/zero of=$disk4 bs=1M count=1024
# the two replacement disks are twice the size
[ -e $disk5 ] || time $ddp if=/dev/zero of=$disk5 bs=1M count=2048
[ -e $disk6 ] || time $ddp if=/dev/zero of=$disk6 bs=1M count=2048
ls -alh
# only if not exist
[ `echo $(zpool list|grep -c $zp)` -ge 1 ] || \
time zpool create -o ashift=12 -o autoexpand=on -O atime=off -O compression=lz4 $zp \
mirror $PWD/$disk1 $PWD/$disk2 \
mirror $PWD/$disk3 $PWD/$disk4 \
|| failexit 101 "Cant create zpool $zp"
echo "Populating $zp with data"
time cp -v /Volumes/zsgtera2B/shrcompr-zsgt2B/ISO/bl-Helium_i386_cdsized+build2.iso /Volumes/$zp \
|| failexit 102 "Copy file to pool $zp fail"
} #END FUNC
# Show the non-blank lines of the pool's verbose status, followed by a
# verbose 'zpool list' of all pools.
function zps () {
  zpool status -v "$zp" | awk 'NF > 0'
  zpool list -v
}
mkdisks # comment me if nec
zps |tee -a $logfile
echo "PK to attach larger disks ONE AT A TIME to 1st mirror-0"
read -n 1
# REF: https://docs.oracle.com/cd/E19253-01/819-5461/gcfhe/index.html
# Attach the larger disks into mirror-0 alongside the originals, letting
# each resilver finish before touching the next attach.
time zpool attach $zp $PWD/$disk1 $PWD/$disk5 || failexit 103 "zpool attach disk5 fail"
zfs-watchresilver-boojum.sh
time zpool attach $zp $PWD/$disk2 $PWD/$disk6 || failexit 104 "zpool attach disk6 fail"
zfs-watchresilver-boojum.sh
zps |tee -a $logfile
gdf -hT |tee -a $logfile
echo "PK to detach smaller mirror disks and increase pool size"
read -n 1
# Drop the smaller originals; with autoexpand=on the mirror grows to the new size.
time zpool detach $zp $PWD/$disk1
time zpool detach $zp $PWD/$disk2
zps |tee -a $logfile
gdf -hT |tee -a $logfile