mirror of
https://github.com/kneutron/ansitest.git
synced 2025-01-16 04:42:55 +08:00
Add files via upload
This commit is contained in:
parent
1392d9b87b
commit
944150d78b
86
ZFS/draid-pooldisks-assoc.sh
Normal file
86
ZFS/draid-pooldisks-assoc.sh
Normal file
@ -0,0 +1,86 @@
|
||||
#!/bin/bash
# draid-pooldisks-assoc.sh
# Map short disk names (e.g. sdb) to their persistent /dev/disk long names
# and store the mapping in a bash associative array, for a 90-pooldisk
# DRAID test rig. Dumps the short==long pairs to $logfile.
#
# working with 90 pooldisks, put in assoc array
# REMEMBER ARRAYS START AT 0

logfile=/tmp/draid-pooldisks-assoc.log
> "$logfile" # clearit

#source ~/bin/logecho.mrg

DD=/dev/disk

# longname DISK - print the first /dev/disk symlink name pointing at /DISK,
# e.g. longname sdb -> ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1NL656R
# For SAS this will be pci-0000:00:16.0-sas-phy0-lun-0, so we can't limit
# the search to disk/by-id.
longname() {
  ls -lR "$DD" | grep -w "/$1" | head -n 1 | awk '{print $9}'
}

debugg=0

if [ "$debugg" -gt 0 ]; then
  # Debug path: map a single disk, show the result, and bail out early.
  declare -a pooldisks # regular indexed array
  pooldisks=(sd{b..y}) # 24, skipping sda=root and sdz=hotspare
  #pooldisks=(/dev/sd{b..y}) # 24, skipping sda=root and sdz=hotspare
  # echo ${pd[0]} = sdb; echo ${pd[24]} = sdy

  # associative arrays REF: http://mywiki.wooledge.org/BashGuide/Arrays
  # REF: http://www.artificialworlds.net/blog/2012/10/17/bash-associative-array-examples/

  # NOTE CAPITAL A for assoc array!
  declare -A ASpooldisks

  key=${pooldisks[0]} # sdb
  ASpooldisks[$key]=$(longname "$key")

  # ^^ HOW THIS WORKS:
  # key=${pooldisks[0]}                  # returns: LET key="sdb"
  # ASpooldisks[$key]=ata-VBOX_HARDDISK_blah # ASpooldisks["sdb"]="ata-*" # LOOKUP and set!

  echo "key:$key: ASpooldisks $key == ${ASpooldisks[$key]}"
  # expected:
  # key:sdb: ASpooldisks sdb == pci-0000:00:16.0-sas-phy0-lun-0
  exit # early
fi

# TEH MAIN THING
declare -a pooldisks # regular indexed array
pooldisks=(sd{b..y} sda{a..x} sdb{a..x} sdc{a..l})
# 24 in 1st set, skipping sda=root and sdz=hotspare
# 24 in 2nd + 3rd set, 12 in 4th set, (84) total

hotspares=(sdz sday sdaz sdby sdbz sdcm) # 6, will be sitting idle for replaces
# echo ${pd[0]} = sdb; echo ${pd[24]} = sdy

# NOTE CAPITAL A for assoc array!
declare -A ASpooldisks

# populate - iterate the disks directly; the loop variable is already the
# key, so no manual index counter (old let idx=idx+1 bookkeeping) is needed.
for disk in "${pooldisks[@]}"; do
  ASpooldisks[$disk]=$(longname "$disk")
done

echo "Dumping shortdisk == longdisk assoc array to $logfile"
for K in "${!ASpooldisks[@]}"; do
  echo "$K == ${ASpooldisks[$K]}" >> "$logfile"
  # echo "INTENT: ZPOOL DISK: $K == ${ASpooldisks[$K]}" >> "$logfile"
done

exit

# Sample session (lines below are reference output, kept as comments so the
# file stays parseable even if the exit above is ever removed):
# la $DBI |grep -w /sda |head -n 1
# lrwxrwxrwx 1 root root 9 Jul 3 14:45 ata-VBOX_HARDDISK_VB7d75d4dd-69ea47dd -> ../../sda
#     1     2   3    4   5  6  7   8                  9                            10  11
# la $DBI |grep -w /sda |head -n 1 |awk '{print $9}'
# ata-VBOX_HARDDISK_VB7d75d4dd-69ea47dd
|
37
ZFS/mon90.sh
Normal file
37
ZFS/mon90.sh
Normal file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash
# mon90.sh - 2021 Dave Bechtel
# for 1440x900
#
# Setup 4 xterms for monitoring I/O for (90) drives
# Used ' xwininfo ' to get geom
# Occupy-all
#
# Corners: +955+369 -1+369 -1-215 +955-215
# -geometry 80x24-0+345

# mon_xterm GEOMETRY DEVRANGE
# Spawn one background green-on-black xterm running iostat over DEVRANGE.
# DEVRANGE (e.g. 'sd{a..z}') is kept literal inside the quoted command
# string, exactly like the original cmdstr= assignments.
# NOTE(review): the braces are presumably expanded by the shell that xterm
# hands the command string to -- confirm on the target box.
mon_xterm() {
  local geom=$1 devrange=$2
  xterm -bg black -fg green -sl 2000 -rightbar -geometry "$geom" \
    -name IOSTAT \
    -e "iostat -k 5 --dec=0 -y -z $devrange" &
}

mon_xterm '72x33+0+31' 'sd{a..z}'   # top left
mon_xterm '72x33+0-0'  'sda{a..z}'  # bottom left
mon_xterm '72x33-0+31' 'sdb{a..z}'  # top right
mon_xterm '72x33-0-0'  'sdc{a..m}'  # bottom right
|
||||
|
519
ZFS/test-zfs-21-Draid--90drives.vbox
Normal file
519
ZFS/test-zfs-21-Draid--90drives.vbox
Normal file
@ -0,0 +1,519 @@
|
||||
<?xml version="1.0"?>
|
||||
<!--
|
||||
** DO NOT EDIT THIS FILE.
|
||||
** If you make changes to this file while any VirtualBox related application
|
||||
** is running, your changes will be overwritten later, without taking effect.
|
||||
** Use VBoxManage or the VirtualBox Manager GUI to make changes.
|
||||
-->
|
||||
<VirtualBox xmlns="http://www.virtualbox.org/" version="1.16-linux">
|
||||
<Machine uuid="{3b8ee5b1-aa7f-4f28-8435-9eb2fbc578b9}" name="test-zfs-21-Draid" OSType="Ubuntu_64" snapshotFolder="Snapshots" lastStateChange="2021-07-03T22:00:32Z">
|
||||
<Description>2021.0702 (2021 Dave Bechtel) Created Devuan 3.1--64 (minimal XFCE install) to test ZFS 2.1.x DRAID
|
||||
|
||||
user/userpass
|
||||
rt/rootpass
|
||||
|
||||
vbox guest adds: 6.1.22
|
||||
kernel 4.19.0-17
|
||||
|
||||
Openssh server is installed, you can ssh in
|
||||
|
||||
You will find some interesting things in /root/bin and /etc/rc.local
|
||||
Have a lot of fun :^)
|
||||
|
||||
migrated to fryserver and increased disks to 90 + SAS</Description>
|
||||
<MediaRegistry>
|
||||
<HardDisks>
|
||||
<HardDisk uuid="{0b1c4917-6ed8-4870-a468-a8cca43a8dc0}" location="test-zfs-21-Draid-sata0-0-roothomeswap.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{7d75d4dd-9144-4a01-9ef0-1659dd47ea69}" location="test-zfs-21-Draid-sata0-0.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{a5ac7f8f-f6dd-494b-8517-915992f3808a}" location="zfs01.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{3e6c2216-5b66-499d-ad0d-d23b9740a8e4}" location="zfs02.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{df46700d-57f8-4f66-88cc-d79634caff57}" location="zfs03.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{93e2157f-183c-49a2-a42e-5c70561dabdd}" location="zfs04.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{a832bbfb-317b-4f17-ba70-a37f24930372}" location="zfs05.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{faec32a1-d19c-4d5c-934a-c92336d6703e}" location="zfs06.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{f341e4d8-8c25-46e3-bb0c-25a7c1f13e2d}" location="zfs07.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{6e562a6b-f43f-4534-82e2-1c97a7e7700a}" location="zfs08.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{2b5edd5d-9103-4093-8e81-ab4c8ece724a}" location="zfs09.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{e94d901c-29ad-41cb-bcdb-02b5e64c6272}" location="zfs10.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{d7d3e82a-afed-4ae9-8acb-992ac194280f}" location="zfs11.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{7ba0f6fc-921c-456f-98af-58d982fc2e68}" location="zfs12.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{159ac173-68d5-46c7-9a87-902576870ea1}" location="zfs13.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{d640cdea-560e-4c43-8e9c-e63ca0bdf657}" location="zfs14.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{17592c2d-83b6-4249-919f-8a6454e1b9ad}" location="zfs15.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{6fd46fec-f381-497b-944d-15b0be164585}" location="zfs16.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{5eb81d86-dba4-4309-a55d-2550130fbd30}" location="zfs17.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{104bd37f-b95a-45ea-8aeb-362643fffb4c}" location="zfs18.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{0482fe5a-bfef-4a7d-9639-adfbecbc3395}" location="zfs19.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{f406b98e-97f7-484f-af0d-a75a505d0863}" location="zfs20.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{2bb3428a-fa97-4e48-84e9-ec4af1c7b478}" location="zfs21.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{3ccc0544-c7eb-4701-967c-66fb888e178e}" location="zfs22.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{b0b1ed03-393b-4a0e-936f-0e6f99fd8cfa}" location="zfs23.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{c6a9e0b6-3699-4491-8520-7ec831b1b048}" location="zfs24.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{160852b4-d406-4d75-b265-db7a845dd525}" location="zfs-SAS25.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{e94d33b4-4ce7-49e3-8c76-07a17e4c2e9d}" location="zfs-SAS26.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{73f372a3-da55-45e7-b5d9-81f669e94ffe}" location="zfs-SAS27.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{d3da0d8e-ac68-48ef-8c47-ba4992ec6b7e}" location="zfs-SAS28.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{d55b16bf-8059-4741-8e03-1292192e84a2}" location="zfs-SAS29.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{33a6cb43-046d-42c9-8e85-55bcb74a5bab}" location="zfs-SAS30.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{5a7797d2-8dd6-4fa0-900f-b85f680384fe}" location="zfs-SAS31.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{49c61e69-7e79-4c85-8952-99c6fe68546b}" location="zfs-SAS32.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{00706932-d0fb-44ca-84ab-4c04aa99b444}" location="zfs-SAS33.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{1791c42c-f71d-48b7-9c7c-cda83b4deede}" location="zfs-SAS34.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{b1da894c-d442-4e37-8714-0661978879ad}" location="zfs-SAS35.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{ffc21422-a683-45b4-972e-681087f1bd19}" location="zfs-SAS36.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{10a46250-2f05-45b5-800a-e5008920a940}" location="zfs-SAS37.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{50d3e4de-8136-41d5-ad67-4a738d4acf14}" location="zfs-SAS38.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{22e286e0-de90-4453-ac1c-2587fb944b24}" location="zfs-SAS39.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{1dce6338-79a2-4654-860f-cab4bc26ee29}" location="zfs-SAS40.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{b34f057e-3440-469e-beb8-daabb2e2f6fb}" location="zfs-SAS41.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{9484c81f-134b-4d7e-bc66-7a0d86d81496}" location="zfs-SAS42.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{49bdedcc-7814-4456-b9a2-816da46a8016}" location="zfs-SAS43.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{3e96d1f7-1709-4766-98c0-abaf045574c2}" location="zfs-SAS44.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{ce618a3e-7f60-46cb-b0fb-6d0987ccff3e}" location="zfs-SAS45.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{70bb8082-ef5f-4909-93c1-1c1bdd3d4446}" location="zfs-SAS46.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{031ad331-43c1-48e6-83dc-a7638e1e4acf}" location="zfs-SAS47.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{752877d3-02fd-43b4-b607-92aeb9931810}" location="zfs-SAS48.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{be4c68c5-7505-4c95-9104-99c198b2f7bd}" location="zfs-SAS49.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{a65f55da-5b98-4802-b56d-13addd333e4a}" location="zfs-SAS50.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{ba88e128-f738-4f94-9fac-b19e3cb1cd54}" location="zfs-SAS51.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{7b1da897-0716-4c36-865e-1a31454d8870}" location="zfs-SAS52.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{e2172563-62ff-4552-9071-8d2797ecce73}" location="zfs-SAS53.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{89d15419-347c-413b-bceb-184bf40a0f28}" location="zfs-SAS54.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{b28e528d-0f4b-47bf-a64a-83bec1239322}" location="zfs-SAS55.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{1342f82b-3611-4f93-96f4-04b0a3d60da7}" location="zfs-SAS56.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{8d177c84-06f6-4188-a7f2-ee12dd1a9e10}" location="zfs-SAS57.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{dea8afef-b6e9-4592-ac57-e4bf9564b818}" location="zfs-SAS58.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{7ced439f-be5b-40a2-93c7-b93d862604b3}" location="zfs-SAS59.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{dd639d04-0ab1-4d4c-a68a-8d3439543701}" location="zfs-SAS60.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{843b9257-d61e-44b8-8245-60caf9848ff7}" location="zfs-SAS61.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{ceae4fc7-059a-4b0b-b045-49c3e25234ad}" location="zfs-SAS62.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{e334c60d-6019-4da0-a3a2-457f39775621}" location="zfs-SAS63.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{d01820a1-305f-4be1-a594-3bf13e631a1f}" location="zfs-SAS64.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{eabe9c14-7362-4ce2-b7c6-3d836aee8288}" location="zfs-SAS65.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{0000f777-37aa-4ea9-9a7b-20c1768b0106}" location="zfs-SAS66.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{27bf6d4c-fd6b-47c7-98d5-5cff89d1e8e6}" location="zfs-SAS67.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{f1eb3095-977f-456a-a503-36dade415047}" location="zfs-SAS68.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{b7b1a6a8-1b03-4b75-a727-1990dc2e7249}" location="zfs-SAS69.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{da42df32-4174-4d18-99ef-821bce22dc92}" location="zfs-SAS70.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{dda85502-b1b3-41ac-8f24-0786ce142f66}" location="zfs-SAS71.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{70df442b-ff19-4a9d-9b71-c1cec32c787f}" location="zfs-SAS72.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{1c4c45d2-7c3d-4ac9-bae3-18f6e8f700bc}" location="zfs-SAS73.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{ff887352-c502-41b1-978e-8c3b9e595be7}" location="zfs-SAS74.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{47e6737c-42fd-461c-a753-291a24d563f9}" location="zfs-SAS75.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{41b92a30-fefb-4197-b3b7-f6428c803059}" location="zfs-SAS76.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{52ed44c8-0c26-49fb-88d5-1cd2fb0c03ce}" location="zfs-SAS77.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{95e40197-6e80-4eee-833d-8a0b4c3cd6d8}" location="zfs-SAS78.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{06915273-753a-42aa-8d39-c5420ea96d0b}" location="zfs-SAS79.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{9dded7a6-9cbc-41b6-aab7-f9d59121a9d6}" location="zfs-SAS80.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{6f7abea2-9f93-48d3-9d06-4073a625171f}" location="zfs-SAS81.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{a4e719c6-ce75-4497-98e3-f2f94cbc9ef9}" location="zfs-SAS82.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{544f2361-3443-42f8-82dc-b1b9ff05f580}" location="zfs-SAS83.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{9f268ae0-08b9-426f-988a-c4be84cd110c}" location="zfs-SAS84.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{6c2c9ef6-1069-48c7-bdf5-af0984e1a0ec}" location="zfs-SAS85.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{efc50a32-3bac-43be-9815-2bac81138638}" location="zfs-SAS86.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{dde947d5-4c3b-4846-bd68-911b21306ab7}" location="zfs-SAS87.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{50b2f510-185a-416f-ae45-dc075f8165fa}" location="zfs-SAS88.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{19ec34e7-7bf5-435d-a116-c71360574ca3}" location="zfs-SAS89.vdi" format="VDI" type="Normal"/>
|
||||
<HardDisk uuid="{48f4b82f-fcf6-4886-b30d-5ae306592319}" location="zfs-SAS90.vdi" format="VDI" type="Normal"/>
|
||||
</HardDisks>
|
||||
<DVDImages>
|
||||
<Image uuid="{f39d5af2-c31a-499e-b2d7-435cc604b2bd}" location="/zdellblue/shrcompr/ISO/ubuntu-20.04-desktop-amd64.iso"/>
|
||||
<Image uuid="{ead321e2-e2bc-47e2-a4da-6d931706b5e3}" location="/zdellblue/shrcompr/ISO/debian-10.10.0-amd64-netinst.iso"/>
|
||||
<Image uuid="{2cff0f20-96b2-435d-92af-b4f3edb1f552}" location="/zdellblue/shrcompr/ISO/devuan_beowulf_3.0.0_amd64-netinstall.iso"/>
|
||||
</DVDImages>
|
||||
</MediaRegistry>
|
||||
<ExtraData>
|
||||
<ExtraDataItem name="GUI/LastCloseAction" value="PowerOff"/>
|
||||
<ExtraDataItem name="GUI/LastGuestSizeHint" value="1440,979"/>
|
||||
<ExtraDataItem name="GUI/LastNormalWindowPosition" value="475,91,800,644"/>
|
||||
</ExtraData>
|
||||
<Hardware>
|
||||
<CPU count="2">
|
||||
<PAE enabled="false"/>
|
||||
<LongMode enabled="true"/>
|
||||
<X2APIC enabled="true"/>
|
||||
<HardwareVirtExLargePages enabled="false"/>
|
||||
</CPU>
|
||||
<Memory RAMSize="8192"/>
|
||||
<HID Pointing="USBTablet"/>
|
||||
<Chipset type="ICH9"/>
|
||||
<Boot>
|
||||
<Order position="1" device="DVD"/>
|
||||
<Order position="2" device="HardDisk"/>
|
||||
<Order position="3" device="None"/>
|
||||
<Order position="4" device="None"/>
|
||||
</Boot>
|
||||
<Display controller="VMSVGA" VRAMSize="64"/>
|
||||
<VideoCapture screens="1" file="." fps="25"/>
|
||||
<BIOS>
|
||||
<IOAPIC enabled="true"/>
|
||||
<SmbiosUuidLittleEndian enabled="true"/>
|
||||
</BIOS>
|
||||
<USB>
|
||||
<Controllers>
|
||||
<Controller name="OHCI" type="OHCI"/>
|
||||
<Controller name="EHCI" type="EHCI"/>
|
||||
</Controllers>
|
||||
</USB>
|
||||
<Network>
|
||||
<Adapter slot="0" enabled="true" MACAddress="080027A635A6" type="virtio">
|
||||
<DisabledModes>
|
||||
<InternalNetwork name="intnet"/>
|
||||
<NATNetwork name="NatNetwork"/>
|
||||
</DisabledModes>
|
||||
<HostOnlyInterface name="vboxnet0"/>
|
||||
</Adapter>
|
||||
<Adapter slot="1" enabled="true" MACAddress="0800279837F1" type="virtio">
|
||||
<DisabledModes>
|
||||
<InternalNetwork name="intnet"/>
|
||||
</DisabledModes>
|
||||
<NATNetwork name="NatNetwork"/>
|
||||
</Adapter>
|
||||
<Adapter slot="8" cable="false"/>
|
||||
<Adapter slot="9" cable="false"/>
|
||||
<Adapter slot="10" cable="false"/>
|
||||
<Adapter slot="11" cable="false"/>
|
||||
<Adapter slot="12" cable="false"/>
|
||||
<Adapter slot="13" cable="false"/>
|
||||
<Adapter slot="14" cable="false"/>
|
||||
<Adapter slot="15" cable="false"/>
|
||||
<Adapter slot="16" cable="false"/>
|
||||
<Adapter slot="17" cable="false"/>
|
||||
<Adapter slot="18" cable="false"/>
|
||||
<Adapter slot="19" cable="false"/>
|
||||
<Adapter slot="20" cable="false"/>
|
||||
<Adapter slot="21" cable="false"/>
|
||||
<Adapter slot="22" cable="false"/>
|
||||
<Adapter slot="23" cable="false"/>
|
||||
<Adapter slot="24" cable="false"/>
|
||||
<Adapter slot="25" cable="false"/>
|
||||
<Adapter slot="26" cable="false"/>
|
||||
<Adapter slot="27" cable="false"/>
|
||||
<Adapter slot="28" cable="false"/>
|
||||
<Adapter slot="29" cable="false"/>
|
||||
<Adapter slot="30" cable="false"/>
|
||||
<Adapter slot="31" cable="false"/>
|
||||
<Adapter slot="32" cable="false"/>
|
||||
<Adapter slot="33" cable="false"/>
|
||||
<Adapter slot="34" cable="false"/>
|
||||
<Adapter slot="35" cable="false"/>
|
||||
</Network>
|
||||
<AudioAdapter codec="AD1980" driver="Pulse" enabled="true" enabledIn="false"/>
|
||||
<RTC localOrUTC="UTC"/>
|
||||
<Clipboard mode="Bidirectional"/>
|
||||
<GuestProperties>
|
||||
<GuestProperty name="/VirtualBox/GuestAdd/HostVerLastChecked" value="6.1.22" timestamp="1625341569858893000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestAdd/Revision" value="144080" timestamp="1625349674091802000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestAdd/Version" value="6.1.22" timestamp="1625349674091594000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestAdd/VersionExt" value="6.1.22" timestamp="1625349674091706000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/0/MAC" value="080027A635A6" timestamp="1625349684091550000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/0/Name" value="eth0" timestamp="1625349684091618000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/0/Status" value="Up" timestamp="1625349674094150000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/0/V4/Broadcast" value="192.168.56.255" timestamp="1625349684091465000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/0/V4/IP" value="192.168.56.103" timestamp="1625349684091249000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/0/V4/Netmask" value="255.255.255.0" timestamp="1625349674093907000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/1/MAC" value="0800279837F1" timestamp="1625349684091950000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/1/Name" value="eth1" timestamp="1625349684092027000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/1/Status" value="Up" timestamp="1625349684091980000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/1/V4/Broadcast" value="10.0.2.255" timestamp="1625349684091813000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/1/V4/IP" value="10.0.2.10" timestamp="1625349684091711000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/1/V4/Netmask" value="255.255.255.0" timestamp="1625349684091878000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/Net/Count" value="2" timestamp="1625349684092121000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/OS/LoggedInUsers" value="1" timestamp="1625349674093318000" flags="TRANSIENT, TRANSRESET"/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/OS/LoggedInUsersList" value="user" timestamp="1625349674093140000" flags="TRANSIENT, TRANSRESET"/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/OS/NoLoggedInUsers" value="false" timestamp="1625349674093515000" flags="TRANSIENT, TRANSRESET"/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/OS/Product" value="Linux" timestamp="1625349674090731000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/OS/Release" value="4.19.0-17-amd64" timestamp="1625349674091018000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/GuestInfo/OS/Version" value="#1 SMP Debian 4.19.194-2 (2021-06-21)" timestamp="1625349674091382000" flags=""/>
|
||||
<GuestProperty name="/VirtualBox/HostInfo/GUI/LanguageID" value="en_US" timestamp="1625349686299177000" flags="RDONLYGUEST"/>
|
||||
<GuestProperty name="/VirtualBox/HostInfo/VBoxRev" value="144080" timestamp="1625341507693561002" flags="TRANSIENT, RDONLYGUEST"/>
|
||||
<GuestProperty name="/VirtualBox/HostInfo/VBoxVer" value="6.1.22" timestamp="1625341507693561000" flags="TRANSIENT, RDONLYGUEST"/>
|
||||
<GuestProperty name="/VirtualBox/HostInfo/VBoxVerExt" value="6.1.22" timestamp="1625341507693561001" flags="TRANSIENT, RDONLYGUEST"/>
|
||||
<GuestProperty name="/VirtualBox/VMInfo/ResetCounter" value="1" timestamp="1625349638733605000" flags="TRANSIENT, RDONLYGUEST"/>
|
||||
<GuestProperty name="/VirtualBox/VMInfo/ResumeCounter" value="1" timestamp="1625349632494561000" flags="TRANSIENT, RDONLYGUEST"/>
|
||||
</GuestProperties>
|
||||
</Hardware>
|
||||
<StorageControllers>
|
||||
<StorageController name="IDE" type="PIIX4" PortCount="2" useHostIOCache="true" Bootable="true">
|
||||
<AttachedDevice passthrough="false" type="DVD" hotpluggable="false" port="0" device="0"/>
|
||||
</StorageController>
|
||||
<StorageController name="SATA" type="AHCI" PortCount="26" useHostIOCache="true" Bootable="true" IDE0MasterEmulationPort="0" IDE0SlaveEmulationPort="1" IDE1MasterEmulationPort="2" IDE1SlaveEmulationPort="3">
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="0" device="0">
|
||||
<Image uuid="{7d75d4dd-9144-4a01-9ef0-1659dd47ea69}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="1" device="0">
|
||||
<Image uuid="{a5ac7f8f-f6dd-494b-8517-915992f3808a}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="2" device="0">
|
||||
<Image uuid="{3e6c2216-5b66-499d-ad0d-d23b9740a8e4}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="3" device="0">
|
||||
<Image uuid="{df46700d-57f8-4f66-88cc-d79634caff57}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="4" device="0">
|
||||
<Image uuid="{93e2157f-183c-49a2-a42e-5c70561dabdd}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="5" device="0">
|
||||
<Image uuid="{a832bbfb-317b-4f17-ba70-a37f24930372}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="6" device="0">
|
||||
<Image uuid="{faec32a1-d19c-4d5c-934a-c92336d6703e}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="7" device="0">
|
||||
<Image uuid="{f341e4d8-8c25-46e3-bb0c-25a7c1f13e2d}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="8" device="0">
|
||||
<Image uuid="{6e562a6b-f43f-4534-82e2-1c97a7e7700a}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="9" device="0">
|
||||
<Image uuid="{2b5edd5d-9103-4093-8e81-ab4c8ece724a}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="10" device="0">
|
||||
<Image uuid="{e94d901c-29ad-41cb-bcdb-02b5e64c6272}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="11" device="0">
|
||||
<Image uuid="{d7d3e82a-afed-4ae9-8acb-992ac194280f}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="12" device="0">
|
||||
<Image uuid="{7ba0f6fc-921c-456f-98af-58d982fc2e68}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="13" device="0">
|
||||
<Image uuid="{159ac173-68d5-46c7-9a87-902576870ea1}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="14" device="0">
|
||||
<Image uuid="{d640cdea-560e-4c43-8e9c-e63ca0bdf657}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="15" device="0">
|
||||
<Image uuid="{17592c2d-83b6-4249-919f-8a6454e1b9ad}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="16" device="0">
|
||||
<Image uuid="{6fd46fec-f381-497b-944d-15b0be164585}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="17" device="0">
|
||||
<Image uuid="{5eb81d86-dba4-4309-a55d-2550130fbd30}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="18" device="0">
|
||||
<Image uuid="{104bd37f-b95a-45ea-8aeb-362643fffb4c}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="19" device="0">
|
||||
<Image uuid="{0482fe5a-bfef-4a7d-9639-adfbecbc3395}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="20" device="0">
|
||||
<Image uuid="{f406b98e-97f7-484f-af0d-a75a505d0863}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="21" device="0">
|
||||
<Image uuid="{2bb3428a-fa97-4e48-84e9-ec4af1c7b478}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="22" device="0">
|
||||
<Image uuid="{3ccc0544-c7eb-4701-967c-66fb888e178e}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="23" device="0">
|
||||
<Image uuid="{b0b1ed03-393b-4a0e-936f-0e6f99fd8cfa}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="24" device="0">
|
||||
<Image uuid="{c6a9e0b6-3699-4491-8520-7ec831b1b048}"/>
|
||||
</AttachedDevice>
|
||||
</StorageController>
|
||||
<StorageController name="SAS" type="LsiLogicSas" PortCount="70" useHostIOCache="true" Bootable="true">
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="0" device="0">
|
||||
<Image uuid="{160852b4-d406-4d75-b265-db7a845dd525}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="1" device="0">
|
||||
<Image uuid="{e94d33b4-4ce7-49e3-8c76-07a17e4c2e9d}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="2" device="0">
|
||||
<Image uuid="{73f372a3-da55-45e7-b5d9-81f669e94ffe}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="3" device="0">
|
||||
<Image uuid="{d3da0d8e-ac68-48ef-8c47-ba4992ec6b7e}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="4" device="0">
|
||||
<Image uuid="{d55b16bf-8059-4741-8e03-1292192e84a2}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="5" device="0">
|
||||
<Image uuid="{33a6cb43-046d-42c9-8e85-55bcb74a5bab}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="6" device="0">
|
||||
<Image uuid="{5a7797d2-8dd6-4fa0-900f-b85f680384fe}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="7" device="0">
|
||||
<Image uuid="{49c61e69-7e79-4c85-8952-99c6fe68546b}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="8" device="0">
|
||||
<Image uuid="{00706932-d0fb-44ca-84ab-4c04aa99b444}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="9" device="0">
|
||||
<Image uuid="{1791c42c-f71d-48b7-9c7c-cda83b4deede}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="10" device="0">
|
||||
<Image uuid="{b1da894c-d442-4e37-8714-0661978879ad}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="11" device="0">
|
||||
<Image uuid="{ffc21422-a683-45b4-972e-681087f1bd19}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="12" device="0">
|
||||
<Image uuid="{10a46250-2f05-45b5-800a-e5008920a940}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="13" device="0">
|
||||
<Image uuid="{50d3e4de-8136-41d5-ad67-4a738d4acf14}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="14" device="0">
|
||||
<Image uuid="{22e286e0-de90-4453-ac1c-2587fb944b24}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="15" device="0">
|
||||
<Image uuid="{1dce6338-79a2-4654-860f-cab4bc26ee29}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="16" device="0">
|
||||
<Image uuid="{b34f057e-3440-469e-beb8-daabb2e2f6fb}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="17" device="0">
|
||||
<Image uuid="{9484c81f-134b-4d7e-bc66-7a0d86d81496}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="18" device="0">
|
||||
<Image uuid="{49bdedcc-7814-4456-b9a2-816da46a8016}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="19" device="0">
|
||||
<Image uuid="{3e96d1f7-1709-4766-98c0-abaf045574c2}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="20" device="0">
|
||||
<Image uuid="{ce618a3e-7f60-46cb-b0fb-6d0987ccff3e}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="21" device="0">
|
||||
<Image uuid="{70bb8082-ef5f-4909-93c1-1c1bdd3d4446}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="22" device="0">
|
||||
<Image uuid="{031ad331-43c1-48e6-83dc-a7638e1e4acf}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="23" device="0">
|
||||
<Image uuid="{752877d3-02fd-43b4-b607-92aeb9931810}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="24" device="0">
|
||||
<Image uuid="{be4c68c5-7505-4c95-9104-99c198b2f7bd}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="25" device="0">
|
||||
<Image uuid="{a65f55da-5b98-4802-b56d-13addd333e4a}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="26" device="0">
|
||||
<Image uuid="{ba88e128-f738-4f94-9fac-b19e3cb1cd54}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="27" device="0">
|
||||
<Image uuid="{7b1da897-0716-4c36-865e-1a31454d8870}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="28" device="0">
|
||||
<Image uuid="{e2172563-62ff-4552-9071-8d2797ecce73}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="29" device="0">
|
||||
<Image uuid="{89d15419-347c-413b-bceb-184bf40a0f28}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="30" device="0">
|
||||
<Image uuid="{b28e528d-0f4b-47bf-a64a-83bec1239322}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="31" device="0">
|
||||
<Image uuid="{1342f82b-3611-4f93-96f4-04b0a3d60da7}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="32" device="0">
|
||||
<Image uuid="{8d177c84-06f6-4188-a7f2-ee12dd1a9e10}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="33" device="0">
|
||||
<Image uuid="{dea8afef-b6e9-4592-ac57-e4bf9564b818}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="34" device="0">
|
||||
<Image uuid="{7ced439f-be5b-40a2-93c7-b93d862604b3}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="35" device="0">
|
||||
<Image uuid="{dd639d04-0ab1-4d4c-a68a-8d3439543701}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="36" device="0">
|
||||
<Image uuid="{843b9257-d61e-44b8-8245-60caf9848ff7}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="37" device="0">
|
||||
<Image uuid="{ceae4fc7-059a-4b0b-b045-49c3e25234ad}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="38" device="0">
|
||||
<Image uuid="{e334c60d-6019-4da0-a3a2-457f39775621}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="39" device="0">
|
||||
<Image uuid="{d01820a1-305f-4be1-a594-3bf13e631a1f}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="40" device="0">
|
||||
<Image uuid="{eabe9c14-7362-4ce2-b7c6-3d836aee8288}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="41" device="0">
|
||||
<Image uuid="{0000f777-37aa-4ea9-9a7b-20c1768b0106}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="42" device="0">
|
||||
<Image uuid="{27bf6d4c-fd6b-47c7-98d5-5cff89d1e8e6}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="43" device="0">
|
||||
<Image uuid="{f1eb3095-977f-456a-a503-36dade415047}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="44" device="0">
|
||||
<Image uuid="{b7b1a6a8-1b03-4b75-a727-1990dc2e7249}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="45" device="0">
|
||||
<Image uuid="{da42df32-4174-4d18-99ef-821bce22dc92}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="46" device="0">
|
||||
<Image uuid="{dda85502-b1b3-41ac-8f24-0786ce142f66}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="47" device="0">
|
||||
<Image uuid="{70df442b-ff19-4a9d-9b71-c1cec32c787f}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="48" device="0">
|
||||
<Image uuid="{1c4c45d2-7c3d-4ac9-bae3-18f6e8f700bc}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="49" device="0">
|
||||
<Image uuid="{ff887352-c502-41b1-978e-8c3b9e595be7}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="50" device="0">
|
||||
<Image uuid="{47e6737c-42fd-461c-a753-291a24d563f9}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="51" device="0">
|
||||
<Image uuid="{41b92a30-fefb-4197-b3b7-f6428c803059}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="52" device="0">
|
||||
<Image uuid="{52ed44c8-0c26-49fb-88d5-1cd2fb0c03ce}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="53" device="0">
|
||||
<Image uuid="{95e40197-6e80-4eee-833d-8a0b4c3cd6d8}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="54" device="0">
|
||||
<Image uuid="{06915273-753a-42aa-8d39-c5420ea96d0b}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="55" device="0">
|
||||
<Image uuid="{9dded7a6-9cbc-41b6-aab7-f9d59121a9d6}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="56" device="0">
|
||||
<Image uuid="{6f7abea2-9f93-48d3-9d06-4073a625171f}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="57" device="0">
|
||||
<Image uuid="{a4e719c6-ce75-4497-98e3-f2f94cbc9ef9}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="58" device="0">
|
||||
<Image uuid="{544f2361-3443-42f8-82dc-b1b9ff05f580}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="59" device="0">
|
||||
<Image uuid="{9f268ae0-08b9-426f-988a-c4be84cd110c}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="60" device="0">
|
||||
<Image uuid="{6c2c9ef6-1069-48c7-bdf5-af0984e1a0ec}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="61" device="0">
|
||||
<Image uuid="{efc50a32-3bac-43be-9815-2bac81138638}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="62" device="0">
|
||||
<Image uuid="{dde947d5-4c3b-4846-bd68-911b21306ab7}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="63" device="0">
|
||||
<Image uuid="{50b2f510-185a-416f-ae45-dc075f8165fa}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="64" device="0">
|
||||
<Image uuid="{19ec34e7-7bf5-435d-a116-c71360574ca3}"/>
|
||||
</AttachedDevice>
|
||||
<AttachedDevice type="HardDisk" hotpluggable="false" port="65" device="0">
|
||||
<Image uuid="{48f4b82f-fcf6-4886-b30d-5ae306592319}"/>
|
||||
</AttachedDevice>
|
||||
</StorageController>
|
||||
</StorageControllers>
|
||||
</Machine>
|
||||
</VirtualBox>
|
63
ZFS/vbox-create-n-attach-disks-SAS90.sh
Normal file
63
ZFS/vbox-create-n-attach-disks-SAS90.sh
Normal file
@ -0,0 +1,63 @@
|
||||
#!/bin/bash
# 2021 Dave Bechtel - for testing ZFS DRAID 2.1.x
# Creates ZFS data disks (VDI files) and attaches them to an existing VirtualBox VM.
# REF: http://www.allgoodbits.org/articles/view/54
# REF: https://superuser.com/questions/741734/virtualbox-how-can-i-add-mount-a-iso-image-file-from-command-line

# Name of the (pre-existing) VM the disks get attached to.
vmname=test-zfs-21-Draid

# One-time VM setup, kept here for reference:
#VBoxManage createvm --name "$vmname" --ostype 'Linux_64' --basefolder "$HOME" --register
#VBoxManage modifyvm "$vmname" --description "NOTE this is just a temp VM used to conveniently register ISOs with vbox media manager - it was created with $0"
#VBoxManage storagectl $vmname --name IDE --add ide --controller piix3 --portcount 2 --bootable on
#VBoxManage storageattach "$vmname" --storagectl IDE --port 0 --device 0 --type dvddrive --medium emptydrive #"X:\Folder\containing\the.iso"

# Disk size 4,000,797,696 bytes - must be evenly divisible by 512 (sector size).
nd=24     # number of data disks to create on the SATA controller
port=1    # first SATA port to use; root disk already occupies port 0
#######################################
# Create $nd sparse VDI disks (zfs01.vdi .. zfsNN.vdi) in $PWD and attach
# them to the VM's SATA controller, one per port starting at $port.
# Globals:  vmname (read), nd (read), port (read/written - ends one past
#           the last attached port)
# Outputs:  progress and VBoxManage output to stdout
#######################################
function mkdisks () {
  local this
  for this in $(seq -w 01 "$nd"); do
    # Progress: name the file actually being created (original printed a
    # nonexistent "$PWD/<index>" path).
    echo "$PWD/zfs${this}.vdi"

    # "Actual" 2TB - REF: https://www.virtualbox.org/manual/ch08.html#vboxmanage-createmedium
    # time VBoxManage createmedium disk --filename "$PWD/zfs$this.vdi" --sizebyte 2000398934016

    # "Actual" 4GB - REF: https://www.virtualbox.org/manual/ch08.html#vboxmanage-createmedium
    time VBoxManage createmedium disk --filename "$PWD/zfs$this.vdi" --sizebyte 4000797696 # 400079786802
    VBoxManage storageattach "$vmname" --storagectl SATA --port "$port" --device 0 --type hdd --medium "$PWD/zfs$this.vdi"

    port=$((port + 1))
  done
}
|
||||
|
||||
# Create and attach the first batch of SATA disks.
mkdisks

# Continue on the SAS controller, going up to 90 disks total.
pnd=$nd              # remember how many SATA disks were made
nd=90                # new overall target disk count
startd=$((pnd + 1))  # 90-24 = 66 more disks on SAS
echo "startd=$startd - nd=$nd"
port=0               # SAS ports start from 0
|
||||
#######################################
# Create sparse VDI disks zfs-SAS<startd>.vdi .. zfs-SAS<nd>.vdi in $PWD
# and attach them to the VM's SAS controller, one per port from $port.
# Globals:  vmname (read), startd (read), nd (read), port (read/written)
# Outputs:  progress and VBoxManage output to stdout
#######################################
function mkdiskSAS () {
  local this
  for this in $(seq -w "$startd" "$nd"); do
    # Progress: name the file actually being created (original printed a
    # nonexistent "$PWD/<index>" path).
    echo "$PWD/zfs-SAS${this}.vdi"

    # "Actual" 2TB - REF: https://www.virtualbox.org/manual/ch08.html#vboxmanage-createmedium
    # time VBoxManage createmedium disk --filename "$PWD/zfs$this.vdi" --sizebyte 2000398934016

    # "Actual" 4GB - REF: https://www.virtualbox.org/manual/ch08.html#vboxmanage-createmedium
    time VBoxManage createmedium disk --filename "$PWD/zfs-SAS$this.vdi" --sizebyte 4000797696
    VBoxManage storageattach "$vmname" --storagectl SAS --port "$port" --device 0 --type hdd --medium "$PWD/zfs-SAS$this.vdi"

    port=$((port + 1))
  done
}
|
||||
|
||||
# Create and attach the SAS batch of disks.
mkdiskSAS

# Summarize the VM so the new attachments can be eyeballed, and timestamp
# the end of the run.
VBoxManage showvminfo "$vmname"
date

exit;
|
594
ZFS/zfs-mk-draid-90drives.sh
Normal file
594
ZFS/zfs-mk-draid-90drives.sh
Normal file
@ -0,0 +1,594 @@
|
||||
#!/bin/bash
# Build a ZFS dRAID test pool across 90 virtual drives.
# Requires at least zfs 2.1.0.

echo "$0 - 2021 Dave Bechtel - make a ZFS DRAID pool"
echo "- pass arg1='reset' to destroy test pool"
echo "- pass arg1='fail' and arg2=dev2fail to simulate failure"

DBI=/dev/disk/by-id   # stable device-name directory

td=84          # total disks for pool / children (90 minus spares)
rzl=1          # raidz parity level (usually 2)
spr=1          # distributed spares per vdev
zp=zdraidtest  # TODO EDITME - pool name
|
||||
|
||||
# Print 'zpool status -v' with blank lines squeezed out.
function zps () {
  zpool status -v | awk 'NF>0'
}
|
||||
|
||||
# Fourteen groups of six drives each = 84 data drives.
# (Per the original notes: sda is the root disk; drives flagged below are
# held back as hotspares.)
pooldisks01=$(echo /dev/sd{b..g})   # a is rootdisk
pooldisks02=$(echo /dev/sd{h..m})
pooldisks03=$(echo /dev/sd{n..s})
pooldisks04=$(echo /dev/sd{t..y})   # z is spare
pooldisks05=$(echo /dev/sda{a..f})  #abcdef
pooldisks06=$(echo /dev/sda{g..l})  #ghijkl
pooldisks07=$(echo /dev/sda{m..r})  #mnopqr
pooldisks08=$(echo /dev/sda{s..x})  #stuvwx # yz are spares
pooldisks09=$(echo /dev/sdb{a..f})  #abcdef
pooldisks10=$(echo /dev/sdb{g..l})  #ghijkl
pooldisks11=$(echo /dev/sdb{m..r})  #mnopqr
pooldisks12=$(echo /dev/sdb{s..x})  #stuvwx
pooldisks13=$(echo /dev/sdc{a..f})  #abcdef
pooldisks14=$(echo /dev/sdc{g..l})  #ghijkl # m is spare

# The "reset" path needs the entire set in one flat, space-separated list;
# join the groups with single spaces.
pooldisks=$pooldisks01
for _grp in "$pooldisks02" "$pooldisks03" "$pooldisks04" "$pooldisks05" \
            "$pooldisks06" "$pooldisks07" "$pooldisks08" "$pooldisks09" \
            "$pooldisks10" "$pooldisks11" "$pooldisks12" "$pooldisks13" \
            "$pooldisks14"; do
  pooldisks="$pooldisks $_grp"
done
unset _grp
|
||||
|
||||
# need entire set for reset
|
||||
# sdb sdc sdd sde sdf sdg sdh sdi sdj sdk sdl sdm sdn sdo sdp sdq sdr sds sdt sdu sdv sdw sdx sdy
|
||||
# 1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6
|
||||
# D D D Z2 Z2 S
|
||||
|
||||
# extending to 32 disks
|
||||
#pooldisks2=$(echo /dev/sda{a..h})
|
||||
#sdaa sdab sdac sdad sdae sdaf sdag sdah
|
||||
|
||||
# failexit.mrg
#######################################
# Print an error message and abort the script.
# Arguments: $1 - numeric exit code, $2 - optional description
# Outputs:   diagnostic message to stderr (was stdout)
# Returns:   never; exits with $1 (defaults to 1 if omitted)
#######################################
function failexit () {
  echo '! Something failed! Code: '"$1 $2" >&2  # code # (and optional description)
  exit "${1:-1}"  # quoted + defaulted: bare 'exit $1' with no arg reuses last status
}
|
||||
|
||||
# "reset" mode: tear down the test pool and wipe the ZFS label from every
# member disk's first partition so the drives can be reused.
if [ "$1" = "reset" ]; then
  zpool destroy "$zp"
  for d in $pooldisks; do   # word-splitting of $pooldisks is intended
    # \r overwrites the same console line for compact progress output
    # (printf replaces non-portable 'echo -e -n'; output is byte-identical).
    printf 'o Clearing label for disk %s \r' "$d"
    zpool labelclear -f "${d}1"
  done
  echo ''
  zpool status -v

  exit; # early
fi
|
||||
|
||||
# "fail" mode: simulate a disk failure by marking block device $2
# (e.g. "sdd") offline via sysfs, then force a pool write so ZFS notices.
if [ "$1" = "fail" ]; then
  echo "$(date) - Simulating disk failure for $(ls -l "$DBI" | grep -- "$2")"
  echo offline > "/sys/block/$2/device/state"
  cat "/sys/block/$2/device/state"

  # Force a write; if the failure doesn't register, try a scrub.
  # (The odd '^^tmpfileDELME' name is deliberate - easy to spot and delete.)
  time dd if=/dev/urandom of="/$zp/^^tmpfileDELME" bs=1M count=1; sync

  zps

  exit; # early
fi
|
||||
|
||||
# zpool create <pool> draid[<parity>][:<data>d][:<children>c][:<spares>s] <vdevs...>
|
||||
# ex: draid2:4d:1s:11c
|
||||
|
||||
# SLOW writing to zstd-3
|
||||
# draid$rzl:8d:12'c':$spr's' $pooldisks1 \
|
||||
# draid$rzl:8d:12'c':$spr's' $pooldisks2 \
|
||||
|
||||
# Which pool layout to build: 1 = fourteen small draid1 vdevs,
# anything else = one big draid2 vdev spanning all 84 drives.
iteration=1
if [ "$iteration" = "1" ]; then
  # Fourteen vdevs: draid1, 4 data disks, 6 children, 1 distributed spare each.
  # Options considered but not used: compression=zstd-3 (slow), -o ashift=12.
  # BUGFIX: '|| failexit' must sit OUTSIDE the ( set -x ... ) subshell -
  # an exit inside it only terminates the subshell and the script kept going.
  ( set -x
  time zpool create -o autoexpand=on -O atime=off -O compression=lz4 \
    $zp \
    draid$rzl:4d:6'c':$spr's' $pooldisks01 \
    draid$rzl:4d:6'c':$spr's' $pooldisks02 \
    draid$rzl:4d:6'c':$spr's' $pooldisks03 \
    draid$rzl:4d:6'c':$spr's' $pooldisks04 \
    draid$rzl:4d:6'c':$spr's' $pooldisks05 \
    draid$rzl:4d:6'c':$spr's' $pooldisks06 \
    draid$rzl:4d:6'c':$spr's' $pooldisks07 \
    draid$rzl:4d:6'c':$spr's' $pooldisks08 \
    draid$rzl:4d:6'c':$spr's' $pooldisks09 \
    draid$rzl:4d:6'c':$spr's' $pooldisks10 \
    draid$rzl:4d:6'c':$spr's' $pooldisks11 \
    draid$rzl:4d:6'c':$spr's' $pooldisks12 \
    draid$rzl:4d:6'c':$spr's' $pooldisks13 \
    draid$rzl:4d:6'c':$spr's' $pooldisks14
  ) || failexit 101 "Failed to create DRAID"
else
  # One Big Mother: a single draid2 vdev over all drives.
  # Option considered but not used: -o ashift=12.
  rzl=2  # raidz level
  spr=4  # distributed spares
  ( set -x
  time zpool create -o autoexpand=on -O atime=off -O compression=lz4 \
    $zp \
    draid$rzl:6d:$td'c':$spr's' $pooldisks
  ) || failexit 101 "Failed to create DRAID"

  # draid$rzl:6d:$td'c':$spr's' $pooldisks \
  # invalid number of dRAID children; 84 required but 30 provided
fi
|
||||
|
||||
# Create two datasets via an external helper script (must be on PATH).
# Going with lz4 so the CPU is not the compression bottleneck.
zfs-newds.sh 11 $zp shrcompr
zfs-newds.sh 10 $zp notshrcompr

# Show the resulting pool / dataset layout.
zps
zpool list
zfs list

# 'ilesystem' also matches the "Filesystem" header line of df.
df -hT |egrep 'ilesystem|zfs'

echo "NOTE - best practice is to export the pool and # zpool import -a -d $DBI"

date
exit;
|
||||
|
||||
|
||||
# REFS:
|
||||
https://openzfs.github.io/openzfs-docs/Basic%20Concepts/dRAID%20Howto.html
|
||||
|
||||
https://klarasystems.com/articles/openzfs-draid-finally/
|
||||
|
||||
https://www.reddit.com/r/zfs/comments/lnoh7v/im_trying_to_understand_how_draid_works_but_im/
|
||||
|
||||
https://insider-voice.com/a-deep-dive-into-the-new-openzfs-2-1-distributed-raid-topology/
|
||||
|
||||
https://docs.google.com/presentation/d/1uo0nBfY84HIhEqGWEx-Tbm8fPbJKtIP3ICo4toOPcJo/edit#slide=id.g9d6b9fd59f_0_27
|
||||
|
||||
Group size must divide evenly into draid size
|
||||
E.g., 30 drives can only support
|
||||
3 drive group
|
||||
5 drive group
|
||||
10 drive group
|
||||
15 drive group
|
||||
|
||||
Only need to specify group size at creation
|
||||
|
||||
Group Size - the number of pieces the data is partitioned into plus the amount of parity
|
||||
o The amount of parity determines the redundancy
|
||||
o The number of data pieces determines the overhead
|
||||
|
||||
dRAID Size - the number of drives used for data
|
||||
(Does not include spare drives)
|
||||
|
||||
-----
|
||||
|
||||
Iteration 1:
|
||||
# make a draid with raidz1, x14 VDEVs, 4 data disks, 6 children, 1 spare
|
||||
# since we are using fairly small (4GB) disks this should not be an issue
|
||||
|
||||
+ zpool create -o autoexpand=on -O atime=off -O compression=lz4 zdraidtest \
|
||||
draid1:4d:6c:1s /dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg \
|
||||
draid1:4d:6c:1s /dev/sdh /dev/sdi /dev/sdj /dev/sdk /dev/sdl /dev/sdm \
|
||||
draid1:4d:6c:1s /dev/sdn /dev/sdo /dev/sdp /dev/sdq /dev/sdr /dev/sds \
|
||||
draid1:4d:6c:1s /dev/sdt /dev/sdu /dev/sdv /dev/sdw /dev/sdx /dev/sdy \
|
||||
draid1:4d:6c:1s /dev/sdaa /dev/sdab /dev/sdac /dev/sdad /dev/sdae /dev/sdaf \
|
||||
draid1:4d:6c:1s /dev/sdag /dev/sdah /dev/sdai /dev/sdaj /dev/sdak /dev/sdal \
|
||||
draid1:4d:6c:1s /dev/sdam /dev/sdan /dev/sdao /dev/sdap /dev/sdaq /dev/sdar \
|
||||
draid1:4d:6c:1s /dev/sdas /dev/sdat /dev/sdau /dev/sdav /dev/sdaw /dev/sdax \
|
||||
draid1:4d:6c:1s /dev/sdba /dev/sdbb /dev/sdbc /dev/sdbd /dev/sdbe /dev/sdbf \
|
||||
draid1:4d:6c:1s /dev/sdbg /dev/sdbh /dev/sdbi /dev/sdbj /dev/sdbk /dev/sdbl \
|
||||
draid1:4d:6c:1s /dev/sdbm /dev/sdbn /dev/sdbo /dev/sdbp /dev/sdbq /dev/sdbr \
|
||||
draid1:4d:6c:1s /dev/sdbs /dev/sdbt /dev/sdbu /dev/sdbv /dev/sdbw /dev/sdbx \
|
||||
draid1:4d:6c:1s /dev/sdca /dev/sdcb /dev/sdcc /dev/sdcd /dev/sdce /dev/sdcf \
|
||||
draid1:4d:6c:1s /dev/sdcg /dev/sdch /dev/sdci /dev/sdcj /dev/sdck /dev/sdcl
|
||||
real 0m15.619s
|
||||
|
||||
+ zfs create -o atime=off -o compression=lz4 -o sharesmb=on -o xattr=sa -o recordsize=1024k zdraidtest/shrcompr
|
||||
cannot share 'zdraidtest/shrcompr: system error': SMB share creation failed
|
||||
filesystem successfully created, but not shared
|
||||
changed ownership of '/zdraidtest/shrcompr' from root to user
|
||||
|
||||
Filesystem Type Size Used Avail Use% Mounted on
|
||||
zdraidtest/shrcompr zfs 196G 1.0M 196G 1% /zdraidtest/shrcompr
|
||||
|
||||
+ zfs create -o atime=off -o compression=lz4 -o sharesmb=off -o recordsize=1024k zdraidtest/notshrcompr
|
||||
changed ownership of '/zdraidtest/notshrcompr' from root to user
|
||||
|
||||
Filesystem Type Size Used Avail Use% Mounted on
|
||||
zdraidtest/notshrcompr zfs 196G 1.0M 196G 1% /zdraidtest/notshrcompr
|
||||
|
||||
pool: zdraidtest
|
||||
state: ONLINE
|
||||
config:
|
||||
NAME STATE READ WRITE CKSUM
|
||||
zdraidtest ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-0 ONLINE 0 0 0
|
||||
sdb ONLINE 0 0 0
|
||||
sdc ONLINE 0 0 0
|
||||
sdd ONLINE 0 0 0
|
||||
sde ONLINE 0 0 0
|
||||
sdf ONLINE 0 0 0
|
||||
sdg ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-1 ONLINE 0 0 0
|
||||
sdh ONLINE 0 0 0
|
||||
sdi ONLINE 0 0 0
|
||||
sdj ONLINE 0 0 0
|
||||
sdk ONLINE 0 0 0
|
||||
sdl ONLINE 0 0 0
|
||||
sdm ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-2 ONLINE 0 0 0
|
||||
sdn ONLINE 0 0 0
|
||||
sdo ONLINE 0 0 0
|
||||
sdp ONLINE 0 0 0
|
||||
sdq ONLINE 0 0 0
|
||||
sdr ONLINE 0 0 0
|
||||
sds ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-3 ONLINE 0 0 0
|
||||
sdt ONLINE 0 0 0
|
||||
sdu ONLINE 0 0 0
|
||||
sdv ONLINE 0 0 0
|
||||
sdw ONLINE 0 0 0
|
||||
sdx ONLINE 0 0 0
|
||||
sdy ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-4 ONLINE 0 0 0
|
||||
sdaa ONLINE 0 0 0
|
||||
sdab ONLINE 0 0 0
|
||||
sdac ONLINE 0 0 0
|
||||
sdad ONLINE 0 0 0
|
||||
sdae ONLINE 0 0 0
|
||||
sdaf ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-5 ONLINE 0 0 0
|
||||
sdag ONLINE 0 0 0
|
||||
sdah ONLINE 0 0 0
|
||||
sdai ONLINE 0 0 0
|
||||
sdaj ONLINE 0 0 0
|
||||
sdak ONLINE 0 0 0
|
||||
sdal ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-6 ONLINE 0 0 0
|
||||
sdam ONLINE 0 0 0
|
||||
sdan ONLINE 0 0 0
|
||||
sdao ONLINE 0 0 0
|
||||
sdap ONLINE 0 0 0
|
||||
sdaq ONLINE 0 0 0
|
||||
sdar ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-7 ONLINE 0 0 0
|
||||
sdas ONLINE 0 0 0
|
||||
sdat ONLINE 0 0 0
|
||||
sdau ONLINE 0 0 0
|
||||
sdav ONLINE 0 0 0
|
||||
sdaw ONLINE 0 0 0
|
||||
sdax ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-8 ONLINE 0 0 0
|
||||
sdba ONLINE 0 0 0
|
||||
sdbb ONLINE 0 0 0
|
||||
sdbc ONLINE 0 0 0
|
||||
sdbd ONLINE 0 0 0
|
||||
sdbe ONLINE 0 0 0
|
||||
sdbf ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-9 ONLINE 0 0 0
|
||||
sdbg ONLINE 0 0 0
|
||||
sdbh ONLINE 0 0 0
|
||||
sdbi ONLINE 0 0 0
|
||||
sdbj ONLINE 0 0 0
|
||||
sdbk ONLINE 0 0 0
|
||||
sdbl ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-10 ONLINE 0 0 0
|
||||
sdbm ONLINE 0 0 0
|
||||
sdbn ONLINE 0 0 0
|
||||
sdbo ONLINE 0 0 0
|
||||
sdbp ONLINE 0 0 0
|
||||
sdbq ONLINE 0 0 0
|
||||
sdbr ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-11 ONLINE 0 0 0
|
||||
sdbs ONLINE 0 0 0
|
||||
sdbt ONLINE 0 0 0
|
||||
sdbu ONLINE 0 0 0
|
||||
sdbv ONLINE 0 0 0
|
||||
sdbw ONLINE 0 0 0
|
||||
sdbx ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-12 ONLINE 0 0 0
|
||||
sdca ONLINE 0 0 0
|
||||
sdcb ONLINE 0 0 0
|
||||
sdcc ONLINE 0 0 0
|
||||
sdcd ONLINE 0 0 0
|
||||
sdce ONLINE 0 0 0
|
||||
sdcf ONLINE 0 0 0
|
||||
draid1:4d:6c:1s-13 ONLINE 0 0 0
|
||||
sdcg ONLINE 0 0 0
|
||||
sdch ONLINE 0 0 0
|
||||
sdci ONLINE 0 0 0
|
||||
sdcj ONLINE 0 0 0
|
||||
sdck ONLINE 0 0 0
|
||||
sdcl ONLINE 0 0 0
|
||||
spares
|
||||
draid1-0-0 AVAIL
|
||||
draid1-1-0 AVAIL
|
||||
draid1-2-0 AVAIL
|
||||
draid1-3-0 AVAIL
|
||||
draid1-4-0 AVAIL
|
||||
draid1-5-0 AVAIL
|
||||
draid1-6-0 AVAIL
|
||||
draid1-7-0 AVAIL
|
||||
draid1-8-0 AVAIL
|
||||
draid1-9-0 AVAIL
|
||||
draid1-10-0 AVAIL
|
||||
draid1-11-0 AVAIL
|
||||
draid1-12-0 AVAIL
|
||||
draid1-13-0 AVAIL
|
||||
errors: No known data errors
|
||||
|
||||
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
|
||||
zdraidtest 252G 895K 252G - - 0% 0% 1.00x ONLINE -
|
||||
|
||||
NAME USED AVAIL REFER MOUNTPOINT
|
||||
zdraidtest 497K 195G 51.9K /zdraidtest
|
||||
zdraidtest/notshrcompr 51.9K 195G 51.9K /zdraidtest/notshrcompr
|
||||
zdraidtest/shrcompr 51.9K 195G 51.9K /zdraidtest/shrcompr
|
||||
|
||||
Filesystem Type Size Used Avail Use% Mounted on
|
||||
zdraidtest zfs 196G 128K 196G 1% /zdraidtest
|
||||
zdraidtest/shrcompr zfs 196G 1.0M 196G 1% /zdraidtest/shrcompr
|
||||
zdraidtest/notshrcompr zfs 196G 1.0M 196G 1% /zdraidtest/notshrcompr
|
||||
|
||||
NOTE - best practice is to export the pool and # zpool import -a -d /dev/disk/by-id
|
||||
|
||||
|
||||
-----
|
||||
|
||||
draid$rzl:6d:$td'c':$spr's' $pooldisks \
|
||||
|
||||
Iteration 2 - make a DRAID raidz2 with 6 data disks, (84) children, 4 spares = more space available
|
||||
Note that we are allocating more virtual spares than the raidz2 level, as well as having
|
||||
idle hotspare disks available - we can sustain 2 failures with no data loss, replace them
|
||||
with virtual spares, and once the resilver finishes we should be able to sustain ANOTHER 2 fails
|
||||
HOWEVER - if we get 4x simultaneous fails, the pool I/O gets suspended and we HAVE TO reboot
|
||||
because the zfs commands will hang
|
||||
|
||||
+ zpool create -o autoexpand=on -O atime=off -O compression=lz4 zdraidtest \
|
||||
draid2:6d:84c:4s \
|
||||
/dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg \
|
||||
/dev/sdh /dev/sdi /dev/sdj /dev/sdk /dev/sdl /dev/sdm /dev/sdn /dev/sdo \
|
||||
/dev/sdp /dev/sdq /dev/sdr /dev/sds /dev/sdt /dev/sdu /dev/sdv /dev/sdw \
|
||||
/dev/sdx /dev/sdy /dev/sdaa /dev/sdab /dev/sdac /dev/sdad /dev/sdae \
|
||||
/dev/sdaf /dev/sdag /dev/sdah /dev/sdai /dev/sdaj /dev/sdak /dev/sdal \
|
||||
/dev/sdam /dev/sdan /dev/sdao /dev/sdap /dev/sdaq /dev/sdar /dev/sdas \
|
||||
/dev/sdat /dev/sdau /dev/sdav /dev/sdaw /dev/sdax /dev/sdba /dev/sdbb \
|
||||
/dev/sdbc /dev/sdbd /dev/sdbe /dev/sdbf /dev/sdbg /dev/sdbh /dev/sdbi \
|
||||
/dev/sdbj /dev/sdbk /dev/sdbl /dev/sdbm /dev/sdbn /dev/sdbo /dev/sdbp \
|
||||
/dev/sdbq /dev/sdbr /dev/sdbs /dev/sdbt /dev/sdbu /dev/sdbv /dev/sdbw \
|
||||
/dev/sdbx /dev/sdca /dev/sdcb /dev/sdcc /dev/sdcd /dev/sdce /dev/sdcf \
|
||||
/dev/sdcg /dev/sdch /dev/sdci /dev/sdcj /dev/sdck /dev/sdcl
|
||||
real 0m11.846s
|
||||
|
||||
+ zfs create -o atime=off -o compression=lz4 -o sharesmb=on -o xattr=sa -o recordsize=1024k zdraidtest/shrcompr
|
||||
cannot share 'zdraidtest/shrcompr: system error': SMB share creation failed
|
||||
filesystem successfully created, but not shared
|
||||
changed ownership of '/zdraidtest/shrcompr' from root to user
|
||||
|
||||
Filesystem Type Size Used Avail Use% Mounted on
|
||||
zdraidtest/shrcompr zfs 211G 1.0M 211G 1% /zdraidtest/shrcompr
|
||||
|
||||
+ zfs create -o atime=off -o compression=lz4 -o sharesmb=off -o recordsize=1024k zdraidtest/notshrcompr
|
||||
changed ownership of '/zdraidtest/notshrcompr' from root to user
|
||||
|
||||
Filesystem Type Size Used Avail Use% Mounted on
|
||||
zdraidtest/notshrcompr zfs 211G 1.0M 211G 1% /zdraidtest/notshrcompr
|
||||
|
||||
pool: zdraidtest
|
||||
state: ONLINE
|
||||
config:
|
||||
NAME STATE READ WRITE CKSUM
|
||||
zdraidtest ONLINE 0 0 0
|
||||
draid2:6d:84c:4s-0 ONLINE 0 0 0
|
||||
sdb ONLINE 0 0 0
|
||||
sdc ONLINE 0 0 0
|
||||
sdd ONLINE 0 0 0
|
||||
sde ONLINE 0 0 0
|
||||
sdf ONLINE 0 0 0
|
||||
sdg ONLINE 0 0 0
|
||||
sdh ONLINE 0 0 0
|
||||
sdi ONLINE 0 0 0
|
||||
sdj ONLINE 0 0 0
|
||||
sdk ONLINE 0 0 0
|
||||
sdl ONLINE 0 0 0
|
||||
sdm ONLINE 0 0 0
|
||||
sdn ONLINE 0 0 0
|
||||
sdo ONLINE 0 0 0
|
||||
sdp ONLINE 0 0 0
|
||||
sdq ONLINE 0 0 0
|
||||
sdr ONLINE 0 0 0
|
||||
sds ONLINE 0 0 0
|
||||
sdt ONLINE 0 0 0
|
||||
sdu ONLINE 0 0 0
|
||||
sdv ONLINE 0 0 0
|
||||
sdw ONLINE 0 0 0
|
||||
sdx ONLINE 0 0 0
|
||||
sdy ONLINE 0 0 0
|
||||
sdaa ONLINE 0 0 0
|
||||
sdab ONLINE 0 0 0
|
||||
sdac ONLINE 0 0 0
|
||||
sdad ONLINE 0 0 0
|
||||
sdae ONLINE 0 0 0
|
||||
sdaf ONLINE 0 0 0
|
||||
sdag ONLINE 0 0 0
|
||||
sdah ONLINE 0 0 0
|
||||
sdai ONLINE 0 0 0
|
||||
sdaj ONLINE 0 0 0
|
||||
sdak ONLINE 0 0 0
|
||||
sdal ONLINE 0 0 0
|
||||
sdam ONLINE 0 0 0
|
||||
sdan ONLINE 0 0 0
|
||||
sdao ONLINE 0 0 0
|
||||
sdap ONLINE 0 0 0
|
||||
sdaq ONLINE 0 0 0
|
||||
sdar ONLINE 0 0 0
|
||||
sdas ONLINE 0 0 0
|
||||
sdat ONLINE 0 0 0
|
||||
sdau ONLINE 0 0 0
|
||||
sdav ONLINE 0 0 0
|
||||
sdaw ONLINE 0 0 0
|
||||
sdax ONLINE 0 0 0
|
||||
sdba ONLINE 0 0 0
|
||||
sdbb ONLINE 0 0 0
|
||||
sdbc ONLINE 0 0 0
|
||||
sdbd ONLINE 0 0 0
|
||||
sdbe ONLINE 0 0 0
|
||||
sdbf ONLINE 0 0 0
|
||||
sdbg ONLINE 0 0 0
|
||||
sdbh ONLINE 0 0 0
|
||||
sdbi ONLINE 0 0 0
|
||||
sdbj ONLINE 0 0 0
|
||||
sdbk ONLINE 0 0 0
|
||||
sdbl ONLINE 0 0 0
|
||||
sdbm ONLINE 0 0 0
|
||||
sdbn ONLINE 0 0 0
|
||||
sdbo ONLINE 0 0 0
|
||||
sdbp ONLINE 0 0 0
|
||||
sdbq ONLINE 0 0 0
|
||||
sdbr ONLINE 0 0 0
|
||||
sdbs ONLINE 0 0 0
|
||||
sdbt ONLINE 0 0 0
|
||||
sdbu ONLINE 0 0 0
|
||||
sdbv ONLINE 0 0 0
|
||||
sdbw ONLINE 0 0 0
|
||||
sdbx ONLINE 0 0 0
|
||||
sdca ONLINE 0 0 0
|
||||
sdcb ONLINE 0 0 0
|
||||
sdcc ONLINE 0 0 0
|
||||
sdcd ONLINE 0 0 0
|
||||
sdce ONLINE 0 0 0
|
||||
sdcf ONLINE 0 0 0
|
||||
sdcg ONLINE 0 0 0
|
||||
sdch ONLINE 0 0 0
|
||||
sdci ONLINE 0 0 0
|
||||
sdcj ONLINE 0 0 0
|
||||
sdck ONLINE 0 0 0
|
||||
sdcl ONLINE 0 0 0
|
||||
spares
|
||||
draid2-0-0 AVAIL
|
||||
draid2-0-1 AVAIL
|
||||
draid2-0-2 AVAIL
|
||||
draid2-0-3 AVAIL
|
||||
errors: No known data errors
|
||||
|
||||
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
|
||||
zdraidtest 292G 1.21M 292G - - 0% 0% 1.00x ONLINE -
|
||||
|
||||
NAME USED AVAIL REFER MOUNTPOINT
|
||||
zdraidtest 634K 211G 77.4K /zdraidtest
|
||||
zdraidtest/notshrcompr 77.4K 211G 77.4K /zdraidtest/notshrcompr
|
||||
zdraidtest/shrcompr 77.4K 211G 77.4K /zdraidtest/shrcompr
|
||||
|
||||
Filesystem Type Size Used Avail Use% Mounted on
|
||||
zdraidtest zfs 211G 128K 211G 1% /zdraidtest
|
||||
zdraidtest/shrcompr zfs 211G 1.0M 211G 1% /zdraidtest/shrcompr
|
||||
zdraidtest/notshrcompr zfs 211G 1.0M 211G 1% /zdraidtest/notshrcompr
|
||||
|
||||
NOTE - best practice is to export the pool and # zpool import -a -d /dev/disk/by-id
|
||||
|
||||
|
||||
-----
|
||||
|
||||
Here is a simulated, severely degraded draidZ2 pool with multiple drive failures and spares in use.
|
||||
|
||||
|
||||
NOTE that unless an extra disk is added to the system, the virtual spares for draid1:4d:6c:1s-3 are all burned up;
|
||||
if ANY of sdu-sdx also fails at this point, we will have a dead pool.
|
||||
Spares for draid1-0-0, 1-1-0 and 1-2-0 CANNOT be used for column 3.
|
||||
|
||||
-----
|
||||
|
||||
NOTE if you simulate/take a drive offline, you can't just "echo online" to it later; that won't bring it back up!
|
||||
try rescan-scsi-bus.sh or reboot
|
||||
|
||||
FIX: if a drive is offline, replace it temporarily with a builtin spare:
|
||||
# zpool replace zdraidtest sdd draid2-0-0
|
||||
|
||||
# zps
|
||||
pool: zdraidtest
|
||||
state: DEGRADED
|
||||
status: One or more devices could not be used because the label is missing or
|
||||
invalid. Sufficient replicas exist for the pool to continue
|
||||
functioning in a degraded state.
|
||||
action: Replace the device using 'zpool replace'.
|
||||
see: https://openzfs.github.io/openzfs-docs/msg/ZFS-8000-4J
|
||||
scan: resilvered 0B in 00:00:00 with 0 errors on Sat Jul 3 14:43:51 2021
|
||||
config:
|
||||
NAME STATE READ WRITE CKSUM
|
||||
zdraidtest DEGRADED 0 0 0
|
||||
draid2:5d:24c:2s-0 DEGRADED 0 0 0
|
||||
sdb ONLINE 0 0 0
|
||||
sdc ONLINE 0 0 0
|
||||
spare-2 DEGRADED 0 0 0
|
||||
sdd UNAVAIL 0 0 0
|
||||
draid2-0-0 ONLINE 0 0 0
|
||||
sde ONLINE 0 0 0
|
||||
sdf ONLINE 0 0 0
|
||||
sdg ONLINE 0 0 0
|
||||
sdh ONLINE 0 0 0
|
||||
sdi ONLINE 0 0 0
|
||||
sdj ONLINE 0 0 0
|
||||
sdk ONLINE 0 0 0
|
||||
sdl ONLINE 0 0 0
|
||||
sdm ONLINE 0 0 0
|
||||
sdn ONLINE 0 0 0
|
||||
sdo ONLINE 0 0 0
|
||||
sdp ONLINE 0 0 0
|
||||
sdq ONLINE 0 0 0
|
||||
sdr ONLINE 0 0 0
|
||||
sds ONLINE 0 0 0
|
||||
sdt ONLINE 0 0 0
|
||||
sdu ONLINE 0 0 0
|
||||
sdv ONLINE 0 0 0
|
||||
sdw ONLINE 0 0 0
|
||||
sdx ONLINE 0 0 0
|
||||
sdy ONLINE 0 0 0
|
||||
spares
|
||||
draid2-0-0 INUSE currently in use
|
||||
draid2-0-1 AVAIL
|
||||
errors: No known data errors
|
||||
|
||||
HOWTO fix the above situation with the same disk (you rebooted / it came back online) and decouple the in-use spare:
|
||||
|
||||
zpool export -a
|
||||
|
||||
fdisk -l /dev/sdd # scsi-SATA_VBOX_HARDDISK_VBbcc6c97e-f68b8368
|
||||
zpool labelclear /dev/sdd
|
||||
zpool labelclear -f /dev/sdd1
|
||||
|
||||
zpool import -a
|
||||
zpool status -v # This will show a degraded pool with a missing disk
|
||||
|
||||
# This wont work but gives useful info:
|
||||
zpool replace zdraidtest spare-2 scsi-SATA_VBOX_HARDDISK_VBbcc6c97e-f68b8368 # got error, use detach
|
||||
|
||||
zpool detach zdraidtest 2582498653363374334 # this was listed as UNAVAIL with the spare in-use underneath it
|
||||
zpool status -v # should now show only the spare where sdd was
|
||||
|
||||
# we labelcleared it so it should be ready for re-use;
|
||||
# if you want to be really thorough you can DD zeros to the entire drive but not really necessary
|
||||
zpool replace zdraidtest draid2-0-0 scsi-SATA_VBOX_HARDDISK_VBbcc6c97e-f68b8368 # same disk (sdd) but labelcleared
|
||||
zpool status -v
|
Loading…
x
Reference in New Issue
Block a user