svn commit: r329029 - projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd

Alan Somers asomers at FreeBSD.org
Thu Feb 8 20:28:31 UTC 2018


Author: asomers
Date: Thu Feb  8 20:28:30 2018
New Revision: 329029
URL: https://svnweb.freebsd.org/changeset/base/329029

Log:
  WIP removing SAS phy control from the ZFS test suite
  
  Convert zfsd_replace_002_pos to use libgnop instead of libsas
  
  tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_002_pos.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh
  	* Simulate disk failures with gnop, not SAS phy control
  	* Reduce disk count to 3
  	* Reduce timeouts so the test fails faster
  
  	The test is still disabled because we never implemented this
  	feature in zfsd.
  
  Sponsored by:	Spectra Logic Corp
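
  For context, gnop(8) layers a pass-through GEOM provider (e.g. da0.nop)
  over a real disk; destroying that provider looks like a disk departure
  to ZFS, and recreating it looks like a reinsertion. A minimal
  standalone sketch of the technique, with hypothetical device and pool
  names (the test itself goes through the libgnop.kshlib wrappers):
  
  	#!/bin/sh
  	gnop create da0            # creates the pass-through provider da0.nop
  	zpool create tank da0.nop  # build the pool on the nop provider
  	gnop destroy -f da0.nop    # simulate pulling the disk; the vdev goes away
  	gnop create da0            # simulate reinsertion; the vdev can return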

Modified:
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_002_pos.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_002_pos.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_002_pos.ksh	Thu Feb  8 20:25:52 2018	(r329028)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_002_pos.ksh	Thu Feb  8 20:28:30 2018	(r329029)
@@ -24,7 +24,7 @@
 # Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-# Copyright 2012,2013 Spectra Logic Corporation.  All rights reserved.
+# Copyright 2012-2018 Spectra Logic Corporation.  All rights reserved.
 # Use is subject to license terms.
 # 
 # Portions taken from:
@@ -33,144 +33,41 @@
 # $FreeBSD$
 
 . $STF_SUITE/include/libtest.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 
-verify_runnable "global"
-
-function cleanup
+function is_pool_unavail # pool
 {
-	reap_children
-	destroy_pool $TESTPOOL
-
-	# See if the phy has been disabled, and try to re-enable it if possible.
-	for CURDISK in $TMPDISKS[*]; do
-		if [ ! -z ${EXPANDER_LIST[$CURDISK]} -a ! -z ${PHY_LIST[$CURDISK]} ]; then
-			find_disk_by_phy ${EXPANDER_LIST[$CURDISK]} ${PHY_LIST[$CURDISK]}
-			[ -n "$FOUNDDISK" ] && continue
-		fi
-		enable_sas_disk ${EXPANDER_LIST[$CURDISK]} ${PHY_LIST[$CURDISK]}
-	done
-	rescan_disks
-
-	[[ -e $TESTDIR ]] && log_must $RM -rf $TESTDIR/*
+	is_pool_state "$1" "UNAVAIL"
 }
 
 log_assert "A pool can come back online after all disks are failed and reactivated"
 
-log_unsupported "This test is currently unsupported, ZFS hangs when all drives fail and come back"
+log_unsupported "This feature has not yet been implemented in zfsd"
 
-log_onexit cleanup
-
-child_pids=""
-
 ensure_zfsd_running
-set -A TMPDISKS $DISKS
-NUMDISKS=${#TMPDISKS[*]}
+set_disks
+typeset ALLDISKS="${DISK0} ${DISK1} ${DISK2}"
+typeset ALLNOPS=${ALLDISKS//~(E)([[:space:]]+|$)/.nop\1}
 
-# Trim out any /dev prefix on the disk.
-((i=0))   
-while [ $i -lt $NUMDISKS ]; do   
-	TMPDISKS[$i]=${TMPDISKS[$i]##*/}
-	((i++));
-done
-
+log_must create_gnops $ALLDISKS
 for type in "raidz" "mirror"; do
 	# Create a pool on the supplied disks
-	create_pool $TESTPOOL $type $DISKS
+	create_pool $TESTPOOL $type $ALLNOPS
 	log_must $ZFS create $TESTPOOL/$TESTFS
 	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
 
-	unset EXPANDER_LIST
-	typeset -A EXPANDER_LIST
-	unset PHY_LIST
-	typeset -A PHY_LIST
+	# Disable all vdevs.  The pool should become UNAVAIL
+	log_must destroy_gnop $DISK0
+	log_must destroy_gnop $DISK1
+	log_must destroy_gnop $DISK2
+	wait_for 5 1 is_pool_unavail $TESTPOOL
 
-	# First, disable the PHYs for all of the disks.
-	for CURDISK in ${TMPDISKS[*]}; do
-		# Find the first disk, get the expander and phy
-		log_note "Looking for expander and phy information for $CURDISK"
-		find_verify_sas_disk $CURDISK
+	# Re-enable all vdevs.  The pool should become healthy again
+	log_must create_gnop $DISK0
+	log_must create_gnop $DISK1
+	log_must create_gnop $DISK2
 
-		# Record the expander and PHY for this particular disk, so
-		# that we can re-enable the disk later, even if it comes
-		# back as a different da(4) instance.
-		EXPANDER_LIST[$CURDISK]=$EXPANDER
-		PHY_LIST[$CURDISK]=$PHY
-
-		log_note "Disabling \"$CURDISK\" on expander $EXPANDER phy $PHY"
-		# Disable the first disk.  We have to do this first, because if
-		# there is I/O active to the
-		disable_sas_disk $EXPANDER $PHY
-	done
-
-	# Make sure that the pool status is "UNAVAIL".  We have taken all
-	# of the drives offline, so it should be.
-	log_must is_pool_state $TESTPOOL UNAVAIL
-
-	# Now we re-enable all of the PHYs.  Note that we turned off the
-	# sleep inside enable_sas_disk, so this should quickly.
-	for CURDISK in ${TMPDISKS[*]}; do
-		# Re-enable the disk, we don't want to leave it turned off
-		log_note "Re-enabling phy ${PHY_LIST[$CURDISK]} on expander ${EXPANDER_LIST[$CURDISK]}"
-		enable_sas_disk ${EXPANDER_LIST[$CURDISK]} ${PHY_LIST[$CURDISK]}
-	done
-
-	unset DISK_FOUND
-	typeset -A DISK_FOUND
-
-	log_note "Checking to see whether disks have reappeared"
-	((retries=0))
-	while [ ${#DISK_FOUND[*]} -lt $NUMDISKS ] && [ $retries -lt 3 ]; do
-		# If this isn't the first time through, give the disk a
-		# little more time to show up.
-		[ $retries -ne 0 ] && $SLEEP 5
-
-		for CURDISK in ${TMPDISKS[*]}; do
-			# If we already found this disk, we don't need to
-			# check again.  Note that the new name may not be
-			# the same as the name referenced in CURDISK.  That
-			# is why we look for the disk by expander and PHY.
-			[ ! -z ${DISK_FOUND[$CURDISK]} ] && continue
-
-			# Make sure the disk is back in the topology
-			find_disk_by_phy ${EXPANDER_LIST[$CURDISK]} ${PHY_LIST[$CURDISK]}
-			if [ ! -z "$FOUNDDISK" ]; then
-				# This does serve as a mapping from the old
-				# disk name to the new disk name.
-				DISK_FOUND[$CURDISK]=$FOUNDDISK
-			fi
-		done
-		((retries++))
-	done
-
-	if [ ${#DISK_FOUND[*]} -lt $NUMDISKS ]; then
-		for CURDISK in ${TMPDISKS[*]}; do
-			[ ! -z ${DISK_FOUND[$CURDISK]} ] && continue
-			log_note "Disk $CURDISK has not appeared at phy $PHY_LIST[$CURDISK] on expander $EXPANDER_LIST[$CURDISK] after 20 seconds"
-		done
-		((num_missing=${NUM_DISKS} - ${#DISK_FOUND[*]}))
-		log_fail "Missing $num_missing Disks out of $NUM_DISKS Disks"
-	else
-		for CURDISK in ${TMPDISKS[*]}; do
-			log_note "Disk $CURDISK is back as ${DISK_FOUND[$CURDISK]}"
-		done
-		# Reset our array of disks, because we may have disks that
-		# have come back at a different ID.  i.e. da0 may now be da7,
-		# and da0 may no longer be a disk that we are authorized to use.
-		# This is a more generic problem that we may need to tackle
-		# with this test.  We may need to reset the DISKS list itself.
-		set -A TMPDISKS ${DISK_FOUND[*]}
-	fi
-
-	log_note "Raid type is $type"
-
-	# In theory the pool should be back online.
-	$ZPOOL status $TESTPOOL |grep ONLINE > /dev/null
-	if [ $? != 0 ]; then
-		log_fail "Pool $TESTPOOL is disk $TMPDISK did not automatically join the $TESTPOOL"
-	else 
-		log_note "After reinsertion, disk is back in pool and online"
-	fi
+	wait_for 5 1 is_pool_healthy $TESTPOOL
 
 	destroy_pool $TESTPOOL
 	log_must $RM -rf /$TESTPOOL

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh	Thu Feb  8 20:25:52 2018	(r329028)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh	Thu Feb  8 20:28:30 2018	(r329029)
@@ -474,11 +474,11 @@ zfsd_replace_002_pos_head()
 }
 zfsd_replace_002_pos_body()
 {
-	atf_expect_fail "ZFS hangs when an array becomes critical"
+	atf_expect_fail "Not yet implemented in zfsd"
 	. $(atf_get_srcdir)/../../include/default.cfg
 	. $(atf_get_srcdir)/zfsd.cfg
 
-	verify_disk_count "$DISKS" 2
+	verify_disk_count "$DISKS" 3
 	ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
 	ksh93 $(atf_get_srcdir)/zfsd_replace_002_pos.ksh
 	if [[ $? != 0 ]]; then
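
The reduced timeouts come from the wait_for calls in the new test body,
which poll a condition instead of sleeping for a fixed period. The
suite's actual helper lives in its ksh libraries; as a rough sketch of
the semantics only (timeout in seconds, polling interval, then the
command to retry):

	# wait_for <timeout> <interval> <cmd ...> -- illustrative sketch,
	# not the suite's actual implementation
	function wait_for
	{
		typeset -i timeout=$1 interval=$2 elapsed=0
		shift 2
		while (( elapsed < timeout )); do
			"$@" && return 0	# condition met; stop polling
			sleep $interval
			(( elapsed += interval ))
		done
		return 1	# condition never became true within the timeout
	}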

