/*
* Problems have been easier to trigger when spreading the
* workload over the available CPUs.
+ *
+ * If CPU hotplug is active, this can randomly fail so dump the error
+ * to stderr so it can be filtered out easily by the caller.
*/
CPU_ZERO(&cpuset);
CPU_SET(mycpu, &cpuset);
if (sched_setaffinity(mytid, sizeof(cpuset), &cpuset)) {
- printf("FAILED to set thread %d to run on cpu %ld\n",
+ fprintf(stderr, "FAILED to set thread %d to run on cpu %ld\n",
mytid, mycpu);
}
testfile=$TEST_DIR/aio-testfile
$XFS_IO_PROG -ftc "pwrite 0 10m" $testfile | _filter_xfs_io
-$AIO_TEST 0 100 $testfile
+# This can emit cpu affinity setting failures that aren't considered test
+# failures but cause golden image failures. Redirect the test's stderr to
+# $seqres.full so that it is captured but doesn't directly cause test failures.
+$AIO_TEST 0 100 $testfile 2>> $seqres.full
if [ $? -ne 0 ]; then
exit $status
fi
-$AIO_TEST 1 100 $testfile
+$AIO_TEST 1 100 $testfile 2>> $seqres.full
if [ $? -ne 0 ]; then
exit $status
fi
_require_metadata_journaling
_require_test_program "t_open_tmpfiles"
-_scratch_mkfs >> $seqres.full 2>&1
+_scratch_mkfs "-l size=256m" >> $seqres.full 2>&1
_scratch_mount
# Set ULIMIT_NOFILE to min(file-max / 2, 50000 files per LOAD_FACTOR)
_require_xfs_io_command "-T"
_require_test_program "t_open_tmpfiles"
-_scratch_mkfs >> $seqres.full 2>&1
+# On high CPU count machines, this runs a -lot- of create and unlink
+# concurrency. Set the filesystem up to handle this.
+if [ $FSTYP = "xfs" ]; then
+ _scratch_mkfs "-d agcount=32" >> $seqres.full 2>&1
+else
+ _scratch_mkfs >> $seqres.full 2>&1
+fi
_scratch_mount
# Try to load up all the CPUs, two threads per CPU.
mkdir -p $dir
for i in $(seq 0 $count)
do
- touch $dir/$i 2>&1 | filter_enospc
+ echo -n > $dir/$i 2>&1 | filter_enospc
done
}
do
file=$((RANDOM % count))
rm -f $dir/$file
- touch $dir/$file 2>&1 | filter_enospc
+ echo -n > $dir/$file 2>&1 | filter_enospc
done
}
dir=$1
i=0
- while [ true ]; do
- touch $dir/$i 2>> $seqres.full || break
+ ( while [ true ]; do
+ echo -n > $dir/$i || break
i=$((i + 1))
- done
+ done ) >> $seqres.full 2>&1
}
dir=$1
i=0
- while [ true ]; do
- echo -n > $dir/$i >> $seqres.full 2>&1 || break
+ ( while [ true ]; do
+ echo -n > $dir/$i || break
i=$((i + 1))
- done
+ done ) >> $seqres.full 2>&1
}
# Find a sparse inode cluster after logend_agno/logend_agino.
STRESS_DIR="$SCRATCH_MNT/testdir"
mkdir -p $STRESS_DIR
-_run_fsstress_bg -d $STRESS_DIR -n 1000 -p 1000 $FSSTRESS_AVOID
+# turn off sync as this can lead to near deadlock conditions due to every
+# fsstress process lockstepping against freeze on large CPU count machines
+_run_fsstress_bg -d $STRESS_DIR -f sync=0 -n 1000 -p 1000 $FSSTRESS_AVOID
# Freeze/unfreeze file system randomly
echo "Start freeze/unfreeze randomly" | tee -a $seqres.full
_require_scratch
_require_test_program "t_open_tmpfiles"
-_scratch_mkfs >> $seqres.full 2>&1
+_scratch_mkfs "-l size=256m" >> $seqres.full 2>&1
_scratch_mount
# Set ULIMIT_NOFILE to min(file-max / 2, 30000 files per LOAD_FACTOR)
_require_scratch
_require_test_program "t_open_tmpfiles"
-_scratch_mkfs | _filter_mkfs 2> $tmp.mkfs > /dev/null
+_scratch_mkfs "-l size=256m" | _filter_mkfs 2> $tmp.mkfs > /dev/null
cat $tmp.mkfs >> $seqres.full
. $tmp.mkfs