The hugepage-mmap, hugepage-shm, map_hugetlb, compaction, and
userfaultfd tests cannot be run due to unmet dependencies.

Bug: 67017050
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -8,161 +8,161 @@
 mnt=./huge
 exitcode=0
 
-#get huge pagesize and freepages from /proc/meminfo
-while read name size unit; do
-	if [ "$name" = "HugePages_Free:" ]; then
-		freepgs=$size
-	fi
-	if [ "$name" = "Hugepagesize:" ]; then
-		hpgsize_KB=$size
-	fi
-done < /proc/meminfo
-
-# Simple hugetlbfs tests have a hardcoded minimum requirement of
-# huge pages totaling 256MB (262144KB) in size. The userfaultfd
-# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
-# both of these requirements into account and attempt to increase
-# number of huge pages available.
-nr_cpus=$(nproc)
-hpgsize_MB=$((hpgsize_KB / 1024))
-half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
-needmem_KB=$((half_ufd_size_MB * 2 * 1024))
-
-#set proper nr_hugepages
-if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
-	nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
-	needpgs=$((needmem_KB / hpgsize_KB))
-	tries=2
-	while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
-		lackpgs=$(( $needpgs - $freepgs ))
-		echo 3 > /proc/sys/vm/drop_caches
-		echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
-		if [ $? -ne 0 ]; then
-			echo "Please run this test as root"
-			exit $ksft_skip
-		fi
-		while read name size unit; do
-			if [ "$name" = "HugePages_Free:" ]; then
-				freepgs=$size
-			fi
-		done < /proc/meminfo
-		tries=$((tries - 1))
-	done
-	if [ $freepgs -lt $needpgs ]; then
-		printf "Not enough huge pages available (%d < %d)\n" \
-			$freepgs $needpgs
-		exit 1
-	fi
-else
-	echo "no hugetlbfs support in kernel?"
-	exit 1
-fi
-
-#filter 64bit architectures
-ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64"
-if [ -z $ARCH ]; then
-	ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
-fi
-VADDR64=0
-echo "$ARCH64STR" | grep $ARCH && VADDR64=1
-
-mkdir $mnt
-mount -t hugetlbfs none $mnt
-
-echo "---------------------"
-echo "running hugepage-mmap"
-echo "---------------------"
-./hugepage-mmap
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-shmmax=`cat /proc/sys/kernel/shmmax`
-shmall=`cat /proc/sys/kernel/shmall`
-echo 268435456 > /proc/sys/kernel/shmmax
-echo 4194304 > /proc/sys/kernel/shmall
-echo "--------------------"
-echo "running hugepage-shm"
-echo "--------------------"
-./hugepage-shm
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-echo $shmmax > /proc/sys/kernel/shmmax
-echo $shmall > /proc/sys/kernel/shmall
-
-echo "-------------------"
-echo "running map_hugetlb"
-echo "-------------------"
-./map_hugetlb
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
-echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
-echo "      hugetlb regression testing."
-
-echo "-------------------"
-echo "running userfaultfd"
-echo "-------------------"
-./userfaultfd anon 128 32
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "---------------------------"
-echo "running userfaultfd_hugetlb"
-echo "---------------------------"
-# Test requires source and destination huge pages. Size of source
-# (half_ufd_size_MB) is passed as argument to test.
-./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-rm -f $mnt/ufd_test_file
-
-echo "-------------------------"
-echo "running userfaultfd_shmem"
-echo "-------------------------"
-./userfaultfd shmem 128 32
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-#cleanup
-umount $mnt
-rm -rf $mnt
-echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
-
-echo "-----------------------"
-echo "running compaction_test"
-echo "-----------------------"
-./compaction_test
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
+##get huge pagesize and freepages from /proc/meminfo
+#while read name size unit; do
+#	if [ "$name" = "HugePages_Free:" ]; then
+#		freepgs=$size
+#	fi
+#	if [ "$name" = "Hugepagesize:" ]; then
+#		hpgsize_KB=$size
+#	fi
+#done < /proc/meminfo
+#
+## Simple hugetlbfs tests have a hardcoded minimum requirement of
+## huge pages totaling 256MB (262144KB) in size. The userfaultfd
+## hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
+## both of these requirements into account and attempt to increase
+## number of huge pages available.
+#nr_cpus=$(nproc)
+#hpgsize_MB=$((hpgsize_KB / 1024))
+#half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
+#needmem_KB=$((half_ufd_size_MB * 2 * 1024))
+#
+##set proper nr_hugepages
+#if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
+#	nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
+#	needpgs=$((needmem_KB / hpgsize_KB))
+#	tries=2
+#	while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
+#		lackpgs=$(( $needpgs - $freepgs ))
+#		echo 3 > /proc/sys/vm/drop_caches
+#		echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
+#		if [ $? -ne 0 ]; then
+#			echo "Please run this test as root"
+#			exit $ksft_skip
+#		fi
+#		while read name size unit; do
+#			if [ "$name" = "HugePages_Free:" ]; then
+#				freepgs=$size
+#			fi
+#		done < /proc/meminfo
+#		tries=$((tries - 1))
+#	done
+#	if [ $freepgs -lt $needpgs ]; then
+#		printf "Not enough huge pages available (%d < %d)\n" \
+#			$freepgs $needpgs
+#		exit 1
+#	fi
+#else
+#	echo "no hugetlbfs support in kernel?"
+#	exit 1
+#fi
+#
+##filter 64bit architectures
+#ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64"
+#if [ -z $ARCH ]; then
+#	ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
+#fi
+#VADDR64=0
+#echo "$ARCH64STR" | grep $ARCH && VADDR64=1
+#
+#mkdir $mnt
+#mount -t hugetlbfs none $mnt
+#
+#echo "---------------------"
+#echo "running hugepage-mmap"
+#echo "---------------------"
+#./hugepage-mmap
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#shmmax=`cat /proc/sys/kernel/shmmax`
+#shmall=`cat /proc/sys/kernel/shmall`
+#echo 268435456 > /proc/sys/kernel/shmmax
+#echo 4194304 > /proc/sys/kernel/shmall
+#echo "--------------------"
+#echo "running hugepage-shm"
+#echo "--------------------"
+#./hugepage-shm
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#echo $shmmax > /proc/sys/kernel/shmmax
+#echo $shmall > /proc/sys/kernel/shmall
+#
+#echo "-------------------"
+#echo "running map_hugetlb"
+#echo "-------------------"
+#./map_hugetlb
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
+#echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
+#echo "      hugetlb regression testing."
+#
+#echo "-------------------"
+#echo "running userfaultfd"
+#echo "-------------------"
+#./userfaultfd anon 128 32
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "---------------------------"
+#echo "running userfaultfd_hugetlb"
+#echo "---------------------------"
+## Test requires source and destination huge pages. Size of source
+## (half_ufd_size_MB) is passed as argument to test.
+#./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#rm -f $mnt/ufd_test_file
+#
+#echo "-------------------------"
+#echo "running userfaultfd_shmem"
+#echo "-------------------------"
+#./userfaultfd shmem 128 32
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+##cleanup
+#umount $mnt
+#rm -rf $mnt
+#echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
+#
+#echo "-----------------------"
+#echo "running compaction_test"
+#echo "-----------------------"
+#./compaction_test
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
 
 echo "----------------------"
 echo "running on-fault-limit"