import os, time, re, subprocess, shutil, logging
from autotest_lib.client.bin import utils, test
from autotest_lib.client.common_lib import error


class dma_memtest(test.test):
    """
    A test for the memory subsystem against heavy IO and DMA operations,
    implemented based on the work of Doug Ledford
    (http://people.redhat.com/dledford/memtest.shtml)

    @author Lucas Meneghel Rodrigues (lucasmr@br.ibm.com)
    @author Rodrigo Sampaio Vaz (rsampaio@br.ibm.com)
    """
    version = 1

    def initialize(self):
        self.cachedir = os.path.join(self.bindir, 'cache')
        self.nfail = 0


    def setup(self, tarball_base='linux-2.6.18.tar.bz2', parallel=True):
        """
        Downloads a copy of the linux kernel, calculates an estimated size
        for the uncompressed tarball and uses this value to work out the
        number of copies of the linux kernel that will be uncompressed.

        @param tarball_base: Name of the kernel tarball that will be looked
                up on the kernel.org mirrors.
        @param parallel: Whether the copies of the kernel will be
                uncompressed in parallel or not.
        """
        if not os.path.isdir(self.cachedir):
            os.makedirs(self.cachedir)
        self.parallel = parallel

        kernel_repo = 'http://www.kernel.org/pub/linux/kernel/v2.6'
        tarball_url = os.path.join(kernel_repo, tarball_base)
        tarball_md5 = '296a6d150d260144639c3664d127d174'
        logging.info('Downloading linux kernel tarball')
        self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url,
                                             tarball_md5)
        size_tarball = os.path.getsize(self.tarball) / 1024 / 1024
        # Estimation of the tarball size after uncompression
        compress_ratio = 5
        est_size = size_tarball * compress_ratio
        self.sim_cps = self.get_sim_cps(est_size)
        logging.info('Source file: %s', tarball_base)
        logging.info('Megabytes per copy: %s', size_tarball)
        logging.info('Compress ratio: %s', compress_ratio)
        logging.info('Estimated size after uncompression: %s', est_size)
        logging.info('Number of copies: %s', self.sim_cps)
        logging.info('Parallel: %s', parallel)


    def get_sim_cps(self, est_size):
        '''
        Calculate the number of simultaneous copies that have to be
        uncompressed in order to make the system swap.

        @param est_size: Estimated size of the uncompressed linux tarball.
        '''
        mem_str = utils.system_output('grep MemTotal /proc/meminfo')
        mem = int(re.search(r'\d+', mem_str).group(0))
        mem = int(mem / 1024)

        # The general idea here is that we'll make an amount of copies of the
        # kernel tree equal to 1.5 times the physical RAM, to make sure the
        # system swaps, therefore reading and writing stuff to the disk. The
        # DMA reads and writes, together with the memory operations, make it
        # more likely to reveal failures in the memory subsystem.
        sim_cps = (1.5 * mem) / est_size

        if (mem % est_size) >= (est_size / 2):
            sim_cps += 1

        if (mem / 32) < 1:
            sim_cps += 1

        return int(sim_cps)


    def run_once(self):
        """
        Represents a single iteration of the process. Uncompresses the
        previously calculated number of copies of the linux kernel,
        sequentially or in parallel, and then compares each tree with a base
        tree that was uncompressed at the very beginning.
        """

        parallel_procs = []

        os.chdir(self.tmpdir)
        # This is the reference copy of the linux tarball
        # that will be used for subsequent comparisons
        logging.info('Unpacking base copy')
        base_dir = os.path.join(self.tmpdir, 'linux.orig')
        utils.extract_tarball_to_dir(self.tarball, base_dir)
        logging.info('Unpacking test copies')
        for j in range(self.sim_cps):
            tmp_dir = 'linux.%s' % j
            if self.parallel:
                os.mkdir(tmp_dir)
                # Start a background tar process for each copy
                tar_cmd = ['tar', 'jxf', self.tarball, '-C', tmp_dir]
                logging.debug("Unpacking tarball to %s", tmp_dir)
                parallel_procs.append(subprocess.Popen(tar_cmd,
                                                       stdout=subprocess.PIPE,
                                                       stderr=subprocess.PIPE))
            else:
                logging.debug("Unpacking tarball to %s", tmp_dir)
                utils.extract_tarball_to_dir(self.tarball, tmp_dir)
        # Wait for the subprocesses before comparison
        if self.parallel:
            logging.debug("Waiting for background processes to finish "
                          "before proceeding")
            for proc in parallel_procs:
                proc.wait()

        parallel_procs = []

        logging.info('Comparing test copies with base copy')
        for j in range(self.sim_cps):
            tmp_dir = 'linux.%s/%s' % (j,
                      os.path.basename(self.tarball).replace('.tar.bz2', ''))
            if self.parallel:
                diff_cmd = ['diff', '-U3', '-rN', 'linux.orig', tmp_dir]
                logging.debug("Comparing linux.orig with %s", tmp_dir)
                p = subprocess.Popen(diff_cmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                parallel_procs.append(p)
            else:
                try:
                    logging.debug('Comparing linux.orig with %s', tmp_dir)
                    utils.system('diff -U3 -rN linux.orig linux.%s' % j)
                except error.CmdError as e:
                    self.nfail += 1
                    logging.error('Error comparing trees: %s', e)

        for proc in parallel_procs:
            out_buf = proc.stdout.read()
            out_buf += proc.stderr.read()
            proc.wait()
            if out_buf != "":
                self.nfail += 1
                logging.error('Error comparing trees: %s', out_buf)

        # Clean up for the next iteration
        parallel_procs = []

        logging.info('Cleaning up')
        for j in range(self.sim_cps):
            tmp_dir = 'linux.%s' % j
            shutil.rmtree(tmp_dir)
        shutil.rmtree(base_dir)


    def cleanup(self):
        if self.nfail != 0:
            raise error.TestError('DMA memory test failed.')
        else:
            logging.info('DMA memory test passed.')
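# Usage sketch: an autotest client control file would typically start this
# test with job.run_test(), optionally passing the setup() parameters above
# as keyword arguments. The snippet below is only an illustrative example,
# assuming the standard autotest control file environment where the job
# object is available as 'job'; it is not a verified control file shipped
# with this test.
#
#     job.run_test('dma_memtest',
#                  tarball_base='linux-2.6.18.tar.bz2',
#                  parallel=True)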