1#!/usr/bin/env python3.4
2#
3#   Copyright 2017 - The Android Open Source Project
4#
5#   Licensed under the Apache License, Version 2.0 (the 'License');
6#   you may not use this file except in compliance with the License.
7#   You may obtain a copy of the License at
8#
9#       http://www.apache.org/licenses/LICENSE-2.0
10#
11#   Unless required by applicable law or agreed to in writing, software
12#   distributed under the License is distributed on an 'AS IS' BASIS,
13#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14#   See the License for the specific language governing permissions and
15#   limitations under the License.
16
17import collections
18import itertools
19import json
20import logging
21import numpy
22import os
23import time
24from acts import asserts
25from acts import base_test
26from acts import utils
27from acts.controllers import iperf_server as ipf
28from acts.controllers.utils_lib import ssh
29from acts.metrics.loggers.blackbox import BlackboxMappedMetricLogger
30from acts.test_utils.wifi import ota_chamber
31from acts.test_utils.wifi import ota_sniffer
32from acts.test_utils.wifi import wifi_performance_test_utils as wputils
33from acts.test_utils.wifi import wifi_retail_ap as retail_ap
34from acts.test_utils.wifi import wifi_test_utils as wutils
35from functools import partial
36
37
38class WifiRvrTest(base_test.BaseTestClass):
39    """Class to test WiFi rate versus range.
40
41    This class implements WiFi rate versus range tests on single AP single STA
42    links. The class setups up the AP in the desired configurations, configures
43    and connects the phone to the AP, and runs iperf throughput test while
44    sweeping attenuation. For an example config file to run this test class see
45    example_connectivity_performance_ap_sta.json.
46    """
47
48    TEST_TIMEOUT = 6
49    MAX_CONSECUTIVE_ZEROS = 3
50
51    def __init__(self, controllers):
52        base_test.BaseTestClass.__init__(self, controllers)
53        self.testcase_metric_logger = (
54            BlackboxMappedMetricLogger.for_test_case())
55        self.testclass_metric_logger = (
56            BlackboxMappedMetricLogger.for_test_class())
57        self.publish_testcase_metrics = True
58
    def setup_class(self):
        """Initializes common test hardware and parameters.

        This function initializes hardwares and compiles parameters that are
        common to all tests in this class.
        """
        # Last android device in the list is the DUT by convention.
        self.dut = self.android_devices[-1]
        req_params = [
            'RetailAccessPoints', 'rvr_test_params', 'testbed_params',
            'RemoteServer'
        ]
        opt_params = ['main_network', 'golden_files_list', 'OTASniffer']
        self.unpack_userparams(req_params, opt_params)
        self.testclass_params = self.rvr_test_params
        self.num_atten = self.attenuators[0].instrument.num_atten
        self.iperf_server = self.iperf_servers[0]
        # SSH connection to the remote iperf endpoint described in the config.
        self.remote_server = ssh.connection.SshConnection(
            ssh.settings.from_config(self.RemoteServer[0]['ssh_config']))
        self.iperf_client = self.iperf_clients[0]
        self.access_point = retail_ap.create(self.RetailAccessPoints)[0]
        # Sniffer is optional; only instantiated when configured.
        if hasattr(self, 'OTASniffer'):
            self.sniffer = ota_sniffer.create(self.OTASniffer)[0]
        self.log.info('Access Point Configuration: {}'.format(
            self.access_point.ap_settings))
        self.log_path = os.path.join(logging.log_path, 'results')
        os.makedirs(self.log_path, exist_ok=True)
        # When no explicit golden file list is given, use every file in the
        # configured golden results directory.
        if not hasattr(self, 'golden_files_list'):
            self.golden_files_list = [
                os.path.join(self.testbed_params['golden_results_path'], file)
                for file in os.listdir(
                    self.testbed_params['golden_results_path'])
            ]
        if hasattr(self, 'bdf'):
            self.log.info('Pushing WiFi BDF to DUT.')
            wputils.push_bdf(self.dut, self.bdf)
        if hasattr(self, 'firmware'):
            self.log.info('Pushing WiFi firmware to DUT.')
            # Firmware push needs both the wlanmdsp.mbn and Data.msc files
            # from the configured firmware file list.
            wlanmdsp = [
                file for file in self.firmware if "wlanmdsp.mbn" in file
            ][0]
            data_msc = [file for file in self.firmware
                        if "Data.msc" in file][0]
            wputils.push_firmware(self.dut, wlanmdsp, data_msc)
        self.testclass_results = []

        # Turn WiFi ON
        if self.testclass_params.get('airplane_mode', 1):
            self.log.info('Turning on airplane mode.')
            asserts.assert_true(utils.force_airplane_mode(self.dut, True),
                                "Can not turn on airplane mode.")
        wutils.wifi_toggle_state(self.dut, True)
110
    def teardown_test(self):
        """Stops the iperf server after each test case."""
        self.iperf_server.stop()
113
    def teardown_class(self):
        """Turns off WiFi on all devices and compiles class-level results."""
        # Turn WiFi OFF
        for dev in self.android_devices:
            wutils.wifi_toggle_state(dev, False)
        self.process_testclass_results()
119
120    def process_testclass_results(self):
121        """Saves plot with all test results to enable comparison."""
122        # Plot and save all results
123        plots = collections.OrderedDict()
124        for result in self.testclass_results:
125            plot_id = (result['testcase_params']['channel'],
126                       result['testcase_params']['mode'])
127            if plot_id not in plots:
128                plots[plot_id] = wputils.BokehFigure(
129                    title='Channel {} {} ({})'.format(
130                        result['testcase_params']['channel'],
131                        result['testcase_params']['mode'],
132                        result['testcase_params']['traffic_type']),
133                    x_label='Attenuation (dB)',
134                    primary_y_label='Throughput (Mbps)')
135            plots[plot_id].add_line(result['total_attenuation'],
136                                    result['throughput_receive'],
137                                    result['test_name'],
138                                    marker='circle')
139        figure_list = []
140        for plot_id, plot in plots.items():
141            plot.generate_figure()
142            figure_list.append(plot)
143        output_file_path = os.path.join(self.log_path, 'results.html')
144        wputils.BokehFigure.save_figures(figure_list, output_file_path)
145
146    def pass_fail_check(self, rvr_result):
147        """Check the test result and decide if it passed or failed.
148
149        Checks the RvR test result and compares to a throughput limites for
150        the same configuration. The pass/fail tolerances are provided in the
151        config file.
152
153        Args:
154            rvr_result: dict containing attenuation, throughput and other data
155        """
156        try:
157            throughput_limits = self.compute_throughput_limits(rvr_result)
158        except:
159            asserts.fail('Test failed: Golden file not found')
160
161        failure_count = 0
162        for idx, current_throughput in enumerate(
163                rvr_result['throughput_receive']):
164            if (current_throughput < throughput_limits['lower_limit'][idx]
165                    or current_throughput >
166                    throughput_limits['upper_limit'][idx]):
167                failure_count = failure_count + 1
168
169        # Set test metrics
170        rvr_result['metrics']['failure_count'] = failure_count
171        if self.publish_testcase_metrics:
172            self.testcase_metric_logger.add_metric('failure_count',
173                                                   failure_count)
174
175        # Assert pass or fail
176        if failure_count >= self.testclass_params['failure_count_tolerance']:
177            asserts.fail('Test failed. Found {} points outside limits.'.format(
178                failure_count))
179        asserts.explicit_pass(
180            'Test passed. Found {} points outside throughput limits.'.format(
181                failure_count))
182
183    def compute_throughput_limits(self, rvr_result):
184        """Compute throughput limits for current test.
185
186        Checks the RvR test result and compares to a throughput limites for
187        the same configuration. The pass/fail tolerances are provided in the
188        config file.
189
190        Args:
191            rvr_result: dict containing attenuation, throughput and other meta
192            data
193        Returns:
194            throughput_limits: dict containing attenuation and throughput limit data
195        """
196        test_name = self.current_test_name
197        golden_path = next(file_name for file_name in self.golden_files_list
198                           if test_name in file_name)
199        with open(golden_path, 'r') as golden_file:
200            golden_results = json.load(golden_file)
201            golden_attenuation = [
202                att + golden_results['fixed_attenuation']
203                for att in golden_results['attenuation']
204            ]
205        attenuation = []
206        lower_limit = []
207        upper_limit = []
208        for idx, current_throughput in enumerate(
209                rvr_result['throughput_receive']):
210            current_att = rvr_result['attenuation'][idx] + rvr_result[
211                'fixed_attenuation']
212            att_distances = [
213                abs(current_att - golden_att)
214                for golden_att in golden_attenuation
215            ]
216            sorted_distances = sorted(enumerate(att_distances),
217                                      key=lambda x: x[1])
218            closest_indeces = [dist[0] for dist in sorted_distances[0:3]]
219            closest_throughputs = [
220                golden_results['throughput_receive'][index]
221                for index in closest_indeces
222            ]
223            closest_throughputs.sort()
224
225            attenuation.append(current_att)
226            lower_limit.append(
227                max(
228                    closest_throughputs[0] - max(
229                        self.testclass_params['abs_tolerance'],
230                        closest_throughputs[0] *
231                        self.testclass_params['pct_tolerance'] / 100), 0))
232            upper_limit.append(closest_throughputs[-1] + max(
233                self.testclass_params['abs_tolerance'], closest_throughputs[-1]
234                * self.testclass_params['pct_tolerance'] / 100))
235        throughput_limits = {
236            'attenuation': attenuation,
237            'lower_limit': lower_limit,
238            'upper_limit': upper_limit
239        }
240        return throughput_limits
241
242    def process_test_results(self, rvr_result):
243        """Saves plots and JSON formatted results.
244
245        Args:
246            rvr_result: dict containing attenuation, throughput and other meta
247            data
248        """
249        # Save output as text file
250        test_name = self.current_test_name
251        results_file_path = os.path.join(
252            self.log_path, '{}.json'.format(self.current_test_name))
253        with open(results_file_path, 'w') as results_file:
254            json.dump(rvr_result, results_file, indent=4)
255        # Plot and save
256        figure = wputils.BokehFigure(title=test_name,
257                                     x_label='Attenuation (dB)',
258                                     primary_y_label='Throughput (Mbps)')
259        try:
260            golden_path = next(file_name
261                               for file_name in self.golden_files_list
262                               if test_name in file_name)
263            with open(golden_path, 'r') as golden_file:
264                golden_results = json.load(golden_file)
265            golden_attenuation = [
266                att + golden_results['fixed_attenuation']
267                for att in golden_results['attenuation']
268            ]
269            throughput_limits = self.compute_throughput_limits(rvr_result)
270            shaded_region = {
271                'x_vector': throughput_limits['attenuation'],
272                'lower_limit': throughput_limits['lower_limit'],
273                'upper_limit': throughput_limits['upper_limit']
274            }
275            figure.add_line(golden_attenuation,
276                            golden_results['throughput_receive'],
277                            'Golden Results',
278                            color='green',
279                            marker='circle',
280                            shaded_region=shaded_region)
281        except:
282            self.log.warning('ValueError: Golden file not found')
283
284        # Generate graph annotatios
285        hover_text = [
286            'TX MCS = {0} ({1:.1f}%). RX MCS = {2} ({3:.1f}%)'.format(
287                curr_llstats['summary']['common_tx_mcs'],
288                curr_llstats['summary']['common_tx_mcs_freq'] * 100,
289                curr_llstats['summary']['common_rx_mcs'],
290                curr_llstats['summary']['common_rx_mcs_freq'] * 100)
291            for curr_llstats in rvr_result['llstats']
292        ]
293        figure.add_line(rvr_result['total_attenuation'],
294                        rvr_result['throughput_receive'],
295                        'Test Results',
296                        hover_text=hover_text,
297                        color='red',
298                        marker='circle')
299
300        output_file_path = os.path.join(self.log_path,
301                                        '{}.html'.format(test_name))
302        figure.generate_figure(output_file_path)
303
304        #Set test metrics
305        rvr_result['metrics'] = {}
306        rvr_result['metrics']['peak_tput'] = max(
307            rvr_result['throughput_receive'])
308        if self.publish_testcase_metrics:
309            self.testcase_metric_logger.add_metric(
310                'peak_tput', rvr_result['metrics']['peak_tput'])
311
312        tput_below_limit = [
313            tput < self.testclass_params['tput_metric_targets'][
314                rvr_result['testcase_params']['mode']]['high']
315            for tput in rvr_result['throughput_receive']
316        ]
317        rvr_result['metrics']['high_tput_range'] = -1
318        for idx in range(len(tput_below_limit)):
319            if all(tput_below_limit[idx:]):
320                if idx == 0:
321                    #Throughput was never above limit
322                    rvr_result['metrics']['high_tput_range'] = -1
323                else:
324                    rvr_result['metrics']['high_tput_range'] = rvr_result[
325                        'total_attenuation'][max(idx, 1) - 1]
326                break
327        if self.publish_testcase_metrics:
328            self.testcase_metric_logger.add_metric(
329                'high_tput_range', rvr_result['metrics']['high_tput_range'])
330
331        tput_below_limit = [
332            tput < self.testclass_params['tput_metric_targets'][
333                rvr_result['testcase_params']['mode']]['low']
334            for tput in rvr_result['throughput_receive']
335        ]
336        for idx in range(len(tput_below_limit)):
337            if all(tput_below_limit[idx:]):
338                rvr_result['metrics']['low_tput_range'] = rvr_result[
339                    'total_attenuation'][max(idx, 1) - 1]
340                break
341        else:
342            rvr_result['metrics']['low_tput_range'] = -1
343        if self.publish_testcase_metrics:
344            self.testcase_metric_logger.add_metric(
345                'low_tput_range', rvr_result['metrics']['low_tput_range'])
346
347    def run_rvr_test(self, testcase_params):
348        """Test function to run RvR.
349
350        The function runs an RvR test in the current device/AP configuration.
351        Function is called from another wrapper function that sets up the
352        testbed for the RvR test
353
354        Args:
355            testcase_params: dict containing test-specific parameters
356        Returns:
357            rvr_result: dict containing rvr_results and meta data
358        """
359        self.log.info('Start running RvR')
360        # Refresh link layer stats before test
361        llstats_obj = wputils.LinkLayerStats(self.dut)
362        zero_counter = 0
363        throughput = []
364        llstats = []
365        rssi = []
366        for atten in testcase_params['atten_range']:
367            for dev in self.android_devices:
368                if not wputils.health_check(dev, 5, 50):
369                    asserts.skip('DUT health check failed. Skipping test.')
370            # Set Attenuation
371            for attenuator in self.attenuators:
372                attenuator.set_atten(atten, strict=False)
373            # Refresh link layer stats
374            llstats_obj.update_stats()
375            # Setup sniffer
376            if self.testbed_params['sniffer_enable']:
377                self.sniffer.start_capture(
378                    network=testcase_params['test_network'],
379                    duration=self.testclass_params['iperf_duration'] / 5)
380            # Start iperf session
381            self.iperf_server.start(tag=str(atten))
382            rssi_future = wputils.get_connected_rssi_nb(
383                self.dut, self.testclass_params['iperf_duration'] - 1, 1, 1)
384            client_output_path = self.iperf_client.start(
385                testcase_params['iperf_server_address'],
386                testcase_params['iperf_args'], str(atten),
387                self.testclass_params['iperf_duration'] + self.TEST_TIMEOUT)
388            server_output_path = self.iperf_server.stop()
389            rssi_result = rssi_future.result()
390            current_rssi = {
391                'signal_poll_rssi': rssi_result['signal_poll_rssi']['mean'],
392                'chain_0_rssi': rssi_result['chain_0_rssi']['mean'],
393                'chain_1_rssi': rssi_result['chain_1_rssi']['mean']
394            }
395            rssi.append(current_rssi)
396            # Stop sniffer
397            if self.testbed_params['sniffer_enable']:
398                self.sniffer.stop_capture(tag=str(atten))
399            # Parse and log result
400            if testcase_params['use_client_output']:
401                iperf_file = client_output_path
402            else:
403                iperf_file = server_output_path
404            try:
405                iperf_result = ipf.IPerfResult(iperf_file)
406                curr_throughput = numpy.mean(iperf_result.instantaneous_rates[
407                    self.testclass_params['iperf_ignored_interval']:-1]
408                                             ) * 8 * (1.024**2)
409            except:
410                self.log.warning(
411                    'ValueError: Cannot get iperf result. Setting to 0')
412                curr_throughput = 0
413            throughput.append(curr_throughput)
414            llstats_obj.update_stats()
415            curr_llstats = llstats_obj.llstats_incremental.copy()
416            llstats.append(curr_llstats)
417            self.log.info(
418                ('Throughput at {0:.2f} dB is {1:.2f} Mbps. '
419                 'RSSI = {2:.2f} [{3:.2f}, {4:.2f}].').format(
420                     atten, curr_throughput, current_rssi['signal_poll_rssi'],
421                     current_rssi['chain_0_rssi'],
422                     current_rssi['chain_1_rssi']))
423            if curr_throughput == 0 and (
424                    current_rssi['signal_poll_rssi'] < -80
425                    or numpy.isnan(current_rssi['signal_poll_rssi'])):
426                zero_counter = zero_counter + 1
427            else:
428                zero_counter = 0
429            if zero_counter == self.MAX_CONSECUTIVE_ZEROS:
430                self.log.info(
431                    'Throughput stable at 0 Mbps. Stopping test now.')
432                throughput.extend(
433                    [0] *
434                    (len(testcase_params['atten_range']) - len(throughput)))
435                break
436        for attenuator in self.attenuators:
437            attenuator.set_atten(0, strict=False)
438        # Compile test result and meta data
439        rvr_result = collections.OrderedDict()
440        rvr_result['test_name'] = self.current_test_name
441        rvr_result['testcase_params'] = testcase_params.copy()
442        rvr_result['ap_settings'] = self.access_point.ap_settings.copy()
443        rvr_result['fixed_attenuation'] = self.testbed_params[
444            'fixed_attenuation'][str(testcase_params['channel'])]
445        rvr_result['attenuation'] = list(testcase_params['atten_range'])
446        rvr_result['total_attenuation'] = [
447            att + rvr_result['fixed_attenuation']
448            for att in rvr_result['attenuation']
449        ]
450        rvr_result['rssi'] = rssi
451        rvr_result['throughput_receive'] = throughput
452        rvr_result['llstats'] = llstats
453        return rvr_result
454
455    def setup_ap(self, testcase_params):
456        """Sets up the access point in the configuration required by the test.
457
458        Args:
459            testcase_params: dict containing AP and other test params
460        """
461        band = self.access_point.band_lookup_by_channel(
462            testcase_params['channel'])
463        if '2G' in band:
464            frequency = wutils.WifiEnums.channel_2G_to_freq[
465                testcase_params['channel']]
466        else:
467            frequency = wutils.WifiEnums.channel_5G_to_freq[
468                testcase_params['channel']]
469        if frequency in wutils.WifiEnums.DFS_5G_FREQUENCIES:
470            self.access_point.set_region(self.testbed_params['DFS_region'])
471        else:
472            self.access_point.set_region(self.testbed_params['default_region'])
473        self.access_point.set_channel(band, testcase_params['channel'])
474        self.access_point.set_bandwidth(band, testcase_params['mode'])
475        self.log.info('Access Point Configuration: {}'.format(
476            self.access_point.ap_settings))
477
    def setup_dut(self, testcase_params):
        """Sets up the DUT in the configuration required by the test.

        Args:
            testcase_params: dict containing AP and other test params
        """
        # Check battery level before test; uplink traffic drains the battery,
        # so only uplink tests are skipped on a failed health check here.
        if not wputils.health_check(
                self.dut, 20) and testcase_params['traffic_direction'] == 'UL':
            asserts.skip('Overheating or Battery level low. Skipping test.')
        # Turn screen off to preserve battery
        self.dut.go_to_sleep()
        if wputils.validate_network(self.dut,
                                    testcase_params['test_network']['SSID']):
            self.log.info('Already connected to desired network')
        else:
            # Fresh connection: reset WiFi, set country code, then connect.
            wutils.reset_wifi(self.dut)
            wutils.set_wifi_country_code(self.dut,
                                         self.testclass_params['country_code'])
            testcase_params['test_network']['channel'] = testcase_params[
                'channel']
            wutils.wifi_connect(self.dut,
                                testcase_params['test_network'],
                                num_of_tries=5,
                                check_connectivity=True)
        # Cache the DUT's wlan0 IPv4 address for iperf addressing.
        self.dut_ip = self.dut.droid.connectivityGetIPv4Addresses('wlan0')[0]
504
    def setup_rvr_test(self, testcase_params):
        """Function that gets devices ready for the test.

        Args:
            testcase_params: dict containing test-specific parameters
        """
        # Configure AP
        self.setup_ap(testcase_params)
        # Set attenuator to 0 dB
        for attenuator in self.attenuators:
            attenuator.set_atten(0, strict=False)
        # Reset, configure, and connect DUT
        self.setup_dut(testcase_params)
        # Wait before running the first wifi test
        first_test_delay = self.testclass_params.get('first_test_delay', 600)
        if first_test_delay > 0 and len(self.testclass_results) == 0:
            self.log.info('Waiting before the first RvR test.')
            time.sleep(first_test_delay)
            # Re-run DUT setup in case the connection dropped during the wait.
            self.setup_dut(testcase_params)
        # Get iperf_server address: when the server runs on the DUT itself
        # (over adb) the client targets the DUT IP; otherwise resolve the
        # remote server address on the DUT's subnet.
        if isinstance(self.iperf_server, ipf.IPerfServerOverAdb):
            testcase_params['iperf_server_address'] = self.dut_ip
        else:
            testcase_params[
                'iperf_server_address'] = wputils.get_server_address(
                    self.remote_server, self.dut_ip, '255.255.255.0')
531
532    def compile_test_params(self, testcase_params):
533        """Function that completes all test params based on the test name.
534
535        Args:
536            testcase_params: dict containing test-specific parameters
537        """
538        num_atten_steps = int((self.testclass_params['atten_stop'] -
539                               self.testclass_params['atten_start']) /
540                              self.testclass_params['atten_step'])
541        testcase_params['atten_range'] = [
542            self.testclass_params['atten_start'] +
543            x * self.testclass_params['atten_step']
544            for x in range(0, num_atten_steps)
545        ]
546        band = self.access_point.band_lookup_by_channel(
547            testcase_params['channel'])
548        testcase_params['test_network'] = self.main_network[band]
549        if (testcase_params['traffic_direction'] == 'DL'
550                and not isinstance(self.iperf_server, ipf.IPerfServerOverAdb)
551            ) or (testcase_params['traffic_direction'] == 'UL'
552                  and isinstance(self.iperf_server, ipf.IPerfServerOverAdb)):
553            testcase_params['iperf_args'] = wputils.get_iperf_arg_string(
554                duration=self.testclass_params['iperf_duration'],
555                reverse_direction=1,
556                traffic_type=testcase_params['traffic_type'])
557            testcase_params['use_client_output'] = True
558        else:
559            testcase_params['iperf_args'] = wputils.get_iperf_arg_string(
560                duration=self.testclass_params['iperf_duration'],
561                reverse_direction=0,
562                traffic_type=testcase_params['traffic_type'])
563            testcase_params['use_client_output'] = False
564        return testcase_params
565
    def _test_rvr(self, testcase_params):
        """ Function that gets called for each test case

        Args:
            testcase_params: dict containing test-specific parameters
        """
        # Compile test parameters from config and test name
        testcase_params = self.compile_test_params(testcase_params)

        # Prepare devices and run test
        self.setup_rvr_test(testcase_params)
        rvr_result = self.run_rvr_test(testcase_params)

        # Post-process results; pass_fail_check raises the final test signal,
        # so it runs last.
        self.testclass_results.append(rvr_result)
        self.process_test_results(rvr_result)
        self.pass_fail_check(rvr_result)
583
584    def generate_test_cases(self, channels, modes, traffic_types,
585                            traffic_directions):
586        """Function that auto-generates test cases for a test class."""
587        test_cases = []
588        allowed_configs = {
589            'VHT20': [
590                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 64, 100,
591                116, 132, 140, 149, 153, 157, 161
592            ],
593            'VHT40': [36, 44, 100, 149, 157],
594            'VHT80': [36, 100, 149]
595        }
596
597        for channel, mode, traffic_type, traffic_direction in itertools.product(
598                channels, modes, traffic_types, traffic_directions):
599            if channel not in allowed_configs[mode]:
600                continue
601            test_name = 'test_rvr_{}_{}_ch{}_{}'.format(
602                traffic_type, traffic_direction, channel, mode)
603            test_params = collections.OrderedDict(
604                channel=channel,
605                mode=mode,
606                traffic_type=traffic_type,
607                traffic_direction=traffic_direction)
608            setattr(self, test_name, partial(self._test_rvr, test_params))
609            test_cases.append(test_name)
610        return test_cases
611
612
613# Classes defining test suites
class WifiRvr_2GHz_Test(WifiRvrTest):
    """RvR suite: 2.4 GHz channels 1/6/11, VHT20, TCP, DL and UL."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(channels=[1, 6, 11],
                                              modes=['VHT20'],
                                              traffic_types=['TCP'],
                                              traffic_directions=['DL', 'UL'])
621
622
class WifiRvr_UNII1_Test(WifiRvrTest):
    """RvR suite: UNII-1 channels 36-48, all VHT modes, TCP, DL and UL."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[36, 40, 44, 48],
            modes=['VHT20', 'VHT40', 'VHT80'],
            traffic_types=['TCP'],
            traffic_directions=['DL', 'UL'])
631
632
class WifiRvr_UNII3_Test(WifiRvrTest):
    """RvR suite: UNII-3 channels 149-161, all VHT modes, TCP, DL and UL."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[149, 153, 157, 161],
            modes=['VHT20', 'VHT40', 'VHT80'],
            traffic_types=['TCP'],
            traffic_directions=['DL', 'UL'])
641
642
class WifiRvr_SampleDFS_Test(WifiRvrTest):
    """RvR suite: sample of DFS channels, all VHT modes, TCP, DL and UL."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[64, 100, 116, 132, 140],
            modes=['VHT20', 'VHT40', 'VHT80'],
            traffic_types=['TCP'],
            traffic_directions=['DL', 'UL'])
651
652
class WifiRvr_SampleUDP_Test(WifiRvrTest):
    """RvR suite: sample channels per band with UDP traffic, DL and UL."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[6, 36, 149],
            modes=['VHT20', 'VHT40', 'VHT80'],
            traffic_types=['UDP'],
            traffic_directions=['DL', 'UL'])
661
662
class WifiRvr_TCP_All_Test(WifiRvrTest):
    """RvR suite: all non-DFS channels, all VHT modes, TCP, DL and UL."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[1, 6, 11, 36, 40, 44, 48, 149, 153, 157, 161],
            modes=['VHT20', 'VHT40', 'VHT80'],
            traffic_types=['TCP'],
            traffic_directions=['DL', 'UL'])
671
672
class WifiRvr_TCP_Downlink_Test(WifiRvrTest):
    """RvR suite: all non-DFS channels, all VHT modes, TCP downlink only."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[1, 6, 11, 36, 40, 44, 48, 149, 153, 157, 161],
            modes=['VHT20', 'VHT40', 'VHT80'],
            traffic_types=['TCP'],
            traffic_directions=['DL'])
681
682
class WifiRvr_TCP_Uplink_Test(WifiRvrTest):
    """RvR suite: all non-DFS channels, all VHT modes, TCP uplink only."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[1, 6, 11, 36, 40, 44, 48, 149, 153, 157, 161],
            modes=['VHT20', 'VHT40', 'VHT80'],
            traffic_types=['TCP'],
            traffic_directions=['UL'])
691
692
693# Over-the air version of RVR tests
class WifiOtaRvrTest(WifiRvrTest):
    """Class to test over-the-air RvR

    This class implements WiFi RvR measurements in an OTA chamber. It enables
    setting turntable orientation and other chamber parameters to study
    performance in varying channel conditions
    """
    def __init__(self, controllers):
        # NOTE(review): intentionally calls BaseTestClass.__init__ directly,
        # bypassing WifiRvrTest.__init__, so the metric loggers and publishing
        # behavior below replace the parent's — confirm against the parent.
        base_test.BaseTestClass.__init__(self, controllers)
        self.testcase_metric_logger = (
            BlackboxMappedMetricLogger.for_test_case())
        self.testclass_metric_logger = (
            BlackboxMappedMetricLogger.for_test_class())
        # OTA runs only publish class-level (aggregated) metrics, unlike the
        # conducted RvR tests.
        self.publish_testcase_metrics = False

    def setup_class(self):
        """Runs the standard RvR class setup, then attaches the OTA chamber."""
        WifiRvrTest.setup_class(self)
        # First chamber listed in the 'OTAChamber' user param is used.
        self.ota_chamber = ota_chamber.create(
            self.user_params['OTAChamber'])[0]

    def teardown_class(self):
        """Runs the standard RvR class teardown and resets the chamber."""
        WifiRvrTest.teardown_class(self)
        self.ota_chamber.reset_chamber()

    def extract_test_id(self, testcase_params, id_fields):
        """Returns an ordered mapping of the requested test case parameters.

        Args:
            testcase_params: dict of parameters describing one test case.
            id_fields: iterable of parameter names identifying the test.

        Returns:
            OrderedDict of {field: value} for the requested fields.
        """
        test_id = collections.OrderedDict(
            (param, testcase_params[param]) for param in id_fields)
        return test_id

    def process_testclass_results(self):
        """Saves plot with all test results to enable comparison."""
        # Plot individual test id results raw data and compile metrics
        # A test id groups results that differ only by orientation.
        plots = collections.OrderedDict()
        compiled_data = collections.OrderedDict()
        for result in self.testclass_results:
            test_id = tuple(
                self.extract_test_id(
                    result['testcase_params'],
                    ['channel', 'mode', 'traffic_type', 'traffic_direction'
                     ]).items())
            if test_id not in plots:
                # Initialize test id data when not present
                compiled_data[test_id] = {'throughput': [], 'metrics': {}}
                compiled_data[test_id]['metrics'] = {
                    key: []
                    for key in result['metrics'].keys()
                }
                plots[test_id] = wputils.BokehFigure(
                    title='Channel {} {} ({} {})'.format(
                        result['testcase_params']['channel'],
                        result['testcase_params']['mode'],
                        result['testcase_params']['traffic_type'],
                        result['testcase_params']['traffic_direction']),
                    x_label='Attenuation (dB)',
                    primary_y_label='Throughput (Mbps)')
            # Compile test id data and metrics
            compiled_data[test_id]['throughput'].append(
                result['throughput_receive'])
            # Last result wins; assumes every orientation of a test id sweeps
            # the same attenuation values — TODO confirm.
            compiled_data[test_id]['total_attenuation'] = result[
                'total_attenuation']
            for metric_key, metric_value in result['metrics'].items():
                compiled_data[test_id]['metrics'][metric_key].append(
                    metric_value)
            # Add test id to plots
            plots[test_id].add_line(result['total_attenuation'],
                                    result['throughput_receive'],
                                    result['test_name'],
                                    width=1,
                                    style='dashed',
                                    marker='circle')

        # Compute average RvRs and compound metrics over orientations
        for test_id, test_data in compiled_data.items():
            test_id_dict = dict(test_id)
            metric_tag = '{}_{}_ch{}_{}'.format(
                test_id_dict['traffic_type'],
                test_id_dict['traffic_direction'], test_id_dict['channel'],
                test_id_dict['mode'])
            # Fraction of orientations whose high_tput_range metric is not -1
            # (-1 presumably marks orientations where the high-throughput
            # range was never reached — confirm in the base class).
            high_tput_hit_freq = numpy.mean(
                numpy.not_equal(test_data['metrics']['high_tput_range'], -1))
            self.testclass_metric_logger.add_metric(
                '{}.high_tput_hit_freq'.format(metric_tag), high_tput_hit_freq)
            # Log every metric averaged over all tested orientations.
            for metric_key, metric_value in test_data['metrics'].items():
                metric_key = "{}.avg_{}".format(metric_tag, metric_key)
                metric_value = numpy.mean(metric_value)
                self.testclass_metric_logger.add_metric(
                    metric_key, metric_value)
            # Overlay average and median RvR curves on the raw per-orientation
            # data (mean/median taken across orientations, per attenuation).
            test_data['avg_rvr'] = numpy.mean(test_data['throughput'], 0)
            test_data['median_rvr'] = numpy.median(test_data['throughput'], 0)
            plots[test_id].add_line(test_data['total_attenuation'],
                                    test_data['avg_rvr'],
                                    legend='Average Throughput',
                                    marker='circle')
            plots[test_id].add_line(test_data['total_attenuation'],
                                    test_data['median_rvr'],
                                    legend='Median Throughput',
                                    marker='square')

        # Render all figures into a single html file for side-by-side review.
        figure_list = []
        for test_id, plot in plots.items():
            plot.generate_figure()
            figure_list.append(plot)
        output_file_path = os.path.join(self.log_path, 'results.html')
        wputils.BokehFigure.save_figures(figure_list, output_file_path)

    def setup_rvr_test(self, testcase_params):
        """Positions the turntable, then runs the standard RvR test setup.

        Args:
            testcase_params: dict of test parameters; must contain the
                turntable 'orientation' in degrees.
        """
        # Set turntable orientation
        self.ota_chamber.set_orientation(testcase_params['orientation'])
        # Continue test setup
        WifiRvrTest.setup_rvr_test(self, testcase_params)

    def generate_test_cases(self, channels, modes, angles, traffic_types,
                            directions):
        """Generates and attaches OTA RvR test cases for valid combinations.

        Args:
            channels: list of WiFi channels to test.
            modes: list of bandwidth modes, e.g. 'VHT20'.
            angles: list of turntable orientations in degrees.
            traffic_types: list of iperf traffic types, e.g. 'TCP'.
            directions: list of traffic directions, 'DL' and/or 'UL'.

        Returns:
            list of generated test case names.
        """
        test_cases = []
        # Channels on which each bandwidth mode is permitted; combinations
        # outside this map are silently skipped.
        allowed_configs = {
            'VHT20': [
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 149, 153,
                157, 161
            ],
            'VHT40': [36, 44, 149, 157],
            'VHT80': [36, 149]
        }
        for channel, mode, angle, traffic_type, direction in itertools.product(
                channels, modes, angles, traffic_types, directions):
            if channel not in allowed_configs[mode]:
                continue
            testcase_name = 'test_rvr_{}_{}_ch{}_{}_{}deg'.format(
                traffic_type, direction, channel, mode, angle)
            test_params = collections.OrderedDict(channel=channel,
                                                  mode=mode,
                                                  traffic_type=traffic_type,
                                                  traffic_direction=direction,
                                                  orientation=angle)
            # Bind the parameters into a parent _test_rvr call and attach it
            # to the instance so the test runner can discover it by name.
            setattr(self, testcase_name, partial(self._test_rvr, test_params))
            test_cases.append(testcase_name)
        return test_cases
830
831
class WifiOtaRvr_StandardOrientation_Test(WifiOtaRvrTest):
    """OTA RvR test class sweeping all channels at 45-degree turntable steps."""
    def __init__(self, controllers):
        super().__init__(controllers)
        standard_angles = list(range(0, 360, 45))
        self.tests = self.generate_test_cases(
            [1, 6, 11, 36, 40, 44, 48, 149, 153, 157, 161],
            ['VHT20', 'VHT40', 'VHT80'], standard_angles, ['TCP'], ['DL'])
839
840
class WifiOtaRvr_SampleChannel_Test(WifiOtaRvrTest):
    """OTA RvR test class covering a sample of channels over all orientations."""
    def __init__(self, controllers):
        super().__init__(controllers)
        angles = list(range(0, 360, 45))
        self.tests = self.generate_test_cases([6], ['VHT20'], angles, ['TCP'],
                                              ['DL'])
        self.tests.extend(
            self.generate_test_cases([36, 149], ['VHT80'], angles, ['TCP'],
                                     ['DL']))
850
851
class WifiOtaRvr_SingleOrientation_Test(WifiOtaRvrTest):
    """OTA RvR test class running both traffic directions at 0 degrees."""
    def __init__(self, controllers):
        super().__init__(controllers)
        self.tests = self.generate_test_cases(
            channels=[6, 36, 40, 44, 48, 149, 153, 157, 161],
            modes=['VHT20', 'VHT40', 'VHT80'],
            angles=[0],
            traffic_types=['TCP'],
            directions=['DL', 'UL'])
858