Update sample scripts to gather 802.11ac data.

In more detail:

* ifstats now captures a system_profiler report on OS X, since the
  airport utility scan results don't distinguish between 802.11n and
  802.11ac networks.
* tcpdump now saves a raw packet capture, since the tcpdump command line
  doesn't process VHT headers in tested versions (OS X 10.10.3 and
  gfrg200-44.0-pre).
* iperf now uses iperf3 in both directions.
* fabric env.always_use_pty is now False; this works around output
  streamed over ssh being incomplete for iperf3.
* An isostream test is now included and run by default.
* Added `timeout` shell script from
  <http://www.pixelbeat.org/scripts/timeout>
* Added IPv6 support, which was essentially a one-line change :)

Reporting changes to consume this data will follow in a separate CL.
We need to test networks day and night, and gathering all of the test
data at capture time makes this easier to do.

Change-Id: Ida5c0a85005abe66243151f5531e5a014b86e4bb
diff --git a/wifitables/ifstats.py b/wifitables/ifstats.py
index ae277a9..4ad0059 100644
--- a/wifitables/ifstats.py
+++ b/wifitables/ifstats.py
@@ -37,11 +37,20 @@
     """Gather information about other observable networks from `airport`."""
     return self.run('airport -s')
 
+  def SystemProfiler(self):
+    """Gather information about wireless capabilities from `system_profiler`.
+
+    On a Mac with 802.11ac wireless, this will also contain a wireless network
+    scan result with more information than `airport -s` provides.
+    """
+    return self.run('system_profiler -xml SPAirPortDataType')
+
   def GatherDarwin(self):
     """Gather wireless network information on Darwin (Mac OS X)."""
     return {
         'airport': self.AirportI(),
         'airportscan': self.AirportScan(),
+        'systemprofiler': self.SystemProfiler()
     }
 
   def GatherLinux(self):
@@ -82,6 +91,7 @@
       raise OSError('System {} unsupported for InterfaceStats'.format(
           self.system))
 
+
 def ParseIwLink(text):
   ol = text.splitlines()
 
diff --git a/wifitables/iperf.py b/wifitables/iperf.py
index efe6781..4815762 100644
--- a/wifitables/iperf.py
+++ b/wifitables/iperf.py
@@ -1,11 +1,15 @@
 """iperf: run a series of iperf tests over a wireless network."""
 
+import collections
+import json
 import os
-import pipes
 import re
 import sys
 
-DEVNULL = open(os.devnull, 'wb')
+
+def Mbps(mbps):
+  """Given a bandwidth in Mbps, return the bandwidth in bit/s."""
+  return mbps * 1000000
 
 
 class Iperf(object):
@@ -14,24 +18,21 @@
   def __init__(self, runner=None, bind=None):
     self.run = runner
     self.bind = bind
-    self.cache = {}
 
   def Ping(self, host):
-    line = 'ping -c 1 {}'.format(pipes.quote(host))
-    return self.run(line).return_code
+    args = ['-c', '1', host]
+    return (self.run(['ping'] + args).succeeded or
+            self.run(['ping6'] + args).succeeded)
 
   def _Iperf(self, host, udp=False, bandwidth=20):
     """Run iperf against host and return string containing stdout from run."""
-    line = 'iperf -c {}'
-    args = [host]
+    args = ['iperf', '-c', host]
     if udp:
-      line += ' -u -b {}'
-      args += [str(bandwidth * 1000000)]
+      args += ['-u', '-b', str(Mbps(bandwidth))]
     if self.bind:
-      line += ' -B {}'
-      args += [self.bind]
+      args += ['-B', self.bind]
 
-    return self.run(line.format(*[pipes.quote(arg) for arg in args]))
+    return self.run(args)
 
   def IperfTCP(self, host='127.0.0.1'):
     return self._Iperf(host)
@@ -39,30 +40,69 @@
   def IperfUDP(self, host='127.0.0.1', bandwidth=20):
     return self._Iperf(host, udp=True, bandwidth=bandwidth)
 
+  def Iperf3(self, host, udp=False, reverse=False, bandwidth=1e6):
+    """Run iperf against host and return string containing stdout from run."""
+    args = ['iperf3', '-c', host, '-J']
+    if udp:
+      args += ['-u', '-b', str(bandwidth)]
+    if reverse:
+      args += ['-R']
+
+    if self.bind:
+      args += ['-B', self.bind]
+
+    return self.run(args)
+
   def RunTestSeries(self, host):
     """RunTestSeries runs iperf tests and returns their results.
 
     Args:
-      host: string containing the hostname to run tests against
+      host: A string containing the hostname to run tests against.
     Returns:
-      a list of files; each file contains output for one test
+      A dict mapping unique names (strings, suitable for use as filenames) to
+      strings containing JSON-formatted iperf3 results.
     """
     outputs = {}
-    status = self.Ping(host)
-    if not status:
-      it = self.IperfTCP(host)
-      outputs['iperf'] = it
+    if not self.Ping(host):
+      print >> sys.stderr, ('Could not ping destination host {}; '
+                            'skipping performance tests').format(host)
+      return outputs
+
+    counter = collections.Counter()
+    def NextName(counter, basename):
+      """Return `logrotate(1)`-style names for a bunch of iperf3 files."""
+      count = counter[basename]
+      if count:
+        name = '{}.{}'.format(basename, count)
+      else:
+        name = basename
+
+      counter[basename] += 1
+      return name
+
+    for reverse in (False, True):
+      it = self.Iperf3(host, reverse=reverse)
+      outputs[NextName(counter, 'iperf3')] = it
 
       # Empirically about 1.25x more packets make it through in UDP than TCP.
       # Try to saturate the channel by sending a bit more than that over UDP.
-      bandwidth = ParseIperfTCP(it).get('bandwidth', 0.01)
-      outputs['iperfu'] = self.IperfUDP(host, bandwidth=bandwidth * 1.5)
-    else:
-      print >> sys.stderr, ('Could not ping destination host {0}; '
-                            'skipping performance tests').format(host)
+      try:
+        itp = ParseIperf3(it)
+      except ValueError:
+        print >> sys.stderr, ('Could not decode iperf3 TCP result, proceeding '
+                              'as if it were empty.')
+        itp = {}
+
+      bw = (itp.get('end', {})
+            .get('sum_received', {})
+            .get('bits_per_second', 1))
+
+      iu = self.Iperf3(host, udp=True, reverse=reverse, bandwidth=bw * 1.5)
+      outputs[NextName(counter, 'iperf3')] = iu
 
     return outputs
 
+
 def _ParseIperf(text, udp=False):
   """Parse summary line written by an `iperf` run into a Python dict."""
   pattern = (r'\[(.{3})\]\s+(?P<interval>.*?sec)\s+'
@@ -100,15 +140,18 @@
   return _ParseIperf(text, udp=True)
 
 
+def ParseIperf3(text):
+  return json.loads(text)
+
+
 def Restore(report_dir):
   """Restores an `Iperf` cache from data on the filesystem."""
   cache = {}
   for name in ['iperf', 'iperfu']:
-    ipath = os.path.join(report_dir, name)
-    if os.path.exists(ipath):
-      with open(ipath) as infile:
+    try:
+      with open(os.path.join(report_dir, name)) as infile:
         cache[name] = infile.read()
-    else:
+    except IOError:
       cache[name] = ''
 
   return cache
diff --git a/wifitables/isostream.py b/wifitables/isostream.py
new file mode 100644
index 0000000..bb546e8
--- /dev/null
+++ b/wifitables/isostream.py
@@ -0,0 +1,11 @@
+"""isostream: runs an isostream test"""
+
+import pipes
+
+def RunIsostreamTest(run, host, bandwidth=14, time=10):
+  """RunIsostreamTest runs an isostream test and returns the results."""
+  # add 1 second to the timeout so we get results for the full requested time.
+  isostream_cmd = ['./timeout', '{:d}'.format(time + 1),
+                   'isostream', '-b', '{:d}'.format(bandwidth), host]
+  return run(isostream_cmd)
+
diff --git a/wifitables/sample.py b/wifitables/sample.py
index 89cfe2f..2e2f550 100755
--- a/wifitables/sample.py
+++ b/wifitables/sample.py
@@ -5,16 +5,18 @@
 import atexit
 import functools
 import os
+import pipes
 import platform
 import subprocess
 import sys
 import time
 
-from fabric.api import env, execute, local, run
-from fabric.network import disconnect_all
+from fabric import api
+from fabric import network
 
 import ifstats
 import iperf
+import isostream
 import options
 import tcpdump
 
@@ -28,9 +30,38 @@
 i,interface=    wireless interface to use for outgoing connections
 m,monitor=      wireless monitor interface to use
 r,remote=       remote host to run tests on
+t,time=         length of time in seconds to run isostream test for [50]
 """
 
 
+def _Run(cmd_or_args, dirname='.', local=False):
+  if isinstance(cmd_or_args, list):
+    cmd = ' '.join([pipes.quote(arg) for arg in cmd_or_args])
+  else:
+    cmd = cmd_or_args
+
+  if local:
+    with api.lcd(dirname):
+      return api.local(cmd, capture=True)
+  else:
+    with api.cd(dirname):
+      return api.run(cmd)
+
+
+def _SetupTestHost():
+  # work around current embedded image's lack of mktemp(1).
+  wd = api.run('python -c "import tempfile; print tempfile.mkdtemp()"')
+  with api.cd(wd):
+    for script in ['timeout']:
+      api.put(script, script, mode=0755)
+  return wd
+
+
+def _CleanupTestHost(dirname):
+  api.run('rm -r {}'.format(pipes.quote(dirname)))
+  network.disconnect_all()
+
+
 def main():
   system = platform.system()
   defaults = {
@@ -60,8 +91,6 @@
   report_name = 'wifi-{}-{:04}'.format(time.time(), opt.steps)
 
   if opt.journal:
-    with open(opt.journal, 'a') as journal:
-      print >> journal, report_name
     dest_dir = os.path.join(os.path.dirname(opt.journal), report_name)
   else:
     dest_dir = report_name
@@ -77,28 +106,30 @@
   execute_args = {}
   if opt.remote:
     # update Fabric env for embedded systems
-    env.update({
+    api.env.update({
+        'always_use_pty': False,
         'key_filename': os.path.expanduser('~/.ssh/bruno-sshkey'),
         'user': 'root',
         'shell': 'sh -l -c',
     })
     execute_args['host'] = opt.remote
 
-    ifsystem = 'Linux'
-    ifrun = run
+    wd = api.execute(_SetupTestHost, **execute_args).values()[0]
+    atexit.register(api.execute, _CleanupTestHost, wd, **execute_args)
 
-    atexit.register(disconnect_all)
+    ifsystem = 'Linux'
+    run = functools.partial(_Run, dirname=wd, local=False)
   else:
     ifsystem = system
-    ifrun = functools.partial(local, capture=True)
+    run = functools.partial(_Run, local=True)
 
   ifs = ifstats.InterfaceStats(system=ifsystem,
-                               runner=ifrun,
+                               runner=run,
                                interface=opt.interface)
 
   # since we're only executing over one host, ignore the return from `execute`
   # that says which host it was for now.
-  cache = execute(ifs.Gather, **execute_args).values()[0]
+  cache = api.execute(ifs.Gather, **execute_args).values()[0]
   results = ifstats.Parse(ifsystem, cache)
 
   bssid = results['link']['BSSID']
@@ -111,22 +142,37 @@
     addr = addr[:mask]
 
   if opt.monitor:
-    sudo_tcpdump, mcs_out, mcs_err = tcpdump.MCS(bssid, opt.monitor, dest_dir)
-    print 'Gathering tcpdump in background as', sudo_tcpdump.pid
+    tcpdump_proc, tcpdump_stderr = tcpdump.tcpdump(bssid, opt.monitor, dest_dir)
+    print 'Gathering tcpdump in background as', tcpdump_proc.pid
 
-  ips = iperf.Iperf(runner=ifrun, bind=addr)
-  env.warn_only = True  # `iperf` returns 56 if it can't reach the server,
-                        # or 57 if it doesn't receive a final report from it
-                        # on Linux; don't abort in these cases
-  cache.update(
-      execute(ips.RunTestSeries, opt.destination, **execute_args).values()[0]
-  )
+  if opt.bind and not addr:
+    addr = opt.bind
+
+  ips = iperf.Iperf(runner=run, bind=addr)
+
+  # `iperf` returns 56 if it can't reach the server, or 57 if it doesn't receive
+  # a final report from it on Linux; don't abort in these cases
+  with api.settings(warn_only=True):
+    cache.update(
+        api.execute(ips.RunTestSeries, opt.destination,
+                    **execute_args).values()[0]
+    )
+
+  # `isostream` won't end on its own, so we wrap it with `timeout` and accept a
+  # return code of 124 (timed out) as well.
+  with api.settings(ok_ret_codes=[0, 124]):
+    cache['isostream'] = api.execute(isostream.RunIsostreamTest, run,
+                                     opt.destination, time=opt.time,
+                                     **execute_args).values()[0]
 
   if opt.monitor:
-    subprocess.check_call(['sudo', 'kill', str(sudo_tcpdump.pid)])
-    for stream in [mcs_out, mcs_err]:
-      stream.flush()
-      stream.close()
+    try:
+      tcpdump_proc.terminate()
+    except OSError:
+      subprocess.check_call(['sudo', 'kill', str(tcpdump_proc.pid)])
+
+    tcpdump_stderr.flush()
+    tcpdump_stderr.close()
 
   if opt.journal:
     with open(opt.journal, 'a') as journal:
diff --git a/wifitables/tcpdump.py b/wifitables/tcpdump.py
index b1e9b3d..44226bb 100644
--- a/wifitables/tcpdump.py
+++ b/wifitables/tcpdump.py
@@ -56,3 +56,27 @@
 
   return sudo_tcpdump, out, err
 
+
+def tcpdump(bssid, interface, report_dir='', filtered=True):
+  """Runs tcpdump in the background to gather wireless MCS."""
+
+  cap = os.path.join(report_dir, 'testnetwork.pcap')
+  tcpdump_args = ['tcpdump',  '-IUnei', interface, '-w', cap]
+
+  login = os.getlogin()
+  if login != 'root':
+    print 'Please enter password for `sudo` if prompted.'
+    subprocess.call(['sudo', '-v'])
+    tcpdump_args = ['sudo'] + tcpdump_args + ['-Z', login]
+
+  if filtered:
+    filt = ('(not subtype beacon and not subtype ack) and '
+            '(wlan addr1 {0} or wlan addr2 {0} or wlan addr3 {0})'.format(
+                bssid, bssid, bssid))
+    tcpdump_args += [filt]
+
+  err = open(os.path.join(report_dir, 'mcserr'), 'w+b')
+  tcpdump = subprocess.Popen(tcpdump_args, stderr=err)
+
+  return tcpdump, err
+
diff --git a/wifitables/timeout b/wifitables/timeout
new file mode 100755
index 0000000..c30762a
--- /dev/null
+++ b/wifitables/timeout
@@ -0,0 +1,57 @@
+#!/bin/sh
+
+# Execute a command with a timeout
+
+# License: LGPLv2
+# Author:
+#    http://www.pixelbeat.org/
+# Notes:
+#    Note there is a timeout command packaged with coreutils since v7.0
+#    If the timeout occurs the exit status is 124.
+#    There is an asynchronous (and buggy) equivalent of this
+#    script packaged with bash (under /usr/share/doc/ in my distro),
+#    which I only noticed after writing this.
+#    I noticed later again that there is a C equivalent of this packaged
+#    with satan by Wietse Venema, and copied to forensics by Dan Farmer.
+# Changes:
+#    V1.0, Nov  3 2006, Initial release
+#    V1.1, Nov 20 2007, Brad Greenlee <brad@footle.org>
+#                       Make more portable by using the 'CHLD'
+#                       signal spec rather than 17.
+#    V1.3, Oct 29 2009, Ján Sáreník <jasan@x31.com>
+#                       Even though this runs under dash,ksh etc.
+#                       it doesn't actually timeout. So enforce bash for now.
+#                       Also change exit on timeout from 128 to 124
+#                       to match coreutils.
+#    V2.0, Oct 30 2009, Ján Sáreník <jasan@x31.com>
+#                       Rewritten to cover compatibility with other
+#                       Bourne shell implementations (pdksh, dash)
+
+if [ "$#" -lt "2" ]; then
+    echo "Usage:   `basename $0` timeout_in_seconds command" >&2
+    echo "Example: `basename $0` 2 sleep 3 || echo timeout" >&2
+    exit 1
+fi
+
+cleanup()
+{
+    trap - ALRM               #reset handler to default
+    kill -ALRM $a 2>/dev/null #stop timer subshell if running
+    kill $! 2>/dev/null &&    #kill last job
+      exit 124                #exit with 124 if it was running
+}
+
+watchit()
+{
+    trap "cleanup" ALRM
+    sleep $1& wait
+    kill -ALRM $$
+}
+
+watchit $1& a=$!         #start the timeout
+shift                    #first param was timeout for sleep
+trap "cleanup" ALRM INT  #cleanup after timeout
+"$@"& wait $!; RET=$?    #start the job wait for it and save its return value
+kill -ALRM $a            #send ALRM signal to watchit
+wait $a                  #wait for watchit to finish cleanup
+exit $RET                #return the value