#!/bin/sh
# This is a quick and dirty script to run a netperf TCP_RR and
# TCP_STREAM test concurrently so one can see the effect of buffer
# bloat on latency.  We assume that netperf has been compiled with
# demo mode enabled via ./configure --enable-demo

NETPERF=`which netperf`
if [ $? -ne 0 ]
then
    echo "Unable to find a netperf binary."
    exit 1
fi
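
# the destination host to test against comes in as the first
# argument; check for it here rather than letting netperf fail
# cryptically later
if [ $# -lt 1 ]
then
    echo "Usage: $0 <destination_host>"
    exit 1
fi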

# CHUNK is the length in seconds of the quiet periods before and
# after the TCP_STREAM test; the stream test itself runs for two
# CHUNKs
CHUNK=60

# first, start the TCP_RR test
RR_START=`date +%s`
echo "Starting netperf TCP_RR at $RR_START" | tee bloat.log
# a negative value for the demo interval (-D) will cause netperf to
# make gettimeofday() calls after every transaction.  this results in
# more accurate demo intervals once the STREAM test kicks in, at the
# cost of a somewhat lower transaction rate, not unlike enabling
# histogram mode.
$NETPERF -H $1 -l 7200 -t TCP_RR -D -0.5 -v 2 -- -r 1 > netperf_rr.out 2>&1 &

# sleep CHUNK seconds to establish baseline latency
sleep $CHUNK

# now run the TCP_STREAM test

STREAM_START=`date +%s`
echo "Starting netperf TCP_STREAM test at $STREAM_START" | tee -a bloat.log
$NETPERF -H $1 -l `expr $CHUNK \* 2` -t TCP_STREAM -D 0.25 -v 2 -- -m 1K > netperf_stream.out 2>&1
STREAM_STOP=`date +%s`
echo "Netperf TCP_STREAM test stopped at $STREAM_STOP" | tee -a bloat.log

# sleep another CHUNK seconds to watch latency recover
sleep $CHUNK

# sending SIGALRM to the remaining netperf causes it to wrap up the
# TCP_RR test early and emit its final results
pkill -ALRM netperf
RR_STOP=`date +%s`
echo "Netperf TCP_RR test stopped at $RR_STOP" | tee -a bloat.log

RRDTOOL=`which rrdtool`
if [ $? -ne 0 ]
then
    echo "Unable to find rrdtool.  You will have to post-process the results by hand."
    exit 0
fi

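# the Interim result lines we post-process below are assumed to look
# like the following, based on the fields the awk scripts pull out
# (result value in field 3, interval length in field 6, timestamp in
# field 10):
#
#   Interim result: 2862.57 Trans/s over 1.000 seconds ending at 1385569092.618
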
# pull the first and last timestamps, and the largest demo interval,
# out of the Interim results
MIN_TIMESTAMP=`grep Interim netperf_rr.out | head -1 | awk '{print int($10)}'`
MAX_TIMESTAMP=`grep Interim netperf_rr.out | tail -1 | awk '{print int($10)}'`
MAX_INTERVAL=`grep Interim netperf_rr.out | awk 'BEGIN{max=0.0} ($6 > max) {max = $6}END{print int(max) + 1}'`
LENGTH=`expr $MAX_TIMESTAMP - $MIN_TIMESTAMP`

# create a round-robin database with a one-second step, a gauge data
# source for the transaction rate, and room for one average per
# second over the length of the test
$RRDTOOL create netperf_rr.rrd --step 1 --start $MIN_TIMESTAMP \
    DS:tps:GAUGE:$MAX_INTERVAL:U:U RRA:AVERAGE:0.5:1:$LENGTH

# now fill it, one update per Interim result
awk -v rrdtool=$RRDTOOL '($1 == "Interim"){printf("%s update netperf_rr.rrd %.3f:%f\n",rrdtool,$10,$3)}' netperf_rr.out | sh

# now post-process the TCP_STREAM test.  we could use STREAM_START
# and STREAM_STOP, but the timestamps in the Interim results will be
# just a bit more accurate
STREAM_MIN_TIMESTAMP=`grep Interim netperf_stream.out | head -1 | awk '{print int($10)}'`
STREAM_MAX_TIMESTAMP=`grep Interim netperf_stream.out | tail -1 | awk '{print int($10)}'`
STREAM_MAX_INTERVAL=`grep Interim netperf_stream.out | awk 'BEGIN{max=0.0} ($6 > max) {max = $6}END{print int(max) + 1}'`
STREAM_LENGTH=`expr $STREAM_MAX_TIMESTAMP - $STREAM_MIN_TIMESTAMP`

$RRDTOOL create netperf_stream.rrd --step 1 --start $STREAM_MIN_TIMESTAMP \
    DS:mbps:GAUGE:$STREAM_MAX_INTERVAL:U:U RRA:AVERAGE:0.5:1:$STREAM_LENGTH

# now fill it
awk -v rrdtool=$RRDTOOL '($1 == "Interim"){printf("%s update netperf_stream.rrd %.3f:%f\n",rrdtool,$10,$3)}' netperf_stream.out | sh

# now graph it.  we want to make sure the chart is at least 800
# pixels wide, and has enough pixels for every data point

WIDTH=$LENGTH
if [ $WIDTH -lt 800 ]
then
    WIDTH=800
fi

SIZE="-w $WIDTH -h 400"

# we want to find the scaling factor for the throughput, the goal
# being that latency can go to the top of the chart and throughput
# only half-way up

# graph to /dev/null just to extract the maximum latency and maximum
# bits per second.  the throughput is doubled here (2000000 rather
# than the 1000000 used for the real chart) so the scale computed
# below puts peak throughput at half the height of peak latency
MAXLATMAXBPS=`$RRDTOOL graph /dev/null \
    --start $MIN_TIMESTAMP --end $MAX_TIMESTAMP \
    DEF:trans=netperf_rr.rrd:tps:AVERAGE \
    CDEF:latency=1.0,trans,/ \
    VDEF:maxlatency=latency,MAXIMUM \
    DEF:mbps=netperf_stream.rrd:mbps:AVERAGE \
    CDEF:bps=mbps,2000000,\* \
    VDEF:maxbps=bps,MAXIMUM \
    PRINT:maxlatency:"%.20lf" \
    PRINT:maxbps:"%.20lf" | sed 1d`

# we should check that the previous command actually produced the two
# maxima before using them
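if [ `echo $MAXLATMAXBPS | wc -w` -ne 2 ]
then
    echo "Unable to extract maxima from the rrdtool output." | tee -a bloat.log
    exit 1
fi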

SCALE=`echo $MAXLATMAXBPS | awk '{print $2/$1}'`

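# with SCALE = (2 * maxbps) / maxlatency, the plotted value
# bps / SCALE works out to (bps / maxbps) * (maxlatency / 2), so peak
# throughput lands at exactly half the height of peak latency
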
# finally, draw the real chart: latency in seconds against the left
# axis, bits per second against the right, with vertical rules
# marking where the TCP_STREAM test started and stopped
$RRDTOOL graph bloat.svg --imgformat SVG \
    $SIZE \
    --lower-limit 0 \
    --start $MIN_TIMESTAMP --end $MAX_TIMESTAMP \
    -t "Effect of bulk transfer on latency to $1" \
    -v "Seconds" \
    --right-axis $SCALE:0 \
    --right-axis-label "Bits per Second" \
    DEF:trans=netperf_rr.rrd:tps:AVERAGE \
    CDEF:latency=1.0,trans,/ \
    LINE2:latency#00FF0080:"TCP_RR Latency" \
    DEF:mbps=netperf_stream.rrd:mbps:AVERAGE \
    CDEF:bps=mbps,1000000,\* \
    CDEF:sbps=bps,$SCALE,/ \
    LINE2:sbps#0000FFF0:"TCP_STREAM Throughput" \
    VRULE:${STREAM_START}#FF000080:"TCP_STREAM start" \
    VRULE:${STREAM_STOP}#00000080:"TCP_STREAM stop" \
    --x-grid SECOND:10:SECOND:60:SECOND:60:0:%X