#ifndef __LINUX_PKT_SCHED_H
#define __LINUX_PKT_SCHED_H

/* Logical priority bands not depending on a specific packet scheduler.
   Every scheduler will map them to real traffic classes, if it has
   no more precise mechanism to classify packets.

   These numbers have no special meaning, though their coincidence
   with obsolete IPv6 values is not accidental :-). New IPv6 drafts
   preferred full anarchy inspired by the diffserv group.

   Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
   class; actually, as a rule it will be handled with more care than
   filler or even bulk.
 */

#define TC_PRIO_BESTEFFORT		0
#define TC_PRIO_FILLER			1
#define TC_PRIO_BULK			2
#define TC_PRIO_INTERACTIVE_BULK	4
#define TC_PRIO_INTERACTIVE		6
#define TC_PRIO_CONTROL			7

#define TC_PRIO_MAX			15

/* Generic queue statistics, available for all the elements.
   Particular schedulers may also have their own private records.
 */

struct tc_stats
{
	__u64	bytes;			/* Number of enqueued bytes */
	__u32	packets;		/* Number of enqueued packets	*/
	__u32	drops;			/* Packets dropped because of lack of resources */
	__u32	overlimits;		/* Number of throttle events when this
					 * flow goes out of allocated bandwidth */
	__u32	bps;			/* Current flow byte rate */
	__u32	pps;			/* Current flow packet rate */
	__u32	qlen;
	__u32	backlog;
};

struct tc_estimator
{
	signed char	interval;
	unsigned char	ewma_log;
};

/* "Handles"
   ---------

    All the traffic control objects have 32bit identifiers, or "handles".

    They can be considered opaque numbers from the user API viewpoint,
    but they actually always consist of two fields: a major and a minor
    number, which the kernel interprets specially.  Applications may
    rely on this structure, though it is not recommended.

    F.e. qdisc handles always have a minor number equal to zero,
    classes (or flows) have a major number equal to the parent qdisc's
    major, and a minor number uniquely identifying the class inside
    the qdisc.

    Macros to manipulate handles:
 */

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

#define TC_H_UNSPEC	(0U)
#define TC_H_ROOT	(0xFFFFFFFFU)
#define TC_H_INGRESS    (0xFFFFFFF1U)
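
/* Illustrative sketch (not part of this header): building and taking
 * apart the handle "1:10", i.e. class 0x10 of the qdisc with major
 * number 1, using the macros above.  Only the macros are real; the
 * variable names are examples.
 *
 *	__u32 qdisc_1  = TC_H_MAKE(1 << 16, 0);		// handle "1:"
 *	__u32 cls_1_10 = TC_H_MAKE(qdisc_1, 0x10);	// handle "1:10"
 *
 *	TC_H_MAJ(cls_1_10) >> 16 == 1   and   TC_H_MIN(cls_1_10) == 0x10
 */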

struct tc_ratespec
{
	unsigned char	cell_log;
	unsigned char	__reserved;
	unsigned short	feature;
	short		addend;
	unsigned short	mpu;
	__u32		rate;
};
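
/* Illustrative sketch (not defined by this header): user space tools
 * conventionally pair a tc_ratespec with a 256-entry rate table passed
 * in a *_RTAB attribute, where slot i holds the time needed to send a
 * packet of size up to (i + 1) << cell_log bytes at `rate` (bytes per
 * second), never less than the minimum policed unit `mpu`.  A minimal
 * computation, assuming the time is wanted in microseconds (real tools
 * use the scheduler's own clock units):
 *
 *	void fill_rtab(const struct tc_ratespec *r, __u32 rtab[256])
 *	{
 *		int i;
 *
 *		for (i = 0; i < 256; i++) {
 *			unsigned int sz = (i + 1) << r->cell_log;
 *
 *			if (sz < r->mpu)
 *				sz = r->mpu;
 *			rtab[i] = (__u32)(1000000ULL * sz / r->rate);
 *		}
 *	}
 */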

/* FIFO section */

struct tc_fifo_qopt
{
	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
};
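
/* Illustrative sketch (values are examples only): the same option
 * structure configures both FIFO flavours, only the unit of `limit`
 * differs.
 *
 *	struct tc_fifo_qopt pfifo_opt = { .limit = 100 };		// 100 packets
 *	struct tc_fifo_qopt bfifo_opt = { .limit = 64 * 1024 };	// 64 KB
 */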

/* PRIO section */

#define TCQ_PRIO_BANDS	16
#define TCQ_MIN_PRIO_BANDS 2

struct tc_prio_qopt
{
	int	bands;			/* Number of bands */
	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
};

enum
{
	TCA_PRIO_UNSPEC,
	TCA_PRIO_MQ,
	__TCA_PRIO_MAX
};

#define TCA_PRIO_MAX    (__TCA_PRIO_MAX - 1)
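
/* Illustrative sketch (an example mapping, not mandated by this
 * header): a three-band PRIO configuration where every logical
 * priority from 0 to TC_PRIO_MAX is assigned to one of bands 0..2,
 * band 0 being dequeued first.
 *
 *	struct tc_prio_qopt prio_opt = {
 *		.bands   = 3,
 *		.priomap = { 1, 2, 2, 2, 1, 2, 0, 0,
 *			     1, 1, 1, 1, 1, 1, 1, 1 },
 *	};
 */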

/* TBF section */

struct tc_tbf_qopt
{
	struct tc_ratespec rate;
	struct tc_ratespec peakrate;
	__u32		limit;
	__u32		buffer;
	__u32		mtu;
};

enum
{
	TCA_TBF_UNSPEC,
	TCA_TBF_PARMS,
	TCA_TBF_RTAB,
	TCA_TBF_PTAB,
	__TCA_TBF_MAX,
};

#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
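
/* Illustrative sketch (values are examples, not recommendations): a
 * plain token bucket at roughly 1 Mbit/s with a byte-based queue
 * limit.  User space conventionally fills `buffer` (and `mtu` when a
 * peakrate is used) with the configured burst converted to the time
 * needed to transmit it at `rate`, in the scheduler's clock units;
 * that conversion is left out here.
 *
 *	struct tc_tbf_qopt tbf_opt = {
 *		.rate  = { .rate = 125000 },	// ~1 Mbit/s, in bytes per second
 *		.limit = 10000,			// queue limit in bytes
 *		// .buffer / .mtu: burst sizes converted to transmit time
 *	};
 */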


/* TEQL section */

/* TEQL does not require any parameters */

/* SFQ section */

struct tc_sfq_qopt
{
	unsigned	quantum;	/* Bytes per round allocated to flow */
	int		perturb_period;	/* Period of hash perturbation */
	__u32		limit;		/* Maximal packets in queue */
	unsigned	divisor;	/* Hash divisor  */
	unsigned	flows;		/* Maximal number of flows  */
};

/*
 *  NOTE: limit, divisor and flows are hardwired in the code at the moment.
 *
 *	limit=flows=128, divisor=1024;
 *
 *	The only reason for this is efficiency; it is possible
 *	to change these parameters at compile time.
 */
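
/* Illustrative sketch (values are examples only): typical SFQ options
 * with a quantum of one full-sized Ethernet frame and the flow hash
 * re-perturbed every 10 seconds.  Per the note above, limit, divisor
 * and flows are currently fixed in the code and need not be set.
 *
 *	struct tc_sfq_qopt sfq_opt = {
 *		.quantum        = 1514,	// bytes per round, ~one MTU frame
 *		.perturb_period = 10,	// seconds between hash perturbations
 *	};
 */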

/* RED section */

enum
{
	TCA_RED_UNSPEC,
	TCA_RED_PARMS,
	TCA_RED_STAB,
	__TCA_RED_MAX,
};

#define TCA_RED_MAX (__TCA_RED_MAX - 1)

struct tc_red_qopt
{
	__u32		limit;		/* HARD maximal queue length (bytes)	*/
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char   Wlog;		/* log(W)		*/
	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
	unsigned char   Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
#define TC_RED_ECN	1
#define TC_RED_HARDDROP	2
};
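
/* Illustrative sketch (all numbers are examples; real tools derive
 * Wlog, Plog and Scell_log from the link bandwidth, average packet
 * size and desired burst): RED thresholds in bytes with ECN marking
 * enabled instead of early drops.
 *
 *	struct tc_red_qopt red_opt = {
 *		.limit     = 400000,	// hard queue limit, bytes
 *		.qth_min   = 30000,	// start marking above this average
 *		.qth_max   = 90000,	// mark with P_max at this average
 *		.Wlog      = 9,		// EWMA weight W = 1/2^9
 *		.Plog      = 22,	// drop probability slope (example)
 *		.Scell_log = 10,	// idle-time damping cell (example)
 *		.flags     = TC_RED_ECN,
 *	};
 */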

struct tc_red_xstats
{
	__u32           early;          /* Early drops */
	__u32           pdrop;          /* Drops due to queue limits */
	__u32           other;          /* Drops due to drop() calls */
	__u32           marked;         /* Marked packets */
};

/* GRED section */

#define MAX_DPs 16

enum
{
	TCA_GRED_UNSPEC,
	TCA_GRED_PARMS,
	TCA_GRED_STAB,
	TCA_GRED_DPS,
	__TCA_GRED_MAX,
};

#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)

struct tc_gred_qopt
{
	__u32		limit;        /* HARD maximal queue length (bytes)    */
	__u32		qth_min;      /* Min average length threshold (bytes) */
	__u32		qth_max;      /* Max average length threshold (bytes) */
	__u32		DP;           /* up to 2^32 DPs */
	__u32		backlog;
	__u32		qave;
	__u32		forced;
	__u32		early;
	__u32		other;
	__u32		pdrop;
	__u8		Wlog;         /* log(W)               */
	__u8		Plog;         /* log(P_max/(qth_max-qth_min)) */
	__u8		Scell_log;    /* cell size for idle damping */
	__u8		prio;         /* prio of this VQ */
	__u32		packets;
	__u32		bytesin;
};

/* gred setup */
struct tc_gred_sopt
{
	__u32		DPs;
	__u32		def_DP;
	__u8		grio;
	__u8		flags;
	__u16		pad1;
};
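
/* Illustrative sketch (values are examples only): a GRED setup with
 * four virtual queues, traffic with no matching DP falling back to
 * virtual queue 0, and priority-based (grio) buffer sharing disabled.
 * Each virtual queue is then configured with its own tc_gred_qopt.
 *
 *	struct tc_gred_sopt gred_setup = {
 *		.DPs    = 4,	// number of virtual queues (<= MAX_DPs)
 *		.def_DP = 0,	// default virtual queue
 *		.grio   = 0,	// no priority buffer sharing
 *	};
 */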

/* HTB section */
#define TC_HTB_NUMPRIO		8
#define TC_HTB_MAXDEPTH		8
#define TC_HTB_PROTOVER		3 /* the same as HTB and TC's major */

struct tc_htb_opt
{
	struct tc_ratespec	rate;
	struct tc_ratespec	ceil;
	__u32	buffer;
	__u32	cbuffer;
	__u32	quantum;
	__u32	level;		/* out only */
	__u32	prio;
};
struct tc_htb_glob
{
	__u32 version;		/* to match HTB/TC */
	__u32 rate2quantum;	/* bps->quantum divisor */
	__u32 defcls;		/* default class number */
	__u32 debug;		/* debug flags */

	/* stats */
	__u32 direct_pkts;	/* count of non-shaped packets */
};
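
/* Illustrative sketch (values are examples only): qdisc-level HTB
 * options sending unclassified traffic to class minor 0x20, plus the
 * per-class options for a class guaranteed ~1 Mbit/s that may borrow
 * up to ~2 Mbit/s.  `buffer`/`cbuffer` hold the bursts converted to
 * transmission time by user space, which is left out here.
 *
 *	struct tc_htb_glob htb_glob = {
 *		.version      = TC_HTB_PROTOVER,
 *		.rate2quantum = 10,		// quantum defaults to rate/10
 *		.defcls       = 0x20,		// default class minor number
 *	};
 *
 *	struct tc_htb_opt htb_cls = {
 *		.rate = { .rate = 125000 },	// ~1 Mbit/s, bytes per second
 *		.ceil = { .rate = 250000 },	// ~2 Mbit/s ceiling
 *		.prio = 0,			// highest borrowing priority
 *	};
 */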
enum
{
	TCA_HTB_UNSPEC,
	TCA_HTB_PARMS,
	TCA_HTB_INIT,
	TCA_HTB_CTAB,
	TCA_HTB_RTAB,
	__TCA_HTB_MAX,
};

#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)

struct tc_htb_xstats
{
	__u32 lends;
	__u32 borrows;
	__u32 giants;	/* too big packets (rate will not be accurate) */
	__u32 tokens;
	__u32 ctokens;
};

/* HFSC section */

struct tc_hfsc_qopt
{
	__u16	defcls;		/* default class */
};

struct tc_service_curve
{
	__u32	m1;		/* slope of the first segment in bps */
	__u32	d;		/* x-projection of the first segment in us */
	__u32	m2;		/* slope of the second segment in bps */
};
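
/* Illustrative sketch (values are examples only, units as documented
 * above): a two-piece service curve that serves at m1 for the first
 * d microseconds of backlog and at m2 afterwards, e.g. a burst of
 * higher rate for 50 ms before settling at the long-term rate.
 *
 *	struct tc_service_curve sc = {
 *		.m1 = 2000000,	// slope of the first segment
 *		.d  = 50000,	// first segment lasts 50 ms (50000 us)
 *		.m2 = 1000000,	// long-term slope
 *	};
 */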

struct tc_hfsc_stats
{
	__u64	work;		/* total work done */
	__u64	rtwork;		/* work done by real-time criteria */
	__u32	period;		/* current period */
	__u32	level;		/* class level in hierarchy */
};

enum
{
	TCA_HFSC_UNSPEC,
	TCA_HFSC_RSC,
	TCA_HFSC_FSC,
	TCA_HFSC_USC,
	__TCA_HFSC_MAX,
};

#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)


/* CBQ section */

#define TC_CBQ_MAXPRIO		8
#define TC_CBQ_MAXLEVEL		8
#define TC_CBQ_DEF_EWMA		5

struct tc_cbq_lssopt
{
	unsigned char	change;
	unsigned char	flags;
#define TCF_CBQ_LSS_BOUNDED	1
#define TCF_CBQ_LSS_ISOLATED	2
	unsigned char	ewma_log;
	unsigned char	level;
#define TCF_CBQ_LSS_FLAGS	1
#define TCF_CBQ_LSS_EWMA	2
#define TCF_CBQ_LSS_MAXIDLE	4
#define TCF_CBQ_LSS_MINIDLE	8
#define TCF_CBQ_LSS_OFFTIME	0x10
#define TCF_CBQ_LSS_AVPKT	0x20
	__u32		maxidle;
	__u32		minidle;
	__u32		offtime;
	__u32		avpkt;
};

struct tc_cbq_wrropt
{
	unsigned char	flags;
	unsigned char	priority;
	unsigned char	cpriority;
	unsigned char	__reserved;
	__u32		allot;
	__u32		weight;
};
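
/* Illustrative sketch (values are examples only): weighted round-robin
 * parameters for one CBQ class at priority 1, with an allotment of one
 * full-sized frame per round and a weight that user space customarily
 * derives from the class bandwidth share.
 *
 *	struct tc_cbq_wrropt cbq_wrr = {
 *		.priority = 1,		// scheduling priority of this class
 *		.allot    = 1514,	// bytes served per round
 *		.weight   = 12500,	// relative share (example value)
 *	};
 */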

struct tc_cbq_ovl
{
	unsigned char	strategy;
#define	TC_CBQ_OVL_CLASSIC	0
#define	TC_CBQ_OVL_DELAY	1
#define	TC_CBQ_OVL_LOWPRIO	2
#define	TC_CBQ_OVL_DROP		3
#define	TC_CBQ_OVL_RCLASSIC	4
	unsigned char	priority2;
	__u16		pad;
	__u32		penalty;
};

struct tc_cbq_police
{
	unsigned char	police;
	unsigned char	__res1;
	unsigned short	__res2;
};

struct tc_cbq_fopt
{
	__u32		split;
	__u32		defmap;
	__u32		defchange;
};

struct tc_cbq_xstats
{
	__u32		borrows;
	__u32		overactions;
	__s32		avgidle;
	__s32		undertime;
};

enum
{
	TCA_CBQ_UNSPEC,
	TCA_CBQ_LSSOPT,
	TCA_CBQ_WRROPT,
	TCA_CBQ_FOPT,
	TCA_CBQ_OVL_STRATEGY,
	TCA_CBQ_RATE,
	TCA_CBQ_RTAB,
	TCA_CBQ_POLICE,
	__TCA_CBQ_MAX,
};

#define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)

/* dsmark section */

enum {
	TCA_DSMARK_UNSPEC,
	TCA_DSMARK_INDICES,
	TCA_DSMARK_DEFAULT_INDEX,
	TCA_DSMARK_SET_TC_INDEX,
	TCA_DSMARK_MASK,
	TCA_DSMARK_VALUE,
	__TCA_DSMARK_MAX,
};

#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)

/* ATM section */

enum {
	TCA_ATM_UNSPEC,
	TCA_ATM_FD,		/* file/socket descriptor */
	TCA_ATM_PTR,		/* pointer to descriptor - later */
	TCA_ATM_HDR,		/* LL header */
	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP)  */
	TCA_ATM_ADDR,		/* PVC address (for output only) */
	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
	__TCA_ATM_MAX,
};

#define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)

/* Network emulator */

enum
{
	TCA_NETEM_UNSPEC,
	TCA_NETEM_CORR,
	TCA_NETEM_DELAY_DIST,
	TCA_NETEM_REORDER,
	TCA_NETEM_CORRUPT,
	__TCA_NETEM_MAX,
};

#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)

struct tc_netem_qopt
{
	__u32	latency;	/* added delay (us) */
	__u32   limit;		/* fifo limit (packets) */
	__u32	loss;		/* random packet loss (0=none ~0=100%) */
	__u32	gap;		/* re-ordering gap (0 for none) */
	__u32   duplicate;	/* random packet dup  (0=none ~0=100%) */
	__u32	jitter;		/* random jitter in latency (us) */
};
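
/* Illustrative sketch (values are examples only): netem options adding
 * 100 ms of delay with 10 ms of jitter and roughly 1% random loss.
 * `loss` and `duplicate` are fractions of the full 32-bit range, as
 * noted above; delay and jitter follow the units given in the comments
 * (user space tools convert them to the scheduler's clock as needed).
 *
 *	struct tc_netem_qopt netem_opt = {
 *		.latency = 100000,			// 100 ms, in us
 *		.jitter  = 10000,			// +/- 10 ms
 *		.limit   = 1000,			// queue limit, packets
 *		.loss    = (__u32)(0xFFFFFFFFu / 100),	// ~1% loss
 *	};
 */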

struct tc_netem_corr
{
	__u32	delay_corr;	/* delay correlation */
	__u32	loss_corr;	/* packet loss correlation */
	__u32	dup_corr;	/* duplicate correlation  */
};

struct tc_netem_reorder
{
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_corrupt
{
	__u32	probability;
	__u32	correlation;
};

#define NETEM_DIST_SCALE	8192

#endif