Home | History | Annotate | Download | only in linux
      1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
      2 #ifndef __LINUX_PKT_SCHED_H
      3 #define __LINUX_PKT_SCHED_H
      4 
      5 #include <linux/types.h>
      6 
      7 /* Logical priority bands not depending on specific packet scheduler.
      8    Every scheduler will map them to real traffic classes, if it has
      9    no more precise mechanism to classify packets.
     10 
     11    These numbers have no special meaning, though their coincidence
     12    with obsolete IPv6 values is not occasional :-). New IPv6 drafts
     13    preferred full anarchy inspired by diffserv group.
     14 
     15    Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
     16    class, actually, as rule it will be handled with more care than
     17    filler or even bulk.
     18  */
     19 
     20 #define TC_PRIO_BESTEFFORT		0
     21 #define TC_PRIO_FILLER			1
     22 #define TC_PRIO_BULK			2
     23 #define TC_PRIO_INTERACTIVE_BULK	4
     24 #define TC_PRIO_INTERACTIVE		6
     25 #define TC_PRIO_CONTROL			7
     26 
     27 #define TC_PRIO_MAX			15
     28 
     29 /* Generic queue statistics, available for all the elements.
     30    Particular schedulers may have also their private records.
     31  */
     32 
     33 struct tc_stats {
     34 	__u64	bytes;			/* Number of enqueued bytes */
     35 	__u32	packets;		/* Number of enqueued packets	*/
     36 	__u32	drops;			/* Packets dropped because of lack of resources */
     37 	__u32	overlimits;		/* Number of throttle events when this
     38 					 * flow goes out of allocated bandwidth */
     39 	__u32	bps;			/* Current flow byte rate */
     40 	__u32	pps;			/* Current flow packet rate */
     41 	__u32	qlen;
     42 	__u32	backlog;
     43 };
     44 
     45 struct tc_estimator {
     46 	signed char	interval;
     47 	unsigned char	ewma_log;
     48 };
     49 
     50 /* "Handles"
     51    ---------
     52 
     53     All the traffic control objects have 32bit identifiers, or "handles".
     54 
     55     They can be considered as opaque numbers from user API viewpoint,
     56     but actually they always consist of two fields: major and
     57     minor numbers, which are interpreted by kernel specially,
     58     that may be used by applications, though not recommended.
     59 
     60     F.e. qdisc handles always have minor number equal to zero,
     61     classes (or flows) have major equal to parent qdisc major, and
     62     minor uniquely identifying class inside qdisc.
     63 
     64     Macros to manipulate handles:
     65  */
     66 
     67 #define TC_H_MAJ_MASK (0xFFFF0000U)
     68 #define TC_H_MIN_MASK (0x0000FFFFU)
     69 #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
     70 #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
     71 #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
     72 
     73 #define TC_H_UNSPEC	(0U)
     74 #define TC_H_ROOT	(0xFFFFFFFFU)
     75 #define TC_H_INGRESS    (0xFFFFFFF1U)
     76 #define TC_H_CLSACT	TC_H_INGRESS
     77 
     78 #define TC_H_MIN_INGRESS	0xFFF2U
     79 #define TC_H_MIN_EGRESS		0xFFF3U
     80 
     81 /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
     82 enum tc_link_layer {
     83 	TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
     84 	TC_LINKLAYER_ETHERNET,
     85 	TC_LINKLAYER_ATM,
     86 };
     87 #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
     88 
     89 struct tc_ratespec {
     90 	unsigned char	cell_log;
     91 	__u8		linklayer; /* lower 4 bits */
     92 	unsigned short	overhead;
     93 	short		cell_align;
     94 	unsigned short	mpu;
     95 	__u32		rate;
     96 };
     97 
     98 #define TC_RTAB_SIZE	1024
     99 
    100 struct tc_sizespec {
    101 	unsigned char	cell_log;
    102 	unsigned char	size_log;
    103 	short		cell_align;
    104 	int		overhead;
    105 	unsigned int	linklayer;
    106 	unsigned int	mpu;
    107 	unsigned int	mtu;
    108 	unsigned int	tsize;
    109 };
    110 
    111 enum {
    112 	TCA_STAB_UNSPEC,
    113 	TCA_STAB_BASE,
    114 	TCA_STAB_DATA,
    115 	__TCA_STAB_MAX
    116 };
    117 
    118 #define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
    119 
    120 /* FIFO section */
    121 
    122 struct tc_fifo_qopt {
    123 	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
    124 };
    125 
    126 /* PRIO section */
    127 
    128 #define TCQ_PRIO_BANDS	16
    129 #define TCQ_MIN_PRIO_BANDS 2
    130 
    131 struct tc_prio_qopt {
    132 	int	bands;			/* Number of bands */
    133 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
    134 };
    135 
    136 /* MULTIQ section */
    137 
    138 struct tc_multiq_qopt {
    139 	__u16	bands;			/* Number of bands */
    140 	__u16	max_bands;		/* Maximum number of queues */
    141 };
    142 
    143 /* PLUG section */
    144 
    145 #define TCQ_PLUG_BUFFER                0
    146 #define TCQ_PLUG_RELEASE_ONE           1
    147 #define TCQ_PLUG_RELEASE_INDEFINITE    2
    148 #define TCQ_PLUG_LIMIT                 3
    149 
    150 struct tc_plug_qopt {
    151 	/* TCQ_PLUG_BUFFER: Inset a plug into the queue and
    152 	 *  buffer any incoming packets
    153 	 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
    154 	 *   to beginning of the next plug.
    155 	 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
    156 	 *   Stop buffering packets until the next TCQ_PLUG_BUFFER
    157 	 *   command is received (just act as a pass-thru queue).
    158 	 * TCQ_PLUG_LIMIT: Increase/decrease queue size
    159 	 */
    160 	int             action;
    161 	__u32           limit;
    162 };
    163 
    164 /* TBF section */
    165 
    166 struct tc_tbf_qopt {
    167 	struct tc_ratespec rate;
    168 	struct tc_ratespec peakrate;
    169 	__u32		limit;
    170 	__u32		buffer;
    171 	__u32		mtu;
    172 };
    173 
    174 enum {
    175 	TCA_TBF_UNSPEC,
    176 	TCA_TBF_PARMS,
    177 	TCA_TBF_RTAB,
    178 	TCA_TBF_PTAB,
    179 	TCA_TBF_RATE64,
    180 	TCA_TBF_PRATE64,
    181 	TCA_TBF_BURST,
    182 	TCA_TBF_PBURST,
    183 	TCA_TBF_PAD,
    184 	__TCA_TBF_MAX,
    185 };
    186 
    187 #define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
    188 
    189 
    190 /* TEQL section */
    191 
    192 /* TEQL does not require any parameters */
    193 
    194 /* SFQ section */
    195 
    196 struct tc_sfq_qopt {
    197 	unsigned	quantum;	/* Bytes per round allocated to flow */
    198 	int		perturb_period;	/* Period of hash perturbation */
    199 	__u32		limit;		/* Maximal packets in queue */
    200 	unsigned	divisor;	/* Hash divisor  */
    201 	unsigned	flows;		/* Maximal number of flows  */
    202 };
    203 
    204 struct tc_sfqred_stats {
    205 	__u32           prob_drop;      /* Early drops, below max threshold */
    206 	__u32           forced_drop;	/* Early drops, after max threshold */
    207 	__u32           prob_mark;      /* Marked packets, below max threshold */
    208 	__u32           forced_mark;    /* Marked packets, after max threshold */
    209 	__u32           prob_mark_head; /* Marked packets, below max threshold */
    210 	__u32           forced_mark_head;/* Marked packets, after max threshold */
    211 };
    212 
    213 struct tc_sfq_qopt_v1 {
    214 	struct tc_sfq_qopt v0;
    215 	unsigned int	depth;		/* max number of packets per flow */
    216 	unsigned int	headdrop;
    217 /* SFQRED parameters */
    218 	__u32		limit;		/* HARD maximal flow queue length (bytes) */
    219 	__u32		qth_min;	/* Min average length threshold (bytes) */
    220 	__u32		qth_max;	/* Max average length threshold (bytes) */
    221 	unsigned char   Wlog;		/* log(W)		*/
    222 	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
    223 	unsigned char   Scell_log;	/* cell size for idle damping */
    224 	unsigned char	flags;
    225 	__u32		max_P;		/* probability, high resolution */
    226 /* SFQRED stats */
    227 	struct tc_sfqred_stats stats;
    228 };
    229 
    230 
    231 struct tc_sfq_xstats {
    232 	__s32		allot;
    233 };
    234 
    235 /* RED section */
    236 
    237 enum {
    238 	TCA_RED_UNSPEC,
    239 	TCA_RED_PARMS,
    240 	TCA_RED_STAB,
    241 	TCA_RED_MAX_P,
    242 	__TCA_RED_MAX,
    243 };
    244 
    245 #define TCA_RED_MAX (__TCA_RED_MAX - 1)
    246 
    247 struct tc_red_qopt {
    248 	__u32		limit;		/* HARD maximal queue length (bytes)	*/
    249 	__u32		qth_min;	/* Min average length threshold (bytes) */
    250 	__u32		qth_max;	/* Max average length threshold (bytes) */
    251 	unsigned char   Wlog;		/* log(W)		*/
    252 	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
    253 	unsigned char   Scell_log;	/* cell size for idle damping */
    254 	unsigned char	flags;
    255 #define TC_RED_ECN		1
    256 #define TC_RED_HARDDROP		2
    257 #define TC_RED_ADAPTATIVE	4
    258 };
    259 
    260 struct tc_red_xstats {
    261 	__u32           early;          /* Early drops */
    262 	__u32           pdrop;          /* Drops due to queue limits */
    263 	__u32           other;          /* Drops due to drop() calls */
    264 	__u32           marked;         /* Marked packets */
    265 };
    266 
    267 /* GRED section */
    268 
    269 #define MAX_DPs 16
    270 
    271 enum {
    272        TCA_GRED_UNSPEC,
    273        TCA_GRED_PARMS,
    274        TCA_GRED_STAB,
    275        TCA_GRED_DPS,
    276        TCA_GRED_MAX_P,
    277        TCA_GRED_LIMIT,
    278        __TCA_GRED_MAX,
    279 };
    280 
    281 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
    282 
    283 struct tc_gred_qopt {
    284 	__u32		limit;        /* HARD maximal queue length (bytes)    */
    285 	__u32		qth_min;      /* Min average length threshold (bytes) */
    286 	__u32		qth_max;      /* Max average length threshold (bytes) */
    287 	__u32		DP;           /* up to 2^32 DPs */
    288 	__u32		backlog;
    289 	__u32		qave;
    290 	__u32		forced;
    291 	__u32		early;
    292 	__u32		other;
    293 	__u32		pdrop;
    294 	__u8		Wlog;         /* log(W)               */
    295 	__u8		Plog;         /* log(P_max/(qth_max-qth_min)) */
    296 	__u8		Scell_log;    /* cell size for idle damping */
    297 	__u8		prio;         /* prio of this VQ */
    298 	__u32		packets;
    299 	__u32		bytesin;
    300 };
    301 
    302 /* gred setup */
    303 struct tc_gred_sopt {
    304 	__u32		DPs;
    305 	__u32		def_DP;
    306 	__u8		grio;
    307 	__u8		flags;
    308 	__u16		pad1;
    309 };
    310 
    311 /* CHOKe section */
    312 
    313 enum {
    314 	TCA_CHOKE_UNSPEC,
    315 	TCA_CHOKE_PARMS,
    316 	TCA_CHOKE_STAB,
    317 	TCA_CHOKE_MAX_P,
    318 	__TCA_CHOKE_MAX,
    319 };
    320 
    321 #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
    322 
    323 struct tc_choke_qopt {
    324 	__u32		limit;		/* Hard queue length (packets)	*/
    325 	__u32		qth_min;	/* Min average threshold (packets) */
    326 	__u32		qth_max;	/* Max average threshold (packets) */
    327 	unsigned char   Wlog;		/* log(W)		*/
    328 	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
    329 	unsigned char   Scell_log;	/* cell size for idle damping */
    330 	unsigned char	flags;		/* see RED flags */
    331 };
    332 
    333 struct tc_choke_xstats {
    334 	__u32		early;          /* Early drops */
    335 	__u32		pdrop;          /* Drops due to queue limits */
    336 	__u32		other;          /* Drops due to drop() calls */
    337 	__u32		marked;         /* Marked packets */
    338 	__u32		matched;	/* Drops due to flow match */
    339 };
    340 
    341 /* HTB section */
    342 #define TC_HTB_NUMPRIO		8
    343 #define TC_HTB_MAXDEPTH		8
    344 #define TC_HTB_PROTOVER		3 /* the same as HTB and TC's major */
    345 
    346 struct tc_htb_opt {
    347 	struct tc_ratespec 	rate;
    348 	struct tc_ratespec 	ceil;
    349 	__u32	buffer;
    350 	__u32	cbuffer;
    351 	__u32	quantum;
    352 	__u32	level;		/* out only */
    353 	__u32	prio;
    354 };
    355 struct tc_htb_glob {
    356 	__u32 version;		/* to match HTB/TC */
    357     	__u32 rate2quantum;	/* bps->quantum divisor */
    358     	__u32 defcls;		/* default class number */
    359 	__u32 debug;		/* debug flags */
    360 
    361 	/* stats */
    362 	__u32 direct_pkts; /* count of non shaped packets */
    363 };
    364 enum {
    365 	TCA_HTB_UNSPEC,
    366 	TCA_HTB_PARMS,
    367 	TCA_HTB_INIT,
    368 	TCA_HTB_CTAB,
    369 	TCA_HTB_RTAB,
    370 	TCA_HTB_DIRECT_QLEN,
    371 	TCA_HTB_RATE64,
    372 	TCA_HTB_CEIL64,
    373 	TCA_HTB_PAD,
    374 	__TCA_HTB_MAX,
    375 };
    376 
    377 #define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
    378 
    379 struct tc_htb_xstats {
    380 	__u32 lends;
    381 	__u32 borrows;
    382 	__u32 giants;	/* too big packets (rate will not be accurate) */
    383 	__u32 tokens;
    384 	__u32 ctokens;
    385 };
    386 
    387 /* HFSC section */
    388 
    389 struct tc_hfsc_qopt {
    390 	__u16	defcls;		/* default class */
    391 };
    392 
    393 struct tc_service_curve {
    394 	__u32	m1;		/* slope of the first segment in bps */
    395 	__u32	d;		/* x-projection of the first segment in us */
    396 	__u32	m2;		/* slope of the second segment in bps */
    397 };
    398 
    399 struct tc_hfsc_stats {
    400 	__u64	work;		/* total work done */
    401 	__u64	rtwork;		/* work done by real-time criteria */
    402 	__u32	period;		/* current period */
    403 	__u32	level;		/* class level in hierarchy */
    404 };
    405 
    406 enum {
    407 	TCA_HFSC_UNSPEC,
    408 	TCA_HFSC_RSC,
    409 	TCA_HFSC_FSC,
    410 	TCA_HFSC_USC,
    411 	__TCA_HFSC_MAX,
    412 };
    413 
    414 #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
    415 
    416 
    417 /* CBQ section */
    418 
    419 #define TC_CBQ_MAXPRIO		8
    420 #define TC_CBQ_MAXLEVEL		8
    421 #define TC_CBQ_DEF_EWMA		5
    422 
    423 struct tc_cbq_lssopt {
    424 	unsigned char	change;
    425 	unsigned char	flags;
    426 #define TCF_CBQ_LSS_BOUNDED	1
    427 #define TCF_CBQ_LSS_ISOLATED	2
    428 	unsigned char  	ewma_log;
    429 	unsigned char  	level;
    430 #define TCF_CBQ_LSS_FLAGS	1
    431 #define TCF_CBQ_LSS_EWMA	2
    432 #define TCF_CBQ_LSS_MAXIDLE	4
    433 #define TCF_CBQ_LSS_MINIDLE	8
    434 #define TCF_CBQ_LSS_OFFTIME	0x10
    435 #define TCF_CBQ_LSS_AVPKT	0x20
    436 	__u32		maxidle;
    437 	__u32		minidle;
    438 	__u32		offtime;
    439 	__u32		avpkt;
    440 };
    441 
    442 struct tc_cbq_wrropt {
    443 	unsigned char	flags;
    444 	unsigned char	priority;
    445 	unsigned char	cpriority;
    446 	unsigned char	__reserved;
    447 	__u32		allot;
    448 	__u32		weight;
    449 };
    450 
    451 struct tc_cbq_ovl {
    452 	unsigned char	strategy;
    453 #define	TC_CBQ_OVL_CLASSIC	0
    454 #define	TC_CBQ_OVL_DELAY	1
    455 #define	TC_CBQ_OVL_LOWPRIO	2
    456 #define	TC_CBQ_OVL_DROP		3
    457 #define	TC_CBQ_OVL_RCLASSIC	4
    458 	unsigned char	priority2;
    459 	__u16		pad;
    460 	__u32		penalty;
    461 };
    462 
    463 struct tc_cbq_police {
    464 	unsigned char	police;
    465 	unsigned char	__res1;
    466 	unsigned short	__res2;
    467 };
    468 
    469 struct tc_cbq_fopt {
    470 	__u32		split;
    471 	__u32		defmap;
    472 	__u32		defchange;
    473 };
    474 
    475 struct tc_cbq_xstats {
    476 	__u32		borrows;
    477 	__u32		overactions;
    478 	__s32		avgidle;
    479 	__s32		undertime;
    480 };
    481 
    482 enum {
    483 	TCA_CBQ_UNSPEC,
    484 	TCA_CBQ_LSSOPT,
    485 	TCA_CBQ_WRROPT,
    486 	TCA_CBQ_FOPT,
    487 	TCA_CBQ_OVL_STRATEGY,
    488 	TCA_CBQ_RATE,
    489 	TCA_CBQ_RTAB,
    490 	TCA_CBQ_POLICE,
    491 	__TCA_CBQ_MAX,
    492 };
    493 
    494 #define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)
    495 
    496 /* dsmark section */
    497 
    498 enum {
    499 	TCA_DSMARK_UNSPEC,
    500 	TCA_DSMARK_INDICES,
    501 	TCA_DSMARK_DEFAULT_INDEX,
    502 	TCA_DSMARK_SET_TC_INDEX,
    503 	TCA_DSMARK_MASK,
    504 	TCA_DSMARK_VALUE,
    505 	__TCA_DSMARK_MAX,
    506 };
    507 
    508 #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
    509 
    510 /* ATM  section */
    511 
    512 enum {
    513 	TCA_ATM_UNSPEC,
    514 	TCA_ATM_FD,		/* file/socket descriptor */
    515 	TCA_ATM_PTR,		/* pointer to descriptor - later */
    516 	TCA_ATM_HDR,		/* LL header */
    517 	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP)  */
    518 	TCA_ATM_ADDR,		/* PVC address (for output only) */
    519 	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
    520 	__TCA_ATM_MAX,
    521 };
    522 
    523 #define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)
    524 
    525 /* Network emulator */
    526 
    527 enum {
    528 	TCA_NETEM_UNSPEC,
    529 	TCA_NETEM_CORR,
    530 	TCA_NETEM_DELAY_DIST,
    531 	TCA_NETEM_REORDER,
    532 	TCA_NETEM_CORRUPT,
    533 	TCA_NETEM_LOSS,
    534 	TCA_NETEM_RATE,
    535 	TCA_NETEM_ECN,
    536 	TCA_NETEM_RATE64,
    537 	TCA_NETEM_PAD,
    538 	__TCA_NETEM_MAX,
    539 };
    540 
    541 #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
    542 
    543 struct tc_netem_qopt {
    544 	__u32	latency;	/* added delay (us) */
    545 	__u32   limit;		/* fifo limit (packets) */
    546 	__u32	loss;		/* random packet loss (0=none ~0=100%) */
    547 	__u32	gap;		/* re-ordering gap (0 for none) */
    548 	__u32   duplicate;	/* random packet dup  (0=none ~0=100%) */
    549 	__u32	jitter;		/* random jitter in latency (us) */
    550 };
    551 
    552 struct tc_netem_corr {
    553 	__u32	delay_corr;	/* delay correlation */
    554 	__u32	loss_corr;	/* packet loss correlation */
    555 	__u32	dup_corr;	/* duplicate correlation  */
    556 };
    557 
    558 struct tc_netem_reorder {
    559 	__u32	probability;
    560 	__u32	correlation;
    561 };
    562 
    563 struct tc_netem_corrupt {
    564 	__u32	probability;
    565 	__u32	correlation;
    566 };
    567 
    568 struct tc_netem_rate {
    569 	__u32	rate;	/* byte/s */
    570 	__s32	packet_overhead;
    571 	__u32	cell_size;
    572 	__s32	cell_overhead;
    573 };
    574 
    575 enum {
    576 	NETEM_LOSS_UNSPEC,
    577 	NETEM_LOSS_GI,		/* General Intuitive - 4 state model */
    578 	NETEM_LOSS_GE,		/* Gilbert Elliot models */
    579 	__NETEM_LOSS_MAX
    580 };
    581 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
    582 
    583 /* State transition probabilities for 4 state model */
    584 struct tc_netem_gimodel {
    585 	__u32	p13;
    586 	__u32	p31;
    587 	__u32	p32;
    588 	__u32	p14;
    589 	__u32	p23;
    590 };
    591 
    592 /* Gilbert-Elliot models */
    593 struct tc_netem_gemodel {
    594 	__u32 p;
    595 	__u32 r;
    596 	__u32 h;
    597 	__u32 k1;
    598 };
    599 
    600 #define NETEM_DIST_SCALE	8192
    601 #define NETEM_DIST_MAX		16384
    602 
    603 /* DRR */
    604 
    605 enum {
    606 	TCA_DRR_UNSPEC,
    607 	TCA_DRR_QUANTUM,
    608 	__TCA_DRR_MAX
    609 };
    610 
    611 #define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)
    612 
    613 struct tc_drr_stats {
    614 	__u32	deficit;
    615 };
    616 
    617 /* MQPRIO */
    618 #define TC_QOPT_BITMASK 15
    619 #define TC_QOPT_MAX_QUEUE 16
    620 
    621 enum {
    622 	TC_MQPRIO_HW_OFFLOAD_NONE,	/* no offload requested */
    623 	TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload TCs, no queue counts */
    624 	__TC_MQPRIO_HW_OFFLOAD_MAX
    625 };
    626 
    627 #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
    628 
    629 struct tc_mqprio_qopt {
    630 	__u8	num_tc;
    631 	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
    632 	__u8	hw;
    633 	__u16	count[TC_QOPT_MAX_QUEUE];
    634 	__u16	offset[TC_QOPT_MAX_QUEUE];
    635 };
    636 
    637 /* SFB */
    638 
    639 enum {
    640 	TCA_SFB_UNSPEC,
    641 	TCA_SFB_PARMS,
    642 	__TCA_SFB_MAX,
    643 };
    644 
    645 #define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
    646 
    647 /*
    648  * Note: increment, decrement are Q0.16 fixed-point values.
    649  */
    650 struct tc_sfb_qopt {
    651 	__u32 rehash_interval;	/* delay between hash move, in ms */
    652 	__u32 warmup_time;	/* double buffering warmup time in ms (warmup_time < rehash_interval) */
    653 	__u32 max;		/* max len of qlen_min */
    654 	__u32 bin_size;		/* maximum queue length per bin */
    655 	__u32 increment;	/* probability increment, (d1 in Blue) */
    656 	__u32 decrement;	/* probability decrement, (d2 in Blue) */
    657 	__u32 limit;		/* max SFB queue length */
    658 	__u32 penalty_rate;	/* inelastic flows are rate limited to 'rate' pps */
    659 	__u32 penalty_burst;
    660 };
    661 
    662 struct tc_sfb_xstats {
    663 	__u32 earlydrop;
    664 	__u32 penaltydrop;
    665 	__u32 bucketdrop;
    666 	__u32 queuedrop;
    667 	__u32 childdrop; /* drops in child qdisc */
    668 	__u32 marked;
    669 	__u32 maxqlen;
    670 	__u32 maxprob;
    671 	__u32 avgprob;
    672 };
    673 
    674 #define SFB_MAX_PROB 0xFFFF
    675 
    676 /* QFQ */
    677 enum {
    678 	TCA_QFQ_UNSPEC,
    679 	TCA_QFQ_WEIGHT,
    680 	TCA_QFQ_LMAX,
    681 	__TCA_QFQ_MAX
    682 };
    683 
    684 #define TCA_QFQ_MAX	(__TCA_QFQ_MAX - 1)
    685 
    686 struct tc_qfq_stats {
    687 	__u32 weight;
    688 	__u32 lmax;
    689 };
    690 
    691 /* CODEL */
    692 
    693 enum {
    694 	TCA_CODEL_UNSPEC,
    695 	TCA_CODEL_TARGET,
    696 	TCA_CODEL_LIMIT,
    697 	TCA_CODEL_INTERVAL,
    698 	TCA_CODEL_ECN,
    699 	TCA_CODEL_CE_THRESHOLD,
    700 	__TCA_CODEL_MAX
    701 };
    702 
    703 #define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)
    704 
    705 struct tc_codel_xstats {
    706 	__u32	maxpacket; /* largest packet we've seen so far */
    707 	__u32	count;	   /* how many drops we've done since the last time we
    708 			    * entered dropping state
    709 			    */
    710 	__u32	lastcount; /* count at entry to dropping state */
    711 	__u32	ldelay;    /* in-queue delay seen by most recently dequeued packet */
    712 	__s32	drop_next; /* time to drop next packet */
    713 	__u32	drop_overlimit; /* number of time max qdisc packet limit was hit */
    714 	__u32	ecn_mark;  /* number of packets we ECN marked instead of dropped */
    715 	__u32	dropping;  /* are we in dropping state ? */
    716 	__u32	ce_mark;   /* number of CE marked packets because of ce_threshold */
    717 };
    718 
    719 /* FQ_CODEL */
    720 
    721 enum {
    722 	TCA_FQ_CODEL_UNSPEC,
    723 	TCA_FQ_CODEL_TARGET,
    724 	TCA_FQ_CODEL_LIMIT,
    725 	TCA_FQ_CODEL_INTERVAL,
    726 	TCA_FQ_CODEL_ECN,
    727 	TCA_FQ_CODEL_FLOWS,
    728 	TCA_FQ_CODEL_QUANTUM,
    729 	TCA_FQ_CODEL_CE_THRESHOLD,
    730 	TCA_FQ_CODEL_DROP_BATCH_SIZE,
    731 	TCA_FQ_CODEL_MEMORY_LIMIT,
    732 	__TCA_FQ_CODEL_MAX
    733 };
    734 
    735 #define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)
    736 
    737 enum {
    738 	TCA_FQ_CODEL_XSTATS_QDISC,
    739 	TCA_FQ_CODEL_XSTATS_CLASS,
    740 };
    741 
    742 struct tc_fq_codel_qd_stats {
    743 	__u32	maxpacket;	/* largest packet we've seen so far */
    744 	__u32	drop_overlimit; /* number of time max qdisc
    745 				 * packet limit was hit
    746 				 */
    747 	__u32	ecn_mark;	/* number of packets we ECN marked
    748 				 * instead of being dropped
    749 				 */
    750 	__u32	new_flow_count; /* number of time packets
    751 				 * created a 'new flow'
    752 				 */
    753 	__u32	new_flows_len;	/* count of flows in new list */
    754 	__u32	old_flows_len;	/* count of flows in old list */
    755 	__u32	ce_mark;	/* packets above ce_threshold */
    756 	__u32	memory_usage;	/* in bytes */
    757 	__u32	drop_overmemory;
    758 };
    759 
    760 struct tc_fq_codel_cl_stats {
    761 	__s32	deficit;
    762 	__u32	ldelay;		/* in-queue delay seen by most recently
    763 				 * dequeued packet
    764 				 */
    765 	__u32	count;
    766 	__u32	lastcount;
    767 	__u32	dropping;
    768 	__s32	drop_next;
    769 };
    770 
    771 struct tc_fq_codel_xstats {
    772 	__u32	type;
    773 	union {
    774 		struct tc_fq_codel_qd_stats qdisc_stats;
    775 		struct tc_fq_codel_cl_stats class_stats;
    776 	};
    777 };
    778 
    779 /* FQ */
    780 
    781 enum {
    782 	TCA_FQ_UNSPEC,
    783 
    784 	TCA_FQ_PLIMIT,		/* limit of total number of packets in queue */
    785 
    786 	TCA_FQ_FLOW_PLIMIT,	/* limit of packets per flow */
    787 
    788 	TCA_FQ_QUANTUM,		/* RR quantum */
    789 
    790 	TCA_FQ_INITIAL_QUANTUM,		/* RR quantum for new flow */
    791 
    792 	TCA_FQ_RATE_ENABLE,	/* enable/disable rate limiting */
    793 
    794 	TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
    795 
    796 	TCA_FQ_FLOW_MAX_RATE,	/* per flow max rate */
    797 
    798 	TCA_FQ_BUCKETS_LOG,	/* log2(number of buckets) */
    799 
    800 	TCA_FQ_FLOW_REFILL_DELAY,	/* flow credit refill delay in usec */
    801 
    802 	TCA_FQ_ORPHAN_MASK,	/* mask applied to orphaned skb hashes */
    803 
    804 	TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
    805 
    806 	__TCA_FQ_MAX
    807 };
    808 
    809 #define TCA_FQ_MAX	(__TCA_FQ_MAX - 1)
    810 
    811 struct tc_fq_qd_stats {
    812 	__u64	gc_flows;
    813 	__u64	highprio_packets;
    814 	__u64	tcp_retrans;
    815 	__u64	throttled;
    816 	__u64	flows_plimit;
    817 	__u64	pkts_too_long;
    818 	__u64	allocation_errors;
    819 	__s64	time_next_delayed_flow;
    820 	__u32	flows;
    821 	__u32	inactive_flows;
    822 	__u32	throttled_flows;
    823 	__u32	unthrottle_latency_ns;
    824 };
    825 
    826 /* Heavy-Hitter Filter */
    827 
    828 enum {
    829 	TCA_HHF_UNSPEC,
    830 	TCA_HHF_BACKLOG_LIMIT,
    831 	TCA_HHF_QUANTUM,
    832 	TCA_HHF_HH_FLOWS_LIMIT,
    833 	TCA_HHF_RESET_TIMEOUT,
    834 	TCA_HHF_ADMIT_BYTES,
    835 	TCA_HHF_EVICT_TIMEOUT,
    836 	TCA_HHF_NON_HH_WEIGHT,
    837 	__TCA_HHF_MAX
    838 };
    839 
    840 #define TCA_HHF_MAX	(__TCA_HHF_MAX - 1)
    841 
    842 struct tc_hhf_xstats {
    843 	__u32	drop_overlimit; /* number of times max qdisc packet limit
    844 				 * was hit
    845 				 */
    846 	__u32	hh_overlimit;   /* number of times max heavy-hitters was hit */
    847 	__u32	hh_tot_count;   /* number of captured heavy-hitters so far */
    848 	__u32	hh_cur_count;   /* number of current heavy-hitters */
    849 };
    850 
    851 /* PIE */
    852 enum {
    853 	TCA_PIE_UNSPEC,
    854 	TCA_PIE_TARGET,
    855 	TCA_PIE_LIMIT,
    856 	TCA_PIE_TUPDATE,
    857 	TCA_PIE_ALPHA,
    858 	TCA_PIE_BETA,
    859 	TCA_PIE_ECN,
    860 	TCA_PIE_BYTEMODE,
    861 	__TCA_PIE_MAX
    862 };
    863 #define TCA_PIE_MAX   (__TCA_PIE_MAX - 1)
    864 
    865 struct tc_pie_xstats {
    866 	__u32 prob;             /* current probability */
    867 	__u32 delay;            /* current delay in ms */
    868 	__u32 avg_dq_rate;      /* current average dq_rate in bits/pie_time */
    869 	__u32 packets_in;       /* total number of packets enqueued */
    870 	__u32 dropped;          /* packets dropped due to pie_action */
    871 	__u32 overlimit;        /* dropped due to lack of space in queue */
    872 	__u32 maxq;             /* maximum queue size */
    873 	__u32 ecn_mark;         /* packets marked with ecn*/
    874 };
    875 #endif
    876