Lines matching "+full:1 +full:q" in net/sched/sch_cake.c (the CAKE qdisc)

101  * @rec_inv_sqrt:	reciprocal value of sqrt(count) >> 1
261 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
300 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
306 1, 1, 1, 1, 1, 1, 1, 1,
316 2, 0, 1, 2, 4, 2, 2, 2,
317 1, 2, 1, 2, 1, 2, 1, 2,
327 0, 1, 0, 0, 2, 0, 0, 0,
328 1, 0, 0, 0, 0, 0, 0, 0,
338 0, 1, 0, 0, 2, 0, 0, 0,
339 1, 0, 0, 0, 0, 0, 0, 0,
361 static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
362 static const u8 bulk_order[] = {1, 0, 2, 3};
366 * values, particularly when stepping from count 1 to 2 or vice versa. Hence,
399 val = (val * invsqrt) >> (32 - 2 + 1); in cobalt_newton_step()
448 vars->count = 1; in cobalt_queue_full()
509 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close in cobalt_should_drop()
530 vars->count = 1; in cobalt_should_drop()
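
The cobalt_newton_step() line above, val = (val * invsqrt) >> (32 - 2 + 1), is the tail of a Q32 fixed-point Newton iteration for 1/sqrt(count); the nearby comments explain that single steps are too coarse near count 1, which is why the '0' and '1' cache entries are pinned at 0xFFFFFFFF. A minimal standalone sketch of that iteration, assuming the usual x' = x * (3 - count * x^2) / 2 update and the Q32 encoding implied by the shifts (this is not the kernel code itself):

    #include <stdint.h>
    #include <stdio.h>

    /* One Newton step for 1/sqrt(count), with x held as a Q32 fraction:
     * x' = x * (3 - count * x^2) / 2.  The pre-shift by 2 keeps the next
     * multiply inside 64 bits; >> (32 - 2 + 1) restores Q32 and divides by 2.
     */
    static uint32_t newton_step(uint32_t invsqrt, uint32_t count)
    {
        uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;
        uint64_t val = (3ULL << 32) - ((uint64_t)count * invsqrt2);

        val >>= 2;
        val = (val * invsqrt) >> (32 - 2 + 1);
        return (uint32_t)val;
    }

    int main(void)
    {
        uint32_t x = 0xFFFFFFFFu;   /* cached value for count 1: ~1.0 in Q32 */

        /* Stepping from count 1 to count 2: the first step lands at ~0.5,
         * far from the true 1/sqrt(2) ~= 0.7071, which is why small counts
         * are served from a precomputed cache rather than one Newton step.
         */
        for (int i = 1; i <= 5; i++) {
            x = newton_step(x, 2);
            printf("step %d: %.4f\n", i, x / 4294967296.0);
        }
        return 0;
    }
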
633 static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q, in cake_dec_srchost_bulk_flow_count() argument
638 q->hosts[flow->srchost].srchost_bulk_flow_count)) in cake_dec_srchost_bulk_flow_count()
639 q->hosts[flow->srchost].srchost_bulk_flow_count--; in cake_dec_srchost_bulk_flow_count()
642 static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q, in cake_inc_srchost_bulk_flow_count() argument
647 q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES)) in cake_inc_srchost_bulk_flow_count()
648 q->hosts[flow->srchost].srchost_bulk_flow_count++; in cake_inc_srchost_bulk_flow_count()
651 static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q, in cake_dec_dsthost_bulk_flow_count() argument
656 q->hosts[flow->dsthost].dsthost_bulk_flow_count)) in cake_dec_dsthost_bulk_flow_count()
657 q->hosts[flow->dsthost].dsthost_bulk_flow_count--; in cake_dec_dsthost_bulk_flow_count()
660 static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q, in cake_inc_dsthost_bulk_flow_count() argument
665 q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES)) in cake_inc_dsthost_bulk_flow_count()
666 q->hosts[flow->dsthost].dsthost_bulk_flow_count++; in cake_inc_dsthost_bulk_flow_count()
669 static u16 cake_get_flow_quantum(struct cake_tin_data *q, in cake_get_flow_quantum() argument
673 u16 host_load = 1; in cake_get_flow_quantum()
677 q->hosts[flow->srchost].srchost_bulk_flow_count); in cake_get_flow_quantum()
681 q->hosts[flow->dsthost].dsthost_bulk_flow_count); in cake_get_flow_quantum()
686 return (q->flow_quantum * quantum_div[host_load] + in cake_get_flow_quantum()
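
cake_get_flow_quantum() above scales the per-flow quantum by the bulk-flow count of the busier of the packet's two hosts, using the quantum_div[] reciprocal table instead of a division on the fast path. The cake_init() loop at line 2746 in this listing suggests quantum_div[i] is filled with 65535 / i, and the truncated return at line 686 presumably continues with + (1 << 15)) >> 16 for rounding. A small standalone check of that arithmetic (the quantum value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CAKE_QUEUES 1024

    static uint16_t quantum_div[CAKE_QUEUES + 1];

    int main(void)
    {
        unsigned int flow_quantum = 1514;   /* illustrative per-flow quantum */

        /* reciprocal table, as hinted by the cake_init() loop above */
        for (int i = 1; i <= CAKE_QUEUES; i++)
            quantum_div[i] = 65535 / i;

        /* multiply-and-shift stands in for flow_quantum / host_load */
        for (int host_load = 1; host_load <= 4; host_load++) {
            unsigned int scaled = (flow_quantum * quantum_div[host_load] +
                                   (1 << 15)) >> 16;

            printf("host_load %d: quantum %u (exact %.1f)\n",
                   host_load, scaled, (double)flow_quantum / host_load);
        }
        return 0;
    }
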
690 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, in cake_hash() argument
766 flow_hash = flow_override - 1; in cake_hash()
770 dsthost_hash = host_override - 1; in cake_hash()
771 srchost_hash = host_override - 1; in cake_hash()
786 if (likely(q->tags[reduced_hash] == flow_hash && in cake_hash()
787 q->flows[reduced_hash].set)) { in cake_hash()
788 q->way_directs++; in cake_hash()
800 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
801 if (q->tags[outer_hash + k] == flow_hash) { in cake_hash()
803 q->way_hits++; in cake_hash()
805 if (!q->flows[outer_hash + k].set) { in cake_hash()
819 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
820 if (!q->flows[outer_hash + k].set) { in cake_hash()
821 q->way_misses++; in cake_hash()
831 q->way_collisions++; in cake_hash()
835 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { in cake_hash()
836 cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode); in cake_hash()
837 cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode); in cake_hash()
842 q->tags[reduced_hash] = flow_hash; in cake_hash()
849 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
850 if (q->hosts[outer_hash + k].srchost_tag == in cake_hash()
855 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
856 if (!q->hosts[outer_hash + k].srchost_bulk_flow_count) in cake_hash()
859 q->hosts[outer_hash + k].srchost_tag = srchost_hash; in cake_hash()
862 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash()
864 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
865 cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode); in cake_hash()
873 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
874 if (q->hosts[outer_hash + k].dsthost_tag == in cake_hash()
879 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
880 if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count) in cake_hash()
883 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; in cake_hash()
886 q->flows[reduced_hash].dsthost = dsthost_idx; in cake_hash()
888 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
889 cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode); in cake_hash()
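
The cake_hash() excerpts above walk a small set-associative table: a direct probe (way_directs), a scan of the CAKE_SET_WAYS ways of the set for a matching tag (way_hits), a scan for a free way (way_misses), and finally an eviction (way_collisions) that also fixes up the per-host bulk-flow counts. A stripped-down, self-contained sketch of that lookup order, with the host accounting and flow state left out; the table sizes and the outer_hash derivation are assumptions here, not copied from the source:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUES   1024
    #define SET_WAYS 8

    static uint32_t tags[QUEUES];
    static bool     used[QUEUES];

    static uint32_t flow_lookup(uint32_t flow_hash)
    {
        uint32_t reduced = flow_hash % QUEUES;
        uint32_t outer = reduced - (reduced % SET_WAYS); /* base of the set */
        uint32_t start = reduced % SET_WAYS;
        uint32_t i, k;

        /* 1. direct hit on the reduced hash */
        if (used[reduced] && tags[reduced] == flow_hash)
            return reduced;

        /* 2. scan the ways of this set for a matching tag */
        for (i = 0, k = start; i < SET_WAYS; i++, k = (k + 1) % SET_WAYS)
            if (used[outer + k] && tags[outer + k] == flow_hash)
                return outer + k;

        /* 3. otherwise look for a free way ... */
        for (i = 0, k = start; i < SET_WAYS; i++, k = (k + 1) % SET_WAYS)
            if (!used[outer + k])
                break;

        /* ... 4. and if none is free, evict the way the scan stopped on */
        reduced = outer + (k % SET_WAYS);
        tags[reduced] = flow_hash;
        used[reduced] = true;
        return reduced;
    }

    int main(void)
    {
        printf("first lookup : queue %u\n", (unsigned int)flow_lookup(0xdeadbeef));
        printf("second lookup: queue %u\n", (unsigned int)flow_lookup(0xdeadbeef));
        return 0;
    }
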
1005 const u8 *ptr = (const u8 *)(tcph + 1); in cake_get_tcpopt()
1040 * @return -1, 0 or 1 as normal compare functions
1060 return -1; in cake_tcph_sack_compare()
1063 return 1; in cake_tcph_sack_compare()
1077 return -1; in cake_tcph_sack_compare()
1099 return -1; in cake_tcph_sack_compare()
1109 return bytes_b > bytes_a ? 1 : 0; in cake_tcph_sack_compare()
1131 const u8 *ptr = (const u8 *)(tcph + 1); in cake_tcph_may_drop()
1138 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero) in cake_tcph_may_drop()
1198 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q, in cake_ack_filter() argument
1201 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; in cake_ack_filter()
1271 WARN_ON(1); /* shouldn't happen */ in cake_ack_filter()
1361 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off) in cake_calc_overhead() argument
1363 if (q->rate_flags & CAKE_FLAG_OVERHEAD) in cake_calc_overhead()
1366 if (q->max_netlen < len) in cake_calc_overhead()
1367 q->max_netlen = len; in cake_calc_overhead()
1368 if (q->min_netlen > len) in cake_calc_overhead()
1369 q->min_netlen = len; in cake_calc_overhead()
1371 len += q->rate_overhead; in cake_calc_overhead()
1373 if (len < q->rate_mpu) in cake_calc_overhead()
1374 len = q->rate_mpu; in cake_calc_overhead()
1376 if (q->atm_mode == CAKE_ATM_ATM) { in cake_calc_overhead()
1380 } else if (q->atm_mode == CAKE_ATM_PTM) { in cake_calc_overhead()
1388 if (q->max_adjlen < len) in cake_calc_overhead()
1389 q->max_adjlen = len; in cake_calc_overhead()
1390 if (q->min_adjlen > len) in cake_calc_overhead()
1391 q->min_adjlen = len; in cake_calc_overhead()
1396 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb) in cake_overhead() argument
1402 u16 segs = 1; in cake_overhead()
1404 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); in cake_overhead()
1407 return cake_calc_overhead(q, len, off); in cake_overhead()
1437 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
1439 return (cake_calc_overhead(q, len, off) * (segs - 1) + in cake_overhead()
1440 cake_calc_overhead(q, last_len, off)); in cake_overhead()
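
cake_calc_overhead() above tracks the min/max lengths, adds the configured overhead and MPU, and then compensates for the link layer; cake_overhead() applies the same calculation per GSO segment, treating the shorter last segment separately. The two framing modes, assuming the usual constants (48-byte cell payloads carried in 53-byte ATM cells, and one extra byte per started 64-byte block as a conservative stand-in for PTM's 64/65b encoding), work out as in this standalone check; overhead and MPU handling are omitted:

    #include <stdio.h>

    /* ATM: round up to whole 48-byte cell payloads, 53 bytes each on the wire */
    static unsigned int atm_adjust(unsigned int len)
    {
        return ((len + 47) / 48) * 53;
    }

    /* PTM: add one byte per 64 bytes or part thereof */
    static unsigned int ptm_adjust(unsigned int len)
    {
        return len + (len + 63) / 64;
    }

    int main(void)
    {
        unsigned int sizes[] = { 64, 100, 576, 1500 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%4u bytes -> ATM %4u, PTM %4u\n",
                   sizes[i], atm_adjust(sizes[i]), ptm_adjust(sizes[i]));
        return 0;
    }
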
1443 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j) in cake_heap_swap() argument
1445 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_swap()
1446 struct cake_heap_entry jj = q->overflow_heap[j]; in cake_heap_swap()
1448 q->overflow_heap[i] = jj; in cake_heap_swap()
1449 q->overflow_heap[j] = ii; in cake_heap_swap()
1451 q->tins[ii.t].overflow_idx[ii.b] = j; in cake_heap_swap()
1452 q->tins[jj.t].overflow_idx[jj.b] = i; in cake_heap_swap()
1455 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i) in cake_heap_get_backlog() argument
1457 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_get_backlog()
1459 return q->tins[ii.t].backlogs[ii.b]; in cake_heap_get_backlog()
1462 static void cake_heapify(struct cake_sched_data *q, u16 i) in cake_heapify() argument
1465 u32 mb = cake_heap_get_backlog(q, i); in cake_heapify()
1469 u32 l = m + m + 1; in cake_heapify()
1470 u32 r = l + 1; in cake_heapify()
1473 u32 lb = cake_heap_get_backlog(q, l); in cake_heapify()
1482 u32 rb = cake_heap_get_backlog(q, r); in cake_heapify()
1491 cake_heap_swap(q, i, m); in cake_heapify()
1499 static void cake_heapify_up(struct cake_sched_data *q, u16 i) in cake_heapify_up() argument
1502 u16 p = (i - 1) >> 1; in cake_heapify_up()
1503 u32 ib = cake_heap_get_backlog(q, i); in cake_heapify_up()
1504 u32 pb = cake_heap_get_backlog(q, p); in cake_heapify_up()
1507 cake_heap_swap(q, i, p); in cake_heapify_up()
1515 static int cake_advance_shaper(struct cake_sched_data *q, in cake_advance_shaper() argument
1525 if (q->rate_ns) { in cake_advance_shaper()
1527 u64 global_dur = (len * q->rate_ns) >> q->rate_shft; in cake_advance_shaper()
1528 u64 failsafe_dur = global_dur + (global_dur >> 1); in cake_advance_shaper()
1538 q->time_next_packet = ktime_add_ns(q->time_next_packet, in cake_advance_shaper()
1541 q->failsafe_next_packet = \ in cake_advance_shaper()
1542 ktime_add_ns(q->failsafe_next_packet, in cake_advance_shaper()
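
cake_advance_shaper() above is the core of the deficit-mode shaper: every transmitted (or, on ingress, dropped) packet pushes time_next_packet forward by its serialisation time, (len * rate_ns) >> rate_shft, and the failsafe clock by 1.5x that amount, global_dur + (global_dur >> 1). A trivial standalone rendering of that bookkeeping; the rate_ns/rate_shft pair here is made up, standing in for whatever cake_set_rate() derives from the configured bandwidth:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate_ns = 800 << 10;   /* hypothetical: ~800 ns per byte */
        unsigned int rate_shft = 10;
        uint64_t time_next_packet = 0, failsafe_next_packet = 0;
        unsigned int lens[] = { 1514, 590, 1514 };

        for (unsigned int i = 0; i < 3; i++) {
            uint64_t global_dur = ((uint64_t)lens[i] * rate_ns) >> rate_shft;
            uint64_t failsafe_dur = global_dur + (global_dur >> 1);

            time_next_packet += global_dur;
            failsafe_next_packet += failsafe_dur;
            printf("%4u bytes: +%llu ns (failsafe +%llu), next tx at %llu ns\n",
                   lens[i], (unsigned long long)global_dur,
                   (unsigned long long)failsafe_dur,
                   (unsigned long long)time_next_packet);
        }
        return 0;
    }
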
1550 struct cake_sched_data *q = qdisc_priv(sch); in cake_drop() local
1558 if (!q->overflow_timeout) { in cake_drop()
1561 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2 - 1; i >= 0; i--) in cake_drop()
1562 cake_heapify(q, i); in cake_drop()
1564 q->overflow_timeout = 65535; in cake_drop()
1567 qq = q->overflow_heap[0]; in cake_drop()
1571 b = &q->tins[tin]; in cake_drop()
1576 q->overflow_timeout = 0; in cake_drop()
1584 q->buffer_used -= skb->truesize; in cake_drop()
1592 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_drop()
1593 cake_advance_shaper(q, b, skb, now, true); in cake_drop()
1596 sch->q.qlen--; in cake_drop()
1597 qdisc_tree_reduce_backlog(sch, 1, len); in cake_drop()
1599 cake_heapify(q, 0); in cake_drop()
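
cake_heapify(), cake_heapify_up() and the build loop in cake_drop() above implement a 0-based binary heap keyed on per-queue backlog, so the drop path can always evict from the fattest queue sitting at index 0: children of node m live at 2m+1 and 2m+2, the parent of node i at (i - 1) >> 1, and the heap is built bottom-up from the last internal node. A toy standalone version over plain integers:

    #include <stdio.h>

    #define N 7

    static unsigned int backlog[N] = { 10, 80, 25, 5, 90, 30, 1 };

    static void sift_down(unsigned int *h, unsigned int n, unsigned int i)
    {
        for (;;) {
            unsigned int l = 2 * i + 1, r = l + 1, m = i;

            if (l < n && h[l] > h[m])
                m = l;
            if (r < n && h[r] > h[m])
                m = r;
            if (m == i)
                break;

            unsigned int tmp = h[i];

            h[i] = h[m];
            h[m] = tmp;
            i = m;      /* continue one level down, like cake_heapify() */
        }
    }

    int main(void)
    {
        /* bottom-up build, mirroring the cake_drop() loop that starts at
         * CAKE_MAX_TINS * CAKE_QUEUES / 2 - 1 and walks down to 0
         */
        for (int i = N / 2 - 1; i >= 0; i--)
            sift_down(backlog, N, (unsigned int)i);

        printf("largest backlog now at the root: %u\n", backlog[0]);
        return 0;
    }
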
1663 struct cake_sched_data *q = qdisc_priv(sch); in cake_select_tin() local
1672 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; in cake_select_tin()
1673 wash = !!(q->rate_flags & CAKE_FLAG_WASH); in cake_select_tin()
1677 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) in cake_select_tin()
1680 else if (mark && mark <= q->tin_cnt) in cake_select_tin()
1681 tin = q->tin_order[mark - 1]; in cake_select_tin()
1685 TC_H_MIN(skb->priority) <= q->tin_cnt) in cake_select_tin()
1686 tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; in cake_select_tin()
1691 tin = q->tin_index[dscp]; in cake_select_tin()
1693 if (unlikely(tin >= q->tin_cnt)) in cake_select_tin()
1697 return &q->tins[tin]; in cake_select_tin()
1703 struct cake_sched_data *q = qdisc_priv(sch); in cake_classify() local
1709 filter = rcu_dereference_bh(q->filter_list); in cake_classify()
1735 return cake_hash(*t, skb, flow_mode, flow, host) + 1; in cake_classify()
1743 struct cake_sched_data *q = qdisc_priv(sch); in cake_enqueue() local
1753 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); in cake_enqueue()
1768 if (!sch->q.qlen) { in cake_enqueue()
1769 if (ktime_before(q->time_next_packet, now)) { in cake_enqueue()
1770 q->failsafe_next_packet = now; in cake_enqueue()
1771 q->time_next_packet = now; in cake_enqueue()
1772 } else if (ktime_after(q->time_next_packet, now) && in cake_enqueue()
1773 ktime_after(q->failsafe_next_packet, now)) { in cake_enqueue()
1775 min(ktime_to_ns(q->time_next_packet), in cake_enqueue()
1777 q->failsafe_next_packet)); in cake_enqueue()
1779 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_enqueue()
1787 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { in cake_enqueue()
1800 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, in cake_enqueue()
1804 sch->q.qlen++; in cake_enqueue()
1807 q->buffer_used += segs->truesize; in cake_enqueue()
1816 q->avg_window_bytes += slen; in cake_enqueue()
1818 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); in cake_enqueue()
1823 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); in cake_enqueue()
1826 if (q->ack_filter) in cake_enqueue()
1827 ack = cake_ack_filter(q, flow); in cake_enqueue()
1834 q->buffer_used += skb->truesize - ack->truesize; in cake_enqueue()
1835 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_enqueue()
1836 cake_advance_shaper(q, b, ack, now, true); in cake_enqueue()
1838 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack)); in cake_enqueue()
1841 sch->q.qlen++; in cake_enqueue()
1842 q->buffer_used += skb->truesize; in cake_enqueue()
1851 q->avg_window_bytes += len; in cake_enqueue()
1854 if (q->overflow_timeout) in cake_enqueue()
1855 cake_heapify_up(q, b->overflow_idx[idx]); in cake_enqueue()
1858 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { in cake_enqueue()
1860 ktime_to_ns(ktime_sub(now, q->last_packet_time)); in cake_enqueue()
1866 q->avg_packet_interval = \ in cake_enqueue()
1867 cake_ewma(q->avg_packet_interval, in cake_enqueue()
1869 (packet_interval > q->avg_packet_interval ? in cake_enqueue()
1872 q->last_packet_time = now; in cake_enqueue()
1874 if (packet_interval > q->avg_packet_interval) { in cake_enqueue()
1877 q->avg_window_begin)); in cake_enqueue()
1878 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; in cake_enqueue()
1881 q->avg_peak_bandwidth = in cake_enqueue()
1882 cake_ewma(q->avg_peak_bandwidth, b, in cake_enqueue()
1883 b > q->avg_peak_bandwidth ? 2 : 8); in cake_enqueue()
1884 q->avg_window_bytes = 0; in cake_enqueue()
1885 q->avg_window_begin = now; in cake_enqueue()
1888 ktime_add_ms(q->last_reconfig_time, in cake_enqueue()
1890 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; in cake_enqueue()
1895 q->avg_window_bytes = 0; in cake_enqueue()
1896 q->last_packet_time = now; in cake_enqueue()
1910 flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode); in cake_enqueue()
1919 cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_enqueue()
1920 cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_enqueue()
1923 if (q->buffer_used > q->buffer_max_used) in cake_enqueue()
1924 q->buffer_max_used = q->buffer_used; in cake_enqueue()
1926 if (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1929 while (q->buffer_used > q->buffer_limit) { in cake_enqueue()
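
The CAKE_FLAG_AUTORATE_INGRESS block in cake_enqueue() above estimates the incoming rate with shift-based EWMAs: window bandwidth is folded into avg_peak_bandwidth with shift 2 when the sample is higher and shift 8 when it is lower (line 1883), and the shaper rate is then set to 15/16 of that estimate (line 1890). A standalone sketch of that smoothing, assuming cake_ewma(avg, sample, shift) has the usual "avg -= avg >> shift; avg += sample >> shift" form:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ewma(uint64_t avg, uint64_t sample, unsigned int shift)
    {
        avg -= avg >> shift;
        avg += sample >> shift;
        return avg;
    }

    int main(void)
    {
        uint64_t peak = 1000000;    /* running estimate, arbitrary units */
        uint64_t sample = 8000000;  /* sustained higher measured bandwidth */

        for (int i = 1; i <= 5; i++) {
            /* rise fast (shift 2), decay slowly (shift 8) */
            peak = ewma(peak, sample, sample > peak ? 2 : 8);
            printf("window %d: peak estimate %8llu, shaper rate %8llu\n",
                   i, (unsigned long long)peak,
                   (unsigned long long)((peak * 15) >> 4));
        }
        return 0;
    }
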
1940 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue_one() local
1941 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue_one()
1942 struct cake_flow *flow = &b->flows[q->cur_flow]; in cake_dequeue_one()
1949 b->backlogs[q->cur_flow] -= len; in cake_dequeue_one()
1952 q->buffer_used -= skb->truesize; in cake_dequeue_one()
1953 sch->q.qlen--; in cake_dequeue_one()
1955 if (q->overflow_timeout) in cake_dequeue_one()
1956 cake_heapify(q, b->overflow_idx[q->cur_flow]); in cake_dequeue_one()
1964 struct cake_sched_data *q = qdisc_priv(sch); in cake_clear_tin() local
1967 q->cur_tin = tin; in cake_clear_tin()
1968 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
1975 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue() local
1976 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue()
1987 if (!sch->q.qlen) in cake_dequeue()
1991 if (ktime_after(q->time_next_packet, now) && in cake_dequeue()
1992 ktime_after(q->failsafe_next_packet, now)) { in cake_dequeue()
1993 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
1994 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
1997 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
2002 if (!q->rate_ns) { in cake_dequeue()
2015 q->cur_tin++; in cake_dequeue()
2017 if (q->cur_tin >= q->tin_cnt) { in cake_dequeue()
2018 q->cur_tin = 0; in cake_dequeue()
2019 b = q->tins; in cake_dequeue()
2022 /* It's possible for q->qlen to be in cake_dequeue()
2041 for (tin = 0; tin < q->tin_cnt; tin++) { in cake_dequeue()
2042 b = q->tins + tin; in cake_dequeue()
2056 q->cur_tin = best_tin; in cake_dequeue()
2057 b = q->tins + best_tin; in cake_dequeue()
2079 q->cur_flow = flow - b->flows; in cake_dequeue()
2093 cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2094 cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2106 flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode); in cake_dequeue()
2113 while (1) { in cake_dequeue()
2130 cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2131 cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2149 cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2150 cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2161 !!(q->rate_flags & in cake_dequeue()
2168 if (q->rate_flags & CAKE_FLAG_INGRESS) { in cake_dequeue()
2169 len = cake_advance_shaper(q, b, skb, in cake_dequeue()
2176 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); in cake_dequeue()
2179 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_dequeue()
2194 len = cake_advance_shaper(q, b, skb, now, false); in cake_dequeue()
2198 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) { in cake_dequeue()
2199 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
2200 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
2202 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
2203 } else if (!sch->q.qlen) { in cake_dequeue()
2206 for (i = 0; i < q->tin_cnt; i++) { in cake_dequeue()
2207 if (q->tins[i].decaying_flow_count) { in cake_dequeue()
2210 q->tins[i].cparams.target); in cake_dequeue()
2212 qdisc_watchdog_schedule_ns(&q->watchdog, in cake_dequeue()
2219 if (q->overflow_timeout) in cake_dequeue()
2220 q->overflow_timeout--; in cake_dequeue()
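
cake_dequeue() above runs deficit round robin at two levels, tins weighted by tin_quantum and flows by cake_get_flow_quantum(), with COBALT ruling on each dequeued packet. The toy loop below keeps only the flow-level deficit idea: a flow may send while its deficit is positive, and an exhausted flow is topped up by one quantum and revisited on a later pass. The sparse/bulk/decaying flow sets, host accounting, shaper and COBALT are all left out.

    #include <stdio.h>

    #define NFLOWS 3

    struct toy_flow {
        int deficit;    /* bytes this flow may still send this round */
        int backlog;    /* packets waiting */
        int pkt_len;    /* every packet in this flow has this size */
    };

    int main(void)
    {
        struct toy_flow flows[NFLOWS] = {
            { 0, 4, 1514 },     /* bulk flow, full-size packets */
            { 0, 6, 120 },      /* lighter flow, small packets */
            { 0, 2, 1514 },
        };
        int quantum = 1514;     /* per-round byte allowance, cf. flow_quantum */
        int left = 4 + 6 + 2;   /* total packets queued */
        int i = 0;

        while (left > 0) {
            struct toy_flow *f = &flows[i];

            if (!f->backlog || f->deficit <= 0) {
                if (f->backlog)
                    f->deficit += quantum;  /* gets another turn next round */
                i = (i + 1) % NFLOWS;
                continue;
            }
            f->deficit -= f->pkt_len;
            f->backlog--;
            left--;
            printf("flow %d sends %4d bytes (deficit now %5d)\n",
                   i, f->pkt_len, f->deficit);
        }
        return 0;
    }
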
2227 struct cake_sched_data *q = qdisc_priv(sch); in cake_reset() local
2230 if (!q->tins) in cake_reset()
2237 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2276 rate_ns >>= 1; in cake_set_rate()
2292 b->cparams.p_inc = 1 << 24; /* 1/256 */ in cake_set_rate()
2293 b->cparams.p_dec = 1 << 20; /* 1/4096 */ in cake_set_rate()
2298 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_besteffort() local
2299 struct cake_tin_data *b = &q->tins[0]; in cake_config_besteffort()
2301 u64 rate = q->rate_bps; in cake_config_besteffort()
2303 q->tin_cnt = 1; in cake_config_besteffort()
2305 q->tin_index = besteffort; in cake_config_besteffort()
2306 q->tin_order = normal_order; in cake_config_besteffort()
2309 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_besteffort()
2318 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_precedence() local
2320 u64 rate = q->rate_bps; in cake_config_precedence()
2324 q->tin_cnt = 8; in cake_config_precedence()
2325 q->tin_index = precedence; in cake_config_precedence()
2326 q->tin_order = normal_order; in cake_config_precedence()
2328 for (i = 0; i < q->tin_cnt; i++) { in cake_config_precedence()
2329 struct cake_tin_data *b = &q->tins[i]; in cake_config_precedence()
2331 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_precedence()
2332 us_to_ns(q->interval)); in cake_config_precedence()
2334 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_precedence()
2353 * Assured Forwarding 1 (AF1x) - x3
2357 * Precedence Class 1 (CS1)
2407 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv8() local
2409 u64 rate = q->rate_bps; in cake_config_diffserv8()
2413 q->tin_cnt = 8; in cake_config_diffserv8()
2416 q->tin_index = diffserv8; in cake_config_diffserv8()
2417 q->tin_order = normal_order; in cake_config_diffserv8()
2420 for (i = 0; i < q->tin_cnt; i++) { in cake_config_diffserv8()
2421 struct cake_tin_data *b = &q->tins[i]; in cake_config_diffserv8()
2423 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_diffserv8()
2424 us_to_ns(q->interval)); in cake_config_diffserv8()
2426 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_diffserv8()
2451 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv4() local
2453 u64 rate = q->rate_bps; in cake_config_diffserv4()
2456 q->tin_cnt = 4; in cake_config_diffserv4()
2459 q->tin_index = diffserv4; in cake_config_diffserv4()
2460 q->tin_order = bulk_order; in cake_config_diffserv4()
2463 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv4()
2464 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2465 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv4()
2466 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2467 cake_set_rate(&q->tins[2], rate >> 1, mtu, in cake_config_diffserv4()
2468 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2469 cake_set_rate(&q->tins[3], rate >> 2, mtu, in cake_config_diffserv4()
2470 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2473 q->tins[0].tin_quantum = quantum; in cake_config_diffserv4()
2474 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv4()
2475 q->tins[2].tin_quantum = quantum >> 1; in cake_config_diffserv4()
2476 q->tins[3].tin_quantum = quantum >> 2; in cake_config_diffserv4()
2488 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv3() local
2490 u64 rate = q->rate_bps; in cake_config_diffserv3()
2493 q->tin_cnt = 3; in cake_config_diffserv3()
2496 q->tin_index = diffserv3; in cake_config_diffserv3()
2497 q->tin_order = bulk_order; in cake_config_diffserv3()
2500 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv3()
2501 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2502 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv3()
2503 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2504 cake_set_rate(&q->tins[2], rate >> 2, mtu, in cake_config_diffserv3()
2505 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2508 q->tins[0].tin_quantum = quantum; in cake_config_diffserv3()
2509 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv3()
2510 q->tins[2].tin_quantum = quantum >> 2; in cake_config_diffserv3()
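
The diffserv4/diffserv3 setup above derives each tin's shaped threshold and DRR weight from the base rate and a base quantum by plain right shifts: >> 4 is 1/16, >> 1 is 1/2, >> 2 is 1/4, and bulk_order = {1, 0, 2, 3} earlier in the listing is the matching priority order. A quick check of those shares for an illustrative 100 Mbit/s base rate; the base quantum of 1024 is an assumption here, and which tin carries which traffic class follows the diffserv comments in the source, not this sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate = 100000000ULL;   /* illustrative base rate */
        unsigned int quantum = 1024;    /* assumed base tin quantum */
        /* per-tin shifts as in cake_config_diffserv4():
         * tins[0], tins[1] >> 4, tins[2] >> 1, tins[3] >> 2
         */
        unsigned int shift[4] = { 0, 4, 1, 2 };

        for (int t = 0; t < 4; t++)
            printf("tin %d: threshold %9llu (1/%-2u of rate), quantum %4u\n",
                   t, (unsigned long long)(rate >> shift[t]),
                   1u << shift[t], quantum >> shift[t]);
        return 0;
    }
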
2517 struct cake_sched_data *q = qdisc_priv(sch); in cake_reconfigure() local
2520 switch (q->tin_mode) { in cake_reconfigure()
2543 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { in cake_reconfigure()
2545 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; in cake_reconfigure()
2548 q->rate_ns = q->tins[ft].tin_rate_ns; in cake_reconfigure()
2549 q->rate_shft = q->tins[ft].tin_rate_shft; in cake_reconfigure()
2551 if (q->buffer_config_limit) { in cake_reconfigure()
2552 q->buffer_limit = q->buffer_config_limit; in cake_reconfigure()
2553 } else if (q->rate_bps) { in cake_reconfigure()
2554 u64 t = q->rate_bps * q->interval; in cake_reconfigure()
2557 q->buffer_limit = max_t(u32, t, 4U << 20); in cake_reconfigure()
2559 q->buffer_limit = ~0; in cake_reconfigure()
2564 q->buffer_limit = min(q->buffer_limit, in cake_reconfigure()
2566 q->buffer_config_limit)); in cake_reconfigure()
2572 struct cake_sched_data *q = qdisc_priv(sch); in cake_change() local
2573 struct nlattr *tb[TCA_CAKE_MAX + 1]; in cake_change()
2583 flow_mode = q->flow_mode; in cake_change()
2597 WRITE_ONCE(q->rate_bps, in cake_change()
2601 WRITE_ONCE(q->tin_mode, in cake_change()
2604 rate_flags = q->rate_flags; in cake_change()
2618 WRITE_ONCE(q->atm_mode, in cake_change()
2622 WRITE_ONCE(q->rate_overhead, in cake_change()
2626 q->max_netlen = 0; in cake_change()
2627 q->max_adjlen = 0; in cake_change()
2628 q->min_netlen = ~0; in cake_change()
2629 q->min_adjlen = ~0; in cake_change()
2635 q->max_netlen = 0; in cake_change()
2636 q->max_adjlen = 0; in cake_change()
2637 q->min_netlen = ~0; in cake_change()
2638 q->min_adjlen = ~0; in cake_change()
2642 WRITE_ONCE(q->rate_mpu, in cake_change()
2648 WRITE_ONCE(q->interval, max(interval, 1U)); in cake_change()
2654 WRITE_ONCE(q->target, max(target, 1U)); in cake_change()
2672 WRITE_ONCE(q->ack_filter, in cake_change()
2676 WRITE_ONCE(q->buffer_config_limit, in cake_change()
2687 WRITE_ONCE(q->fwmark_mask, nla_get_u32(tb[TCA_CAKE_FWMARK])); in cake_change()
2688 WRITE_ONCE(q->fwmark_shft, in cake_change()
2689 q->fwmark_mask ? __ffs(q->fwmark_mask) : 0); in cake_change()
2692 WRITE_ONCE(q->rate_flags, rate_flags); in cake_change()
2693 WRITE_ONCE(q->flow_mode, flow_mode); in cake_change()
2694 if (q->tins) { in cake_change()
2705 struct cake_sched_data *q = qdisc_priv(sch); in cake_destroy() local
2707 qdisc_watchdog_cancel(&q->watchdog); in cake_destroy()
2708 tcf_block_put(q->block); in cake_destroy()
2709 kvfree(q->tins); in cake_destroy()
2715 struct cake_sched_data *q = qdisc_priv(sch); in cake_init() local
2719 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; in cake_init()
2720 q->flow_mode = CAKE_FLOW_TRIPLE; in cake_init()
2722 q->rate_bps = 0; /* unlimited by default */ in cake_init()
2724 q->interval = 100000; /* 100ms default */ in cake_init()
2725 q->target = 5000; /* 5ms: codel RFC argues in cake_init()
2728 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; in cake_init()
2729 q->cur_tin = 0; in cake_init()
2730 q->cur_flow = 0; in cake_init()
2732 qdisc_watchdog_init(&q->watchdog, sch); in cake_init()
2741 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in cake_init()
2746 for (i = 1; i <= CAKE_QUEUES; i++) in cake_init()
2749 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), in cake_init()
2751 if (!q->tins) in cake_init()
2755 struct cake_tin_data *b = q->tins + i; in cake_init()
2771 q->overflow_heap[k].t = i; in cake_init()
2772 q->overflow_heap[k].b = j; in cake_init()
2778 q->avg_peak_bandwidth = q->rate_bps; in cake_init()
2779 q->min_netlen = ~0; in cake_init()
2780 q->min_adjlen = ~0; in cake_init()
2786 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump() local
2796 READ_ONCE(q->rate_bps), TCA_CAKE_PAD)) in cake_dump()
2799 flow_mode = READ_ONCE(q->flow_mode); in cake_dump()
2803 if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval))) in cake_dump()
2806 if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target))) in cake_dump()
2810 READ_ONCE(q->buffer_config_limit))) in cake_dump()
2813 rate_flags = READ_ONCE(q->rate_flags); in cake_dump()
2822 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter))) in cake_dump()
2829 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode))) in cake_dump()
2836 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, READ_ONCE(q->rate_overhead))) in cake_dump()
2843 if (nla_put_u32(skb, TCA_CAKE_ATM, READ_ONCE(q->atm_mode))) in cake_dump()
2846 if (nla_put_u32(skb, TCA_CAKE_MPU, READ_ONCE(q->rate_mpu))) in cake_dump()
2853 if (nla_put_u32(skb, TCA_CAKE_FWMARK, READ_ONCE(q->fwmark_mask))) in cake_dump()
2859 return -1; in cake_dump()
2865 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_stats() local
2870 return -1; in cake_dump_stats()
2882 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); in cake_dump_stats()
2883 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); in cake_dump_stats()
2884 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); in cake_dump_stats()
2885 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); in cake_dump_stats()
2886 PUT_STAT_U32(MAX_NETLEN, q->max_netlen); in cake_dump_stats()
2887 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); in cake_dump_stats()
2888 PUT_STAT_U32(MIN_NETLEN, q->min_netlen); in cake_dump_stats()
2889 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); in cake_dump_stats()
2908 for (i = 0; i < q->tin_cnt; i++) { in cake_dump_stats()
2909 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_dump_stats()
2911 ts = nla_nest_start_noflag(d->skb, i + 1); in cake_dump_stats()
2958 return -1; in cake_dump_stats()
2977 static void cake_unbind(struct Qdisc *q, unsigned long cl) in cake_unbind() argument
2984 struct cake_sched_data *q = qdisc_priv(sch); in cake_tcf_block() local
2988 return q->block; in cake_tcf_block()
3001 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_class_stats() local
3005 u32 idx = cl - 1; in cake_dump_class_stats()
3007 if (idx < CAKE_QUEUES * q->tin_cnt) { in cake_dump_class_stats()
3009 &q->tins[q->tin_order[idx / CAKE_QUEUES]]; in cake_dump_class_stats()
3027 return -1; in cake_dump_class_stats()
3033 return -1; in cake_dump_class_stats()
3062 return -1; in cake_dump_class_stats()
3069 return -1; in cake_dump_class_stats()
3074 struct cake_sched_data *q = qdisc_priv(sch); in cake_walk() local
3080 for (i = 0; i < q->tin_cnt; i++) { in cake_walk()
3081 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_walk()
3088 if (!tc_qdisc_stats_dump(sch, i * CAKE_QUEUES + j + 1, in cake_walk()