mirror of
https://github.com/openwrt/openwrt.git
synced 2026-05-01 14:30:27 +04:00
105eb9ca95
Add the required patches in order to backport cake-mq from Linux 7.0. Many thanks to Toke Høiland-Jørgensen for providing the git trees with backports for both 6.12 and 6.18. Signed-off-by: Rui Salvaterra <rsalvaterra@gmail.com> Link: https://github.com/openwrt/openwrt/pull/21964 Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
637 lines
19 KiB
Diff
From 23090d3e9db80c2a374df1e636247eff9b6dd972 Mon Sep 17 00:00:00 2001
|
|
From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
|
|
Date: Fri, 9 Jan 2026 14:15:31 +0100
|
|
Subject: [PATCH 2/7] net/sched: sch_cake: Factor out config variables into
|
|
separate struct
|
|
MIME-Version: 1.0
|
|
Content-Type: text/plain; charset=UTF-8
|
|
Content-Transfer-Encoding: 8bit
|
|
|
|
Factor out all the user-configurable variables into a separate struct
|
|
and embed it into struct cake_sched_data. This is done in preparation
|
|
for sharing the configuration across multiple instances of cake in an mq
|
|
setup.
|
|
|
|
No functional change is intended with this patch.
|
|
|
|
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
|
|
Reviewed-by: Willem de Bruijn <willemb@google.com>
|
|
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
|
|
Link: https://patch.msgid.link/20260109-mq-cake-sub-qdisc-v8-2-8d613fece5d8@redhat.com
|
|
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
|
|
---
|
|
net/sched/sch_cake.c | 245 +++++++++++++++++++++++--------------------
|
|
1 file changed, 133 insertions(+), 112 deletions(-)
|
|
|
|
--- a/net/sched/sch_cake.c
|
|
+++ b/net/sched/sch_cake.c
|
|
@@ -197,40 +197,42 @@ struct cake_tin_data {
|
|
u32 way_collisions;
|
|
}; /* number of tins is small, so size of this struct doesn't matter much */
|
|
|
|
+struct cake_sched_config {
|
|
+ u64 rate_bps;
|
|
+ u64 interval;
|
|
+ u64 target;
|
|
+ u32 buffer_config_limit;
|
|
+ u32 fwmark_mask;
|
|
+ u16 fwmark_shft;
|
|
+ s16 rate_overhead;
|
|
+ u16 rate_mpu;
|
|
+ u16 rate_flags;
|
|
+ u8 tin_mode;
|
|
+ u8 flow_mode;
|
|
+ u8 atm_mode;
|
|
+ u8 ack_filter;
|
|
+};
|
|
+
|
|
struct cake_sched_data {
|
|
struct tcf_proto __rcu *filter_list; /* optional external classifier */
|
|
struct tcf_block *block;
|
|
struct cake_tin_data *tins;
|
|
+ struct cake_sched_config *config;
|
|
|
|
struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
|
|
- u16 overflow_timeout;
|
|
-
|
|
- u16 tin_cnt;
|
|
- u8 tin_mode;
|
|
- u8 flow_mode;
|
|
- u8 ack_filter;
|
|
- u8 atm_mode;
|
|
-
|
|
- u32 fwmark_mask;
|
|
- u16 fwmark_shft;
|
|
|
|
/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
|
|
- u16 rate_shft;
|
|
ktime_t time_next_packet;
|
|
ktime_t failsafe_next_packet;
|
|
u64 rate_ns;
|
|
- u64 rate_bps;
|
|
- u16 rate_flags;
|
|
- s16 rate_overhead;
|
|
- u16 rate_mpu;
|
|
- u64 interval;
|
|
- u64 target;
|
|
+ u16 rate_shft;
|
|
+ u16 overflow_timeout;
|
|
+ u16 tin_cnt;
|
|
|
|
/* resource tracking */
|
|
u32 buffer_used;
|
|
u32 buffer_max_used;
|
|
u32 buffer_limit;
|
|
- u32 buffer_config_limit;
|
|
|
|
/* indices for dequeue */
|
|
u16 cur_tin;
|
|
@@ -1195,7 +1197,7 @@ static bool cake_tcph_may_drop(const str
|
|
static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
|
|
struct cake_flow *flow)
|
|
{
|
|
- bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
|
|
+ bool aggressive = q->config->ack_filter == CAKE_ACK_AGGRESSIVE;
|
|
struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
|
|
struct sk_buff *skb_check, *skb_prev = NULL;
|
|
const struct ipv6hdr *ipv6h, *ipv6h_check;
|
|
@@ -1355,15 +1357,17 @@ static u64 cake_ewma(u64 avg, u64 sample
|
|
return avg;
|
|
}
|
|
|
|
-static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
|
|
+static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
|
|
{
|
|
+ struct cake_sched_config *q = qd->config;
|
|
+
|
|
if (q->rate_flags & CAKE_FLAG_OVERHEAD)
|
|
len -= off;
|
|
|
|
- if (q->max_netlen < len)
|
|
- q->max_netlen = len;
|
|
- if (q->min_netlen > len)
|
|
- q->min_netlen = len;
|
|
+ if (qd->max_netlen < len)
|
|
+ qd->max_netlen = len;
|
|
+ if (qd->min_netlen > len)
|
|
+ qd->min_netlen = len;
|
|
|
|
len += q->rate_overhead;
|
|
|
|
@@ -1382,10 +1386,10 @@ static u32 cake_calc_overhead(struct cak
|
|
len += (len + 63) / 64;
|
|
}
|
|
|
|
- if (q->max_adjlen < len)
|
|
- q->max_adjlen = len;
|
|
- if (q->min_adjlen > len)
|
|
- q->min_adjlen = len;
|
|
+ if (qd->max_adjlen < len)
|
|
+ qd->max_adjlen = len;
|
|
+ if (qd->min_adjlen > len)
|
|
+ qd->min_adjlen = len;
|
|
|
|
return len;
|
|
}
|
|
@@ -1587,7 +1591,7 @@ static unsigned int cake_drop(struct Qdi
|
|
b->tin_dropped++;
|
|
sch->qstats.drops++;
|
|
|
|
- if (q->rate_flags & CAKE_FLAG_INGRESS)
|
|
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS)
|
|
cake_advance_shaper(q, b, skb, now, true);
|
|
|
|
__qdisc_drop(skb, to_free);
|
|
@@ -1657,7 +1661,8 @@ static u8 cake_handle_diffserv(struct sk
|
|
static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
|
|
struct sk_buff *skb)
|
|
{
|
|
- struct cake_sched_data *q = qdisc_priv(sch);
|
|
+ struct cake_sched_data *qd = qdisc_priv(sch);
|
|
+ struct cake_sched_config *q = qd->config;
|
|
u32 tin, mark;
|
|
bool wash;
|
|
u8 dscp;
|
|
@@ -1674,24 +1679,24 @@ static struct cake_tin_data *cake_select
|
|
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
|
|
tin = 0;
|
|
|
|
- else if (mark && mark <= q->tin_cnt)
|
|
- tin = q->tin_order[mark - 1];
|
|
+ else if (mark && mark <= qd->tin_cnt)
|
|
+ tin = qd->tin_order[mark - 1];
|
|
|
|
else if (TC_H_MAJ(skb->priority) == sch->handle &&
|
|
TC_H_MIN(skb->priority) > 0 &&
|
|
- TC_H_MIN(skb->priority) <= q->tin_cnt)
|
|
- tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
|
|
+ TC_H_MIN(skb->priority) <= qd->tin_cnt)
|
|
+ tin = qd->tin_order[TC_H_MIN(skb->priority) - 1];
|
|
|
|
else {
|
|
if (!wash)
|
|
dscp = cake_handle_diffserv(skb, wash);
|
|
- tin = q->tin_index[dscp];
|
|
+ tin = qd->tin_index[dscp];
|
|
|
|
- if (unlikely(tin >= q->tin_cnt))
|
|
+ if (unlikely(tin >= qd->tin_cnt))
|
|
tin = 0;
|
|
}
|
|
|
|
- return &q->tins[tin];
|
|
+ return &qd->tins[tin];
|
|
}
|
|
|
|
static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
|
|
@@ -1747,7 +1752,7 @@ static s32 cake_enqueue(struct sk_buff *
|
|
bool same_flow = false;
|
|
|
|
/* choose flow to insert into */
|
|
- idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
|
|
+ idx = cake_classify(sch, &b, skb, q->config->flow_mode, &ret);
|
|
if (idx == 0) {
|
|
if (ret & __NET_XMIT_BYPASS)
|
|
qdisc_qstats_drop(sch);
|
|
@@ -1782,7 +1787,7 @@ static s32 cake_enqueue(struct sk_buff *
|
|
if (unlikely(len > b->max_skblen))
|
|
b->max_skblen = len;
|
|
|
|
- if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
|
|
+ if (skb_is_gso(skb) && q->config->rate_flags & CAKE_FLAG_SPLIT_GSO) {
|
|
struct sk_buff *segs, *nskb;
|
|
netdev_features_t features = netif_skb_features(skb);
|
|
unsigned int slen = 0, numsegs = 0;
|
|
@@ -1823,7 +1828,7 @@ static s32 cake_enqueue(struct sk_buff *
|
|
get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
|
|
flow_queue_add(flow, skb);
|
|
|
|
- if (q->ack_filter)
|
|
+ if (q->config->ack_filter)
|
|
ack = cake_ack_filter(q, flow);
|
|
|
|
if (ack) {
|
|
@@ -1832,7 +1837,7 @@ static s32 cake_enqueue(struct sk_buff *
|
|
ack_pkt_len = qdisc_pkt_len(ack);
|
|
b->bytes += ack_pkt_len;
|
|
q->buffer_used += skb->truesize - ack->truesize;
|
|
- if (q->rate_flags & CAKE_FLAG_INGRESS)
|
|
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS)
|
|
cake_advance_shaper(q, b, ack, now, true);
|
|
|
|
qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
|
|
@@ -1855,7 +1860,7 @@ static s32 cake_enqueue(struct sk_buff *
|
|
cake_heapify_up(q, b->overflow_idx[idx]);
|
|
|
|
/* incoming bandwidth capacity estimate */
|
|
- if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
|
|
+ if (q->config->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
|
|
u64 packet_interval = \
|
|
ktime_to_ns(ktime_sub(now, q->last_packet_time));
|
|
|
|
@@ -1887,7 +1892,7 @@ static s32 cake_enqueue(struct sk_buff *
|
|
if (ktime_after(now,
|
|
ktime_add_ms(q->last_reconfig_time,
|
|
250))) {
|
|
- q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
|
|
+ q->config->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
|
|
cake_reconfigure(sch);
|
|
}
|
|
}
|
|
@@ -1907,7 +1912,7 @@ static s32 cake_enqueue(struct sk_buff *
|
|
flow->set = CAKE_SET_SPARSE;
|
|
b->sparse_flow_count++;
|
|
|
|
- flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
|
|
+ flow->deficit = cake_get_flow_quantum(b, flow, q->config->flow_mode);
|
|
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
|
|
/* this flow was empty, accounted as a sparse flow, but actually
|
|
* in the bulk rotation.
|
|
@@ -1916,8 +1921,8 @@ static s32 cake_enqueue(struct sk_buff *
|
|
b->sparse_flow_count--;
|
|
b->bulk_flow_count++;
|
|
|
|
- cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
|
|
- cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
|
|
+ cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
}
|
|
|
|
if (q->buffer_used > q->buffer_max_used)
|
|
@@ -2103,8 +2108,8 @@ retry:
|
|
b->sparse_flow_count--;
|
|
b->bulk_flow_count++;
|
|
|
|
- cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
|
|
- cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
|
|
+ cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
|
|
flow->set = CAKE_SET_BULK;
|
|
} else {
|
|
@@ -2116,7 +2121,7 @@ retry:
|
|
}
|
|
}
|
|
|
|
- flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
|
|
+ flow->deficit += cake_get_flow_quantum(b, flow, q->config->flow_mode);
|
|
list_move_tail(&flow->flowchain, &b->old_flows);
|
|
|
|
goto retry;
|
|
@@ -2140,8 +2145,8 @@ retry:
|
|
if (flow->set == CAKE_SET_BULK) {
|
|
b->bulk_flow_count--;
|
|
|
|
- cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
|
|
- cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
|
|
+ cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
|
|
b->decaying_flow_count++;
|
|
} else if (flow->set == CAKE_SET_SPARSE ||
|
|
@@ -2159,8 +2164,8 @@ retry:
|
|
else if (flow->set == CAKE_SET_BULK) {
|
|
b->bulk_flow_count--;
|
|
|
|
- cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
|
|
- cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
|
|
+ cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
|
|
} else
|
|
b->decaying_flow_count--;
|
|
|
|
@@ -2172,13 +2177,13 @@ retry:
|
|
/* Last packet in queue may be marked, shouldn't be dropped */
|
|
if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
|
|
(b->bulk_flow_count *
|
|
- !!(q->rate_flags &
|
|
+ !!(q->config->rate_flags &
|
|
CAKE_FLAG_INGRESS))) ||
|
|
!flow->head)
|
|
break;
|
|
|
|
/* drop this packet, get another one */
|
|
- if (q->rate_flags & CAKE_FLAG_INGRESS) {
|
|
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS) {
|
|
len = cake_advance_shaper(q, b, skb,
|
|
now, true);
|
|
flow->deficit -= len;
|
|
@@ -2189,7 +2194,7 @@ retry:
|
|
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
|
|
qdisc_qstats_drop(sch);
|
|
kfree_skb(skb);
|
|
- if (q->rate_flags & CAKE_FLAG_INGRESS)
|
|
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS)
|
|
goto retry;
|
|
}
|
|
|
|
@@ -2311,7 +2316,7 @@ static int cake_config_besteffort(struct
|
|
struct cake_sched_data *q = qdisc_priv(sch);
|
|
struct cake_tin_data *b = &q->tins[0];
|
|
u32 mtu = psched_mtu(qdisc_dev(sch));
|
|
- u64 rate = q->rate_bps;
|
|
+ u64 rate = q->config->rate_bps;
|
|
|
|
q->tin_cnt = 1;
|
|
|
|
@@ -2319,7 +2324,7 @@ static int cake_config_besteffort(struct
|
|
q->tin_order = normal_order;
|
|
|
|
cake_set_rate(b, rate, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
b->tin_quantum = 65535;
|
|
|
|
return 0;
|
|
@@ -2330,7 +2335,7 @@ static int cake_config_precedence(struct
|
|
/* convert high-level (user visible) parameters into internal format */
|
|
struct cake_sched_data *q = qdisc_priv(sch);
|
|
u32 mtu = psched_mtu(qdisc_dev(sch));
|
|
- u64 rate = q->rate_bps;
|
|
+ u64 rate = q->config->rate_bps;
|
|
u32 quantum = 256;
|
|
u32 i;
|
|
|
|
@@ -2341,8 +2346,8 @@ static int cake_config_precedence(struct
|
|
for (i = 0; i < q->tin_cnt; i++) {
|
|
struct cake_tin_data *b = &q->tins[i];
|
|
|
|
- cake_set_rate(b, rate, mtu, us_to_ns(q->target),
|
|
- us_to_ns(q->interval));
|
|
+ cake_set_rate(b, rate, mtu, us_to_ns(q->config->target),
|
|
+ us_to_ns(q->config->interval));
|
|
|
|
b->tin_quantum = max_t(u16, 1U, quantum);
|
|
|
|
@@ -2419,7 +2424,7 @@ static int cake_config_diffserv8(struct
|
|
|
|
struct cake_sched_data *q = qdisc_priv(sch);
|
|
u32 mtu = psched_mtu(qdisc_dev(sch));
|
|
- u64 rate = q->rate_bps;
|
|
+ u64 rate = q->config->rate_bps;
|
|
u32 quantum = 256;
|
|
u32 i;
|
|
|
|
@@ -2433,8 +2438,8 @@ static int cake_config_diffserv8(struct
|
|
for (i = 0; i < q->tin_cnt; i++) {
|
|
struct cake_tin_data *b = &q->tins[i];
|
|
|
|
- cake_set_rate(b, rate, mtu, us_to_ns(q->target),
|
|
- us_to_ns(q->interval));
|
|
+ cake_set_rate(b, rate, mtu, us_to_ns(q->config->target),
|
|
+ us_to_ns(q->config->interval));
|
|
|
|
b->tin_quantum = max_t(u16, 1U, quantum);
|
|
|
|
@@ -2463,7 +2468,7 @@ static int cake_config_diffserv4(struct
|
|
|
|
struct cake_sched_data *q = qdisc_priv(sch);
|
|
u32 mtu = psched_mtu(qdisc_dev(sch));
|
|
- u64 rate = q->rate_bps;
|
|
+ u64 rate = q->config->rate_bps;
|
|
u32 quantum = 1024;
|
|
|
|
q->tin_cnt = 4;
|
|
@@ -2474,13 +2479,13 @@ static int cake_config_diffserv4(struct
|
|
|
|
/* class characteristics */
|
|
cake_set_rate(&q->tins[0], rate, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
cake_set_rate(&q->tins[1], rate >> 4, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
cake_set_rate(&q->tins[2], rate >> 1, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
cake_set_rate(&q->tins[3], rate >> 2, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
|
|
/* bandwidth-sharing weights */
|
|
q->tins[0].tin_quantum = quantum;
|
|
@@ -2500,7 +2505,7 @@ static int cake_config_diffserv3(struct
|
|
*/
|
|
struct cake_sched_data *q = qdisc_priv(sch);
|
|
u32 mtu = psched_mtu(qdisc_dev(sch));
|
|
- u64 rate = q->rate_bps;
|
|
+ u64 rate = q->config->rate_bps;
|
|
u32 quantum = 1024;
|
|
|
|
q->tin_cnt = 3;
|
|
@@ -2511,11 +2516,11 @@ static int cake_config_diffserv3(struct
|
|
|
|
/* class characteristics */
|
|
cake_set_rate(&q->tins[0], rate, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
cake_set_rate(&q->tins[1], rate >> 4, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
cake_set_rate(&q->tins[2], rate >> 2, mtu,
|
|
- us_to_ns(q->target), us_to_ns(q->interval));
|
|
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
|
|
|
|
/* bandwidth-sharing weights */
|
|
q->tins[0].tin_quantum = quantum;
|
|
@@ -2527,7 +2532,8 @@ static int cake_config_diffserv3(struct
|
|
|
|
static void cake_reconfigure(struct Qdisc *sch)
|
|
{
|
|
- struct cake_sched_data *q = qdisc_priv(sch);
|
|
+ struct cake_sched_data *qd = qdisc_priv(sch);
|
|
+ struct cake_sched_config *q = qd->config;
|
|
int c, ft;
|
|
|
|
switch (q->tin_mode) {
|
|
@@ -2553,36 +2559,37 @@ static void cake_reconfigure(struct Qdis
|
|
break;
|
|
}
|
|
|
|
- for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
|
|
+ for (c = qd->tin_cnt; c < CAKE_MAX_TINS; c++) {
|
|
cake_clear_tin(sch, c);
|
|
- q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
|
|
+ qd->tins[c].cparams.mtu_time = qd->tins[ft].cparams.mtu_time;
|
|
}
|
|
|
|
- q->rate_ns = q->tins[ft].tin_rate_ns;
|
|
- q->rate_shft = q->tins[ft].tin_rate_shft;
|
|
+ qd->rate_ns = qd->tins[ft].tin_rate_ns;
|
|
+ qd->rate_shft = qd->tins[ft].tin_rate_shft;
|
|
|
|
if (q->buffer_config_limit) {
|
|
- q->buffer_limit = q->buffer_config_limit;
|
|
+ qd->buffer_limit = q->buffer_config_limit;
|
|
} else if (q->rate_bps) {
|
|
u64 t = q->rate_bps * q->interval;
|
|
|
|
do_div(t, USEC_PER_SEC / 4);
|
|
- q->buffer_limit = max_t(u32, t, 4U << 20);
|
|
+ qd->buffer_limit = max_t(u32, t, 4U << 20);
|
|
} else {
|
|
- q->buffer_limit = ~0;
|
|
+ qd->buffer_limit = ~0;
|
|
}
|
|
|
|
sch->flags &= ~TCQ_F_CAN_BYPASS;
|
|
|
|
- q->buffer_limit = min(q->buffer_limit,
|
|
- max(sch->limit * psched_mtu(qdisc_dev(sch)),
|
|
- q->buffer_config_limit));
|
|
+ qd->buffer_limit = min(qd->buffer_limit,
|
|
+ max(sch->limit * psched_mtu(qdisc_dev(sch)),
|
|
+ q->buffer_config_limit));
|
|
}
|
|
|
|
static int cake_change(struct Qdisc *sch, struct nlattr *opt,
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
- struct cake_sched_data *q = qdisc_priv(sch);
|
|
+ struct cake_sched_data *qd = qdisc_priv(sch);
|
|
+ struct cake_sched_config *q = qd->config;
|
|
struct nlattr *tb[TCA_CAKE_MAX + 1];
|
|
u16 rate_flags;
|
|
u8 flow_mode;
|
|
@@ -2636,19 +2643,19 @@ static int cake_change(struct Qdisc *sch
|
|
nla_get_s32(tb[TCA_CAKE_OVERHEAD]));
|
|
rate_flags |= CAKE_FLAG_OVERHEAD;
|
|
|
|
- q->max_netlen = 0;
|
|
- q->max_adjlen = 0;
|
|
- q->min_netlen = ~0;
|
|
- q->min_adjlen = ~0;
|
|
+ qd->max_netlen = 0;
|
|
+ qd->max_adjlen = 0;
|
|
+ qd->min_netlen = ~0;
|
|
+ qd->min_adjlen = ~0;
|
|
}
|
|
|
|
if (tb[TCA_CAKE_RAW]) {
|
|
rate_flags &= ~CAKE_FLAG_OVERHEAD;
|
|
|
|
- q->max_netlen = 0;
|
|
- q->max_adjlen = 0;
|
|
- q->min_netlen = ~0;
|
|
- q->min_adjlen = ~0;
|
|
+ qd->max_netlen = 0;
|
|
+ qd->max_adjlen = 0;
|
|
+ qd->min_netlen = ~0;
|
|
+ qd->min_adjlen = ~0;
|
|
}
|
|
|
|
if (tb[TCA_CAKE_MPU])
|
|
@@ -2704,7 +2711,7 @@ static int cake_change(struct Qdisc *sch
|
|
|
|
WRITE_ONCE(q->rate_flags, rate_flags);
|
|
WRITE_ONCE(q->flow_mode, flow_mode);
|
|
- if (q->tins) {
|
|
+ if (qd->tins) {
|
|
sch_tree_lock(sch);
|
|
cake_reconfigure(sch);
|
|
sch_tree_unlock(sch);
|
|
@@ -2720,14 +2727,20 @@ static void cake_destroy(struct Qdisc *s
|
|
qdisc_watchdog_cancel(&q->watchdog);
|
|
tcf_block_put(q->block);
|
|
kvfree(q->tins);
|
|
+ kvfree(q->config);
|
|
}
|
|
|
|
static int cake_init(struct Qdisc *sch, struct nlattr *opt,
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
- struct cake_sched_data *q = qdisc_priv(sch);
|
|
+ struct cake_sched_data *qd = qdisc_priv(sch);
|
|
+ struct cake_sched_config *q;
|
|
int i, j, err;
|
|
|
|
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
|
|
+ if (!q)
|
|
+ return -ENOMEM;
|
|
+
|
|
sch->limit = 10240;
|
|
q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
|
|
q->flow_mode = CAKE_FLOW_TRIPLE;
|
|
@@ -2739,33 +2752,36 @@ static int cake_init(struct Qdisc *sch,
|
|
* for 5 to 10% of interval
|
|
*/
|
|
q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
|
|
- q->cur_tin = 0;
|
|
- q->cur_flow = 0;
|
|
+ qd->cur_tin = 0;
|
|
+ qd->cur_flow = 0;
|
|
+ qd->config = q;
|
|
|
|
- qdisc_watchdog_init(&q->watchdog, sch);
|
|
+ qdisc_watchdog_init(&qd->watchdog, sch);
|
|
|
|
if (opt) {
|
|
err = cake_change(sch, opt, extack);
|
|
|
|
if (err)
|
|
- return err;
|
|
+ goto err;
|
|
}
|
|
|
|
- err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
|
|
+ err = tcf_block_get(&qd->block, &qd->filter_list, sch, extack);
|
|
if (err)
|
|
- return err;
|
|
+ goto err;
|
|
|
|
quantum_div[0] = ~0;
|
|
for (i = 1; i <= CAKE_QUEUES; i++)
|
|
quantum_div[i] = 65535 / i;
|
|
|
|
- q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
|
|
- GFP_KERNEL);
|
|
- if (!q->tins)
|
|
- return -ENOMEM;
|
|
+ qd->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
|
|
+ GFP_KERNEL);
|
|
+ if (!qd->tins) {
|
|
+ err = -ENOMEM;
|
|
+ goto err;
|
|
+ }
|
|
|
|
for (i = 0; i < CAKE_MAX_TINS; i++) {
|
|
- struct cake_tin_data *b = q->tins + i;
|
|
+ struct cake_tin_data *b = qd->tins + i;
|
|
|
|
INIT_LIST_HEAD(&b->new_flows);
|
|
INIT_LIST_HEAD(&b->old_flows);
|
|
@@ -2781,22 +2797,27 @@ static int cake_init(struct Qdisc *sch,
|
|
INIT_LIST_HEAD(&flow->flowchain);
|
|
cobalt_vars_init(&flow->cvars);
|
|
|
|
- q->overflow_heap[k].t = i;
|
|
- q->overflow_heap[k].b = j;
|
|
+ qd->overflow_heap[k].t = i;
|
|
+ qd->overflow_heap[k].b = j;
|
|
b->overflow_idx[j] = k;
|
|
}
|
|
}
|
|
|
|
cake_reconfigure(sch);
|
|
- q->avg_peak_bandwidth = q->rate_bps;
|
|
- q->min_netlen = ~0;
|
|
- q->min_adjlen = ~0;
|
|
+ qd->avg_peak_bandwidth = q->rate_bps;
|
|
+ qd->min_netlen = ~0;
|
|
+ qd->min_adjlen = ~0;
|
|
return 0;
|
|
+err:
|
|
+ kvfree(qd->config);
|
|
+ qd->config = NULL;
|
|
+ return err;
|
|
}
|
|
|
|
static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
|
|
{
|
|
- struct cake_sched_data *q = qdisc_priv(sch);
|
|
+ struct cake_sched_data *qd = qdisc_priv(sch);
|
|
+ struct cake_sched_config *q = qd->config;
|
|
struct nlattr *opts;
|
|
u16 rate_flags;
|
|
u8 flow_mode;
|