summaryrefslogtreecommitdiff
path: root/net/core
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2026-02-20 22:26:05 +0000
committerSasha Levin <sashal@kernel.org>2026-03-04 07:20:55 -0500
commited712dc0d64dee5f0d05e4d8ca57711f8a9c850c (patch)
tree7a1ce0c44b10ac055d43a6630151689c4803a8b8 /net/core
parent8dd43f9a9323f9c01bc8246da8d81a4c783c9e97 (diff)
net: do not pass flow_id to set_rps_cpu()
[ Upstream commit 8a8a9fac9efa6423fd74938b940cb7d731780718 ] Blamed commit made the assumption that the RPS table for each receive queue would have the same size, and that it would not change. Compute flow_id in set_rps_cpu(), do not assume we can use the value computed by get_rps_cpu(). Otherwise we risk out-of-bound access and/or crashes. Fixes: 48aa30443e52 ("net: Cache hash and flow_id to avoid recalculation") Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Krishna Kumar <krikku@gmail.com> Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com> Link: https://patch.msgid.link/20260220222605.3468081-1-edumazet@google.com Signed-off-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c12
1 file changed, 5 insertions, 7 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index f5e4040e0839..60a26208cbd8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4988,8 +4988,7 @@ static bool rps_flow_is_active(struct rps_dev_flow *rflow,
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- struct rps_dev_flow *rflow, u16 next_cpu, u32 hash,
- u32 flow_id)
+ struct rps_dev_flow *rflow, u16 next_cpu, u32 hash)
{
if (next_cpu < nr_cpu_ids) {
u32 head;
@@ -5000,6 +4999,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *tmp_rflow;
unsigned int tmp_cpu;
u16 rxq_index;
+ u32 flow_id;
int rc;
/* Should we steer this flow to a different hardware queue? */
@@ -5015,6 +5015,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (!flow_table)
goto out;
+ flow_id = rfs_slot(hash, flow_table);
tmp_rflow = &flow_table->flows[flow_id];
tmp_cpu = READ_ONCE(tmp_rflow->cpu);
@@ -5062,7 +5063,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow_table *flow_table;
struct rps_map *map;
int cpu = -1;
- u32 flow_id;
u32 tcpu;
u32 hash;
@@ -5109,8 +5109,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
*/
- flow_id = rfs_slot(hash, flow_table);
- rflow = &flow_table->flows[flow_id];
+ rflow = &flow_table->flows[rfs_slot(hash, flow_table)];
tcpu = rflow->cpu;
/*
@@ -5129,8 +5128,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
- rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash,
- flow_id);
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash);
}
if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {