diff --git a/root/target/linux/generic/hack-5.4/952-net-conntrack-events-support-multiple-registrant.patch b/root/target/linux/generic/hack-5.4/952-net-conntrack-events-support-multiple-registrant.patch
new file mode 100644
index 000000000..e7ff0d462
--- /dev/null
+++ b/root/target/linux/generic/hack-5.4/952-net-conntrack-events-support-multiple-registrant.patch
@@ -0,0 +1,319 @@
+diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
+index 12d967b..c2b98b6 100644
+--- a/include/net/netfilter/nf_conntrack_ecache.h
++++ b/include/net/netfilter/nf_conntrack_ecache.h
+@@ -72,6 +72,10 @@ struct nf_ct_event {
+ 	int report;
+ };
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
++extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
++#else
+ struct nf_ct_event_notifier {
+ 	int (*fcn)(unsigned int events, struct nf_ct_event *item);
+ };
+@@ -80,6 +84,7 @@ int nf_conntrack_register_notifier(struc
+ 				   struct nf_ct_event_notifier *nb);
+ void nf_conntrack_unregister_notifier(struct net *net,
+ 				      struct nf_ct_event_notifier *nb);
++#endif
+ 
+ void nf_ct_deliver_cached_events(struct nf_conn *ct);
+ int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+@@ -105,11 +110,13 @@ int nf_conntrack_eventmask_report(unsign
+ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
+ {
+ #ifdef CONFIG_NF_CONNTRACK_EVENTS
+-	struct net *net = nf_ct_net(ct);
+ 	struct nf_conntrack_ecache *e;
++#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	struct net *net = nf_ct_net(ct);
+ 
+ 	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ 		return;
++#endif
+ 
+ 	e = nf_ct_ecache_find(ct);
+ 	if (e == NULL)
+@@ -124,10 +131,12 @@ static inline int
+ 			  u32 portid, int report)
+ {
+ #ifdef CONFIG_NF_CONNTRACK_EVENTS
++#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ 	const struct net *net = nf_ct_net(ct);
+ 
+ 	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ 		return 0;
++#endif
+ 
+ 	return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
+ #else
+@@ -139,10 +148,12 @@ nf_conntrack_event_report(enum ip_conntr
+ nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
+ {
+ #ifdef CONFIG_NF_CONNTRACK_EVENTS
++#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ 	const struct net *net = nf_ct_net(ct);
+ 
+ 	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ 		return 0;
++#endif
+ 
+ 	return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
+ #else
+diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
+index e469e85..1d31db8 100644
+--- a/include/net/netns/conntrack.h
++++ b/include/net/netns/conntrack.h
+@@ -112,7 +112,11 @@ struct netns_ct {
+ 
+ 	struct ct_pcpu __percpu *pcpu_lists;
+ 	struct ip_conntrack_stat __percpu *stat;
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	struct atomic_notifier_head nf_conntrack_chain;
++#else
+ 	struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
++#endif
+ 	struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
+ 	struct nf_ip_net	nf_ct_proto;
+ #if defined(CONFIG_NF_CONNTRACK_LABELS)
+diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
+index 63073be..08d7aab 100644
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -148,6 +148,14 @@ config NF_CONNTRACK_RTCACHE
+ 	  To compile it as a module, choose M here. If unsure, say N.
+ 	  The module will be called nf_conntrack_rtcache.
+ 
++config NF_CONNTRACK_CHAIN_EVENTS
++	bool "Register multiple callbacks to ct events"
++	depends on NF_CONNTRACK_EVENTS
++	help
++	  Support multiple registrations.
++
++	  If unsure, say `N'.
++
+ config NF_CONNTRACK_TIMEOUT
+ 	bool 'Connection tracking timeout'
+ 	depends on NETFILTER_ADVANCED
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 6bd1508..9b81c7c 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2592,6 +2592,9 @@ int nf_conntrack_init_net(struct net *ne
+ 	nf_conntrack_helper_pernet_init(net);
+ 	nf_conntrack_proto_pernet_init(net);
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	ATOMIC_INIT_NOTIFIER_HEAD(&net->ct.nf_conntrack_chain);
++#endif
+ 	return 0;
+ 
+ err_expect:
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index da9df2d..e0e2a8f 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -17,6 +17,9 @@
+ #include <linux/stddef.h>
+ #include <linux/err.h>
+ #include <linux/percpu.h>
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++#include <linux/notifier.h>
++#endif
+ #include <linux/kernel.h>
+ #include <linux/netdevice.h>
+ #include <linux/slab.h>
+@@ -117,6 +120,38 @@ static void ecache_work(struct work_stru
+ 	schedule_delayed_work(&ctnet->ecache_dwork, delay);
+ }
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++int
++nf_conntrack_eventmask_report(unsigned int eventmask,
++			      struct nf_conn *ct,
++			      u32 portid,
++			      int report)
++{
++	struct nf_conntrack_ecache *e;
++	struct net *net = nf_ct_net(ct);
++
++	e = nf_ct_ecache_find(ct);
++	if (e == NULL)
++		return 0;
++
++	if (nf_ct_is_confirmed(ct)) {
++		struct nf_ct_event item = {
++			.ct = ct,
++			.portid = e->portid ? e->portid : portid,
++			.report = report
++		};
++		/* This is a resent of a destroy event? If so, skip missed */
++		unsigned long missed = e->portid ? 0 : e->missed;
++
++		if (!((eventmask | missed) & e->ctmask))
++			return 0;
++
++		atomic_notifier_call_chain(&net->ct.nf_conntrack_chain, eventmask | missed, &item);
++	}
++
++	return 0;
++}
++#else
+ int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+ 				  u32 portid, int report)
+ {
+@@ -171,10 +206,52 @@ out_unlock:
+ 	rcu_read_unlock();
+ 	return ret;
+ }
++#endif
+ EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
+ 
+ /* deliver cached events and clear cache entry - must be called with locally
+  * disabled softirqs */
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++void nf_ct_deliver_cached_events(struct nf_conn *ct)
++{
++	unsigned long events, missed;
++	struct nf_conntrack_ecache *e;
++	struct nf_ct_event item;
++	struct net *net = nf_ct_net(ct);
++
++	e = nf_ct_ecache_find(ct);
++	if (e == NULL)
++		return;
++
++	events = xchg(&e->cache, 0);
++
++	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
++		return;
++
++	/* We make a copy of the missed event cache without taking
++	 * the lock, thus we may send missed events twice. However,
++	 * this does not harm and it happens very rarely. */
++	missed = e->missed;
++
++	if (!((events | missed) & e->ctmask))
++		return;
++
++	item.ct = ct;
++	item.portid = 0;
++	item.report = 0;
++
++	atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
++			events | missed,
++			&item);
++
++	if (likely(!missed))
++		return;
++
++	spin_lock_bh(&ct->lock);
++	e->missed &= ~missed;
++	spin_unlock_bh(&ct->lock);
++}
++#else
+ void nf_ct_deliver_cached_events(struct nf_conn *ct)
+ {
+ 	struct net *net = nf_ct_net(ct);
+@@ -225,6 +302,7 @@ void nf_ct_deliver_cached_events(struct
+ out_unlock:
+ 	rcu_read_unlock();
+ }
++#endif
+ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
+ 
+ void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+@@ -257,6 +335,12 @@ out_unlock:
+ 	rcu_read_unlock();
+ }
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb)
++{
++	return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
++}
++#else
+ int nf_conntrack_register_notifier(struct net *net,
+ 				   struct nf_ct_event_notifier *new)
+ {
+@@ -277,8 +361,15 @@ out_unlock:
+ 	mutex_unlock(&nf_ct_ecache_mutex);
+ 	return ret;
+ }
++#endif
+ EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb)
++{
++	return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
++}
++#else
+ void nf_conntrack_unregister_notifier(struct net *net,
+ 				      struct nf_ct_event_notifier *new)
+ {
+@@ -292,6 +383,7 @@ void nf_conntrack_unregister_notifier(st
+ 	mutex_unlock(&nf_ct_ecache_mutex);
+ 	/* synchronize_rcu() is called from ctnetlink_exit. */
+ }
++#endif
+ EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
+ 
+ int nf_ct_expect_register_notifier(struct net *net,
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 04111c1..8c741f7 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -32,6 +32,11 @@
+ #include <linux/siphash.h>
+ 
+ #include <linux/netfilter.h>
++
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++#include <linux/notifier.h>
++#endif
++
+ #include <net/netlink.h>
+ #include <net/sock.h>
+ #include <net/netfilter/nf_conntrack.h>
+@@ -676,14 +681,22 @@ static size_t ctnetlink_nlmsg_size(const
+ 	;
+ }
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++static int ctnetlink_conntrack_event(struct notifier_block *this,
++				     unsigned long events, void *ptr)
++#else
+ static int
+ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
++#endif
+ {
+ 	const struct nf_conntrack_zone *zone;
+ 	struct net *net;
+ 	struct nlmsghdr *nlh;
+ 	struct nfgenmsg *nfmsg;
+ 	struct nlattr *nest_parms;
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	struct nf_ct_event *item = (struct nf_ct_event *)ptr;
++#endif
+ 	struct nf_conn *ct = item->ct;
+ 	struct sk_buff *skb;
+ 	unsigned int type;
+@@ -3504,9 +3517,15 @@ static int ctnetlink_stat_exp_cpu(struct
+ }
+ 
+ #ifdef CONFIG_NF_CONNTRACK_EVENTS
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++static struct notifier_block ctnl_notifier = {
++	.notifier_call = ctnetlink_conntrack_event,
++};
++#else
+ static struct nf_ct_event_notifier ctnl_notifier = {
+ 	.fcn = ctnetlink_conntrack_event,
+ };
++#endif
+ 
+ static struct nf_exp_event_notifier ctnl_notifier_exp = {
+ 	.fcn = ctnetlink_expect_event,
diff --git a/root/target/linux/generic/hack-5.4/953-net-patch-linux-kernel-to-support-shortcut-fe.patch b/root/target/linux/generic/hack-5.4/953-net-patch-linux-kernel-to-support-shortcut-fe.patch
new file mode 100644
index 000000000..23e3b66a7
--- /dev/null
+++ b/root/target/linux/generic/hack-5.4/953-net-patch-linux-kernel-to-support-shortcut-fe.patch
@@ -0,0 +1,253 @@
+--- a/include/linux/if_bridge.h
++++ b/include/linux/if_bridge.h
+@@ -52,6 +52,9 @@ struct br_ip_list {
+ 
+ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+ 
++extern void br_dev_update_stats(struct net_device *dev,
++				struct rtnl_link_stats64 *nlstats);
++
+ #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
+ int br_multicast_list_adjacent(struct net_device *dev,
+ 			       struct list_head *br_ip_list);
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -826,6 +826,10 @@ struct sk_buff {
+ #endif
+ 	__u8			gro_skip:1;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	__u16			fast_forwarded:1;
++#endif
++
+ #ifdef CONFIG_NET_SCHED
+ 	__u16			tc_index;	/* traffic control index */
+ #endif
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -18,6 +18,10 @@ struct timer_list {
+ 	void			(*function)(struct timer_list *);
+ 	u32			flags;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	unsigned long		cust_data;
++#endif
++
+ #ifdef CONFIG_LOCKDEP
+ 	struct lockdep_map	lockdep_map;
+ #endif
+--- a/include/net/netfilter/nf_conntrack_ecache.h
++++ b/include/net/netfilter/nf_conntrack_ecache.h
+@@ -75,6 +75,8 @@ struct nf_ct_event {
+ #ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
+ extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
++extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
++extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
+ #else
+ struct nf_ct_event_notifier {
+ 	int (*fcn)(unsigned int events, struct nf_ct_event *item);
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -746,6 +746,28 @@ void br_port_flags_change(struct net_bri
+ 	br_recalculate_neigh_suppress_enabled(br);
+ }
+ 
++void br_dev_update_stats(struct net_device *dev,
++			 struct rtnl_link_stats64 *nlstats)
++{
++	struct net_bridge *br;
++	struct pcpu_sw_netstats *stats;
++
++	/* Is this a bridge? */
++	if (!(dev->priv_flags & IFF_EBRIDGE))
++		return;
++
++	br = netdev_priv(dev);
++	stats = this_cpu_ptr(br->stats);
++
++	u64_stats_update_begin(&stats->syncp);
++	stats->rx_packets += nlstats->rx_packets;
++	stats->rx_bytes += nlstats->rx_bytes;
++	stats->tx_packets += nlstats->tx_packets;
++	stats->tx_bytes += nlstats->tx_bytes;
++	u64_stats_update_end(&stats->syncp);
++}
++EXPORT_SYMBOL_GPL(br_dev_update_stats);
++
+ bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+ {
+ 	struct net_bridge_port *p;
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3189,8 +3189,17 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 	unsigned int len;
+ 	int rc;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	/* If this skb has been fast forwarded then we don't want it to
++	 * go to any taps (by definition we're trying to bypass them).
++	 */
++	if (!skb->fast_forwarded) {
++#endif
+ 	if (dev_nit_active(dev))
+ 		dev_queue_xmit_nit(skb, dev);
++#ifdef CONFIG_SHORTCUT_FE
++	}
++#endif
+ 
+ #ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ 	if (!dev->eth_mangle_tx ||
+@@ -4683,6 +4691,11 @@ void netdev_rx_handler_unregister(struct
+ }
+ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+ 
++#ifdef CONFIG_SHORTCUT_FE
++int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
++EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
++#endif
++
+ /*
+  *	Limit the use of PFMEMALLOC reserves to those protocols that implement
+  *	the special handling of PFMEMALLOC skbs.
+@@ -4733,6 +4746,10 @@ static int __netif_receive_skb_core(stru
+ 	int ret = NET_RX_DROP;
+ 	__be16 type;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	int (*fast_recv)(struct sk_buff *skb);
++#endif
++
+ 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
+ 
+ 	trace_netif_receive_skb(skb);
+@@ -4772,6 +4789,16 @@ another_round:
+ 		goto out;
+ 	}
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	fast_recv = rcu_dereference(athrs_fast_nat_recv);
++	if (fast_recv) {
++		if (fast_recv(skb)) {
++			ret = NET_RX_SUCCESS;
++			goto out;
++		}
++	}
++#endif
++
+ 	if (skb_skip_tc_classify(skb))
+ 		goto skip_classify;
+ 
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -473,3 +473,6 @@ config HAVE_CBPF_JIT
+ # Extended BPF JIT (eBPF)
+ config HAVE_EBPF_JIT
+ 	bool
++
++config SHORTCUT_FE
++	bool "Enables kernel network stack path for Shortcut Forwarding Engine"
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -34,11 +34,19 @@
+ /* Do not check the TCP window for incoming packets */
+ static int nf_ct_tcp_no_window_check __read_mostly = 1;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);
++#endif
++
+ /* "Be conservative in what you do,
+     be liberal in what you accept from others."
+     If it's non-zero, we mark only out of window RST segments as INVALID. */
+ static int nf_ct_tcp_be_liberal __read_mostly = 0;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);
++#endif
++
+ /* If it is set to zero, we disable picking up already established
+    connections. */
+ static int nf_ct_tcp_loose __read_mostly = 1;
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -162,7 +162,11 @@ int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+ 
+ 	rcu_read_lock();
+ 	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
++#else
+ 	if (!notify)
++#endif
+ 		goto out_unlock;
+ 
+ 	e = nf_ct_ecache_find(ct);
+@@ -181,7 +185,14 @@ int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+ 		if (!((eventmask | missed) & e->ctmask))
+ 			goto out_unlock;
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++		ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
++			eventmask | missed, &item);
++		if (notify)
++			ret = notify->fcn(eventmask | missed, &item);
++#else
+ 		ret = notify->fcn(eventmask | missed, &item);
++#endif
+ 		if (unlikely(ret < 0 || missed)) {
+ 			spin_lock_bh(&ct->lock);
+ 			if (ret < 0) {
+@@ -263,7 +274,11 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
+ 
+ 	rcu_read_lock();
+ 	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	if ((notify == NULL) && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
++#else
+ 	if (notify == NULL)
++#endif
+ 		goto out_unlock;
+ 
+ 	e = nf_ct_ecache_find(ct);
+@@ -287,7 +302,15 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
+ 	item.portid = 0;
+ 	item.report = 0;
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
++			events | missed,
++			&item);
++	if (notify != NULL)
++		ret = notify->fcn(events | missed, &item);
++#else
+ 	ret = notify->fcn(events | missed, &item);
++#endif
+ 
+ 	if (likely(ret == 0 && !missed))
+ 		goto out_unlock;
+@@ -340,6 +363,11 @@ int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb)
+ {
+ 	return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+ }
++int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
++{
++	return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
++}
++EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
+ #else
+ int nf_conntrack_register_notifier(struct net *net,
+ 				   struct nf_ct_event_notifier *new)
+@@ -369,6 +397,11 @@ int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb)
+ {
+ 	return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+ }
++int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
++{
++	return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
++}
++EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
+ #else
+ void nf_conntrack_unregister_notifier(struct net *net,
+ 				      struct nf_ct_event_notifier *new)
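
Editor's note (not part of either patch): the sketch below illustrates how a second conntrack event listener might hook into the notifier chain that CONFIG_NF_CONNTRACK_CHAIN_EVENTS adds, registering alongside ctnetlink instead of replacing it. Only nf_conntrack_register_chain_notifier()/nf_conntrack_unregister_chain_notifier() and the struct nf_ct_event argument come from the patches above; the module name, the callback body, and the use of init_net are assumptions for illustration only.

/* Illustrative sketch only; assumes CONFIG_NF_CONNTRACK_CHAIN_EVENTS=y. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static int ct_event_cb(struct notifier_block *nb, unsigned long events, void *ptr)
{
	/* The chain passes the same nf_ct_event that ctnetlink receives. */
	struct nf_ct_event *item = ptr;

	/* 'events' is the bitmask handed to atomic_notifier_call_chain(). */
	if (events & (1 << IPCT_DESTROY))
		pr_info("conntrack %p destroyed\n", item->ct);

	return NOTIFY_DONE;
}

static struct notifier_block ct_event_nb = {
	.notifier_call = ct_event_cb,
};

static int __init ct_listener_init(void)
{
	/* Registers on the per-netns atomic notifier chain; ctnetlink stays registered too. */
	return nf_conntrack_register_chain_notifier(&init_net, &ct_event_nb);
}

static void __exit ct_listener_exit(void)
{
	nf_conntrack_unregister_chain_notifier(&init_net, &ct_event_nb);
}

module_init(ct_listener_init);
module_exit(ct_listener_exit);
MODULE_LICENSE("GPL");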