airoha: an7581: backport patch to support ETS and HTB sched
Backport patches to support the ETS and HTB schedulers for the airoha
ethernet driver.

Signed-off-by: Christian Marangi <[email protected]>
Ansuel committed Jan 10, 2025
1 parent b4ae537 commit 3ec3ea5
Showing 4 changed files with 776 additions and 0 deletions.
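
The backported series hooks the scheduler support into the kernel's qdisc
offload machinery. As background, the sketch below shows how a driver
typically dispatches ETS and HTB offload requests through ndo_setup_tc;
the airoha_tc_setup_qdisc_ets()/airoha_tc_setup_qdisc_htb() helpers are
placeholder names for illustration and are not taken from these patches.

/* Background sketch only: a typical ndo_setup_tc dispatcher for ETS/HTB
 * qdisc offload. The airoha_tc_setup_qdisc_*() helpers are placeholders.
 */
#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
			       void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_ETS:
		/* type_data points to a struct tc_ets_qopt_offload */
		return airoha_tc_setup_qdisc_ets(dev, type_data);
	case TC_SETUP_QDISC_HTB:
		/* type_data points to a struct tc_htb_qopt_offload */
		return airoha_tc_setup_qdisc_htb(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}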
@@ -0,0 +1,27 @@
From 5f795590380476f1c9b7ed0ac945c9b0269dc23a Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Fri, 3 Jan 2025 13:17:02 +0100
Subject: [PATCH 1/4] net: airoha: Enable Tx drop capability for each Tx DMA
ring

This is a preliminary patch in order to enable hw Qdisc offloading.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
---
drivers/net/ethernet/mediatek/airoha_eth.c | 4 ++++
1 file changed, 4 insertions(+)

--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -1790,6 +1790,10 @@ static int airoha_qdma_init_tx_queue(str
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}

+ /* xmit ring drop default setting */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
@@ -0,0 +1,86 @@
From 2b288b81560b94958cd68bbe54673e55a1730c95 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Fri, 3 Jan 2025 13:17:03 +0100
Subject: [PATCH 2/4] net: airoha: Introduce ndo_select_queue callback

Airoha EN7581 SoC supports 32 Tx DMA rings used to feed packets to QoS
channels. Each channel supports 8 QoS queues where the user can apply
QoS scheduling policies. In a similar way, the user can configure hw
rate shaping for each QoS channel.
Introduce the ndo_select_queue callback in order to select the tx queue
based on the QoS channel and QoS queue. In particular, for dsa devices
select the QoS channel according to the dsa user port index; rely on the
port id otherwise. Select the QoS queue based on the skb priority.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
---
drivers/net/ethernet/mediatek/airoha_eth.c | 30 ++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)
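
Worked example of the mapping introduced below, derived from the diff: for
a dsa user port with index 2 and skb->priority 3, the channel is
2 % AIROHA_NUM_QOS_CHANNELS = 2 and the queue is
(3 - 1) % AIROHA_NUM_QOS_QUEUES = 2, so the selected Tx queue is
2 * 8 + 2 = 18, provided it is below dev->num_tx_queues.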

--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -23,6 +23,8 @@
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
#define AIROHA_NUM_TX_RING 32
#define AIROHA_NUM_RX_RING 32
#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
@@ -2422,21 +2424,44 @@ static void airoha_dev_get_stats64(struc
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+ /* For dsa device select QoS channel according to the dsa user port
+ * index, rely on port id otherwise. Select QoS queue based on the
+ * skb priority.
+ */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0 = 0, msg1, len = skb_headlen(skb);
- int i, qid = skb_get_queue_mapping(skb);
+ u32 msg0, msg1, len = skb_headlen(skb);
struct airoha_qdma *qdma = port->qdma;
u32 nr_frags = 1 + sinfo->nr_frags;
struct netdev_queue *txq;
struct airoha_queue *q;
void *data = skb->data;
+ int i, qid;
u16 index;
u8 fport;

+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2610,6 +2635,7 @@ static const struct net_device_ops airoh
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
.ndo_set_mac_address = airoha_dev_set_macaddr,
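
Since the queue selection above is driven by skb->priority, the QoS queue a
packet lands in can be influenced from user space, for instance via the
standard SO_PRIORITY socket option (or a tc filter that sets the priority).
A minimal, hypothetical user-space sketch, not part of the patch:

/* Illustration only: steer a socket's traffic by setting its priority.
 * With the mapping above, priority 3 selects QoS queue (3 - 1) % 8 = 2
 * within the port's QoS channel.
 */
#include <stdio.h>
#include <sys/socket.h>

static int set_tx_priority(int fd)
{
	int prio = 3;

	if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0) {
		perror("setsockopt(SO_PRIORITY)");
		return -1;
	}
	return 0;
}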