1
0
mirror of https://github.com/corundum/corundum.git synced 2025-01-16 08:12:53 +08:00

modules/mqnic: Improve indirection table handling

Signed-off-by: Alex Forencich <alex@alexforencich.com>
This commit is contained in:
Alex Forencich 2023-05-03 16:50:59 -07:00
parent eecaef6e6f
commit 32db67b066
3 changed files with 44 additions and 9 deletions

View File

@ -492,6 +492,9 @@ struct mqnic_priv {
u32 max_desc_block_size;
u32 rx_queue_map_indir_table_size;
u32 *rx_queue_map_indir_table;
struct hwtstamp_config hwts_config;
struct i2c_client *mod_i2c_client;
@ -544,6 +547,7 @@ u32 mqnic_port_get_rx_status(struct mqnic_port *port);
// mqnic_netdev.c
int mqnic_start_port(struct net_device *ndev);
void mqnic_stop_port(struct net_device *ndev);
int mqnic_update_indir_table(struct net_device *ndev);
void mqnic_update_stats(struct net_device *ndev);
struct net_device *mqnic_create_netdev(struct mqnic_if *interface, int index, int dev_port);
void mqnic_destroy_netdev(struct net_device *ndev);

View File

@ -160,6 +160,7 @@ static int mqnic_set_channels(struct net_device *ndev,
u32 txq_count, rxq_count;
int port_up = priv->port_up;
int ret = 0;
int k;
if (channel->combined_count || channel->other_count)
return -EINVAL;
@ -179,6 +180,10 @@ static int mqnic_set_channels(struct net_device *ndev,
dev_info(priv->dev, "New TX channel count: %d", txq_count);
dev_info(priv->dev, "New RX channel count: %d", rxq_count);
if (rxq_count != priv->rxq_count)
for (k = 0; k < priv->rx_queue_map_indir_table_size; k++)
priv->rx_queue_map_indir_table[k] = k % rxq_count;
mutex_lock(&priv->mdev->state_lock);
if (port_up)

View File

@ -169,15 +169,7 @@ int mqnic_start_port(struct net_device *ndev)
mqnic_interface_set_rx_mtu(iface, ndev->mtu + ETH_HLEN);
// configure RX indirection and RSS
mqnic_interface_set_rx_queue_map_rss_mask(iface, 0, 0xffffffff);
mqnic_interface_set_rx_queue_map_app_mask(iface, 0, 0);
for (k = 0; k < iface->rx_queue_map_indir_table_size; k++) {
rcu_read_lock();
q = radix_tree_lookup(&priv->rxq_table, k % priv->rxq_count);
rcu_read_unlock();
mqnic_interface_set_rx_queue_map_indir_table(iface, 0, k, q->index);
}
mqnic_update_indir_table(ndev);
priv->port_up = true;
@ -336,6 +328,28 @@ static int mqnic_close(struct net_device *ndev)
return ret;
}
// Program the interface's RX queue-map indirection table from the
// software copy in priv->rx_queue_map_indir_table, enabling RSS hashing
// (full 32-bit RSS mask, application mask cleared) first.
//
// Each table slot holds a logical RX queue number; it is resolved to the
// hardware ring index through priv->rxq_table. Slots whose queue is not
// currently present in the radix tree are left untouched in hardware.
//
// Returns 0 (no failure paths at present; callers may ignore the result).
int mqnic_update_indir_table(struct net_device *ndev)
{
	struct mqnic_priv *priv = netdev_priv(ndev);
	struct mqnic_if *iface = priv->interface;
	struct mqnic_ring *q;
	int k;

	mqnic_interface_set_rx_queue_map_rss_mask(iface, 0, 0xffffffff);
	mqnic_interface_set_rx_queue_map_app_mask(iface, 0, 0);

	for (k = 0; k < priv->rx_queue_map_indir_table_size; k++) {
		int q_index = -1;

		// Resolve the logical queue number under RCU and capture
		// q->index before dropping the read lock, so we never
		// dereference a ring that could be freed after unlock
		// (the original read q->index after rcu_read_unlock()).
		rcu_read_lock();
		q = radix_tree_lookup(&priv->rxq_table, priv->rx_queue_map_indir_table[k]);
		if (q)
			q_index = q->index;
		rcu_read_unlock();

		if (q_index >= 0)
			mqnic_interface_set_rx_queue_map_indir_table(iface, 0, k, q_index);
	}

	return 0;
}
void mqnic_update_stats(struct net_device *ndev)
{
struct mqnic_priv *priv = netdev_priv(ndev);
@ -613,6 +627,16 @@ struct net_device *mqnic_create_netdev(struct mqnic_if *interface, int index, in
desc_block_size = min_t(u32, interface->max_desc_block_size, 4);
priv->rx_queue_map_indir_table_size = interface->rx_queue_map_indir_table_size;
priv->rx_queue_map_indir_table = kzalloc(sizeof(u32)*priv->rx_queue_map_indir_table_size, GFP_KERNEL);
if (!priv->rx_queue_map_indir_table) {
ret = -ENOMEM;
goto fail;
}
for (k = 0; k < priv->rx_queue_map_indir_table_size; k++)
priv->rx_queue_map_indir_table[k] = k % priv->rxq_count;
// entry points
ndev->netdev_ops = &mqnic_netdev_ops;
ndev->ethtool_ops = &mqnic_ethtool_ops;
@ -662,5 +686,7 @@ void mqnic_destroy_netdev(struct net_device *ndev)
if (priv->registered)
unregister_netdev(ndev);
kfree(priv->rx_queue_map_indir_table);
free_netdev(ndev);
}