mirror of https://github.com/corundum/corundum.git
Remove unnecessary priv parameters
parent 7a43618e3c
commit c739b05b69
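The change follows a common refactoring pattern: each ring structure records back-pointers to its owner (priv, plus the struct device used for DMA) once at creation time, so every later operation can be called with just the ring pointer instead of threading priv through the whole call chain. Below is a minimal, self-contained sketch of the idea; owner, ring, and ring_deactivate are illustrative stand-ins, not the driver's API.

#include <stdio.h>

struct owner {
    const char *name;
};

struct ring {
    struct owner *owner;   /* back-pointer recorded once at creation */
    int active;
};

/* before: void ring_deactivate(struct owner *owner, struct ring *ring); */
/* after: the owner is recovered through the ring itself */
static void ring_deactivate(struct ring *ring)
{
    ring->active = 0;
    printf("deactivated ring owned by %s\n", ring->owner->name);
}

int main(void)
{
    struct owner eth0 = { .name = "eth0" };
    struct ring ring = { .owner = &eth0, .active = 1 };

    ring_deactivate(&ring);   /* call site no longer passes the owner */
    return 0;
}

The trade-off is one extra pointer per ring in exchange for shorter signatures and call sites that cannot pass a mismatched owner.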
modules/mqnic/mqnic.h

@@ -196,6 +196,10 @@ struct mqnic_ring {
         struct mqnic_rx_info *rx_info;
     };
 
+    struct device *dev;
+    struct net_device *ndev;
+    struct mqnic_priv *priv;
+
     u32 hw_ptr_mask;
     u8 __iomem *hw_addr;
     u8 __iomem *hw_head_ptr;
@@ -216,6 +220,7 @@ struct mqnic_cq_ring {
     dma_addr_t buf_dma_addr;
 
     struct net_device *ndev;
+    struct mqnic_priv *priv;
     struct napi_struct napi;
     int ring_index;
     int eq_index;
@@ -242,6 +247,7 @@ struct mqnic_eq_ring {
     dma_addr_t buf_dma_addr;
 
     struct net_device *ndev;
+    struct mqnic_priv *priv;
     int int_index;
 
     int irq;
@@ -257,6 +263,7 @@ struct mqnic_eq_ring {
 struct mqnic_port {
     struct device *dev;
     struct net_device *ndev;
+    struct mqnic_priv *priv;
 
     int index;
 
@@ -330,7 +337,7 @@ void mqnic_destroy_netdev(struct net_device *ndev);
 // mqnic_port.c
 int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
         int index, u8 __iomem *hw_addr);
-void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr);
+void mqnic_destroy_port(struct mqnic_port **port_ptr);
 int mqnic_activate_port(struct mqnic_port *port);
 void mqnic_deactivate_port(struct mqnic_port *port);
 u32 mqnic_port_get_rss_mask(struct mqnic_port *port);
@@ -361,24 +368,22 @@ void mqnic_board_deinit(struct mqnic_dev *mqnic);
 // mqnic_eq.c
 int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
         int size, int stride, int index, u8 __iomem *hw_addr);
-void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr);
-int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
-        int int_index);
-void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring);
+void mqnic_destroy_eq_ring(struct mqnic_eq_ring **ring_ptr);
+int mqnic_activate_eq_ring(struct mqnic_eq_ring *ring, int int_index);
+void mqnic_deactivate_eq_ring(struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring);
 void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring);
 void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring);
 void mqnic_arm_eq(struct mqnic_eq_ring *ring);
-void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring);
+void mqnic_process_eq(struct mqnic_eq_ring *eq_ring);
 
 // mqnic_cq.c
 int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
         int size, int stride, int index, u8 __iomem *hw_addr);
-void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr);
-int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
-        int eq_index);
-void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring);
+void mqnic_destroy_cq_ring(struct mqnic_cq_ring **ring_ptr);
+int mqnic_activate_cq_ring(struct mqnic_cq_ring *ring, int eq_index);
+void mqnic_deactivate_cq_ring(struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring);
 void mqnic_cq_read_head_ptr(struct mqnic_cq_ring *ring);
@@ -388,19 +393,16 @@ void mqnic_arm_cq(struct mqnic_cq_ring *ring);
 // mqnic_tx.c
 int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
         int size, int stride, int index, u8 __iomem *hw_addr);
-void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
-int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int cpl_index);
-void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
+void mqnic_destroy_tx_ring(struct mqnic_ring **ring_ptr);
+int mqnic_activate_tx_ring(struct mqnic_ring *ring, int cpl_index);
+void mqnic_deactivate_tx_ring(struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring);
 void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_tx_write_head_ptr(struct mqnic_ring *ring);
-void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int index, int napi_budget);
-int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-        int napi_budget);
+void mqnic_free_tx_desc(struct mqnic_ring *ring, int index, int napi_budget);
+int mqnic_free_tx_buf(struct mqnic_ring *ring);
+int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget);
 void mqnic_tx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_tx_cq(struct napi_struct *napi, int budget);
 netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -408,22 +410,18 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);
 // mqnic_rx.c
 int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
         int size, int stride, int index, u8 __iomem *hw_addr);
-void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
-int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int cpl_index);
-void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
+void mqnic_destroy_rx_ring(struct mqnic_ring **ring_ptr);
+int mqnic_activate_rx_ring(struct mqnic_ring *ring, int cpl_index);
+void mqnic_deactivate_rx_ring(struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring);
 void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_rx_write_head_ptr(struct mqnic_ring *ring);
-void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int index);
-int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int index);
-void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-        int napi_budget);
+void mqnic_free_rx_desc(struct mqnic_ring *ring, int index);
+int mqnic_free_rx_buf(struct mqnic_ring *ring);
+int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index);
+void mqnic_refill_rx_buffers(struct mqnic_ring *ring);
+int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget);
 void mqnic_rx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_rx_cq(struct napi_struct *napi, int budget);
 
modules/mqnic/mqnic_cq.c

@@ -47,6 +47,7 @@ int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_pt
         return -ENOMEM;
 
     ring->ndev = priv->ndev;
+    ring->priv = priv;
 
     ring->size = roundup_pow_of_two(size);
     ring->size_mask = ring->size - 1;
@@ -90,19 +91,18 @@ fail_ring:
     return ret;
 }
 
-void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr)
+void mqnic_destroy_cq_ring(struct mqnic_cq_ring **ring_ptr)
 {
-    struct device *dev = priv->dev;
     struct mqnic_cq_ring *ring = *ring_ptr;
     *ring_ptr = NULL;
 
-    mqnic_deactivate_cq_ring(priv, ring);
+    mqnic_deactivate_cq_ring(ring);
 
-    dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
+    dma_free_coherent(ring->priv->dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
     kfree(ring);
 }
 
-int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int eq_index)
+int mqnic_activate_cq_ring(struct mqnic_cq_ring *ring, int eq_index)
 {
     ring->eq_index = eq_index;
 
@@ -123,7 +123,7 @@ int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
     return 0;
 }
 
-void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring)
+void mqnic_deactivate_cq_ring(struct mqnic_cq_ring *ring)
 {
     // deactivate queue
     iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
modules/mqnic/mqnic_eq.c

@@ -47,6 +47,7 @@ int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_pt
         return -ENOMEM;
 
     ring->ndev = priv->ndev;
+    ring->priv = priv;
 
     ring->size = roundup_pow_of_two(size);
     ring->size_mask = ring->size - 1;
@@ -92,20 +93,19 @@ fail_ring:
     return ret;
 }
 
-void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr)
+void mqnic_destroy_eq_ring(struct mqnic_eq_ring **ring_ptr)
 {
-    struct device *dev = priv->dev;
     struct mqnic_eq_ring *ring = *ring_ptr;
+    struct device *dev = ring->priv->dev;
     *ring_ptr = NULL;
 
-    mqnic_deactivate_eq_ring(priv, ring);
+    mqnic_deactivate_eq_ring(ring);
 
     dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
     kfree(ring);
 }
 
-int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
-        int int_index)
+int mqnic_activate_eq_ring(struct mqnic_eq_ring *ring, int int_index)
 {
     ring->int_index = int_index;
 
@@ -128,7 +128,7 @@ int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
     return 0;
 }
 
-void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring)
+void mqnic_deactivate_eq_ring(struct mqnic_eq_ring *ring)
 {
     // deactivate queue
     iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -162,9 +162,9 @@ void mqnic_arm_eq(struct mqnic_eq_ring *ring)
             ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 }
 
-void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
+void mqnic_process_eq(struct mqnic_eq_ring *eq_ring)
 {
-    struct mqnic_priv *priv = netdev_priv(ndev);
+    struct mqnic_priv *priv = eq_ring->priv;
     struct mqnic_event *event;
     struct mqnic_cq_ring *cq_ring;
     u32 eq_index;
modules/mqnic/mqnic_main.c

@@ -99,7 +99,7 @@ static irqreturn_t mqnic_interrupt(int irq, void *data)
             continue;
 
         if (priv->event_ring[l]->irq == irq) {
-            mqnic_process_eq(priv->ndev, priv->event_ring[l]);
+            mqnic_process_eq(priv->event_ring[l]);
             mqnic_arm_eq(priv->event_ring[l]);
         }
     }
modules/mqnic/mqnic_netdev.c

@@ -46,13 +46,13 @@ static int mqnic_start_port(struct net_device *ndev)
     // set up event queues
     for (k = 0; k < priv->event_queue_count; k++) {
         priv->event_ring[k]->irq = mdev->irq_map[k % mdev->irq_count];
-        mqnic_activate_eq_ring(priv, priv->event_ring[k], k % mdev->irq_count);
+        mqnic_activate_eq_ring(priv->event_ring[k], k % mdev->irq_count);
         mqnic_arm_eq(priv->event_ring[k]);
     }
 
     // set up RX completion queues
     for (k = 0; k < priv->rx_cpl_queue_count; k++) {
-        mqnic_activate_cq_ring(priv, priv->rx_cpl_ring[k], k % priv->event_queue_count);
+        mqnic_activate_cq_ring(priv->rx_cpl_ring[k], k % priv->event_queue_count);
         priv->rx_cpl_ring[k]->ring_index = k;
         priv->rx_cpl_ring[k]->handler = mqnic_rx_irq;
 
@@ -70,12 +70,12 @@ static int mqnic_start_port(struct net_device *ndev)
             priv->rx_ring[k]->page_order = 0;
         else
             priv->rx_ring[k]->page_order = ilog2((ndev->mtu + ETH_HLEN + PAGE_SIZE - 1) / PAGE_SIZE - 1) + 1;
-        mqnic_activate_rx_ring(priv, priv->rx_ring[k], k);
+        mqnic_activate_rx_ring(priv->rx_ring[k], k);
     }
 
     // set up TX completion queues
     for (k = 0; k < priv->tx_cpl_queue_count; k++) {
-        mqnic_activate_cq_ring(priv, priv->tx_cpl_ring[k], k % priv->event_queue_count);
+        mqnic_activate_cq_ring(priv->tx_cpl_ring[k], k % priv->event_queue_count);
         priv->tx_cpl_ring[k]->ring_index = k;
         priv->tx_cpl_ring[k]->handler = mqnic_tx_irq;
 
@@ -88,7 +88,7 @@ static int mqnic_start_port(struct net_device *ndev)
 
     // set up TX queues
     for (k = 0; k < priv->tx_queue_count; k++) {
-        mqnic_activate_tx_ring(priv, priv->tx_ring[k], k);
+        mqnic_activate_tx_ring(priv->tx_ring[k], k);
         priv->tx_ring[k]->tx_queue = netdev_get_tx_queue(ndev, k);
     }
 
@@ -140,11 +140,11 @@ static int mqnic_stop_port(struct net_device *ndev)
 
     // deactivate TX queues
     for (k = 0; k < priv->tx_queue_count; k++)
-        mqnic_deactivate_tx_ring(priv, priv->tx_ring[k]);
+        mqnic_deactivate_tx_ring(priv->tx_ring[k]);
 
     // deactivate TX completion queues
     for (k = 0; k < priv->tx_cpl_queue_count; k++) {
-        mqnic_deactivate_cq_ring(priv, priv->tx_cpl_ring[k]);
+        mqnic_deactivate_cq_ring(priv->tx_cpl_ring[k]);
 
         napi_disable(&priv->tx_cpl_ring[k]->napi);
         netif_napi_del(&priv->tx_cpl_ring[k]->napi);
@@ -152,11 +152,11 @@ static int mqnic_stop_port(struct net_device *ndev)
 
     // deactivate RX queues
     for (k = 0; k < priv->rx_queue_count; k++)
-        mqnic_deactivate_rx_ring(priv, priv->rx_ring[k]);
+        mqnic_deactivate_rx_ring(priv->rx_ring[k]);
 
     // deactivate RX completion queues
     for (k = 0; k < priv->rx_cpl_queue_count; k++) {
-        mqnic_deactivate_cq_ring(priv, priv->rx_cpl_ring[k]);
+        mqnic_deactivate_cq_ring(priv->rx_cpl_ring[k]);
 
         napi_disable(&priv->rx_cpl_ring[k]->napi);
         netif_napi_del(&priv->rx_cpl_ring[k]->napi);
@@ -164,17 +164,17 @@ static int mqnic_stop_port(struct net_device *ndev)
 
     // deactivate event queues
     for (k = 0; k < priv->event_queue_count; k++)
-        mqnic_deactivate_eq_ring(priv, priv->event_ring[k]);
+        mqnic_deactivate_eq_ring(priv->event_ring[k]);
 
     msleep(20);
 
     // free descriptors in TX queues
     for (k = 0; k < priv->tx_queue_count; k++)
-        mqnic_free_tx_buf(priv, priv->tx_ring[k]);
+        mqnic_free_tx_buf(priv->tx_ring[k]);
 
     // free descriptors in RX queues
     for (k = 0; k < priv->rx_queue_count; k++)
-        mqnic_free_rx_buf(priv, priv->rx_ring[k]);
+        mqnic_free_rx_buf(priv->rx_ring[k]);
 
     netif_carrier_off(ndev);
     return 0;
@@ -579,27 +579,27 @@ void mqnic_destroy_netdev(struct net_device *ndev)
     // free rings
     for (k = 0; k < ARRAY_SIZE(priv->event_ring); k++)
        if (priv->event_ring[k])
-            mqnic_destroy_eq_ring(priv, &priv->event_ring[k]);
+            mqnic_destroy_eq_ring(&priv->event_ring[k]);
 
     for (k = 0; k < ARRAY_SIZE(priv->tx_ring); k++)
        if (priv->tx_ring[k])
-            mqnic_destroy_tx_ring(priv, &priv->tx_ring[k]);
+            mqnic_destroy_tx_ring(&priv->tx_ring[k]);
 
     for (k = 0; k < ARRAY_SIZE(priv->tx_cpl_ring); k++)
        if (priv->tx_cpl_ring[k])
-            mqnic_destroy_cq_ring(priv, &priv->tx_cpl_ring[k]);
+            mqnic_destroy_cq_ring(&priv->tx_cpl_ring[k]);
 
     for (k = 0; k < ARRAY_SIZE(priv->rx_ring); k++)
        if (priv->rx_ring[k])
-            mqnic_destroy_rx_ring(priv, &priv->rx_ring[k]);
+            mqnic_destroy_rx_ring(&priv->rx_ring[k]);
 
     for (k = 0; k < ARRAY_SIZE(priv->rx_cpl_ring); k++)
        if (priv->rx_cpl_ring[k])
-            mqnic_destroy_cq_ring(priv, &priv->rx_cpl_ring[k]);
+            mqnic_destroy_cq_ring(&priv->rx_cpl_ring[k]);
 
     for (k = 0; k < ARRAY_SIZE(priv->ports); k++)
        if (priv->ports[k])
-            mqnic_destroy_port(priv, &priv->ports[k]);
+            mqnic_destroy_port(&priv->ports[k]);
 
     free_netdev(ndev);
 }
modules/mqnic/mqnic_port.c

@@ -49,6 +49,7 @@ int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
 
     port->dev = dev;
     port->ndev = priv->ndev;
+    port->priv = priv;
 
     port->index = index;
 
@@ -78,7 +79,7 @@ int mqnic_create_port(
     return 0;
 }
 
-void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr)
+void mqnic_destroy_port(struct mqnic_port **port_ptr)
 {
     struct mqnic_port *port = *port_ptr;
     *port_ptr = NULL;
modules/mqnic/mqnic_rx.c

@@ -46,6 +46,10 @@ int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
     if (!ring)
         return -ENOMEM;
 
+    ring->dev = priv->dev;
+    ring->ndev = priv->ndev;
+    ring->priv = priv;
+
     ring->size = roundup_pow_of_two(size);
     ring->size_mask = ring->size - 1;
     ring->stride = roundup_pow_of_two(stride);
@@ -103,24 +107,22 @@ fail_ring:
     return ret;
 }
 
-void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
+void mqnic_destroy_rx_ring(struct mqnic_ring **ring_ptr)
 {
-    struct device *dev = priv->dev;
     struct mqnic_ring *ring = *ring_ptr;
     *ring_ptr = NULL;
 
-    mqnic_deactivate_rx_ring(priv, ring);
+    mqnic_deactivate_rx_ring(ring);
 
-    mqnic_free_rx_buf(priv, ring);
+    mqnic_free_rx_buf(ring);
 
-    dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
+    dma_free_coherent(ring->dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
     kvfree(ring->rx_info);
     ring->rx_info = NULL;
     kfree(ring);
 }
 
-int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int cpl_index)
+int mqnic_activate_rx_ring(struct mqnic_ring *ring, int cpl_index)
 {
     // deactivate queue
     iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -136,12 +138,12 @@ int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
     iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
             ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 
-    mqnic_refill_rx_buffers(priv, ring);
+    mqnic_refill_rx_buffers(ring);
 
     return 0;
 }
 
-void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
+void mqnic_deactivate_rx_ring(struct mqnic_ring *ring)
 {
     // deactivate queue
     iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
@@ -168,27 +170,26 @@ void mqnic_rx_write_head_ptr(struct mqnic_ring *ring)
     iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
 }
 
-void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int index)
+void mqnic_free_rx_desc(struct mqnic_ring *ring, int index)
 {
     struct mqnic_rx_info *rx_info = &ring->rx_info[index];
     struct page *page = rx_info->page;
 
-    dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
+    dma_unmap_page(ring->dev, dma_unmap_addr(rx_info, dma_addr),
             dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
     rx_info->dma_addr = 0;
     __free_pages(page, rx_info->page_order);
     rx_info->page = NULL;
 }
 
-int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
+int mqnic_free_rx_buf(struct mqnic_ring *ring)
 {
     u32 index;
     int cnt = 0;
 
     while (!mqnic_is_rx_ring_empty(ring)) {
         index = ring->clean_tail_ptr & ring->size_mask;
-        mqnic_free_rx_desc(priv, ring, index);
+        mqnic_free_rx_desc(ring, index);
         ring->clean_tail_ptr++;
         cnt++;
     }
@@ -200,8 +201,7 @@ int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
     return cnt;
 }
 
-int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int index)
+int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index)
 {
     struct mqnic_rx_info *rx_info = &ring->rx_info[index];
     struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);
@@ -211,24 +211,24 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
     dma_addr_t dma_addr;
 
     if (unlikely(page)) {
-        dev_err(priv->dev, "%s: skb not yet processed on port %d",
-                __func__, priv->port);
+        dev_err(ring->dev, "%s: skb not yet processed on port %d",
+                __func__, ring->priv->port);
         return -1;
     }
 
     page = dev_alloc_pages(page_order);
     if (unlikely(!page)) {
-        dev_err(priv->dev, "%s: failed to allocate memory on port %d",
-                __func__, priv->port);
+        dev_err(ring->dev, "%s: failed to allocate memory on port %d",
+                __func__, ring->priv->port);
         return -1;
     }
 
     // map page
-    dma_addr = dma_map_page(priv->dev, page, 0, len, PCI_DMA_FROMDEVICE);
+    dma_addr = dma_map_page(ring->dev, page, 0, len, PCI_DMA_FROMDEVICE);
 
-    if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
-        dev_err(priv->dev, "%s: DMA mapping failed on port %d",
-                __func__, priv->port);
+    if (unlikely(dma_mapping_error(ring->dev, dma_addr))) {
+        dev_err(ring->dev, "%s: DMA mapping failed on port %d",
+                __func__, ring->priv->port);
         __free_pages(page, page_order);
         return -1;
     }
@@ -247,7 +247,7 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
     return 0;
 }
 
-void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
+void mqnic_refill_rx_buffers(struct mqnic_ring *ring)
 {
     u32 missing = ring->size - (ring->head_ptr - ring->clean_tail_ptr);
 
@@ -255,7 +255,7 @@ void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
         return;
 
     for (; missing-- > 0;) {
-        if (mqnic_prepare_rx_desc(priv, ring, ring->head_ptr & ring->size_mask))
+        if (mqnic_prepare_rx_desc(ring, ring->head_ptr & ring->size_mask))
             break;
         ring->head_ptr++;
     }
@@ -265,10 +265,10 @@ void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
     mqnic_rx_write_head_ptr(ring);
 }
 
-int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-        int napi_budget)
+int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
 {
-    struct mqnic_priv *priv = netdev_priv(ndev);
+    struct mqnic_priv *priv = cq_ring->priv;
+    struct net_device *ndev = priv->ndev;
     struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
     struct mqnic_rx_info *rx_info;
     struct mqnic_cpl *cpl;
@@ -382,14 +382,14 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
     WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);
 
     // replenish buffers
-    mqnic_refill_rx_buffers(priv, ring);
+    mqnic_refill_rx_buffers(ring);
 
     return done;
 }
 
 void mqnic_rx_irq(struct mqnic_cq_ring *cq)
 {
-    struct mqnic_priv *priv = netdev_priv(cq->ndev);
+    struct mqnic_priv *priv = cq->priv;
 
     if (likely(priv->port_up))
         napi_schedule_irqoff(&cq->napi);
@@ -400,10 +400,9 @@ void mqnic_rx_irq(struct mqnic_cq_ring *cq)
 int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
 {
     struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
-    struct net_device *ndev = cq_ring->ndev;
     int done;
 
-    done = mqnic_process_rx_cq(ndev, cq_ring, budget);
+    done = mqnic_process_rx_cq(cq_ring, budget);
 
     if (done == budget)
         return done;
modules/mqnic/mqnic_tx.c

@@ -47,6 +47,10 @@ int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
     if (!ring)
         return -ENOMEM;
 
+    ring->dev = priv->dev;
+    ring->ndev = priv->ndev;
+    ring->priv = priv;
+
     ring->size = roundup_pow_of_two(size);
     ring->full_size = ring->size >> 1;
     ring->size_mask = ring->size - 1;
@@ -105,24 +109,22 @@ fail_ring:
     return ret;
 }
 
-void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
+void mqnic_destroy_tx_ring(struct mqnic_ring **ring_ptr)
 {
-    struct device *dev = priv->dev;
     struct mqnic_ring *ring = *ring_ptr;
     *ring_ptr = NULL;
 
-    mqnic_deactivate_tx_ring(priv, ring);
+    mqnic_deactivate_tx_ring(ring);
 
-    mqnic_free_tx_buf(priv, ring);
+    mqnic_free_tx_buf(ring);
 
-    dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
+    dma_free_coherent(ring->dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
     kvfree(ring->tx_info);
     ring->tx_info = NULL;
     kfree(ring);
 }
 
-int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int cpl_index)
+int mqnic_activate_tx_ring(struct mqnic_ring *ring, int cpl_index)
 {
     // deactivate queue
     iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -141,7 +143,7 @@ int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
     return 0;
 }
 
-void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
+void mqnic_deactivate_tx_ring(struct mqnic_ring *ring)
 {
     // deactivate queue
     iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
@@ -168,8 +170,7 @@ void mqnic_tx_write_head_ptr(struct mqnic_ring *ring)
     iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
 }
 
-void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        int index, int napi_budget)
+void mqnic_free_tx_desc(struct mqnic_ring *ring, int index, int napi_budget)
 {
     struct mqnic_tx_info *tx_info = &ring->tx_info[index];
     struct sk_buff *skb = tx_info->skb;
@@ -177,27 +178,27 @@ void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
 
     prefetchw(&skb->users);
 
-    dma_unmap_single(priv->dev, dma_unmap_addr(tx_info, dma_addr),
+    dma_unmap_single(ring->dev, dma_unmap_addr(tx_info, dma_addr),
            dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
    dma_unmap_addr_set(tx_info, dma_addr, 0);
 
     // unmap frags
     for (i = 0; i < tx_info->frag_count; i++)
-        dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
+        dma_unmap_page(ring->dev, tx_info->frags[i].dma_addr,
                tx_info->frags[i].len, PCI_DMA_TODEVICE);
 
     napi_consume_skb(skb, napi_budget);
     tx_info->skb = NULL;
 }
 
-int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
+int mqnic_free_tx_buf(struct mqnic_ring *ring)
 {
     u32 index;
     int cnt = 0;
 
     while (!mqnic_is_tx_ring_empty(ring)) {
         index = ring->clean_tail_ptr & ring->size_mask;
-        mqnic_free_tx_desc(priv, ring, index, 0);
+        mqnic_free_tx_desc(ring, index, 0);
         ring->clean_tail_ptr++;
         cnt++;
     }
@@ -209,10 +210,9 @@ int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
     return cnt;
 }
 
-int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-        int napi_budget)
+int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
 {
-    struct mqnic_priv *priv = netdev_priv(ndev);
+    struct mqnic_priv *priv = cq_ring->priv;
     struct mqnic_ring *ring = priv->tx_ring[cq_ring->ring_index];
     struct mqnic_tx_info *tx_info;
     struct mqnic_cpl *cpl;
@@ -251,7 +251,7 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
             skb_tstamp_tx(tx_info->skb, &hwts);
         }
         // free TX descriptor
-        mqnic_free_tx_desc(priv, ring, ring_index, napi_budget);
+        mqnic_free_tx_desc(ring, ring_index, napi_budget);
 
         packets++;
         bytes += le16_to_cpu(cpl->len);
@@ -298,7 +298,7 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 
 void mqnic_tx_irq(struct mqnic_cq_ring *cq)
 {
-    struct mqnic_priv *priv = netdev_priv(cq->ndev);
+    struct mqnic_priv *priv = cq->priv;
 
     if (likely(priv->port_up))
         napi_schedule_irqoff(&cq->napi);
@@ -309,10 +309,9 @@ void mqnic_tx_irq(struct mqnic_cq_ring *cq)
 int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
 {
     struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
-    struct net_device *ndev = cq_ring->ndev;
     int done;
 
-    done = mqnic_process_tx_cq(ndev, cq_ring, budget);
+    done = mqnic_process_tx_cq(cq_ring, budget);
 
     if (done == budget)
         return done;
@@ -324,9 +323,8 @@ int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
     return done;
 }
 
-static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
-        struct mqnic_tx_info *tx_info, struct mqnic_desc *tx_desc,
-        struct sk_buff *skb)
+static bool mqnic_map_skb(struct mqnic_ring *ring, struct mqnic_tx_info *tx_info,
+        struct mqnic_desc *tx_desc, struct sk_buff *skb)
 {
     struct skb_shared_info *shinfo = skb_shinfo(skb);
     const skb_frag_t *frag;
@@ -341,8 +339,8 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
     for (i = 0; i < shinfo->nr_frags; i++) {
         frag = &shinfo->frags[i];
         len = skb_frag_size(frag);
-        dma_addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
+        dma_addr = skb_frag_dma_map(ring->dev, frag, 0, len, DMA_TO_DEVICE);
+        if (unlikely(dma_mapping_error(ring->dev, dma_addr)))
            // mapping failed
            goto map_error;
 
@@ -363,9 +361,9 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
 
     // map skb
     len = skb_headlen(skb);
-    dma_addr = dma_map_single(priv->dev, skb->data, len, PCI_DMA_TODEVICE);
+    dma_addr = dma_map_single(ring->dev, skb->data, len, PCI_DMA_TODEVICE);
 
-    if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
+    if (unlikely(dma_mapping_error(ring->dev, dma_addr)))
        // mapping failed
        goto map_error;
 
@@ -380,11 +378,11 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
     return true;
 
 map_error:
-    dev_err(priv->dev, "%s: DMA mapping failed", __func__);
+    dev_err(ring->dev, "%s: DMA mapping failed", __func__);
 
     // unmap frags
     for (i = 0; i < tx_info->frag_count; i++)
-        dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
+        dma_unmap_page(ring->dev, tx_info->frags[i].dma_addr,
                tx_info->frags[i].len, PCI_DMA_TODEVICE);
 
     // update tx_info
@@ -465,7 +463,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     }
 
     // map skb
-    if (!mqnic_map_skb(priv, ring, tx_info, tx_desc, skb))
+    if (!mqnic_map_skb(ring, tx_info, tx_desc, skb))
        // map failed
        goto tx_drop_count;
 
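A related idiom visible in the destroy paths above: each mqnic_destroy_*_ring() takes a pointer-to-pointer, copies out the ring, and clears the caller's slot before freeing anything, which is what lets mqnic_destroy_netdev() pass &priv->rx_ring[k] and be left with a NULL array entry. A standalone sketch of that consume-and-clear contract, with illustrative types rather than driver code:

#include <stdlib.h>

struct ring {
    int *buf;
};

/* Consume-and-clear destroy: after the call the caller's pointer is NULL,
 * so a guard like "if (ptr) ring_destroy(&ptr);" stays safe on reuse. */
static void ring_destroy(struct ring **ring_ptr)
{
    struct ring *ring = *ring_ptr;
    *ring_ptr = NULL;          /* clear the caller's slot first */

    free(ring->buf);           /* then release the ring's resources */
    free(ring);
}

int main(void)
{
    struct ring *ring = calloc(1, sizeof(*ring));
    if (!ring)
        return 1;
    ring->buf = calloc(16, sizeof(*ring->buf));

    ring_destroy(&ring);       /* ring is NULL after this call */
    return ring == NULL ? 0 : 1;
}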