// SPDX-License-Identifier: BSD-2-Clause-Views
/*
 * Copyright 2019-2021, The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of The Regents of the
 * University of California.
 */

#include "mqnic.h"
int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
		int size, int stride, int index, u8 __iomem *hw_addr)
{
	struct device *dev = priv->dev;
	struct mqnic_eq_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	ring->ndev = priv->ndev;
	ring->priv = priv;

	ring->ring_index = index;
	ring->active = 0;

	ring->size = roundup_pow_of_two(size);
	ring->size_mask = ring->size - 1;
	ring->stride = roundup_pow_of_two(stride);

	ring->buf_size = ring->size * ring->stride;
	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
			&ring->buf_dma_addr, GFP_KERNEL);
	if (!ring->buf) {
		ret = -ENOMEM;
		goto fail_ring;
	}

	ring->hw_addr = hw_addr;
	ring->hw_ptr_mask = 0xffff;
	ring->hw_head_ptr = hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG;
	ring->hw_tail_ptr = hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG;

	ring->head_ptr = 0;
	ring->tail_ptr = 0;

	// deactivate queue
	iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
	// set base address
	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
	// set interrupt index
	iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
	// set pointers
	iowrite32(ring->head_ptr & ring->hw_ptr_mask,
			ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
	iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
			ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
	// set size
	iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);

	*ring_ptr = ring;
	return 0;

fail_ring:
	kfree(ring);
	*ring_ptr = NULL;
	return ret;
}
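
// Tear down an EQ ring: deactivate it if still active, then release the
// DMA buffer and the ring structure. *ring_ptr is cleared first so the
// caller is not left holding a stale pointer.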
void mqnic_destroy_eq_ring(struct mqnic_eq_ring **ring_ptr)
{
	struct mqnic_eq_ring *ring = *ring_ptr;
	struct device *dev = ring->priv->dev;

	*ring_ptr = NULL;

	if (ring->active)
		mqnic_deactivate_eq_ring(ring);

	dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
	kfree(ring);
}
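
// Program the hardware with the ring's base address and pointers, bind the
// queue to the given interrupt index, and set the active bit in the
// log-size register. Safe to call on an already-active ring: it is
// deactivated first, then reprogrammed.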
int mqnic_activate_eq_ring(struct mqnic_eq_ring *ring, int int_index)
{
	if (ring->active)
		mqnic_deactivate_eq_ring(ring);

	ring->int_index = int_index;

	// deactivate queue
	iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
	// set base address
	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
	// set interrupt index
	iowrite32(int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
	// set pointers
	iowrite32(ring->head_ptr & ring->hw_ptr_mask,
			ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
	iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
			ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
	// set size and activate queue
	iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK,
			ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);

	ring->active = 1;

	return 0;
}
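
// Clear the active bit (preserving the log size) and rewrite the interrupt
// index without the arm bit, so the queue stops accepting events and no
// further interrupts fire.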
void mqnic_deactivate_eq_ring(struct mqnic_eq_ring *ring)
{
	// deactivate queue
	iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
	// disarm queue
	iowrite32(ring->int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);

	ring->active = 0;
}
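
// The ring is empty when the software pointers match: every event written
// by hardware (head) has been consumed by the driver (tail).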
bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring)
{
	return ring->head_ptr == ring->tail_ptr;
}
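
// Pointers increase monotonically and are only masked on access, so the
// fill level is head - tail; the ring is full once that reaches the size.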
bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring)
{
	return ring->head_ptr - ring->tail_ptr >= ring->size;
}
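
// Fold the 16-bit hardware head pointer into the full-width software head
// pointer: the masked difference is the number of newly written events.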
void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring)
{
	ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
}
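
// Publish the software tail pointer to hardware, returning consumed ring
// entries for reuse.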
void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring)
{
	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
}
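
// Re-arm the EQ: write the interrupt index with the arm bit set so the
// next event raises an interrupt.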
void mqnic_arm_eq(struct mqnic_eq_ring *ring)
{
	iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK,
			ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
}
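
// Drain the EQ: walk events from the tail up to the hardware head, dispatch
// completion events to the matching TX/RX completion queue handlers, then
// hand the consumed entries back to hardware via the tail pointer.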
void mqnic_process_eq(struct mqnic_eq_ring *eq_ring)
{
	struct mqnic_priv *priv = eq_ring->priv;
	struct mqnic_event *event;
	struct mqnic_cq_ring *cq_ring;
	u32 eq_index;
	u32 eq_tail_ptr;
	int done = 0;

	if (unlikely(!priv->port_up))
		return;

	// read head pointer from NIC
	mqnic_eq_read_head_ptr(eq_ring);

	eq_tail_ptr = eq_ring->tail_ptr;
	eq_index = eq_tail_ptr & eq_ring->size_mask;

	while (eq_ring->head_ptr != eq_tail_ptr) {
		event = (struct mqnic_event *)(eq_ring->buf + eq_index * eq_ring->stride);

		if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_TX_CPL) {
			// transmit completion event
			if (unlikely(le16_to_cpu(event->source) >= priv->tx_cpl_queue_count)) {
				dev_err(priv->dev, "%s on port %d: unknown event source %d (index %d, type %d)\n",
						__func__, priv->index, le16_to_cpu(event->source), eq_index,
						le16_to_cpu(event->type));
				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
						event, MQNIC_EVENT_SIZE, true);
			} else {
				cq_ring = priv->tx_cpl_ring[le16_to_cpu(event->source)];
				if (likely(cq_ring && cq_ring->handler))
					cq_ring->handler(cq_ring);
			}
		} else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) {
			// receive completion event
			if (unlikely(le16_to_cpu(event->source) >= priv->rx_cpl_queue_count)) {
				dev_err(priv->dev, "%s on port %d: unknown event source %d (index %d, type %d)\n",
						__func__, priv->index, le16_to_cpu(event->source), eq_index,
						le16_to_cpu(event->type));
				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
						event, MQNIC_EVENT_SIZE, true);
			} else {
				cq_ring = priv->rx_cpl_ring[le16_to_cpu(event->source)];
				if (likely(cq_ring && cq_ring->handler))
					cq_ring->handler(cq_ring);
			}
		} else {
			dev_err(priv->dev, "%s on port %d: unknown event type %d (index %d, source %d)\n",
					__func__, priv->index, le16_to_cpu(event->type), eq_index,
					le16_to_cpu(event->source));
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
					event, MQNIC_EVENT_SIZE, true);
		}

		// count events handled in this pass
		done++;

		eq_tail_ptr++;
		eq_index = eq_tail_ptr & eq_ring->size_mask;
	}

	// update EQ tail
	eq_ring->tail_ptr = eq_tail_ptr;
	mqnic_eq_write_tail_ptr(eq_ring);
}