1
0
mirror of https://github.com/corundum/corundum.git synced 2025-01-16 08:12:53 +08:00

Add PCIe interface shim for Xilinx UltraScale

This commit is contained in:
Alex Forencich 2021-08-04 01:03:31 -07:00
parent b95f030408
commit 836d14bad6
8 changed files with 3049 additions and 0 deletions

520
rtl/pcie_us_if.v Normal file
View File

@ -0,0 +1,520 @@
/*
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* Xilinx UltraScale PCIe interface adapter
*/
module pcie_us_if #
(
// Width of PCIe AXI stream interfaces in bits
parameter AXIS_PCIE_DATA_WIDTH = 256,
// PCIe AXI stream tkeep signal width (words per cycle)
parameter AXIS_PCIE_KEEP_WIDTH = (AXIS_PCIE_DATA_WIDTH/32),
// PCIe AXI stream RC tuser signal width
parameter AXIS_PCIE_RC_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 75 : 161,
// PCIe AXI stream RQ tuser signal width
parameter AXIS_PCIE_RQ_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 60 : 137,
// PCIe AXI stream CQ tuser signal width
parameter AXIS_PCIE_CQ_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 85 : 183,
// PCIe AXI stream CC tuser signal width
parameter AXIS_PCIE_CC_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 33 : 81,
// RQ sequence number width
parameter RQ_SEQ_NUM_WIDTH = AXIS_PCIE_RQ_USER_WIDTH == 60 ? 4 : 6,
// TLP segment count
parameter TLP_SEG_COUNT = 1,
// TLP segment data width
parameter TLP_SEG_DATA_WIDTH = AXIS_PCIE_DATA_WIDTH/TLP_SEG_COUNT,
// TLP segment strobe width
parameter TLP_SEG_STRB_WIDTH = TLP_SEG_DATA_WIDTH/32,
// TLP segment header width
parameter TLP_SEG_HDR_WIDTH = 128,
// TX sequence number count
parameter TX_SEQ_NUM_COUNT = AXIS_PCIE_DATA_WIDTH < 512 ? 1 : 2,
// TX sequence number width
parameter TX_SEQ_NUM_WIDTH = RQ_SEQ_NUM_WIDTH-1,
// Number of PFs
parameter PF_COUNT = 1,
// Number of VFs
parameter VF_COUNT = 0,
// Total number of functions
parameter F_COUNT = PF_COUNT+VF_COUNT,
// Read extended tag enable bit
parameter READ_EXT_TAG_ENABLE = 1,
// Read max read request size field
parameter READ_MAX_READ_REQ_SIZE = 1,
// Read max payload size field
parameter READ_MAX_PAYLOAD_SIZE = 1,
// enable MSI support
parameter MSI_ENABLE = 1,
// MSI vector count
parameter MSI_COUNT = 32
)
(
input wire clk,
input wire rst,
/*
* AXI input (RC)
*/
input wire [AXIS_PCIE_DATA_WIDTH-1:0] s_axis_rc_tdata,
input wire [AXIS_PCIE_KEEP_WIDTH-1:0] s_axis_rc_tkeep,
input wire s_axis_rc_tvalid,
output wire s_axis_rc_tready,
input wire s_axis_rc_tlast,
input wire [AXIS_PCIE_RC_USER_WIDTH-1:0] s_axis_rc_tuser,
/*
* AXI output (RQ)
*/
output wire [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_rq_tdata,
output wire [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_rq_tkeep,
output wire m_axis_rq_tvalid,
input wire m_axis_rq_tready,
output wire m_axis_rq_tlast,
output wire [AXIS_PCIE_RQ_USER_WIDTH-1:0] m_axis_rq_tuser,
/*
* AXI input (CQ)
*/
input wire [AXIS_PCIE_DATA_WIDTH-1:0] s_axis_cq_tdata,
input wire [AXIS_PCIE_KEEP_WIDTH-1:0] s_axis_cq_tkeep,
input wire s_axis_cq_tvalid,
output wire s_axis_cq_tready,
input wire s_axis_cq_tlast,
input wire [AXIS_PCIE_CQ_USER_WIDTH-1:0] s_axis_cq_tuser,
/*
* AXI output (CC)
*/
output wire [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_cc_tdata,
output wire [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_cc_tkeep,
output wire m_axis_cc_tvalid,
input wire m_axis_cc_tready,
output wire m_axis_cc_tlast,
output wire [AXIS_PCIE_CC_USER_WIDTH-1:0] m_axis_cc_tuser,
/*
* Transmit sequence number input
*/
input wire [RQ_SEQ_NUM_WIDTH-1:0] s_axis_rq_seq_num_0,
input wire s_axis_rq_seq_num_valid_0,
input wire [RQ_SEQ_NUM_WIDTH-1:0] s_axis_rq_seq_num_1,
input wire s_axis_rq_seq_num_valid_1,
/*
* Configuration interface
*/
output wire [9:0] cfg_mgmt_addr,
output wire [7:0] cfg_mgmt_function_number,
output wire cfg_mgmt_write,
output wire [31:0] cfg_mgmt_write_data,
output wire [3:0] cfg_mgmt_byte_enable,
output wire cfg_mgmt_read,
input wire [31:0] cfg_mgmt_read_data,
input wire cfg_mgmt_read_write_done,
/*
* Interrupt interface
*/
input wire [3:0] cfg_interrupt_msi_enable,
input wire [7:0] cfg_interrupt_msi_vf_enable,
input wire [11:0] cfg_interrupt_msi_mmenable,
input wire cfg_interrupt_msi_mask_update,
input wire [31:0] cfg_interrupt_msi_data,
output wire [3:0] cfg_interrupt_msi_select,
output wire [31:0] cfg_interrupt_msi_int,
output wire [31:0] cfg_interrupt_msi_pending_status,
output wire cfg_interrupt_msi_pending_status_data_enable,
output wire [3:0] cfg_interrupt_msi_pending_status_function_num,
input wire cfg_interrupt_msi_sent,
input wire cfg_interrupt_msi_fail,
output wire [2:0] cfg_interrupt_msi_attr,
output wire cfg_interrupt_msi_tph_present,
output wire [1:0] cfg_interrupt_msi_tph_type,
output wire [8:0] cfg_interrupt_msi_tph_st_tag,
output wire [3:0] cfg_interrupt_msi_function_number,
/*
* TLP output (request to BAR)
*/
output wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] rx_req_tlp_data,
output wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] rx_req_tlp_hdr,
output wire [TLP_SEG_COUNT*3-1:0] rx_req_tlp_bar_id,
output wire [TLP_SEG_COUNT*8-1:0] rx_req_tlp_func_num,
output wire [TLP_SEG_COUNT-1:0] rx_req_tlp_valid,
output wire [TLP_SEG_COUNT-1:0] rx_req_tlp_sop,
output wire [TLP_SEG_COUNT-1:0] rx_req_tlp_eop,
input wire rx_req_tlp_ready,
/*
* TLP output (completion to DMA)
*/
output wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] rx_cpl_tlp_data,
output wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] rx_cpl_tlp_hdr,
output wire [TLP_SEG_COUNT*4-1:0] rx_cpl_tlp_error,
output wire [TLP_SEG_COUNT-1:0] rx_cpl_tlp_valid,
output wire [TLP_SEG_COUNT-1:0] rx_cpl_tlp_sop,
output wire [TLP_SEG_COUNT-1:0] rx_cpl_tlp_eop,
input wire rx_cpl_tlp_ready,
/*
* TLP input (read request from DMA)
*/
input wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] tx_rd_req_tlp_hdr,
input wire [TLP_SEG_COUNT*TX_SEQ_NUM_WIDTH-1:0] tx_rd_req_tlp_seq,
input wire [TLP_SEG_COUNT-1:0] tx_rd_req_tlp_valid,
input wire [TLP_SEG_COUNT-1:0] tx_rd_req_tlp_sop,
input wire [TLP_SEG_COUNT-1:0] tx_rd_req_tlp_eop,
output wire tx_rd_req_tlp_ready,
/*
* Transmit sequence number output (DMA read request)
*/
output wire [TX_SEQ_NUM_COUNT*TX_SEQ_NUM_WIDTH-1:0] m_axis_rd_req_tx_seq_num,
output wire [TX_SEQ_NUM_COUNT-1:0] m_axis_rd_req_tx_seq_num_valid,
/*
* TLP input (write request from DMA)
*/
input wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] tx_wr_req_tlp_data,
input wire [TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH-1:0] tx_wr_req_tlp_strb,
input wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] tx_wr_req_tlp_hdr,
input wire [TLP_SEG_COUNT*TX_SEQ_NUM_WIDTH-1:0] tx_wr_req_tlp_seq,
input wire [TLP_SEG_COUNT-1:0] tx_wr_req_tlp_valid,
input wire [TLP_SEG_COUNT-1:0] tx_wr_req_tlp_sop,
input wire [TLP_SEG_COUNT-1:0] tx_wr_req_tlp_eop,
output wire tx_wr_req_tlp_ready,
/*
* Transmit sequence number output (DMA write request)
*/
output wire [TX_SEQ_NUM_COUNT*TX_SEQ_NUM_WIDTH-1:0] m_axis_wr_req_tx_seq_num,
output wire [TX_SEQ_NUM_COUNT-1:0] m_axis_wr_req_tx_seq_num_valid,
/*
* TLP input (completion from BAR)
*/
input wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] tx_cpl_tlp_data,
input wire [TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH-1:0] tx_cpl_tlp_strb,
input wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] tx_cpl_tlp_hdr,
input wire [TLP_SEG_COUNT-1:0] tx_cpl_tlp_valid,
input wire [TLP_SEG_COUNT-1:0] tx_cpl_tlp_sop,
input wire [TLP_SEG_COUNT-1:0] tx_cpl_tlp_eop,
output wire tx_cpl_tlp_ready,
/*
* Configuration outputs
*/
output wire [F_COUNT-1:0] ext_tag_enable,
output wire [F_COUNT*3-1:0] max_read_request_size,
output wire [F_COUNT*3-1:0] max_payload_size,
/*
* MSI request inputs
*/
input wire [MSI_COUNT-1:0] msi_irq
);
pcie_us_if_rc #(
.AXIS_PCIE_DATA_WIDTH(AXIS_PCIE_DATA_WIDTH),
.AXIS_PCIE_KEEP_WIDTH(AXIS_PCIE_KEEP_WIDTH),
.AXIS_PCIE_RC_USER_WIDTH(AXIS_PCIE_RC_USER_WIDTH),
.TLP_SEG_COUNT(TLP_SEG_COUNT),
.TLP_SEG_DATA_WIDTH(TLP_SEG_DATA_WIDTH),
.TLP_SEG_STRB_WIDTH(TLP_SEG_STRB_WIDTH),
.TLP_SEG_HDR_WIDTH(TLP_SEG_HDR_WIDTH)
)
pcie_us_if_rc_inst
(
.clk(clk),
.rst(rst),
/*
* AXI input (RC)
*/
.s_axis_rc_tdata(s_axis_rc_tdata),
.s_axis_rc_tkeep(s_axis_rc_tkeep),
.s_axis_rc_tvalid(s_axis_rc_tvalid),
.s_axis_rc_tready(s_axis_rc_tready),
.s_axis_rc_tlast(s_axis_rc_tlast),
.s_axis_rc_tuser(s_axis_rc_tuser),
/*
* TLP output (completion to DMA)
*/
.rx_cpl_tlp_data(rx_cpl_tlp_data),
.rx_cpl_tlp_hdr(rx_cpl_tlp_hdr),
.rx_cpl_tlp_error(rx_cpl_tlp_error),
.rx_cpl_tlp_valid(rx_cpl_tlp_valid),
.rx_cpl_tlp_sop(rx_cpl_tlp_sop),
.rx_cpl_tlp_eop(rx_cpl_tlp_eop),
.rx_cpl_tlp_ready(rx_cpl_tlp_ready)
);
pcie_us_if_rq #(
.AXIS_PCIE_DATA_WIDTH(AXIS_PCIE_DATA_WIDTH),
.AXIS_PCIE_KEEP_WIDTH(AXIS_PCIE_KEEP_WIDTH),
.AXIS_PCIE_RQ_USER_WIDTH(AXIS_PCIE_RQ_USER_WIDTH),
.RQ_SEQ_NUM_WIDTH(RQ_SEQ_NUM_WIDTH),
.TLP_SEG_COUNT(TLP_SEG_COUNT),
.TLP_SEG_DATA_WIDTH(TLP_SEG_DATA_WIDTH),
.TLP_SEG_STRB_WIDTH(TLP_SEG_STRB_WIDTH),
.TLP_SEG_HDR_WIDTH(TLP_SEG_HDR_WIDTH),
.TX_SEQ_NUM_COUNT(TX_SEQ_NUM_COUNT),
.TX_SEQ_NUM_WIDTH(TX_SEQ_NUM_WIDTH)
)
pcie_us_if_rq_inst
(
.clk(clk),
.rst(rst),
/*
* AXI output (RQ)
*/
.m_axis_rq_tdata(m_axis_rq_tdata),
.m_axis_rq_tkeep(m_axis_rq_tkeep),
.m_axis_rq_tvalid(m_axis_rq_tvalid),
.m_axis_rq_tready(m_axis_rq_tready),
.m_axis_rq_tlast(m_axis_rq_tlast),
.m_axis_rq_tuser(m_axis_rq_tuser),
/*
* Transmit sequence number input
*/
.s_axis_rq_seq_num_0(s_axis_rq_seq_num_0),
.s_axis_rq_seq_num_valid_0(s_axis_rq_seq_num_valid_0),
.s_axis_rq_seq_num_1(s_axis_rq_seq_num_1),
.s_axis_rq_seq_num_valid_1(s_axis_rq_seq_num_valid_1),
/*
* TLP input (read request from DMA)
*/
.tx_rd_req_tlp_hdr(tx_rd_req_tlp_hdr),
.tx_rd_req_tlp_seq(tx_rd_req_tlp_seq),
.tx_rd_req_tlp_valid(tx_rd_req_tlp_valid),
.tx_rd_req_tlp_sop(tx_rd_req_tlp_sop),
.tx_rd_req_tlp_eop(tx_rd_req_tlp_eop),
.tx_rd_req_tlp_ready(tx_rd_req_tlp_ready),
/*
* Transmit sequence number output (DMA read request)
*/
.m_axis_rd_req_tx_seq_num(m_axis_rd_req_tx_seq_num),
.m_axis_rd_req_tx_seq_num_valid(m_axis_rd_req_tx_seq_num_valid),
/*
* TLP input (write request from DMA)
*/
.tx_wr_req_tlp_data(tx_wr_req_tlp_data),
.tx_wr_req_tlp_strb(tx_wr_req_tlp_strb),
.tx_wr_req_tlp_hdr(tx_wr_req_tlp_hdr),
.tx_wr_req_tlp_seq(tx_wr_req_tlp_seq),
.tx_wr_req_tlp_valid(tx_wr_req_tlp_valid),
.tx_wr_req_tlp_sop(tx_wr_req_tlp_sop),
.tx_wr_req_tlp_eop(tx_wr_req_tlp_eop),
.tx_wr_req_tlp_ready(tx_wr_req_tlp_ready),
/*
* Transmit sequence number output (DMA write request)
*/
.m_axis_wr_req_tx_seq_num(m_axis_wr_req_tx_seq_num),
.m_axis_wr_req_tx_seq_num_valid(m_axis_wr_req_tx_seq_num_valid)
);
pcie_us_if_cq #(
.AXIS_PCIE_DATA_WIDTH(AXIS_PCIE_DATA_WIDTH),
.AXIS_PCIE_KEEP_WIDTH(AXIS_PCIE_KEEP_WIDTH),
.AXIS_PCIE_CQ_USER_WIDTH(AXIS_PCIE_CQ_USER_WIDTH),
.TLP_SEG_COUNT(TLP_SEG_COUNT),
.TLP_SEG_DATA_WIDTH(TLP_SEG_DATA_WIDTH),
.TLP_SEG_STRB_WIDTH(TLP_SEG_STRB_WIDTH),
.TLP_SEG_HDR_WIDTH(TLP_SEG_HDR_WIDTH)
)
pcie_us_if_cq_inst
(
.clk(clk),
.rst(rst),
/*
* AXI input (CQ)
*/
.s_axis_cq_tdata(s_axis_cq_tdata),
.s_axis_cq_tkeep(s_axis_cq_tkeep),
.s_axis_cq_tvalid(s_axis_cq_tvalid),
.s_axis_cq_tready(s_axis_cq_tready),
.s_axis_cq_tlast(s_axis_cq_tlast),
.s_axis_cq_tuser(s_axis_cq_tuser),
/*
* TLP output (request to BAR)
*/
.rx_req_tlp_data(rx_req_tlp_data),
.rx_req_tlp_hdr(rx_req_tlp_hdr),
.rx_req_tlp_bar_id(rx_req_tlp_bar_id),
.rx_req_tlp_func_num(rx_req_tlp_func_num),
.rx_req_tlp_valid(rx_req_tlp_valid),
.rx_req_tlp_sop(rx_req_tlp_sop),
.rx_req_tlp_eop(rx_req_tlp_eop),
.rx_req_tlp_ready(rx_req_tlp_ready)
);
pcie_us_if_cc #(
.AXIS_PCIE_DATA_WIDTH(AXIS_PCIE_DATA_WIDTH),
.AXIS_PCIE_KEEP_WIDTH(AXIS_PCIE_KEEP_WIDTH),
.AXIS_PCIE_CC_USER_WIDTH(AXIS_PCIE_CC_USER_WIDTH),
.TLP_SEG_COUNT(TLP_SEG_COUNT),
.TLP_SEG_DATA_WIDTH(TLP_SEG_DATA_WIDTH),
.TLP_SEG_STRB_WIDTH(TLP_SEG_STRB_WIDTH),
.TLP_SEG_HDR_WIDTH(TLP_SEG_HDR_WIDTH)
)
pcie_us_if_cc_inst
(
.clk(clk),
.rst(rst),
/*
* AXI output (CC)
*/
.m_axis_cc_tdata(m_axis_cc_tdata),
.m_axis_cc_tkeep(m_axis_cc_tkeep),
.m_axis_cc_tvalid(m_axis_cc_tvalid),
.m_axis_cc_tready(m_axis_cc_tready),
.m_axis_cc_tlast(m_axis_cc_tlast),
.m_axis_cc_tuser(m_axis_cc_tuser),
/*
* TLP input (completion from BAR)
*/
.tx_cpl_tlp_data(tx_cpl_tlp_data),
.tx_cpl_tlp_strb(tx_cpl_tlp_strb),
.tx_cpl_tlp_hdr(tx_cpl_tlp_hdr),
.tx_cpl_tlp_valid(tx_cpl_tlp_valid),
.tx_cpl_tlp_sop(tx_cpl_tlp_sop),
.tx_cpl_tlp_eop(tx_cpl_tlp_eop),
.tx_cpl_tlp_ready(tx_cpl_tlp_ready)
);
generate
if (READ_EXT_TAG_ENABLE || READ_MAX_READ_REQ_SIZE || READ_MAX_PAYLOAD_SIZE) begin
pcie_us_cfg #(
.PF_COUNT(PF_COUNT),
.VF_COUNT(VF_COUNT),
.VF_OFFSET(AXIS_PCIE_RQ_USER_WIDTH == 60 ? 64 : 4),
.PCIE_CAP_OFFSET(AXIS_PCIE_RQ_USER_WIDTH == 60 ? 12'h0C0 : 12'h070)
)
pcie_us_cfg_inst (
.clk(clk),
.rst(rst),
/*
* Configuration outputs
*/
.ext_tag_enable(ext_tag_enable),
.max_read_request_size(max_read_request_size),
.max_payload_size(max_payload_size),
/*
* Interface to Ultrascale PCIe IP core
*/
.cfg_mgmt_addr(cfg_mgmt_addr),
.cfg_mgmt_function_number(cfg_mgmt_function_number),
.cfg_mgmt_write(cfg_mgmt_write),
.cfg_mgmt_write_data(cfg_mgmt_write_data),
.cfg_mgmt_byte_enable(cfg_mgmt_byte_enable),
.cfg_mgmt_read(cfg_mgmt_read),
.cfg_mgmt_read_data(cfg_mgmt_read_data),
.cfg_mgmt_read_write_done(cfg_mgmt_read_write_done)
);
end else begin
assign cfg_mgmt_addr = 0;
assign cfg_mgmt_function_number = 0;
assign cfg_mgmt_write = 0;
assign cfg_mgmt_write_data = 0;
assign cfg_mgmt_byte_enable = 0;
assign cfg_mgmt_read = 0;
end
if (MSI_ENABLE) begin
pcie_us_msi #(
.MSI_COUNT(MSI_COUNT)
)
pcie_us_msi_inst (
.clk(clk),
.rst(rst),
/*
* Interrupt request inputs
*/
.msi_irq(msi_irq),
/*
* Interface to Ultrascale PCIe IP core
*/
.cfg_interrupt_msi_enable(cfg_interrupt_msi_enable),
.cfg_interrupt_msi_vf_enable(cfg_interrupt_msi_vf_enable),
.cfg_interrupt_msi_mmenable(cfg_interrupt_msi_mmenable),
.cfg_interrupt_msi_mask_update(cfg_interrupt_msi_mask_update),
.cfg_interrupt_msi_data(cfg_interrupt_msi_data),
.cfg_interrupt_msi_select(cfg_interrupt_msi_select),
.cfg_interrupt_msi_int(cfg_interrupt_msi_int),
.cfg_interrupt_msi_pending_status(cfg_interrupt_msi_pending_status),
.cfg_interrupt_msi_pending_status_data_enable(cfg_interrupt_msi_pending_status_data_enable),
.cfg_interrupt_msi_pending_status_function_num(cfg_interrupt_msi_pending_status_function_num),
.cfg_interrupt_msi_sent(cfg_interrupt_msi_sent),
.cfg_interrupt_msi_fail(cfg_interrupt_msi_fail),
.cfg_interrupt_msi_attr(cfg_interrupt_msi_attr),
.cfg_interrupt_msi_tph_present(cfg_interrupt_msi_tph_present),
.cfg_interrupt_msi_tph_type(cfg_interrupt_msi_tph_type),
.cfg_interrupt_msi_tph_st_tag(cfg_interrupt_msi_tph_st_tag),
.cfg_interrupt_msi_function_number(cfg_interrupt_msi_function_number)
);
end else begin
assign cfg_interrupt_msi_select = 0;
assign cfg_interrupt_msi_int = 0;
assign cfg_interrupt_msi_pending_status = 0;
assign cfg_interrupt_msi_pending_status_data_enable = 0;
assign cfg_interrupt_msi_pending_status_function_num = 0;
assign cfg_interrupt_msi_attr = 0;
assign cfg_interrupt_msi_tph_present = 0;
assign cfg_interrupt_msi_tph_type = 0;
assign cfg_interrupt_msi_tph_st_tag = 0;
assign cfg_interrupt_msi_function_number = 0;
end
endgenerate
endmodule

449
rtl/pcie_us_if_cc.v Normal file
View File

@ -0,0 +1,449 @@
/*
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* Xilinx UltraScale PCIe interface adapter (Completer Completion)
*/
// Completer Completion adapter: accepts completion TLPs (header + payload)
// on the segmented tx_cpl_tlp_* interface, repacks the 3-DW PCIe completion
// header into the Xilinx CC descriptor format, prepends it to the payload,
// and streams the result out on the CC AXI-stream interface through a small
// distributed-RAM output FIFO that provides registered outputs and
// backpressure decoupling.
module pcie_us_if_cc #
(
// Width of PCIe AXI stream interfaces in bits
parameter AXIS_PCIE_DATA_WIDTH = 256,
// PCIe AXI stream tkeep signal width (words per cycle)
parameter AXIS_PCIE_KEEP_WIDTH = (AXIS_PCIE_DATA_WIDTH/32),
// PCIe AXI stream CC tuser signal width
parameter AXIS_PCIE_CC_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 33 : 81,
// TLP segment count
parameter TLP_SEG_COUNT = 1,
// TLP segment data width
parameter TLP_SEG_DATA_WIDTH = AXIS_PCIE_DATA_WIDTH/TLP_SEG_COUNT,
// TLP segment strobe width
parameter TLP_SEG_STRB_WIDTH = TLP_SEG_DATA_WIDTH/32,
// TLP segment header width
parameter TLP_SEG_HDR_WIDTH = 128
)
(
input wire clk,
input wire rst,
/*
 * AXI output (CC)
 */
output wire [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_cc_tdata,
output wire [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_cc_tkeep,
output wire m_axis_cc_tvalid,
input wire m_axis_cc_tready,
output wire m_axis_cc_tlast,
output wire [AXIS_PCIE_CC_USER_WIDTH-1:0] m_axis_cc_tuser,
/*
 * TLP input (completion from BAR)
 */
input wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] tx_cpl_tlp_data,
input wire [TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH-1:0] tx_cpl_tlp_strb,
input wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] tx_cpl_tlp_hdr,
input wire [TLP_SEG_COUNT-1:0] tx_cpl_tlp_valid,
input wire [TLP_SEG_COUNT-1:0] tx_cpl_tlp_sop,
input wire [TLP_SEG_COUNT-1:0] tx_cpl_tlp_eop,
output wire tx_cpl_tlp_ready
);
// derived widths (internal; not intended to be overridden)
parameter TLP_DATA_WIDTH = TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH;
parameter TLP_STRB_WIDTH = TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH;
parameter TLP_DATA_WIDTH_BYTES = TLP_DATA_WIDTH/8;
parameter TLP_DATA_WIDTH_DWORDS = TLP_DATA_WIDTH/32;
// output FIFO depth = 2**5 = 32 entries
parameter OUTPUT_FIFO_ADDR_WIDTH = 5;
// bus width assertions
initial begin
if (AXIS_PCIE_DATA_WIDTH != 64 && AXIS_PCIE_DATA_WIDTH != 128 && AXIS_PCIE_DATA_WIDTH != 256 && AXIS_PCIE_DATA_WIDTH != 512) begin
$error("Error: PCIe interface width must be 64, 128, 256, or 512 (instance %m)");
$finish;
end
if (AXIS_PCIE_KEEP_WIDTH * 32 != AXIS_PCIE_DATA_WIDTH) begin
$error("Error: PCIe interface requires dword (32-bit) granularity (instance %m)");
$finish;
end
if (AXIS_PCIE_DATA_WIDTH == 512) begin
if (AXIS_PCIE_CC_USER_WIDTH != 81) begin
$error("Error: PCIe CC tuser width must be 81 (instance %m)");
$finish;
end
end else begin
if (AXIS_PCIE_CC_USER_WIDTH != 33) begin
$error("Error: PCIe CC tuser width must be 33 (instance %m)");
$finish;
end
end
if (TLP_SEG_COUNT != 1) begin
$error("Error: TLP segment count must be 1 (instance %m)");
$finish;
end
if (TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH != AXIS_PCIE_DATA_WIDTH) begin
$error("Error: Interface widths must match (instance %m)");
$finish;
end
if (TLP_SEG_HDR_WIDTH != 128) begin
$error("Error: TLP segment header width must be 128 (instance %m)");
$finish;
end
end
// TLP fmt field encodings (PCIe spec)
localparam [2:0]
TLP_FMT_3DW = 3'b000,
TLP_FMT_4DW = 3'b001,
TLP_FMT_3DW_DATA = 3'b010,
TLP_FMT_4DW_DATA = 3'b011,
TLP_FMT_PREFIX = 3'b100;
// completion status field encodings (PCIe spec)
localparam [2:0]
CPL_STATUS_SC = 3'b000, // successful completion
CPL_STATUS_UR = 3'b001, // unsupported request
CPL_STATUS_CRS = 3'b010, // configuration request retry status
CPL_STATUS_CA = 3'b100; // completer abort
reg tx_cpl_tlp_ready_cmb;
assign tx_cpl_tlp_ready = tx_cpl_tlp_ready_cmb;
// process outgoing TLPs
//
// FSM: IDLE emits the first beat (descriptor, plus start of payload on
// interfaces wider than 64 bits); HEADER emits the second descriptor beat
// (64-bit interface only, where the 96-bit descriptor spans two beats);
// PAYLOAD shifts the remaining payload out, realigned by the descriptor
// length (3 DWs on wide interfaces, 1 DW after the 64-bit header beats).
localparam [1:0]
TLP_OUTPUT_STATE_IDLE = 2'd0,
TLP_OUTPUT_STATE_HEADER = 2'd1,
TLP_OUTPUT_STATE_PAYLOAD = 2'd2;
reg [1:0] tlp_output_state_reg = TLP_OUTPUT_STATE_IDLE, tlp_output_state_next;
// previous input beat, saved so the realigned output can combine the tail
// of the last beat with the head of the current one
reg [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] out_tlp_data_reg = 0, out_tlp_data_next;
reg [TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH-1:0] out_tlp_strb_reg = 0, out_tlp_strb_next;
reg [TLP_SEG_COUNT-1:0] out_tlp_eop_reg = 0, out_tlp_eop_next;
// decoded fields of the incoming 3-DW completion TLP header
reg [2:0] tx_cpl_tlp_hdr_fmt;
reg [4:0] tx_cpl_tlp_hdr_type;
reg [2:0] tx_cpl_tlp_hdr_tc;
reg tx_cpl_tlp_hdr_ln;
reg tx_cpl_tlp_hdr_th;
reg tx_cpl_tlp_hdr_td;
reg tx_cpl_tlp_hdr_ep;
reg [2:0] tx_cpl_tlp_hdr_attr;
reg [1:0] tx_cpl_tlp_hdr_at;
reg [9:0] tx_cpl_tlp_hdr_length;
reg [15:0] tx_cpl_tlp_hdr_completer_id;
reg [2:0] tx_cpl_tlp_hdr_cpl_status;
reg tx_cpl_tlp_hdr_bcm;
reg [11:0] tx_cpl_tlp_hdr_byte_count;
reg [15:0] tx_cpl_tlp_hdr_requester_id;
reg [9:0] tx_cpl_tlp_hdr_tag;
reg [6:0] tx_cpl_tlp_hdr_lower_addr;
// 96-bit Xilinx CC descriptor assembled from the decoded header fields
reg [95:0] tlp_header_data;
reg [AXIS_PCIE_CC_USER_WIDTH-1:0] tlp_tuser;
// staging signals feeding the output FIFO
reg [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_cc_tdata_int = 0;
reg [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_cc_tkeep_int = 0;
reg m_axis_cc_tvalid_int = 0;
wire m_axis_cc_tready_int;
reg m_axis_cc_tlast_int = 0;
reg [AXIS_PCIE_CC_USER_WIDTH-1:0] m_axis_cc_tuser_int = 0;
always @* begin
tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
out_tlp_data_next = out_tlp_data_reg;
out_tlp_strb_next = out_tlp_strb_reg;
out_tlp_eop_next = out_tlp_eop_reg;
tx_cpl_tlp_ready_cmb = 1'b0;
// TLP header parsing
// DW 0
tx_cpl_tlp_hdr_fmt = tx_cpl_tlp_hdr[127:125]; // fmt
tx_cpl_tlp_hdr_type = tx_cpl_tlp_hdr[124:120]; // type
tx_cpl_tlp_hdr_tag[9] = tx_cpl_tlp_hdr[119]; // T9
tx_cpl_tlp_hdr_tc = tx_cpl_tlp_hdr[118:116]; // TC
tx_cpl_tlp_hdr_tag[8] = tx_cpl_tlp_hdr[115]; // T8
tx_cpl_tlp_hdr_attr[2] = tx_cpl_tlp_hdr[114]; // attr
tx_cpl_tlp_hdr_ln = tx_cpl_tlp_hdr[113]; // LN
tx_cpl_tlp_hdr_th = tx_cpl_tlp_hdr[112]; // TH
tx_cpl_tlp_hdr_td = tx_cpl_tlp_hdr[111]; // TD
tx_cpl_tlp_hdr_ep = tx_cpl_tlp_hdr[110]; // EP
tx_cpl_tlp_hdr_attr[1:0] = tx_cpl_tlp_hdr[109:108]; // attr
tx_cpl_tlp_hdr_at = tx_cpl_tlp_hdr[107:106]; // AT
tx_cpl_tlp_hdr_length = tx_cpl_tlp_hdr[105:96]; // length
// DW 1
tx_cpl_tlp_hdr_completer_id = tx_cpl_tlp_hdr[95:80]; // completer ID
tx_cpl_tlp_hdr_cpl_status = tx_cpl_tlp_hdr[79:77]; // completion status
tx_cpl_tlp_hdr_bcm = tx_cpl_tlp_hdr[76]; // BCM
tx_cpl_tlp_hdr_byte_count = tx_cpl_tlp_hdr[75:64]; // byte count
// DW 2
tx_cpl_tlp_hdr_requester_id = tx_cpl_tlp_hdr[63:48]; // requester ID
tx_cpl_tlp_hdr_tag[7:0] = tx_cpl_tlp_hdr[47:40]; // tag
tx_cpl_tlp_hdr_lower_addr = tx_cpl_tlp_hdr[38:32]; // lower address
// pack decoded fields into the 3-DW (96-bit) Xilinx CC descriptor
tlp_header_data[6:0] = tx_cpl_tlp_hdr_lower_addr; // lower address
tlp_header_data[7] = 1'b0;
tlp_header_data[9:8] = tx_cpl_tlp_hdr_at; // AT
tlp_header_data[15:10] = 6'd0;
tlp_header_data[28:16] = tx_cpl_tlp_hdr_byte_count; // Byte count
tlp_header_data[29] = 1'b0; // locked read completion
tlp_header_data[31:30] = 2'd0;
tlp_header_data[42:32] = tx_cpl_tlp_hdr_length; // DWORD count
tlp_header_data[45:43] = tx_cpl_tlp_hdr_cpl_status; // completion status
tlp_header_data[46] = tx_cpl_tlp_hdr_ep; // poisoned
tlp_header_data[47] = 1'b0;
tlp_header_data[63:48] = tx_cpl_tlp_hdr_requester_id; // requester ID
tlp_header_data[71:64] = tx_cpl_tlp_hdr_tag; // tag
tlp_header_data[87:72] = tx_cpl_tlp_hdr_completer_id; // completer ID
tlp_header_data[88] = 1'b0; // completer ID enable
tlp_header_data[91:89] = tx_cpl_tlp_hdr_tc; // TC
tlp_header_data[94:92] = tx_cpl_tlp_hdr_attr; // attr
tlp_header_data[95] = 1'b0; // force ECRC
// sideband tuser: 512-bit interface carries sop/eop pointers, narrower
// interfaces only discontinue + parity (both driven to zero here)
// NOTE(review): is_eop0_ptr is fixed at 3, which appears to assume a
// single-beat TLP on the 512-bit interface — confirm multi-beat behavior
// against the IP's CC tuser requirements.
if (AXIS_PCIE_DATA_WIDTH == 512) begin
tlp_tuser[1:0] = 2'b01; // is_sop
tlp_tuser[3:2] = 2'd0; // is_sop0_ptr
tlp_tuser[5:4] = 2'd0; // is_sop1_ptr
tlp_tuser[7:6] = 2'b01; // is_eop
tlp_tuser[11:8] = 4'd3; // is_eop0_ptr
tlp_tuser[15:12] = 4'd0; // is_eop1_ptr
tlp_tuser[16] = 1'b0; // discontinue
tlp_tuser[80:17] = 64'd0; // parity
end else begin
tlp_tuser[0] = 1'b0; // discontinue
tlp_tuser[32:1] = 32'd0; // parity
end
// TLP output
m_axis_cc_tdata_int = 0;
m_axis_cc_tkeep_int = 0;
m_axis_cc_tvalid_int = 1'b0;
m_axis_cc_tlast_int = 1'b0;
m_axis_cc_tuser_int = 0;
// combine header and payload, merge in read request TLPs
case (tlp_output_state_reg)
TLP_OUTPUT_STATE_IDLE: begin
// idle state
if (tx_cpl_tlp_valid && m_axis_cc_tready_int) begin
if (AXIS_PCIE_DATA_WIDTH == 64) begin
// 64 bit interface, send first half of header
// (input not consumed yet; data starts in HEADER state)
m_axis_cc_tdata_int = tlp_header_data[63:0];
m_axis_cc_tkeep_int = 2'b11;
m_axis_cc_tvalid_int = 1'b1;
m_axis_cc_tlast_int = 1'b0;
m_axis_cc_tuser_int = tlp_tuser;
tlp_output_state_next = TLP_OUTPUT_STATE_HEADER;
end else begin
// wider interface, send header and start of payload
// (payload occupies DWs above the 3-DW descriptor)
m_axis_cc_tdata_int = {tx_cpl_tlp_data, tlp_header_data};
m_axis_cc_tkeep_int = {tx_cpl_tlp_strb, 3'b111};
m_axis_cc_tvalid_int = 1'b1;
m_axis_cc_tlast_int = 1'b0;
m_axis_cc_tuser_int = tlp_tuser;
tx_cpl_tlp_ready_cmb = 1'b1;
out_tlp_data_next = tx_cpl_tlp_data;
out_tlp_strb_next = tx_cpl_tlp_strb;
out_tlp_eop_next = tx_cpl_tlp_eop;
// done if the whole payload fit beside the 3-DW descriptor
if (tx_cpl_tlp_eop && ((tx_cpl_tlp_strb >> (TLP_DATA_WIDTH_DWORDS-3)) == 0)) begin
m_axis_cc_tlast_int = 1'b1;
tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
end else begin
tlp_output_state_next = TLP_OUTPUT_STATE_PAYLOAD;
end
end
end else begin
tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
end
end
TLP_OUTPUT_STATE_HEADER: begin
// second cycle of header (64 bit interface width only)
if (AXIS_PCIE_DATA_WIDTH == 64) begin
m_axis_cc_tdata_int = {tx_cpl_tlp_data, tlp_header_data[95:64]};
m_axis_cc_tkeep_int = {tx_cpl_tlp_strb, 1'b1};
m_axis_cc_tvalid_int = 1'b1;
m_axis_cc_tlast_int = 1'b0;
m_axis_cc_tuser_int = tlp_tuser;
tx_cpl_tlp_ready_cmb = 1'b1;
out_tlp_data_next = tx_cpl_tlp_data;
out_tlp_strb_next = tx_cpl_tlp_strb;
out_tlp_eop_next = tx_cpl_tlp_eop;
// done if the whole payload fit beside the last descriptor DW
if (tx_cpl_tlp_eop && ((tx_cpl_tlp_strb >> (TLP_DATA_WIDTH_DWORDS-1)) == 0)) begin
m_axis_cc_tlast_int = 1'b1;
tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
end else begin
tlp_output_state_next = TLP_OUTPUT_STATE_PAYLOAD;
end
end
end
TLP_OUTPUT_STATE_PAYLOAD: begin
// transfer payload; each output beat combines the tail of the saved
// previous input beat with the head of the current one, shifted by
// the descriptor length (3 DWs for >=128-bit, 1 DW for 64-bit)
if (AXIS_PCIE_DATA_WIDTH >= 128) begin
m_axis_cc_tdata_int = {tx_cpl_tlp_data, out_tlp_data_reg[TLP_DATA_WIDTH-1:TLP_DATA_WIDTH-96]};
if (tx_cpl_tlp_valid && !out_tlp_eop_reg) begin
m_axis_cc_tkeep_int = {tx_cpl_tlp_strb, out_tlp_strb_reg[TLP_STRB_WIDTH-1:TLP_DATA_WIDTH_DWORDS-3]};
end else begin
m_axis_cc_tkeep_int = out_tlp_strb_reg[TLP_STRB_WIDTH-1:TLP_DATA_WIDTH_DWORDS-3];
end
m_axis_cc_tlast_int = 1'b0;
m_axis_cc_tuser_int = tlp_tuser;
if ((tx_cpl_tlp_valid || out_tlp_eop_reg) && m_axis_cc_tready_int) begin
m_axis_cc_tvalid_int = 1'b1;
// only consume new input while not draining the final residue
tx_cpl_tlp_ready_cmb = !out_tlp_eop_reg;
out_tlp_data_next = tx_cpl_tlp_data;
out_tlp_strb_next = tx_cpl_tlp_strb;
out_tlp_eop_next = tx_cpl_tlp_eop;
if (out_tlp_eop_reg || (tx_cpl_tlp_eop && ((tx_cpl_tlp_strb >> (TLP_DATA_WIDTH_DWORDS-3)) == 0))) begin
m_axis_cc_tlast_int = 1'b1;
tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
end else begin
tlp_output_state_next = TLP_OUTPUT_STATE_PAYLOAD;
end
end else begin
tlp_output_state_next = TLP_OUTPUT_STATE_PAYLOAD;
end
end else begin
// 64-bit interface: payload realigned by 1 DW
m_axis_cc_tdata_int = {tx_cpl_tlp_data, out_tlp_data_reg[TLP_DATA_WIDTH-1:TLP_DATA_WIDTH-32]};
if (tx_cpl_tlp_valid && !out_tlp_eop_reg) begin
m_axis_cc_tkeep_int = {tx_cpl_tlp_strb, out_tlp_strb_reg[TLP_STRB_WIDTH-1:TLP_DATA_WIDTH_DWORDS-1]};
end else begin
m_axis_cc_tkeep_int = out_tlp_strb_reg[TLP_STRB_WIDTH-1:TLP_DATA_WIDTH_DWORDS-1];
end
m_axis_cc_tlast_int = 1'b0;
m_axis_cc_tuser_int = tlp_tuser;
if ((tx_cpl_tlp_valid || out_tlp_eop_reg) && m_axis_cc_tready_int) begin
m_axis_cc_tvalid_int = 1'b1;
tx_cpl_tlp_ready_cmb = !out_tlp_eop_reg;
out_tlp_data_next = tx_cpl_tlp_data;
out_tlp_strb_next = tx_cpl_tlp_strb;
out_tlp_eop_next = tx_cpl_tlp_eop;
if (out_tlp_eop_reg || (tx_cpl_tlp_eop && ((tx_cpl_tlp_strb >> (TLP_DATA_WIDTH_DWORDS-1)) == 0))) begin
m_axis_cc_tlast_int = 1'b1;
tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
end else begin
tlp_output_state_next = TLP_OUTPUT_STATE_PAYLOAD;
end
end else begin
tlp_output_state_next = TLP_OUTPUT_STATE_PAYLOAD;
end
end
end
endcase
end
// FSM state / saved-beat registers; only the state needs reset
always @(posedge clk) begin
tlp_output_state_reg <= tlp_output_state_next;
out_tlp_data_reg <= out_tlp_data_next;
out_tlp_strb_reg <= out_tlp_strb_next;
out_tlp_eop_reg <= out_tlp_eop_next;
if (rst) begin
tlp_output_state_reg <= TLP_OUTPUT_STATE_IDLE;
end
end
// output datapath logic (PCIe TLP)
// 32-entry distributed-RAM FIFO decoupling the FSM from CC backpressure;
// upstream ready deasserts at half full, giving the FSM slack to stop
reg [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_cc_tdata_reg = {AXIS_PCIE_DATA_WIDTH{1'b0}};
reg [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_cc_tkeep_reg = {AXIS_PCIE_KEEP_WIDTH{1'b0}};
reg m_axis_cc_tvalid_reg = 1'b0, m_axis_cc_tvalid_next;
reg m_axis_cc_tlast_reg = 1'b0;
reg [AXIS_PCIE_CC_USER_WIDTH-1:0] m_axis_cc_tuser_reg = {AXIS_PCIE_CC_USER_WIDTH{1'b0}};
// write/read pointers carry an extra wrap bit for full/empty detection
reg [OUTPUT_FIFO_ADDR_WIDTH+1-1:0] out_fifo_wr_ptr_reg = 0;
reg [OUTPUT_FIFO_ADDR_WIDTH+1-1:0] out_fifo_rd_ptr_reg = 0;
reg out_fifo_half_full_reg = 1'b0;
wire out_fifo_full = out_fifo_wr_ptr_reg == (out_fifo_rd_ptr_reg ^ {1'b1, {OUTPUT_FIFO_ADDR_WIDTH{1'b0}}});
wire out_fifo_empty = out_fifo_wr_ptr_reg == out_fifo_rd_ptr_reg;
(* ram_style = "distributed" *)
reg [AXIS_PCIE_DATA_WIDTH-1:0] out_fifo_tdata[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];
(* ram_style = "distributed" *)
reg [AXIS_PCIE_KEEP_WIDTH-1:0] out_fifo_tkeep[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];
(* ram_style = "distributed" *)
reg out_fifo_tlast[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];
(* ram_style = "distributed" *)
reg [AXIS_PCIE_CC_USER_WIDTH-1:0] out_fifo_tuser[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];
// registered half-full flag provides early (conservative) backpressure
assign m_axis_cc_tready_int = !out_fifo_half_full_reg;
assign m_axis_cc_tdata = m_axis_cc_tdata_reg;
assign m_axis_cc_tkeep = m_axis_cc_tkeep_reg;
assign m_axis_cc_tvalid = m_axis_cc_tvalid_reg;
assign m_axis_cc_tlast = m_axis_cc_tlast_reg;
assign m_axis_cc_tuser = m_axis_cc_tuser_reg;
always @(posedge clk) begin
// clear tvalid when the beat is accepted (may be re-set by a read below)
m_axis_cc_tvalid_reg <= m_axis_cc_tvalid_reg && !m_axis_cc_tready;
out_fifo_half_full_reg <= $unsigned(out_fifo_wr_ptr_reg - out_fifo_rd_ptr_reg) >= 2**(OUTPUT_FIFO_ADDR_WIDTH-1);
// FIFO write: enqueue staged beat from the FSM
if (!out_fifo_full && m_axis_cc_tvalid_int) begin
out_fifo_tdata[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_cc_tdata_int;
out_fifo_tkeep[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_cc_tkeep_int;
out_fifo_tlast[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_cc_tlast_int;
out_fifo_tuser[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_cc_tuser_int;
out_fifo_wr_ptr_reg <= out_fifo_wr_ptr_reg + 1;
end
// FIFO read: load output registers when empty slot available downstream
if (!out_fifo_empty && (!m_axis_cc_tvalid_reg || m_axis_cc_tready)) begin
m_axis_cc_tdata_reg <= out_fifo_tdata[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
m_axis_cc_tkeep_reg <= out_fifo_tkeep[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
m_axis_cc_tvalid_reg <= 1'b1;
m_axis_cc_tlast_reg <= out_fifo_tlast[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
m_axis_cc_tuser_reg <= out_fifo_tuser[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
out_fifo_rd_ptr_reg <= out_fifo_rd_ptr_reg + 1;
end
if (rst) begin
out_fifo_wr_ptr_reg <= 0;
out_fifo_rd_ptr_reg <= 0;
m_axis_cc_tvalid_reg <= 1'b0;
end
end
endmodule

401
rtl/pcie_us_if_cq.v Normal file
View File

@ -0,0 +1,401 @@
/*
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* Xilinx UltraScale PCIe interface adapter (Completer reQuest)
*/
module pcie_us_if_cq #
(
// Width of PCIe AXI stream interfaces in bits
parameter AXIS_PCIE_DATA_WIDTH = 256,
// PCIe AXI stream tkeep signal width (words per cycle)
parameter AXIS_PCIE_KEEP_WIDTH = (AXIS_PCIE_DATA_WIDTH/32),
// PCIe AXI stream CQ tuser signal width
parameter AXIS_PCIE_CQ_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 85 : 183,
// TLP segment count
parameter TLP_SEG_COUNT = 1,
// TLP segment data width
parameter TLP_SEG_DATA_WIDTH = AXIS_PCIE_DATA_WIDTH/TLP_SEG_COUNT,
// TLP segment strobe width
parameter TLP_SEG_STRB_WIDTH = TLP_SEG_DATA_WIDTH/32,
// TLP segment header width
parameter TLP_SEG_HDR_WIDTH = 128
)
(
input wire clk,
input wire rst,
/*
* AXI input (CQ)
*/
input wire [AXIS_PCIE_DATA_WIDTH-1:0] s_axis_cq_tdata,
input wire [AXIS_PCIE_KEEP_WIDTH-1:0] s_axis_cq_tkeep,
input wire s_axis_cq_tvalid,
output wire s_axis_cq_tready,
input wire s_axis_cq_tlast,
input wire [AXIS_PCIE_CQ_USER_WIDTH-1:0] s_axis_cq_tuser,
/*
* TLP output (request to BAR)
*/
output wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] rx_req_tlp_data,
output wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] rx_req_tlp_hdr,
output wire [TLP_SEG_COUNT*3-1:0] rx_req_tlp_bar_id,
output wire [TLP_SEG_COUNT*8-1:0] rx_req_tlp_func_num,
output wire [TLP_SEG_COUNT-1:0] rx_req_tlp_valid,
output wire [TLP_SEG_COUNT-1:0] rx_req_tlp_sop,
output wire [TLP_SEG_COUNT-1:0] rx_req_tlp_eop,
input wire rx_req_tlp_ready
);
parameter TLP_DATA_WIDTH = TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH;
parameter TLP_STRB_WIDTH = TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH;
parameter TLP_DATA_WIDTH_BYTES = TLP_DATA_WIDTH/8;
parameter TLP_DATA_WIDTH_DWORDS = TLP_DATA_WIDTH/32;
parameter OUTPUT_FIFO_ADDR_WIDTH = 5;
// bus width assertions
initial begin
if (AXIS_PCIE_DATA_WIDTH != 64 && AXIS_PCIE_DATA_WIDTH != 128 && AXIS_PCIE_DATA_WIDTH != 256 && AXIS_PCIE_DATA_WIDTH != 512) begin
$error("Error: PCIe interface width must be 64, 128, 256, or 512 (instance %m)");
$finish;
end
if (AXIS_PCIE_KEEP_WIDTH * 32 != AXIS_PCIE_DATA_WIDTH) begin
$error("Error: PCIe interface requires dword (32-bit) granularity (instance %m)");
$finish;
end
if (AXIS_PCIE_DATA_WIDTH == 512) begin
if (AXIS_PCIE_CQ_USER_WIDTH != 183) begin
$error("Error: PCIe CQ tuser width must be 183 (instance %m)");
$finish;
end
end else begin
if (AXIS_PCIE_CQ_USER_WIDTH != 85 && AXIS_PCIE_CQ_USER_WIDTH != 88) begin
$error("Error: PCIe CQ tuser width must be 85 or 88 (instance %m)");
$finish;
end
end
if (TLP_SEG_COUNT != 1) begin
$error("Error: TLP segment count must be 1 (instance %m)");
$finish;
end
if (TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH != AXIS_PCIE_DATA_WIDTH) begin
$error("Error: Interface widths must match (instance %m)");
$finish;
end
if (TLP_SEG_HDR_WIDTH != 128) begin
$error("Error: TLP segment header width must be 128 (instance %m)");
$finish;
end
end
localparam [2:0]
TLP_FMT_3DW = 3'b000,
TLP_FMT_4DW = 3'b001,
TLP_FMT_3DW_DATA = 3'b010,
TLP_FMT_4DW_DATA = 3'b011,
TLP_FMT_PREFIX = 3'b100;
localparam [3:0]
REQ_MEM_READ = 4'b0000,
REQ_MEM_WRITE = 4'b0001,
REQ_IO_READ = 4'b0010,
REQ_IO_WRITE = 4'b0011,
REQ_MEM_FETCH_ADD = 4'b0100,
REQ_MEM_SWAP = 4'b0101,
REQ_MEM_CAS = 4'b0110,
REQ_MEM_READ_LOCKED = 4'b0111,
REQ_CFG_READ_0 = 4'b1000,
REQ_CFG_READ_1 = 4'b1001,
REQ_CFG_WRITE_0 = 4'b1010,
REQ_CFG_WRITE_1 = 4'b1011,
REQ_MSG = 4'b1100,
REQ_MSG_VENDOR = 4'b1101,
REQ_MSG_ATS = 4'b1110;
reg [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] rx_req_tlp_data_reg = 0, rx_req_tlp_data_next;
reg [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] rx_req_tlp_hdr_reg = 0, rx_req_tlp_hdr_next;
reg [TLP_SEG_COUNT*3-1:0] rx_req_tlp_bar_id_reg = 0, rx_req_tlp_bar_id_next;
reg [TLP_SEG_COUNT*7-1:0] rx_req_tlp_func_num_reg = 0, rx_req_tlp_func_num_next;
reg [TLP_SEG_COUNT-1:0] rx_req_tlp_valid_reg = 0, rx_req_tlp_valid_next;
reg [TLP_SEG_COUNT-1:0] rx_req_tlp_sop_reg = 0, rx_req_tlp_sop_next;
reg [TLP_SEG_COUNT-1:0] rx_req_tlp_eop_reg = 0, rx_req_tlp_eop_next;
assign rx_req_tlp_data = rx_req_tlp_data_reg;
assign rx_req_tlp_hdr = rx_req_tlp_hdr_reg;
assign rx_req_tlp_bar_id = rx_req_tlp_bar_id_reg;
assign rx_req_tlp_func_num = rx_req_tlp_func_num_reg;
assign rx_req_tlp_valid = rx_req_tlp_valid_reg;
assign rx_req_tlp_sop = rx_req_tlp_sop_reg;
assign rx_req_tlp_eop = rx_req_tlp_eop_reg;
localparam [1:0]
TLP_INPUT_STATE_IDLE = 2'd0,
TLP_INPUT_STATE_HEADER = 2'd1,
TLP_INPUT_STATE_PAYLOAD = 2'd2;
reg [1:0] tlp_input_state_reg = TLP_INPUT_STATE_IDLE, tlp_input_state_next;
reg s_axis_cq_tready_cmb;
reg tlp_input_frame_reg = 1'b0, tlp_input_frame_next;
reg [AXIS_PCIE_DATA_WIDTH-1:0] cq_tdata_int_reg = {AXIS_PCIE_DATA_WIDTH{1'b0}}, cq_tdata_int_next;
reg cq_tvalid_int_reg = 1'b0, cq_tvalid_int_next;
reg cq_tlast_int_reg = 1'b0, cq_tlast_int_next;
reg [AXIS_PCIE_CQ_USER_WIDTH-1:0] cq_tuser_int_reg = {AXIS_PCIE_CQ_USER_WIDTH{1'b0}}, cq_tuser_int_next;
wire [AXIS_PCIE_DATA_WIDTH*2-1:0] cq_tdata = {s_axis_cq_tdata, cq_tdata_int_reg};
assign s_axis_cq_tready = s_axis_cq_tready_cmb;
always @* begin
tlp_input_state_next = TLP_INPUT_STATE_IDLE;
rx_req_tlp_data_next = rx_req_tlp_data_reg;
rx_req_tlp_hdr_next = rx_req_tlp_hdr_reg;
rx_req_tlp_bar_id_next = rx_req_tlp_bar_id_reg;
rx_req_tlp_func_num_next = rx_req_tlp_func_num_reg;
rx_req_tlp_valid_next = rx_req_tlp_valid_reg && !rx_req_tlp_ready;
rx_req_tlp_sop_next = rx_req_tlp_sop_reg;
rx_req_tlp_eop_next = rx_req_tlp_eop_reg;
s_axis_cq_tready_cmb = rx_req_tlp_ready;
tlp_input_frame_next = tlp_input_frame_reg;
cq_tdata_int_next = cq_tdata_int_reg;
cq_tvalid_int_next = cq_tvalid_int_reg;
cq_tlast_int_next = cq_tlast_int_reg;
cq_tuser_int_next = cq_tuser_int_reg;
case (tlp_input_state_reg)
TLP_INPUT_STATE_IDLE: begin
s_axis_cq_tready_cmb = rx_req_tlp_ready;
if (cq_tvalid_int_reg && rx_req_tlp_ready) begin
// DW 0
case (cq_tdata[78:75])
REQ_MEM_READ: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b00000}; // type
end
REQ_MEM_WRITE: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b00000}; // type
end
REQ_IO_READ: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b00010}; // type
end
REQ_IO_WRITE: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b00010}; // type
end
REQ_MEM_FETCH_ADD: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b01100}; // type
end
REQ_MEM_SWAP: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b01101}; // type
end
REQ_MEM_CAS: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b01110}; // type
end
REQ_MEM_READ_LOCKED: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b00001}; // type
end
REQ_MSG: begin
if (cq_tdata[74:64]) begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
end else begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW; // fmt
end
rx_req_tlp_hdr_next[124:120] = {2'b10, cq_tdata[114:112]}; // type
end
REQ_MSG_VENDOR: begin
if (cq_tdata[74:64]) begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
end else begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW; // fmt
end
rx_req_tlp_hdr_next[124:120] = {2'b10, cq_tdata[114:112]}; // type
end
REQ_MSG_ATS: begin
if (cq_tdata[74:64]) begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW_DATA; // fmt
end else begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW; // fmt
end
rx_req_tlp_hdr_next[124:120] = {2'b10, cq_tdata[114:112]}; // type
end
default: begin
rx_req_tlp_hdr_next[127:125] = TLP_FMT_4DW; // fmt
rx_req_tlp_hdr_next[124:120] = {5'b00000}; // type
end
endcase
rx_req_tlp_hdr_next[119] = 1'b0; // T9
rx_req_tlp_hdr_next[118:116] = cq_tdata[123:121]; // TC
rx_req_tlp_hdr_next[115] = 1'b0; // T8
rx_req_tlp_hdr_next[114] = cq_tdata[126]; // attr
rx_req_tlp_hdr_next[113] = 1'b0; // LN
rx_req_tlp_hdr_next[112] = 1'b0; // TH
rx_req_tlp_hdr_next[111] = 1'b0; // TD
rx_req_tlp_hdr_next[110] = 1'b0; // EP
rx_req_tlp_hdr_next[109:108] = cq_tdata[125:124]; // attr
rx_req_tlp_hdr_next[107:106] = cq_tdata[1:0]; // AT
rx_req_tlp_hdr_next[105:96] = cq_tdata[74:64]; // length
// DW 1
rx_req_tlp_hdr_next[95:80] = cq_tdata[95:80]; // requester ID
rx_req_tlp_hdr_next[79:72] = cq_tdata[103:96]; // tag
if (AXIS_PCIE_DATA_WIDTH == 512) begin
rx_req_tlp_hdr_next[71:68] = cq_tuser_int_reg[11:8]; // last BE
rx_req_tlp_hdr_next[67:64] = cq_tuser_int_reg[3:0]; // first BE
end else begin
rx_req_tlp_hdr_next[71:68] = cq_tuser_int_reg[7:4]; // last BE
rx_req_tlp_hdr_next[67:64] = cq_tuser_int_reg[3:0]; // first BE
end
// DW 2+3
rx_req_tlp_hdr_next[63:2] = cq_tdata[63:2]; // address
rx_req_tlp_hdr_next[1:0] = 2'b00; // PH
rx_req_tlp_bar_id_next = cq_tdata[114:112];
rx_req_tlp_func_num_next = cq_tdata[111:104];
if (AXIS_PCIE_DATA_WIDTH > 64) begin
rx_req_tlp_data_next = cq_tdata[AXIS_PCIE_DATA_WIDTH+128-1:128];
rx_req_tlp_sop_next = 1'b1;
rx_req_tlp_eop_next = 1'b0;
tlp_input_frame_next = 1'b1;
if (cq_tlast_int_reg) begin
rx_req_tlp_valid_next = 1'b1;
rx_req_tlp_eop_next = 1'b1;
cq_tvalid_int_next = 1'b0;
tlp_input_frame_next = 1'b0;
tlp_input_state_next = TLP_INPUT_STATE_IDLE;
end else if (s_axis_cq_tready && s_axis_cq_tvalid) begin
rx_req_tlp_valid_next = 1'b1;
tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
end else begin
tlp_input_state_next = TLP_INPUT_STATE_IDLE;
end
end else begin
if (cq_tlast_int_reg) begin
cq_tvalid_int_next = 1'b0;
tlp_input_frame_next = 1'b0;
tlp_input_state_next = TLP_INPUT_STATE_IDLE;
end else if (s_axis_cq_tready && s_axis_cq_tvalid) begin
tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
end else begin
tlp_input_state_next = TLP_INPUT_STATE_IDLE;
end
end
end else begin
tlp_input_state_next = TLP_INPUT_STATE_IDLE;
end
end
TLP_INPUT_STATE_PAYLOAD: begin
s_axis_cq_tready_cmb = rx_req_tlp_ready;
if (cq_tvalid_int_reg && rx_req_tlp_ready) begin
if (AXIS_PCIE_DATA_WIDTH > 128) begin
rx_req_tlp_data_next = cq_tdata[AXIS_PCIE_DATA_WIDTH+128-1:128];
rx_req_tlp_sop_next = 1'b0;
end else begin
rx_req_tlp_data_next = s_axis_cq_tdata;
rx_req_tlp_sop_next = !tlp_input_frame_reg;
end
rx_req_tlp_eop_next = 1'b0;
if (cq_tlast_int_reg) begin
rx_req_tlp_valid_next = 1'b1;
rx_req_tlp_eop_next = 1'b1;
cq_tvalid_int_next = 1'b0;
tlp_input_frame_next = 1'b0;
tlp_input_state_next = TLP_INPUT_STATE_IDLE;
end else if (s_axis_cq_tready && s_axis_cq_tvalid) begin
rx_req_tlp_valid_next = 1'b1;
tlp_input_frame_next = 1'b1;
tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
end else begin
tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
end
end else begin
tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
end
end
endcase
if (s_axis_cq_tready && s_axis_cq_tvalid) begin
cq_tdata_int_next = s_axis_cq_tdata;
cq_tvalid_int_next = s_axis_cq_tvalid;
cq_tlast_int_next = s_axis_cq_tlast;
cq_tuser_int_next = s_axis_cq_tuser;
end
end
always @(posedge clk) begin
tlp_input_state_reg <= tlp_input_state_next;
rx_req_tlp_data_reg <= rx_req_tlp_data_next;
rx_req_tlp_hdr_reg <= rx_req_tlp_hdr_next;
rx_req_tlp_bar_id_reg <= rx_req_tlp_bar_id_next;
rx_req_tlp_func_num_reg <= rx_req_tlp_func_num_next;
rx_req_tlp_valid_reg <= rx_req_tlp_valid_next;
rx_req_tlp_sop_reg <= rx_req_tlp_sop_next;
rx_req_tlp_eop_reg <= rx_req_tlp_eop_next;
tlp_input_frame_reg <= tlp_input_frame_next;
cq_tdata_int_reg <= cq_tdata_int_next;
cq_tvalid_int_reg <= cq_tvalid_int_next;
cq_tlast_int_reg <= cq_tlast_int_next;
cq_tuser_int_reg <= cq_tuser_int_next;
if (rst) begin
tlp_input_state_reg <= TLP_INPUT_STATE_IDLE;
rx_req_tlp_valid_reg <= 0;
cq_tvalid_int_reg <= 1'b0;
end
end
endmodule

359
rtl/pcie_us_if_rc.v Normal file
View File

@ -0,0 +1,359 @@
/*

Copyright (c) 2021 Alex Forencich

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

*/

// Language: Verilog 2001

`timescale 1ns / 1ps

/*
 * Xilinx UltraScale PCIe interface adapter (Requester Completion)
 *
 * Unpacks the Xilinx UltraScale PCIe hard core RC descriptor into a
 * standard PCIe completion TLP header, maps the core's error code to a
 * generic error code, and re-aligns the payload so it starts at bit 0 of
 * the TLP data output.  The final output beat (eop) may carry no valid
 * payload dwords when the payload ends exactly on a beat boundary;
 * consumers must use the header length field to count data.
 */
module pcie_us_if_rc #
(
    // Width of PCIe AXI stream interfaces in bits
    parameter AXIS_PCIE_DATA_WIDTH = 256,
    // PCIe AXI stream tkeep signal width (words per cycle)
    parameter AXIS_PCIE_KEEP_WIDTH = (AXIS_PCIE_DATA_WIDTH/32),
    // PCIe AXI stream RC tuser signal width
    parameter AXIS_PCIE_RC_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 75 : 161,
    // PCIe AXI stream RQ tuser signal width
    parameter AXIS_PCIE_RQ_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 60 : 137,
    // TLP segment count
    parameter TLP_SEG_COUNT = 1,
    // TLP segment data width
    parameter TLP_SEG_DATA_WIDTH = AXIS_PCIE_DATA_WIDTH/TLP_SEG_COUNT,
    // TLP segment strobe width
    parameter TLP_SEG_STRB_WIDTH = TLP_SEG_DATA_WIDTH/32,
    // TLP segment header width
    parameter TLP_SEG_HDR_WIDTH = 128
)
(
    input  wire                                         clk,
    input  wire                                         rst,

    /*
     * AXI input (RC)
     */
    input  wire [AXIS_PCIE_DATA_WIDTH-1:0]              s_axis_rc_tdata,
    input  wire [AXIS_PCIE_KEEP_WIDTH-1:0]              s_axis_rc_tkeep,
    input  wire                                         s_axis_rc_tvalid,
    output wire                                         s_axis_rc_tready,
    input  wire                                         s_axis_rc_tlast,
    input  wire [AXIS_PCIE_RC_USER_WIDTH-1:0]           s_axis_rc_tuser,

    /*
     * TLP output (completion to DMA)
     */
    output wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0]  rx_cpl_tlp_data,
    output wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0]   rx_cpl_tlp_hdr,
    output wire [TLP_SEG_COUNT*4-1:0]                   rx_cpl_tlp_error,
    output wire [TLP_SEG_COUNT-1:0]                     rx_cpl_tlp_valid,
    output wire [TLP_SEG_COUNT-1:0]                     rx_cpl_tlp_sop,
    output wire [TLP_SEG_COUNT-1:0]                     rx_cpl_tlp_eop,
    input  wire                                         rx_cpl_tlp_ready
);

// internal derived parameters
parameter TLP_DATA_WIDTH = TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH;
parameter TLP_STRB_WIDTH = TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH;
parameter TLP_DATA_WIDTH_BYTES = TLP_DATA_WIDTH/8;
parameter TLP_DATA_WIDTH_DWORDS = TLP_DATA_WIDTH/32;

parameter OUTPUT_FIFO_ADDR_WIDTH = 5;

// bus width assertions
initial begin
    if (AXIS_PCIE_DATA_WIDTH != 64 && AXIS_PCIE_DATA_WIDTH != 128 && AXIS_PCIE_DATA_WIDTH != 256 && AXIS_PCIE_DATA_WIDTH != 512) begin
        $error("Error: PCIe interface width must be 64, 128, 256, or 512 (instance %m)");
        $finish;
    end

    if (AXIS_PCIE_KEEP_WIDTH * 32 != AXIS_PCIE_DATA_WIDTH) begin
        $error("Error: PCIe interface requires dword (32-bit) granularity (instance %m)");
        $finish;
    end

    if (AXIS_PCIE_DATA_WIDTH == 512) begin
        if (AXIS_PCIE_RC_USER_WIDTH != 161) begin
            $error("Error: PCIe RC tuser width must be 161 (instance %m)");
            $finish;
        end
    end else begin
        if (AXIS_PCIE_RC_USER_WIDTH != 75) begin
            $error("Error: PCIe RC tuser width must be 75 (instance %m)");
            $finish;
        end
    end

    if (TLP_SEG_COUNT != 1) begin
        $error("Error: TLP segment count must be 1 (instance %m)");
        $finish;
    end

    if (TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH != AXIS_PCIE_DATA_WIDTH) begin
        $error("Error: Interface widths must match (instance %m)");
        $finish;
    end

    if (TLP_SEG_HDR_WIDTH != 128) begin
        $error("Error: TLP segment header width must be 128 (instance %m)");
        $finish;
    end
end

// PCIe TLP fmt field encodings
localparam [2:0]
    TLP_FMT_3DW = 3'b000,
    TLP_FMT_4DW = 3'b001,
    TLP_FMT_3DW_DATA = 3'b010,
    TLP_FMT_4DW_DATA = 3'b011,
    TLP_FMT_PREFIX = 3'b100;

// PCIe completion status field encodings
localparam [2:0]
    CPL_STATUS_SC  = 3'b000, // successful completion
    CPL_STATUS_UR  = 3'b001, // unsupported request
    CPL_STATUS_CRS = 3'b010, // configuration request retry status
    CPL_STATUS_CA  = 3'b100; // completer abort

// Xilinx RC descriptor error code field encodings
localparam [3:0]
    RC_ERROR_NORMAL_TERMINATION = 4'b0000,
    RC_ERROR_POISONED = 4'b0001,
    RC_ERROR_BAD_STATUS = 4'b0010,
    RC_ERROR_INVALID_LENGTH = 4'b0011,
    RC_ERROR_MISMATCH = 4'b0100,
    RC_ERROR_INVALID_ADDRESS = 4'b0101,
    RC_ERROR_INVALID_TAG = 4'b0110,
    RC_ERROR_TIMEOUT = 4'b1001,
    RC_ERROR_FLR = 4'b1000;

// generic error codes presented on rx_cpl_tlp_error
localparam [3:0]
    PCIE_ERROR_NONE = 4'd0,
    PCIE_ERROR_POISONED = 4'd1,
    PCIE_ERROR_BAD_STATUS = 4'd2,
    PCIE_ERROR_MISMATCH = 4'd3,
    PCIE_ERROR_INVALID_LEN = 4'd4,
    PCIE_ERROR_INVALID_ADDR = 4'd5,
    PCIE_ERROR_INVALID_TAG = 4'd6,
    PCIE_ERROR_FLR = 4'd8,
    PCIE_ERROR_TIMEOUT = 4'd15;

// output registers
reg [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] rx_cpl_tlp_data_reg = 0, rx_cpl_tlp_data_next;
reg [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] rx_cpl_tlp_hdr_reg = 0, rx_cpl_tlp_hdr_next;
reg [TLP_SEG_COUNT*4-1:0] rx_cpl_tlp_error_reg = 0, rx_cpl_tlp_error_next;
reg [TLP_SEG_COUNT-1:0] rx_cpl_tlp_valid_reg = 0, rx_cpl_tlp_valid_next;
reg [TLP_SEG_COUNT-1:0] rx_cpl_tlp_sop_reg = 0, rx_cpl_tlp_sop_next;
reg [TLP_SEG_COUNT-1:0] rx_cpl_tlp_eop_reg = 0, rx_cpl_tlp_eop_next;

assign rx_cpl_tlp_data = rx_cpl_tlp_data_reg;
assign rx_cpl_tlp_hdr = rx_cpl_tlp_hdr_reg;
assign rx_cpl_tlp_error = rx_cpl_tlp_error_reg;
assign rx_cpl_tlp_valid = rx_cpl_tlp_valid_reg;
assign rx_cpl_tlp_sop = rx_cpl_tlp_sop_reg;
assign rx_cpl_tlp_eop = rx_cpl_tlp_eop_reg;

localparam [1:0]
    TLP_INPUT_STATE_IDLE = 2'd0,
    TLP_INPUT_STATE_HEADER = 2'd1,
    TLP_INPUT_STATE_PAYLOAD = 2'd2;

reg [1:0] tlp_input_state_reg = TLP_INPUT_STATE_IDLE, tlp_input_state_next;

reg s_axis_rc_tready_cmb;

reg tlp_input_frame_reg = 1'b0, tlp_input_frame_next;

// one-beat input skid buffer; processing always operates on the buffered beat
reg [AXIS_PCIE_DATA_WIDTH-1:0] rc_tdata_int_reg = {AXIS_PCIE_DATA_WIDTH{1'b0}}, rc_tdata_int_next;
reg rc_tvalid_int_reg = 1'b0, rc_tvalid_int_next;
reg rc_tlast_int_reg = 1'b0, rc_tlast_int_next;

// two-beat window: {current input beat, buffered previous beat}
wire [AXIS_PCIE_DATA_WIDTH*2-1:0] rc_tdata = {s_axis_rc_tdata, rc_tdata_int_reg};

assign s_axis_rc_tready = s_axis_rc_tready_cmb;

always @* begin
    tlp_input_state_next = TLP_INPUT_STATE_IDLE;

    rx_cpl_tlp_data_next = rx_cpl_tlp_data_reg;
    rx_cpl_tlp_hdr_next = rx_cpl_tlp_hdr_reg;
    rx_cpl_tlp_error_next = rx_cpl_tlp_error_reg;
    rx_cpl_tlp_valid_next = rx_cpl_tlp_valid_reg && !rx_cpl_tlp_ready;
    rx_cpl_tlp_sop_next = rx_cpl_tlp_sop_reg;
    rx_cpl_tlp_eop_next = rx_cpl_tlp_eop_reg;

    s_axis_rc_tready_cmb = rx_cpl_tlp_ready;

    tlp_input_frame_next = tlp_input_frame_reg;

    rc_tdata_int_next = rc_tdata_int_reg;
    rc_tvalid_int_next = rc_tvalid_int_reg;
    rc_tlast_int_next = rc_tlast_int_reg;

    case (tlp_input_state_reg)
        TLP_INPUT_STATE_IDLE: begin
            // idle state; parse the RC descriptor (low 96 bits of the
            // two-beat window) and re-pack it into a 3DW completion header
            s_axis_rc_tready_cmb = rx_cpl_tlp_ready;

            if (rc_tvalid_int_reg && rx_cpl_tlp_ready) begin
                // DW 0: completion carries data only if dword count is non-zero
                if (rc_tdata[42:32] != 0) begin
                    rx_cpl_tlp_hdr_next[127:125] = TLP_FMT_3DW_DATA; // fmt - 3DW with data
                end else begin
                    rx_cpl_tlp_hdr_next[127:125] = TLP_FMT_3DW; // fmt - 3DW without data
                end
                rx_cpl_tlp_hdr_next[124:120] = {4'b0101, rc_tdata[29]}; // type - Cpl or CplLk
                rx_cpl_tlp_hdr_next[119] = 1'b0; // T9
                rx_cpl_tlp_hdr_next[118:116] = rc_tdata[91:89]; // TC
                rx_cpl_tlp_hdr_next[115] = 1'b0; // T8
                rx_cpl_tlp_hdr_next[114] = rc_tdata[94]; // attr
                rx_cpl_tlp_hdr_next[113] = 1'b0; // LN
                rx_cpl_tlp_hdr_next[112] = 1'b0; // TH
                rx_cpl_tlp_hdr_next[111] = 1'b0; // TD
                rx_cpl_tlp_hdr_next[110] = rc_tdata[46]; // EP
                rx_cpl_tlp_hdr_next[109:108] = rc_tdata[93:92]; // attr
                rx_cpl_tlp_hdr_next[107:106] = 2'b00; // AT
                // length: descriptor dword count is 11 bits; the 10-bit TLP
                // length field encodes 1024 as zero, so the MSB is dropped
                rx_cpl_tlp_hdr_next[105:96] = rc_tdata[41:32]; // length
                // DW 1
                rx_cpl_tlp_hdr_next[95:80] = rc_tdata[87:72]; // completer ID
                rx_cpl_tlp_hdr_next[79:77] = rc_tdata[45:43]; // completion status
                rx_cpl_tlp_hdr_next[76] = 1'b0; // BCM
                // byte count: descriptor field is 13 bits; the 12-bit TLP
                // field encodes 4096 as zero, so the MSB is dropped
                rx_cpl_tlp_hdr_next[75:64] = rc_tdata[27:16]; // byte count
                // DW 2
                rx_cpl_tlp_hdr_next[63:48] = rc_tdata[63:48]; // requester ID
                rx_cpl_tlp_hdr_next[47:40] = rc_tdata[71:64]; // tag
                rx_cpl_tlp_hdr_next[39] = 1'b0;
                rx_cpl_tlp_hdr_next[38:32] = rc_tdata[6:0]; // lower address
                // DW 3
                rx_cpl_tlp_hdr_next[31:0] = 32'd0;

                // map core error code to generic error code
                case (rc_tdata[15:12])
                    RC_ERROR_NORMAL_TERMINATION: rx_cpl_tlp_error_next = PCIE_ERROR_NONE;
                    RC_ERROR_POISONED: rx_cpl_tlp_error_next = PCIE_ERROR_POISONED;
                    RC_ERROR_BAD_STATUS: rx_cpl_tlp_error_next = PCIE_ERROR_BAD_STATUS;
                    RC_ERROR_INVALID_LENGTH: rx_cpl_tlp_error_next = PCIE_ERROR_INVALID_LEN;
                    RC_ERROR_MISMATCH: rx_cpl_tlp_error_next = PCIE_ERROR_MISMATCH;
                    RC_ERROR_INVALID_ADDRESS: rx_cpl_tlp_error_next = PCIE_ERROR_INVALID_ADDR;
                    RC_ERROR_INVALID_TAG: rx_cpl_tlp_error_next = PCIE_ERROR_INVALID_TAG;
                    RC_ERROR_FLR: rx_cpl_tlp_error_next = PCIE_ERROR_FLR;
                    RC_ERROR_TIMEOUT: rx_cpl_tlp_error_next = PCIE_ERROR_TIMEOUT;
                    default: rx_cpl_tlp_error_next = PCIE_ERROR_NONE;
                endcase

                if (AXIS_PCIE_DATA_WIDTH > 64) begin
                    // payload starts 96 bits into the stream; realign it
                    rx_cpl_tlp_data_next = rc_tdata[AXIS_PCIE_DATA_WIDTH+96-1:96];
                    rx_cpl_tlp_sop_next = 1'b1;
                    rx_cpl_tlp_eop_next = 1'b0;
                    tlp_input_frame_next = 1'b1;
                    if (rc_tlast_int_reg) begin
                        // single-beat TLP; emit header and finish
                        rx_cpl_tlp_valid_next = 1'b1;
                        rx_cpl_tlp_eop_next = 1'b1;
                        rc_tvalid_int_next = 1'b0;
                        tlp_input_frame_next = 1'b0;
                        tlp_input_state_next = TLP_INPUT_STATE_IDLE;
                    end else if (s_axis_rc_tready && s_axis_rc_tvalid) begin
                        rx_cpl_tlp_valid_next = 1'b1;
                        tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
                    end else begin
                        tlp_input_state_next = TLP_INPUT_STATE_IDLE;
                    end
                end else begin
                    // 64-bit interface: descriptor spans both window beats;
                    // payload is emitted in the payload state
                    if (rc_tlast_int_reg) begin
                        rc_tvalid_int_next = 1'b0;
                        tlp_input_frame_next = 1'b0;
                        tlp_input_state_next = TLP_INPUT_STATE_IDLE;
                    end else if (s_axis_rc_tready && s_axis_rc_tvalid) begin
                        tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
                    end else begin
                        tlp_input_state_next = TLP_INPUT_STATE_IDLE;
                    end
                end
            end else begin
                tlp_input_state_next = TLP_INPUT_STATE_IDLE;
            end
        end
        TLP_INPUT_STATE_PAYLOAD: begin
            // transfer payload
            s_axis_rc_tready_cmb = rx_cpl_tlp_ready;

            if (rc_tvalid_int_reg && rx_cpl_tlp_ready) begin
                if (AXIS_PCIE_DATA_WIDTH > 64) begin
                    // payload is offset 96 bits into each beat pair
                    rx_cpl_tlp_data_next = rc_tdata[AXIS_PCIE_DATA_WIDTH+96-1:96];
                    rx_cpl_tlp_sop_next = 1'b0;
                end else begin
                    // 64-bit interface: 96 mod 64 = 32 bit offset
                    rx_cpl_tlp_data_next = rc_tdata[AXIS_PCIE_DATA_WIDTH+32-1:32];
                    rx_cpl_tlp_sop_next = !tlp_input_frame_reg;
                end
                rx_cpl_tlp_eop_next = 1'b0;
                if (rc_tlast_int_reg) begin
                    rx_cpl_tlp_valid_next = 1'b1;
                    rx_cpl_tlp_eop_next = 1'b1;
                    rc_tvalid_int_next = 1'b0;
                    tlp_input_frame_next = 1'b0;
                    tlp_input_state_next = TLP_INPUT_STATE_IDLE;
                end else if (s_axis_rc_tready && s_axis_rc_tvalid) begin
                    rx_cpl_tlp_valid_next = 1'b1;
                    tlp_input_frame_next = 1'b1;
                    tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
                end else begin
                    tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
                end
            end else begin
                tlp_input_state_next = TLP_INPUT_STATE_PAYLOAD;
            end
        end
    endcase

    // capture new input beat into the skid buffer
    if (s_axis_rc_tready && s_axis_rc_tvalid) begin
        rc_tdata_int_next = s_axis_rc_tdata;
        rc_tvalid_int_next = s_axis_rc_tvalid;
        rc_tlast_int_next = s_axis_rc_tlast;
    end
end

always @(posedge clk) begin
    tlp_input_state_reg <= tlp_input_state_next;

    rx_cpl_tlp_data_reg <= rx_cpl_tlp_data_next;
    rx_cpl_tlp_hdr_reg <= rx_cpl_tlp_hdr_next;
    rx_cpl_tlp_error_reg <= rx_cpl_tlp_error_next;
    rx_cpl_tlp_valid_reg <= rx_cpl_tlp_valid_next;
    rx_cpl_tlp_sop_reg <= rx_cpl_tlp_sop_next;
    rx_cpl_tlp_eop_reg <= rx_cpl_tlp_eop_next;

    tlp_input_frame_reg <= tlp_input_frame_next;

    rc_tdata_int_reg <= rc_tdata_int_next;
    rc_tvalid_int_reg <= rc_tvalid_int_next;
    rc_tlast_int_reg <= rc_tlast_int_next;

    if (rst) begin
        tlp_input_state_reg <= TLP_INPUT_STATE_IDLE;
        rx_cpl_tlp_valid_reg <= 0;
        rc_tvalid_int_reg <= 1'b0;
    end
end

endmodule

616
rtl/pcie_us_if_rq.v Normal file
View File

@ -0,0 +1,616 @@
/*
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* Xilinx UltraScale PCIe interface adapter (Requester reQuest)
*/
module pcie_us_if_rq #
(
// Width of PCIe AXI stream interfaces in bits
parameter AXIS_PCIE_DATA_WIDTH = 256,
// PCIe AXI stream tkeep signal width (words per cycle)
parameter AXIS_PCIE_KEEP_WIDTH = (AXIS_PCIE_DATA_WIDTH/32),
// PCIe AXI stream RQ tuser signal width
parameter AXIS_PCIE_RQ_USER_WIDTH = AXIS_PCIE_DATA_WIDTH < 512 ? 60 : 137,
// RQ sequence number width
parameter RQ_SEQ_NUM_WIDTH = AXIS_PCIE_RQ_USER_WIDTH == 60 ? 4 : 6,
// TLP segment count
parameter TLP_SEG_COUNT = 1,
// TLP segment data width
parameter TLP_SEG_DATA_WIDTH = AXIS_PCIE_DATA_WIDTH/TLP_SEG_COUNT,
// TLP segment strobe width
parameter TLP_SEG_STRB_WIDTH = TLP_SEG_DATA_WIDTH/32,
// TLP segment header width
parameter TLP_SEG_HDR_WIDTH = 128,
// TX sequence number count
parameter TX_SEQ_NUM_COUNT = AXIS_PCIE_DATA_WIDTH < 512 ? 1 : 2,
// TX sequence number width
parameter TX_SEQ_NUM_WIDTH = RQ_SEQ_NUM_WIDTH-1
)
(
input wire clk,
input wire rst,
/*
* AXI output (RQ)
*/
output wire [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_rq_tdata,
output wire [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_rq_tkeep,
output wire m_axis_rq_tvalid,
input wire m_axis_rq_tready,
output wire m_axis_rq_tlast,
output wire [AXIS_PCIE_RQ_USER_WIDTH-1:0] m_axis_rq_tuser,
/*
* Transmit sequence number input
*/
input wire [RQ_SEQ_NUM_WIDTH-1:0] s_axis_rq_seq_num_0,
input wire s_axis_rq_seq_num_valid_0,
input wire [RQ_SEQ_NUM_WIDTH-1:0] s_axis_rq_seq_num_1,
input wire s_axis_rq_seq_num_valid_1,
/*
* TLP input (read request from DMA)
*/
input wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] tx_rd_req_tlp_hdr,
input wire [TLP_SEG_COUNT*TX_SEQ_NUM_WIDTH-1:0] tx_rd_req_tlp_seq,
input wire [TLP_SEG_COUNT-1:0] tx_rd_req_tlp_valid,
input wire [TLP_SEG_COUNT-1:0] tx_rd_req_tlp_sop,
input wire [TLP_SEG_COUNT-1:0] tx_rd_req_tlp_eop,
output wire tx_rd_req_tlp_ready,
/*
* Transmit sequence number output (DMA read request)
*/
output wire [TX_SEQ_NUM_COUNT*TX_SEQ_NUM_WIDTH-1:0] m_axis_rd_req_tx_seq_num,
output wire [TX_SEQ_NUM_COUNT-1:0] m_axis_rd_req_tx_seq_num_valid,
/*
* TLP input (write request from DMA)
*/
input wire [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] tx_wr_req_tlp_data,
input wire [TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH-1:0] tx_wr_req_tlp_strb,
input wire [TLP_SEG_COUNT*TLP_SEG_HDR_WIDTH-1:0] tx_wr_req_tlp_hdr,
input wire [TLP_SEG_COUNT*TX_SEQ_NUM_WIDTH-1:0] tx_wr_req_tlp_seq,
input wire [TLP_SEG_COUNT-1:0] tx_wr_req_tlp_valid,
input wire [TLP_SEG_COUNT-1:0] tx_wr_req_tlp_sop,
input wire [TLP_SEG_COUNT-1:0] tx_wr_req_tlp_eop,
output wire tx_wr_req_tlp_ready,
/*
* Transmit sequence number output (DMA write request)
*/
output wire [TX_SEQ_NUM_COUNT*TX_SEQ_NUM_WIDTH-1:0] m_axis_wr_req_tx_seq_num,
output wire [TX_SEQ_NUM_COUNT-1:0] m_axis_wr_req_tx_seq_num_valid
);
parameter TLP_DATA_WIDTH = TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH;
parameter TLP_STRB_WIDTH = TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH;
parameter TLP_DATA_WIDTH_BYTES = TLP_DATA_WIDTH/8;
parameter TLP_DATA_WIDTH_DWORDS = TLP_DATA_WIDTH/32;
parameter SEQ_NUM_MASK = {RQ_SEQ_NUM_WIDTH-1{1'b1}};
parameter SEQ_NUM_FLAG = {1'b1, {RQ_SEQ_NUM_WIDTH-1{1'b0}}};
parameter OUTPUT_FIFO_ADDR_WIDTH = 5;
// bus width assertions
initial begin
if (AXIS_PCIE_DATA_WIDTH != 64 && AXIS_PCIE_DATA_WIDTH != 128 && AXIS_PCIE_DATA_WIDTH != 256 && AXIS_PCIE_DATA_WIDTH != 512) begin
$error("Error: PCIe interface width must be 64, 128, 256, or 512 (instance %m)");
$finish;
end
if (AXIS_PCIE_KEEP_WIDTH * 32 != AXIS_PCIE_DATA_WIDTH) begin
$error("Error: PCIe interface requires dword (32-bit) granularity (instance %m)");
$finish;
end
if (AXIS_PCIE_DATA_WIDTH == 512) begin
if (AXIS_PCIE_RQ_USER_WIDTH != 137) begin
$error("Error: PCIe RQ tuser width must be 137 (instance %m)");
$finish;
end
if (TX_SEQ_NUM_COUNT != 2) begin
$error("Error: TX sequence number count must be 2 (instance %m)");
$finish;
end
end else begin
if (AXIS_PCIE_RQ_USER_WIDTH != 60 && AXIS_PCIE_RQ_USER_WIDTH != 62) begin
$error("Error: PCIe RQ tuser width must be 60 or 62 (instance %m)");
$finish;
end
if (TX_SEQ_NUM_COUNT != 1) begin
$error("Error: TX sequence number count must be 1 (instance %m)");
$finish;
end
end
if (AXIS_PCIE_RQ_USER_WIDTH == 60) begin
if (RQ_SEQ_NUM_WIDTH != 4) begin
$error("Error: RQ sequence number width must be 4 (instance %m)");
$finish;
end
end else begin
if (RQ_SEQ_NUM_WIDTH != 6) begin
$error("Error: RQ sequence number width must be 6 (instance %m)");
$finish;
end
end
if (TLP_SEG_COUNT != 1) begin
$error("Error: TLP segment count must be 1 (instance %m)");
$finish;
end
if (TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH != AXIS_PCIE_DATA_WIDTH) begin
$error("Error: Interface widths must match (instance %m)");
$finish;
end
if (TLP_SEG_HDR_WIDTH != 128) begin
$error("Error: TLP segment header width must be 128 (instance %m)");
$finish;
end
if (TX_SEQ_NUM_WIDTH > RQ_SEQ_NUM_WIDTH-1) begin
$error("Error: TX sequence number width must be less than RQ_SEQ_NUM_WIDTH (instance %m)");
$finish;
end
end
localparam [3:0]
REQ_MEM_READ = 4'b0000,
REQ_MEM_WRITE = 4'b0001,
REQ_IO_READ = 4'b0010,
REQ_IO_WRITE = 4'b0011,
REQ_MEM_FETCH_ADD = 4'b0100,
REQ_MEM_SWAP = 4'b0101,
REQ_MEM_CAS = 4'b0110,
REQ_MEM_READ_LOCKED = 4'b0111,
REQ_CFG_READ_0 = 4'b1000,
REQ_CFG_READ_1 = 4'b1001,
REQ_CFG_WRITE_0 = 4'b1010,
REQ_CFG_WRITE_1 = 4'b1011,
REQ_MSG = 4'b1100,
REQ_MSG_VENDOR = 4'b1101,
REQ_MSG_ATS = 4'b1110;
reg tx_rd_req_tlp_ready_cmb;
wire [TLP_SEG_COUNT*RQ_SEQ_NUM_WIDTH-1:0] tx_rd_req_tlp_seq_int = {1'b1, tx_rd_req_tlp_seq};
reg tx_wr_req_tlp_ready_cmb;
wire [TLP_SEG_COUNT*RQ_SEQ_NUM_WIDTH-1:0] tx_wr_req_tlp_seq_int = {1'b0, tx_wr_req_tlp_seq};
assign tx_rd_req_tlp_ready = tx_rd_req_tlp_ready_cmb;
assign tx_wr_req_tlp_ready = tx_wr_req_tlp_ready_cmb;
generate
assign m_axis_rd_req_tx_seq_num[TX_SEQ_NUM_WIDTH*0 +: TX_SEQ_NUM_WIDTH] = s_axis_rq_seq_num_0;
assign m_axis_rd_req_tx_seq_num_valid[0] = s_axis_rq_seq_num_valid_0 && ((s_axis_rq_seq_num_0 & SEQ_NUM_FLAG) != 0);
if (TX_SEQ_NUM_COUNT > 1) begin
assign m_axis_rd_req_tx_seq_num[TX_SEQ_NUM_WIDTH*1 +: TX_SEQ_NUM_WIDTH] = s_axis_rq_seq_num_1;
assign m_axis_rd_req_tx_seq_num_valid[1] = s_axis_rq_seq_num_valid_1 && ((s_axis_rq_seq_num_1 & SEQ_NUM_FLAG) != 0);
end
assign m_axis_wr_req_tx_seq_num[TX_SEQ_NUM_WIDTH*0 +: TX_SEQ_NUM_WIDTH] = s_axis_rq_seq_num_0;
assign m_axis_wr_req_tx_seq_num_valid[0] = s_axis_rq_seq_num_valid_0 && ((s_axis_rq_seq_num_0 & SEQ_NUM_FLAG) == 0);
if (TX_SEQ_NUM_COUNT > 1) begin
assign m_axis_wr_req_tx_seq_num[TX_SEQ_NUM_WIDTH*1 +: TX_SEQ_NUM_WIDTH] = s_axis_rq_seq_num_1;
assign m_axis_wr_req_tx_seq_num_valid[1] = s_axis_rq_seq_num_valid_1 && ((s_axis_rq_seq_num_1 & SEQ_NUM_FLAG) == 0);
end
endgenerate
localparam [1:0]
TLP_OUTPUT_STATE_IDLE = 2'd0,
TLP_OUTPUT_STATE_RD_HEADER = 2'd1,
TLP_OUTPUT_STATE_WR_HEADER = 2'd2,
TLP_OUTPUT_STATE_WR_PAYLOAD = 2'd3;
reg [1:0] tlp_output_state_reg = TLP_OUTPUT_STATE_IDLE, tlp_output_state_next;
reg [TLP_SEG_COUNT*TLP_SEG_DATA_WIDTH-1:0] out_tlp_data_reg = 0, out_tlp_data_next;
reg [TLP_SEG_COUNT*TLP_SEG_STRB_WIDTH-1:0] out_tlp_strb_reg = 0, out_tlp_strb_next;
reg [TLP_SEG_COUNT-1:0] out_tlp_eop_reg = 0, out_tlp_eop_next;
reg [127:0] tlp_header_data_rd;
reg [AXIS_PCIE_RQ_USER_WIDTH-1:0] tlp_tuser_rd;
reg [127:0] tlp_header_data_wr;
reg [AXIS_PCIE_RQ_USER_WIDTH-1:0] tlp_tuser_wr;
reg [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_rq_tdata_int = 0;
reg [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_rq_tkeep_int = 0;
reg m_axis_rq_tvalid_int = 0;
wire m_axis_rq_tready_int;
reg m_axis_rq_tlast_int = 0;
reg [AXIS_PCIE_RQ_USER_WIDTH-1:0] m_axis_rq_tuser_int = 0;
// Combine the read request and write request TLP streams onto the single
// Xilinx RQ interface: translate each TLP header into the RQ descriptor
// format, build the matching tuser sideband word, and mux descriptor and
// payload onto the internal RQ AXI stream. Read requests take priority over
// write requests when both are pending.
always @* begin
    tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;

    out_tlp_data_next = out_tlp_data_reg;
    out_tlp_strb_next = out_tlp_strb_reg;
    out_tlp_eop_next = out_tlp_eop_reg;

    tx_rd_req_tlp_ready_cmb = 1'b0;
    tx_wr_req_tlp_ready_cmb = 1'b0;

    // TLP header and sideband data
    // read request: TLP header fields -> RQ descriptor
    tlp_header_data_rd[1:0] = tx_rd_req_tlp_hdr[107:106]; // address type
    tlp_header_data_rd[63:2] = tx_rd_req_tlp_hdr[63:2]; // address
    tlp_header_data_rd[74:64] = (tx_rd_req_tlp_hdr[105:96] != 0) ? tx_rd_req_tlp_hdr[105:96] : 11'd1024; // DWORD count (TLP length of 0 encodes 1024 DW)
    if (tx_rd_req_tlp_hdr[124:120] == 5'h02) begin
        tlp_header_data_rd[78:75] = REQ_IO_READ; // request type - IO read
    end else begin
        tlp_header_data_rd[78:75] = REQ_MEM_READ; // request type - memory read
    end
    tlp_header_data_rd[79] = tx_rd_req_tlp_hdr[110]; // poisoned request
    tlp_header_data_rd[95:80] = tx_rd_req_tlp_hdr[95:80]; // requester ID
    tlp_header_data_rd[103:96] = tx_rd_req_tlp_hdr[79:72]; // tag
    tlp_header_data_rd[119:104] = 16'd0; // completer ID
    tlp_header_data_rd[120] = 1'b0; // requester ID enable
    tlp_header_data_rd[123:121] = tx_rd_req_tlp_hdr[118:116]; // traffic class
    tlp_header_data_rd[126:124] = {tx_rd_req_tlp_hdr[114], tx_rd_req_tlp_hdr[109:108]}; // attr
    tlp_header_data_rd[127] = 1'b0; // force ECRC

    if (AXIS_PCIE_DATA_WIDTH == 512) begin
        // 512-bit interface tuser layout
        tlp_tuser_rd[3:0] = tx_rd_req_tlp_hdr[67:64]; // first BE 0
        tlp_tuser_rd[7:4] = 4'd0; // first BE 1
        tlp_tuser_rd[11:8] = tx_rd_req_tlp_hdr[71:68]; // last BE 0
        tlp_tuser_rd[15:12] = 4'd0; // last BE 1
        tlp_tuser_rd[19:16] = 3'd0; // addr_offset
        tlp_tuser_rd[21:20] = 2'b01; // is_sop
        tlp_tuser_rd[23:22] = 2'd0; // is_sop0_ptr
        tlp_tuser_rd[25:24] = 2'd0; // is_sop1_ptr
        tlp_tuser_rd[27:26] = 2'b01; // is_eop
        tlp_tuser_rd[31:28] = 4'd3; // is_eop0_ptr (read request is descriptor-only: 4 DW)
        tlp_tuser_rd[35:32] = 4'd0; // is_eop1_ptr
        tlp_tuser_rd[36] = 1'b0; // discontinue
        tlp_tuser_rd[38:37] = 2'b00; // tph_present
        tlp_tuser_rd[42:39] = 4'b0000; // tph_type
        tlp_tuser_rd[44:43] = 2'b00; // tph_indirect_tag_en
        tlp_tuser_rd[60:45] = 16'd0; // tph_st_tag
        tlp_tuser_rd[66:61] = tx_rd_req_tlp_seq_int; // seq_num0
        tlp_tuser_rd[72:67] = 6'd0; // seq_num1
        tlp_tuser_rd[136:73] = 64'd0; // parity
    end else begin
        // 64/128/256-bit interface tuser layout
        tlp_tuser_rd[3:0] = tx_rd_req_tlp_hdr[67:64]; // first BE
        tlp_tuser_rd[7:4] = tx_rd_req_tlp_hdr[71:68]; // last BE
        tlp_tuser_rd[10:8] = 3'd0; // addr_offset
        tlp_tuser_rd[11] = 1'b0; // discontinue
        tlp_tuser_rd[12] = 1'b0; // tph_present
        tlp_tuser_rd[14:13] = 2'b00; // tph_type
        tlp_tuser_rd[15] = 1'b0; // tph_indirect_tag_en
        tlp_tuser_rd[23:16] = 8'd0; // tph_st_tag
        tlp_tuser_rd[27:24] = tx_rd_req_tlp_seq_int; // seq_num
        tlp_tuser_rd[59:28] = 32'd0; // parity
        if (AXIS_PCIE_RQ_USER_WIDTH == 62) begin
            // wider sequence number: upper bits carried in extended tuser field
            tlp_tuser_rd[61:60] = tx_rd_req_tlp_seq_int >> 4; // seq_num
        end
    end

    // write request: TLP header fields -> RQ descriptor
    tlp_header_data_wr[1:0] = tx_wr_req_tlp_hdr[107:106]; // address type
    tlp_header_data_wr[63:2] = tx_wr_req_tlp_hdr[63:2]; // address
    tlp_header_data_wr[74:64] = (tx_wr_req_tlp_hdr[105:96] != 0) ? tx_wr_req_tlp_hdr[105:96] : 11'd1024; // DWORD count (TLP length of 0 encodes 1024 DW)
    if (tx_wr_req_tlp_hdr[124:120] == 5'h02) begin
        tlp_header_data_wr[78:75] = REQ_IO_WRITE; // request type - IO write
    end else begin
        tlp_header_data_wr[78:75] = REQ_MEM_WRITE; // request type - memory write
    end
    tlp_header_data_wr[79] = tx_wr_req_tlp_hdr[110]; // poisoned request
    tlp_header_data_wr[95:80] = tx_wr_req_tlp_hdr[95:80]; // requester ID
    tlp_header_data_wr[103:96] = tx_wr_req_tlp_hdr[79:72]; // tag
    tlp_header_data_wr[119:104] = 16'd0; // completer ID
    tlp_header_data_wr[120] = 1'b0; // requester ID enable
    tlp_header_data_wr[123:121] = tx_wr_req_tlp_hdr[118:116]; // traffic class
    tlp_header_data_wr[126:124] = {tx_wr_req_tlp_hdr[114], tx_wr_req_tlp_hdr[109:108]}; // attr
    tlp_header_data_wr[127] = 1'b0; // force ECRC

    if (AXIS_PCIE_DATA_WIDTH == 512) begin
        // 512-bit interface tuser layout
        tlp_tuser_wr[3:0] = tx_wr_req_tlp_hdr[67:64]; // first BE 0
        tlp_tuser_wr[7:4] = 4'd0; // first BE 1
        tlp_tuser_wr[11:8] = tx_wr_req_tlp_hdr[71:68]; // last BE 0
        tlp_tuser_wr[15:12] = 4'd0; // last BE 1
        tlp_tuser_wr[19:16] = 3'd0; // addr_offset
        tlp_tuser_wr[21:20] = 2'b01; // is_sop
        tlp_tuser_wr[23:22] = 2'd0; // is_sop0_ptr
        tlp_tuser_wr[25:24] = 2'd0; // is_sop1_ptr
        tlp_tuser_wr[27:26] = 2'b01; // is_eop
        tlp_tuser_wr[31:28] = 4'd3; // is_eop0_ptr
        tlp_tuser_wr[35:32] = 4'd0; // is_eop1_ptr
        tlp_tuser_wr[36] = 1'b0; // discontinue
        tlp_tuser_wr[38:37] = 2'b00; // tph_present
        tlp_tuser_wr[42:39] = 4'b0000; // tph_type
        tlp_tuser_wr[44:43] = 2'b00; // tph_indirect_tag_en
        tlp_tuser_wr[60:45] = 16'd0; // tph_st_tag
        tlp_tuser_wr[66:61] = tx_wr_req_tlp_seq_int; // seq_num0
        tlp_tuser_wr[72:67] = 6'd0; // seq_num1
        tlp_tuser_wr[136:73] = 64'd0; // parity
    end else begin
        // 64/128/256-bit interface tuser layout
        tlp_tuser_wr[3:0] = tx_wr_req_tlp_hdr[67:64]; // first BE
        tlp_tuser_wr[7:4] = tx_wr_req_tlp_hdr[71:68]; // last BE
        tlp_tuser_wr[10:8] = 3'd0; // addr_offset
        tlp_tuser_wr[11] = 1'b0; // discontinue
        tlp_tuser_wr[12] = 1'b0; // tph_present
        tlp_tuser_wr[14:13] = 2'b00; // tph_type
        tlp_tuser_wr[15] = 1'b0; // tph_indirect_tag_en
        tlp_tuser_wr[23:16] = 8'd0; // tph_st_tag
        tlp_tuser_wr[27:24] = tx_wr_req_tlp_seq_int; // seq_num
        tlp_tuser_wr[59:28] = 32'd0; // parity
        if (AXIS_PCIE_RQ_USER_WIDTH == 62) begin
            // wider sequence number: upper bits carried in extended tuser field
            tlp_tuser_wr[61:60] = tx_wr_req_tlp_seq_int >> 4; // seq_num
        end
    end

    // TLP output
    m_axis_rq_tdata_int = 0;
    m_axis_rq_tkeep_int = 0;
    m_axis_rq_tvalid_int = 1'b0;
    m_axis_rq_tlast_int = 1'b0;
    m_axis_rq_tuser_int = 0;

    // combine header and payload, merge in read request TLPs
    case (tlp_output_state_reg)
        TLP_OUTPUT_STATE_IDLE: begin
            // idle state
            if (tx_rd_req_tlp_valid && m_axis_rq_tready_int) begin
                if (AXIS_PCIE_DATA_WIDTH == 64) begin
                    // 64 bit interface, send first half of header (read request)
                    m_axis_rq_tdata_int = tlp_header_data_rd[63:0];
                    m_axis_rq_tkeep_int = 2'b11;
                    m_axis_rq_tvalid_int = 1'b1;
                    m_axis_rq_tlast_int = 1'b0;
                    m_axis_rq_tuser_int = tlp_tuser_rd;

                    tlp_output_state_next = TLP_OUTPUT_STATE_RD_HEADER;
                end else begin
                    // wider interface, send complete header (read request)
                    m_axis_rq_tdata_int = tlp_header_data_rd;
                    m_axis_rq_tkeep_int = 4'b1111;
                    m_axis_rq_tvalid_int = 1'b1;
                    m_axis_rq_tlast_int = 1'b1;
                    m_axis_rq_tuser_int = tlp_tuser_rd;

                    tx_rd_req_tlp_ready_cmb = 1'b1;

                    tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
                end
            end else if (tx_wr_req_tlp_valid && m_axis_rq_tready_int) begin
                if (AXIS_PCIE_DATA_WIDTH == 64) begin
                    // 64 bit interface, send first half of header (write request)
                    m_axis_rq_tdata_int = tlp_header_data_wr[63:0];
                    m_axis_rq_tkeep_int = 2'b11;
                    m_axis_rq_tvalid_int = 1'b1;
                    m_axis_rq_tlast_int = 1'b0;
                    m_axis_rq_tuser_int = tlp_tuser_wr;

                    tlp_output_state_next = TLP_OUTPUT_STATE_WR_HEADER;
                end else if (AXIS_PCIE_DATA_WIDTH == 128) begin
                    // 128 bit interface, send complete header (write request)
                    m_axis_rq_tdata_int = tlp_header_data_wr;
                    m_axis_rq_tkeep_int = 4'b1111;
                    m_axis_rq_tvalid_int = 1'b1;
                    m_axis_rq_tlast_int = 1'b0;
                    m_axis_rq_tuser_int = tlp_tuser_wr;

                    tlp_output_state_next = TLP_OUTPUT_STATE_WR_PAYLOAD;
                end else begin
                    // wider interface, send header and start of payload (write request)
                    m_axis_rq_tdata_int = {tx_wr_req_tlp_data, tlp_header_data_wr};
                    m_axis_rq_tkeep_int = {tx_wr_req_tlp_strb, 4'b1111};
                    m_axis_rq_tvalid_int = 1'b1;
                    m_axis_rq_tlast_int = 1'b0;
                    m_axis_rq_tuser_int = tlp_tuser_wr;

                    tx_wr_req_tlp_ready_cmb = 1'b1;

                    // save payload that did not fit behind the 4 DW descriptor
                    out_tlp_data_next = tx_wr_req_tlp_data;
                    out_tlp_strb_next = tx_wr_req_tlp_strb;
                    out_tlp_eop_next = tx_wr_req_tlp_eop;

                    if (tx_wr_req_tlp_eop && ((tx_wr_req_tlp_strb >> (TLP_DATA_WIDTH_DWORDS-4)) == 0)) begin
                        // entire payload fit in this beat
                        m_axis_rq_tlast_int = 1'b1;
                        tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
                    end else begin
                        tlp_output_state_next = TLP_OUTPUT_STATE_WR_PAYLOAD;
                    end
                end
            end else begin
                tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
            end
        end
        TLP_OUTPUT_STATE_RD_HEADER: begin
            // second cycle of header (read request) (64 bit interface width only)
            if (AXIS_PCIE_DATA_WIDTH == 64) begin
                m_axis_rq_tdata_int = tlp_header_data_rd[127:64];
                m_axis_rq_tkeep_int = 2'b11;
                m_axis_rq_tlast_int = 1'b1;
                m_axis_rq_tuser_int = tlp_tuser_rd;

                if (tx_rd_req_tlp_valid && m_axis_rq_tready_int) begin
                    m_axis_rq_tvalid_int = 1'b1;

                    tx_rd_req_tlp_ready_cmb = 1'b1;

                    tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
                end else begin
                    tlp_output_state_next = TLP_OUTPUT_STATE_RD_HEADER;
                end
            end
        end
        TLP_OUTPUT_STATE_WR_HEADER: begin
            // second cycle of header (write request) (64 bit interface width only)
            if (AXIS_PCIE_DATA_WIDTH == 64) begin
                m_axis_rq_tdata_int = tlp_header_data_wr[127:64];
                m_axis_rq_tkeep_int = 2'b11;
                m_axis_rq_tlast_int = 1'b0;
                m_axis_rq_tuser_int = tlp_tuser_wr;

                if (tx_wr_req_tlp_valid && m_axis_rq_tready_int) begin
                    m_axis_rq_tvalid_int = 1'b1;

                    tlp_output_state_next = TLP_OUTPUT_STATE_WR_PAYLOAD;
                end else begin
                    tlp_output_state_next = TLP_OUTPUT_STATE_WR_HEADER;
                end
            end
        end
        TLP_OUTPUT_STATE_WR_PAYLOAD: begin
            // transfer payload (write request)
            if (AXIS_PCIE_DATA_WIDTH >= 256) begin
                // shift payload: saved upper 4 DW from previous beat in the
                // low lanes, fresh payload above
                m_axis_rq_tdata_int = {tx_wr_req_tlp_data, out_tlp_data_reg[TLP_DATA_WIDTH-1:TLP_DATA_WIDTH-128]};
                if (tx_wr_req_tlp_valid && !out_tlp_eop_reg) begin
                    m_axis_rq_tkeep_int = {tx_wr_req_tlp_strb, out_tlp_strb_reg[TLP_STRB_WIDTH-1:TLP_DATA_WIDTH_DWORDS-4]};
                end else begin
                    m_axis_rq_tkeep_int = out_tlp_strb_reg[TLP_STRB_WIDTH-1:TLP_DATA_WIDTH_DWORDS-4];
                end
                m_axis_rq_tlast_int = 1'b0;
                m_axis_rq_tuser_int = tlp_tuser_wr;

                if ((tx_wr_req_tlp_valid || out_tlp_eop_reg) && m_axis_rq_tready_int) begin
                    m_axis_rq_tvalid_int = 1'b1;

                    // only consume input when the saved beat was not the end of the TLP
                    tx_wr_req_tlp_ready_cmb = !out_tlp_eop_reg;

                    out_tlp_data_next = tx_wr_req_tlp_data;
                    out_tlp_strb_next = tx_wr_req_tlp_strb;
                    out_tlp_eop_next = tx_wr_req_tlp_eop;

                    if (out_tlp_eop_reg || (tx_wr_req_tlp_eop && ((tx_wr_req_tlp_strb >> (TLP_DATA_WIDTH_DWORDS-4)) == 0))) begin
                        m_axis_rq_tlast_int = 1'b1;
                        tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
                    end else begin
                        tlp_output_state_next = TLP_OUTPUT_STATE_WR_PAYLOAD;
                    end
                end else begin
                    tlp_output_state_next = TLP_OUTPUT_STATE_WR_PAYLOAD;
                end
            end else begin
                // 64/128-bit: header ended on a beat boundary, payload passes straight through
                m_axis_rq_tdata_int = tx_wr_req_tlp_data;
                m_axis_rq_tkeep_int = tx_wr_req_tlp_strb;
                m_axis_rq_tlast_int = 1'b0;
                m_axis_rq_tuser_int = tlp_tuser_wr;

                if (tx_wr_req_tlp_valid && m_axis_rq_tready_int) begin
                    m_axis_rq_tvalid_int = 1'b1;

                    tx_wr_req_tlp_ready_cmb = 1'b1;

                    if (tx_wr_req_tlp_eop) begin
                        m_axis_rq_tlast_int = 1'b1;
                        tlp_output_state_next = TLP_OUTPUT_STATE_IDLE;
                    end else begin
                        tlp_output_state_next = TLP_OUTPUT_STATE_WR_PAYLOAD;
                    end
                end else begin
                    tlp_output_state_next = TLP_OUTPUT_STATE_WR_PAYLOAD;
                end
            end
        end
    endcase
end
// register the TLP output state and the saved payload beat
always @(posedge clk) begin
    if (rst) begin
        // synchronous reset returns the FSM to idle; the saved payload
        // registers are don't-care while idle, so they are not reset
        tlp_output_state_reg <= TLP_OUTPUT_STATE_IDLE;
    end else begin
        tlp_output_state_reg <= tlp_output_state_next;
    end

    out_tlp_data_reg <= out_tlp_data_next;
    out_tlp_strb_reg <= out_tlp_strb_next;
    out_tlp_eop_reg <= out_tlp_eop_next;
end
// output datapath logic (PCIe TLP)
// small distributed-RAM FIFO decoupling the combinational RQ mux above from
// the registered m_axis_rq outputs
reg [AXIS_PCIE_DATA_WIDTH-1:0] m_axis_rq_tdata_reg = {AXIS_PCIE_DATA_WIDTH{1'b0}};
reg [AXIS_PCIE_KEEP_WIDTH-1:0] m_axis_rq_tkeep_reg = {AXIS_PCIE_KEEP_WIDTH{1'b0}};
reg m_axis_rq_tvalid_reg = 1'b0, m_axis_rq_tvalid_next;
reg m_axis_rq_tlast_reg = 1'b0;
reg [AXIS_PCIE_RQ_USER_WIDTH-1:0] m_axis_rq_tuser_reg = {AXIS_PCIE_RQ_USER_WIDTH{1'b0}};

// pointers carry one extra MSB so that full and empty can be distinguished
reg [OUTPUT_FIFO_ADDR_WIDTH+1-1:0] out_fifo_wr_ptr_reg = 0;
reg [OUTPUT_FIFO_ADDR_WIDTH+1-1:0] out_fifo_rd_ptr_reg = 0;
reg out_fifo_half_full_reg = 1'b0;

wire out_fifo_full = out_fifo_wr_ptr_reg == (out_fifo_rd_ptr_reg ^ {1'b1, {OUTPUT_FIFO_ADDR_WIDTH{1'b0}}});
wire out_fifo_empty = out_fifo_wr_ptr_reg == out_fifo_rd_ptr_reg;

(* ram_style = "distributed" *)
reg [AXIS_PCIE_DATA_WIDTH-1:0] out_fifo_tdata[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];
(* ram_style = "distributed" *)
reg [AXIS_PCIE_KEEP_WIDTH-1:0] out_fifo_tkeep[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];
(* ram_style = "distributed" *)
reg out_fifo_tlast[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];
(* ram_style = "distributed" *)
reg [AXIS_PCIE_RQ_USER_WIDTH-1:0] out_fifo_tuser[2**OUTPUT_FIFO_ADDR_WIDTH-1:0];

// accept input while the FIFO is less than half full; the registered flag
// leaves slack for beats already in flight
assign m_axis_rq_tready_int = !out_fifo_half_full_reg;

assign m_axis_rq_tdata = m_axis_rq_tdata_reg;
assign m_axis_rq_tkeep = m_axis_rq_tkeep_reg;
assign m_axis_rq_tvalid = m_axis_rq_tvalid_reg;
assign m_axis_rq_tlast = m_axis_rq_tlast_reg;
assign m_axis_rq_tuser = m_axis_rq_tuser_reg;
always @(posedge clk) begin
    // drop tvalid once the sink has accepted the current beat (overridden
    // below if a new beat is dequeued this cycle)
    m_axis_rq_tvalid_reg <= m_axis_rq_tvalid_reg && !m_axis_rq_tready;

    // registered occupancy flag used for input-side flow control
    out_fifo_half_full_reg <= $unsigned(out_fifo_wr_ptr_reg - out_fifo_rd_ptr_reg) >= 2**(OUTPUT_FIFO_ADDR_WIDTH-1);

    // write side: enqueue a beat from the internal interface
    if (!out_fifo_full && m_axis_rq_tvalid_int) begin
        out_fifo_tdata[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_rq_tdata_int;
        out_fifo_tkeep[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_rq_tkeep_int;
        out_fifo_tlast[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_rq_tlast_int;
        out_fifo_tuser[out_fifo_wr_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]] <= m_axis_rq_tuser_int;
        out_fifo_wr_ptr_reg <= out_fifo_wr_ptr_reg + 1;
    end

    // read side: dequeue into the output registers when they are free
    if (!out_fifo_empty && (!m_axis_rq_tvalid_reg || m_axis_rq_tready)) begin
        m_axis_rq_tdata_reg <= out_fifo_tdata[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
        m_axis_rq_tkeep_reg <= out_fifo_tkeep[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
        m_axis_rq_tvalid_reg <= 1'b1;
        m_axis_rq_tlast_reg <= out_fifo_tlast[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
        m_axis_rq_tuser_reg <= out_fifo_tuser[out_fifo_rd_ptr_reg[OUTPUT_FIFO_ADDR_WIDTH-1:0]];
        out_fifo_rd_ptr_reg <= out_fifo_rd_ptr_reg + 1;
    end

    if (rst) begin
        out_fifo_wr_ptr_reg <= 0;
        out_fifo_rd_ptr_reg <= 0;
        m_axis_rq_tvalid_reg <= 1'b0;
    end
end
endmodule

136
tb/pcie_us_if/Makefile Normal file
View File

@ -0,0 +1,136 @@
# Copyright (c) 2021 Alex Forencich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# cocotb testbench makefile for pcie_us_if
TOPLEVEL_LANG = verilog

# simulator selection and waveform capture
SIM ?= icarus
WAVES ?= 0

COCOTB_HDL_TIMEUNIT = 1ns
COCOTB_HDL_TIMEPRECISION = 1ps

# DUT and Python test module
DUT = pcie_us_if
TOPLEVEL = $(DUT)
MODULE = test_$(DUT)

# RTL sources: DUT plus its submodules and shared helpers
VERILOG_SOURCES += ../../rtl/$(DUT).v
VERILOG_SOURCES += ../../rtl/$(DUT)_rc.v
VERILOG_SOURCES += ../../rtl/$(DUT)_rq.v
VERILOG_SOURCES += ../../rtl/$(DUT)_cc.v
VERILOG_SOURCES += ../../rtl/$(DUT)_cq.v
VERILOG_SOURCES += ../../rtl/pcie_us_cfg.v
VERILOG_SOURCES += ../../rtl/pcie_us_msi.v
VERILOG_SOURCES += ../../rtl/arbiter.v
VERILOG_SOURCES += ../../rtl/priority_encoder.v

# module parameters
# (derived widths follow the RTL defaults; 512-bit uses the wide tuser layouts)
export PARAM_AXIS_PCIE_DATA_WIDTH ?= 64
export PARAM_AXIS_PCIE_KEEP_WIDTH ?= $(shell expr $(PARAM_AXIS_PCIE_DATA_WIDTH) / 32 )
export PARAM_AXIS_PCIE_RQ_USER_WIDTH ?= $(if $(filter-out 512,$(PARAM_AXIS_PCIE_DATA_WIDTH)),62,137)
export PARAM_AXIS_PCIE_RC_USER_WIDTH ?= $(if $(filter-out 512,$(PARAM_AXIS_PCIE_DATA_WIDTH)),75,161)
export PARAM_AXIS_PCIE_CQ_USER_WIDTH ?= $(if $(filter-out 512,$(PARAM_AXIS_PCIE_DATA_WIDTH)),88,183)
export PARAM_AXIS_PCIE_CC_USER_WIDTH ?= $(if $(filter-out 512,$(PARAM_AXIS_PCIE_DATA_WIDTH)),33,81)
export PARAM_RQ_SEQ_NUM_WIDTH ?= $(if $(filter-out 60,$(PARAM_AXIS_PCIE_RQ_USER_WIDTH)),6,4)
export PARAM_TLP_SEG_COUNT ?= 1
export PARAM_TLP_SEG_DATA_WIDTH ?= $(shell expr $(PARAM_AXIS_PCIE_DATA_WIDTH) / $(PARAM_TLP_SEG_COUNT) )
export PARAM_TLP_SEG_STRB_WIDTH ?= $(shell expr $(PARAM_TLP_SEG_DATA_WIDTH) / 32 )
export PARAM_TLP_SEG_HDR_WIDTH ?= 128
export PARAM_TX_SEQ_NUM_COUNT ?= $(if $(filter-out 512,$(PARAM_AXIS_PCIE_DATA_WIDTH)),1,2)
export PARAM_TX_SEQ_NUM_WIDTH ?= $(shell expr $(PARAM_RQ_SEQ_NUM_WIDTH) - 1 )
export PARAM_PF_COUNT ?= 1
export PARAM_VF_COUNT ?= 0
export PARAM_F_COUNT ?= $(shell expr $(PARAM_PF_COUNT) + $(PARAM_VF_COUNT) )
export PARAM_READ_EXT_TAG_ENABLE ?= 1
export PARAM_READ_MAX_READ_REQ_SIZE ?= 1
export PARAM_READ_MAX_PAYLOAD_SIZE ?= 1
export PARAM_MSI_ENABLE ?= 1
export PARAM_MSI_COUNT ?= 32

# pass parameters through to the chosen simulator
ifeq ($(SIM), icarus)
	PLUSARGS += -fst

	COMPILE_ARGS += -P $(TOPLEVEL).AXIS_PCIE_DATA_WIDTH=$(PARAM_AXIS_PCIE_DATA_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).AXIS_PCIE_KEEP_WIDTH=$(PARAM_AXIS_PCIE_KEEP_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).AXIS_PCIE_RQ_USER_WIDTH=$(PARAM_AXIS_PCIE_RQ_USER_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).AXIS_PCIE_RC_USER_WIDTH=$(PARAM_AXIS_PCIE_RC_USER_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).AXIS_PCIE_CQ_USER_WIDTH=$(PARAM_AXIS_PCIE_CQ_USER_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).AXIS_PCIE_CC_USER_WIDTH=$(PARAM_AXIS_PCIE_CC_USER_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).RQ_SEQ_NUM_WIDTH=$(PARAM_RQ_SEQ_NUM_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).TLP_SEG_COUNT=$(PARAM_TLP_SEG_COUNT)
	COMPILE_ARGS += -P $(TOPLEVEL).TLP_SEG_DATA_WIDTH=$(PARAM_TLP_SEG_DATA_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).TLP_SEG_STRB_WIDTH=$(PARAM_TLP_SEG_STRB_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).TLP_SEG_HDR_WIDTH=$(PARAM_TLP_SEG_HDR_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).TX_SEQ_NUM_COUNT=$(PARAM_TX_SEQ_NUM_COUNT)
	COMPILE_ARGS += -P $(TOPLEVEL).TX_SEQ_NUM_WIDTH=$(PARAM_TX_SEQ_NUM_WIDTH)
	COMPILE_ARGS += -P $(TOPLEVEL).PF_COUNT=$(PARAM_PF_COUNT)
	COMPILE_ARGS += -P $(TOPLEVEL).VF_COUNT=$(PARAM_VF_COUNT)
	COMPILE_ARGS += -P $(TOPLEVEL).F_COUNT=$(PARAM_F_COUNT)
	COMPILE_ARGS += -P $(TOPLEVEL).READ_EXT_TAG_ENABLE=$(PARAM_READ_EXT_TAG_ENABLE)
	COMPILE_ARGS += -P $(TOPLEVEL).READ_MAX_READ_REQ_SIZE=$(PARAM_READ_MAX_READ_REQ_SIZE)
	COMPILE_ARGS += -P $(TOPLEVEL).READ_MAX_PAYLOAD_SIZE=$(PARAM_READ_MAX_PAYLOAD_SIZE)
	COMPILE_ARGS += -P $(TOPLEVEL).MSI_ENABLE=$(PARAM_MSI_ENABLE)
	COMPILE_ARGS += -P $(TOPLEVEL).MSI_COUNT=$(PARAM_MSI_COUNT)

	ifeq ($(WAVES), 1)
		VERILOG_SOURCES += iverilog_dump.v
		COMPILE_ARGS += -s iverilog_dump
	endif
else ifeq ($(SIM), verilator)
	COMPILE_ARGS += -Wno-SELRANGE -Wno-WIDTH

	COMPILE_ARGS += -GAXIS_PCIE_DATA_WIDTH=$(PARAM_AXIS_PCIE_DATA_WIDTH)
	COMPILE_ARGS += -GAXIS_PCIE_KEEP_WIDTH=$(PARAM_AXIS_PCIE_KEEP_WIDTH)
	COMPILE_ARGS += -GAXIS_PCIE_RQ_USER_WIDTH=$(PARAM_AXIS_PCIE_RQ_USER_WIDTH)
	COMPILE_ARGS += -GAXIS_PCIE_RC_USER_WIDTH=$(PARAM_AXIS_PCIE_RC_USER_WIDTH)
	COMPILE_ARGS += -GAXIS_PCIE_CQ_USER_WIDTH=$(PARAM_AXIS_PCIE_CQ_USER_WIDTH)
	COMPILE_ARGS += -GAXIS_PCIE_CC_USER_WIDTH=$(PARAM_AXIS_PCIE_CC_USER_WIDTH)
	COMPILE_ARGS += -GRQ_SEQ_NUM_WIDTH=$(PARAM_RQ_SEQ_NUM_WIDTH)
	COMPILE_ARGS += -GTLP_SEG_COUNT=$(PARAM_TLP_SEG_COUNT)
	COMPILE_ARGS += -GTLP_SEG_DATA_WIDTH=$(PARAM_TLP_SEG_DATA_WIDTH)
	COMPILE_ARGS += -GTLP_SEG_STRB_WIDTH=$(PARAM_TLP_SEG_STRB_WIDTH)
	COMPILE_ARGS += -GTLP_SEG_HDR_WIDTH=$(PARAM_TLP_SEG_HDR_WIDTH)
	COMPILE_ARGS += -GTX_SEQ_NUM_COUNT=$(PARAM_TX_SEQ_NUM_COUNT)
	COMPILE_ARGS += -GTX_SEQ_NUM_WIDTH=$(PARAM_TX_SEQ_NUM_WIDTH)
	COMPILE_ARGS += -GPF_COUNT=$(PARAM_PF_COUNT)
	COMPILE_ARGS += -GVF_COUNT=$(PARAM_VF_COUNT)
	COMPILE_ARGS += -GF_COUNT=$(PARAM_F_COUNT)
	COMPILE_ARGS += -GREAD_EXT_TAG_ENABLE=$(PARAM_READ_EXT_TAG_ENABLE)
	COMPILE_ARGS += -GREAD_MAX_READ_REQ_SIZE=$(PARAM_READ_MAX_READ_REQ_SIZE)
	COMPILE_ARGS += -GREAD_MAX_PAYLOAD_SIZE=$(PARAM_READ_MAX_PAYLOAD_SIZE)
	COMPILE_ARGS += -GMSI_ENABLE=$(PARAM_MSI_ENABLE)
	COMPILE_ARGS += -GMSI_COUNT=$(PARAM_MSI_COUNT)

	ifeq ($(WAVES), 1)
		COMPILE_ARGS += --trace-fst
	endif
endif

include $(shell cocotb-config --makefiles)/Makefile.sim

# generated dump module for icarus waveform capture
iverilog_dump.v:
	echo 'module iverilog_dump();' > $@
	echo 'initial begin' >> $@
	echo '    $$dumpfile("$(TOPLEVEL).fst");' >> $@
	echo '    $$dumpvars(0, $(TOPLEVEL));' >> $@
	echo 'end' >> $@
	echo 'endmodule' >> $@

clean::
	@rm -rf iverilog_dump.v
	@rm -rf dump.fst $(TOPLEVEL).fst

1
tb/pcie_us_if/pcie_if.py Symbolic link
View File

@ -0,0 +1 @@
../pcie_if.py

View File

@ -0,0 +1,567 @@
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import sys
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
# import the shared TLP interface models; fall back to resolving the module
# relative to this file when the test is run outside the normal pytest layout
try:
    from pcie_if import PcieIfTestDevice, PcieIfRxBus, PcieIfTxBus
except ImportError:
    # attempt import from current directory
    sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
    try:
        from pcie_if import PcieIfTestDevice, PcieIfRxBus, PcieIfTxBus
    finally:
        # restore sys.path whether or not the retry succeeded
        del sys.path[0]
class TB(object):
    """Testbench harness for pcie_us_if.

    Instantiates a simulated root complex and an UltraScale+ PCIe hard block
    model driving the DUT's RQ/RC/CQ/CC AXI-stream interfaces, plus a
    PcieIfTestDevice attached to the DUT's generic TLP interfaces.
    """

    def __init__(self, dut):
        self.dut = dut

        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.dev = UltraScalePlusPcieDevice(
            # configuration options
            pcie_generation=3,
            # pcie_link_width=2,
            # user_clk_frequency=250e6,
            alignment="dword",
            cq_cc_straddle=False,
            rq_rc_straddle=False,
            rc_4tlp_straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=False,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,

            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk,
            user_reset=dut.rst,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            # pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
            # pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
            # pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
            # pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
            # pcie_rq_tag0
            # pcie_rq_tag1
            # pcie_rq_tag_av
            # pcie_rq_tag_vld0
            # pcie_rq_tag_vld1

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            # cfg_max_payload
            # cfg_max_read_req
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            # cfg_fc_ph=dut.cfg_fc_ph,
            # cfg_fc_pd=dut.cfg_fc_pd,
            # cfg_fc_nph=dut.cfg_fc_nph,
            # cfg_fc_npd=dut.cfg_fc_npd,
            # cfg_fc_cplh=dut.cfg_fc_cplh,
            # cfg_fc_cpld=dut.cfg_fc_cpld,
            # cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            # cfg_err_cor_in
            # cfg_err_uncor_in
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
            # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # device model attached to the DUT's generic TLP-interface side
        self.test_dev = PcieIfTestDevice(
            force_64bit_addr=True,

            clk=dut.clk,
            rst=dut.rst,

            rx_req_tlp_bus=PcieIfRxBus.from_prefix(dut, "rx_req_tlp"),

            tx_cpl_tlp_bus=PcieIfTxBus.from_prefix(dut, "tx_cpl_tlp"),

            tx_rd_req_tlp_bus=PcieIfTxBus.from_prefix(dut, "tx_rd_req_tlp"),
            rd_req_tx_seq_num=dut.m_axis_rd_req_tx_seq_num,
            rd_req_tx_seq_num_valid=dut.m_axis_rd_req_tx_seq_num_valid,

            tx_wr_req_tlp_bus=PcieIfTxBus.from_prefix(dut, "tx_wr_req_tlp"),
            wr_req_tx_seq_num=dut.m_axis_wr_req_tx_seq_num,
            wr_req_tx_seq_num_valid=dut.m_axis_wr_req_tx_seq_num_valid,

            rx_cpl_tlp_bus=PcieIfRxBus.from_prefix(dut, "rx_cpl_tlp"),
        )

        self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.dev.functions[0].msi_cap.msi_multiple_message_capable = 5

        # BAR 0: 1 MB 32-bit memory BAR
        self.dev.functions[0].configure_bar(0, 1024*1024)
        self.test_dev.add_mem_region(1024*1024)
        # BAR 1: 1 MB 64-bit prefetchable memory BAR
        self.dev.functions[0].configure_bar(1, 1024*1024, True, True)
        self.test_dev.add_prefetchable_mem_region(1024*1024)
        # BAR 3: 1 kB IO BAR
        self.dev.functions[0].configure_bar(3, 1024, False, False, True)
        self.test_dev.add_io_region(1024)

        self.dut.msi_irq.setimmediatevalue(0)

    def set_idle_generator(self, generator=None):
        """Install a pause generator on all source (valid-side) interfaces."""
        if generator:
            self.dev.rc_source.set_pause_generator(generator())
            self.dev.cq_source.set_pause_generator(generator())
            self.test_dev.tx_cpl_tlp_source.set_pause_generator(generator())
            self.test_dev.tx_rd_req_tlp_source.set_pause_generator(generator())
            self.test_dev.tx_wr_req_tlp_source.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        """Install a pause generator on all sink (ready-side) interfaces."""
        if generator:
            self.dev.rq_sink.set_pause_generator(generator())
            self.dev.cc_sink.set_pause_generator(generator())
            self.test_dev.rx_req_tlp_sink.set_pause_generator(generator())
            self.test_dev.rx_cpl_tlp_sink.set_pause_generator(generator())
async def run_test_mem(dut, idle_inserter=None, backpressure_inserter=None):
    """Completer-side test: root complex accesses the DUT's IO and memory BARs."""
    tb = TB(dut)

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    await FallingEdge(dut.rst)
    await Timer(100, 'ns')

    await tb.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    # propagate negotiated settings to the device model
    tb.test_dev.dev_max_payload = tb.dev.functions[0].pcie_cap.max_payload_size
    tb.test_dev.dev_max_read_req = tb.dev.functions[0].pcie_cap.max_read_request_size
    tb.test_dev.dev_bus_num = tb.dev.bus_num

    dev_bar0 = tb.rc.tree[0][0].bar_addr[0]
    dev_bar1 = tb.rc.tree[0][0].bar_addr[1]
    dev_bar3 = tb.rc.tree[0][0].bar_addr[3]

    # IO BAR: short transfers at small offsets
    for length in list(range(0, 8)):
        for offset in list(range(8)):
            tb.log.info("IO operation length: %d offset: %d", length, offset)
            addr = dev_bar3+offset
            test_data = bytearray([x % 256 for x in range(length)])

            await tb.rc.io_write(addr, test_data, 5000)
            assert tb.test_dev.regions[3][1][offset:offset+length] == test_data

            assert await tb.rc.io_read(addr, length, 5000) == test_data

    # 32-bit memory BAR: transfers at start and across a 4 kB boundary
    for length in list(range(0, 32))+[1024]:
        for offset in list(range(8))+list(range(4096-8, 4096)):
            tb.log.info("Memory operation (32-bit BAR) length: %d offset: %d", length, offset)
            addr = dev_bar0+offset
            test_data = bytearray([x % 256 for x in range(length)])

            await tb.rc.mem_write(addr, test_data, 100)
            # wait for write to complete
            await tb.rc.mem_read(addr, 1, 5000)
            assert tb.test_dev.regions[0][1][offset:offset+length] == test_data

            assert await tb.rc.mem_read(addr, length, 5000) == test_data

    # 64-bit prefetchable memory BAR: same pattern
    for length in list(range(0, 32))+[1024]:
        for offset in list(range(8))+list(range(4096-8, 4096)):
            tb.log.info("Memory operation (64-bit BAR) length: %d offset: %d", length, offset)
            addr = dev_bar1+offset
            test_data = bytearray([x % 256 for x in range(length)])

            await tb.rc.mem_write(addr, test_data, 100)
            # wait for write to complete
            await tb.rc.mem_read(addr, 1, 5000)
            assert tb.test_dev.regions[1][1][offset:offset+length] == test_data

            assert await tb.rc.mem_read(addr, length, 5000) == test_data

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
async def run_test_dma(dut, idle_inserter=None, backpressure_inserter=None):
    """Requester-side test: device DMA reads/writes to host memory and IO regions."""
    tb = TB(dut)

    # allocate host-side regions backing the DMA targets
    mem_base, mem_data = tb.rc.alloc_region(1024*1024)
    io_base, io_data = tb.rc.alloc_io_region(1024)

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    await FallingEdge(dut.rst)
    await Timer(100, 'ns')

    await tb.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    # propagate negotiated settings to the device model
    tb.test_dev.dev_max_payload = tb.dev.functions[0].pcie_cap.max_payload_size
    tb.test_dev.dev_max_read_req = tb.dev.functions[0].pcie_cap.max_read_request_size
    tb.test_dev.dev_bus_num = tb.dev.bus_num

    for length in list(range(0, 32))+[1024]:
        for offset in list(range(8))+list(range(4096-8, 4096)):
            tb.log.info("Memory operation (DMA) length: %d offset: %d", length, offset)
            addr = mem_base+offset
            test_data = bytearray([x % 256 for x in range(length)])

            await tb.test_dev.dma_mem_write(addr, test_data, 5000, 'ns')
            # wait for write to complete
            while not tb.test_dev.tx_wr_req_tlp_source.empty() or tb.test_dev.tx_wr_req_tlp_source.active:
                await RisingEdge(dut.clk)
            # read-after-write flush before checking host memory
            await tb.test_dev.dma_mem_read(addr, 1, 5000, 'ns')
            assert mem_data[offset:offset+length] == test_data

            assert await tb.test_dev.dma_mem_read(addr, length, 5000, 'ns') == test_data

    for length in list(range(0, 8)):
        for offset in list(range(8)):
            tb.log.info("IO operation (DMA) length: %d offset: %d", length, offset)
            addr = io_base+offset
            test_data = bytearray([x % 256 for x in range(length)])

            # IO writes are non-posted, so no flush read is needed
            await tb.test_dev.dma_io_write(addr, test_data, 5000, 'ns')
            assert io_data[offset:offset+length] == test_data

            assert await tb.test_dev.dma_io_read(addr, length, 5000, 'ns') == test_data

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
async def run_test_dma_errors(dut, idle_inserter=None, backpressure_inserter=None):
    """Verify that DMA reads outside the allocated host region terminate with an error."""
    tb = TB(dut)

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    await FallingEdge(dut.rst)
    await Timer(100, 'ns')

    await tb.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    mem_base, mem_data = tb.rc.alloc_region(1024*1024)

    # mirror the negotiated link configuration into the test device model
    func = tb.dev.functions[0]
    tb.test_dev.dev_max_payload = func.pcie_cap.max_payload_size
    tb.test_dev.dev_max_read_req = func.pcie_cap.max_read_request_size
    tb.test_dev.dev_bus_num = tb.dev.bus_num

    async def expect_read_failure(addr, length):
        # the read must raise; completing without an exception is a test failure
        try:
            await tb.test_dev.dma_mem_read(addr, length, 5000, 'ns')
        except Exception:
            pass
        else:
            assert False, "Expected exception"

    # short read entirely below the allocated region
    tb.log.info("Memory operation (DMA) bad read (UR) short")
    await expect_read_failure(mem_base - 1024, 8)

    # read straddling the start of the region
    tb.log.info("Memory operation (DMA) bad read (UR) first")
    await expect_read_failure(mem_base - 512, 1024)

    # read straddling the end of the region
    tb.log.info("Memory operation (DMA) bad read (UR) last")
    await expect_read_failure(mem_base + 1024*1024 - 512, 1024)

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
async def run_test_msi(dut, idle_inserter=None, backpressure_inserter=None):
    """Pulse each of the 32 MSI interrupt inputs and wait for the matching MSI event
    to be observed at the root complex."""
    tb = TB(dut)

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    await FallingEdge(dut.rst)
    await Timer(100, 'ns')

    await tb.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    for k in range(32):
        tb.log.info("Send MSI %d", k)

        # Pulse interrupt input k for one clock cycle.
        # Use '.value =' assignment instead of the deprecated '<=' deposit
        # operator, which is removed in cocotb 2.x.
        await RisingEdge(dut.clk)
        tb.dut.msi_irq.value = 1 << k
        await RisingEdge(dut.clk)
        tb.dut.msi_irq.value = 0

        # wait for the MSI memory write to arrive at the root complex
        event = tb.rc.msi_get_event(tb.dev.functions[0].pcie_id, k)
        event.clear()
        await event.wait()

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
def cycle_pause():
    # Repeating pause pattern for idle/backpressure insertion:
    # three active cycles followed by one paused cycle, forever.
    pattern = (1, 1, 1, 0)
    return itertools.cycle(pattern)
if cocotb.SIM_NAME:
    # Running under a simulator: register every test coroutine twice —
    # once without and once with idle/backpressure insertion.
    for test_coro in [run_test_mem, run_test_dma, run_test_dma_errors, run_test_msi]:
        factory = TestFactory(test_coro)
        factory.add_option(("idle_inserter", "backpressure_inserter"), [(None, None), (cycle_pause, cycle_pause)])
        factory.generate_tests()
# cocotb-test
# Paths used by the pytest/cocotb-test wrapper below: the directory holding
# this test module, and the RTL source directory two levels up from it.
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("axis_pcie_data_width", [64, 128, 256, 512])
def test_pcie_us_if(request, axis_pcie_data_width):
    """Build and run the cocotb simulation of pcie_us_if for one AXI-stream data width."""
    dut = "pcie_us_if"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut

    # DUT plus all of its Verilog submodules
    verilog_sources = [
        os.path.join(rtl_dir, f"{dut}.v"),
        os.path.join(rtl_dir, f"{dut}_rc.v"),
        os.path.join(rtl_dir, f"{dut}_rq.v"),
        os.path.join(rtl_dir, f"{dut}_cc.v"),
        os.path.join(rtl_dir, f"{dut}_cq.v"),
        os.path.join(rtl_dir, "pcie_us_cfg.v"),
        os.path.join(rtl_dir, "pcie_us_msi.v"),
        os.path.join(rtl_dir, "arbiter.v"),
        os.path.join(rtl_dir, "priority_encoder.v"),
    ]

    # segmented interface parameters
    tlp_seg_count = 1
    tlp_seg_data_width = axis_pcie_data_width // tlp_seg_count
    tlp_seg_strb_width = tlp_seg_data_width // 32

    # derived widths: 512-bit interfaces use the wider sideband formats;
    # a 62-bit RQ tuser (UltraScale+) implies a 6-bit RQ sequence number
    rq_user_width = 62 if axis_pcie_data_width < 512 else 137
    rq_seq_num_width = 4 if rq_user_width == 60 else 6
    pf_count = 1
    vf_count = 0

    parameters = {
        'AXIS_PCIE_DATA_WIDTH': axis_pcie_data_width,
        'AXIS_PCIE_KEEP_WIDTH': axis_pcie_data_width // 32,
        'AXIS_PCIE_RQ_USER_WIDTH': rq_user_width,
        'AXIS_PCIE_RC_USER_WIDTH': 75 if axis_pcie_data_width < 512 else 161,
        'AXIS_PCIE_CQ_USER_WIDTH': 88 if axis_pcie_data_width < 512 else 183,
        'AXIS_PCIE_CC_USER_WIDTH': 33 if axis_pcie_data_width < 512 else 81,
        'RQ_SEQ_NUM_WIDTH': rq_seq_num_width,
        'TLP_SEG_COUNT': tlp_seg_count,
        'TLP_SEG_DATA_WIDTH': tlp_seg_data_width,
        'TLP_SEG_STRB_WIDTH': tlp_seg_strb_width,
        'TLP_SEG_HDR_WIDTH': 128,
        'TX_SEQ_NUM_COUNT': 1 if axis_pcie_data_width < 512 else 2,
        'TX_SEQ_NUM_WIDTH': rq_seq_num_width - 1,
        'PF_COUNT': pf_count,
        'VF_COUNT': vf_count,
        'F_COUNT': pf_count + vf_count,
        'READ_EXT_TAG_ENABLE': 1,
        'READ_MAX_READ_REQ_SIZE': 1,
        'READ_MAX_PAYLOAD_SIZE': 1,
        'MSI_ENABLE': 1,
        'MSI_COUNT': 32,
    }

    # pass parameters to the testbench process through the environment
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )