mirror of https://github.com/corundum/corundum.git
synced 2025-01-30 08:32:52 +08:00

Minor refactor of PCIe read request TLP size computation signals

Signed-off-by: Alex Forencich <alex@alexforencich.com>

commit f59c5b78c8
parent 0828de78e8
@@ -298,9 +298,10 @@ reg [1:0] tlp_state_reg = TLP_STATE_IDLE, tlp_state_next;
 // datapath control signals
 reg last_cycle;
 
-reg [3:0] first_be;
-reg [3:0] last_be;
-reg [10:0] dword_count;
+reg [3:0] req_first_be;
+reg [3:0] req_last_be;
+reg [12:0] req_tlp_count;
+reg [10:0] req_dword_count;
 reg req_last_tlp;
 reg [PCIE_ADDR_WIDTH-1:0] req_pcie_addr;
 
@@ -313,7 +314,6 @@ reg [PCIE_ADDR_WIDTH-1:0] req_pcie_addr_reg = {PCIE_ADDR_WIDTH{1'b0}}, req_pcie_
 reg [RAM_SEL_WIDTH-1:0] req_ram_sel_reg = {RAM_SEL_WIDTH{1'b0}}, req_ram_sel_next;
 reg [RAM_ADDR_WIDTH-1:0] req_ram_addr_reg = {RAM_ADDR_WIDTH{1'b0}}, req_ram_addr_next;
 reg [LEN_WIDTH-1:0] req_op_count_reg = {LEN_WIDTH{1'b0}}, req_op_count_next;
-reg [12:0] req_tlp_count_reg = 13'd0, req_tlp_count_next;
 reg req_zero_len_reg = 1'b0, req_zero_len_next;
 reg [OP_TAG_WIDTH-1:0] req_op_tag_reg = {OP_TAG_WIDTH{1'b0}}, req_op_tag_next;
 reg req_op_tag_valid_reg = 1'b0, req_op_tag_valid_next;
@@ -612,7 +612,6 @@ always @* begin
 req_ram_sel_next = req_ram_sel_reg;
 req_ram_addr_next = req_ram_addr_reg;
 req_op_count_next = req_op_count_reg;
-req_tlp_count_next = req_tlp_count_reg;
 req_zero_len_next = req_zero_len_reg;
 req_op_tag_next = req_op_tag_reg;
 req_op_tag_valid_next = req_op_tag_valid_reg;
@@ -636,16 +635,16 @@ always @* begin
 // packet smaller than max read request size
 if (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) >> 12 != 0 || req_op_count_reg >> 12 != 0) begin
     // crosses 4k boundary, split on 4K boundary
-    req_tlp_count_next = 13'h1000 - req_pcie_addr_reg[11:0];
-    dword_count = 11'h400 - req_pcie_addr_reg[11:2];
+    req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
+    req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
     req_last_tlp = (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) & 12'hfff) == 0 && req_op_count_reg >> 12 == 0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12]+1;
     req_pcie_addr[11:0] = 12'd0;
 end else begin
     // does not cross 4k boundary, send one TLP
-    req_tlp_count_next = req_op_count_reg;
-    dword_count = (req_op_count_reg + req_pcie_addr_reg[1:0] + 3) >> 2;
+    req_tlp_count = req_op_count_reg;
+    req_dword_count = (req_op_count_reg + req_pcie_addr_reg[1:0] + 3) >> 2;
     req_last_tlp = 1'b1;
     // always last TLP, so next address is irrelevant
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12];
@@ -655,18 +654,18 @@ always @* begin
 // packet larger than max read request size
 if (((req_pcie_addr_reg & 12'hfff) + {max_read_request_size_dw_reg, 2'b00}) >> 12 != 0) begin
     // crosses 4k boundary, split on 4K boundary
-    req_tlp_count_next = 13'h1000 - req_pcie_addr_reg[11:0];
-    dword_count = 11'h400 - req_pcie_addr_reg[11:2];
+    req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
+    req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
     req_last_tlp = 1'b0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12]+1;
     req_pcie_addr[11:0] = 12'd0;
 end else begin
     // does not cross 4k boundary, split on 128-byte read completion boundary
-    req_tlp_count_next = {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[6:0];
-    dword_count = max_read_request_size_dw_reg - req_pcie_addr_reg[6:2];
+    req_tlp_count = {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[6:0];
+    req_dword_count = max_read_request_size_dw_reg - req_pcie_addr_reg[6:2];
     req_last_tlp = 1'b0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12];
     req_pcie_addr[11:0] = {{req_pcie_addr_reg[11:7], 5'd0} + max_read_request_size_dw_reg, 2'b00};
 end
@@ -674,13 +673,13 @@ always @* begin
 
 pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
 pcie_tag_table_start_ram_sel_next = req_ram_sel_reg;
-pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
+pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count;
 pcie_tag_table_start_op_tag_next = req_op_tag_reg;
 pcie_tag_table_start_zero_len_next = req_zero_len_reg;
 pcie_tag_table_start_en_next = 1'b0;
 
-first_be = 4'b1111 << req_pcie_addr_reg[1:0];
-last_be = 4'b1111 >> (3 - ((req_pcie_addr_reg[1:0] + req_tlp_count_next[1:0] - 1) & 3));
+req_first_be = 4'b1111 << req_pcie_addr_reg[1:0];
+req_last_be = 4'b1111 >> (3 - ((req_pcie_addr_reg[1:0] + req_tlp_count[1:0] - 1) & 3));
 
 tx_rd_req_tlp_hdr_next = tx_rd_req_tlp_hdr_reg;
 tx_rd_req_tlp_valid_next = tx_rd_req_tlp_valid_reg && !tx_rd_req_tlp_ready;
@@ -703,12 +702,12 @@ always @* begin
 tlp_hdr[110] = 1'b0; // EP
 tlp_hdr[109:108] = 2'b00; // attr
 tlp_hdr[107:106] = 3'b000; // AT
-tlp_hdr[105:96] = dword_count; // length
+tlp_hdr[105:96] = req_dword_count; // length
 // DW 1
 tlp_hdr[95:80] = requester_id; // requester ID
 tlp_hdr[79:72] = req_pcie_tag_reg; // tag
-tlp_hdr[71:68] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? 4'b0000 : last_be); // last BE
-tlp_hdr[67:64] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? first_be & last_be : first_be); // first BE
+tlp_hdr[71:68] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? 4'b0000 : req_last_be); // last BE
+tlp_hdr[67:64] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? req_first_be & req_last_be : req_first_be); // first BE
 if (((req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:2] >> 30) != 0) || TLP_FORCE_64_BIT_ADDR) begin
     // DW 2+3
     tlp_hdr[63:2] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:2]; // address
@@ -762,12 +761,12 @@ always @* begin
 inc_active_tx = 1'b1;
 
 req_pcie_addr_next = req_pcie_addr;
-req_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
-req_op_count_next = req_op_count_reg - req_tlp_count_next;
+req_ram_addr_next = req_ram_addr_reg + req_tlp_count;
+req_op_count_next = req_op_count_reg - req_tlp_count;
 
 pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
 pcie_tag_table_start_ram_sel_next = req_ram_sel_reg;
-pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
+pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count;
 pcie_tag_table_start_op_tag_next = req_op_tag_reg;
 pcie_tag_table_start_zero_len_next = req_zero_len_reg;
 pcie_tag_table_start_en_next = 1'b1;
@@ -780,7 +779,7 @@ always @* begin
 req_pcie_tag_valid_next = 1'b0;
 
 stat_rd_req_start_tag_next = req_pcie_tag_reg;
-stat_rd_req_start_len_next = req_zero_len_reg ? 0 : req_tlp_count_next;
+stat_rd_req_start_len_next = req_zero_len_reg ? 0 : req_tlp_count;
 stat_rd_req_start_valid_next = 1'b1;
 
 if (!req_last_tlp) begin
@@ -1329,7 +1328,6 @@ always @(posedge clk) begin
 req_ram_sel_reg <= req_ram_sel_next;
 req_ram_addr_reg <= req_ram_addr_next;
 req_op_count_reg <= req_op_count_next;
-req_tlp_count_reg <= req_tlp_count_next;
 req_zero_len_reg <= req_zero_len_next;
 req_op_tag_reg <= req_op_tag_next;
 req_op_tag_valid_reg <= req_op_tag_valid_next;
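For reference, the size computation this commit renames can be read on its own as one combinational block: from the current request address, the remaining byte count, and the max read request size, pick the byte count (req_tlp_count), DWORD count (req_dword_count), and last-TLP flag for the next read request, never letting a request cross a 4 KB boundary. The sketch below restates that logic outside the state machine. The module wrapper, port widths, and the outer "smaller than max read request size" guard are assumptions added for illustration and are not part of this commit; the branch bodies are taken from the diff above.

// Illustrative sketch only -- not part of the commit.
module rd_req_tlp_size_sketch #
(
    parameter PCIE_ADDR_WIDTH = 64,  // assumed width
    parameter LEN_WIDTH = 16         // assumed width
)
(
    input  wire [PCIE_ADDR_WIDTH-1:0] req_pcie_addr_reg,            // current request address
    input  wire [LEN_WIDTH-1:0]       req_op_count_reg,             // remaining bytes in the operation
    input  wire [10:0]                max_read_request_size_dw_reg, // max read request size in DWORDs (assumed width)
    output reg  [12:0]                req_tlp_count,                // bytes covered by this TLP
    output reg  [10:0]                req_dword_count,              // DWORDs covered by this TLP
    output reg                        req_last_tlp                  // this TLP finishes the operation
);

always @* begin
    // assumed guard: remaining bytes fit in one max-size read request
    if (req_op_count_reg <= {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[1:0]) begin
        // packet smaller than max read request size
        if (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) >> 12 != 0 || req_op_count_reg >> 12 != 0) begin
            // crosses 4k boundary, split on 4K boundary
            req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
            req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
            req_last_tlp = (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) & 12'hfff) == 0 && req_op_count_reg >> 12 == 0;
        end else begin
            // does not cross 4k boundary, send one TLP
            req_tlp_count = req_op_count_reg;
            req_dword_count = (req_op_count_reg + req_pcie_addr_reg[1:0] + 3) >> 2;
            req_last_tlp = 1'b1;
        end
    end else begin
        // packet larger than max read request size
        if (((req_pcie_addr_reg & 12'hfff) + {max_read_request_size_dw_reg, 2'b00}) >> 12 != 0) begin
            // crosses 4k boundary, split on 4K boundary
            req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
            req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
            req_last_tlp = 1'b0;
        end else begin
            // does not cross 4k boundary, split on 128-byte read completion boundary
            req_tlp_count = {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[6:0];
            req_dword_count = max_read_request_size_dw_reg - req_pcie_addr_reg[6:2];
            req_last_tlp = 1'b0;
        end
    end
end

endmodule

As a worked case, with a 512-byte max read request size (max_read_request_size_dw_reg = 128), a request at address 0xFE0 for 0x100 bytes takes the "smaller than max, crosses 4K" branch: req_tlp_count = 0x20, req_dword_count = 8, req_last_tlp = 0, and the remaining 0xE0 bytes are issued from the start of the next 4K page.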
@@ -334,9 +334,10 @@ reg [1:0] tlp_state_reg = TLP_STATE_IDLE, tlp_state_next;
 // datapath control signals
 reg last_cycle;
 
-reg [3:0] first_be;
-reg [3:0] last_be;
-reg [10:0] dword_count;
+reg [3:0] req_first_be;
+reg [3:0] req_last_be;
+reg [12:0] req_tlp_count;
+reg [10:0] req_dword_count;
 reg req_last_tlp;
 reg [PCIE_ADDR_WIDTH-1:0] req_pcie_addr;
 
@@ -349,7 +350,6 @@ reg [PCIE_ADDR_WIDTH-1:0] req_pcie_addr_reg = {PCIE_ADDR_WIDTH{1'b0}}, req_pcie_
 reg [RAM_SEL_WIDTH-1:0] req_ram_sel_reg = {RAM_SEL_WIDTH{1'b0}}, req_ram_sel_next;
 reg [RAM_ADDR_WIDTH-1:0] req_ram_addr_reg = {RAM_ADDR_WIDTH{1'b0}}, req_ram_addr_next;
 reg [LEN_WIDTH-1:0] req_op_count_reg = {LEN_WIDTH{1'b0}}, req_op_count_next;
-reg [12:0] req_tlp_count_reg = 13'd0, req_tlp_count_next;
 reg req_zero_len_reg = 1'b0, req_zero_len_next;
 reg [OP_TAG_WIDTH-1:0] req_op_tag_reg = {OP_TAG_WIDTH{1'b0}}, req_op_tag_next;
 reg req_op_tag_valid_reg = 1'b0, req_op_tag_valid_next;
@@ -570,7 +570,6 @@ always @* begin
 req_ram_sel_next = req_ram_sel_reg;
 req_ram_addr_next = req_ram_addr_reg;
 req_op_count_next = req_op_count_reg;
-req_tlp_count_next = req_tlp_count_reg;
 req_zero_len_next = req_zero_len_reg;
 req_op_tag_next = req_op_tag_reg;
 req_op_tag_valid_next = req_op_tag_valid_reg;
@@ -592,16 +591,16 @@ always @* begin
 // packet smaller than max read request size
 if (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) >> 12 != 0 || req_op_count_reg >> 12 != 0) begin
     // crosses 4k boundary, split on 4K boundary
-    req_tlp_count_next = 13'h1000 - req_pcie_addr_reg[11:0];
-    dword_count = 11'h400 - req_pcie_addr_reg[11:2];
+    req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
+    req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
     req_last_tlp = (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) & 12'hfff) == 0 && req_op_count_reg >> 12 == 0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12]+1;
     req_pcie_addr[11:0] = 12'd0;
 end else begin
     // does not cross 4k boundary, send one TLP
-    req_tlp_count_next = req_op_count_reg;
-    dword_count = (req_op_count_reg + req_pcie_addr_reg[1:0] + 3) >> 2;
+    req_tlp_count = req_op_count_reg;
+    req_dword_count = (req_op_count_reg + req_pcie_addr_reg[1:0] + 3) >> 2;
     req_last_tlp = 1'b1;
     // always last TLP, so next address is irrelevant
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12];
@@ -611,18 +610,18 @@ always @* begin
 // packet larger than max read request size
 if (((req_pcie_addr_reg & 12'hfff) + {max_read_request_size_dw_reg, 2'b00}) >> 12 != 0) begin
     // crosses 4k boundary, split on 4K boundary
-    req_tlp_count_next = 13'h1000 - req_pcie_addr_reg[11:0];
-    dword_count = 11'h400 - req_pcie_addr_reg[11:2];
+    req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
+    req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
     req_last_tlp = 1'b0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12]+1;
     req_pcie_addr[11:0] = 12'd0;
 end else begin
     // does not cross 4k boundary, split on 128-byte read completion boundary
-    req_tlp_count_next = {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[6:0];
-    dword_count = max_read_request_size_dw_reg - req_pcie_addr_reg[6:2];
+    req_tlp_count = {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[6:0];
+    req_dword_count = max_read_request_size_dw_reg - req_pcie_addr_reg[6:2];
     req_last_tlp = 1'b0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12];
     req_pcie_addr[11:0] = {{req_pcie_addr_reg[11:7], 5'd0} + max_read_request_size_dw_reg, 2'b00};
 end
@@ -630,18 +629,18 @@ always @* begin
 
 pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
 pcie_tag_table_start_ram_sel_next = req_ram_sel_reg;
-pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
+pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count;
 pcie_tag_table_start_op_tag_next = req_op_tag_reg;
 pcie_tag_table_start_zero_len_next = req_zero_len_reg;
 pcie_tag_table_start_en_next = 1'b0;
 
-first_be = 4'b1111 << req_pcie_addr_reg[1:0];
-last_be = 4'b1111 >> (3 - ((req_pcie_addr_reg[1:0] + req_tlp_count_next[1:0] - 1) & 3));
+req_first_be = 4'b1111 << req_pcie_addr_reg[1:0];
+req_last_be = 4'b1111 >> (3 - ((req_pcie_addr_reg[1:0] + req_tlp_count[1:0] - 1) & 3));
 
 // TLP header and sideband data
 tlp_header_data[1:0] = 2'b0; // address type
 tlp_header_data[63:2] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:2]; // address
-tlp_header_data[74:64] = dword_count; // DWORD count
+tlp_header_data[74:64] = req_dword_count; // DWORD count
 tlp_header_data[78:75] = REQ_MEM_READ; // request type - memory read
 tlp_header_data[79] = 1'b0; // poisoned request
 tlp_header_data[95:80] = requester_id;
@@ -653,9 +652,9 @@ always @* begin
 tlp_header_data[127] = 1'b0; // force ECRC
 
 if (AXIS_PCIE_DATA_WIDTH == 512) begin
-    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? first_be & last_be : first_be); // first BE 0
+    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? req_first_be & req_last_be : req_first_be); // first BE 0
     tlp_tuser[7:4] = 4'd0; // first BE 1
-    tlp_tuser[11:8] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? 4'b0000 : last_be); // last BE 0
+    tlp_tuser[11:8] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? 4'b0000 : req_last_be); // last BE 0
     tlp_tuser[15:12] = 4'd0; // last BE 1
     tlp_tuser[19:16] = 3'd0; // addr_offset
     tlp_tuser[21:20] = 2'b01; // is_sop
@@ -673,8 +672,8 @@ always @* begin
     tlp_tuser[72:67] = 6'd0; // seq_num1
     tlp_tuser[136:73] = 64'd0; // parity
 end else begin
-    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? first_be & last_be : first_be); // first BE
-    tlp_tuser[7:4] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? 4'b0000 : last_be); // last BE
+    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? req_first_be & req_last_be : req_first_be); // first BE
+    tlp_tuser[7:4] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? 4'b0000 : req_last_be); // last BE
     tlp_tuser[10:8] = 3'd0; // addr_offset
     tlp_tuser[11] = 1'b0; // discontinue
     tlp_tuser[12] = 1'b0; // tph_present
@@ -742,12 +741,12 @@ always @* begin
 
 if (AXIS_PCIE_DATA_WIDTH > 64) begin
     req_pcie_addr_next = req_pcie_addr;
-    req_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
-    req_op_count_next = req_op_count_reg - req_tlp_count_next;
+    req_ram_addr_next = req_ram_addr_reg + req_tlp_count;
+    req_op_count_next = req_op_count_reg - req_tlp_count;
 
     pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
     pcie_tag_table_start_ram_sel_next = req_ram_sel_reg;
-    pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
+    pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count;
     pcie_tag_table_start_op_tag_next = req_op_tag_reg;
     pcie_tag_table_start_zero_len_next = req_zero_len_reg;
     pcie_tag_table_start_en_next = 1'b1;
@@ -780,14 +779,14 @@ always @* begin
 
 if (m_axis_rq_tready_int_reg && req_pcie_tag_valid_reg) begin
     req_pcie_addr_next = req_pcie_addr;
-    req_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
-    req_op_count_next = req_op_count_reg - req_tlp_count_next;
+    req_ram_addr_next = req_ram_addr_reg + req_tlp_count;
+    req_op_count_next = req_op_count_reg - req_tlp_count;
 
     m_axis_rq_tvalid_int = 1'b1;
 
     pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
     pcie_tag_table_start_ram_sel_next = req_ram_sel_reg;
-    pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count_next;
+    pcie_tag_table_start_ram_addr_next = req_ram_addr_reg + req_tlp_count;
     pcie_tag_table_start_op_tag_next = req_op_tag_reg;
     pcie_tag_table_start_zero_len_next = req_zero_len_reg;
     pcie_tag_table_start_en_next = 1'b1;
@@ -1470,7 +1469,6 @@ always @(posedge clk) begin
 req_ram_sel_reg <= req_ram_sel_next;
 req_ram_addr_reg <= req_ram_addr_next;
 req_op_count_reg <= req_op_count_next;
-req_tlp_count_reg <= req_tlp_count_next;
 req_zero_len_reg <= req_zero_len_next;
 req_op_tag_reg <= req_op_tag_next;
 req_op_tag_valid_reg <= req_op_tag_valid_next;
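The first/last DWORD byte enables that feed tlp_hdr and tlp_tuser above are pure functions of the two low address bits and the TLP byte count. The sketch below isolates that arithmetic; the module wrapper and port names are illustrative only, while the two assignments mirror the req_first_be / req_last_be expressions in the diff. (For a single-DWORD request the state machine additionally ANDs the two masks together and clears the last BE, as shown in the hunks above.)

// Illustrative sketch only -- not part of the commit.
module rd_req_be_sketch
(
    input  wire [1:0]  addr_lsb,   // req_pcie_addr_reg[1:0]
    input  wire [12:0] tlp_count,  // req_tlp_count, in bytes
    output wire [3:0]  first_be,   // byte enables of the first DWORD
    output wire [3:0]  last_be     // byte enables of the last DWORD
);

// mask off the bytes below the start offset within the first DWORD
// e.g. addr_lsb = 2'd1 -> first_be = 4'b1110
assign first_be = 4'b1111 << addr_lsb;

// keep bytes up to and including the offset of the final byte within the last DWORD
// e.g. addr_lsb = 2'd1, tlp_count = 6 -> last byte lands at offset 2 -> last_be = 4'b0111
assign last_be = 4'b1111 >> (3 - ((addr_lsb + tlp_count[1:0] - 1) & 3));

endmodule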
@@ -342,9 +342,10 @@ reg [2:0] tlp_state_reg = TLP_STATE_IDLE, tlp_state_next;
 // datapath control signals
 reg transfer_in_save;
 
-reg [3:0] first_be;
-reg [3:0] last_be;
-reg [10:0] dword_count;
+reg [3:0] req_first_be;
+reg [3:0] req_last_be;
+reg [12:0] req_tlp_count;
+reg [10:0] req_dword_count;
 reg req_last_tlp;
 reg [PCIE_ADDR_WIDTH-1:0] req_pcie_addr;
 
@@ -356,7 +357,6 @@ reg init_op_tag_reg = 1'b1;
 reg [PCIE_ADDR_WIDTH-1:0] req_pcie_addr_reg = {PCIE_ADDR_WIDTH{1'b0}}, req_pcie_addr_next;
 reg [AXI_ADDR_WIDTH-1:0] req_axi_addr_reg = {AXI_ADDR_WIDTH{1'b0}}, req_axi_addr_next;
 reg [LEN_WIDTH-1:0] req_op_count_reg = {LEN_WIDTH{1'b0}}, req_op_count_next;
-reg [12:0] req_tlp_count_reg = 13'd0, req_tlp_count_next;
 reg req_zero_len_reg = 1'b0, req_zero_len_next;
 reg [OP_TAG_WIDTH-1:0] req_op_tag_reg = {OP_TAG_WIDTH{1'b0}}, req_op_tag_next;
 reg req_op_tag_valid_reg = 1'b0, req_op_tag_valid_next;
@@ -587,7 +587,6 @@ always @* begin
 req_pcie_addr_next = req_pcie_addr_reg;
 req_axi_addr_next = req_axi_addr_reg;
 req_op_count_next = req_op_count_reg;
-req_tlp_count_next = req_tlp_count_reg;
 req_zero_len_next = req_zero_len_reg;
 req_op_tag_next = req_op_tag_reg;
 req_op_tag_valid_next = req_op_tag_valid_reg;
@@ -609,16 +608,16 @@ always @* begin
 // packet smaller than max read request size
 if (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) >> 12 != 0 || req_op_count_reg >> 12 != 0) begin
     // crosses 4k boundary, split on 4K boundary
-    req_tlp_count_next = 13'h1000 - req_pcie_addr_reg[11:0];
-    dword_count = 11'h400 - req_pcie_addr_reg[11:2];
+    req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
+    req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
     req_last_tlp = (((req_pcie_addr_reg & 12'hfff) + (req_op_count_reg & 12'hfff)) & 12'hfff) == 0 && req_op_count_reg >> 12 == 0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12]+1;
     req_pcie_addr[11:0] = 12'd0;
 end else begin
     // does not cross 4k boundary, send one TLP
-    req_tlp_count_next = req_op_count_reg;
-    dword_count = (req_op_count_reg + req_pcie_addr_reg[1:0] + 3) >> 2;
+    req_tlp_count = req_op_count_reg;
+    req_dword_count = (req_op_count_reg + req_pcie_addr_reg[1:0] + 3) >> 2;
     req_last_tlp = 1'b1;
     // always last TLP, so next address is irrelevant
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12];
@@ -628,36 +627,36 @@ always @* begin
 // packet larger than max read request size
 if (((req_pcie_addr_reg & 12'hfff) + {max_read_request_size_dw_reg, 2'b00}) >> 12 != 0) begin
     // crosses 4k boundary, split on 4K boundary
-    req_tlp_count_next = 13'h1000 - req_pcie_addr_reg[11:0];
-    dword_count = 11'h400 - req_pcie_addr_reg[11:2];
+    req_tlp_count = 13'h1000 - req_pcie_addr_reg[11:0];
+    req_dword_count = 11'h400 - req_pcie_addr_reg[11:2];
     req_last_tlp = 1'b0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12]+1;
     req_pcie_addr[11:0] = 12'd0;
 end else begin
     // does not cross 4k boundary, split on 128-byte read completion boundary
-    req_tlp_count_next = {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[6:0];
-    dword_count = max_read_request_size_dw_reg - req_pcie_addr_reg[6:2];
+    req_tlp_count = {max_read_request_size_dw_reg, 2'b00} - req_pcie_addr_reg[6:0];
+    req_dword_count = max_read_request_size_dw_reg - req_pcie_addr_reg[6:2];
     req_last_tlp = 1'b0;
-    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count_next
+    // optimized req_pcie_addr = req_pcie_addr_reg + req_tlp_count
     req_pcie_addr[PCIE_ADDR_WIDTH-1:12] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:12];
     req_pcie_addr[11:0] = {{req_pcie_addr_reg[11:7], 5'd0} + max_read_request_size_dw_reg, 2'b00};
 end
 end
 
 pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
-pcie_tag_table_start_axi_addr_next = req_axi_addr_reg + req_tlp_count_next;
+pcie_tag_table_start_axi_addr_next = req_axi_addr_reg + req_tlp_count;
 pcie_tag_table_start_op_tag_next = req_op_tag_reg;
 pcie_tag_table_start_zero_len_next = req_zero_len_reg;
 pcie_tag_table_start_en_next = 1'b0;
 
-first_be = 4'b1111 << req_pcie_addr_reg[1:0];
-last_be = 4'b1111 >> (3 - ((req_pcie_addr_reg[1:0] + req_tlp_count_next[1:0] - 1) & 3));
+req_first_be = 4'b1111 << req_pcie_addr_reg[1:0];
+req_last_be = 4'b1111 >> (3 - ((req_pcie_addr_reg[1:0] + req_tlp_count[1:0] - 1) & 3));
 
 // TLP header and sideband data
 tlp_header_data[1:0] = 2'b0; // address type
 tlp_header_data[63:2] = req_pcie_addr_reg[PCIE_ADDR_WIDTH-1:2]; // address
-tlp_header_data[74:64] = dword_count; // DWORD count
+tlp_header_data[74:64] = req_dword_count; // DWORD count
 tlp_header_data[78:75] = REQ_MEM_READ; // request type - memory read
 tlp_header_data[79] = 1'b0; // poisoned request
 tlp_header_data[95:80] = requester_id;
@@ -669,9 +668,9 @@ always @* begin
 tlp_header_data[127] = 1'b0; // force ECRC
 
 if (AXIS_PCIE_DATA_WIDTH == 512) begin
-    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? first_be & last_be : first_be); // first BE 0
+    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? req_first_be & req_last_be : req_first_be); // first BE 0
     tlp_tuser[7:4] = 4'd0; // first BE 1
-    tlp_tuser[11:8] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? 4'b0000 : last_be); // last BE 0
+    tlp_tuser[11:8] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? 4'b0000 : req_last_be); // last BE 0
     tlp_tuser[15:12] = 4'd0; // last BE 1
     tlp_tuser[19:16] = 3'd0; // addr_offset
     tlp_tuser[21:20] = 2'b01; // is_sop
@@ -689,8 +688,8 @@ always @* begin
     tlp_tuser[72:67] = 6'd0; // seq_num1
     tlp_tuser[136:73] = 64'd0; // parity
 end else begin
-    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? first_be & last_be : first_be); // first BE
-    tlp_tuser[7:4] = req_zero_len_reg ? 4'b0000 : (dword_count == 1 ? 4'b0000 : last_be); // last BE
+    tlp_tuser[3:0] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? req_first_be & req_last_be : req_first_be); // first BE
+    tlp_tuser[7:4] = req_zero_len_reg ? 4'b0000 : (req_dword_count == 1 ? 4'b0000 : req_last_be); // last BE
     tlp_tuser[10:8] = 3'd0; // addr_offset
     tlp_tuser[11] = 1'b0; // discontinue
     tlp_tuser[12] = 1'b0; // tph_present
@@ -758,11 +757,11 @@ always @* begin
 
 if (AXIS_PCIE_DATA_WIDTH > 64) begin
     req_pcie_addr_next = req_pcie_addr;
-    req_axi_addr_next = req_axi_addr_reg + req_tlp_count_next;
-    req_op_count_next = req_op_count_reg - req_tlp_count_next;
+    req_axi_addr_next = req_axi_addr_reg + req_tlp_count;
+    req_op_count_next = req_op_count_reg - req_tlp_count;
 
     pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
-    pcie_tag_table_start_axi_addr_next = req_axi_addr_reg + req_tlp_count_next;
+    pcie_tag_table_start_axi_addr_next = req_axi_addr_reg + req_tlp_count;
     pcie_tag_table_start_op_tag_next = req_op_tag_reg;
     pcie_tag_table_start_zero_len_next = req_zero_len_reg;
     pcie_tag_table_start_en_next = 1'b1;
@@ -795,13 +794,13 @@ always @* begin
 
 if (m_axis_rq_tready_int_reg && req_pcie_tag_valid_reg) begin
     req_pcie_addr_next = req_pcie_addr;
-    req_axi_addr_next = req_axi_addr_reg + req_tlp_count_next;
-    req_op_count_next = req_op_count_reg - req_tlp_count_next;
+    req_axi_addr_next = req_axi_addr_reg + req_tlp_count;
+    req_op_count_next = req_op_count_reg - req_tlp_count;
 
     m_axis_rq_tvalid_int = 1'b1;
 
     pcie_tag_table_start_ptr_next = req_pcie_tag_reg;
-    pcie_tag_table_start_axi_addr_next = req_axi_addr_reg + req_tlp_count_next;
+    pcie_tag_table_start_axi_addr_next = req_axi_addr_reg + req_tlp_count;
     pcie_tag_table_start_op_tag_next = req_op_tag_reg;
     pcie_tag_table_start_zero_len_next = req_zero_len_reg;
     pcie_tag_table_start_en_next = 1'b1;
@@ -1503,7 +1502,6 @@ always @(posedge clk) begin
 req_pcie_addr_reg <= req_pcie_addr_next;
 req_axi_addr_reg <= req_axi_addr_next;
 req_op_count_reg <= req_op_count_next;
-req_tlp_count_reg <= req_tlp_count_next;
 req_zero_len_reg <= req_zero_len_next;
 req_op_tag_reg <= req_op_tag_next;
 req_op_tag_valid_reg <= req_op_tag_valid_next;