Merge pull request #2061 from hathach/imx-usbhost

iMX.RT EHCI add dcache support and other fixes + refactor
Commit 6ecd480006 by Ha Thach, 2023-05-19 14:18:07 +07:00, committed by GitHub
22 changed files with 633 additions and 442 deletions

.idea/cmake.xml (generated, 2 lines changed)

@ -2,6 +2,7 @@
<project version="4">
<component name="CMakeSharedSettings">
<configurations>
<configuration PROFILE_NAME="rt1060 evk" ENABLED="true" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DFAMILY=imxrt -DBOARD=mimxrt1060_evk" />
<configuration PROFILE_NAME="pca10095" ENABLED="true" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DFAMILY=nrf -DBOARD=pca10095" BUILD_OPTIONS="-v" />
<configuration PROFILE_NAME="pca10056" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DFAMILY=nrf -DBOARD=pca10056" BUILD_OPTIONS="-v" />
<configuration PROFILE_NAME="lpc55s69" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DFAMILY=lpc55 -DBOARD=lpcxpresso55s69" />
@ -27,6 +28,7 @@
</configuration>
<configuration PROFILE_NAME="rp2040" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DFAMILY=rp2040 -DBOARD=raspberry_pi_pico" />
<configuration PROFILE_NAME="rt1010 evk" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DFAMILY=imxrt -DBOARD=mimxrt1010_evk" />
<configuration PROFILE_NAME="rt1064 evk" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DFAMILY=imxrt -DBOARD=mimxrt1064_evk" />
</configurations>
</component>
</project>


@ -1,10 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="rt1010" type="com.jetbrains.cidr.embedded.customgdbserver.type" factoryName="com.jetbrains.cidr.embedded.customgdbserver.factory" PROGRAM_PARAMS="gdbserver -p 55503 -t mimxrt1010" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="cdc_msc" TARGET_NAME="cdc_msc" CONFIG_NAME="rt1060 evk" version="1" RUN_TARGET_PROJECT_NAME="cdc_msc" RUN_TARGET_NAME="cdc_msc">
<custom-gdb-server version="1" gdb-connect="tcp::55503" executable="$USER_HOME$/.local/bin/pyocd" warmup-ms="0" download-type="UPDATED_ONLY" reset-cmd="monitor reset" reset-type="AFTER_DOWNLOAD">
<debugger kind="GDB" isBundled="true" />
</custom-gdb-server>
<method v="2">
<option name="CLION.COMPOUND.BUILD" enabled="true" />
</method>
</configuration>
</component>


@ -0,0 +1,10 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="rt1010 nxplink" type="com.jetbrains.cidr.embedded.customgdbserver.type" factoryName="com.jetbrains.cidr.embedded.customgdbserver.factory" PROGRAM_PARAMS="gdbserver --gdb-port 55503 --semihost-port -1 MIMXRT1011xxxxx:EVK-MIMXRT1010" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="host_hid_to_device_cdc" TARGET_NAME="host_hid_to_device_cdc" CONFIG_NAME="rt1060 evk" version="1" RUN_TARGET_PROJECT_NAME="host_hid_to_device_cdc" RUN_TARGET_NAME="host_hid_to_device_cdc">
<custom-gdb-server version="1" gdb-connect="tcp::55503" executable="/usr/local/LinkServer/LinkServer" warmup-ms="0" download-type="ALWAYS" reset-cmd="monitor reset" reset-type="AFTER_DOWNLOAD">
<debugger kind="GDB" isBundled="true" />
</custom-gdb-server>
<method v="2">
<option name="CLION.COMPOUND.BUILD" enabled="true" />
</method>
</configuration>
</component>

.idea/runConfigurations/rt1060_jlink.xml (generated, new file, 10 lines)

@ -0,0 +1,10 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="rt1060 jlink" type="com.jetbrains.cidr.embedded.customgdbserver.type" factoryName="com.jetbrains.cidr.embedded.customgdbserver.factory" PROGRAM_PARAMS="-device &quot;MIMXRT1062xxx5A&quot; -if swd -speed 8000 -port 25321 -nogui -singlerun" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="host_hid_to_device_cdc" TARGET_NAME="host_hid_to_device_cdc" CONFIG_NAME="rt1060 evk" version="1" RUN_TARGET_PROJECT_NAME="host_hid_to_device_cdc" RUN_TARGET_NAME="host_hid_to_device_cdc">
<custom-gdb-server version="1" gdb-connect="tcp::25321" executable="/usr/bin/JLinkGDBServer" warmup-ms="0" download-type="UPDATED_ONLY" reset-cmd="monitor reset" reset-type="AFTER_DOWNLOAD">
<debugger kind="GDB" isBundled="true" />
</custom-gdb-server>
<method v="2">
<option name="CLION.COMPOUND.BUILD" enabled="true" />
</method>
</configuration>
</component>


@ -84,10 +84,6 @@
#define CFG_TUH_RPI_PIO_USB 1
#endif
// CFG_TUSB_DEBUG is defined by compiler in DEBUG build
// #define CFG_TUSB_DEBUG 0
/* USB DMA on some MCUs can only access a specific SRAM region with restrictions on alignment.
TinyUSB uses the following macros to declare transfer memory so that it can be placed
into those specific sections.
@ -133,7 +129,7 @@
#endif
#ifndef CFG_TUH_MEM_ALIGN
#define CFG_TUH_MEM_ALIGN __attribute__ ((aligned(4)))
#define CFG_TUH_MEM_ALIGN __attribute__ ((aligned(4)))
#endif
#define CFG_TUH_HUB 1
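As a quick illustration of how these macros are meant to be used (a hedged sketch, not part of this PR; the buffer name is made up), a host class driver declares its transfer buffer with both attributes so it lands in the DMA-capable section with the required alignment:

// Hypothetical usage sketch (not from this diff): place a transfer buffer into the
// USB-capable memory section with the configured alignment.
CFG_TUH_MEM_SECTION CFG_TUH_MEM_ALIGN
static uint8_t _example_rx_buf[64];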


@ -263,7 +263,7 @@ static void process_generic_report(uint8_t dev_addr, uint8_t instance, uint8_t c
if (!rpt_info)
{
printf("Couldn't find the report info for this report !\r\n");
printf("Couldn't find report info !\r\n");
return;
}


@ -131,7 +131,10 @@ void board_init(void)
freq = CLOCK_GetOscFreq() / (CLOCK_GetDiv(kCLOCK_UartDiv) + 1U);
}
LPUART_Init(UART_PORT, &uart_config, freq);
if ( kStatus_Success != LPUART_Init(UART_PORT, &uart_config, freq) ) {
// failed to init uart, probably baudrate is not supported
// TU_BREAKPOINT();
}
//------------- USB -------------//
// Note: RT105x RT106x and later have dual USB controllers.


@ -57,12 +57,16 @@ if (NOT TARGET ${BOARD_TARGET})
)
update_board(${BOARD_TARGET})
if (NOT DEFINED LD_FILE_${TOOLCHAIN})
set(LD_FILE_gcc ${SDK_DIR}/devices/${MCU_VARIANT}/gcc/${MCU_VARIANT}xxxxx_flexspi_nor.ld)
endif ()
if (TOOLCHAIN STREQUAL "gcc")
target_sources(${BOARD_TARGET} PUBLIC
${SDK_DIR}/devices/${MCU_VARIANT}/gcc/startup_${MCU_VARIANT}.S
)
target_link_options(${BOARD_TARGET} PUBLIC
"LINKER:--script=${SDK_DIR}/devices/${MCU_VARIANT}/gcc/${MCU_VARIANT}xxxxx_flexspi_nor.ld"
"LINKER:--script=${LD_FILE_gcc}"
"LINKER:-Map=$<IF:$<BOOL:$<TARGET_PROPERTY:OUTPUT_NAME>>,$<TARGET_PROPERTY:OUTPUT_NAME>,$<TARGET_PROPERTY:NAME>>${CMAKE_EXECUTABLE_SUFFIX}.map"
# nanolib
--specs=nosys.specs
@ -144,6 +148,21 @@ function(family_configure_target TARGET)
COMMAND ${LINKSERVER_PATH} flash ${NXPLINK_DEVICE} load $<TARGET_FILE:${TARGET}>
)
# Flash using jlink
set(JLINKEXE JLinkExe)
file(GENERATE
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}.jlink
CONTENT "halt
loadfile $<TARGET_FILE:${TARGET}>
r
go
exit"
)
add_custom_target(${TARGET}-jlink
DEPENDS ${TARGET}
COMMAND ${JLINKEXE} -device ${JLINK_DEVICE} -if swd -JTAGConf -1,-1 -speed auto -CommandFile ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}.jlink
)
endfunction()


@ -35,8 +35,7 @@
// Debug level, CFG_TUSB_DEBUG must be at least this level for debug message
#define CDCH_DEBUG 2
#define TU_LOG_CDCH(...) TU_LOG(CDCH_DEBUG, __VA_ARGS__)
#define TU_LOG_DRV(...) TU_LOG(CDCH_DEBUG, __VA_ARGS__)
//--------------------------------------------------------------------+
// Host CDC Interface
@ -537,6 +536,8 @@ void cdch_close(uint8_t daddr)
cdch_interface_t* p_cdc = &cdch_data[idx];
if (p_cdc->daddr == daddr)
{
TU_LOG_DRV(" CDCh close addr = %u index = %u\r\n", daddr, idx);
// Invoke application callback
if (tuh_cdc_umount_cb) tuh_cdc_umount_cb(idx);
@ -804,7 +805,7 @@ static void acm_process_config(tuh_xfer_t* xfer)
static bool acm_set_control_line_state(cdch_interface_t* p_cdc, uint16_t line_state, tuh_xfer_cb_t complete_cb, uintptr_t user_data) {
TU_VERIFY(p_cdc->acm_capability.support_line_request);
TU_LOG_CDCH("CDC ACM Set Control Line State\r\n");
TU_LOG_DRV("CDC ACM Set Control Line State\r\n");
tusb_control_request_t const request = {
.bmRequestType_bit = {
@ -834,7 +835,7 @@ static bool acm_set_control_line_state(cdch_interface_t* p_cdc, uint16_t line_st
}
static bool acm_set_line_coding(cdch_interface_t* p_cdc, cdc_line_coding_t const* line_coding, tuh_xfer_cb_t complete_cb, uintptr_t user_data) {
TU_LOG_CDCH("CDC ACM Set Line Conding\r\n");
TU_LOG_DRV("CDC ACM Set Line Conding\r\n");
tusb_control_request_t const request = {
.bmRequestType_bit = {
@ -894,7 +895,7 @@ static bool ftdi_open(uint8_t daddr, const tusb_desc_interface_t *itf_desc, uint
cdch_interface_t * p_cdc = make_new_itf(daddr, itf_desc);
TU_VERIFY(p_cdc);
TU_LOG_CDCH("FTDI opened\r\n");
TU_LOG_DRV("FTDI opened\r\n");
p_cdc->serial_drid = SERIAL_DRIVER_FTDI;
@ -938,7 +939,7 @@ static bool ftdi_sio_reset(cdch_interface_t* p_cdc, tuh_xfer_cb_t complete_cb, u
static bool ftdi_sio_set_modem_ctrl(cdch_interface_t* p_cdc, uint16_t line_state, tuh_xfer_cb_t complete_cb, uintptr_t user_data)
{
TU_LOG_CDCH("CDC FTDI Set Control Line State\r\n");
TU_LOG_DRV("CDC FTDI Set Control Line State\r\n");
p_cdc->user_control_cb = complete_cb;
TU_ASSERT(ftdi_sio_set_request(p_cdc, FTDI_SIO_MODEM_CTRL, 0x0300 | line_state,
complete_cb ? cdch_internal_control_complete : NULL, user_data));
@ -974,7 +975,7 @@ static uint32_t ftdi_232bm_baud_to_divisor(uint32_t baud)
static bool ftdi_sio_set_baudrate(cdch_interface_t* p_cdc, uint32_t baudrate, tuh_xfer_cb_t complete_cb, uintptr_t user_data)
{
uint16_t const divisor = (uint16_t) ftdi_232bm_baud_to_divisor(baudrate);
TU_LOG_CDCH("CDC FTDI Set BaudRate = %lu, divisor = 0x%04x\n", baudrate, divisor);
TU_LOG_DRV("CDC FTDI Set BaudRate = %lu, divisor = 0x%04x\n", baudrate, divisor);
p_cdc->user_control_cb = complete_cb;
_ftdi_requested_baud = baudrate;
@ -1061,7 +1062,7 @@ static bool cp210x_open(uint8_t daddr, tusb_desc_interface_t const *itf_desc, ui
cdch_interface_t * p_cdc = make_new_itf(daddr, itf_desc);
TU_VERIFY(p_cdc);
TU_LOG_CDCH("CP210x opened\r\n");
TU_LOG_DRV("CP210x opened\r\n");
p_cdc->serial_drid = SERIAL_DRIVER_CP210X;
// endpoint pair
@ -1109,7 +1110,7 @@ static bool cp210x_ifc_enable(cdch_interface_t* p_cdc, uint16_t enabled, tuh_xfe
}
static bool cp210x_set_baudrate(cdch_interface_t* p_cdc, uint32_t baudrate, tuh_xfer_cb_t complete_cb, uintptr_t user_data) {
TU_LOG_CDCH("CDC CP210x Set BaudRate = %lu\n", baudrate);
TU_LOG_DRV("CDC CP210x Set BaudRate = %lu\n", baudrate);
uint32_t baud_le = tu_htole32(baudrate);
p_cdc->user_control_cb = complete_cb;
return cp210x_set_request(p_cdc, CP210X_SET_BAUDRATE, 0, (uint8_t *) &baud_le, 4,
@ -1118,7 +1119,7 @@ static bool cp210x_set_baudrate(cdch_interface_t* p_cdc, uint32_t baudrate, tuh_
static bool cp210x_set_modem_ctrl(cdch_interface_t* p_cdc, uint16_t line_state, tuh_xfer_cb_t complete_cb, uintptr_t user_data)
{
TU_LOG_CDCH("CDC CP210x Set Control Line State\r\n");
TU_LOG_DRV("CDC CP210x Set Control Line State\r\n");
p_cdc->user_control_cb = complete_cb;
return cp210x_set_request(p_cdc, CP210X_SET_MHS, 0x0300 | line_state, NULL, 0,
complete_cb ? cdch_internal_control_complete : NULL, user_data);


@ -33,6 +33,10 @@
#include "hid_host.h"
// Debug level, CFG_TUSB_DEBUG must be at least this level for debug message
#define HIDH_DEBUG 2
#define TU_LOG_DRV(...) TU_LOG(HIDH_DEBUG, __VA_ARGS__)
//--------------------------------------------------------------------+
// MACRO CONSTANT TYPEDEF
//--------------------------------------------------------------------+
@ -68,7 +72,7 @@ tu_static hidh_interface_t _hidh_itf[CFG_TUH_HID];
TU_ATTR_ALWAYS_INLINE static inline
hidh_interface_t* get_hid_itf(uint8_t daddr, uint8_t idx)
{
TU_ASSERT(daddr && idx < CFG_TUH_HID, NULL);
TU_ASSERT(daddr > 0 && idx < CFG_TUH_HID, NULL);
hidh_interface_t* p_hid = &_hidh_itf[idx];
return (p_hid->daddr == daddr) ? p_hid : NULL;
}
@ -207,7 +211,7 @@ static void set_protocol_complete(tuh_xfer_t* xfer)
static bool _hidh_set_protocol(uint8_t daddr, uint8_t itf_num, uint8_t protocol, tuh_xfer_cb_t complete_cb, uintptr_t user_data)
{
TU_LOG2("HID Set Protocol = %d\r\n", protocol);
TU_LOG_DRV("HID Set Protocol = %d\r\n", protocol);
tusb_control_request_t const request =
{
@ -246,7 +250,7 @@ bool tuh_hid_set_protocol(uint8_t daddr, uint8_t idx, uint8_t protocol)
static void set_report_complete(tuh_xfer_t* xfer)
{
TU_LOG2("HID Set Report complete\r\n");
TU_LOG_DRV("HID Set Report complete\r\n");
if (tuh_hid_set_report_complete_cb)
{
@ -266,7 +270,7 @@ bool tuh_hid_set_report(uint8_t daddr, uint8_t idx, uint8_t report_id, uint8_t r
hidh_interface_t* p_hid = get_hid_itf(daddr, idx);
TU_VERIFY(p_hid);
TU_LOG2("HID Set Report: id = %u, type = %u, len = %u\r\n", report_id, report_type, len);
TU_LOG_DRV("HID Set Report: id = %u, type = %u, len = %u\r\n", report_id, report_type, len);
tusb_control_request_t const request =
{
@ -298,7 +302,7 @@ bool tuh_hid_set_report(uint8_t daddr, uint8_t idx, uint8_t report_id, uint8_t r
static bool _hidh_set_idle(uint8_t daddr, uint8_t itf_num, uint16_t idle_rate, tuh_xfer_cb_t complete_cb, uintptr_t user_data)
{
// SET IDLE request, device can stall if not support this request
TU_LOG2("HID Set Idle \r\n");
TU_LOG_DRV("HID Set Idle \r\n");
tusb_control_request_t const request =
{
@ -367,7 +371,7 @@ bool tuh_hid_send_ready(uint8_t dev_addr, uint8_t idx)
bool tuh_hid_send_report(uint8_t daddr, uint8_t idx, uint8_t report_id, const void* report, uint16_t len)
{
TU_LOG2("HID Send Report %d\r\n", report_id);
TU_LOG_DRV("HID Send Report %d\r\n", report_id);
hidh_interface_t* p_hid = get_hid_itf(daddr, idx);
TU_VERIFY(p_hid);
@ -430,7 +434,7 @@ bool hidh_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t result, uint32_t
if ( dir == TUSB_DIR_IN )
{
TU_LOG2(" Get Report callback (%u, %u)\r\n", daddr, idx);
TU_LOG_DRV(" Get Report callback (%u, %u)\r\n", daddr, idx);
TU_LOG3_MEM(p_hid->epin_buf, xferred_bytes, 2);
tuh_hid_report_received_cb(daddr, idx, p_hid->epin_buf, (uint16_t) xferred_bytes);
}else
@ -448,8 +452,9 @@ void hidh_close(uint8_t daddr)
hidh_interface_t* p_hid = &_hidh_itf[i];
if (p_hid->daddr == daddr)
{
if(tuh_hid_umount_cb) tuh_hid_umount_cb(daddr, i);
p_hid->daddr = 0;
TU_LOG_DRV(" HIDh close addr = %u index = %u\r\n", daddr, i);
if(tuh_hid_umount_cb) tuh_hid_umount_cb(daddr, i);
p_hid->daddr = 0;
}
}
}
@ -465,7 +470,7 @@ bool hidh_open(uint8_t rhport, uint8_t daddr, tusb_desc_interface_t const *desc_
TU_VERIFY(TUSB_CLASS_HID == desc_itf->bInterfaceClass);
TU_LOG2("[%u] HID opening Interface %u\r\n", daddr, desc_itf->bInterfaceNumber);
TU_LOG_DRV("[%u] HID opening Interface %u\r\n", daddr, desc_itf->bInterfaceNumber);
// len = interface + hid + n*endpoints
uint16_t const drv_len = (uint16_t) (sizeof(tusb_desc_interface_t) + sizeof(tusb_hid_descriptor_hid_t) +
@ -592,7 +597,7 @@ static void process_set_config(tuh_xfer_t* xfer)
// using usbh enumeration buffer since report descriptor can be very long
if( p_hid->report_desc_len > CFG_TUH_ENUMERATION_BUFSIZE )
{
TU_LOG2("HID Skip Report Descriptor since it is too large %u bytes\r\n", p_hid->report_desc_len);
TU_LOG_DRV("HID Skip Report Descriptor since it is too large %u bytes\r\n", p_hid->report_desc_len);
// Driver is mounted without report descriptor
config_driver_mount_complete(daddr, idx, NULL, 0);
@ -763,7 +768,7 @@ uint8_t tuh_hid_parse_report_descriptor(tuh_hid_report_info_t* report_info_arr,
for ( uint8_t i = 0; i < report_num; i++ )
{
info = report_info_arr+i;
TU_LOG2("%u: id = %u, usage_page = %u, usage = %u\r\n", i, info->report_id, info->usage_page, info->usage);
TU_LOG_DRV("%u: id = %u, usage_page = %u, usage = %u\r\n", i, info->report_id, info->usage_page, info->usage);
}
return report_num;


@ -35,7 +35,6 @@
// Debug level, CFG_TUSB_DEBUG must be at least this level for debug message
#define MSCH_DEBUG 2
#define TU_LOG_MSCH(...) TU_LOG(MSCH_DEBUG, __VA_ARGS__)
//--------------------------------------------------------------------+
@ -82,6 +81,7 @@ CFG_TUH_MEM_SECTION static msch_interface_t _msch_itf[CFG_TUH_DEVICE_MAX];
CFG_TUH_MEM_SECTION CFG_TUH_MEM_ALIGN
static uint8_t _msch_buffer[sizeof(scsi_inquiry_resp_t)];
// FIXME potential null reference
TU_ATTR_ALWAYS_INLINE
static inline msch_interface_t* get_itf(uint8_t dev_addr)
{
@ -305,11 +305,15 @@ void msch_init(void)
void msch_close(uint8_t dev_addr)
{
TU_VERIFY(dev_addr <= CFG_TUH_DEVICE_MAX, );
msch_interface_t* p_msc = get_itf(dev_addr);
TU_VERIFY(p_msc->configured, );
TU_LOG_MSCH(" MSCh close addr = %d\r\n", dev_addr);
// invoke Application Callback
if (p_msc->mounted && tuh_msc_umount_cb) tuh_msc_umount_cb(dev_addr);
if (p_msc->mounted) {
if(tuh_msc_umount_cb) tuh_msc_umount_cb(dev_addr);
}
tu_memclr(p_msc, sizeof(msch_interface_t));
}


@ -46,6 +46,7 @@
#if CFG_TUSB_DEBUG >= 2
extern char const* const tu_str_speed[];
extern char const* const tu_str_std_request[];
extern char const* const tu_str_xfer_result[];
#endif
void tu_print_mem(void const *buf, uint32_t count, uint8_t indent);


@ -516,9 +516,9 @@ void tud_task_ext(uint32_t timeout_ms, bool in_isr)
_usbd_dev.connected = 1;
// mark both in & out control as free
_usbd_dev.ep_status[0][TUSB_DIR_OUT].busy = false;
_usbd_dev.ep_status[0][TUSB_DIR_OUT].busy = 0;
_usbd_dev.ep_status[0][TUSB_DIR_OUT].claimed = 0;
_usbd_dev.ep_status[0][TUSB_DIR_IN ].busy = false;
_usbd_dev.ep_status[0][TUSB_DIR_IN ].busy = 0;
_usbd_dev.ep_status[0][TUSB_DIR_IN ].claimed = 0;
// Process control request
@ -540,12 +540,13 @@ void tud_task_ext(uint32_t timeout_ms, bool in_isr)
TU_LOG(USBD_DBG, "on EP %02X with %u bytes\r\n", ep_addr, (unsigned int) event.xfer_complete.len);
_usbd_dev.ep_status[epnum][ep_dir].busy = false;
_usbd_dev.ep_status[epnum][ep_dir].busy = 0;
_usbd_dev.ep_status[epnum][ep_dir].claimed = 0;
if ( 0 == epnum )
{
usbd_control_xfer_cb(event.rhport, ep_addr, (xfer_result_t)event.xfer_complete.result, event.xfer_complete.len);
usbd_control_xfer_cb(event.rhport, ep_addr, (xfer_result_t) event.xfer_complete.result, event.xfer_complete.len);
}
else
{
@ -553,7 +554,7 @@ void tud_task_ext(uint32_t timeout_ms, bool in_isr)
TU_ASSERT(driver, );
TU_LOG(USBD_DBG, " %s xfer callback\r\n", driver->name);
driver->xfer_cb(event.rhport, ep_addr, (xfer_result_t)event.xfer_complete.result, event.xfer_complete.len);
driver->xfer_cb(event.rhport, ep_addr, (xfer_result_t) event.xfer_complete.result, event.xfer_complete.len);
}
}
break;
@ -1244,7 +1245,7 @@ bool usbd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t * buffer, uint16_t
// Set busy first since the actual transfer can be complete before dcd_edpt_xfer()
// could return and USBD task can preempt and clear the busy
_usbd_dev.ep_status[epnum][dir].busy = true;
_usbd_dev.ep_status[epnum][dir].busy = 1;
if ( dcd_edpt_xfer(rhport, ep_addr, buffer, total_bytes) )
{
@ -1252,7 +1253,7 @@ bool usbd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t * buffer, uint16_t
}else
{
// DCD error, mark endpoint as ready to allow next transfer
_usbd_dev.ep_status[epnum][dir].busy = false;
_usbd_dev.ep_status[epnum][dir].busy = 0;
_usbd_dev.ep_status[epnum][dir].claimed = 0;
TU_LOG(USBD_DBG, "FAILED\r\n");
TU_BREAKPOINT();
@ -1278,7 +1279,7 @@ bool usbd_edpt_xfer_fifo(uint8_t rhport, uint8_t ep_addr, tu_fifo_t * ff, uint16
// Set busy first since the actual transfer can be complete before dcd_edpt_xfer() could return
// and usbd task can preempt and clear the busy
_usbd_dev.ep_status[epnum][dir].busy = true;
_usbd_dev.ep_status[epnum][dir].busy = 1;
if (dcd_edpt_xfer_fifo(rhport, ep_addr, ff, total_bytes))
{
@ -1287,7 +1288,7 @@ bool usbd_edpt_xfer_fifo(uint8_t rhport, uint8_t ep_addr, tu_fifo_t * ff, uint16
}else
{
// DCD error, mark endpoint as ready to allow next transfer
_usbd_dev.ep_status[epnum][dir].busy = false;
_usbd_dev.ep_status[epnum][dir].busy = 0;
_usbd_dev.ep_status[epnum][dir].claimed = 0;
TU_LOG(USBD_DBG, "failed\r\n");
TU_BREAKPOINT();
@ -1317,8 +1318,8 @@ void usbd_edpt_stall(uint8_t rhport, uint8_t ep_addr)
{
TU_LOG(USBD_DBG, " Stall EP %02X\r\n", ep_addr);
dcd_edpt_stall(rhport, ep_addr);
_usbd_dev.ep_status[epnum][dir].stalled = true;
_usbd_dev.ep_status[epnum][dir].busy = true;
_usbd_dev.ep_status[epnum][dir].stalled = 1;
_usbd_dev.ep_status[epnum][dir].busy = 1;
}
}
@ -1334,8 +1335,8 @@ void usbd_edpt_clear_stall(uint8_t rhport, uint8_t ep_addr)
{
TU_LOG(USBD_DBG, " Clear Stall EP %02X\r\n", ep_addr);
dcd_edpt_clear_stall(rhport, ep_addr);
_usbd_dev.ep_status[epnum][dir].stalled = false;
_usbd_dev.ep_status[epnum][dir].busy = false;
_usbd_dev.ep_status[epnum][dir].stalled = 0;
_usbd_dev.ep_status[epnum][dir].busy = 0;
}
}
@ -1366,9 +1367,9 @@ void usbd_edpt_close(uint8_t rhport, uint8_t ep_addr)
uint8_t const dir = tu_edpt_dir(ep_addr);
dcd_edpt_close(rhport, ep_addr);
_usbd_dev.ep_status[epnum][dir].stalled = false;
_usbd_dev.ep_status[epnum][dir].busy = false;
_usbd_dev.ep_status[epnum][dir].claimed = false;
_usbd_dev.ep_status[epnum][dir].stalled = 0;
_usbd_dev.ep_status[epnum][dir].busy = 0;
_usbd_dev.ep_status[epnum][dir].claimed = 0;
return;
}
@ -1403,9 +1404,9 @@ bool usbd_edpt_iso_activate(uint8_t rhport, tusb_desc_endpoint_t const * desc_ep
TU_ASSERT(epnum < CFG_TUD_ENDPPOINT_MAX);
TU_ASSERT(tu_edpt_validate(desc_ep, (tusb_speed_t) _usbd_dev.speed));
_usbd_dev.ep_status[epnum][dir].stalled = false;
_usbd_dev.ep_status[epnum][dir].busy = false;
_usbd_dev.ep_status[epnum][dir].claimed = false;
_usbd_dev.ep_status[epnum][dir].stalled = 0;
_usbd_dev.ep_status[epnum][dir].busy = 0;
_usbd_dev.ep_status[epnum][dir].claimed = 0;
return dcd_edpt_iso_activate(rhport, desc_ep);
}


@ -32,7 +32,10 @@
#include "tusb.h"
#include "device/usbd_pvt.h"
#if CFG_TUSB_DEBUG >= 2
// Debug level of USBD Control
#define USBD_CONTROL_DEBUG 2
#if CFG_TUSB_DEBUG >= USBD_CONTROL_DEBUG
extern void usbd_driver_print_control_complete_name(usbd_control_xfer_cb_t callback);
#endif
@ -188,7 +191,7 @@ bool usbd_control_xfer_cb (uint8_t rhport, uint8_t ep_addr, xfer_result_t result
{
TU_VERIFY(_ctrl_xfer.buffer);
memcpy(_ctrl_xfer.buffer, _usbd_ctrl_buf, xferred_bytes);
TU_LOG_MEM(2, _usbd_ctrl_buf, xferred_bytes, 2);
TU_LOG_MEM(USBD_CONTROL_DEBUG, _usbd_ctrl_buf, xferred_bytes, 2);
}
_ctrl_xfer.total_xferred += (uint16_t) xferred_bytes;
@ -205,7 +208,7 @@ bool usbd_control_xfer_cb (uint8_t rhport, uint8_t ep_addr, xfer_result_t result
// callback can still stall control in status phase e.g out data does not make sense
if ( _ctrl_xfer.complete_cb )
{
#if CFG_TUSB_DEBUG >= 2
#if CFG_TUSB_DEBUG >= USBD_CONTROL_DEBUG
usbd_driver_print_control_complete_name(_ctrl_xfer.complete_cb);
#endif


@ -39,8 +39,10 @@
// Configuration
//--------------------------------------------------------------------+
// Max number of endpoints per device
// TODO optimize memory usage
#ifndef CFG_TUH_ENDPOINT_MAX
#define CFG_TUH_ENDPOINT_MAX (CFG_TUH_HUB + CFG_TUH_HID*2 + CFG_TUH_MSC*2 + CFG_TUH_CDC*3)
#define CFG_TUH_ENDPOINT_MAX 16
// #ifdef TUP_HCD_ENDPOINT_MAX
// #define CFG_TUH_ENDPPOINT_MAX TUP_HCD_ENDPOINT_MAX
// #else
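For a rough sense of the sizing change (the class counts here are hypothetical, not taken from this PR): with CFG_TUH_HUB = 1, CFG_TUH_HID = 4, CFG_TUH_MSC = 1 and CFG_TUH_CDC = 1, the old formula gives 1 + 4*2 + 1*2 + 1*3 = 14 endpoints, so the fixed default of 16 still covers such a configuration and no longer shrinks when fewer class drivers are enabled.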
@ -102,6 +104,22 @@ typedef struct
uint8_t speed;
} hcd_devtree_info_t;
//--------------------------------------------------------------------+
// Memory API
//--------------------------------------------------------------------+
// clean/flush data cache: write cache -> memory.
// Required before a DMA TX transfer to make sure data is in memory
void hcd_dcache_clean(void* addr, uint32_t data_size) TU_ATTR_WEAK;
// invalidate data cache: mark cache as invalid, next read will read from memory
// Required BOTH before and after a DMA RX transfer
void hcd_dcache_invalidate(void* addr, uint32_t data_size) TU_ATTR_WEAK;
// clean and invalidate data cache
// Required before a DMA transfer where memory is both read/write by DMA
void hcd_dcache_clean_invalidate(void* addr, uint32_t data_size) TU_ATTR_WEAK;
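A minimal usage sketch of the hooks declared above (buffer and function names are assumptions, not part of this diff), showing how a host controller driver would call them around DMA transfers:

// Illustrative sketch only: names are hypothetical, the calls are the API declared above.
static uint8_t _xfer_buf[64];

void example_start_out_transfer(void) {
  // CPU filled _xfer_buf; write the cached data back to RAM before the DMA engine reads it
  hcd_dcache_clean(_xfer_buf, sizeof(_xfer_buf));
  // ... program the controller to transmit _xfer_buf ...
}

void example_start_in_transfer(void) {
  // per the comment above, invalidate before the RX starts ...
  hcd_dcache_invalidate(_xfer_buf, sizeof(_xfer_buf));
  // ... program the controller to receive into _xfer_buf ...
}

void example_in_transfer_complete(uint32_t xferred_bytes) {
  // ... and invalidate again after the DMA has written the data, before the CPU reads it
  hcd_dcache_invalidate(_xfer_buf, xferred_bytes);
}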
//--------------------------------------------------------------------+
// Controller API
//--------------------------------------------------------------------+
@ -157,7 +175,7 @@ bool hcd_edpt_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr, uint8_t *
bool hcd_setup_send(uint8_t rhport, uint8_t dev_addr, uint8_t const setup_packet[8]);
// clear stall, data toggle is also reset to DATA0
bool hcd_edpt_clear_stall(uint8_t dev_addr, uint8_t ep_addr);
bool hcd_edpt_clear_stall(uint8_t daddr, uint8_t ep_addr);
//--------------------------------------------------------------------+
// USBH implemented API
@ -182,6 +200,7 @@ void hcd_event_device_attach(uint8_t rhport, bool in_isr)
event.event_id = HCD_EVENT_DEVICE_ATTACH;
event.connection.hub_addr = 0;
event.connection.hub_port = 0;
hcd_event_handler(&event, in_isr);
}
@ -212,7 +231,6 @@ void hcd_event_xfer_complete(uint8_t dev_addr, uint8_t ep_addr, uint32_t xferred
event.xfer_complete.result = result;
event.xfer_complete.len = xferred_bytes;
hcd_event_handler(&event, in_isr);
}


@ -33,6 +33,10 @@
#include "usbh_classdriver.h"
#include "hub.h"
// Debug level, CFG_TUSB_DEBUG must be at least this level for debug message
#define HUB_DEBUG 2
#define TU_LOG_DRV(...) TU_LOG(HUB_DEBUG, __VA_ARGS__)
//--------------------------------------------------------------------+
// MACRO CONSTANT TYPEDEF
//--------------------------------------------------------------------+
@ -218,7 +222,10 @@ void hub_close(uint8_t dev_addr)
TU_VERIFY(dev_addr > CFG_TUH_DEVICE_MAX, );
hub_interface_t* p_hub = get_itf(dev_addr);
if (p_hub->ep_in) tu_memclr(p_hub, sizeof( hub_interface_t));
if (p_hub->ep_in) {
TU_LOG_DRV(" HUB close addr = %d\r\n", dev_addr);
tu_memclr(p_hub, sizeof( hub_interface_t));
}
}
bool hub_edpt_status_xfer(uint8_t dev_addr)


@ -61,6 +61,8 @@ typedef struct
uint8_t hub_addr;
uint8_t hub_port;
uint8_t speed;
// enumeration is in progress, done when all interfaces are configured
volatile uint8_t enumerating;
// struct TU_ATTR_PACKED {
@ -79,10 +81,12 @@ typedef struct {
// Device State
struct TU_ATTR_PACKED {
volatile uint8_t connected : 1;
volatile uint8_t addressed : 1;
volatile uint8_t configured : 1;
volatile uint8_t suspended : 1;
volatile uint8_t connected : 1; // After 1st transfer
volatile uint8_t addressed : 1; // After SET_ADDR
volatile uint8_t configured : 1; // After SET_CONFIG and all drivers are configured
volatile uint8_t suspended : 1; // Bus suspended
// volatile uint8_t removing : 1; // Physically disconnected, waiting to be processed by usbh
};
// Device Descriptor
@ -246,7 +250,7 @@ static inline usbh_device_t* get_device(uint8_t dev_addr)
}
static bool enum_new_device(hcd_event_t* event);
static void process_device_unplugged(uint8_t rhport, uint8_t hub_addr, uint8_t hub_port);
static void process_removing_device(uint8_t rhport, uint8_t hub_addr, uint8_t hub_port);
static bool usbh_edpt_control_open(uint8_t dev_addr, uint8_t max_packet_size);
static bool usbh_control_xfer_cb (uint8_t daddr, uint8_t ep_addr, xfer_result_t result, uint32_t xferred_bytes);
@ -418,7 +422,7 @@ void tuh_task_ext(uint32_t timeout_ms, bool in_isr)
case HCD_EVENT_DEVICE_REMOVE:
TU_LOG_USBH("[%u:%u:%u] USBH DEVICE REMOVED\r\n", event.rhport, event.connection.hub_addr, event.connection.hub_port);
process_device_unplugged(event.rhport, event.connection.hub_addr, event.connection.hub_port);
process_removing_device(event.rhport, event.connection.hub_addr, event.connection.hub_port);
#if CFG_TUH_HUB
// TODO remove
@ -436,7 +440,8 @@ void tuh_task_ext(uint32_t timeout_ms, bool in_isr)
uint8_t const epnum = tu_edpt_number(ep_addr);
uint8_t const ep_dir = tu_edpt_dir(ep_addr);
TU_LOG_USBH("on EP %02X with %u bytes\r\n", ep_addr, (unsigned int) event.xfer_complete.len);
TU_LOG_USBH("on EP %02X with %u bytes: %s\r\n", ep_addr, (unsigned int) event.xfer_complete.len,
tu_str_xfer_result[event.xfer_complete.result]);
if (event.dev_addr == 0)
{
@ -447,7 +452,7 @@ void tuh_task_ext(uint32_t timeout_ms, bool in_isr)
else
{
usbh_device_t* dev = get_device(event.dev_addr);
TU_ASSERT(dev, );
TU_VERIFY(dev && dev->connected, );
dev->ep_status[epnum][ep_dir].busy = 0;
dev->ep_status[epnum][ep_dir].claimed = 0;
@ -736,29 +741,33 @@ void usbh_int_set(bool enabled)
// TODO has some duplication code with device, refactor later
bool usbh_edpt_claim(uint8_t dev_addr, uint8_t ep_addr)
{
// Note: addr0 only use tuh_control_xfer
usbh_device_t* dev = get_device(dev_addr);
// addr0 only use tuh_control_xfer
TU_ASSERT(dev);
TU_ASSERT(dev && dev->connected);
uint8_t const epnum = tu_edpt_number(ep_addr);
uint8_t const dir = tu_edpt_dir(ep_addr);
return tu_edpt_claim(&dev->ep_status[epnum][dir], _usbh_mutex);
TU_VERIFY(tu_edpt_claim(&dev->ep_status[epnum][dir], _usbh_mutex));
TU_LOG_USBH("[%u] Claimed EP 0x%02x\r\n", dev_addr, ep_addr);
return true;
}
// TODO has some duplication code with device, refactor later
bool usbh_edpt_release(uint8_t dev_addr, uint8_t ep_addr)
{
// Note: addr0 only use tuh_control_xfer
usbh_device_t* dev = get_device(dev_addr);
// addr0 only use tuh_control_xfer
TU_ASSERT(dev);
TU_VERIFY(dev && dev->connected);
uint8_t const epnum = tu_edpt_number(ep_addr);
uint8_t const dir = tu_edpt_dir(ep_addr);
return tu_edpt_release(&dev->ep_status[epnum][dir], _usbh_mutex);
TU_VERIFY(tu_edpt_release(&dev->ep_status[epnum][dir], _usbh_mutex));
TU_LOG_USBH("[%u] Released EP 0x%02x\r\n", dev_addr, ep_addr);
return true;
}
// TODO has some duplication code with device, refactor later
@ -866,6 +875,10 @@ TU_ATTR_FAST_FUNC void hcd_event_handler(hcd_event_t const* event, bool in_isr)
{
switch (event->event_id)
{
// case HCD_EVENT_DEVICE_REMOVE:
// // mark device as removing to prevent further xfer before the event is processed in usbh task
// break;
default:
osal_queue_send(_usbh_q, event, in_isr);
break;
@ -1109,7 +1122,7 @@ uint8_t tuh_descriptor_get_serial_string_sync(uint8_t daddr, uint16_t language_i
}
//--------------------------------------------------------------------+
//
// Detaching
//--------------------------------------------------------------------+
TU_ATTR_ALWAYS_INLINE
@ -1118,47 +1131,79 @@ static inline bool is_hub_addr(uint8_t daddr)
return (CFG_TUH_HUB > 0) && (daddr > CFG_TUH_DEVICE_MAX);
}
//static void mark_removing_device_isr(uint8_t rhport, uint8_t hub_addr, uint8_t hub_port) {
// for (uint8_t dev_id = 0; dev_id < TOTAL_DEVICES; dev_id++) {
// usbh_device_t *dev = &_usbh_devices[dev_id];
// uint8_t const daddr = dev_id + 1;
//
// // hub_addr = 0 means roothub, hub_port = 0 means all devices of downstream hub
// if (dev->rhport == rhport && dev->connected &&
// (hub_addr == 0 || dev->hub_addr == hub_addr) &&
// (hub_port == 0 || dev->hub_port == hub_port)) {
// if (is_hub_addr(daddr)) {
// // If the device itself is a usb hub, mark all downstream devices.
// // FIXME recursive calls
// mark_removing_device_isr(rhport, daddr, 0);
// }
//
// dev->removing = 1;
// }
// }
//}
// a device unplugged from rhport:hub_addr:hub_port
static void process_device_unplugged(uint8_t rhport, uint8_t hub_addr, uint8_t hub_port)
static void process_removing_device(uint8_t rhport, uint8_t hub_addr, uint8_t hub_port)
{
//------------- find all devices (star-network) under the unplugged port -------------//
// TODO mark as disconnected in ISR, also handle dev0
for ( uint8_t dev_id = 0; dev_id < TU_ARRAY_SIZE(_usbh_devices); dev_id++ )
{
usbh_device_t* dev = &_usbh_devices[dev_id];
uint8_t const dev_addr = dev_id+1;
// TODO Hub multiple level
if (dev->rhport == rhport &&
(hub_addr == 0 || dev->hub_addr == hub_addr) && // hub_addr = 0 means roothub
(hub_port == 0 || dev->hub_port == hub_port) && // hub_port = 0 means all devices of downstream hub
dev->connected)
{
TU_LOG_USBH(" Address = %u\r\n", dev_addr);
#if 0
// index as hub addr, value is hub port (0xFF for invalid)
uint8_t removing_hubs[CFG_TUH_HUB];
memset(removing_hubs, TUSB_INDEX_INVALID_8, sizeof(removing_hubs));
if (is_hub_addr(dev_addr))
{
TU_LOG(USBH_DEBUG, "HUB address = %u is unmounted\r\n", dev_addr);
// If the device itself is a usb hub, unplug downstream devices.
// FIXME un-roll recursive calls to prevent potential stack overflow
process_device_unplugged(rhport, dev_addr, 0);
}else
{
// Invoke callback before closing driver
if (tuh_umount_cb) tuh_umount_cb(dev_addr);
removing_hubs[hub_addr-CFG_TUH_DEVICE_MAX] = hub_port;
// consecutive non-removing hub
uint8_t nop_count = 0;
#endif
for (uint8_t dev_id = 0; dev_id < TOTAL_DEVICES; dev_id++) {
usbh_device_t *dev = &_usbh_devices[dev_id];
uint8_t const daddr = dev_id + 1;
// hub_addr = 0 means roothub, hub_port = 0 means all devices of downstream hub
if (dev->rhport == rhport && dev->connected &&
(hub_addr == 0 || dev->hub_addr == hub_addr) &&
(hub_port == 0 || dev->hub_port == hub_port)) {
TU_LOG_USBH("Device unplugged address = %u\r\n", daddr);
if (is_hub_addr(daddr)) {
TU_LOG(USBH_DEBUG, " is a HUB device\r\n", daddr);
// Submit a removal event if the device itself is a hub (recursion un-rolled via the event queue)
// TODO a better way to un-roll the recursion is to use an array of removing_hubs and mark them here
hcd_event_t event;
event.rhport = rhport;
event.event_id = HCD_EVENT_DEVICE_REMOVE;
event.connection.hub_addr = daddr;
event.connection.hub_port = 0;
hcd_event_handler(&event, false);
} else {
// Invoke callback before closing driver (maybe call it later ?)
if (tuh_umount_cb) tuh_umount_cb(daddr);
}
// Close class driver
for (uint8_t drv_id = 0; drv_id < USBH_CLASS_DRIVER_COUNT; drv_id++)
{
TU_LOG_USBH("%s close\r\n", usbh_class_drivers[drv_id].name);
usbh_class_drivers[drv_id].close(dev_addr);
for (uint8_t drv_id = 0; drv_id < USBH_CLASS_DRIVER_COUNT; drv_id++) {
usbh_class_drivers[drv_id].close(daddr);
}
hcd_device_close(rhport, dev_addr);
hcd_device_close(rhport, daddr);
clear_device(dev);
// abort on-going control xfer if any
if (_ctrl_xfer.daddr == dev_addr) _set_control_xfer_stage(CONTROL_STAGE_IDLE);
if (_ctrl_xfer.daddr == daddr) _set_control_xfer_stage(CONTROL_STAGE_IDLE);
}
}
}
@ -1210,6 +1255,7 @@ static void process_enumeration(tuh_xfer_t* xfer)
{
failed_count++;
osal_task_delay(ATTEMPT_DELAY_MS); // delay a bit
TU_LOG1("Enumeration attempt %u\r\n", failed_count);
TU_ASSERT(tuh_control_xfer(xfer), );
}else
{
@ -1449,6 +1495,7 @@ static uint8_t get_new_address(bool is_hub)
{
uint8_t start;
uint8_t end;
if ( is_hub )
{
start = CFG_TUH_DEVICE_MAX;
@ -1459,7 +1506,7 @@ static uint8_t get_new_address(bool is_hub)
end = start + CFG_TUH_DEVICE_MAX;
}
for ( uint8_t idx = start; idx < end; idx++)
for (uint8_t idx = start; idx < end; idx++)
{
if (!_usbh_devices[idx].connected) return (idx+1);
}


@ -616,15 +616,6 @@ void dcd_int_handler(uint8_t rhport)
uint32_t const edpt_complete = dcd_reg->ENDPTCOMPLETE;
dcd_reg->ENDPTCOMPLETE = edpt_complete; // acknowledge
if (dcd_reg->ENDPTSETUPSTAT)
{
//------------- Set up Received -------------//
// 23.10.10.2 Operational model for setup transfers
dcd_reg->ENDPTSETUPSTAT = dcd_reg->ENDPTSETUPSTAT;
dcd_event_setup_received(rhport, (uint8_t*)(uintptr_t) &_dcd_data.qhd[0][0].setup_request, true);
}
// 23.10.12.3 Failed QTD also get ENDPTCOMPLETE set
// nothing to do, we will submit xfer as error to usbd
// if (int_status & INTR_ERROR) { }
@ -637,6 +628,15 @@ void dcd_int_handler(uint8_t rhport)
if ( tu_bit_test(edpt_complete, epnum+16) ) process_edpt_complete_isr(rhport, epnum, TUSB_DIR_IN);
}
}
// Set up Received
// 23.10.10.2 Operational model for setup transfers
// Must be after normal transfer complete since it is possible to have both previous control status + new setup
// in the same frame and we should handle previous status first.
if (dcd_reg->ENDPTSETUPSTAT) {
dcd_reg->ENDPTSETUPSTAT = dcd_reg->ENDPTSETUPSTAT;
dcd_event_setup_received(rhport, (uint8_t *) (uintptr_t) &_dcd_data.qhd[0][0].setup_request, true);
}
}
if (int_status & INTR_SOF)


@ -41,6 +41,30 @@
#if CFG_TUSB_MCU == OPT_MCU_MIMXRT
#include "ci_hs_imxrt.h"
// check if memory is cacheable i.e not in DTCM
TU_ATTR_ALWAYS_INLINE static inline bool is_cache_mem(uint32_t addr) {
return !(0x20000000 <= addr && addr < 0x20100000);
}
void hcd_dcache_clean(void* addr, uint32_t data_size) {
if (is_cache_mem((uint32_t) addr)) {
SCB_CleanDCache_by_Addr((uint32_t *) addr, (int32_t) data_size);
}
}
void hcd_dcache_invalidate(void* addr, uint32_t data_size) {
if (is_cache_mem((uint32_t) addr)) {
SCB_InvalidateDCache_by_Addr(addr, (int32_t) data_size);
}
}
void hcd_dcache_clean_invalidate(void* addr, uint32_t data_size) {
if (is_cache_mem((uint32_t) addr)) {
SCB_CleanInvalidateDCache_by_Addr(addr, (int32_t) data_size);
}
}
#elif TU_CHECK_MCU(OPT_MCU_LPC18XX, OPT_MCU_LPC43XX)
#include "ci_hs_lpc18_43.h"
#else


@ -58,7 +58,8 @@
#define FRAMELIST_SIZE (1024 >> FRAMELIST_SIZE_BIT_VALUE)
#define QHD_MAX (CFG_TUH_DEVICE_MAX*CFG_TUH_ENDPOINT_MAX)
// Total queue head pool. TODO should be user configurable and memory usage further optimized in the future
#define QHD_MAX (CFG_TUH_DEVICE_MAX*CFG_TUH_ENDPOINT_MAX + CFG_TUH_HUB)
#define QTD_MAX QHD_MAX
typedef struct
@ -91,17 +92,31 @@ CFG_TUH_MEM_SECTION TU_ATTR_ALIGNED(4096) static ehci_data_t ehci_data;
//--------------------------------------------------------------------+
// Debug
//--------------------------------------------------------------------+
#if CFG_TUSB_DEBUG >= EHCI_DBG
static inline void print_portsc(ehci_registers_t* regs)
{
#if CFG_TUSB_DEBUG >= (EHCI_DBG + 1)
static inline void print_portsc(ehci_registers_t* regs) {
TU_LOG_HEX(EHCI_DBG, regs->portsc);
TU_LOG(EHCI_DBG, " Current Connect Status: %u\r\n", regs->portsc_bm.current_connect_status);
TU_LOG(EHCI_DBG, " Connect Status Change : %u\r\n", regs->portsc_bm.connect_status_change);
TU_LOG(EHCI_DBG, " Port Enabled : %u\r\n", regs->portsc_bm.port_enabled);
TU_LOG(EHCI_DBG, " Port Enabled Change : %u\r\n", regs->portsc_bm.port_enable_change);
TU_LOG(EHCI_DBG, " Connect Status : %u\r\n", regs->portsc_bm.current_connect_status);
TU_LOG(EHCI_DBG, " Connect Change : %u\r\n", regs->portsc_bm.connect_status_change);
TU_LOG(EHCI_DBG, " Enabled : %u\r\n", regs->portsc_bm.port_enabled);
TU_LOG(EHCI_DBG, " Enabled Change : %u\r\n", regs->portsc_bm.port_enable_change);
TU_LOG(EHCI_DBG, " Port Reset : %u\r\n", regs->portsc_bm.port_reset);
TU_LOG(EHCI_DBG, " Port Power : %u\r\n", regs->portsc_bm.port_power);
TU_LOG(EHCI_DBG, " OverCurr Change: %u\r\n", regs->portsc_bm.over_current_change);
TU_LOG(EHCI_DBG, " Force Resume : %u\r\n", regs->portsc_bm.force_port_resume);
TU_LOG(EHCI_DBG, " Suspend : %u\r\n", regs->portsc_bm.suspend);
TU_LOG(EHCI_DBG, " Reset : %u\r\n", regs->portsc_bm.port_reset);
TU_LOG(EHCI_DBG, " Power : %u\r\n", regs->portsc_bm.port_power);
}
static inline void print_intr(uint32_t intr) {
TU_LOG_HEX(EHCI_DBG, intr);
TU_LOG(EHCI_DBG, " USB Interrupt : %u\r\n", (intr & EHCI_INT_MASK_USB) ? 1 : 0);
TU_LOG(EHCI_DBG, " USB Error : %u\r\n", (intr & EHCI_INT_MASK_ERROR) ? 1 : 0);
TU_LOG(EHCI_DBG, " Port Change Detect : %u\r\n", (intr & EHCI_INT_MASK_PORT_CHANGE) ? 1 : 0);
TU_LOG(EHCI_DBG, " Frame List Rollover: %u\r\n", (intr & EHCI_INT_MASK_FRAMELIST_ROLLOVER) ? 1 : 0);
TU_LOG(EHCI_DBG, " Host System Error : %u\r\n", (intr & EHCI_INT_MASK_PCI_HOST_SYSTEM_ERROR) ? 1 : 0);
TU_LOG(EHCI_DBG, " Async Advance : %u\r\n", (intr & EHCI_INT_MASK_ASYNC_ADVANCE) ? 1 : 0);
// TU_LOG(EHCI_DBG, " Interrupt on Async: %u\r\n", (intr & EHCI_INT_MASK_NXP_ASYNC));
// TU_LOG(EHCI_DBG, " Periodic Schedule : %u\r\n", (intr & EHCI_INT_MASK_NXP_PERIODIC));
}
#else
@ -138,24 +153,33 @@ static inline ehci_qtd_t* qtd_control(uint8_t dev_addr)
static inline ehci_qhd_t* qhd_next (ehci_qhd_t const * p_qhd);
static inline ehci_qhd_t* qhd_find_free (void);
static inline ehci_qhd_t* qhd_get_from_addr (uint8_t dev_addr, uint8_t ep_addr);
// determine if a queue head has bus-related error
static inline bool qhd_has_xact_error (ehci_qhd_t * p_qhd)
{
return (p_qhd->qtd_overlay.buffer_err || p_qhd->qtd_overlay.babble_err || p_qhd->qtd_overlay.xact_err);
//p_qhd->qtd_overlay.non_hs_period_missed_uframe || p_qhd->qtd_overlay.pingstate_err TODO split transaction error
}
static void qhd_init(ehci_qhd_t *p_qhd, uint8_t dev_addr, tusb_desc_endpoint_t const * ep_desc);
static void qhd_attach_qtd(ehci_qhd_t *qhd, ehci_qtd_t *qtd);
static inline ehci_qtd_t* qtd_find_free (void);
static inline ehci_qtd_t* qtd_next (ehci_qtd_t const * p_qtd);
static inline void qtd_insert_to_qhd (ehci_qhd_t *p_qhd, ehci_qtd_t *p_qtd_new);
static inline void qtd_remove_1st_from_qhd (ehci_qhd_t *p_qhd);
static void qtd_init (ehci_qtd_t* p_qtd, void const* buffer, uint16_t total_bytes);
static void qtd_init (ehci_qtd_t* qtd, void const* buffer, uint16_t total_bytes);
static inline void list_insert (ehci_link_t *current, ehci_link_t *new, uint8_t new_type);
static inline ehci_link_t* list_next (ehci_link_t *p_link_pointer);
static inline ehci_link_t* list_next (ehci_link_t const *p_link);
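// clean/flush data cache: write cache -> memory
// Default weak implementations below are no-ops; a port with a data cache (e.g. the iMX RT
// implementation earlier in this diff) provides strong overrides.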
TU_ATTR_WEAK void hcd_dcache_clean(void* addr, uint32_t data_size) {
(void) addr;
(void) data_size;
}
// invalidate data cache: mark cache as invalid, next read will read from memory
// Required BOTH before and after a DMA RX transfer
TU_ATTR_WEAK void hcd_dcache_invalidate(void* addr, uint32_t data_size) {
(void) addr;
(void) data_size;
}
// clean and invalidate data cache
// Required before a DMA transfer where memory is both read/write by DMA
TU_ATTR_WEAK void hcd_dcache_clean_invalidate(void* addr, uint32_t data_size) {
(void) addr;
(void) data_size;
}
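qhd_attach_qtd() is declared above but its body falls outside the hunks shown in this diff. Purely as a hedged sketch, inferred from the inline code this PR removes (writing the TD address into the queue head overlay) and from the attached_qtd field consumed by qhd_xfer_complete_isr further below, attaching a TD plausibly looks something like:

// Hypothetical reconstruction, not the actual body from this PR.
static void example_attach_qtd(ehci_qhd_t *qhd, ehci_qtd_t *qtd) {
  // remember the TD (the real helper presumably also records the transfer buffer address in
  // qhd->attached_buffer) so the completion ISR can invalidate the buffer and free the TD later
  qhd->attached_qtd = qtd;

  // push the freshly built TD to memory before the host controller fetches it
  hcd_dcache_clean_invalidate(qtd, sizeof(ehci_qtd_t));

  // link the TD into the queue head's overlay; the controller starts the transfer from here
  qhd->qtd_overlay.next.address = (uint32_t) qtd;
  hcd_dcache_clean_invalidate(qhd, sizeof(ehci_qhd_t));
}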
//--------------------------------------------------------------------+
// HCD API
@ -173,8 +197,8 @@ void hcd_port_reset(uint8_t rhport)
ehci_registers_t* regs = ehci_data.regs;
// mask out all change bits since they are Write 1 to clear
uint32_t portsc = regs->portsc & ~EHCI_PORTSC_MASK_CHANGE_ALL;
// mask out Write-1-to-Clear bits
uint32_t portsc = regs->portsc & ~EHCI_PORTSC_MASK_W1C;
// EHCI Table 2-16 PortSC
// when software writes Port Reset bit to a one, it must also write a zero to the Port Enable bit.
@ -211,24 +235,22 @@ tusb_speed_t hcd_port_speed_get(uint8_t rhport)
return (tusb_speed_t) ehci_data.regs->portsc_bm.nxp_port_speed; // NXP specific port speed
}
static void list_remove_qhd_by_addr(ehci_link_t* list_head, uint8_t dev_addr)
{
for(ehci_link_t* prev = list_head;
!prev->terminate && (tu_align32(prev->address) != (uint32_t) list_head) && prev != NULL;
prev = list_next(prev) )
{
// TODO check type for ISO iTD and siTD
// TODO Suppress cast-align warning
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
ehci_qhd_t* qhd = (ehci_qhd_t*) list_next(prev);
#pragma GCC diagnostic pop
if ( qhd->dev_addr == dev_addr )
{
static void list_remove_qhd_by_daddr(ehci_link_t* list_head, uint8_t dev_addr) {
ehci_link_t* prev = list_head;
while (prev && !prev->terminate) {
ehci_qhd_t* qhd = (ehci_qhd_t*) (uintptr_t) list_next(prev);
// done if loop back to head
if ( (uintptr_t) qhd == (uintptr_t) list_head) {
break;
}
if ( qhd->dev_addr == dev_addr ) {
// TODO deactivate all TD, wait for QHD to inactive before removal
prev->address = qhd->next.address;
// EHCI 4.8.2 link the removed qhd to async head (which always reachable by Host Controller)
// EHCI 4.8.2 link the removed qhd's next to async head (which is always reachable by the Host Controller)
qhd->next.address = ((uint32_t) list_head) | (EHCI_QTYPE_QHD << 1);
if ( qhd->int_smask )
@ -241,29 +263,69 @@ static void list_remove_qhd_by_addr(ehci_link_t* list_head, uint8_t dev_addr)
// mark as removing, will completely re-usable when async advance isr occurs
qhd->removing = 1;
}
hcd_dcache_clean(qhd, sizeof(ehci_qhd_t));
hcd_dcache_clean(prev, sizeof(ehci_qhd_t));
}else {
prev = list_next(prev);
}
}
}
// Close all opened endpoint belong to this device
void hcd_device_close(uint8_t rhport, uint8_t dev_addr)
void hcd_device_close(uint8_t rhport, uint8_t daddr)
{
// skip dev0
if (dev_addr == 0) return;
if (daddr == 0) {
return;
}
// Remove from async list
list_remove_qhd_by_addr( (ehci_link_t*) qhd_async_head(rhport), dev_addr );
list_remove_qhd_by_daddr((ehci_link_t *) qhd_async_head(rhport), daddr);
// Remove from all interval period list
for(uint8_t i = 0; i < TU_ARRAY_SIZE(ehci_data.period_head_arr); i++)
{
list_remove_qhd_by_addr( (ehci_link_t*) &ehci_data.period_head_arr[i], dev_addr);
for(uint8_t i = 0; i < TU_ARRAY_SIZE(ehci_data.period_head_arr); i++) {
list_remove_qhd_by_daddr((ehci_link_t *) &ehci_data.period_head_arr[i], daddr);
}
// Async doorbell (EHCI 4.8.2 for operational details)
ehci_data.regs->command_bm.async_adv_doorbell = 1;
}
static void init_periodic_list(uint8_t rhport) {
// Build the polling interval tree with 1 ms, 2 ms, 4 ms and 8 ms (framesize) only
for ( uint32_t i = 0; i < TU_ARRAY_SIZE(ehci_data.period_head_arr); i++ ) {
ehci_data.period_head_arr[i].int_smask = 1; // queue head in period list must have smask non-zero
ehci_data.period_head_arr[i].qtd_overlay.halted = 1; // dummy node, always inactive
}
ehci_link_t * const framelist = ehci_data.period_framelist;
ehci_link_t * const period_1ms = get_period_head(rhport, 1u);
// all links --> period_head_arr[0] (1ms)
// 0, 2, 4, 6 etc --> period_head_arr[1] (2ms)
// 1, 5 --> period_head_arr[2] (4ms)
// 3 --> period_head_arr[3] (8ms)
// TODO EHCI_FRAMELIST_SIZE with other size than 8
for (uint32_t i = 0; i < FRAMELIST_SIZE; i++) {
framelist[i].address = (uint32_t) period_1ms;
framelist[i].type = EHCI_QTYPE_QHD;
}
for (uint32_t i = 0; i < FRAMELIST_SIZE; i += 2) {
list_insert(framelist + i, get_period_head(rhport, 2u), EHCI_QTYPE_QHD);
}
for (uint32_t i = 1; i < FRAMELIST_SIZE; i += 4) {
list_insert(framelist + i, get_period_head(rhport, 4u), EHCI_QTYPE_QHD);
}
list_insert(framelist + 3, get_period_head(rhport, 8u), EHCI_QTYPE_QHD);
period_1ms->terminate = 1;
}
bool ehci_init(uint8_t rhport, uint32_t capability_reg, uint32_t operatial_reg)
{
tu_memclr(&ehci_data, sizeof(ehci_data_t));
@ -302,43 +364,10 @@ bool ehci_init(uint8_t rhport, uint32_t capability_reg, uint32_t operatial_reg)
regs->async_list_addr = (uint32_t) async_head;
//------------- Periodic List -------------//
// Build the polling interval tree with 1 ms, 2 ms, 4 ms and 8 ms (framesize) only
for ( uint32_t i = 0; i < TU_ARRAY_SIZE(ehci_data.period_head_arr); i++ )
{
ehci_data.period_head_arr[i].int_smask = 1; // queue head in period list must have smask non-zero
ehci_data.period_head_arr[i].qtd_overlay.halted = 1; // dummy node, always inactive
}
init_periodic_list(rhport);
regs->periodic_list_base = (uint32_t) ehci_data.period_framelist;
ehci_link_t * const framelist = ehci_data.period_framelist;
ehci_link_t * const period_1ms = get_period_head(rhport, 1u);
// all links --> period_head_arr[0] (1ms)
// 0, 2, 4, 6 etc --> period_head_arr[1] (2ms)
// 1, 5 --> period_head_arr[2] (4ms)
// 3 --> period_head_arr[3] (8ms)
// TODO EHCI_FRAMELIST_SIZE with other size than 8
for(uint32_t i=0; i<FRAMELIST_SIZE; i++)
{
framelist[i].address = (uint32_t) period_1ms;
framelist[i].type = EHCI_QTYPE_QHD;
}
for(uint32_t i=0; i<FRAMELIST_SIZE; i+=2)
{
list_insert(framelist + i, get_period_head(rhport, 2u), EHCI_QTYPE_QHD);
}
for(uint32_t i=1; i<FRAMELIST_SIZE; i+=4)
{
list_insert(framelist + i, get_period_head(rhport, 4u), EHCI_QTYPE_QHD);
}
list_insert(framelist+3, get_period_head(rhport, 8u), EHCI_QTYPE_QHD);
period_1ms->terminate = 1;
regs->periodic_list_base = (uint32_t) framelist;
hcd_dcache_clean(&ehci_data, sizeof(ehci_data_t));
//------------- TT Control (NXP only) -------------//
regs->nxp_tt_control = 0;
@ -354,7 +383,7 @@ bool ehci_init(uint8_t rhport, uint32_t capability_reg, uint32_t operatial_reg)
// Power Control (PPC) field in the HCSPARAMS register.
if (ehci_data.cap_regs->hcsparams_bm.port_power_control) {
// mask out all change bits since they are Write 1 to clear
uint32_t portsc = (regs->portsc & ~EHCI_PORTSC_MASK_CHANGE_ALL);
uint32_t portsc = (regs->portsc & ~EHCI_PORTSC_MASK_W1C);
portsc |= ECHI_PORTSC_MASK_PORT_POWER;
regs->portsc = portsc;
@ -389,15 +418,7 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const
TU_ASSERT (ep_desc->bmAttributes.xfer != TUSB_XFER_ISOCHRONOUS);
//------------- Prepare Queue Head -------------//
ehci_qhd_t * p_qhd;
if ( ep_desc->bEndpointAddress == 0 )
{
p_qhd = qhd_control(dev_addr);
}else
{
p_qhd = qhd_find_free();
}
ehci_qhd_t *p_qhd = (ep_desc->bEndpointAddress == 0) ? qhd_control(dev_addr) : qhd_find_free();
TU_ASSERT(p_qhd);
qhd_init(p_qhd, dev_addr, ep_desc);
@ -431,6 +452,9 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const
// TODO might need to disable async/period list
list_insert(list_head, (ehci_link_t*) p_qhd, EHCI_QTYPE_QHD);
hcd_dcache_clean(p_qhd, sizeof(ehci_qhd_t));
hcd_dcache_clean(list_head, sizeof(ehci_qhd_t));
return true;
}
@ -442,16 +466,12 @@ bool hcd_setup_send(uint8_t rhport, uint8_t dev_addr, uint8_t const setup_packet
ehci_qtd_t* td = &ehci_data.control[dev_addr].qtd;
qtd_init(td, setup_packet, 8);
td->pid = EHCI_PID_SETUP;
td->int_on_complete = 1;
td->next.terminate = 1;
td->pid = EHCI_PID_SETUP;
// sw region
qhd->p_qtd_list_head = td;
qhd->p_qtd_list_tail = td;
hcd_dcache_clean((void *) setup_packet, 8);
// attach TD
qhd->qtd_overlay.next.address = (uint32_t) td;
// attach TD to QHD -> start transferring
qhd_attach_qtd(qhd, td);
return true;
}
@ -463,50 +483,45 @@ bool hcd_edpt_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr, uint8_t *
uint8_t const epnum = tu_edpt_number(ep_addr);
uint8_t const dir = tu_edpt_dir(ep_addr);
if ( epnum == 0 )
{
ehci_qhd_t* qhd = qhd_control(dev_addr);
ehci_qtd_t* qtd = qtd_control(dev_addr);
ehci_qhd_t* qhd;
ehci_qtd_t* qtd;
if (epnum == 0) {
qhd = qhd_control(dev_addr);
qtd = qtd_control(dev_addr);
qtd_init(qtd, buffer, buflen);
// first first data toggle is always 1 (data & setup stage)
// first data toggle is always 1 (data & setup stage)
qtd->data_toggle = 1;
qtd->pid = dir ? EHCI_PID_IN : EHCI_PID_OUT;
qtd->int_on_complete = 1;
qtd->next.terminate = 1;
} else {
qhd = qhd_get_from_addr(dev_addr, ep_addr);
qtd = qtd_find_free();
TU_ASSERT(qtd);
// sw region
qhd->p_qtd_list_head = qtd;
qhd->p_qtd_list_tail = qtd;
// attach TD
qhd->qtd_overlay.next.address = (uint32_t) qtd;
}else
{
ehci_qhd_t *p_qhd = qhd_get_from_addr(dev_addr, ep_addr);
ehci_qtd_t *p_qtd = qtd_find_free();
TU_ASSERT(p_qtd);
qtd_init(p_qtd, buffer, buflen);
p_qtd->pid = p_qhd->pid;
// Insert TD to QH
qtd_insert_to_qhd(p_qhd, p_qtd);
p_qhd->p_qtd_list_tail->int_on_complete = 1;
// attach head QTD to QHD start transferring
p_qhd->qtd_overlay.next.address = (uint32_t) p_qhd->p_qtd_list_head;
qtd_init(qtd, buffer, buflen);
qtd->pid = qhd->pid;
}
// IN transfer: invalidate buffer, OUT transfer: clean buffer
if (dir) {
hcd_dcache_invalidate(buffer, buflen);
}else {
hcd_dcache_clean(buffer, buflen);
}
// attach TD to QHD -> start transferring
qhd_attach_qtd(qhd, qtd);
return true;
}
bool hcd_edpt_clear_stall(uint8_t dev_addr, uint8_t ep_addr)
bool hcd_edpt_clear_stall(uint8_t daddr, uint8_t ep_addr)
{
ehci_qhd_t *p_qhd = qhd_get_from_addr(dev_addr, ep_addr);
p_qhd->qtd_overlay.halted = 0;
ehci_qhd_t *qhd = qhd_get_from_addr(daddr, ep_addr);
qhd->qtd_overlay.halted = 0;
hcd_dcache_clean_invalidate(qhd, sizeof(ehci_qhd_t));
// TODO reset data toggle ?
return true;
}
@ -518,22 +533,22 @@ bool hcd_edpt_clear_stall(uint8_t dev_addr, uint8_t ep_addr)
// async_advance is a handshake between the usb stack & the ehci controller.
// This ISR means it is safe to modify a previously removed queue head from the async list.
// In tinyusb, a queue head is only removed when the device is unplugged.
static void async_advance_isr(uint8_t rhport)
TU_ATTR_ALWAYS_INLINE static inline
void async_advance_isr(uint8_t rhport)
{
(void) rhport;
ehci_qhd_t* qhd_pool = ehci_data.qhd_pool;
for(uint32_t i = 0; i < QHD_MAX; i++)
{
if ( qhd_pool[i].removing )
{
ehci_qhd_t *qhd_pool = ehci_data.qhd_pool;
for (uint32_t i = 0; i < QHD_MAX; i++) {
if (qhd_pool[i].removing) {
qhd_pool[i].removing = 0;
qhd_pool[i].used = 0;
qhd_pool[i].used = 0;
}
}
}
static void port_connect_status_change_isr(uint8_t rhport)
TU_ATTR_ALWAYS_INLINE static inline
void port_connect_status_change_isr(uint8_t rhport)
{
// NOTE There is a plug -> unplug -> ... -> plug sequence if the port is powered up with a device already plugged in
if (ehci_data.regs->portsc_bm.current_connect_status)
@ -546,132 +561,169 @@ static void port_connect_status_change_isr(uint8_t rhport)
}
}
static void qhd_xfer_complete_isr(ehci_qhd_t * p_qhd)
{
// free all TDs from the head td to the first active TD
while(p_qhd->p_qtd_list_head != NULL && !p_qhd->p_qtd_list_head->active)
{
ehci_qtd_t * volatile qtd = (ehci_qtd_t * volatile) p_qhd->p_qtd_list_head;
bool const is_ioc = (qtd->int_on_complete != 0);
uint8_t const ep_addr = tu_edpt_addr(p_qhd->ep_number, qtd->pid == EHCI_PID_IN ? 1 : 0);
TU_ATTR_ALWAYS_INLINE static inline
void qhd_xfer_complete_isr(ehci_qhd_t * qhd) {
// examine TD attached to queue head
ehci_qtd_t * volatile qtd = (ehci_qtd_t * volatile) qhd->attached_qtd;
if (qtd == NULL) return; // no TD attached
hcd_dcache_invalidate(qtd, sizeof(ehci_qtd_t));
p_qhd->total_xferred_bytes += qtd->expected_bytes - qtd->total_bytes;
// TD need to be freed and removed from qhd, before invoking callback
qtd->used = 0; // free QTD
qtd_remove_1st_from_qhd(p_qhd);
if (is_ioc)
{
hcd_event_xfer_complete(p_qhd->dev_addr, ep_addr, p_qhd->total_xferred_bytes, XFER_RESULT_SUCCESS, true);
p_qhd->total_xferred_bytes = 0;
}
// TD is still active, no need to process
if (qtd->active) {
return;
}
uint8_t dir = (qtd->pid == EHCI_PID_IN) ? 1 : 0;
uint32_t const xferred_bytes = qtd->expected_bytes - qtd->total_bytes;
// invalidate dcache if IN transfer
if (dir == 1 && qhd->attached_buffer != 0 && xferred_bytes > 0) {
hcd_dcache_invalidate((void*) qhd->attached_buffer, xferred_bytes);
}
// remove and free TD before invoking callback
qhd->attached_qtd = NULL;
qhd->attached_buffer = 0;
qtd->used = 0; // free QTD
// notify usbh
uint8_t const ep_addr = tu_edpt_addr(qhd->ep_number, dir);
hcd_event_xfer_complete(qhd->dev_addr, ep_addr, xferred_bytes, XFER_RESULT_SUCCESS, true);
}
static void async_list_xfer_complete_isr(ehci_qhd_t * const async_head)
TU_ATTR_ALWAYS_INLINE static inline
void async_list_xfer_complete_isr(ehci_qhd_t * const async_head)
{
ehci_qhd_t *p_qhd = async_head;
do
{
if ( !p_qhd->qtd_overlay.halted ) // halted or error is processed in error isr
{
hcd_dcache_invalidate(p_qhd, sizeof(ehci_qhd_t));
// halted or error is processed in error isr
if ( !p_qhd->qtd_overlay.halted ) {
qhd_xfer_complete_isr(p_qhd);
}
p_qhd = qhd_next(p_qhd);
}while(p_qhd != async_head); // async list traversal, stop if loop around
}
static void period_list_xfer_complete_isr(uint8_t hostid, uint32_t interval_ms)
TU_ATTR_ALWAYS_INLINE static inline
void period_list_xfer_complete_isr(uint8_t rhport, uint32_t interval_ms)
{
uint16_t max_loop = 0;
uint32_t const period_1ms_addr = (uint32_t) get_period_head(hostid, 1u);
ehci_link_t next_item = * get_period_head(hostid, interval_ms);
uint32_t const period_1ms_addr = (uint32_t) get_period_head(rhport, 1u);
ehci_link_t next_link = * get_period_head(rhport, interval_ms);
// TODO abstract max loop guard for period
while( !next_item.terminate &&
!(interval_ms > 1 && period_1ms_addr == tu_align32(next_item.address)) &&
max_loop < (QHD_MAX + EHCI_MAX_ITD + EHCI_MAX_SITD)*CFG_TUH_DEVICE_MAX)
{
switch ( next_item.type )
{
case EHCI_QTYPE_QHD:
{
ehci_qhd_t *p_qhd_int = (ehci_qhd_t *) tu_align32(next_item.address);
if ( !p_qhd_int->qtd_overlay.halted )
{
qhd_xfer_complete_isr(p_qhd_int);
while (!next_link.terminate) {
if (interval_ms > 1 && period_1ms_addr == tu_align32(next_link.address)) {
// 1ms period list is the end of the list for all larger intervals
break;
}
uintptr_t const entry_addr = tu_align32(next_link.address);
switch (next_link.type) {
case EHCI_QTYPE_QHD: {
ehci_qhd_t *qhd = (ehci_qhd_t *) entry_addr;
hcd_dcache_invalidate(qhd, sizeof(ehci_qhd_t));
if (!qhd->qtd_overlay.halted) {
qhd_xfer_complete_isr(qhd);
}
}
break;
break;
case EHCI_QTYPE_ITD:
// TODO support hs ISO
break;
case EHCI_QTYPE_ITD: // TODO support hs/fs ISO
case EHCI_QTYPE_SITD:
// TODO support split ISO
break;
case EHCI_QTYPE_FSTN:
default: break;
default:
break;
}
next_item = *list_next(&next_item);
max_loop++;
next_link = *list_next(&next_link);
}
}
static void qhd_xfer_error_isr(ehci_qhd_t * p_qhd)
// TODO merge with qhd_xfer_complete_isr()
TU_ATTR_ALWAYS_INLINE static inline
void qhd_xfer_error_isr(ehci_qhd_t * qhd)
{
  volatile ehci_qtd_t *qtd_overlay = &qhd->qtd_overlay;

  // TD has error
  if (qtd_overlay->halted) {
    xfer_result_t xfer_result;

    if (qtd_overlay->xact_err || qtd_overlay->err_count == 0 || qtd_overlay->buffer_err || qtd_overlay->babble_err) {
      // Error count = 0 often occurs when device disconnected, or other bus-related error
      xfer_result = XFER_RESULT_FAILED;
    }else {
      // no error bits are set, endpoint is halted due to STALL
      xfer_result = XFER_RESULT_STALLED;
    }

    // if (XFER_RESULT_FAILED == xfer_result ) {
    //   TU_LOG1("  QHD xfer err count: %d\n", qtd_overlay->err_count);
    //   TU_BREAKPOINT(); // TODO skip unplugged device
    // }
ehci_qtd_t * volatile qtd = (ehci_qtd_t * volatile) qhd->attached_qtd;
TU_ASSERT(qtd, ); // No TD yet, probably a race condition or cache issue !?
hcd_dcache_invalidate(qtd, sizeof(ehci_qtd_t));
uint8_t dir = (qtd->pid == EHCI_PID_IN) ? 1 : 0;
uint32_t const xferred_bytes = qtd->expected_bytes - qtd->total_bytes;
// invalidate dcache if IN transfer
if (dir == 1 && qhd->attached_buffer != 0 && xferred_bytes > 0) {
hcd_dcache_invalidate((void*) qhd->attached_buffer, xferred_bytes);
}
// remove and free TD before invoking callback
qhd->attached_qtd = NULL;
qhd->attached_buffer = 0;
qtd->used = 0; // free QTD
if (0 == qhd->ep_number ) {
// control cannot be halted
qhd->qtd_overlay.next.terminate = 1;
qhd->qtd_overlay.alternate.terminate = 1;
qhd->qtd_overlay.halted = 0;
hcd_dcache_clean(qhd, sizeof(ehci_qhd_t));
}
// notify usbh
uint8_t const ep_addr = tu_edpt_addr(qhd->ep_number, dir);
hcd_event_xfer_complete(qhd->dev_addr, ep_addr, xferred_bytes, xfer_result, true);
}
}
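The error path reduces to two computations: bytes actually transferred (expected_bytes minus the remaining total_bytes count the controller decrements) and whether the halt was a genuine bus error or a protocol STALL. A standalone restatement of that decision, using illustrative types rather than the driver's own:

// Standalone restatement of the error-classification logic above (illustrative only).
#include <stdbool.h>
#include <stdint.h>

typedef enum { RESULT_SUCCESS, RESULT_FAILED, RESULT_STALLED } result_t;

typedef struct {
  bool xact_err;        // transaction error (CRC, timeout, bad PID ...)
  bool buffer_err;      // data buffer over/underrun
  bool babble_err;      // babble detected
  uint8_t err_count;    // hardware retry counter, 0 usually means retries exhausted (e.g. disconnect)
  uint16_t expected_bytes;
  uint16_t total_bytes; // remaining count, decremented by the host controller
} qtd_status_t;

static result_t classify_halt(qtd_status_t const* s, uint32_t* xferred_bytes) {
  *xferred_bytes = (uint32_t)(s->expected_bytes - s->total_bytes);
  if (s->xact_err || s->buffer_err || s->babble_err || s->err_count == 0) {
    return RESULT_FAILED;   // real bus/transaction error
  }
  return RESULT_STALLED;    // halted with no error bits set -> endpoint STALL
}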
TU_ATTR_ALWAYS_INLINE static inline
void xfer_error_isr(uint8_t rhport)
{
//------------- async list -------------//
ehci_qhd_t * const async_head = qhd_async_head(rhport);
ehci_qhd_t *p_qhd = async_head;
do
{
hcd_dcache_invalidate(p_qhd, sizeof(ehci_qhd_t));
qhd_xfer_error_isr( p_qhd );
p_qhd = qhd_next(p_qhd);
}while(p_qhd != async_head); // async list traversal, stop if loop around
//------------- TODO refactor period list -------------//
uint32_t const period_1ms_addr = (uint32_t) get_period_head(rhport, 1u);
for (uint32_t interval_ms=1; interval_ms <= FRAMELIST_SIZE; interval_ms *= 2)
{
ehci_link_t next_item = * get_period_head(rhport, interval_ms);
// TODO abstract max loop guard for period
while( !next_item.terminate &&
@ -682,6 +734,8 @@ static void xfer_error_isr(uint8_t hostid)
case EHCI_QTYPE_QHD:
{
ehci_qhd_t *p_qhd_int = (ehci_qhd_t *) tu_align32(next_item.address);
hcd_dcache_invalidate(p_qhd_int, sizeof(ehci_qhd_t));
qhd_xfer_error_isr(p_qhd_int);
}
break;
@ -702,55 +756,63 @@ static void xfer_error_isr(uint8_t hostid)
void hcd_int_handler(uint8_t rhport)
{
ehci_registers_t* regs = ehci_data.regs;
uint32_t int_status = regs->status;
int_status &= regs->inten;
regs->status = int_status; // Acknowledge handled interrupt
if (int_status == 0) return;
if (int_status & EHCI_INT_MASK_HC_HALTED) {
// something seriously wrong, maybe forget to flush/invalidate cache
TU_BREAKPOINT();
TU_LOG1(" HC halted\n");
return;
}
if (int_status & EHCI_INT_MASK_FRAMELIST_ROLLOVER) {
ehci_data.uframe_number += (FRAMELIST_SIZE << 3);
regs->status = EHCI_INT_MASK_FRAMELIST_ROLLOVER; // Acknowledge
}
if (int_status & EHCI_INT_MASK_PORT_CHANGE) {
// Including: Force port resume, over-current change, enable/disable change and connect status change.
uint32_t const port_status = regs->portsc & EHCI_PORTSC_MASK_W1C;
print_portsc(regs);
if (regs->portsc_bm.connect_status_change) {
port_connect_status_change_isr(rhport);
}
regs->portsc |= port_status; // Acknowledge change bits in portsc
regs->status = EHCI_INT_MASK_PORT_CHANGE; // Acknowledge
}
if (int_status & EHCI_INT_MASK_ERROR) {
xfer_error_isr(rhport);
regs->status = EHCI_INT_MASK_ERROR; // Acknowledge
}
//------------- some QTD/SITD/ITD with IOC set is completed -------------//
if (int_status & EHCI_INT_MASK_NXP_ASYNC) {
async_list_xfer_complete_isr(qhd_async_head(rhport));
regs->status = EHCI_INT_MASK_NXP_ASYNC; // Acknowledge
}
if (int_status & EHCI_INT_MASK_NXP_PERIODIC)
{
for (uint32_t i=1; i <= FRAMELIST_SIZE; i *= 2)
{
period_list_xfer_complete_isr(rhport, i);
}
regs->status = EHCI_INT_MASK_NXP_PERIODIC; // Acknowledge
}
if (int_status & EHCI_INT_MASK_USB) {
// TODO standard EHCI xfer complete
regs->status = EHCI_INT_MASK_USB; // Acknowledge
}
//------------- There is some removed async previously -------------//
// need to place after EHCI_INT_MASK_NXP_ASYNC
if (int_status & EHCI_INT_MASK_ASYNC_ADVANCE) {
async_advance_isr(rhport);
regs->status = EHCI_INT_MASK_ASYNC_ADVANCE; // Acknowledge
}
}
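USBSTS bits are write-1-to-clear, which is why each handled condition writes only its own mask back instead of doing a read-modify-write. A minimal sketch of the pattern against a generic EHCI-style register block (field and constant names here are illustrative, not the ehci_registers_t layout):

// Write-1-to-clear acknowledge pattern for an EHCI-style status register (illustrative).
#include <stdint.h>

typedef struct {
  volatile uint32_t usbsts;   // status: each bit is W1C
  volatile uint32_t usbintr;  // interrupt enable mask
} ehci_regs_sketch_t;

enum { INT_USB = 1u << 0, INT_ERROR = 1u << 1, INT_PORT_CHANGE = 1u << 2 };

void irq_handler_sketch(ehci_regs_sketch_t* regs) {
  uint32_t status = regs->usbsts & regs->usbintr;  // only consider enabled sources
  if (status == 0) return;                         // spurious or shared interrupt

  if (status & INT_ERROR) {
    // ... handle transfer errors ...
    regs->usbsts = INT_ERROR;        // writing 1 clears only this bit; other pending bits survive
  }
  if (status & INT_PORT_CHANGE) {
    // ... handle port change ...
    regs->usbsts = INT_PORT_CHANGE;
  }
  if (status & INT_USB) {
    // ... handle transfer completion ...
    regs->usbsts = INT_USB;
  }
}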
@ -791,50 +853,10 @@ static inline ehci_qhd_t* qhd_get_from_addr(uint8_t dev_addr, uint8_t ep_addr)
return NULL;
}
//------------- TD helper -------------//
static inline ehci_qtd_t* qtd_find_free(void)
{
for (uint32_t i=0; i<QTD_MAX; i++)
{
if ( !ehci_data.qtd_pool[i].used ) return &ehci_data.qtd_pool[i];
}
return NULL;
}
static inline ehci_qtd_t* qtd_next(ehci_qtd_t const * p_qtd )
{
return (ehci_qtd_t*) tu_align32(p_qtd->next.address);
}
static inline void qtd_remove_1st_from_qhd(ehci_qhd_t *p_qhd)
{
if (p_qhd->p_qtd_list_head == p_qhd->p_qtd_list_tail) // last TD --> make it NULL
{
p_qhd->p_qtd_list_head = p_qhd->p_qtd_list_tail = NULL;
}else
{
p_qhd->p_qtd_list_head = qtd_next( p_qhd->p_qtd_list_head );
}
}
static inline void qtd_insert_to_qhd(ehci_qhd_t *p_qhd, ehci_qtd_t *p_qtd_new)
{
if (p_qhd->p_qtd_list_head == NULL) // empty list
{
p_qhd->p_qtd_list_head = p_qhd->p_qtd_list_tail = p_qtd_new;
}else
{
p_qhd->p_qtd_list_tail->next.address = (uint32_t) p_qtd_new;
p_qhd->p_qtd_list_tail = p_qtd_new;
}
}
static void qhd_init(ehci_qhd_t *p_qhd, uint8_t dev_addr, tusb_desc_endpoint_t const * ep_desc)
{
  // address 0 is used as async head, which is always on the list --> cannot be cleared (ehci halted otherwise)
  if (dev_addr != 0) {
tu_memclr(p_qhd, sizeof(ehci_qhd_t));
}
@ -884,58 +906,80 @@ static void qhd_init(ehci_qhd_t *p_qhd, uint8_t dev_addr, tusb_desc_endpoint_t c
p_qhd->int_smask = p_qhd->fl_int_cmask = 0;
}
p_qhd->fl_hub_addr = devtree_info.hub_addr;
p_qhd->fl_hub_port = devtree_info.hub_port;
p_qhd->mult = 1; // TODO not use high bandwidth/park mode yet
//------------- HCD Management Data -------------//
p_qhd->used = 1;
p_qhd->removing = 0;
p_qhd->attached_qtd = NULL;
p_qhd->pid = tu_edpt_dir(ep_desc->bEndpointAddress) ? EHCI_PID_IN : EHCI_PID_OUT; // PID for TD under this endpoint
//------------- active, but no TD list -------------//
p_qhd->qtd_overlay.halted = 0;
p_qhd->qtd_overlay.next.terminate = 1;
p_qhd->qtd_overlay.alternate.terminate = 1;
if (TUSB_XFER_BULK == xfer_type && p_qhd->ep_speed == TUSB_SPEED_HIGH && p_qhd->pid == EHCI_PID_OUT)
{
p_qhd->qtd_overlay.ping_err = 1; // do PING for Highspeed Bulk OUT, EHCI section 4.11
}
}
static void qhd_attach_qtd(ehci_qhd_t *qhd, ehci_qtd_t *qtd) {
qhd->attached_qtd = qtd;
qhd->attached_buffer = qtd->buffer[0];
// clean and invalidate cache before physically write
hcd_dcache_clean_invalidate(qtd, sizeof(ehci_qtd_t));
qhd->qtd_overlay.next.address = (uint32_t) qtd;
hcd_dcache_clean_invalidate(qhd, sizeof(ehci_qhd_t));
}
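The ordering in qhd_attach_qtd() is the important part on a cached core: the qTD is cleaned to RAM before its address is written into the overlay, and the QHD is cleaned afterwards so the controller sees both updates. A simplified sketch of how an OUT transfer setup might drive these helpers (not the actual hcd_edpt_xfer(); the function and local names are illustrative):

// Sketch of the cache ordering when queueing an OUT transfer (simplified, illustrative only).
static bool queue_out_transfer_sketch(ehci_qhd_t* qhd, uint8_t const* buffer, uint16_t len) {
  ehci_qtd_t* qtd = qtd_find_free();
  if (qtd == NULL) return false;

  qtd_init(qtd, buffer, len);
  qtd->pid = EHCI_PID_OUT;

  // 1) push the data buffer to RAM so the HC reads what the CPU wrote
  hcd_dcache_clean(buffer, len);

  // 2) qhd_attach_qtd() cleans the qTD, links it into the overlay, then cleans the QHD,
  //    so the HC never sees a link to a descriptor that only exists in the CPU cache
  qhd_attach_qtd(qhd, qtd);
  return true;
}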
//------------- TD helper -------------//
static inline ehci_qtd_t *qtd_find_free(void) {
for (uint32_t i = 0; i < QTD_MAX; i++) {
if (!ehci_data.qtd_pool[i].used) return &ehci_data.qtd_pool[i];
}
return NULL;
}
static void qtd_init(ehci_qtd_t* qtd, void const* buffer, uint16_t total_bytes)
{
  tu_memclr(qtd, sizeof(ehci_qtd_t));
  qtd->used = 1;

  qtd->next.terminate = 1; // init to null
  qtd->alternate.terminate = 1; // not used, always set to terminated
  qtd->active = 1;
  qtd->err_count = 3; // TODO 3 consecutive errors tolerance
  qtd->data_toggle = 0;
  qtd->int_on_complete = 1;
  qtd->total_bytes = total_bytes;
  qtd->expected_bytes = total_bytes;

  qtd->buffer[0] = (uint32_t) buffer;
  for(uint8_t i=1; i<5; i++)
  {
    qtd->buffer[i] |= tu_align4k(qtd->buffer[i - 1] ) + 4096;
}
}
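An EHCI qTD addresses at most five 4 KiB pages: buffer[0] keeps the full start address including the in-page offset, while buffer[1..4] only need the following page-aligned addresses, which is what the tu_align4k(...) + 4096 loop produces. A tiny worked example, independent of the driver types:

// Worked example of the EHCI qTD page pointer layout (up to 5 x 4 KiB pages per qTD).
#include <stdint.h>
#include <stdio.h>

#define ALIGN_4K(x) ((x) & ~0xFFFu)

int main(void) {
  uint32_t buffer_addr = 0x20001234u;   // start address, not page aligned
  uint32_t page[5];

  page[0] = buffer_addr;                        // page 0 keeps the byte offset (0x234)
  for (int i = 1; i < 5; i++) {
    page[i] = ALIGN_4K(page[i - 1]) + 4096u;    // subsequent pages: next 4 KiB boundary
  }

  for (int i = 0; i < 5; i++) {
    printf("buffer[%d] = 0x%08x\n", i, (unsigned) page[i]);
  }
  // -> 0x20001234, 0x20002000, 0x20003000, 0x20004000, 0x20005000
  return 0;
}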
//------------- List Managing Helper -------------//
// insert at head
static inline void list_insert(ehci_link_t *current, ehci_link_t *new, uint8_t new_type)
{
new->address = current->address;
current->address = ((uint32_t) new) | (new_type << 1);
}
static inline ehci_link_t* list_next(ehci_link_t const *p_link)
{
  return (ehci_link_t*) tu_align32(p_link->address);
}
#endif

View File

@ -81,6 +81,8 @@ typedef union {
};
}ehci_link_t;
TU_VERIFY_STATIC( sizeof(ehci_link_t) == 4, "size is not correct" );
/// Queue Element Transfer Descriptor
/// Qtd is used to declare overlay in ehci_qhd_t -> cannot be declared with TU_ATTR_ALIGNED(32)
typedef struct
@ -162,11 +164,12 @@ typedef struct TU_ATTR_ALIGNED(32)
uint8_t pid;
uint8_t interval_ms; // polling interval in frames (or millisecond)
  uint8_t TU_RESERVED[4];

  // Attached TD management; note that usbh only queues 1 TD per QHD.
  // attached_buffer is kept for dcache invalidation, since the TD's buffer pointer is advanced by the HC
  // and recovering the initial buffer address afterwards is not trivial.
  uint32_t attached_buffer;
  ehci_qtd_t * volatile attached_qtd;
} ehci_qhd_t;
TU_VERIFY_STATIC( sizeof(ehci_qhd_t) == 64, "size is not correct" );
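TU_VERIFY_STATIC keeps the layout honest at compile time: the QHD must remain exactly 64 bytes (the 48-byte hardware-defined queue head plus 16 bytes of driver data, a multiple of the 32-byte cache line) even after the software fields are swapped. The same check can be written with C11 _Static_assert; a minimal standalone sketch with stand-in types, not the real ehci_qhd_t:

// Compile-time layout checks with C11 _Static_assert (TU_VERIFY_STATIC is the same idea).
#include <stdint.h>
#include <stddef.h>

typedef struct {
  uint32_t hw_words[12];          // stand-in for the 48-byte hardware-defined queue head
  uint8_t  used, removing, pid, interval_ms;
  uint8_t  reserved[4];
  uint32_t attached_buffer;       // software bookkeeping appended after the hardware part
  uint32_t attached_qtd_addr;     // stored as an address here to keep the size target-independent
} qhd_layout_sketch_t;

_Static_assert(sizeof(qhd_layout_sketch_t) == 64, "QHD must stay 64 bytes (cache-line multiple)");
_Static_assert(offsetof(qhd_layout_sketch_t, used) == 48, "driver data must start after the hardware words");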
@ -245,14 +248,6 @@ typedef struct TU_ATTR_ALIGNED(32)
/// Word 4-5: Buffer Pointer List
uint32_t buffer[2]; // buffer[1] TP: Transaction Position - T-Count: Transaction Count
// union{
// uint32_t BufferPointer1;
// struct {
// volatile uint32_t TCount : 3;
// volatile uint32_t TPosition : 2;
// };
// };
/*---------- Word 6 ----------*/
ehci_link_t back;
@ -268,6 +263,7 @@ TU_VERIFY_STATIC( sizeof(ehci_sitd_t) == 32, "size is not correct" );
// EHCI Operational Register
//--------------------------------------------------------------------+
enum {
  // Bits 0-5 can be masked via the interrupt enable register
EHCI_INT_MASK_USB = TU_BIT(0),
EHCI_INT_MASK_ERROR = TU_BIT(1),
EHCI_INT_MASK_PORT_CHANGE = TU_BIT(2),
@ -276,6 +272,12 @@ enum {
EHCI_INT_MASK_ASYNC_ADVANCE = TU_BIT(5),
EHCI_INT_MASK_NXP_SOF = TU_BIT(7),
EHCI_INT_MASK_HC_HALTED = TU_BIT(12),
EHCI_INT_MASK_RECLAIMATION = TU_BIT(13),
EHCI_INT_MASK_PERIODIC_SCHED_STATUS = TU_BIT(14),
EHCI_INT_MASK_ASYNC_SCHED_STATUS = TU_BIT(15),
EHCI_INT_MASK_NXP_ASYNC = TU_BIT(18),
EHCI_INT_MASK_NXP_PERIODIC = TU_BIT(19),
@ -306,7 +308,7 @@ enum {
EHCI_PORTSC_MASK_PORT_RESET = TU_BIT(8),
ECHI_PORTSC_MASK_PORT_POWER = TU_BIT(12),
EHCI_PORTSC_MASK_W1C =
EHCI_PORTSC_MASK_CONNECT_STATUS_CHANGE |
EHCI_PORTSC_MASK_PORT_ENABLE_CHANGE |
EHCI_PORTSC_MASK_OVER_CURRENT_CHANGE

View File

@ -439,6 +439,10 @@ char const* const tu_str_std_request[] =
"Synch Frame"
};
char const* const tu_str_xfer_result[] = {
"OK", "FAILED", "STALLED", "TIMEOUT"
};
#endif
static void dump_str_line(uint8_t const* buf, uint16_t count)