Merge pull request #2450 from HiFiPhile/vendor_fifo

Allow the vendor class to be used without a FIFO.
This commit is contained in:
Ha Thach 2024-09-11 10:10:06 +07:00 committed by GitHub
commit 4b107a2b9e
9 changed files with 373 additions and 434 deletions
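The application-facing change in this PR: the vendor class RX/TX FIFOs can now be disabled by setting CFG_TUD_VENDOR_RX_BUFSIZE / CFG_TUD_VENDOR_TX_BUFSIZE to 0, and tud_vendor_rx_cb() now delivers the received bytes directly to the application. A minimal sketch (not part of this commit, the echo logic is illustrative only) of adopting the new callback signature:

// Echo received vendor data back to the host using the new callback signature.
void tud_vendor_rx_cb(uint8_t itf, uint8_t const* buffer, uint16_t bufsize) {
  tud_vendor_n_write(itf, buffer, bufsize); // queue data on the IN endpoint
  tud_vendor_n_write_flush(itf);            // start the IN transfer if not already running
#if CFG_TUD_VENDOR_RX_BUFSIZE > 0
  tud_vendor_n_read_flush(itf);             // with an RX FIFO, flush it to make room for the next OUT transfer
#endif
}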

.idea/cmake.xml generated
View File

@ -2,8 +2,8 @@
<project version="4">
<component name="CMakeSharedSettings">
<configurations>
<configuration PROFILE_NAME="pico" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=raspberry_pi_pico -DLOG=1" />
<configuration PROFILE_NAME="pico pio-host" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=raspberry_pi_pico -DLOG=1 -DCFLAGS_CLI=&quot;-DCFG_TUH_RPI_PIO_USB=1&quot;" />
<configuration PROFILE_NAME="raspberry_pi_pico" ENABLED="false" CONFIG_NAME="MinSizeRel" GENERATION_OPTIONS="-DBOARD=raspberry_pi_pico -DLOG=1" />
<configuration PROFILE_NAME="raspberry_pi_pico-pio_host" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=raspberry_pi_pico -DLOG=1 -DCFLAGS_CLI=&quot;-DCFG_TUH_RPI_PIO_USB=1&quot;" />
<configuration PROFILE_NAME="feather_rp2040" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=pico_sdk -DPICO_BOARD=adafruit_feather_rp2040 -DLOG=1" />
<configuration PROFILE_NAME="feather_rp2040_max3421" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=feather_rp2040_max3421 -DLOG=1" />
<configuration PROFILE_NAME="metro_rp2040" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=pico_sdk -DPICO_BOARD=adafruit_metro_rp2040 -DLOG=1 -DMAX3421_HOST=1" />
@ -35,7 +35,7 @@
</envs>
</ADDITIONAL_GENERATION_ENVIRONMENT>
</configuration>
<configuration PROFILE_NAME="espressif_s3_devkitc" ENABLED="false" TOOLCHAIN_NAME="ESP-IDF" GENERATION_OPTIONS="-DBOARD=espressif_s3_devkitc -DMAX3421_HOST=1 -DLOG=1">
<configuration PROFILE_NAME="espressif_s3_devkitc" ENABLED="false" TOOLCHAIN_NAME="ESP-IDF" GENERATION_OPTIONS="-DBOARD=espressif_s3_devkitc -DLOG=1">
<ADDITIONAL_GENERATION_ENVIRONMENT>
<envs>
<env name="ESPBAUD" value="1500000" />
@ -66,7 +66,7 @@
<configuration PROFILE_NAME="feather_m0_express" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=feather_m0_express -DLOG=1 -DLOGGER=RTT -DMAX3421_HOST=1" />
<configuration PROFILE_NAME="metro_m0_express" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=metro_m0_express -DLOG=1 -DLOGGER=RTT -DMAX3421_HOST=1" />
<configuration PROFILE_NAME="feather_m4_express" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=feather_m4_express -DLOG=1 -DLOGGER=RTT -DMAX3421_HOST=1" />
<configuration PROFILE_NAME="metro_m4_express" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=metro_m4_express" />
<configuration PROFILE_NAME="metro_m4_express" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=metro_m4_express -DLOG=1 -DLOGGER=RTT -DMAX3421_HOST=1" />
<configuration PROFILE_NAME="itsybitsy_m4" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=itsybitsy_m4" />
<configuration PROFILE_NAME="same54_xplained" ENABLED="false" GENERATION_OPTIONS="-DBOARD=same54_xplained -DLOG=1 -DLOGGER=RTT" />
<configuration PROFILE_NAME="feather_nrf52840_express" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=feather_nrf52840_express -DLOG=1 -DLOGGER=RTT -DMAX3421_HOST=1" />
@ -140,8 +140,8 @@
<configuration PROFILE_NAME="ch32v307v_r1_1v0" ENABLED="false" GENERATION_OPTIONS="-DBOARD=ch32v307v_r1_1v0 -DLOG=1" />
<configuration PROFILE_NAME="ch32v307v_r1_1v0 USBFS" ENABLED="false" GENERATION_OPTIONS="-DBOARD=ch32v307v_r1_1v0 -DSPEED=full" />
<configuration PROFILE_NAME="da14695_dk_usb" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=da14695_dk_usb" />
<configuration PROFILE_NAME="max32650fthr" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=max32650fthr -DLOG=1 -DLOGGER=RTT" />
<configuration PROFILE_NAME="max32666fthr" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=max32666fthr -DLOG=1 -DLOGGER=RTT" />
<configuration PROFILE_NAME="max32650fthr" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=max32650fthr -DLOG=0 -DLOGGER=RTT" />
<configuration PROFILE_NAME="max32666fthr" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=max32666fthr -DLOG=0 -DLOGGER=RTT" />
<configuration PROFILE_NAME="max32690evkit" ENABLED="false" CONFIG_NAME="Debug" GENERATION_OPTIONS="-DBOARD=max32690evkit -DLOG=1 -DLOGGER=RTT" />
</configurations>
</component>

View File

@ -73,8 +73,7 @@ static uint32_t blink_interval_ms = BLINK_NOT_MOUNTED;
#define URL "example.tinyusb.org/webusb-serial/index.html"
const tusb_desc_webusb_url_t desc_url =
{
const tusb_desc_webusb_url_t desc_url = {
.bLength = 3 + sizeof(URL) - 1,
.bDescriptorType = 3, // WEBUSB URL type
.bScheme = 1, // 0: http, 1: https
@ -86,11 +85,9 @@ static bool web_serial_connected = false;
//------------- prototypes -------------//
void led_blinking_task(void);
void cdc_task(void);
void webserial_task(void);
/*------------- MAIN -------------*/
int main(void)
{
int main(void) {
board_init();
// init device stack on configured roothub port
@ -100,33 +97,28 @@ int main(void)
board_init_after_tusb();
}
while (1)
{
while (1) {
tud_task(); // tinyusb device task
cdc_task();
webserial_task();
led_blinking_task();
}
}
// send characters to both CDC and WebUSB
void echo_all(uint8_t buf[], uint32_t count)
{
void echo_all(const uint8_t buf[], uint32_t count) {
// echo to web serial
if ( web_serial_connected )
{
if (web_serial_connected) {
tud_vendor_write(buf, count);
tud_vendor_write_flush();
}
// echo to cdc
if ( tud_cdc_connected() )
{
for(uint32_t i=0; i<count; i++)
{
if (tud_cdc_connected()) {
for (uint32_t i = 0; i < count; i++) {
tud_cdc_write_char(buf[i]);
if ( buf[i] == '\r' ) tud_cdc_write_char('\n');
if (buf[i] == '\r') {
tud_cdc_write_char('\n');
}
}
tud_cdc_write_flush();
}
@ -137,29 +129,25 @@ void echo_all(uint8_t buf[], uint32_t count)
//--------------------------------------------------------------------+
// Invoked when device is mounted
void tud_mount_cb(void)
{
void tud_mount_cb(void) {
blink_interval_ms = BLINK_MOUNTED;
}
// Invoked when device is unmounted
void tud_umount_cb(void)
{
void tud_umount_cb(void) {
blink_interval_ms = BLINK_NOT_MOUNTED;
}
// Invoked when usb bus is suspended
// remote_wakeup_en : if host allows us to perform remote wakeup
// Within 7ms, device must draw an average current of less than 2.5 mA from bus
void tud_suspend_cb(bool remote_wakeup_en)
{
(void) remote_wakeup_en;
void tud_suspend_cb(bool remote_wakeup_en) {
(void)remote_wakeup_en;
blink_interval_ms = BLINK_SUSPENDED;
}
// Invoked when usb bus is resumed
void tud_resume_cb(void)
{
void tud_resume_cb(void) {
blink_interval_ms = tud_mounted() ? BLINK_MOUNTED : BLINK_NOT_MOUNTED;
}
@ -170,61 +158,53 @@ void tud_resume_cb(void)
// Invoked when a control transfer occurred on an interface of this class
// Driver responds according to the request and the transfer stage (setup/data/ack)
// return false to stall control endpoint (e.g unsupported request)
bool tud_vendor_control_xfer_cb(uint8_t rhport, uint8_t stage, tusb_control_request_t const * request)
{
bool tud_vendor_control_xfer_cb(uint8_t rhport, uint8_t stage, tusb_control_request_t const* request) {
// nothing to do with DATA & ACK stage
if (stage != CONTROL_STAGE_SETUP) return true;
switch (request->bmRequestType_bit.type)
{
switch (request->bmRequestType_bit.type) {
case TUSB_REQ_TYPE_VENDOR:
switch (request->bRequest)
{
switch (request->bRequest) {
case VENDOR_REQUEST_WEBUSB:
// match vendor request in BOS descriptor
// Get landing page url
return tud_control_xfer(rhport, request, (void*)(uintptr_t) &desc_url, desc_url.bLength);
return tud_control_xfer(rhport, request, (void*)(uintptr_t)&desc_url, desc_url.bLength);
case VENDOR_REQUEST_MICROSOFT:
if ( request->wIndex == 7 )
{
if (request->wIndex == 7) {
// Get Microsoft OS 2.0 compatible descriptor
uint16_t total_len;
memcpy(&total_len, desc_ms_os_20+8, 2);
memcpy(&total_len, desc_ms_os_20 + 8, 2);
return tud_control_xfer(rhport, request, (void*)(uintptr_t) desc_ms_os_20, total_len);
}else
{
return tud_control_xfer(rhport, request, (void*)(uintptr_t)desc_ms_os_20, total_len);
} else {
return false;
}
default: break;
}
break;
break;
case TUSB_REQ_TYPE_CLASS:
if (request->bRequest == 0x22)
{
if (request->bRequest == 0x22) {
// Webserial simulates the CDC_REQUEST_SET_CONTROL_LINE_STATE (0x22) to connect and disconnect.
web_serial_connected = (request->wValue != 0);
// Always lit LED if connected
if ( web_serial_connected )
{
if (web_serial_connected) {
board_led_write(true);
blink_interval_ms = BLINK_ALWAYS_ON;
tud_vendor_write_str("\r\nWebUSB interface connected\r\n");
tud_vendor_write_flush();
}else
{
} else {
blink_interval_ms = BLINK_MOUNTED;
}
// respond with status OK
return tud_control_status(rhport, request);
}
break;
break;
default: break;
}
@ -233,32 +213,24 @@ bool tud_vendor_control_xfer_cb(uint8_t rhport, uint8_t stage, tusb_control_requ
return false;
}
void webserial_task(void)
{
if ( web_serial_connected )
{
if ( tud_vendor_available() )
{
uint8_t buf[64];
uint32_t count = tud_vendor_read(buf, sizeof(buf));
void tud_vendor_rx_cb(uint8_t itf, uint8_t const* buffer, uint16_t bufsize) {
(void) itf;
// echo back to both web serial and cdc
echo_all(buf, count);
}
}
echo_all(buffer, bufsize);
// if RX buffering is enabled, we need to flush the buffer to make room for new data
#if CFG_TUD_VENDOR_RX_BUFSIZE > 0
tud_vendor_read_flush();
#endif
}
//--------------------------------------------------------------------+
// USB CDC
//--------------------------------------------------------------------+
void cdc_task(void)
{
if ( tud_cdc_connected() )
{
void cdc_task(void) {
if (tud_cdc_connected()) {
// connected and there are data available
if ( tud_cdc_available() )
{
if (tud_cdc_available()) {
uint8_t buf[64];
uint32_t count = tud_cdc_read(buf, sizeof(buf));
@ -270,34 +242,30 @@ void cdc_task(void)
}
// Invoked when CDC line state changed, e.g. connected/disconnected
void tud_cdc_line_state_cb(uint8_t itf, bool dtr, bool rts)
{
(void) itf;
void tud_cdc_line_state_cb(uint8_t itf, bool dtr, bool rts) {
(void)itf;
// connected
if ( dtr && rts )
{
if (dtr && rts) {
// print initial message when connected
tud_cdc_write_str("\r\nTinyUSB WebUSB device example\r\n");
}
}
// Invoked when CDC interface received data from host
void tud_cdc_rx_cb(uint8_t itf)
{
(void) itf;
void tud_cdc_rx_cb(uint8_t itf) {
(void)itf;
}
//--------------------------------------------------------------------+
// BLINKING TASK
//--------------------------------------------------------------------+
void led_blinking_task(void)
{
void led_blinking_task(void) {
static uint32_t start_ms = 0;
static bool led_state = false;
// Blink every interval ms
if ( board_millis() - start_ms < blink_interval_ms) return; // not enough time
if (board_millis() - start_ms < blink_interval_ms) return; // not enough time
start_ms += blink_interval_ms;
board_led_write(led_state);

View File

@ -102,7 +102,7 @@
#define CFG_TUD_CDC_TX_BUFSIZE (TUD_OPT_HIGH_SPEED ? 512 : 64)
// Vendor FIFO size of TX and RX
// If not configured vendor endpoints will not be buffered
// If zero: vendor endpoints will not be buffered
#define CFG_TUD_VENDOR_RX_BUFSIZE (TUD_OPT_HIGH_SPEED ? 512 : 64)
#define CFG_TUD_VENDOR_TX_BUFSIZE (TUD_OPT_HIGH_SPEED ? 512 : 64)
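For comparison, a hedged tusb_config.h snippet (not from this diff) that opts into the new unbuffered mode instead:

// Zero disables the vendor FIFOs; reads and writes then go straight through the endpoint buffers.
#define CFG_TUD_VENDOR_RX_BUFSIZE 0
#define CFG_TUD_VENDOR_TX_BUFSIZE 0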

View File

@ -341,14 +341,14 @@ uint32_t tuh_cdc_write(uint8_t idx, void const* buffer, uint32_t bufsize) {
cdch_interface_t* p_cdc = get_itf(idx);
TU_VERIFY(p_cdc);
return tu_edpt_stream_write(&p_cdc->stream.tx, buffer, bufsize);
return tu_edpt_stream_write(p_cdc->daddr, &p_cdc->stream.tx, buffer, bufsize);
}
uint32_t tuh_cdc_write_flush(uint8_t idx) {
cdch_interface_t* p_cdc = get_itf(idx);
TU_VERIFY(p_cdc);
return tu_edpt_stream_write_xfer(&p_cdc->stream.tx);
return tu_edpt_stream_write_xfer(p_cdc->daddr, &p_cdc->stream.tx);
}
bool tuh_cdc_write_clear(uint8_t idx) {
@ -362,7 +362,7 @@ uint32_t tuh_cdc_write_available(uint8_t idx) {
cdch_interface_t* p_cdc = get_itf(idx);
TU_VERIFY(p_cdc);
return tu_edpt_stream_write_available(&p_cdc->stream.tx);
return tu_edpt_stream_write_available(p_cdc->daddr, &p_cdc->stream.tx);
}
//--------------------------------------------------------------------+
@ -373,7 +373,7 @@ uint32_t tuh_cdc_read (uint8_t idx, void* buffer, uint32_t bufsize) {
cdch_interface_t* p_cdc = get_itf(idx);
TU_VERIFY(p_cdc);
return tu_edpt_stream_read(&p_cdc->stream.rx, buffer, bufsize);
return tu_edpt_stream_read(p_cdc->daddr, &p_cdc->stream.rx, buffer, bufsize);
}
uint32_t tuh_cdc_read_available(uint8_t idx) {
@ -395,7 +395,7 @@ bool tuh_cdc_read_clear (uint8_t idx) {
TU_VERIFY(p_cdc);
bool ret = tu_edpt_stream_clear(&p_cdc->stream.rx);
tu_edpt_stream_read_xfer(&p_cdc->stream.rx);
tu_edpt_stream_read_xfer(p_cdc->daddr, &p_cdc->stream.rx);
return ret;
}
@ -677,10 +677,10 @@ bool cdch_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t event, uint32_t
// invoke tx complete callback to possibly refill tx fifo
if (tuh_cdc_tx_complete_cb) tuh_cdc_tx_complete_cb(idx);
if ( 0 == tu_edpt_stream_write_xfer(&p_cdc->stream.tx) ) {
if ( 0 == tu_edpt_stream_write_xfer(daddr, &p_cdc->stream.tx) ) {
// If there is no data left, a ZLP should be sent if:
// - xferred_bytes is multiple of EP Packet size and not zero
tu_edpt_stream_write_zlp_if_needed(&p_cdc->stream.tx, xferred_bytes);
tu_edpt_stream_write_zlp_if_needed(daddr, &p_cdc->stream.tx, xferred_bytes);
}
} else if ( ep_addr == p_cdc->stream.rx.ep_addr ) {
#if CFG_TUH_CDC_FTDI
@ -698,7 +698,7 @@ bool cdch_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t event, uint32_t
if (tuh_cdc_rx_cb) tuh_cdc_rx_cb(idx);
// prepare for next transfer if needed
tu_edpt_stream_read_xfer(&p_cdc->stream.rx);
tu_edpt_stream_read_xfer(daddr, &p_cdc->stream.rx);
}else if ( ep_addr == p_cdc->ep_notif ) {
// TODO handle notification endpoint
}else {
@ -719,9 +719,9 @@ static bool open_ep_stream_pair(cdch_interface_t* p_cdc, tusb_desc_endpoint_t co
TU_ASSERT(tuh_edpt_open(p_cdc->daddr, desc_ep));
if (tu_edpt_dir(desc_ep->bEndpointAddress) == TUSB_DIR_IN) {
tu_edpt_stream_open(&p_cdc->stream.rx, p_cdc->daddr, desc_ep);
tu_edpt_stream_open(&p_cdc->stream.rx, desc_ep);
} else {
tu_edpt_stream_open(&p_cdc->stream.tx, p_cdc->daddr, desc_ep);
tu_edpt_stream_open(&p_cdc->stream.tx, desc_ep);
}
desc_ep = (tusb_desc_endpoint_t const*) tu_desc_next(desc_ep);
@ -763,7 +763,7 @@ static void set_config_complete(cdch_interface_t * p_cdc, uint8_t idx, uint8_t i
if (tuh_cdc_mount_cb) tuh_cdc_mount_cb(idx);
// Prepare for incoming data
tu_edpt_stream_read_xfer(&p_cdc->stream.rx);
tu_edpt_stream_read_xfer(p_cdc->daddr, &p_cdc->stream.rx);
// notify usbh that driver enumeration is complete
usbh_driver_set_config_complete(p_cdc->daddr, itf_num);
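Host-side usage follows the pattern shown in the cdc_host changes above: the device address is no longer cached in the stream at open time but is passed to every stream call. A hedged sketch, with p_cdc standing in for the driver's interface struct as in the diff:

tu_edpt_stream_open(&p_cdc->stream.rx, desc_ep);           // daddr no longer passed at open
tu_edpt_stream_read_xfer(p_cdc->daddr, &p_cdc->stream.rx); // daddr supplied per transfer instead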

View File

@ -36,136 +36,103 @@
//--------------------------------------------------------------------+
// MACRO CONSTANT TYPEDEF
//--------------------------------------------------------------------+
#define BULK_PACKET_SIZE (TUD_OPT_HIGH_SPEED ? 512 : 64)
typedef struct
{
typedef struct {
uint8_t itf_num;
uint8_t ep_in;
uint8_t ep_out;
/*------------- From this point, data is not cleared by bus reset -------------*/
tu_fifo_t rx_ff;
tu_fifo_t tx_ff;
uint8_t rx_ff_buf[CFG_TUD_VENDOR_RX_BUFSIZE];
uint8_t tx_ff_buf[CFG_TUD_VENDOR_TX_BUFSIZE];
OSAL_MUTEX_DEF(rx_ff_mutex);
OSAL_MUTEX_DEF(tx_ff_mutex);
// Endpoint Transfer buffer
CFG_TUSB_MEM_ALIGN uint8_t epout_buf[CFG_TUD_VENDOR_EPSIZE];
CFG_TUSB_MEM_ALIGN uint8_t epin_buf[CFG_TUD_VENDOR_EPSIZE];
CFG_TUD_MEM_ALIGN uint8_t epout_buf[CFG_TUD_VENDOR_EPSIZE];
CFG_TUD_MEM_ALIGN uint8_t epin_buf[CFG_TUD_VENDOR_EPSIZE];
struct {
tu_edpt_stream_t stream;
#if CFG_TUD_VENDOR_TX_BUFSIZE > 0
uint8_t ff_buf[CFG_TUD_VENDOR_TX_BUFSIZE];
#endif
}tx;
struct {
tu_edpt_stream_t stream;
#if CFG_TUD_VENDOR_RX_BUFSIZE > 0
uint8_t ff_buf[CFG_TUD_VENDOR_RX_BUFSIZE];
#endif
} rx;
} vendord_interface_t;
CFG_TUD_MEM_SECTION tu_static vendord_interface_t _vendord_itf[CFG_TUD_VENDOR];
CFG_TUD_MEM_SECTION static vendord_interface_t _vendord_itf[CFG_TUD_VENDOR];
#define ITF_MEM_RESET_SIZE offsetof(vendord_interface_t, rx_ff)
#define ITF_MEM_RESET_SIZE (offsetof(vendord_interface_t, itf_num) + sizeof(((vendord_interface_t *)0)->itf_num))
//--------------------------------------------------------------------
// Application API
//--------------------------------------------------------------------
bool tud_vendor_n_mounted (uint8_t itf)
{
return _vendord_itf[itf].ep_in && _vendord_itf[itf].ep_out;
}
uint32_t tud_vendor_n_available (uint8_t itf)
{
return tu_fifo_count(&_vendord_itf[itf].rx_ff);
}
bool tud_vendor_n_peek(uint8_t itf, uint8_t* u8)
{
return tu_fifo_peek(&_vendord_itf[itf].rx_ff, u8);
bool tud_vendor_n_mounted(uint8_t itf) {
TU_VERIFY(itf < CFG_TUD_VENDOR);
vendord_interface_t* p_itf = &_vendord_itf[itf];
return p_itf->rx.stream.ep_addr || p_itf->tx.stream.ep_addr;
}
//--------------------------------------------------------------------+
// Read API
//--------------------------------------------------------------------+
static void _prep_out_transaction (vendord_interface_t* p_itf)
{
uint32_t tud_vendor_n_available(uint8_t itf) {
TU_VERIFY(itf < CFG_TUD_VENDOR, 0);
vendord_interface_t* p_itf = &_vendord_itf[itf];
return tu_edpt_stream_read_available(&p_itf->rx.stream);
}
bool tud_vendor_n_peek(uint8_t itf, uint8_t* u8) {
TU_VERIFY(itf < CFG_TUD_VENDOR);
vendord_interface_t* p_itf = &_vendord_itf[itf];
return tu_edpt_stream_peek(&p_itf->rx.stream, u8);
}
uint32_t tud_vendor_n_read (uint8_t itf, void* buffer, uint32_t bufsize) {
TU_VERIFY(itf < CFG_TUD_VENDOR, 0);
vendord_interface_t* p_itf = &_vendord_itf[itf];
uint8_t const rhport = 0;
// claim endpoint
TU_VERIFY(usbd_edpt_claim(rhport, p_itf->ep_out), );
// Prepare for incoming data but only allow what we can store in the ring buffer.
uint16_t max_read = tu_fifo_remaining(&p_itf->rx_ff);
if ( max_read >= CFG_TUD_VENDOR_EPSIZE )
{
usbd_edpt_xfer(rhport, p_itf->ep_out, p_itf->epout_buf, CFG_TUD_VENDOR_EPSIZE);
}
else
{
// Release endpoint since we don't make any transfer
usbd_edpt_release(rhport, p_itf->ep_out);
}
return tu_edpt_stream_read(rhport, &p_itf->rx.stream, buffer, bufsize);
}
uint32_t tud_vendor_n_read (uint8_t itf, void* buffer, uint32_t bufsize)
{
void tud_vendor_n_read_flush (uint8_t itf) {
TU_VERIFY(itf < CFG_TUD_VENDOR, );
vendord_interface_t* p_itf = &_vendord_itf[itf];
uint32_t num_read = tu_fifo_read_n(&p_itf->rx_ff, buffer, (uint16_t) bufsize);
_prep_out_transaction(p_itf);
return num_read;
}
uint8_t const rhport = 0;
void tud_vendor_n_read_flush (uint8_t itf)
{
vendord_interface_t* p_itf = &_vendord_itf[itf];
tu_fifo_clear(&p_itf->rx_ff);
_prep_out_transaction(p_itf);
tu_edpt_stream_clear(&p_itf->rx.stream);
tu_edpt_stream_read_xfer(rhport, &p_itf->rx.stream);
}
//--------------------------------------------------------------------+
// Write API
//--------------------------------------------------------------------+
uint32_t tud_vendor_n_write (uint8_t itf, void const* buffer, uint32_t bufsize)
{
uint32_t tud_vendor_n_write (uint8_t itf, void const* buffer, uint32_t bufsize) {
TU_VERIFY(itf < CFG_TUD_VENDOR, 0);
vendord_interface_t* p_itf = &_vendord_itf[itf];
uint16_t ret = tu_fifo_write_n(&p_itf->tx_ff, buffer, (uint16_t) bufsize);
// flush if queue more than packet size
if (tu_fifo_count(&p_itf->tx_ff) >= CFG_TUD_VENDOR_EPSIZE) {
tud_vendor_n_write_flush(itf);
}
return ret;
}
uint32_t tud_vendor_n_write_flush (uint8_t itf)
{
vendord_interface_t* p_itf = &_vendord_itf[itf];
// Skip if usb is not ready yet
TU_VERIFY( tud_ready(), 0 );
// No data to send
if ( !tu_fifo_count(&p_itf->tx_ff) ) return 0;
uint8_t const rhport = 0;
// Claim the endpoint
TU_VERIFY( usbd_edpt_claim(rhport, p_itf->ep_in), 0 );
// Pull data from FIFO
uint16_t const count = tu_fifo_read_n(&p_itf->tx_ff, p_itf->epin_buf, sizeof(p_itf->epin_buf));
if ( count )
{
TU_ASSERT( usbd_edpt_xfer(rhport, p_itf->ep_in, p_itf->epin_buf, count), 0 );
return count;
}else
{
// Release endpoint since we don't make any transfer
// Note: data is dropped if terminal is not connected
usbd_edpt_release(rhport, p_itf->ep_in);
return 0;
}
return tu_edpt_stream_write(rhport, &p_itf->tx.stream, buffer, (uint16_t) bufsize);
}
uint32_t tud_vendor_n_write_available (uint8_t itf)
{
return tu_fifo_remaining(&_vendord_itf[itf].tx_ff);
uint32_t tud_vendor_n_write_flush (uint8_t itf) {
TU_VERIFY(itf < CFG_TUD_VENDOR, 0);
vendord_interface_t* p_itf = &_vendord_itf[itf];
uint8_t const rhport = 0;
return tu_edpt_stream_write_xfer(rhport, &p_itf->tx.stream);
}
uint32_t tud_vendor_n_write_available (uint8_t itf) {
TU_VERIFY(itf < CFG_TUD_VENDOR, 0);
vendord_interface_t* p_itf = &_vendord_itf[itf];
uint8_t const rhport = 0;
return tu_edpt_stream_write_available(rhport, &p_itf->tx.stream);
}
//--------------------------------------------------------------------+
@ -177,70 +144,59 @@ void vendord_init(void) {
for(uint8_t i=0; i<CFG_TUD_VENDOR; i++) {
vendord_interface_t* p_itf = &_vendord_itf[i];
// config fifo
tu_fifo_config(&p_itf->rx_ff, p_itf->rx_ff_buf, CFG_TUD_VENDOR_RX_BUFSIZE, 1, false);
tu_fifo_config(&p_itf->tx_ff, p_itf->tx_ff_buf, CFG_TUD_VENDOR_TX_BUFSIZE, 1, false);
uint8_t* rx_ff_buf =
#if CFG_TUD_VENDOR_RX_BUFSIZE > 0
p_itf->rx.ff_buf;
#else
NULL;
#endif
#if OSAL_MUTEX_REQUIRED
osal_mutex_t mutex_rd = osal_mutex_create(&p_itf->rx_ff_mutex);
osal_mutex_t mutex_wr = osal_mutex_create(&p_itf->tx_ff_mutex);
TU_ASSERT(mutex_rd && mutex_wr,);
tu_edpt_stream_init(&p_itf->rx.stream, false, false, false,
rx_ff_buf, CFG_TUD_VENDOR_RX_BUFSIZE,
p_itf->epout_buf, CFG_TUD_VENDOR_EPSIZE);
tu_fifo_config_mutex(&p_itf->rx_ff, NULL, mutex_rd);
tu_fifo_config_mutex(&p_itf->tx_ff, mutex_wr, NULL);
#endif
uint8_t* tx_ff_buf =
#if CFG_TUD_VENDOR_TX_BUFSIZE > 0
p_itf->tx.ff_buf;
#else
NULL;
#endif
tu_edpt_stream_init(&p_itf->tx.stream, false, true, false,
tx_ff_buf, CFG_TUD_VENDOR_TX_BUFSIZE,
p_itf->epin_buf, CFG_TUD_VENDOR_EPSIZE);
}
}
bool vendord_deinit(void) {
#if OSAL_MUTEX_REQUIRED
for(uint8_t i=0; i<CFG_TUD_VENDOR; i++) {
vendord_interface_t* p_itf = &_vendord_itf[i];
osal_mutex_t mutex_rd = p_itf->rx_ff.mutex_rd;
osal_mutex_t mutex_wr = p_itf->tx_ff.mutex_wr;
if (mutex_rd) {
osal_mutex_delete(mutex_rd);
tu_fifo_config_mutex(&p_itf->rx_ff, NULL, NULL);
}
if (mutex_wr) {
osal_mutex_delete(mutex_wr);
tu_fifo_config_mutex(&p_itf->tx_ff, NULL, NULL);
}
tu_edpt_stream_deinit(&p_itf->rx.stream);
tu_edpt_stream_deinit(&p_itf->tx.stream);
}
#endif
return true;
}
void vendord_reset(uint8_t rhport)
{
void vendord_reset(uint8_t rhport) {
(void) rhport;
for(uint8_t i=0; i<CFG_TUD_VENDOR; i++)
{
for(uint8_t i=0; i<CFG_TUD_VENDOR; i++) {
vendord_interface_t* p_itf = &_vendord_itf[i];
tu_memclr(p_itf, ITF_MEM_RESET_SIZE);
tu_fifo_clear(&p_itf->rx_ff);
tu_fifo_clear(&p_itf->tx_ff);
tu_edpt_stream_clear(&p_itf->rx.stream);
tu_edpt_stream_clear(&p_itf->tx.stream);
}
}
uint16_t vendord_open(uint8_t rhport, tusb_desc_interface_t const * desc_itf, uint16_t max_len)
{
uint16_t vendord_open(uint8_t rhport, tusb_desc_interface_t const * desc_itf, uint16_t max_len) {
TU_VERIFY(TUSB_CLASS_VENDOR_SPECIFIC == desc_itf->bInterfaceClass, 0);
uint8_t const * p_desc = tu_desc_next(desc_itf);
uint8_t const * desc_end = p_desc + max_len;
const uint8_t* p_desc = tu_desc_next(desc_itf);
const uint8_t* desc_end = p_desc + max_len;
// Find available interface
vendord_interface_t* p_vendor = NULL;
for(uint8_t i=0; i<CFG_TUD_VENDOR; i++)
{
if ( _vendord_itf[i].ep_in == 0 && _vendord_itf[i].ep_out == 0 )
{
for(uint8_t i=0; i<CFG_TUD_VENDOR; i++) {
if (!tud_vendor_n_mounted(i)) {
p_vendor = &_vendord_itf[i];
break;
}
@ -248,71 +204,68 @@ uint16_t vendord_open(uint8_t rhport, tusb_desc_interface_t const * desc_itf, ui
TU_VERIFY(p_vendor, 0);
p_vendor->itf_num = desc_itf->bInterfaceNumber;
if (desc_itf->bNumEndpoints)
{
uint8_t found_ep = 0;
while (found_ep < desc_itf->bNumEndpoints) {
// skip non-endpoint descriptors
while ( (TUSB_DESC_ENDPOINT != tu_desc_type(p_desc)) && (p_desc < desc_end) )
{
while ( (TUSB_DESC_ENDPOINT != tu_desc_type(p_desc)) && (p_desc < desc_end) ) {
p_desc = tu_desc_next(p_desc);
}
// Open endpoint pair with usbd helper
TU_ASSERT(usbd_open_edpt_pair(rhport, p_desc, desc_itf->bNumEndpoints, TUSB_XFER_BULK, &p_vendor->ep_out, &p_vendor->ep_in), 0);
p_desc += desc_itf->bNumEndpoints*sizeof(tusb_desc_endpoint_t);
// Prepare for incoming data
if ( p_vendor->ep_out )
{
_prep_out_transaction(p_vendor);
if (p_desc >= desc_end) {
break;
}
if ( p_vendor->ep_in ) tud_vendor_n_write_flush((uint8_t)(p_vendor - _vendord_itf));
const tusb_desc_endpoint_t* desc_ep = (const tusb_desc_endpoint_t*) p_desc;
TU_ASSERT(usbd_edpt_open(rhport, desc_ep));
found_ep++;
if (tu_edpt_dir(desc_ep->bEndpointAddress) == TUSB_DIR_IN) {
tu_edpt_stream_open(&p_vendor->tx.stream, desc_ep);
tud_vendor_n_write_flush((uint8_t)(p_vendor - _vendord_itf));
} else {
tu_edpt_stream_open(&p_vendor->rx.stream, desc_ep);
TU_ASSERT(tu_edpt_stream_read_xfer(rhport, &p_vendor->rx.stream) > 0, 0); // prepare for incoming data
}
p_desc = tu_desc_next(p_desc);
}
return (uint16_t) ((uintptr_t) p_desc - (uintptr_t) desc_itf);
}
bool vendord_xfer_cb(uint8_t rhport, uint8_t ep_addr, xfer_result_t result, uint32_t xferred_bytes)
{
bool vendord_xfer_cb(uint8_t rhport, uint8_t ep_addr, xfer_result_t result, uint32_t xferred_bytes) {
(void) result;
uint8_t itf = 0;
vendord_interface_t* p_itf = _vendord_itf;
for ( ; ; itf++, p_itf++)
{
if (itf >= TU_ARRAY_SIZE(_vendord_itf)) return false;
if ( ( ep_addr == p_itf->ep_out ) || ( ep_addr == p_itf->ep_in ) ) break;
for ( ; ; itf++, p_itf++) {
if (itf >= CFG_TUD_VENDOR) return false;
if ((ep_addr == p_itf->rx.stream.ep_addr) || (ep_addr == p_itf->tx.stream.ep_addr)) break;
}
if ( ep_addr == p_itf->ep_out )
{
// Receive new data
tu_fifo_write_n(&p_itf->rx_ff, p_itf->epout_buf, (uint16_t) xferred_bytes);
if ( ep_addr == p_itf->rx.stream.ep_addr ) {
// Received new data: put into stream's fifo
tu_edpt_stream_read_xfer_complete(&p_itf->rx.stream, xferred_bytes);
// Invoked callback if any
if (tud_vendor_rx_cb) tud_vendor_rx_cb(itf);
_prep_out_transaction(p_itf);
}
else if ( ep_addr == p_itf->ep_in )
{
if (tud_vendor_tx_cb) tud_vendor_tx_cb(itf, (uint16_t) xferred_bytes);
// Send complete, try to send more if possible
if ( 0 == tud_vendor_n_write_flush(itf) )
{
// If there is no data left, a ZLP should be sent if
// xferred_bytes is multiple of EP Packet size and not zero
if ( !tu_fifo_count(&p_itf->tx_ff) && xferred_bytes && (0 == (xferred_bytes & (BULK_PACKET_SIZE-1))) )
{
if ( usbd_edpt_claim(rhport, p_itf->ep_in) )
{
usbd_edpt_xfer(rhport, p_itf->ep_in, NULL, 0);
}
}
if (tud_vendor_rx_cb) {
tud_vendor_rx_cb(itf, p_itf->epout_buf, (uint16_t) xferred_bytes);
}
tu_edpt_stream_read_xfer(rhport, &p_itf->rx.stream);
} else if ( ep_addr == p_itf->tx.stream.ep_addr ) {
// Send complete
if (tud_vendor_tx_cb) {
tud_vendor_tx_cb(itf, (uint16_t) xferred_bytes);
}
#if CFG_TUD_VENDOR_TX_BUFSIZE > 0
// try to send more if possible
if ( 0 == tu_edpt_stream_write_xfer(rhport, &p_itf->tx.stream) ) {
// If there is no data left, a ZLP should be sent if xferred_bytes is multiple of EP Packet size and not zero
tu_edpt_stream_write_zlp_if_needed(rhport, &p_itf->tx.stream, xferred_bytes);
}
#endif
}
return true;

View File

@ -33,15 +33,24 @@
#define CFG_TUD_VENDOR_EPSIZE 64
#endif
// RX FIFO can be disabled by setting this value to 0
#ifndef CFG_TUD_VENDOR_RX_BUFSIZE
#define CFG_TUD_VENDOR_RX_BUFSIZE 64
#endif
// TX FIFO can be disabled by setting this value to 0
#ifndef CFG_TUD_VENDOR_TX_BUFSIZE
#define CFG_TUD_VENDOR_TX_BUFSIZE 64
#endif
#ifdef __cplusplus
extern "C" {
#endif
//--------------------------------------------------------------------+
// Application API (Multiple Interfaces)
// Application API (Multiple Interfaces) i.e CFG_TUD_VENDOR > 1
//--------------------------------------------------------------------+
bool tud_vendor_n_mounted (uint8_t itf);
uint32_t tud_vendor_n_available (uint8_t itf);
uint32_t tud_vendor_n_read (uint8_t itf, void* buffer, uint32_t bufsize);
bool tud_vendor_n_peek (uint8_t itf, uint8_t* ui8);
@ -51,23 +60,56 @@ uint32_t tud_vendor_n_write (uint8_t itf, void const* buffer, uint32_t
uint32_t tud_vendor_n_write_flush (uint8_t itf);
uint32_t tud_vendor_n_write_available (uint8_t itf);
static inline uint32_t tud_vendor_n_write_str (uint8_t itf, char const* str);
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_n_write_str (uint8_t itf, char const* str);
// backward compatible
#define tud_vendor_n_flush(itf) tud_vendor_n_write_flush(itf)
//--------------------------------------------------------------------+
// Application API (Single Port)
// Application API (Single Port) i.e CFG_TUD_VENDOR = 1
//--------------------------------------------------------------------+
static inline bool tud_vendor_mounted (void);
static inline uint32_t tud_vendor_available (void);
static inline uint32_t tud_vendor_read (void* buffer, uint32_t bufsize);
static inline bool tud_vendor_peek (uint8_t* ui8);
static inline void tud_vendor_read_flush (void);
static inline uint32_t tud_vendor_write (void const* buffer, uint32_t bufsize);
static inline uint32_t tud_vendor_write_str (char const* str);
static inline uint32_t tud_vendor_write_available (void);
static inline uint32_t tud_vendor_write_flush (void);
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_n_write_str(uint8_t itf, char const* str) {
return tud_vendor_n_write(itf, str, strlen(str));
}
TU_ATTR_ALWAYS_INLINE static inline bool tud_vendor_mounted(void) {
return tud_vendor_n_mounted(0);
}
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_available(void) {
return tud_vendor_n_available(0);
}
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_read(void* buffer, uint32_t bufsize) {
return tud_vendor_n_read(0, buffer, bufsize);
}
TU_ATTR_ALWAYS_INLINE static inline bool tud_vendor_peek(uint8_t* ui8) {
return tud_vendor_n_peek(0, ui8);
}
TU_ATTR_ALWAYS_INLINE static inline void tud_vendor_read_flush(void) {
tud_vendor_n_read_flush(0);
}
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_write(void const* buffer, uint32_t bufsize) {
return tud_vendor_n_write(0, buffer, bufsize);
}
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_write_str(char const* str) {
return tud_vendor_n_write_str(0, str);
}
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_write_flush(void) {
return tud_vendor_n_write_flush(0);
}
#if CFG_TUD_VENDOR_TX_BUFSIZE > 0
TU_ATTR_ALWAYS_INLINE static inline uint32_t tud_vendor_write_available(void) {
return tud_vendor_n_write_available(0);
}
#endif
// backward compatible
#define tud_vendor_flush() tud_vendor_write_flush()
@ -77,7 +119,7 @@ static inline uint32_t tud_vendor_write_flush (void);
//--------------------------------------------------------------------+
// Invoked when received new data
TU_ATTR_WEAK void tud_vendor_rx_cb(uint8_t itf);
TU_ATTR_WEAK void tud_vendor_rx_cb(uint8_t itf, uint8_t const* buffer, uint16_t bufsize);
// Invoked when last rx transfer finished
TU_ATTR_WEAK void tud_vendor_tx_cb(uint8_t itf, uint32_t sent_bytes);
@ -85,55 +127,6 @@ TU_ATTR_WEAK void tud_vendor_tx_cb(uint8_t itf, uint32_t sent_bytes);
// Inline Functions
//--------------------------------------------------------------------+
static inline uint32_t tud_vendor_n_write_str (uint8_t itf, char const* str)
{
return tud_vendor_n_write(itf, str, strlen(str));
}
static inline bool tud_vendor_mounted (void)
{
return tud_vendor_n_mounted(0);
}
static inline uint32_t tud_vendor_available (void)
{
return tud_vendor_n_available(0);
}
static inline uint32_t tud_vendor_read (void* buffer, uint32_t bufsize)
{
return tud_vendor_n_read(0, buffer, bufsize);
}
static inline bool tud_vendor_peek (uint8_t* ui8)
{
return tud_vendor_n_peek(0, ui8);
}
static inline void tud_vendor_read_flush(void)
{
tud_vendor_n_read_flush(0);
}
static inline uint32_t tud_vendor_write (void const* buffer, uint32_t bufsize)
{
return tud_vendor_n_write(0, buffer, bufsize);
}
static inline uint32_t tud_vendor_write_flush (void)
{
return tud_vendor_n_write_flush(0);
}
static inline uint32_t tud_vendor_write_str (char const* str)
{
return tud_vendor_n_write_str(0, str);
}
static inline uint32_t tud_vendor_write_available (void)
{
return tud_vendor_n_write_available(0);
}
//--------------------------------------------------------------------+
// Internal Class Driver API

View File

@ -34,32 +34,24 @@
extern "C" {
#endif
typedef struct TU_ATTR_PACKED
{
typedef struct TU_ATTR_PACKED {
volatile uint8_t busy : 1;
volatile uint8_t stalled : 1;
volatile uint8_t claimed : 1;
}tu_edpt_state_t;
typedef struct {
bool is_host; // host or device mode
union {
uint8_t daddr;
uint8_t rhport;
uint8_t hwid;
struct TU_ATTR_PACKED {
uint8_t is_host : 1; // 1: host, 0: device
uint8_t is_mps512 : 1; // 1: 512, 0: 64 since stream is used for Bulk only
};
uint8_t ep_addr;
uint8_t ep_speed;
uint16_t ep_packetsize;
uint16_t ep_bufsize;
// TODO xfer_fifo can skip this buffer
uint8_t* ep_buf;
uint8_t* ep_buf; // TODO xfer_fifo can skip this buffer
tu_fifo_t ff;
// mutex: read if ep rx, write if e tx
// mutex: read if rx, otherwise write
OSAL_MUTEX_DEF(ff_mutexdef);
}tu_edpt_stream_t;
@ -95,18 +87,15 @@ bool tu_edpt_stream_init(tu_edpt_stream_t* s, bool is_host, bool is_tx, bool ove
bool tu_edpt_stream_deinit(tu_edpt_stream_t* s);
// Open a stream for an endpoint
// hwid is either device address (host mode) or rhport (device mode)
TU_ATTR_ALWAYS_INLINE static inline
void tu_edpt_stream_open(tu_edpt_stream_t* s, uint8_t hwid, tusb_desc_endpoint_t const *desc_ep) {
void tu_edpt_stream_open(tu_edpt_stream_t* s, tusb_desc_endpoint_t const *desc_ep) {
tu_fifo_clear(&s->ff);
s->hwid = hwid;
s->ep_addr = desc_ep->bEndpointAddress;
s->ep_packetsize = tu_edpt_packet_size(desc_ep);
s->is_mps512 = (tu_edpt_packet_size(desc_ep) == 512) ? 1 : 0;
}
TU_ATTR_ALWAYS_INLINE static inline
void tu_edpt_stream_close(tu_edpt_stream_t* s) {
s->hwid = 0;
s->ep_addr = 0;
}
@ -121,40 +110,40 @@ bool tu_edpt_stream_clear(tu_edpt_stream_t* s) {
//--------------------------------------------------------------------+
// Write to stream
uint32_t tu_edpt_stream_write(tu_edpt_stream_t* s, void const *buffer, uint32_t bufsize);
uint32_t tu_edpt_stream_write(uint8_t hwid, tu_edpt_stream_t* s, void const *buffer, uint32_t bufsize);
// Start a USB transfer if endpoint is not busy
uint32_t tu_edpt_stream_write_xfer(tu_edpt_stream_t* s);
uint32_t tu_edpt_stream_write_xfer(uint8_t hwid, tu_edpt_stream_t* s);
// Send a zero-length packet if needed
bool tu_edpt_stream_write_zlp_if_needed(tu_edpt_stream_t* s, uint32_t last_xferred_bytes);
bool tu_edpt_stream_write_zlp_if_needed(uint8_t hwid, tu_edpt_stream_t* s, uint32_t last_xferred_bytes);
// Get the number of bytes available for writing
TU_ATTR_ALWAYS_INLINE static inline
uint32_t tu_edpt_stream_write_available(tu_edpt_stream_t* s) {
return (uint32_t) tu_fifo_remaining(&s->ff);
}
// Get the number of bytes available for writing to FIFO
// Note: if no fifo, return endpoint size if not busy, 0 otherwise
uint32_t tu_edpt_stream_write_available(uint8_t hwid, tu_edpt_stream_t* s);
//--------------------------------------------------------------------+
// Stream Read
//--------------------------------------------------------------------+
// Read from stream
uint32_t tu_edpt_stream_read(tu_edpt_stream_t* s, void* buffer, uint32_t bufsize);
uint32_t tu_edpt_stream_read(uint8_t hwid, tu_edpt_stream_t* s, void* buffer, uint32_t bufsize);
// Start a USB transfer if endpoint is not busy
uint32_t tu_edpt_stream_read_xfer(tu_edpt_stream_t* s);
uint32_t tu_edpt_stream_read_xfer(uint8_t hwid, tu_edpt_stream_t* s);
// Must be called in the transfer complete callback
TU_ATTR_ALWAYS_INLINE static inline
void tu_edpt_stream_read_xfer_complete(tu_edpt_stream_t* s, uint32_t xferred_bytes) {
tu_fifo_write_n(&s->ff, s->ep_buf, (uint16_t) xferred_bytes);
if (tu_fifo_depth(&s->ff)) {
tu_fifo_write_n(&s->ff, s->ep_buf, (uint16_t) xferred_bytes);
}
}
// Same as tu_edpt_stream_read_xfer_complete but skip the first n bytes
TU_ATTR_ALWAYS_INLINE static inline
void tu_edpt_stream_read_xfer_complete_offset(tu_edpt_stream_t* s, uint32_t xferred_bytes, uint32_t skip_offset) {
if (skip_offset < xferred_bytes) {
if (tu_fifo_depth(&s->ff) && (skip_offset < xferred_bytes)) {
tu_fifo_write_n(&s->ff, s->ep_buf + skip_offset, (uint16_t) (xferred_bytes - skip_offset));
}
}
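Net effect on the stream helpers: tu_edpt_stream_open() no longer stores the device address or root hub port, and callers pass it (hwid) on every transfer call; with no FIFO configured the complete-callback helpers simply skip the FIFO copy. A hedged device-mode sketch, with stream, desc_ep, rhport and xferred_bytes as placeholder names:

tu_edpt_stream_open(&stream, desc_ep);                     // endpoint info only, no hwid kept in the stream
tu_edpt_stream_read_xfer(rhport, &stream);                 // hwid (rhport in device mode) supplied at transfer time
// later, in the class driver's transfer-complete callback:
tu_edpt_stream_read_xfer_complete(&stream, xferred_bytes); // copies into the FIFO only if one is configured
tu_edpt_stream_read_xfer(rhport, &stream);                 // queue the next OUT transfer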

View File

@ -28,6 +28,7 @@
#include "osal/osal.h"
#include "common/tusb_fifo.h"
#include "common/tusb_private.h"
#ifdef __cplusplus
extern "C" {

View File

@ -216,13 +216,17 @@ uint16_t tu_desc_get_interface_total_len(tusb_desc_interface_t const* desc_itf,
bool tu_edpt_stream_init(tu_edpt_stream_t* s, bool is_host, bool is_tx, bool overwritable,
void* ff_buf, uint16_t ff_bufsize, uint8_t* ep_buf, uint16_t ep_bufsize) {
osal_mutex_t new_mutex = osal_mutex_create(&s->ff_mutexdef);
(void) new_mutex;
(void) is_tx;
s->is_host = is_host;
tu_fifo_config(&s->ff, ff_buf, ff_bufsize, 1, overwritable);
tu_fifo_config_mutex(&s->ff, is_tx ? new_mutex : NULL, is_tx ? NULL : new_mutex);
#if OSAL_MUTEX_REQUIRED
if (ff_buf && ff_bufsize) {
osal_mutex_t new_mutex = osal_mutex_create(&s->ff_mutexdef);
tu_fifo_config_mutex(&s->ff, is_tx ? new_mutex : NULL, is_tx ? NULL : new_mutex);
}
#endif
s->ep_buf = ep_buf;
s->ep_bufsize = ep_bufsize;
@ -239,43 +243,40 @@ bool tu_edpt_stream_deinit(tu_edpt_stream_t* s) {
return true;
}
TU_ATTR_ALWAYS_INLINE static inline
bool stream_claim(tu_edpt_stream_t* s) {
TU_ATTR_ALWAYS_INLINE static inline bool stream_claim(uint8_t hwid, tu_edpt_stream_t* s) {
if (s->is_host) {
#if CFG_TUH_ENABLED
return usbh_edpt_claim(s->daddr, s->ep_addr);
return usbh_edpt_claim(hwid, s->ep_addr);
#endif
} else {
#if CFG_TUD_ENABLED
return usbd_edpt_claim(s->rhport, s->ep_addr);
return usbd_edpt_claim(hwid, s->ep_addr);
#endif
}
return false;
}
TU_ATTR_ALWAYS_INLINE static inline
bool stream_xfer(tu_edpt_stream_t* s, uint16_t count) {
TU_ATTR_ALWAYS_INLINE static inline bool stream_xfer(uint8_t hwid, tu_edpt_stream_t* s, uint16_t count) {
if (s->is_host) {
#if CFG_TUH_ENABLED
return usbh_edpt_xfer(s->daddr, s->ep_addr, count ? s->ep_buf : NULL, count);
return usbh_edpt_xfer(hwid, s->ep_addr, count ? s->ep_buf : NULL, count);
#endif
} else {
#if CFG_TUD_ENABLED
return usbd_edpt_xfer(s->rhport, s->ep_addr, count ? s->ep_buf : NULL, count);
return usbd_edpt_xfer(hwid, s->ep_addr, count ? s->ep_buf : NULL, count);
#endif
}
return false;
}
TU_ATTR_ALWAYS_INLINE static inline
bool stream_release(tu_edpt_stream_t* s) {
TU_ATTR_ALWAYS_INLINE static inline bool stream_release(uint8_t hwid, tu_edpt_stream_t* s) {
if (s->is_host) {
#if CFG_TUH_ENABLED
return usbh_edpt_release(s->daddr, s->ep_addr);
return usbh_edpt_release(hwid, s->ep_addr);
#endif
} else {
#if CFG_TUD_ENABLED
return usbd_edpt_release(s->rhport, s->ep_addr);
return usbd_edpt_release(hwid, s->ep_addr);
#endif
}
return false;
@ -284,83 +285,117 @@ bool stream_release(tu_edpt_stream_t* s) {
//--------------------------------------------------------------------+
// Stream Write
//--------------------------------------------------------------------+
bool tu_edpt_stream_write_zlp_if_needed(tu_edpt_stream_t* s, uint32_t last_xferred_bytes) {
bool tu_edpt_stream_write_zlp_if_needed(uint8_t hwid, tu_edpt_stream_t* s, uint32_t last_xferred_bytes) {
// ZLP condition: no pending data, last transferred bytes is multiple of packet size
TU_VERIFY(!tu_fifo_count(&s->ff) && last_xferred_bytes && (0 == (last_xferred_bytes & (s->ep_packetsize - 1))));
TU_VERIFY(stream_claim(s));
TU_ASSERT(stream_xfer(s, 0));
const uint16_t mps = s->is_mps512 ? TUSB_EPSIZE_BULK_HS : TUSB_EPSIZE_BULK_FS;
TU_VERIFY(!tu_fifo_count(&s->ff) && last_xferred_bytes && (0 == (last_xferred_bytes & (mps - 1))));
TU_VERIFY(stream_claim(hwid, s));
TU_ASSERT(stream_xfer(hwid, s, 0));
return true;
}
uint32_t tu_edpt_stream_write_xfer(tu_edpt_stream_t* s) {
uint32_t tu_edpt_stream_write_xfer(uint8_t hwid, tu_edpt_stream_t* s) {
// skip if no data
TU_VERIFY(tu_fifo_count(&s->ff), 0);
// Claim the endpoint
TU_VERIFY(stream_claim(s), 0);
TU_VERIFY(stream_claim(hwid, s), 0);
// Pull data from FIFO -> EP buf
uint16_t const count = tu_fifo_read_n(&s->ff, s->ep_buf, s->ep_bufsize);
if (count) {
TU_ASSERT(stream_xfer(s, count), 0);
TU_ASSERT(stream_xfer(hwid, s, count), 0);
return count;
} else {
// Release endpoint since we don't make any transfer
// Note: data is dropped if terminal is not connected
stream_release(s);
stream_release(hwid, s);
return 0;
}
}
uint32_t tu_edpt_stream_write(tu_edpt_stream_t* s, void const* buffer, uint32_t bufsize) {
uint32_t tu_edpt_stream_write(uint8_t hwid, tu_edpt_stream_t* s, void const* buffer, uint32_t bufsize) {
TU_VERIFY(bufsize); // TODO support ZLP
uint16_t ret = tu_fifo_write_n(&s->ff, buffer, (uint16_t) bufsize);
// flush if fifo has more than packet size or
// in rare case: fifo depth is configured too small (which never reach packet size)
if ((tu_fifo_count(&s->ff) >= s->ep_packetsize) || (tu_fifo_depth(&s->ff) < s->ep_packetsize)) {
tu_edpt_stream_write_xfer(s);
if (0 == tu_fifo_depth(&s->ff)) {
// no FIFO configured: write directly through the endpoint buffer
TU_VERIFY(stream_claim(hwid, s), 0);
const uint32_t xact_len = tu_min32(bufsize, s->ep_bufsize);
memcpy(s->ep_buf, buffer, xact_len);
TU_ASSERT(stream_xfer(hwid, s, (uint16_t) xact_len), 0);
return xact_len;
} else {
const uint16_t ret = tu_fifo_write_n(&s->ff, buffer, (uint16_t) bufsize);
// flush if fifo has more than packet size or
// in rare case: fifo depth is configured too small (which never reach packet size)
const uint16_t mps = s->is_mps512 ? TUSB_EPSIZE_BULK_HS : TUSB_EPSIZE_BULK_FS;
if ((tu_fifo_count(&s->ff) >= mps) || (tu_fifo_depth(&s->ff) < mps)) {
tu_edpt_stream_write_xfer(hwid, s);
}
return ret;
}
}
return ret;
uint32_t tu_edpt_stream_write_available(uint8_t hwid, tu_edpt_stream_t* s) {
if (tu_fifo_depth(&s->ff)) {
return (uint32_t) tu_fifo_remaining(&s->ff);
} else {
bool is_busy = true;
if (s->is_host) {
#if CFG_TUH_ENABLED
is_busy = usbh_edpt_busy(hwid, s->ep_addr);
#endif
} else {
#if CFG_TUD_ENABLED
is_busy = usbd_edpt_busy(hwid, s->ep_addr);
#endif
}
return is_busy ? 0 : s->ep_bufsize;
}
}
//--------------------------------------------------------------------+
// Stream Read
//--------------------------------------------------------------------+
uint32_t tu_edpt_stream_read_xfer(tu_edpt_stream_t* s) {
uint16_t available = tu_fifo_remaining(&s->ff);
// Prepare for incoming data but only allow what we can store in the ring buffer.
// TODO Actually we can still carry out the transfer, keeping count of received bytes
// and slowly move it to the FIFO when read().
// This pre-check reduces endpoint claiming
TU_VERIFY(available >= s->ep_packetsize);
// claim endpoint
TU_VERIFY(stream_claim(s), 0);
// get available again since fifo can be changed before endpoint is claimed
available = tu_fifo_remaining(&s->ff);
if (available >= s->ep_packetsize) {
// multiple of packet size limit by ep bufsize
uint16_t count = (uint16_t) (available & ~(s->ep_packetsize - 1));
count = tu_min16(count, s->ep_bufsize);
TU_ASSERT(stream_xfer(s, count), 0);
return count;
uint32_t tu_edpt_stream_read_xfer(uint8_t hwid, tu_edpt_stream_t* s) {
if (0 == tu_fifo_depth(&s->ff)) {
// no FIFO configured: read directly through the endpoint buffer
TU_VERIFY(stream_claim(hwid, s), 0);
TU_ASSERT(stream_xfer(hwid, s, s->ep_bufsize), 0);
return s->ep_bufsize;
} else {
// Release endpoint since we don't make any transfer
stream_release(s);
return 0;
const uint16_t mps = s->is_mps512 ? TUSB_EPSIZE_BULK_HS : TUSB_EPSIZE_BULK_FS;
uint16_t available = tu_fifo_remaining(&s->ff);
// Prepare for incoming data but only allow what we can store in the ring buffer.
// TODO Actually we can still carry out the transfer, keeping count of received bytes
// and slowly move it to the FIFO when read().
// This pre-check reduces endpoint claiming
TU_VERIFY(available >= mps);
TU_VERIFY(stream_claim(hwid, s), 0);
// get available again since fifo can be changed before endpoint is claimed
available = tu_fifo_remaining(&s->ff);
if (available >= mps) {
// multiple of packet size limit by ep bufsize
uint16_t count = (uint16_t) (available & ~(mps - 1));
count = tu_min16(count, s->ep_bufsize);
TU_ASSERT(stream_xfer(hwid, s, count), 0);
return count;
} else {
// Release endpoint since we don't make any transfer
stream_release(hwid, s);
return 0;
}
}
}
uint32_t tu_edpt_stream_read(tu_edpt_stream_t* s, void* buffer, uint32_t bufsize) {
uint32_t tu_edpt_stream_read(uint8_t hwid, tu_edpt_stream_t* s, void* buffer, uint32_t bufsize) {
uint32_t num_read = tu_fifo_read_n(&s->ff, buffer, (uint16_t) bufsize);
tu_edpt_stream_read_xfer(s);
tu_edpt_stream_read_xfer(hwid, s);
return num_read;
}
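Usage note as a hedged sketch (data and len are placeholders, not from this diff): with the TX FIFO disabled, write_available reports the endpoint buffer size while the IN endpoint is idle and 0 while a transfer is pending, so an application can gate its writes accordingly:

if (tud_vendor_n_write_available(0) >= len) {
  // unbuffered path: data is copied into the endpoint buffer and the IN transfer starts immediately
  uint32_t sent = tud_vendor_n_write(0, data, len); // at most one endpoint buffer is accepted per call
  (void) sent;
}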