Merge branch 'master' into h7rs

HiFiPhile committed 2025-05-15 21:40:36 +02:00
95 changed files with 2150 additions and 1945 deletions


@@ -672,7 +672,7 @@ void cdch_close(uint8_t daddr) {
bool cdch_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t event, uint32_t xferred_bytes) {
// TODO handle stall response, retry failed transfer ...
TU_ASSERT(event == XFER_RESULT_SUCCESS);
TU_VERIFY(event == XFER_RESULT_SUCCESS);
uint8_t const idx = get_idx_by_ep_addr(daddr, ep_addr);
cdch_interface_t * p_cdc = get_itf(idx);
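The TU_ASSERT → TU_VERIFY swap above only changes how a failed transfer result is reported: both macros bail out of the callback when the condition is false, but the assert form is the noisy one in debug builds while verify fails quietly. A minimal, self-contained sketch of that distinction (simplified stand-ins, not the exact tusb_verify.h definitions):

#include <stdbool.h>
#include <stdio.h>

// Simplified stand-ins for TinyUSB's verify/assert helpers: both return false
// from the calling function on failure, only the assert variant is noisy.
#define TU_VERIFY_SKETCH(cond)  do { if (!(cond)) { return false; } } while (0)
#define TU_ASSERT_SKETCH(cond)  do { if (!(cond)) { printf("assert failed: %s\n", #cond); return false; } } while (0)

// A failed transfer is an expected runtime condition, so the driver callback
// now takes the quiet "verify" path instead of the assert path.
static bool xfer_cb_sketch(int event_success) {
  TU_VERIFY_SKETCH(event_success);
  return true;
}

int main(void) {
  printf("%d\n", xfer_cb_sketch(0)); // prints 0: callback bails out quietly
  return 0;
}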


@@ -444,7 +444,7 @@ bool hidh_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t result, uint32_t
hidh_epbuf_t* epbuf = get_hid_epbuf(idx);
if (dir == TUSB_DIR_IN) {
TU_LOG_DRV(" Get Report callback (%u, %u)\r\n", daddr, idx);
TU_LOG_DRV(" [idx=%u] Get Report callback\r\n", idx);
TU_LOG3_MEM(epbuf->epin, xferred_bytes, 2);
tuh_hid_report_received_cb(daddr, idx, epbuf->epin, (uint16_t) xferred_bytes);
} else {
@@ -461,7 +461,9 @@ void hidh_close(uint8_t daddr) {
hidh_interface_t* p_hid = &_hidh_itf[i];
if (p_hid->daddr == daddr) {
TU_LOG_DRV(" HIDh close addr = %u index = %u\r\n", daddr, i);
if (tuh_hid_umount_cb) tuh_hid_umount_cb(daddr, i);
if (tuh_hid_umount_cb) {
tuh_hid_umount_cb(daddr, i);
}
tu_memclr(p_hid, sizeof(hidh_interface_t));
}
}


@@ -344,7 +344,7 @@ bool mscd_xfer_cb(uint8_t rhport, uint8_t ep_addr, xfer_result_t event, uint32_t
msc_csw_t * p_csw = &p_msc->csw;
switch (p_msc->stage) {
case MSC_STAGE_CMD:
case MSC_STAGE_CMD: {
//------------- new CBW received -------------//
// Complete IN while waiting for CMD is usually Status of previous SCSI op, ignore it
if (ep_addr != p_msc->ep_out) {
@@ -441,7 +441,8 @@ bool mscd_xfer_cb(uint8_t rhport, uint8_t ep_addr, xfer_result_t event, uint32_t
}
}
}
break;
break;
}
case MSC_STAGE_DATA:
TU_LOG_DRV(" SCSI Data [Lun%u]\r\n", p_cbw->lun);


@@ -108,15 +108,13 @@ typedef struct {
} tu_lookup_table_t;
static inline const char* tu_lookup_find(tu_lookup_table_t const* p_table, uint32_t key) {
tu_static char not_found[11];
for(uint16_t i=0; i<p_table->count; i++) {
if (p_table->items[i].key == key) return p_table->items[i].data;
if (p_table->items[i].key == key) { return p_table->items[i].data; }
}
// not found return the key value in hex
static char not_found[11];
snprintf(not_found, sizeof(not_found), "0x%08lX", (unsigned long) key);
return not_found;
}
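For reference, the 11-byte not_found buffer in tu_lookup_find() is exactly the room "0x%08lX" needs for a 32-bit key: two characters for the "0x" prefix, eight hex digits, and the terminating NUL. A tiny standalone check of that sizing:

#include <stdio.h>
#include <stdint.h>

// Why not_found[11] is enough: "0x" (2) + 8 hex digits + NUL = 11 bytes.
int main(void) {
  char buf[11];
  uint32_t key = 0xDEADBEEF;
  int n = snprintf(buf, sizeof(buf), "0x%08lX", (unsigned long) key);
  printf("%s (%d chars)\n", buf, n); // prints 0xDEADBEEF (10 chars)
  return 0;
}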


@@ -90,13 +90,6 @@ typedef struct {
};
} hcd_event_t;
typedef struct {
uint8_t rhport;
uint8_t hub_addr;
uint8_t hub_port;
uint8_t speed;
} hcd_devtree_info_t;
//--------------------------------------------------------------------+
// Memory API
//--------------------------------------------------------------------+
@@ -186,13 +179,6 @@ bool hcd_edpt_clear_stall(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr);
// USBH implemented API
//--------------------------------------------------------------------+
// Get device tree information of a device
// USB device tree can be complicated and is managed by USBH; this helps HCD retrieve
// the topology info it needs to carry out its work
extern void hcd_devtree_get_info(uint8_t dev_addr, hcd_devtree_info_t* devtree_info);
//------------- Event API -------------//
// Called by HCD to notify stack
extern void hcd_event_handler(hcd_event_t const* event, bool in_isr);
@@ -239,4 +225,4 @@ void hcd_event_xfer_complete(uint8_t dev_addr, uint8_t ep_addr, uint32_t xferred
}
#endif
#endif /* _TUSB_HCD_H_ */
#endif
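hcd_devtree_info_t and hcd_devtree_get_info() are removed from hcd.h here; the usbh.h hunk further down introduces their replacement, tuh_bus_info_t / tuh_bus_info_get(), plus compatibility macros for the old names. A sketch of how a host controller driver queries bus topology under the new API (struct layout copied from the usbh.h hunk; hcd_open_sketch is illustrative only):

#include <stdbool.h>
#include <stdint.h>

typedef struct {
  uint8_t rhport;
  uint8_t hub_addr;   // 0 when attached directly to the roothub
  uint8_t hub_port;
  uint8_t speed;      // a tusb_speed_t value
} tuh_bus_info_t;

// declared by usbh.h in the real stack
bool tuh_bus_info_get(uint8_t daddr, tuh_bus_info_t* bus_info);

void hcd_open_sketch(uint8_t daddr) {
  tuh_bus_info_t bus_info;
  if (tuh_bus_info_get(daddr, &bus_info)) {
    // e.g. program split-transaction hub address/port registers from
    // bus_info.hub_addr / bus_info.hub_port and pick low/full/high speed
    // settings from bus_info.speed
    (void) bus_info;
  }
}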


@@ -57,9 +57,11 @@ typedef struct {
TUH_EPBUF_DEF(ctrl_buf, CFG_TUH_HUB_BUFSIZE);
} hub_epbuf_t;
static tuh_xfer_cb_t user_complete_cb = NULL;
static hub_interface_t hub_itfs[CFG_TUH_HUB];
CFG_TUH_MEM_SECTION static hub_epbuf_t hub_epbufs[CFG_TUH_HUB];
TU_ATTR_ALWAYS_INLINE static inline hub_interface_t* get_hub_itf(uint8_t daddr) {
return &hub_itfs[daddr-1-CFG_TUH_DEVICE_MAX];
}
@@ -142,10 +144,23 @@ bool hub_port_set_feature(uint8_t hub_addr, uint8_t hub_port, uint8_t feature,
};
TU_LOG_DRV("HUB Set Feature: %s, addr = %u port = %u\r\n", _hub_feature_str[feature], hub_addr, hub_port);
TU_ASSERT( tuh_control_xfer(&xfer) );
TU_ASSERT(tuh_control_xfer(&xfer));
return true;
}
static void port_get_status_complete (tuh_xfer_t* xfer) {
if (xfer->result == XFER_RESULT_SUCCESS) {
hub_interface_t* p_hub = get_hub_itf(xfer->daddr);
p_hub->port_status = *((const hub_port_status_response_t *) (uintptr_t) xfer->buffer);
}
xfer->complete_cb = user_complete_cb;
user_complete_cb = NULL;
if (xfer->complete_cb) {
xfer->complete_cb(xfer);
}
}
bool hub_port_get_status(uint8_t hub_addr, uint8_t hub_port, void* resp,
tuh_xfer_cb_t complete_cb, uintptr_t user_data) {
tusb_control_request_t const request = {
@@ -169,8 +184,25 @@ bool hub_port_get_status(uint8_t hub_addr, uint8_t hub_port, void* resp,
.user_data = user_data
};
if (hub_port != 0) {
// intercept complete callback to save port status, ignore resp
hub_epbuf_t* p_epbuf = get_hub_epbuf(hub_addr);
xfer.complete_cb = port_get_status_complete;
xfer.buffer = p_epbuf->ctrl_buf;
user_complete_cb = complete_cb;
} else {
user_complete_cb = NULL;
}
TU_LOG_DRV("HUB Get Port Status: addr = %u port = %u\r\n", hub_addr, hub_port);
TU_VERIFY( tuh_control_xfer(&xfer) );
TU_VERIFY(tuh_control_xfer(&xfer));
return true;
}
bool hub_port_get_status_local(uint8_t hub_addr, uint8_t hub_port, hub_port_status_response_t* resp) {
(void) hub_port;
hub_interface_t* p_hub = get_hub_itf(hub_addr);
*resp = p_hub->port_status;
return true;
}
@@ -238,10 +270,10 @@ bool hub_edpt_status_xfer(uint8_t daddr) {
static void config_set_port_power (tuh_xfer_t* xfer);
static void config_port_power_complete (tuh_xfer_t* xfer);
bool hub_set_config(uint8_t dev_addr, uint8_t itf_num) {
hub_interface_t* p_hub = get_hub_itf(dev_addr);
bool hub_set_config(uint8_t daddr, uint8_t itf_num) {
hub_interface_t* p_hub = get_hub_itf(daddr);
TU_ASSERT(itf_num == p_hub->itf_num);
hub_epbuf_t* p_epbuf = get_hub_epbuf(dev_addr);
hub_epbuf_t* p_epbuf = get_hub_epbuf(daddr);
// Get Hub Descriptor
tusb_control_request_t const request = {
@@ -257,7 +289,7 @@ bool hub_set_config(uint8_t dev_addr, uint8_t itf_num) {
};
tuh_xfer_t xfer = {
.daddr = dev_addr,
.daddr = daddr,
.ep_addr = 0,
.setup = &request,
.buffer = p_epbuf->ctrl_buf,
@@ -312,11 +344,15 @@ static void config_port_power_complete (tuh_xfer_t* xfer) {
//--------------------------------------------------------------------+
// Connection Changes
//--------------------------------------------------------------------+
static void get_status_complete (tuh_xfer_t* xfer);
static void port_get_status_complete (tuh_xfer_t* xfer);
static void port_clear_feature_complete_stub(tuh_xfer_t* xfer);
static void connection_clear_conn_change_complete (tuh_xfer_t* xfer);
static void connection_port_reset_complete (tuh_xfer_t* xfer);
enum {
STATE_IDLE = 0,
STATE_HUB_STATUS,
STATE_CLEAR_CHANGE,
STATE_CHECK_CONN,
STATE_COMPLETE
};
static void process_new_status(tuh_xfer_t* xfer);
// callback as response of interrupt endpoint polling
bool hub_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t result, uint32_t xferred_bytes) {
@@ -337,12 +373,12 @@ bool hub_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t result, uint32_t
processed = false;
} else if (tu_bit_test(status_change, 0)) {
// Hub bit 0 is for the hub device events
processed = hub_get_status(daddr, p_epbuf->ctrl_buf, get_status_complete, 0);
processed = hub_get_status(daddr, p_epbuf->ctrl_buf, process_new_status, STATE_HUB_STATUS);
} else {
// Hub bits 1 to n are hub port events
for (uint8_t port=1; port <= p_hub->bNbrPorts; port++) {
if (tu_bit_test(status_change, port)) {
processed = hub_port_get_status(daddr, port, p_epbuf->ctrl_buf, port_get_status_complete, 0);
processed = hub_port_get_status(daddr, port, NULL, process_new_status, STATE_CLEAR_CHANGE);
break; // after one port is completely processed, we will re-queue the status poll and handle the next one
}
}
@@ -358,117 +394,85 @@ bool hub_xfer_cb(uint8_t daddr, uint8_t ep_addr, xfer_result_t result, uint32_t
return true;
}
static void port_clear_feature_complete_stub(tuh_xfer_t* xfer) {
hub_edpt_status_xfer(xfer->daddr);
}
static void get_status_complete(tuh_xfer_t *xfer) {
const uint8_t daddr = xfer->daddr;
bool processed = false; // true if new status is processed
if (xfer->result == XFER_RESULT_SUCCESS) {
hub_status_response_t hub_status = *((const hub_status_response_t *) (uintptr_t) xfer->buffer);
TU_LOG_DRV("HUB Got hub status, addr = %u, status = %04x\r\n", daddr, hub_status.change.value);
if (hub_status.change.local_power_source) {
TU_LOG_DRV(" Local Power Change\r\n");
processed = hub_clear_feature(daddr, HUB_FEATURE_HUB_LOCAL_POWER_CHANGE, port_clear_feature_complete_stub, 0);
} else if (hub_status.change.over_current) {
TU_LOG_DRV(" Over Current\r\n");
processed = hub_clear_feature(daddr, HUB_FEATURE_HUB_OVER_CURRENT_CHANGE, port_clear_feature_complete_stub, 0);
}
}
if (!processed) {
TU_ASSERT(hub_edpt_status_xfer(daddr), );
}
}
static void port_get_status_complete(tuh_xfer_t *xfer) {
const uint8_t daddr = xfer->daddr;
bool processed = false; // true if new status is processed
if (xfer->result == XFER_RESULT_SUCCESS) {
const uint8_t port_num = (uint8_t) tu_le16toh(xfer->setup->wIndex);
hub_interface_t *p_hub = get_hub_itf(daddr);
p_hub->port_status = *((const hub_port_status_response_t *) (uintptr_t) xfer->buffer);
// Clear port status change interrupts
if (p_hub->port_status.change.connection) {
// Connection change
// Port is powered and enabled
//TU_VERIFY(port_status.status_current.port_power && port_status.status_current.port_enable, );
// Acknowledge Port Connection Change
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_CONNECTION_CHANGE, connection_clear_conn_change_complete, 0);
} else if (p_hub->port_status.change.port_enable) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_ENABLE_CHANGE, port_clear_feature_complete_stub, 0);
} else if (p_hub->port_status.change.suspend) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_SUSPEND_CHANGE, port_clear_feature_complete_stub, 0);
} else if (p_hub->port_status.change.over_current) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_OVER_CURRENT_CHANGE, port_clear_feature_complete_stub, 0);
} else if (p_hub->port_status.change.reset) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_RESET_CHANGE, port_clear_feature_complete_stub, 0);
}
}
if (!processed) {
TU_ASSERT(hub_edpt_status_xfer(daddr), );
}
}
static void connection_clear_conn_change_complete (tuh_xfer_t* xfer) {
static void process_new_status(tuh_xfer_t* xfer) {
const uint8_t daddr = xfer->daddr;
if (xfer->result != XFER_RESULT_SUCCESS) {
TU_ASSERT(hub_edpt_status_xfer(daddr), );
TU_ASSERT(hub_edpt_status_xfer(daddr),);
return;
}
const uint8_t port_num = (uint8_t) tu_le16toh(xfer->setup->wIndex);
hub_interface_t *p_hub = get_hub_itf(daddr);
const uint8_t port_num = (uint8_t) tu_le16toh(xfer->setup->wIndex);
const uintptr_t state = xfer->user_data;
bool processed = false; // true if new status is processed
if (p_hub->port_status.status.connection) {
// Reset port if attach event
hub_port_reset(daddr, port_num, connection_port_reset_complete, 0);
} else {
// submit detach event
const hcd_event_t event = {
.rhport = usbh_get_rhport(daddr),
.event_id = HCD_EVENT_DEVICE_REMOVE,
.connection = {
.hub_addr = daddr,
.hub_port = port_num
}
};
hcd_event_handler(&event, false);
}
}
static void connection_port_reset_complete (tuh_xfer_t* xfer) {
const uint8_t daddr = xfer->daddr;
if (xfer->result != XFER_RESULT_SUCCESS) {
// retry port reset if failed
if (!tuh_control_xfer(xfer)) {
TU_ASSERT(hub_edpt_status_xfer(daddr), ); // back to status poll if failed to queue request
switch (state) {
case STATE_HUB_STATUS: {
hub_status_response_t hub_status = *((const hub_status_response_t *) (uintptr_t) xfer->buffer);
TU_LOG_DRV("HUB Got hub status, addr = %u, status = %04x\r\n", daddr, hub_status.change.value);
if (hub_status.change.local_power_source) {
TU_LOG_DRV(" Local Power Change\r\n");
processed = hub_clear_feature(daddr, HUB_FEATURE_HUB_LOCAL_POWER_CHANGE,
process_new_status, STATE_COMPLETE);
} else if (hub_status.change.over_current) {
TU_LOG_DRV(" Over Current\r\n");
processed = hub_clear_feature(daddr, HUB_FEATURE_HUB_OVER_CURRENT_CHANGE,
process_new_status, STATE_COMPLETE);
}
break;
}
return;
case STATE_CLEAR_CHANGE:
// Get port status complete --> clear change
if (p_hub->port_status.change.connection) {
// Connection change
// Port is powered and enabled
//TU_VERIFY(port_status.status_current.port_power && port_status.status_current.port_enable, );
// Acknowledge Port Connection Change
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_CONNECTION_CHANGE,
process_new_status, STATE_CHECK_CONN);
} else if (p_hub->port_status.change.port_enable) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_ENABLE_CHANGE,
process_new_status, STATE_COMPLETE);
} else if (p_hub->port_status.change.suspend) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_SUSPEND_CHANGE,
process_new_status, STATE_COMPLETE);
} else if (p_hub->port_status.change.over_current) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_OVER_CURRENT_CHANGE,
process_new_status, STATE_COMPLETE);
} else if (p_hub->port_status.change.reset) {
processed = hub_port_clear_feature(daddr, port_num, HUB_FEATURE_PORT_RESET_CHANGE,
process_new_status, STATE_COMPLETE);
}
break;
case STATE_CHECK_CONN: {
const hcd_event_t event = {
.rhport = usbh_get_rhport(daddr),
.event_id = p_hub->port_status.status.connection ? HCD_EVENT_DEVICE_ATTACH : HCD_EVENT_DEVICE_REMOVE,
.connection = {
.hub_addr = daddr,
.hub_port = port_num
}
};
hcd_event_handler(&event, false);
// skip status poll for attach event, usbh will do it after this enumeration is handled
processed = (event.event_id == HCD_EVENT_DEVICE_ATTACH);
break;
}
case STATE_COMPLETE:
default:
processed = false; // complete this status, queue next status
break;
}
const uint8_t port_num = (uint8_t) tu_le16toh(xfer->setup->wIndex);
// submit attach event
hcd_event_t event = {
.rhport = usbh_get_rhport(daddr),
.event_id = HCD_EVENT_DEVICE_ATTACH,
.connection = {
.hub_addr = daddr,
.hub_port = port_num
}
};
hcd_event_handler(&event, false);
if (!processed) {
TU_ASSERT(hub_edpt_status_xfer(daddr),);
}
}
#endif
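The hub.c hunks above fold the per-step completion callbacks (get_status_complete, port_get_status_complete, connection_clear_conn_change_complete) into a single process_new_status() handler, with the next state carried in the transfer's user_data. A compact, self-contained sketch of that callback-chained state machine pattern (all names below are illustrative; the real stack invokes the callback asynchronously when the control transfer completes):

#include <stdint.h>
#include <stdio.h>

typedef struct sketch_xfer {
  uintptr_t user_data;                       // carries the next state
  void (*complete_cb)(struct sketch_xfer*);  // always process()
} sketch_xfer_t;

enum { ST_HUB_STATUS, ST_CLEAR_CHANGE, ST_CHECK_CONN, ST_COMPLETE };

static void process(sketch_xfer_t* xfer);

// stand-in for queuing the next control request with a chosen follow-up state
static void queue_next(sketch_xfer_t* xfer, uintptr_t next_state) {
  xfer->user_data = next_state;
  xfer->complete_cb = process;
  xfer->complete_cb(xfer); // a real stack would call this on completion
}

static void process(sketch_xfer_t* xfer) {
  switch (xfer->user_data) {
    case ST_CLEAR_CHANGE:
      printf("clear change -> check connection\n");
      queue_next(xfer, ST_CHECK_CONN);
      break;
    case ST_CHECK_CONN:
      printf("attach/detach event -> complete\n");
      queue_next(xfer, ST_COMPLETE);
      break;
    default:
      printf("done, re-queue status poll\n");
      break;
  }
}

int main(void) {
  sketch_xfer_t xfer = { .user_data = ST_CLEAR_CHANGE, .complete_cb = process };
  process(&xfer);
  return 0;
}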


@@ -170,9 +170,13 @@ bool hub_port_set_feature(uint8_t hub_addr, uint8_t hub_port, uint8_t feature,
tuh_xfer_cb_t complete_cb, uintptr_t user_data);
// Get port status
// If hub_port != 0, resp is ignored. hub_port_get_status_local() can be used to retrieve the status
bool hub_port_get_status(uint8_t hub_addr, uint8_t hub_port, void *resp,
tuh_xfer_cb_t complete_cb, uintptr_t user_data);
// Get port status from local cache. This does not send a request to the device
bool hub_port_get_status_local(uint8_t hub_addr, uint8_t hub_port, hub_port_status_response_t* resp);
// Get status from Interrupt endpoint
bool hub_edpt_status_xfer(uint8_t daddr);
@@ -188,7 +192,7 @@ bool hub_port_clear_reset_change(uint8_t hub_addr, uint8_t hub_port, tuh_xfer_cb
return hub_port_clear_feature(hub_addr, hub_port, HUB_FEATURE_PORT_RESET_CHANGE, complete_cb, user_data);
}
// Get Hub status
// Get Hub status (port = 0)
TU_ATTR_ALWAYS_INLINE static inline
bool hub_get_status(uint8_t hub_addr, void* resp, tuh_xfer_cb_t complete_cb, uintptr_t user_data) {
return hub_port_get_status(hub_addr, 0, resp, complete_cb, user_data);
@@ -205,7 +209,7 @@ bool hub_clear_feature(uint8_t hub_addr, uint8_t feature, tuh_xfer_cb_t complete
bool hub_init (void);
bool hub_deinit (void);
bool hub_open (uint8_t rhport, uint8_t dev_addr, tusb_desc_interface_t const *itf_desc, uint16_t max_len);
bool hub_set_config (uint8_t dev_addr, uint8_t itf_num);
bool hub_set_config (uint8_t daddr, uint8_t itf_num);
bool hub_xfer_cb (uint8_t daddr, uint8_t ep_addr, xfer_result_t event, uint32_t xferred_bytes);
void hub_close (uint8_t dev_addr);

File diff suppressed because it is too large.


@@ -47,7 +47,6 @@
// forward declaration
struct tuh_xfer_s;
typedef struct tuh_xfer_s tuh_xfer_t;
typedef void (*tuh_xfer_cb_t)(tuh_xfer_t* xfer);
// Note1: layout and order of this will be changed in near future
@@ -80,6 +79,17 @@ typedef struct {
tusb_desc_interface_t desc;
} tuh_itf_info_t;
typedef struct {
uint8_t rhport;
uint8_t hub_addr;
uint8_t hub_port;
uint8_t speed;
} tuh_bus_info_t;
// backward compatibility for hcd_devtree_info_t, may be removed in the future
#define hcd_devtree_info_t tuh_bus_info_t
#define hcd_devtree_get_info(_daddr, _bus_info) tuh_bus_info_get(_daddr, _bus_info)
// ConfigID for tuh_configure()
enum {
TUH_CFGID_INVALID = 0,
@@ -177,6 +187,8 @@ extern void hcd_int_handler(uint8_t rhport, bool in_isr);
#define _tuh_int_handler_arg0() TU_VERIFY_STATIC(false, "tuh_int_handler() must have 1 or 2 arguments")
#define _tuh_int_handler_arg1(_rhport) hcd_int_handler(_rhport, true)
#define _tuh_int_handler_arg2(_rhport, _in_isr) hcd_int_handler(_rhport, _in_isr)
// 1st argument is rhport (mandatory), 2nd argument in_isr (optional)
#define tuh_int_handler(...) TU_FUNC_OPTIONAL_ARG(_tuh_int_handler, __VA_ARGS__)
// Check if roothub port is initialized and active as a host
@@ -214,6 +226,9 @@ TU_ATTR_ALWAYS_INLINE static inline bool tuh_ready(uint8_t daddr) {
return tuh_mounted(daddr) && !tuh_suspended(daddr);
}
// Get bus information of device
bool tuh_bus_info_get(uint8_t daddr, tuh_bus_info_t* bus_info);
//--------------------------------------------------------------------+
// Transfer API
//--------------------------------------------------------------------+
@@ -238,6 +253,10 @@ bool tuh_edpt_close(uint8_t daddr, uint8_t ep_addr);
// Return true if a queued transfer is aborted, false if there is no transfer to abort
bool tuh_edpt_abort_xfer(uint8_t daddr, uint8_t ep_addr);
// Set Address (control transfer)
bool tuh_address_set(uint8_t daddr, uint8_t new_addr,
tuh_xfer_cb_t complete_cb, uintptr_t user_data);
// Set Configuration (control transfer)
// config_num = 0 will un-configure device. Note: config_num = config_descriptor_index + 1
// true on success, false if there is on-going control transfer or incorrect parameters
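tuh_int_handler() above dispatches on argument count so it can be called with just the roothub port or with an explicit in_isr flag. A standalone sketch of that variadic-macro selection trick (illustrative names, not TinyUSB's TU_FUNC_OPTIONAL_ARG machinery):

#include <stdio.h>

// GET_3RD selects HANDLER_ARG1 or HANDLER_ARG2 depending on how many
// arguments the caller supplied; the trailing _unused keeps the variadic
// tail non-empty.
#define GET_3RD(_1, _2, _3, ...) _3
#define HANDLER_ARG1(_rhport)          printf("irq rhport=%d in_isr=1\n", (_rhport))
#define HANDLER_ARG2(_rhport, _in_isr) printf("irq rhport=%d in_isr=%d\n", (_rhport), (_in_isr))
#define HANDLER(...) GET_3RD(__VA_ARGS__, HANDLER_ARG2, HANDLER_ARG1, _unused)(__VA_ARGS__)

int main(void) {
  HANDLER(0);    // one argument  -> expands to HANDLER_ARG1(0)
  HANDLER(1, 0); // two arguments -> expands to HANDLER_ARG2(1, 0)
  return 0;
}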


@@ -63,7 +63,7 @@ usbh_class_driver_t const* usbh_app_driver_get_cb(uint8_t* driver_count) TU_ATTR
// Called by class driver to tell USBH that it has completed the enumeration
void usbh_driver_set_config_complete(uint8_t dev_addr, uint8_t itf_num);
uint8_t usbh_get_rhport(uint8_t dev_addr);
uint8_t usbh_get_rhport(uint8_t daddr);
uint8_t* usbh_get_enum_buf(void);


@@ -35,6 +35,7 @@
//--------------------------------------------------------------------+
#include "common/tusb_common.h"
#include "host/hcd.h"
#include "host/usbh.h"
#include "portable/ehci/ehci_api.h"
#include "ci_hs_type.h"
@@ -92,12 +93,6 @@ bool hcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
hcd_reg->USBMODE = USBMODE_CM_HOST;
#endif
// FIXME force full speed, still have issues with Highspeed enumeration,
// probably due to physical connection bouncing when plugging/unplugging
// 1. Have issues when plugging/unplugging devices, maybe the port is not reset properly
// 2. Also does not seem to detect disconnection
hcd_reg->PORTSC1 |= PORTSC1_FORCE_FULL_SPEED;
return ehci_init(rhport, (uint32_t) &hcd_reg->CAPLENGTH, (uint32_t) &hcd_reg->USBCMD);
}


@@ -34,6 +34,7 @@
#include "osal/osal.h"
#include "host/hcd.h"
#include "host/usbh.h"
#include "ehci_api.h"
#include "ehci.h"
@@ -837,8 +838,8 @@ static void qhd_init(ehci_qhd_t *p_qhd, uint8_t dev_addr, tusb_desc_endpoint_t c
tu_memclr(p_qhd, sizeof(ehci_qhd_t));
}
hcd_devtree_info_t devtree_info;
hcd_devtree_get_info(dev_addr, &devtree_info);
tuh_bus_info_t bus_info;
tuh_bus_info_get(dev_addr, &bus_info);
uint8_t const xfer_type = ep_desc->bmAttributes.xfer;
uint8_t const interval = ep_desc->bInterval;
@@ -846,7 +847,7 @@ static void qhd_init(ehci_qhd_t *p_qhd, uint8_t dev_addr, tusb_desc_endpoint_t c
p_qhd->dev_addr = dev_addr;
p_qhd->fl_inactive_next_xact = 0;
p_qhd->ep_number = tu_edpt_number(ep_desc->bEndpointAddress);
p_qhd->ep_speed = devtree_info.speed;
p_qhd->ep_speed = bus_info.speed;
p_qhd->data_toggle_control= (xfer_type == TUSB_XFER_CONTROL) ? 1 : 0;
p_qhd->head_list_flag = (dev_addr == 0) ? 1 : 0; // addr0's endpoint is the static async list head
p_qhd->max_packet_size = tu_edpt_packet_size(ep_desc);
@@ -887,8 +888,8 @@ static void qhd_init(ehci_qhd_t *p_qhd, uint8_t dev_addr, tusb_desc_endpoint_t c
default: break;
}
p_qhd->fl_hub_addr = devtree_info.hub_addr;
p_qhd->fl_hub_port = devtree_info.hub_port;
p_qhd->fl_hub_addr = bus_info.hub_addr;
p_qhd->fl_hub_port = bus_info.hub_port;
p_qhd->mult = 1; // TODO not use high bandwidth/park mode yet
//------------- HCD Management Data -------------//


@@ -36,6 +36,7 @@ _Pragma("GCC diagnostic ignored \"-Waddress-of-packed-member\"");
#endif
#include "host/hcd.h"
#include "host/usbh.h"
#include "musb_type.h"
@@ -695,16 +696,16 @@ bool hcd_setup_send(uint8_t rhport, uint8_t dev_addr, uint8_t const setup_packet
_hcd.pipe0.length = 8;
_hcd.pipe0.remaining = 0;
hcd_devtree_info_t devtree;
hcd_devtree_get_info(dev_addr, &devtree);
switch (devtree.speed) {
tuh_bus_info_t bus_info;
tuh_bus_info_get(dev_addr, &bus_info);
switch (bus_info.speed) {
default: return false;
case TUSB_SPEED_LOW: USB0->TYPE0 = USB_TYPE0_SPEED_LOW; break;
case TUSB_SPEED_FULL: USB0->TYPE0 = USB_TYPE0_SPEED_FULL; break;
case TUSB_SPEED_HIGH: USB0->TYPE0 = USB_TYPE0_SPEED_HIGH; break;
}
USB0->TXHUBADDR0 = devtree.hub_addr;
USB0->TXHUBPORT0 = devtree.hub_port;
USB0->TXHUBADDR0 = bus_info.hub_addr;
USB0->TXHUBPORT0 = bus_info.hub_port;
USB0->TXFUNCADDR0 = dev_addr;
USB0->CSRL0 = USB_CSRL0_TXRDY | USB_CSRL0_SETUP;
return true;
@@ -744,9 +745,9 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const
pipe->remaining = 0;
uint8_t pipe_type = 0;
hcd_devtree_info_t devtree;
hcd_devtree_get_info(dev_addr, &devtree);
switch (devtree.speed) {
tuh_bus_info_t bus_info;
tuh_bus_info_get(dev_addr, &bus_info);
switch (bus_info.speed) {
default: return false;
case TUSB_SPEED_LOW: pipe_type |= USB_TXTYPE1_SPEED_LOW; break;
case TUSB_SPEED_FULL: pipe_type |= USB_TXTYPE1_SPEED_FULL; break;
@@ -763,8 +764,8 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const
hw_endpoint_t volatile *regs = edpt_regs(pipenum - 1);
if (dir_tx) {
fadr->TXFUNCADDR = dev_addr;
fadr->TXHUBADDR = devtree.hub_addr;
fadr->TXHUBPORT = devtree.hub_port;
fadr->TXHUBADDR = bus_info.hub_addr;
fadr->TXHUBPORT = bus_info.hub_port;
regs->TXMAXP = mps;
regs->TXTYPE = pipe_type | epn;
regs->TXINTERVAL = ep_desc->bInterval;
@@ -775,8 +776,8 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const
USB0->TXIE |= TU_BIT(pipenum);
} else {
fadr->RXFUNCADDR = dev_addr;
fadr->RXHUBADDR = devtree.hub_addr;
fadr->RXHUBPORT = devtree.hub_port;
fadr->RXHUBADDR = bus_info.hub_addr;
fadr->RXHUBPORT = bus_info.hub_port;
regs->RXMAXP = mps;
regs->RXTYPE = pipe_type | epn;
regs->RXINTERVAL = ep_desc->bInterval;


@@ -36,6 +36,7 @@
#endif
#include "host/hcd.h"
#include "host/usbh.h"
//--------------------------------------------------------------------+
// MACRO TYPEDEF CONSTANT ENUM DECLARATION


@@ -31,6 +31,7 @@
#include "chip.h"
#include "host/hcd.h"
#include "host/usbh.h"
void hcd_int_enable(uint8_t rhport)
{


@@ -38,6 +38,7 @@
#include "osal/osal.h"
#include "host/hcd.h"
#include "host/usbh.h"
#include "ohci.h"
// TODO remove
@@ -328,13 +329,13 @@ static void ed_init(ohci_ed_t *p_ed, uint8_t dev_addr, uint16_t ep_size, uint8_t
tu_memclr(p_ed, sizeof(ohci_ed_t));
}
hcd_devtree_info_t devtree_info;
hcd_devtree_get_info(dev_addr, &devtree_info);
tuh_bus_info_t bus_info;
tuh_bus_info_get(dev_addr, &bus_info);
p_ed->dev_addr = dev_addr;
p_ed->ep_number = ep_addr & 0x0F;
p_ed->pid = (xfer_type == TUSB_XFER_CONTROL) ? PID_FROM_TD : (tu_edpt_dir(ep_addr) ? PID_IN : PID_OUT);
p_ed->speed = devtree_info.speed;
p_ed->speed = bus_info.speed;
p_ed->is_iso = (xfer_type == TUSB_XFER_ISOCHRONOUS) ? 1 : 0;
p_ed->max_packet_size = ep_size;


@@ -114,9 +114,9 @@ void hcd_int_disable(uint8_t rhport) {
//--------------------------------------------------------------------+
bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const *desc_ep) {
hcd_devtree_info_t dev_tree;
hcd_devtree_get_info(dev_addr, &dev_tree);
bool const need_pre = (dev_tree.hub_addr && dev_tree.speed == TUSB_SPEED_LOW);
tuh_bus_info_t bus_info;
tuh_bus_info_get(dev_addr, &bus_info);
bool const need_pre = (bus_info.hub_addr && bus_info.speed == TUSB_SPEED_LOW);
uint8_t const pio_rhport = RHPORT_PIO(rhport);
return pio_usb_host_endpoint_open(pio_rhport, dev_addr, (uint8_t const *) desc_ep, need_pre);


@@ -30,6 +30,7 @@
#if CFG_TUH_ENABLED && defined(TUP_USBIP_RUSB2)
#include "host/hcd.h"
#include "host/usbh.h"
#include "rusb2_type.h"
#if TU_CHECK_MCU(OPT_MCU_RX63X, OPT_MCU_RX65X, OPT_MCU_RX72N)
@@ -662,13 +663,13 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const
if (0 == epn) {
rusb->DCPCTR = RUSB2_PIPE_CTR_PID_NAK;
hcd_devtree_info_t devtree;
hcd_devtree_get_info(dev_addr, &devtree);
tuh_bus_info_t bus_info;
tuh_bus_info_get(dev_addr, &bus_info);
uint16_t volatile *devadd = (uint16_t volatile *)(uintptr_t) &rusb->DEVADD[0];
devadd += dev_addr;
while (rusb->DCPCTR_b.PBUSY) {}
rusb->DCPMAXP = (dev_addr << 12) | mps;
*devadd = (TUSB_SPEED_FULL == devtree.speed) ? RUSB2_DEVADD_USBSPD_FS : RUSB2_DEVADD_USBSPD_LS;
*devadd = (TUSB_SPEED_FULL == bus_info.speed) ? RUSB2_DEVADD_USBSPD_FS : RUSB2_DEVADD_USBSPD_LS;
_hcd.ctl_mps[dev_addr] = mps;
return true;
}


@@ -41,12 +41,6 @@
#include "device/dcd.h"
#include "dwc2_common.h"
#if TU_CHECK_MCU(OPT_MCU_GD32VF103)
#define DWC2_EP_COUNT(_dwc2) DWC2_EP_MAX
#else
#define DWC2_EP_COUNT(_dwc2) ((_dwc2)->ghwcfg2_bm.num_dev_ep + 1)
#endif
//--------------------------------------------------------------------+
// MACRO TYPEDEF CONSTANT ENUM
//--------------------------------------------------------------------+
@@ -79,6 +73,16 @@ CFG_TUD_MEM_SECTION static struct {
TUD_EPBUF_DEF(setup_packet, 8);
} _dcd_usbbuf;
TU_ATTR_ALWAYS_INLINE static inline uint8_t dwc2_ep_count(const dwc2_regs_t* dwc2) {
#if TU_CHECK_MCU(OPT_MCU_GD32VF103)
return DWC2_EP_MAX;
#else
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
return ghwcfg2.num_dev_ep + 1;
#endif
}
//--------------------------------------------------------------------
// DMA
//--------------------------------------------------------------------
@@ -102,7 +106,8 @@ bool dcd_dcache_clean_invalidate(const void* addr, uint32_t data_size) {
TU_ATTR_ALWAYS_INLINE static inline bool dma_device_enabled(const dwc2_regs_t* dwc2) {
(void) dwc2;
// Internal DMA only
return CFG_TUD_DWC2_DMA_ENABLE && dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
return CFG_TUD_DWC2_DMA_ENABLE && ghwcfg2.arch == GHWCFG2_ARCH_INTERNAL_DMA;
}
static void dma_setup_prepare(uint8_t rhport) {
@@ -250,20 +255,15 @@ static void edpt_activate(uint8_t rhport, const tusb_desc_endpoint_t* p_endpoint
xfer->interval = p_endpoint_desc->bInterval;
// Endpoint control
union {
uint32_t value;
dwc2_depctl_t bm;
} depctl;
depctl.value = 0;
depctl.bm.mps = xfer->max_size;
depctl.bm.active = 1;
depctl.bm.type = p_endpoint_desc->bmAttributes.xfer;
dwc2_depctl_t depctl = {.value = 0};
depctl.mps = xfer->max_size;
depctl.active = 1;
depctl.type = p_endpoint_desc->bmAttributes.xfer;
if (p_endpoint_desc->bmAttributes.xfer != TUSB_XFER_ISOCHRONOUS) {
depctl.bm.set_data0_iso_even = 1;
depctl.set_data0_iso_even = 1;
}
if (dir == TUSB_DIR_IN) {
depctl.bm.tx_fifo_num = epnum;
depctl.tx_fifo_num = epnum;
}
dwc2_dep_t* dep = &dwc2->ep[dir == TUSB_DIR_IN ? 0 : 1][epnum];
@@ -343,31 +343,22 @@ static void edpt_schedule_packets(uint8_t rhport, const uint8_t epnum, const uin
}
// transfer size: A full OUT transfer (multiple packets, possibly) triggers XFRC.
union {
uint32_t value;
dwc2_ep_tsize_t bm;
} deptsiz;
deptsiz.value = 0;
deptsiz.bm.xfer_size = total_bytes;
deptsiz.bm.packet_count = num_packets;
dwc2_ep_tsize_t deptsiz = {.value = 0};
deptsiz.xfer_size = total_bytes;
deptsiz.packet_count = num_packets;
dep->tsiz = deptsiz.value;
// control
union {
dwc2_depctl_t bm;
uint32_t value;
} depctl;
depctl.value = dep->ctl;
depctl.bm.clear_nak = 1;
depctl.bm.enable = 1;
if (depctl.bm.type == DEPCTL_EPTYPE_ISOCHRONOUS && xfer->interval == 1) {
const uint32_t odd_now = (dwc2->dsts_bm.frame_number & 1u);
dwc2_depctl_t depctl = {.value = dep->ctl};
depctl.clear_nak = 1;
depctl.enable = 1;
if (depctl.type == DEPCTL_EPTYPE_ISOCHRONOUS && xfer->interval == 1) {
const dwc2_dsts_t dsts = {.value = dwc2->dsts};
const uint32_t odd_now = dsts.frame_number & 1u;
if (odd_now) {
depctl.bm.set_data0_iso_even = 1;
depctl.set_data0_iso_even = 1;
} else {
depctl.bm.set_data1_iso_odd = 1;
depctl.set_data1_iso_odd = 1;
}
}
@@ -410,7 +401,8 @@ bool dcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
// XCVRDLY: transceiver delay between xcvr_sel and txvalid during device chirp is required
// when using with some PHYs such as USB334x (USB3341, USB3343, USB3346, USB3347)
if (dwc2->ghwcfg2_bm.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
if (ghwcfg2.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
dcfg |= DCFG_XCVRDLY;
}
} else {
@@ -641,7 +633,7 @@ void dcd_edpt_clear_stall(uint8_t rhport, uint8_t ep_addr) {
// 7.4.1 Initialization on USB Reset
static void handle_bus_reset(uint8_t rhport) {
dwc2_regs_t *dwc2 = DWC2_REG(rhport);
const uint8_t ep_count = DWC2_EP_COUNT(dwc2);
const uint8_t ep_count = dwc2_ep_count(dwc2);
tu_memclr(xfer_status, sizeof(xfer_status));
@@ -671,7 +663,9 @@ static void handle_bus_reset(uint8_t rhport) {
dfifo_device_init(rhport);
// 5. Reset device address
dwc2->dcfg_bm.address = 0;
dwc2_dcfg_t dcfg = {.value = dwc2->dcfg};
dcfg.address = 0;
dwc2->dcfg = dcfg.value;
// Fixed both control EP0 size to 64 bytes
dwc2->epin[0].ctl &= ~(0x03 << DIEPCTL_MPSIZ_Pos);
@@ -691,8 +685,9 @@ static void handle_bus_reset(uint8_t rhport) {
static void handle_enum_done(uint8_t rhport) {
dwc2_regs_t *dwc2 = DWC2_REG(rhport);
const dwc2_dsts_t dsts = {.value = dwc2->dsts};
tusb_speed_t speed;
switch (dwc2->dsts_bm.enum_speed) {
switch (dsts.enum_speed) {
case DCFG_SPEED_HIGH:
speed = TUSB_SPEED_HIGH;
break;
@@ -737,12 +732,12 @@ static void handle_rxflvl_irq(uint8_t rhport) {
const volatile uint32_t* rx_fifo = dwc2->fifo[0];
// Pop control word off FIFO
const dwc2_grxstsp_t grxstsp_bm = dwc2->grxstsp_bm;
const uint8_t epnum = grxstsp_bm.ep_ch_num;
const dwc2_grxstsp_t grxstsp = {.value = dwc2->grxstsp};
const uint8_t epnum = grxstsp.ep_ch_num;
dwc2_dep_t* epout = &dwc2->epout[epnum];
switch (grxstsp_bm.packet_status) {
switch (grxstsp.packet_status) {
case GRXSTS_PKTSTS_GLOBAL_OUT_NAK:
// Global OUT NAK: do nothing
break;
@@ -764,7 +759,7 @@ static void handle_rxflvl_irq(uint8_t rhport) {
case GRXSTS_PKTSTS_RX_DATA: {
// Out packet received
const uint16_t byte_count = grxstsp_bm.byte_count;
const uint16_t byte_count = grxstsp.byte_count;
xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, TUSB_DIR_OUT);
if (byte_count) {
@@ -778,7 +773,8 @@ static void handle_rxflvl_irq(uint8_t rhport) {
// short packet, minus remaining bytes (xfer_size)
if (byte_count < xfer->max_size) {
xfer->total_len -= epout->tsiz_bm.xfer_size;
const dwc2_ep_tsize_t tsiz = {.value = epout->tsiz};
xfer->total_len -= tsiz.xfer_size;
if (epnum == 0) {
xfer->total_len -= _dcd_data.ep0_pending[TUSB_DIR_OUT];
_dcd_data.ep0_pending[TUSB_DIR_OUT] = 0;
@@ -840,11 +836,13 @@ static void handle_epin_slave(uint8_t rhport, uint8_t epnum, dwc2_diepint_t diep
// - 64 bytes or
// - Half/Empty of TX FIFO size (configured by GAHBCFG.TXFELVL)
if (diepint_bm.txfifo_empty && (dwc2->diepempmsk & (1 << epnum))) {
const uint16_t remain_packets = epin->tsiz_bm.packet_count;
dwc2_ep_tsize_t tsiz = {.value = epin->tsiz};
const uint16_t remain_packets = tsiz.packet_count;
// Process every single packet (only whole packets can be written to fifo)
for (uint16_t i = 0; i < remain_packets; i++) {
const uint16_t remain_bytes = (uint16_t) epin->tsiz_bm.xfer_size;
tsiz.value = epin->tsiz;
const uint16_t remain_bytes = (uint16_t) tsiz.xfer_size;
const uint16_t xact_bytes = tu_min16(remain_bytes, xfer->max_size);
// Check if dtxfsts has enough space available
@@ -863,7 +861,8 @@ static void handle_epin_slave(uint8_t rhport, uint8_t epnum, dwc2_diepint_t diep
}
// Turn off TXFE if all bytes are written.
if (epin->tsiz_bm.xfer_size == 0) {
tsiz.value = epin->tsiz;
if (tsiz.xfer_size == 0) {
dwc2->diepempmsk &= ~(1 << epnum);
}
}
@@ -894,7 +893,8 @@ static void handle_epout_dma(uint8_t rhport, uint8_t epnum, dwc2_doepint_t doepi
xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, TUSB_DIR_OUT);
// determine actual received bytes
const uint16_t remain = epout->tsiz_bm.xfer_size;
const dwc2_ep_tsize_t tsiz = {.value = epout->tsiz};
const uint16_t remain = tsiz.xfer_size;
xfer->total_len -= remain;
// this is ZLP, so prepare EP0 for next setup
@@ -930,7 +930,7 @@ static void handle_epin_dma(uint8_t rhport, uint8_t epnum, dwc2_diepint_t diepin
static void handle_ep_irq(uint8_t rhport, uint8_t dir) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
const bool is_dma = dma_device_enabled(dwc2);
const uint8_t ep_count = DWC2_EP_COUNT(dwc2);
const uint8_t ep_count = dwc2_ep_count(dwc2);
const uint8_t daint_offset = (dir == TUSB_DIR_IN) ? DAINT_IEPINT_Pos : DAINT_OEPINT_Pos;
dwc2_dep_t* ep_base = &dwc2->ep[dir == TUSB_DIR_IN ? 0 : 1][0];


@@ -36,6 +36,7 @@
#if CFG_TUH_ENABLED
#include "host/hcd.h"
#include "host/usbh.h"
#endif
#include "dwc2_common.h"
@@ -88,11 +89,13 @@ static void phy_fs_init(dwc2_regs_t* dwc2) {
static void phy_hs_init(dwc2_regs_t* dwc2) {
uint32_t gusbcfg = dwc2->gusbcfg;
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
const dwc2_ghwcfg4_t ghwcfg4 = {.value = dwc2->ghwcfg4};
// De-select FS PHY
gusbcfg &= ~GUSBCFG_PHYSEL;
if (dwc2->ghwcfg2_bm.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
if (ghwcfg2.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
TU_LOG(DWC2_COMMON_DEBUG, "Highspeed ULPI PHY init\r\n");
// Select ULPI PHY (external)
@@ -116,7 +119,7 @@ static void phy_hs_init(dwc2_regs_t* dwc2) {
gusbcfg &= ~GUSBCFG_ULPI_UTMI_SEL;
// Set 16-bit interface if supported
if (dwc2->ghwcfg4_bm.phy_data_width) {
if (ghwcfg4.phy_data_width) {
gusbcfg |= GUSBCFG_PHYIF16; // 16 bit
} else {
gusbcfg &= ~GUSBCFG_PHYIF16; // 8 bit
@@ -127,7 +130,7 @@ static void phy_hs_init(dwc2_regs_t* dwc2) {
dwc2->gusbcfg = gusbcfg;
// mcu specific phy init
dwc2_phy_init(dwc2, dwc2->ghwcfg2_bm.hs_phy_type);
dwc2_phy_init(dwc2, ghwcfg2.hs_phy_type);
// Reset core after selecting PHY
reset_core(dwc2);
@@ -136,11 +139,11 @@ static void phy_hs_init(dwc2_regs_t* dwc2) {
// - 9 if using 8-bit PHY interface
// - 5 if using 16-bit PHY interface
gusbcfg &= ~GUSBCFG_TRDT_Msk;
gusbcfg |= (dwc2->ghwcfg4_bm.phy_data_width ? 5u : 9u) << GUSBCFG_TRDT_Pos;
gusbcfg |= (ghwcfg4.phy_data_width ? 5u : 9u) << GUSBCFG_TRDT_Pos;
dwc2->gusbcfg = gusbcfg;
// MCU specific PHY update post reset
dwc2_phy_update(dwc2, dwc2->ghwcfg2_bm.hs_phy_type);
dwc2_phy_update(dwc2, ghwcfg2.hs_phy_type);
}
static bool check_dwc2(dwc2_regs_t* dwc2) {
@@ -171,7 +174,6 @@ static bool check_dwc2(dwc2_regs_t* dwc2) {
//--------------------------------------------------------------------
bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, tusb_role_t role) {
(void)dwc2;
#if CFG_TUD_ENABLED
if (role == TUSB_ROLE_DEVICE && !TUD_OPT_HIGH_SPEED) {
return false;
@@ -183,7 +185,8 @@ bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, tusb_role_t role) {
}
#endif
return dwc2->ghwcfg2_bm.hs_phy_type != GHWCFG2_HSPHY_NOT_SUPPORTED;
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
return ghwcfg2.hs_phy_type != GHWCFG2_HSPHY_NOT_SUPPORTED;
}
/* dwc2 has several PHYs option

File diff suppressed because it is too large.


@@ -36,6 +36,7 @@
#define DWC2_DEBUG 2
#include "host/hcd.h"
#include "host/usbh.h"
#include "dwc2_common.h"
// Max number of endpoints application can open, can be larger than DWC2_CHANNEL_COUNT_MAX
@@ -44,8 +45,6 @@
#endif
#define DWC2_CHANNEL_COUNT_MAX 16 // absolute max channel count
#define DWC2_CHANNEL_COUNT(_dwc2) tu_min8((_dwc2)->ghwcfg2_bm.num_host_ch + 1, DWC2_CHANNEL_COUNT_MAX)
TU_VERIFY_STATIC(CFG_TUH_DWC2_ENDPOINT_MAX <= 255, "currently only use 8-bit for index");
enum {
@@ -77,9 +76,9 @@ typedef struct {
struct TU_ATTR_PACKED {
uint32_t uframe_interval : 18; // micro-frame interval
uint32_t speed : 2;
uint32_t next_pid : 2;
uint32_t do_ping : 1;
uint32_t speed : 2;
uint32_t next_pid : 2; // PID for next transfer
uint32_t next_do_ping : 1; // Do PING for next transfer if possible (highspeed OUT)
// uint32_t : 9;
};
@@ -97,7 +96,6 @@ typedef struct {
uint8_t err_count : 3;
uint8_t period_split_nyet_count : 3;
uint8_t halted_nyet : 1;
uint8_t halted_sof_schedule : 1;
};
uint8_t result;
@@ -116,9 +114,15 @@ hcd_data_t _hcd_data;
//--------------------------------------------------------------------
//
//--------------------------------------------------------------------
TU_ATTR_ALWAYS_INLINE static inline uint8_t dwc2_channel_count(const dwc2_regs_t* dwc2) {
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
return tu_min8(ghwcfg2.num_host_ch + 1, DWC2_CHANNEL_COUNT_MAX);
}
TU_ATTR_ALWAYS_INLINE static inline tusb_speed_t hprt_speed_get(dwc2_regs_t* dwc2) {
tusb_speed_t speed;
switch(dwc2->hprt_bm.speed) {
const dwc2_hprt_t hprt = {.value = dwc2->hprt};
switch(hprt.speed) {
case HPRT_SPEED_HIGH: speed = TUSB_SPEED_HIGH; break;
case HPRT_SPEED_FULL: speed = TUSB_SPEED_FULL; break;
case HPRT_SPEED_LOW : speed = TUSB_SPEED_LOW ; break;
@@ -133,7 +137,8 @@ TU_ATTR_ALWAYS_INLINE static inline tusb_speed_t hprt_speed_get(dwc2_regs_t* dwc
TU_ATTR_ALWAYS_INLINE static inline bool dma_host_enabled(const dwc2_regs_t* dwc2) {
(void) dwc2;
// Internal DMA only
return CFG_TUH_DWC2_DMA_ENABLE && dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
return CFG_TUH_DWC2_DMA_ENABLE && ghwcfg2.arch == GHWCFG2_ARCH_INTERNAL_DMA;
}
#if CFG_TUH_MEM_DCACHE_ENABLE
@@ -155,7 +160,7 @@ bool hcd_dcache_clean_invalidate(const void* addr, uint32_t data_size) {
// Allocate a channel for new transfer
TU_ATTR_ALWAYS_INLINE static inline uint8_t channel_alloc(dwc2_regs_t* dwc2) {
const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
const uint8_t max_channel = dwc2_channel_count(dwc2);
for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
if (!xfer->allocated) {
@@ -168,15 +173,18 @@ TU_ATTR_ALWAYS_INLINE static inline uint8_t channel_alloc(dwc2_regs_t* dwc2) {
}
// Check if is periodic (interrupt/isochronous)
TU_ATTR_ALWAYS_INLINE static inline bool edpt_is_periodic(uint8_t ep_type) {
return ep_type == HCCHAR_EPTYPE_INTERRUPT || ep_type == HCCHAR_EPTYPE_ISOCHRONOUS;
TU_ATTR_ALWAYS_INLINE static inline bool channel_is_periodic(uint32_t hcchar) {
const dwc2_channel_char_t hcchar_bm = {.value = hcchar};
return hcchar_bm.ep_type == HCCHAR_EPTYPE_INTERRUPT || hcchar_bm.ep_type == HCCHAR_EPTYPE_ISOCHRONOUS;
}
TU_ATTR_ALWAYS_INLINE static inline uint8_t req_queue_avail(const dwc2_regs_t* dwc2, bool is_period) {
if (is_period) {
return dwc2->hptxsts_bm.req_queue_available;
const dwc2_hptxsts_t hptxsts = {.value = dwc2->hptxsts};
return hptxsts.req_queue_available;
} else {
return dwc2->hnptxsts_bm.req_queue_available;
const dwc2_hnptxsts_t hnptxsts = {.value = dwc2->hnptxsts};
return hnptxsts.req_queue_available;
}
}
@@ -188,7 +196,7 @@ TU_ATTR_ALWAYS_INLINE static inline void channel_dealloc(dwc2_regs_t* dwc2, uint
TU_ATTR_ALWAYS_INLINE static inline bool channel_disable(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
// disable also require request queue
TU_ASSERT(req_queue_avail(dwc2, edpt_is_periodic(channel->hcchar_bm.ep_type)));
TU_ASSERT(req_queue_avail(dwc2, channel_is_periodic(channel->hcchar)));
channel->hcintmsk |= HCINT_HALTED;
channel->hcchar |= HCCHAR_CHDIS | HCCHAR_CHENA; // must set both CHDIS and CHENA
return true;
@@ -196,18 +204,18 @@ TU_ATTR_ALWAYS_INLINE static inline bool channel_disable(const dwc2_regs_t* dwc2
// attempt to send IN token to receive data
TU_ATTR_ALWAYS_INLINE static inline bool channel_send_in_token(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
TU_ASSERT(req_queue_avail(dwc2, edpt_is_periodic(channel->hcchar_bm.ep_type)));
TU_ASSERT(req_queue_avail(dwc2, channel_is_periodic(channel->hcchar)));
channel->hcchar |= HCCHAR_CHENA;
return true;
}
// Find currently enabled channel. Note: EP0 is bidirectional
TU_ATTR_ALWAYS_INLINE static inline uint8_t channel_find_enabled(dwc2_regs_t* dwc2, uint8_t dev_addr, uint8_t ep_num, uint8_t ep_dir) {
const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
const uint8_t max_channel = dwc2_channel_count(dwc2);
for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
if (_hcd_data.xfer[ch_id].allocated) {
const dwc2_channel_char_t hcchar_bm = dwc2->channel[ch_id].hcchar_bm;
if (hcchar_bm.dev_addr == dev_addr && hcchar_bm.ep_num == ep_num && (ep_num == 0 || hcchar_bm.ep_dir == ep_dir)) {
const dwc2_channel_char_t hcchar = {.value = dwc2->channel[ch_id].hcchar};
if (hcchar.dev_addr == dev_addr && hcchar.ep_num == ep_num && (ep_num == 0 || hcchar.ep_dir == ep_dir)) {
return ch_id;
}
}
@@ -304,12 +312,13 @@ TU_ATTR_ALWAYS_INLINE static inline uint8_t cal_next_pid(uint8_t pid, uint8_t pa
static void dfifo_host_init(uint8_t rhport) {
const dwc2_controller_t* dwc2_controller = &_dwc2_controller[rhport];
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
// Scatter/Gather DMA mode is not yet supported. Buffer DMA only needs 1 word per channel
const bool is_dma = dma_host_enabled(dwc2);
uint16_t dfifo_top = dwc2_controller->ep_fifo_size/4;
if (is_dma) {
dfifo_top -= dwc2->ghwcfg2_bm.num_host_ch;
dfifo_top -= ghwcfg2.num_host_ch;
}
// fixed allocation for now, improve later:
@@ -319,7 +328,7 @@ static void dfifo_host_init(uint8_t rhport) {
uint32_t ptx_largest = is_highspeed ? TUSB_EPSIZE_ISO_HS_MAX/4 : 256/4;
uint16_t nptxfsiz = 2 * nptx_largest;
uint16_t rxfsiz = 2 * (ptx_largest + 2) + dwc2->ghwcfg2_bm.num_host_ch;
uint16_t rxfsiz = 2 * (ptx_largest + 2) + ghwcfg2.num_host_ch;
TU_ASSERT(dfifo_top >= (nptxfsiz + rxfsiz),);
uint16_t ptxfsiz = dfifo_top - (nptxfsiz + rxfsiz);
@@ -381,7 +390,7 @@ bool hcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
dwc2->hprt = HPRT_POWER; // turn on VBUS
// Enable required interrupts
dwc2->gintmsk |= GINTSTS_OTGINT | GINTSTS_CONIDSTSCHNG | GINTSTS_HPRTINT | GINTSTS_HCINT;
dwc2->gintmsk |= GINTSTS_OTGINT | GINTSTS_CONIDSTSCHNG | GINTSTS_HPRTINT | GINTSTS_HCINT | GINTSTS_DISCINT;
// NPTX can hold at least 2 packet, change interrupt level to half-empty
uint32_t gahbcfg = dwc2->gahbcfg & ~GAHBCFG_TX_FIFO_EPMTY_LVL;
@@ -461,8 +470,8 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t*
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
const tusb_speed_t rh_speed = hprt_speed_get(dwc2);
hcd_devtree_info_t devtree_info;
hcd_devtree_get_info(dev_addr, &devtree_info);
tuh_bus_info_t bus_info;
tuh_bus_info_get(dev_addr, &bus_info);
// find a free endpoint
const uint8_t ep_id = edpt_alloc();
@@ -473,7 +482,7 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t*
hcchar_bm->ep_size = tu_edpt_packet_size(desc_ep);
hcchar_bm->ep_num = tu_edpt_number(desc_ep->bEndpointAddress);
hcchar_bm->ep_dir = tu_edpt_dir(desc_ep->bEndpointAddress);
hcchar_bm->low_speed_dev = (devtree_info.speed == TUSB_SPEED_LOW) ? 1 : 0;
hcchar_bm->low_speed_dev = (bus_info.speed == TUSB_SPEED_LOW) ? 1 : 0;
hcchar_bm->ep_type = desc_ep->bmAttributes.xfer; // ep_type matches TUSB_XFER_*
hcchar_bm->err_multi_count = 0;
hcchar_bm->dev_addr = dev_addr;
@@ -482,21 +491,21 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t*
hcchar_bm->enable = 1;
dwc2_channel_split_t* hcsplt_bm = &edpt->hcsplt_bm;
hcsplt_bm->hub_port = devtree_info.hub_port;
hcsplt_bm->hub_addr = devtree_info.hub_addr;
hcsplt_bm->hub_port = bus_info.hub_port;
hcsplt_bm->hub_addr = bus_info.hub_addr;
hcsplt_bm->xact_pos = 0;
hcsplt_bm->split_compl = 0;
hcsplt_bm->split_en = (rh_speed == TUSB_SPEED_HIGH && devtree_info.speed != TUSB_SPEED_HIGH) ? 1 : 0;
hcsplt_bm->split_en = (rh_speed == TUSB_SPEED_HIGH && bus_info.speed != TUSB_SPEED_HIGH) ? 1 : 0;
edpt->speed = devtree_info.speed;
edpt->speed = bus_info.speed;
edpt->next_pid = HCTSIZ_PID_DATA0;
if (desc_ep->bmAttributes.xfer == TUSB_XFER_ISOCHRONOUS) {
edpt->uframe_interval = 1 << (desc_ep->bInterval - 1);
if (devtree_info.speed == TUSB_SPEED_FULL) {
if (bus_info.speed == TUSB_SPEED_FULL) {
edpt->uframe_interval <<= 3;
}
} else if (desc_ep->bmAttributes.xfer == TUSB_XFER_INTERRUPT) {
if (devtree_info.speed == TUSB_SPEED_HIGH) {
if (bus_info.speed == TUSB_SPEED_HIGH) {
edpt->uframe_interval = 1 << (desc_ep->bInterval - 1);
} else {
edpt->uframe_interval = desc_ep->bInterval << 3;
@@ -514,10 +523,11 @@ bool hcd_edpt_close(uint8_t rhport, uint8_t daddr, uint8_t ep_addr) {
// clean up channel after part of transfer is done but the whole urb is not complete
static void channel_xfer_out_wrapup(dwc2_regs_t* dwc2, uint8_t ch_id) {
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
dwc2_channel_t* channel = &dwc2->channel[ch_id];
const dwc2_channel_t* channel = &dwc2->channel[ch_id];
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
edpt->next_pid = channel->hctsiz_bm.pid; // save PID
const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
edpt->next_pid = hctsiz.pid; // save PID
/* Since hctsiz.xfersize field reflects the number of bytes transferred via the AHB, not the USB)
* For IN: we can use hctsiz.xfersize as remaining bytes.
@@ -525,9 +535,10 @@ static void channel_xfer_out_wrapup(dwc2_regs_t* dwc2, uint8_t ch_id) {
* number of packets that have been transferred via the USB. This is always an integral number of packets if the
* transfer was halted before its normal completion.
*/
const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
const uint16_t total_packets = cal_packet_count(edpt->buflen, channel->hcchar_bm.ep_size);
const uint16_t actual_bytes = (total_packets - remain_packets) * channel->hcchar_bm.ep_size;
const uint16_t remain_packets = hctsiz.packet_count;
const dwc2_channel_char_t hcchar = {.value = channel->hcchar};
const uint16_t total_packets = cal_packet_count(edpt->buflen, hcchar.ep_size);
const uint16_t actual_bytes = (total_packets - remain_packets) * hcchar.ep_size;
xfer->fifo_bytes = 0;
xfer->xferred_bytes += actual_bytes;
@@ -540,7 +551,7 @@ static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
dwc2_channel_char_t* hcchar_bm = &edpt->hcchar_bm;
dwc2_channel_t* channel = &dwc2->channel[ch_id];
bool const is_period = edpt_is_periodic(hcchar_bm->ep_type);
bool const is_period = channel_is_periodic(edpt->hcchar);
// clear previous state
xfer->fifo_bytes = 0;
@@ -553,13 +564,16 @@ static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
// hctsiz: zero length packet still count as 1
const uint16_t packet_count = cal_packet_count(edpt->buflen, hcchar_bm->ep_size);
uint32_t hctsiz = (edpt->next_pid << HCTSIZ_PID_Pos) | (packet_count << HCTSIZ_PKTCNT_Pos) | edpt->buflen;
if (edpt->do_ping && edpt->speed == TUSB_SPEED_HIGH &&
dwc2_channel_tsize_t hctsiz = {.value = 0};
hctsiz.pid = edpt->next_pid; // next PID is set in transfer complete interrupt
hctsiz.packet_count = packet_count;
hctsiz.xfer_size = edpt->buflen;
if (edpt->next_do_ping && edpt->speed == TUSB_SPEED_HIGH &&
edpt->next_pid != HCTSIZ_PID_SETUP && hcchar_bm->ep_dir == TUSB_DIR_OUT) {
hctsiz |= HCTSIZ_DOPING;
hctsiz.do_ping = 1;
}
channel->hctsiz = hctsiz;
edpt->do_ping = 0;
channel->hctsiz = hctsiz.value;
edpt->next_do_ping = 0;
// pre-calculate next PID based on packet count, adjusted in transfer complete interrupt if short packet
if (hcchar_bm->ep_num == 0) {
@@ -590,7 +604,7 @@ static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
hcintmsk |= HCINT_BABBLE_ERR | HCINT_DATATOGGLE_ERR | HCINT_ACK;
} else {
hcintmsk |= HCINT_NYET;
if (edpt->hcsplt_bm.split_en) {
if (edpt->hcsplt_bm.split_en || hctsiz.do_ping) {
hcintmsk |= HCINT_ACK;
}
}
@@ -699,18 +713,23 @@ bool hcd_edpt_clear_stall(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr) {
//--------------------------------------------------------------------
// HCD Event Handler
//--------------------------------------------------------------------
// retry an IN transfer, channel must be halted
static void channel_xfer_in_retry(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hcint) {
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
dwc2_channel_t* channel = &dwc2->channel[ch_id];
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
dwc2_channel_t* channel = &dwc2->channel[ch_id];
dwc2_channel_char_t hcchar = {.value = channel->hcchar};
if (edpt_is_periodic(channel->hcchar_bm.ep_type)){
if (channel_is_periodic(hcchar.value)){
const dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
// retry immediately for periodic split NYET if we haven't reach max retry
if (channel->hcsplt_bm.split_en && channel->hcsplt_bm.split_compl && (hcint & HCINT_NYET || xfer->halted_nyet)) {
if (hcsplt.split_en && hcsplt.split_compl && (hcint & HCINT_NYET || xfer->halted_nyet)) {
xfer->period_split_nyet_count++;
xfer->halted_nyet = 0;
if (xfer->period_split_nyet_count < HCD_XFER_PERIOD_SPLIT_NYET_MAX) {
channel->hcchar_bm.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
hcchar.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
channel->hcchar = hcchar.value;
channel_send_in_token(dwc2, channel);
return;
} else {
@@ -719,18 +738,24 @@ static void channel_xfer_in_retry(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
}
}
// for periodic, de-allocate channel, enable SOF set frame counter for later transfer
edpt->next_pid = channel->hctsiz_bm.pid; // save PID
edpt->uframe_countdown = edpt->uframe_interval;
dwc2->gintmsk |= GINTSTS_SOF;
if (hcint & HCINT_HALTED) {
const uint32_t ucount = (hprt_speed_get(dwc2) == TUSB_SPEED_HIGH ? 1 : 8);
if (edpt->uframe_interval == ucount) {
// retry on next frame if bInterval is 1
hcchar.odd_frame = 1 - (dwc2->hfnum & 1);
channel->hcchar = hcchar.value;
channel_send_in_token(dwc2, channel);
} else {
// otherwise, de-allocate channel, enable SOF set frame counter for later transfer
const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
edpt->next_pid = hctsiz.pid; // save PID
edpt->uframe_countdown = edpt->uframe_interval - ucount;
// enable SOF interrupt if not already enabled
if (!(dwc2->gintmsk & GINTMSK_SOFM)) {
dwc2->gintsts = GINTSTS_SOF;
dwc2->gintmsk |= GINTMSK_SOFM;
}
// already halted, de-allocate channel (called from DMA isr)
channel_dealloc(dwc2, ch_id);
} else {
// disable channel first if not halted (called slave isr)
xfer->halted_sof_schedule = 1;
channel_disable(dwc2, channel);
}
} else {
// for control/bulk: retry immediately
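channel_xfer_in_retry() above now distinguishes two periodic cases: endpoints polled every (micro)frame are retried on the next frame directly, everything else is parked with a uframe_countdown and restarted from the SOF interrupt. A simplified sketch of that decision (illustrative types and names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint32_t uframe_interval;   // polling interval in microframes
  uint32_t uframe_countdown;  // decremented on every SOF
} sketch_periodic_ep_t;

static void retry_periodic_in(sketch_periodic_ep_t* ep, bool roothub_highspeed) {
  // microframes that elapse per SOF tick: 1 on a high-speed root, 8 otherwise
  const uint32_t per_sof = roothub_highspeed ? 1u : 8u;
  if (ep->uframe_interval == per_sof) {
    printf("retry on next frame\n");
  } else {
    ep->uframe_countdown = ep->uframe_interval - per_sof;
    printf("park endpoint, SOF countdown = %lu\n", (unsigned long) ep->uframe_countdown);
  }
}

int main(void) {
  sketch_periodic_ep_t ep1  = { .uframe_interval = 1 };
  sketch_periodic_ep_t ep32 = { .uframe_interval = 32 };
  retry_periodic_in(&ep1, true);   // interval == 1 uframe -> immediate retry
  retry_periodic_in(&ep32, true);  // otherwise wait 31 more uframes via SOF
  return 0;
}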
@@ -761,13 +786,13 @@ static void handle_rxflvl_irq(uint8_t rhport) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
// Pop control word off FIFO
const dwc2_grxstsp_t grxstsp_bm = dwc2->grxstsp_bm;
const uint8_t ch_id = grxstsp_bm.ep_ch_num;
const dwc2_grxstsp_t grxstsp = {.value= dwc2->grxstsp};
const uint8_t ch_id = grxstsp.ep_ch_num;
switch (grxstsp_bm.packet_status) {
switch (grxstsp.packet_status) {
case GRXSTS_PKTSTS_RX_DATA: {
// In packet received, pop this entry --> ACK interrupt
const uint16_t byte_count = grxstsp_bm.byte_count;
const uint16_t byte_count = grxstsp.byte_count;
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX,);
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
@@ -801,25 +826,26 @@ static void handle_rxflvl_irq(uint8_t rhport) {
// return true if there is still pending data and need more ISR
static bool handle_txfifo_empty(dwc2_regs_t* dwc2, bool is_periodic) {
// Use period txsts for both p/np to get request queue space available (1-bit difference, it is small enough)
volatile dwc2_hptxsts_t* txsts_bm = (volatile dwc2_hptxsts_t*) (is_periodic ? &dwc2->hptxsts : &dwc2->hnptxsts);
const dwc2_hptxsts_t txsts = {.value = (is_periodic ? dwc2->hptxsts : dwc2->hnptxsts)};
const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
const uint8_t max_channel = dwc2_channel_count(dwc2);
for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
dwc2_channel_t* channel = &dwc2->channel[ch_id];
const dwc2_channel_char_t hcchar = {.value = channel->hcchar};
// skip writing to FIFO if channel is expecting halted.
if (!(channel->hcintmsk & HCINT_HALTED) && (channel->hcchar_bm.ep_dir == TUSB_DIR_OUT)) {
if (!(channel->hcintmsk & HCINT_HALTED) && (hcchar.ep_dir == TUSB_DIR_OUT)) {
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
const uint16_t remain_packets = hctsiz.packet_count;
for (uint16_t i = 0; i < remain_packets; i++) {
const uint16_t remain_bytes = edpt->buflen - xfer->fifo_bytes;
const uint16_t xact_bytes = tu_min16(remain_bytes, channel->hcchar_bm.ep_size);
const uint16_t xact_bytes = tu_min16(remain_bytes, hcchar.ep_size);
// skip if there is not enough space in FIFO and RequestQueue.
// Packet's last word written to FIFO will trigger a request queue
if ((xact_bytes > (txsts_bm->fifo_available << 2)) || (txsts_bm->req_queue_available == 0)) {
if ((xact_bytes > (txsts.fifo_available << 2)) || (txsts.req_queue_available == 0)) {
return true;
}
@@ -836,23 +862,26 @@ static bool handle_channel_in_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t h
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
dwc2_channel_t* channel = &dwc2->channel[ch_id];
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
bool is_done = false;
// if (channel->hcsplt_bm.split_en) {
// if (hcsplt.split_en) {
// if (edpt->hcchar_bm.ep_num == 1) {
// TU_LOG1("Frame %u, ch %u: ep %u, hcint 0x%04lX ", dwc2->hfnum_bm.num, ch_id, channel->hcchar_bm.ep_num, hcint);
// TU_LOG1("Frame %u, ch %u: ep %u, hcint 0x%04lX ", dwc2->hfnum_bm.num, ch_id, hcsplt.ep_num, hcint);
// print_hcint(hcint);
// }
if (hcint & HCINT_XFER_COMPLETE) {
if (edpt->hcchar_bm.ep_num != 0) {
edpt->next_pid = channel->hctsiz_bm.pid; // save pid (already toggled)
edpt->next_pid = hctsiz.pid; // save pid (already toggled)
}
const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
if (channel->hcsplt_bm.split_en && remain_packets && xfer->fifo_bytes == edpt->hcchar_bm.ep_size) {
const uint16_t remain_packets = hctsiz.packet_count;
if (hcsplt.split_en && remain_packets && xfer->fifo_bytes == edpt->hcchar_bm.ep_size) {
// Split can only complete 1 transaction (up to 1 packet) at a time, schedule more
channel->hcsplt_bm.split_compl = 0;
hcsplt.split_compl = 0;
channel->hcsplt = hcsplt.value;
} else {
xfer->result = XFER_RESULT_SUCCESS;
}
@@ -871,43 +900,44 @@ static bool handle_channel_in_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t h
channel_disable(dwc2, channel);
} else if (hcint & HCINT_NYET) {
// restart complete split
channel->hcsplt_bm.split_compl = 1;
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
xfer->halted_nyet = 1;
channel_disable(dwc2, channel);
} else if (hcint & HCINT_NAK) {
// NAK received, re-enable channel if request queue is available
if (channel->hcsplt_bm.split_en) {
channel->hcsplt_bm.split_compl = 0; // restart with start-split
// NAK received, disable channel to flush all posted request and try again
if (hcsplt.split_en) {
hcsplt.split_compl = 0; // restart with start-split
channel->hcsplt = hcsplt.value;
}
channel_disable(dwc2, channel);
} else if (hcint & HCINT_ACK) {
xfer->err_count = 0;
if (channel->hcsplt_bm.split_en) {
if (!channel->hcsplt_bm.split_compl) {
if (hcsplt.split_en) {
if (!hcsplt.split_compl) {
// start split is ACK --> do complete split
channel->hcintmsk |= HCINT_NYET;
channel->hcsplt_bm.split_compl = 1;
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
channel_send_in_token(dwc2, channel);
} else {
// do nothing for complete split with DATA, this will trigger XferComplete and handled there
}
} else {
// ACK with data
const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
const uint16_t remain_packets = hctsiz.packet_count;
if (remain_packets) {
// still more packet to receive, also reset to start split
channel->hcsplt_bm.split_compl = 0;
hcsplt.split_compl = 0;
channel->hcsplt = hcsplt.value;
channel_send_in_token(dwc2, channel);
}
}
} else if (hcint & HCINT_HALTED) {
channel->hcintmsk &= ~HCINT_HALTED;
if (xfer->halted_sof_schedule) {
// de-allocate channel but does not complete xfer, we schedule it in the SOF interrupt
channel_dealloc(dwc2, ch_id);
} else if (xfer->result != XFER_RESULT_INVALID) {
if (xfer->result != XFER_RESULT_INVALID) {
is_done = true;
} else if (xfer->err_count == HCD_XFER_ERROR_MAX) {
xfer->result = XFER_RESULT_FAILED;
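The halt path above reduces to a small decision: if an outcome was already recorded, the transfer is done; otherwise retry until the error budget is exhausted. A hedged sketch (the result codes and limit are illustrative, not the driver's constants):

#include <stdbool.h>
#include <stdint.h>

enum { RES_INVALID = 0, RES_SUCCESS, RES_FAILED }; // illustrative result codes
enum { ERR_MAX = 3 };                              // assumed retry budget

// true when the halted channel should report the transfer as complete
static bool halted_is_done(uint8_t result, uint8_t err_count) {
  if (result != RES_INVALID) { return true; } // success/stall/... already decided
  if (err_count >= ERR_MAX)  { return true; } // give up after repeated errors
  return false;                               // otherwise the transaction is retried
}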
@@ -927,23 +957,29 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
dwc2_channel_t* channel = &dwc2->channel[ch_id];
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
bool is_done = false;
if (hcint & HCINT_XFER_COMPLETE) {
is_done = true;
xfer->result = XFER_RESULT_SUCCESS;
channel->hcintmsk &= ~HCINT_ACK;
if (hcint & HCINT_NYET) {
// complete transfer with NYET, do ping next time
edpt->next_do_ping = 1;
}
} else if (hcint & HCINT_STALL) {
xfer->result = XFER_RESULT_STALLED;
channel_disable(dwc2, channel);
} else if (hcint & HCINT_NYET) {
xfer->err_count = 0;
if (channel->hcsplt_bm.split_en) {
if (hcsplt.split_en) {
// retry complete split
channel->hcsplt_bm.split_compl = 1;
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
channel->hcchar |= HCCHAR_CHENA;
} else {
edpt->do_ping = 1;
edpt->next_do_ping = 1;
channel_xfer_out_wrapup(dwc2, ch_id);
channel_disable(dwc2, channel);
}
@@ -956,7 +992,7 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
channel->hcintmsk |= HCINT_ACK;
} else {
// NAK: disable channel to flush all posted requests and try again
edpt->do_ping = 1;
edpt->next_do_ping = 1;
xfer->err_count = 0;
}
} else if (hcint & HCINT_HALTED) {
@@ -973,9 +1009,17 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
} else if (hcint & HCINT_ACK) {
xfer->err_count = 0;
channel->hcintmsk &= ~HCINT_ACK;
if (channel->hcsplt_bm.split_en && !channel->hcsplt_bm.split_compl) {
// start split is ACK --> do complete split
channel->hcsplt_bm.split_compl = 1;
if (hcsplt.split_en) {
if (!hcsplt.split_compl) {
// ACK for start split --> do complete split
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
channel->hcchar |= HCCHAR_CHENA;
}
} else {
// ACK interrupt is only enabled for Split and PING
// ACK for PING means the device is ready to receive data
channel->hctsiz &= ~HCTSIZ_DOPING; // HC already cleared the PING bit, but clear it anyway
channel->hcchar |= HCCHAR_CHENA;
}
}
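The ACK branch above is the tail end of the high-speed PING protocol: after a NAK or NYET the endpoint is probed with PING tokens, and an ACKed PING means the data packet can finally be (re)sent. A simplified, standalone sketch of that decision (the enums are hypothetical, not driver types):

typedef enum { PHASE_DATA, PHASE_PING } out_phase_t;
typedef enum { RSP_ACK, RSP_NAK, RSP_NYET } hs_response_t;

static out_phase_t next_out_phase(out_phase_t phase, hs_response_t rsp) {
  if (phase == PHASE_DATA) {
    // NYET: data taken but the endpoint is now full; NAK: data refused.
    // Either way, probe with PING before the next data attempt.
    return (rsp == RSP_ACK) ? PHASE_DATA : PHASE_PING;
  }
  // PING phase: ACK means the endpoint has buffer space again.
  return (rsp == RSP_ACK) ? PHASE_DATA : PHASE_PING;
}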
@@ -994,6 +1038,9 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
dwc2_channel_t* channel = &dwc2->channel[ch_id];
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
dwc2_channel_char_t hcchar = {.value = channel->hcchar};
dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
bool is_done = false;
@@ -1001,8 +1048,8 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
if (hcint & HCINT_HALTED) {
if (hcint & (HCINT_XFER_COMPLETE | HCINT_STALL | HCINT_BABBLE_ERR)) {
const uint16_t remain_bytes = (uint16_t) channel->hctsiz_bm.xfer_size;
const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
const uint16_t remain_bytes = (uint16_t) hctsiz.xfer_size;
const uint16_t remain_packets = hctsiz.packet_count;
const uint16_t actual_len = edpt->buflen - remain_bytes;
xfer->xferred_bytes += actual_len;
@@ -1012,13 +1059,14 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
xfer->result = XFER_RESULT_STALLED;
} else if (hcint & HCINT_BABBLE_ERR) {
xfer->result = XFER_RESULT_FAILED;
} else if (channel->hcsplt_bm.split_en && remain_packets && actual_len == edpt->hcchar_bm.ep_size) {
} else if (hcsplt.split_en && remain_packets && actual_len == hcchar.ep_size) {
// Split can only complete 1 transaction (up to 1 packet) at a time, schedule more
is_done = false;
edpt->buffer += actual_len;
edpt->buflen -= actual_len;
channel->hcsplt_bm.split_compl = 0;
hcsplt.split_compl = 0;
channel->hcsplt = hcsplt.value;
channel_xfer_in_retry(dwc2, ch_id, hcint);
} else {
xfer->result = XFER_RESULT_SUCCESS;
@@ -1033,33 +1081,38 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
xfer->result = XFER_RESULT_FAILED;
} else {
channel->hcintmsk |= HCINT_ACK | HCINT_NAK | HCINT_DATATOGGLE_ERR;
channel->hcsplt_bm.split_compl = 0;
hcsplt.split_compl = 0;
channel->hcsplt = hcsplt.value;
channel_xfer_in_retry(dwc2, ch_id, hcint);
}
} else if (hcint & HCINT_NYET) {
// Must handle nyet before nak or ack. Could get a nyet at the same time as either of those on a BULK/CONTROL
// OUT that started with a PING. The nyet takes precedence.
if (channel->hcsplt_bm.split_en) {
if (hcsplt.split_en) {
// split NYET means the hub has no data yet, retry the complete split
channel->hcsplt_bm.split_compl = 1;
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
channel_xfer_in_retry(dwc2, ch_id, hcint);
}
} else if (hcint & HCINT_ACK) {
xfer->err_count = 0;
channel->hcintmsk &= ~HCINT_ACK;
if (channel->hcsplt_bm.split_en) {
if (hcsplt.split_en) {
// start split is ACK --> do complete split
// TODO: for ISO must use xact_pos to plan complete split based on microframe (up to 187.5 bytes/uframe)
channel->hcsplt_bm.split_compl = 1;
if (edpt_is_periodic(channel->hcchar_bm.ep_type)) {
channel->hcchar_bm.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
if (channel_is_periodic(channel->hcchar)) {
hcchar.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
channel->hcchar = hcchar.value;
}
channel_send_in_token(dwc2, channel);
}
} else if (hcint & (HCINT_NAK | HCINT_DATATOGGLE_ERR)) {
xfer->err_count = 0;
channel->hcintmsk &= ~(HCINT_NAK | HCINT_DATATOGGLE_ERR);
channel->hcsplt_bm.split_compl = 0; // restart with start-split
hcsplt.split_compl = 0; // restart with start-split
channel->hcsplt = hcsplt.value;
channel_xfer_in_retry(dwc2, ch_id, hcint);
} else if (hcint & HCINT_FARME_OVERRUN) {
// retry start-split in next binterval
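Taken together, the XFER_COMPLETE/ACK/NYET/NAK branches above walk a split-IN transaction through its two phases: a start-split is issued first; once the hub ACKs it, complete-splits are polled until data arrives, a NYET asks for another complete-split, and a NAK restarts from the start-split. A hedged sketch of that sequencing (enums are illustrative, not driver types):

typedef enum { DO_START_SPLIT, DO_COMPLETE_SPLIT, SPLIT_DONE } split_phase_t;
typedef enum { HUB_ACK, HUB_NYET, HUB_NAK, HUB_DATA } hub_response_t;

static split_phase_t split_in_next(split_phase_t phase, hub_response_t rsp) {
  if (phase == DO_START_SPLIT) {
    // hub accepted the start-split and will run the FS/LS transaction downstream
    return (rsp == HUB_ACK) ? DO_COMPLETE_SPLIT : DO_START_SPLIT;
  }
  if (phase == DO_COMPLETE_SPLIT) {
    if (rsp == HUB_DATA) { return SPLIT_DONE; }        // data returned, transfer done
    if (rsp == HUB_NYET) { return DO_COMPLETE_SPLIT; } // not finished downstream, retry
    return DO_START_SPLIT;                             // NAK/error: restart the whole split
  }
  return SPLIT_DONE;
}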
@@ -1074,6 +1127,8 @@ static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hc
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
dwc2_channel_t* channel = &dwc2->channel[ch_id];
hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
const dwc2_channel_char_t hcchar = {.value = channel->hcchar};
dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
bool is_done = false;
@@ -1109,16 +1164,18 @@ static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hc
}
}
} else if (hcint & HCINT_NYET) {
if (channel->hcsplt_bm.split_en && channel->hcsplt_bm.split_compl) {
if (hcsplt.split_en && hcsplt.split_compl) {
// split NYET means the hub has not finished the transaction yet, retry the complete split
channel->hcsplt_bm.split_compl = 1;
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
channel->hcchar |= HCCHAR_CHENA;
}
} else if (hcint & HCINT_ACK) {
xfer->err_count = 0;
if (channel->hcsplt_bm.split_en && !channel->hcsplt_bm.split_compl) {
if (hcsplt.split_en && !hcsplt.split_compl) {
// start split is ACK --> do complete split
channel->hcsplt_bm.split_compl = 1;
hcsplt.split_compl = 1;
channel->hcsplt = hcsplt.value;
channel->hcchar |= HCCHAR_CHENA;
}
}
@@ -1134,14 +1191,14 @@ static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hc
static void handle_channel_irq(uint8_t rhport, bool in_isr) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
const bool is_dma = dma_host_enabled(dwc2);
const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
const uint8_t max_channel = dwc2_channel_count(dwc2);
for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
if (tu_bit_test(dwc2->haint, ch_id)) {
dwc2_channel_t* channel = &dwc2->channel[ch_id];
hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX,);
dwc2_channel_char_t hcchar_bm = channel->hcchar_bm;
dwc2_channel_char_t hcchar = {.value = channel->hcchar};
const uint32_t hcint = channel->hcint;
channel->hcint = hcint; // clear interrupt
@@ -1149,7 +1206,7 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
bool is_done = false;
if (is_dma) {
#if CFG_TUH_DWC2_DMA_ENABLE
if (hcchar_bm.ep_dir == TUSB_DIR_OUT) {
if (hcchar.ep_dir == TUSB_DIR_OUT) {
is_done = handle_channel_out_dma(dwc2, ch_id, hcint);
} else {
is_done = handle_channel_in_dma(dwc2, ch_id, hcint);
@@ -1161,7 +1218,7 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
#endif
} else {
#if CFG_TUH_DWC2_SLAVE_ENABLE
if (hcchar_bm.ep_dir == TUSB_DIR_OUT) {
if (hcchar.ep_dir == TUSB_DIR_OUT) {
is_done = handle_channel_out_slave(dwc2, ch_id, hcint);
} else {
is_done = handle_channel_in_slave(dwc2, ch_id, hcint);
@@ -1170,8 +1227,8 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
}
if (is_done) {
const uint8_t ep_addr = tu_edpt_addr(hcchar_bm.ep_num, hcchar_bm.ep_dir);
hcd_event_xfer_complete(hcchar_bm.dev_addr, ep_addr, xfer->xferred_bytes, xfer->result, in_isr);
const uint8_t ep_addr = tu_edpt_addr(hcchar.ep_num, hcchar.ep_dir);
hcd_event_xfer_complete(hcchar.dev_addr, ep_addr, xfer->xferred_bytes, (xfer_result_t)xfer->result, in_isr);
channel_dealloc(dwc2, ch_id);
}
}
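For reference, the endpoint address handed to the completion event above is the standard USB encoding with the direction in bit 7 (0x81 = EP1 IN, 0x01 = EP1 OUT); a one-line sketch with a hypothetical helper name:

#include <stdint.h>

static inline uint8_t make_ep_addr(uint8_t ep_num, uint8_t is_in) {
  return (uint8_t)(ep_num | (is_in ? 0x80u : 0x00u)); // bit 7 = IN direction
}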
@@ -1191,7 +1248,7 @@ static bool handle_sof_irq(uint8_t rhport, bool in_isr) {
for(uint8_t ep_id = 0; ep_id < CFG_TUH_DWC2_ENDPOINT_MAX; ep_id++) {
hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
if (edpt->hcchar_bm.enable && edpt_is_periodic(edpt->hcchar_bm.ep_type) && edpt->uframe_countdown > 0) {
if (edpt->hcchar_bm.enable && channel_is_periodic(edpt->hcchar) && edpt->uframe_countdown > 0) {
edpt->uframe_countdown -= tu_min32(ucount, edpt->uframe_countdown);
if (edpt->uframe_countdown == 0) {
if (!edpt_xfer_kickoff(dwc2, ep_id)) {
@@ -1210,10 +1267,10 @@ static bool handle_sof_irq(uint8_t rhport, bool in_isr) {
static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
uint32_t hcfg = dwc2->hcfg & ~HCFG_FSLS_PHYCLK_SEL;
const dwc2_gusbcfg_t gusbcfg_bm = dwc2->gusbcfg_bm;
const dwc2_gusbcfg_t gusbcfg = {.value = dwc2->gusbcfg};
uint32_t phy_clock;
if (gusbcfg_bm.phy_sel) {
if (gusbcfg.phy_sel) {
phy_clock = 48; // dedicated FS is 48 MHz
if (speed == TUSB_SPEED_LOW) {
hcfg |= HCFG_FSLS_PHYCLK_SEL_6MHZ;
@@ -1221,11 +1278,11 @@ static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
hcfg |= HCFG_FSLS_PHYCLK_SEL_48MHZ;
}
} else {
if (gusbcfg_bm.ulpi_utmi_sel) {
if (gusbcfg.ulpi_utmi_sel) {
phy_clock = 60; // ULPI 8-bit is 60 MHz
} else {
// UTMI+ 16-bit is 30 MHz, 8-bit is 60 MHz
phy_clock = gusbcfg_bm.phy_if16 ? 30 : 60;
phy_clock = gusbcfg.phy_if16 ? 30 : 60;
// Enable UTMI+ low power mode 48 MHz external clock if not high speed
if (speed == TUSB_SPEED_HIGH) {
@@ -1242,9 +1299,9 @@ static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
uint32_t hfir = dwc2->hfir & ~HFIR_FRIVL_Msk;
if (speed == TUSB_SPEED_HIGH) {
hfir |= 125*phy_clock;
hfir |= 125*phy_clock - 1; // The "- 1" is the correct value. The Synopsys databook was corrected in 3.30a
} else {
hfir |= 1000*phy_clock;
hfir |= 1000*phy_clock - 1;
}
dwc2->hfir = hfir;
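The HFIR value programmed above is the number of PHY clocks per (micro)frame minus one, which is why the added "- 1" matters: at 60 MHz ULPI in high speed it yields 125 × 60 − 1 = 7499, and at 48 MHz full speed 1000 × 48 − 1 = 47999. A standalone sketch of the arithmetic (not a driver function):

#include <stdbool.h>
#include <stdint.h>

static uint32_t frame_interval(uint32_t phy_clock_mhz, bool high_speed) {
  // high speed: 125 us microframe; full/low speed: 1 ms frame
  const uint32_t clocks = high_speed ? (125u * phy_clock_mhz) : (1000u * phy_clock_mhz);
  return clocks - 1u; // e.g. 60 MHz HS -> 7499, 48 MHz FS -> 47999
}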
@@ -1257,21 +1314,19 @@ static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
*/
static void handle_hprt_irq(uint8_t rhport, bool in_isr) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
uint32_t hprt = dwc2->hprt & ~HPRT_W1_MASK;
const dwc2_hprt_t hprt_bm = dwc2->hprt_bm;
const dwc2_hprt_t hprt_bm = {.value = dwc2->hprt};
uint32_t hprt = hprt_bm.value & ~HPRT_W1_MASK;
if (dwc2->hprt & HPRT_CONN_DETECT) {
if (hprt_bm.conn_detected) {
// Port Connect Detect
hprt |= HPRT_CONN_DETECT;
if (hprt_bm.conn_status) {
hcd_event_device_attach(rhport, in_isr);
} else {
hcd_event_device_remove(rhport, in_isr);
}
}
if (dwc2->hprt & HPRT_ENABLE_CHANGE) {
if (hprt_bm.enable_change) {
// Port enable change
hprt |= HPRT_ENABLE_CHANGE;
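HPRT mixes control bits with write-1-to-clear status bits, which is why the handler above starts from `hprt & ~HPRT_W1_MASK` and then ORs back only the change bits it is acknowledging. A sketch with an assumed mask value (illustration only, not the header definitions):

#include <stdint.h>

#define W1C_BITS        0x0000002Eu // assumed write-1-to-clear bits, for illustration
#define CONNECT_DETECT  (1u << 1)

static uint32_t ack_connect_detect(uint32_t hprt_read) {
  uint32_t hprt_write = hprt_read & ~W1C_BITS; // avoid clearing unhandled status bits
  hprt_write |= CONNECT_DETECT;                // acknowledge only this change
  return hprt_write;                           // value to write back to the port register
}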
@@ -1330,6 +1385,15 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
handle_channel_irq(rhport, in_isr);
}
if (gintsts & GINTSTS_DISCINT) {
// Device disconnected
dwc2->gintsts = GINTSTS_DISCINT;
if (!(dwc2->hprt & HPRT_CONN_STATUS)) {
hcd_event_device_remove(rhport, in_isr);
}
}
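The new disconnect branch clears the write-1-to-clear status bit and then double-checks the port connect status so a glitch does not tear down an attached device. A standalone sketch with assumed bit positions (illustration only, not the DWC2 header definitions):

#include <stdbool.h>
#include <stdint.h>

#define INT_DISCONNECT (1u << 29) // assumed bit position, for illustration only
#define PORT_CONNECTED (1u << 0)

// returns true when the stack should be told the device is gone
static bool disconnect_pending(volatile uint32_t* gintsts, volatile uint32_t* hprt) {
  if (*gintsts & INT_DISCONNECT) {
    *gintsts = INT_DISCONNECT;              // write 1 to clear the status bit
    return (*hprt & PORT_CONNECTED) == 0u;  // still connected -> ignore the glitch
  }
  return false;
}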
#if CFG_TUH_DWC2_SLAVE_ENABLE
// RxFIFO non-empty interrupt handling
if (gintsts & GINTSTS_RXFLVL) {

View File

@@ -29,6 +29,7 @@
#if CFG_TUH_ENABLED && CFG_TUSB_MCU == OPT_MCU_NONE
#include "host/hcd.h"
#include "host/usbh.h"
//--------------------------------------------------------------------+
// Controller API
@@ -106,6 +107,11 @@ void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
// Open an endpoint
bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const * ep_desc) {
(void) rhport; (void) dev_addr; (void) ep_desc;
// NOTE: ep_desc is allocated on the stack when called from usbh_edpt_control_open().
// You need to copy the data into a local variable that maintains the state of the endpoint and transfer.
// See _hcd_data in hcd_dwc2.c for an example.
return false;
}
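A minimal sketch of the copy the note above asks for, assuming a driver-private table (`my_endpoint_t`, `my_ep_table` and `my_edpt_open` are hypothetical names, not TinyUSB APIs):

typedef struct {
  uint8_t  dev_addr;
  uint8_t  ep_addr;
  uint8_t  xfer_type;
  uint16_t max_packet_size;
  bool     in_use;
} my_endpoint_t; // hypothetical driver-private endpoint state

static my_endpoint_t my_ep_table[16];

static bool my_edpt_open(uint8_t dev_addr, tusb_desc_endpoint_t const* ep_desc) {
  for (uint8_t i = 0; i < TU_ARRAY_SIZE(my_ep_table); i++) {
    if (!my_ep_table[i].in_use) {
      // copy everything needed out of the (possibly stack-allocated) descriptor
      my_ep_table[i].in_use          = true;
      my_ep_table[i].dev_addr        = dev_addr;
      my_ep_table[i].ep_addr         = ep_desc->bEndpointAddress;
      my_ep_table[i].xfer_type       = ep_desc->bmAttributes.xfer;
      my_ep_table[i].max_packet_size = tu_edpt_packet_size(ep_desc);
      return true;
    }
  }
  return false; // no free slot
}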

View File

@@ -35,7 +35,7 @@
#define TUSB_VERSION_REVISION 0
#define TUSB_VERSION_NUMBER (TUSB_VERSION_MAJOR * 10000 + TUSB_VERSION_MINOR * 100 + TUSB_VERSION_REVISION)
#define TUSB_VERSION_STRING TU_STRING(TUSB_VERSION_MAJOR) "." TU_STRING(TUSB_VERSION_MINOR) "." TU_STRING(TUSB_VERSION_REVISION)
#define TUSB_VERSION_STRING TU_XSTRING(TUSB_VERSION_MAJOR) "." TU_XSTRING(TUSB_VERSION_MINOR) "." TU_XSTRING(TUSB_VERSION_REVISION)
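The switch to TU_XSTRING matters because a single-level stringize pastes the macro name, not its value; the two-level form expands the argument first. A generic illustration (STR_/XSTR_ are local names, not the TinyUSB macros):

#define STR_(x)  #x       // stringize as-is:  STR_(MAJOR_)  -> "MAJOR_"
#define XSTR_(x) STR_(x)  // expand first:     XSTR_(MAJOR_) -> "0"
#define MAJOR_   0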
//--------------------------------------------------------------------+
// Supported MCUs