dwc2: refactor bitfields.

Signed-off-by: HiFiPhile <admin@hifiphile.com>
Author: HiFiPhile
Date:   2025-04-09 01:31:16 +02:00
commit 084c0802c3
parent 1be4171d2a

4 changed files with 643 additions and 606 deletions
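The change is mechanical throughout: instead of going through the volatile bitfield views of registers (dwc2->ghwcfg2_bm.num_dev_ep, channel->hcsplt_bm.split_compl = 1, ...), each register is first snapshotted into a plain local union with {.value = reg}, fields are read or modified on that copy, and for read-modify-write the whole word is stored back through .value (see handle_bus_reset: dcfg.address = 0; dwc2->dcfg = dcfg.value;). A minimal sketch of the idiom follows, using a made-up register layout rather than the driver's real dwc2_*_t unions (which live in its register-type header, not shown here):

// Sketch only: hypothetical register layout, not the driver's dwc2_dcfg_t.
#include <stdint.h>
#include <stdio.h>

typedef union {
  uint32_t value;          // raw register word
  struct {
    uint32_t address : 7;  // illustrative fields only
    uint32_t rsvd    : 25;
  };
} fake_dcfg_t;

static volatile uint32_t fake_dcfg_reg = 0x55u; // stand-in for dwc2->dcfg

int main(void) {
  // Read: one volatile load, then plain field access on the local copy.
  const fake_dcfg_t snap = {.value = fake_dcfg_reg};
  printf("address = %u\n", (unsigned) snap.address);

  // Read-modify-write: change the copy, then store the whole word back once.
  fake_dcfg_t dcfg = {.value = fake_dcfg_reg};
  dcfg.address = 0;
  fake_dcfg_reg = dcfg.value;
  printf("register = 0x%08x\n", (unsigned) fake_dcfg_reg);
  return 0;
}

With this idiom every hardware access is an explicit 32-bit load or store of the full register, and several field reads can share a single register read instead of each going through a volatile bitfield member.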

Changed file 1 of 4:

@@ -44,7 +44,7 @@
 #if TU_CHECK_MCU(OPT_MCU_GD32VF103)
 #define DWC2_EP_COUNT(_dwc2) DWC2_EP_MAX
 #else
-#define DWC2_EP_COUNT(_dwc2) ((_dwc2)->ghwcfg2_bm.num_dev_ep + 1)
+#define DWC2_EP_COUNT(_dwc2) ({const dwc2_ghwcfg2_t ghwcfg2 = {.value = (_dwc2)->ghwcfg2}; ghwcfg2.num_dev_ep + 1;})
 #endif
 //--------------------------------------------------------------------+

@@ -102,7 +102,8 @@ bool dcd_dcache_clean_invalidate(const void* addr, uint32_t data_size) {
 TU_ATTR_ALWAYS_INLINE static inline bool dma_device_enabled(const dwc2_regs_t* dwc2) {
   (void) dwc2;
   // Internal DMA only
-  return CFG_TUD_DWC2_DMA_ENABLE && dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
+  const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
+  return CFG_TUD_DWC2_DMA_ENABLE && ghwcfg2.arch == GHWCFG2_ARCH_INTERNAL_DMA;
 }
 static void dma_setup_prepare(uint8_t rhport) {

@@ -250,20 +251,15 @@ static void edpt_activate(uint8_t rhport, const tusb_desc_endpoint_t* p_endpoint
   xfer->interval = p_endpoint_desc->bInterval;
   // Endpoint control
-  union {
-    uint32_t value;
-    dwc2_depctl_t bm;
-  } depctl;
-  depctl.value = 0;
-  depctl.bm.mps = xfer->max_size;
-  depctl.bm.active = 1;
-  depctl.bm.type = p_endpoint_desc->bmAttributes.xfer;
+  dwc2_depctl_t depctl = {.value = 0};
+  depctl.mps = xfer->max_size;
+  depctl.active = 1;
+  depctl.type = p_endpoint_desc->bmAttributes.xfer;
   if (p_endpoint_desc->bmAttributes.xfer != TUSB_XFER_ISOCHRONOUS) {
-    depctl.bm.set_data0_iso_even = 1;
+    depctl.set_data0_iso_even = 1;
   }
   if (dir == TUSB_DIR_IN) {
-    depctl.bm.tx_fifo_num = epnum;
+    depctl.tx_fifo_num = epnum;
   }
   dwc2_dep_t* dep = &dwc2->ep[dir == TUSB_DIR_IN ? 0 : 1][epnum];
@@ -343,31 +339,22 @@ static void edpt_schedule_packets(uint8_t rhport, const uint8_t epnum, const uin
   }
   // transfer size: A full OUT transfer (multiple packets, possibly) triggers XFRC.
-  union {
-    uint32_t value;
-    dwc2_ep_tsize_t bm;
-  } deptsiz;
-  deptsiz.value = 0;
-  deptsiz.bm.xfer_size = total_bytes;
-  deptsiz.bm.packet_count = num_packets;
+  dwc2_ep_tsize_t deptsiz = {.value = 0};
+  deptsiz.xfer_size = total_bytes;
+  deptsiz.packet_count = num_packets;
   dep->tsiz = deptsiz.value;
   // control
-  union {
-    dwc2_depctl_t bm;
-    uint32_t value;
-  } depctl;
-  depctl.value = dep->ctl;
-  depctl.bm.clear_nak = 1;
-  depctl.bm.enable = 1;
-  if (depctl.bm.type == DEPCTL_EPTYPE_ISOCHRONOUS && xfer->interval == 1) {
-    const uint32_t odd_now = (dwc2->dsts_bm.frame_number & 1u);
+  dwc2_depctl_t depctl = {.value = dep->ctl};
+  depctl.clear_nak = 1;
+  depctl.enable = 1;
+  if (depctl.type == DEPCTL_EPTYPE_ISOCHRONOUS && xfer->interval == 1) {
+    const dwc2_dsts_t dsts = {.value = dwc2->dsts};
+    const uint32_t odd_now = dsts.frame_number & 1u;
     if (odd_now) {
-      depctl.bm.set_data0_iso_even = 1;
+      depctl.set_data0_iso_even = 1;
     } else {
-      depctl.bm.set_data1_iso_odd = 1;
+      depctl.set_data1_iso_odd = 1;
     }
   }

@@ -410,7 +397,8 @@ bool dcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
     // XCVRDLY: transceiver delay between xcvr_sel and txvalid during device chirp is required
     // when using with some PHYs such as USB334x (USB3341, USB3343, USB3346, USB3347)
-    if (dwc2->ghwcfg2_bm.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
+    const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
+    if (ghwcfg2.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
      dcfg |= DCFG_XCVRDLY;
     }
   } else {
@@ -671,7 +659,9 @@ static void handle_bus_reset(uint8_t rhport) {
   dfifo_device_init(rhport);
   // 5. Reset device address
-  dwc2->dcfg_bm.address = 0;
+  dwc2_dcfg_t dcfg = {.value = dwc2->dcfg};
+  dcfg.address = 0;
+  dwc2->dcfg = dcfg.value;
   // Fixed both control EP0 size to 64 bytes
   dwc2->epin[0].ctl &= ~(0x03 << DIEPCTL_MPSIZ_Pos);

@@ -691,8 +681,9 @@ static void handle_bus_reset(uint8_t rhport) {
 static void handle_enum_done(uint8_t rhport) {
   dwc2_regs_t *dwc2 = DWC2_REG(rhport);
+  const dwc2_dsts_t dsts = {.value = dwc2->dsts};
   tusb_speed_t speed;
-  switch (dwc2->dsts_bm.enum_speed) {
+  switch (dsts.enum_speed) {
     case DCFG_SPEED_HIGH:
       speed = TUSB_SPEED_HIGH;
       break;

@@ -737,12 +728,12 @@ static void handle_rxflvl_irq(uint8_t rhport) {
   const volatile uint32_t* rx_fifo = dwc2->fifo[0];
   // Pop control word off FIFO
-  const dwc2_grxstsp_t grxstsp_bm = dwc2->grxstsp_bm;
-  const uint8_t epnum = grxstsp_bm.ep_ch_num;
+  const dwc2_grxstsp_t grxstsp = {.value = dwc2->grxstsp};
+  const uint8_t epnum = grxstsp.ep_ch_num;
   dwc2_dep_t* epout = &dwc2->epout[epnum];
-  switch (grxstsp_bm.packet_status) {
+  switch (grxstsp.packet_status) {
     case GRXSTS_PKTSTS_GLOBAL_OUT_NAK:
       // Global OUT NAK: do nothing
       break;

@@ -764,7 +755,7 @@ static void handle_rxflvl_irq(uint8_t rhport) {
     case GRXSTS_PKTSTS_RX_DATA: {
       // Out packet received
-      const uint16_t byte_count = grxstsp_bm.byte_count;
+      const uint16_t byte_count = grxstsp.byte_count;
       xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, TUSB_DIR_OUT);
       if (byte_count) {

@@ -778,7 +769,8 @@ static void handle_rxflvl_irq(uint8_t rhport) {
       // short packet, minus remaining bytes (xfer_size)
       if (byte_count < xfer->max_size) {
-        xfer->total_len -= epout->tsiz_bm.xfer_size;
+        const dwc2_ep_tsize_t tsiz = {.value = epout->tsiz};
+        xfer->total_len -= tsiz.xfer_size;
         if (epnum == 0) {
           xfer->total_len -= _dcd_data.ep0_pending[TUSB_DIR_OUT];
           _dcd_data.ep0_pending[TUSB_DIR_OUT] = 0;
@@ -840,11 +832,13 @@ static void handle_epin_slave(uint8_t rhport, uint8_t epnum, dwc2_diepint_t diep
   // - 64 bytes or
   // - Half/Empty of TX FIFO size (configured by GAHBCFG.TXFELVL)
   if (diepint_bm.txfifo_empty && (dwc2->diepempmsk & (1 << epnum))) {
-    const uint16_t remain_packets = epin->tsiz_bm.packet_count;
+    dwc2_ep_tsize_t tsiz = {.value = epin->tsiz};
+    const uint16_t remain_packets = tsiz.packet_count;
     // Process every single packet (only whole packets can be written to fifo)
     for (uint16_t i = 0; i < remain_packets; i++) {
-      const uint16_t remain_bytes = (uint16_t) epin->tsiz_bm.xfer_size;
+      tsiz.value = epin->tsiz;
+      const uint16_t remain_bytes = (uint16_t) tsiz.xfer_size;
       const uint16_t xact_bytes = tu_min16(remain_bytes, xfer->max_size);
       // Check if dtxfsts has enough space available

@@ -863,7 +857,8 @@ static void handle_epin_slave(uint8_t rhport, uint8_t epnum, dwc2_diepint_t diep
     }
     // Turn off TXFE if all bytes are written.
-    if (epin->tsiz_bm.xfer_size == 0) {
+    tsiz.value = epin->tsiz;
+    if (tsiz.xfer_size == 0) {
       dwc2->diepempmsk &= ~(1 << epnum);
     }
   }

@@ -894,7 +889,8 @@ static void handle_epout_dma(uint8_t rhport, uint8_t epnum, dwc2_doepint_t doepi
   xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, TUSB_DIR_OUT);
   // determine actual received bytes
-  const uint16_t remain = epout->tsiz_bm.xfer_size;
+  const dwc2_ep_tsize_t tsiz = {.value = epout->tsiz};
+  const uint16_t remain = tsiz.xfer_size;
   xfer->total_len -= remain;
   // this is ZLP, so prepare EP0 for next setup
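Besides the union snapshots, DWC2_EP_COUNT above (and DWC2_CHANNEL_COUNT in the host driver further down) now rely on a GNU statement expression: a ({ ... }) block is an expression whose value is its last statement, which is what lets a macro declare a local register snapshot and still be used as a plain expression. A self-contained sketch of the construct, again with an invented field layout (GCC/Clang extension, not strict ISO C):

// Sketch only: illustrates the ({ ... }) form, not the driver's real macro.
#include <stdint.h>
#include <stdio.h>

typedef union {
  uint32_t value;
  struct {
    uint32_t num_dev_ep : 4;  // illustrative field only
    uint32_t rsvd       : 28;
  };
} fake_ghwcfg2_t;

// The block's value is its last expression, so the macro can hold a
// temporary snapshot of the register word before computing the count.
#define FAKE_EP_COUNT(_reg) \
  ({ const fake_ghwcfg2_t ghwcfg2 = {.value = (_reg)}; ghwcfg2.num_dev_ep + 1; })

int main(void) {
  const uint32_t reg = 0x7u;                      // stand-in for dwc2->ghwcfg2
  printf("ep count = %d\n", FAKE_EP_COUNT(reg));  // prints 8
  return 0;
}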

Changed file 2 of 4:

@@ -88,11 +88,13 @@ static void phy_fs_init(dwc2_regs_t* dwc2) {
 static void phy_hs_init(dwc2_regs_t* dwc2) {
   uint32_t gusbcfg = dwc2->gusbcfg;
+  const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
+  const dwc2_ghwcfg4_t ghwcfg4 = {.value = dwc2->ghwcfg4};
   // De-select FS PHY
   gusbcfg &= ~GUSBCFG_PHYSEL;
-  if (dwc2->ghwcfg2_bm.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
+  if (ghwcfg2.hs_phy_type == GHWCFG2_HSPHY_ULPI) {
     TU_LOG(DWC2_COMMON_DEBUG, "Highspeed ULPI PHY init\r\n");
     // Select ULPI PHY (external)

@@ -116,7 +118,7 @@ static void phy_hs_init(dwc2_regs_t* dwc2) {
     gusbcfg &= ~GUSBCFG_ULPI_UTMI_SEL;
     // Set 16-bit interface if supported
-    if (dwc2->ghwcfg4_bm.phy_data_width) {
+    if (ghwcfg4.phy_data_width) {
       gusbcfg |= GUSBCFG_PHYIF16; // 16 bit
     } else {
       gusbcfg &= ~GUSBCFG_PHYIF16; // 8 bit

@@ -127,7 +129,7 @@ static void phy_hs_init(dwc2_regs_t* dwc2) {
   dwc2->gusbcfg = gusbcfg;
   // mcu specific phy init
-  dwc2_phy_init(dwc2, dwc2->ghwcfg2_bm.hs_phy_type);
+  dwc2_phy_init(dwc2, ghwcfg2.hs_phy_type);
   // Reset core after selecting PHY
   reset_core(dwc2);

@@ -136,11 +138,11 @@ static void phy_hs_init(dwc2_regs_t* dwc2) {
   // - 9 if using 8-bit PHY interface
   // - 5 if using 16-bit PHY interface
   gusbcfg &= ~GUSBCFG_TRDT_Msk;
-  gusbcfg |= (dwc2->ghwcfg4_bm.phy_data_width ? 5u : 9u) << GUSBCFG_TRDT_Pos;
+  gusbcfg |= (ghwcfg4.phy_data_width ? 5u : 9u) << GUSBCFG_TRDT_Pos;
   dwc2->gusbcfg = gusbcfg;
   // MCU specific PHY update post reset
-  dwc2_phy_update(dwc2, dwc2->ghwcfg2_bm.hs_phy_type);
+  dwc2_phy_update(dwc2, ghwcfg2.hs_phy_type);
 }
 static bool check_dwc2(dwc2_regs_t* dwc2) {

@@ -171,7 +173,7 @@ static bool check_dwc2(dwc2_regs_t* dwc2) {
 //--------------------------------------------------------------------
 bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, tusb_role_t role) {
   (void)dwc2;
+  const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
 #if CFG_TUD_ENABLED
   if (role == TUSB_ROLE_DEVICE && !TUD_OPT_HIGH_SPEED) {
     return false;

@@ -183,7 +185,7 @@ bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, tusb_role_t role) {
   }
 #endif
-  return dwc2->ghwcfg2_bm.hs_phy_type != GHWCFG2_HSPHY_NOT_SUPPORTED;
+  return ghwcfg2.hs_phy_type != GHWCFG2_HSPHY_NOT_SUPPORTED;
 }
 /* dwc2 has several PHYs option

Changed file 3 of 4: diff suppressed because it is too large.

Changed file 4 of 4:

@@ -44,7 +44,7 @@
 #endif
 #define DWC2_CHANNEL_COUNT_MAX 16 // absolute max channel count
-#define DWC2_CHANNEL_COUNT(_dwc2) tu_min8((_dwc2)->ghwcfg2_bm.num_host_ch + 1, DWC2_CHANNEL_COUNT_MAX)
+#define DWC2_CHANNEL_COUNT(_dwc2) ({const dwc2_ghwcfg2_t ghwcfg2 = {.value = (_dwc2)->ghwcfg2}; tu_min8(ghwcfg2.num_host_ch + 1, DWC2_CHANNEL_COUNT_MAX);})
 TU_VERIFY_STATIC(CFG_TUH_DWC2_ENDPOINT_MAX <= 255, "currently only use 8-bit for index");

@@ -118,7 +118,8 @@ hcd_data_t _hcd_data;
 //--------------------------------------------------------------------
 TU_ATTR_ALWAYS_INLINE static inline tusb_speed_t hprt_speed_get(dwc2_regs_t* dwc2) {
   tusb_speed_t speed;
-  switch(dwc2->hprt_bm.speed) {
+  const dwc2_hprt_t hprt = {.value = dwc2->hprt};
+  switch(hprt.speed) {
     case HPRT_SPEED_HIGH: speed = TUSB_SPEED_HIGH; break;
     case HPRT_SPEED_FULL: speed = TUSB_SPEED_FULL; break;
     case HPRT_SPEED_LOW : speed = TUSB_SPEED_LOW ; break;

@@ -133,7 +134,8 @@ TU_ATTR_ALWAYS_INLINE static inline tusb_speed_t hprt_speed_get(dwc2_regs_t* dwc
 TU_ATTR_ALWAYS_INLINE static inline bool dma_host_enabled(const dwc2_regs_t* dwc2) {
   (void) dwc2;
   // Internal DMA only
-  return CFG_TUH_DWC2_DMA_ENABLE && dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
+  const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
+  return CFG_TUH_DWC2_DMA_ENABLE && ghwcfg2.arch == GHWCFG2_ARCH_INTERNAL_DMA;
 }
 #if CFG_TUH_MEM_DCACHE_ENABLE

@@ -168,15 +170,18 @@ TU_ATTR_ALWAYS_INLINE static inline uint8_t channel_alloc(dwc2_regs_t* dwc2) {
 }
 // Check if is periodic (interrupt/isochronous)
-TU_ATTR_ALWAYS_INLINE static inline bool edpt_is_periodic(uint8_t ep_type) {
-  return ep_type == HCCHAR_EPTYPE_INTERRUPT || ep_type == HCCHAR_EPTYPE_ISOCHRONOUS;
+TU_ATTR_ALWAYS_INLINE static inline bool channel_is_periodic(uint32_t hcchar) {
+  const dwc2_channel_char_t hcchar_bm = {.value = hcchar};
+  return hcchar_bm.ep_type == HCCHAR_EPTYPE_INTERRUPT || hcchar_bm.ep_type == HCCHAR_EPTYPE_ISOCHRONOUS;
 }
 TU_ATTR_ALWAYS_INLINE static inline uint8_t req_queue_avail(const dwc2_regs_t* dwc2, bool is_period) {
   if (is_period) {
-    return dwc2->hptxsts_bm.req_queue_available;
+    const dwc2_hptxsts_t hptxsts = {.value = dwc2->hptxsts};
+    return hptxsts.req_queue_available;
   } else {
-    return dwc2->hnptxsts_bm.req_queue_available;
+    const dwc2_hnptxsts_t hnptxsts = {.value = dwc2->hnptxsts};
+    return hnptxsts.req_queue_available;
   }
 }

@@ -188,7 +193,7 @@ TU_ATTR_ALWAYS_INLINE static inline void channel_dealloc(dwc2_regs_t* dwc2, uint
 TU_ATTR_ALWAYS_INLINE static inline bool channel_disable(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
   // disable also require request queue
-  TU_ASSERT(req_queue_avail(dwc2, edpt_is_periodic(channel->hcchar_bm.ep_type)));
+  TU_ASSERT(req_queue_avail(dwc2, channel_is_periodic(channel->hcchar)));
   channel->hcintmsk |= HCINT_HALTED;
   channel->hcchar |= HCCHAR_CHDIS | HCCHAR_CHENA; // must set both CHDIS and CHENA
   return true;

@@ -196,7 +201,7 @@ TU_ATTR_ALWAYS_INLINE static inline bool channel_disable(const dwc2_regs_t* dwc2
 // attempt to send IN token to receive data
 TU_ATTR_ALWAYS_INLINE static inline bool channel_send_in_token(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
-  TU_ASSERT(req_queue_avail(dwc2, edpt_is_periodic(channel->hcchar_bm.ep_type)));
+  TU_ASSERT(req_queue_avail(dwc2, channel_is_periodic(channel->hcchar)));
   channel->hcchar |= HCCHAR_CHENA;
   return true;
 }

@@ -206,8 +211,8 @@ TU_ATTR_ALWAYS_INLINE static inline uint8_t channel_find_enabled(dwc2_regs_t* dw
   const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
   for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
     if (_hcd_data.xfer[ch_id].allocated) {
-      const dwc2_channel_char_t hcchar_bm = dwc2->channel[ch_id].hcchar_bm;
-      if (hcchar_bm.dev_addr == dev_addr && hcchar_bm.ep_num == ep_num && (ep_num == 0 || hcchar_bm.ep_dir == ep_dir)) {
+      const dwc2_channel_char_t hcchar = {.value = dwc2->channel[ch_id].hcchar};
+      if (hcchar.dev_addr == dev_addr && hcchar.ep_num == ep_num && (ep_num == 0 || hcchar.ep_dir == ep_dir)) {
        return ch_id;
       }
     }

@@ -304,12 +309,13 @@ TU_ATTR_ALWAYS_INLINE static inline uint8_t cal_next_pid(uint8_t pid, uint8_t pa
 static void dfifo_host_init(uint8_t rhport) {
   const dwc2_controller_t* dwc2_controller = &_dwc2_controller[rhport];
   dwc2_regs_t* dwc2 = DWC2_REG(rhport);
+  const dwc2_ghwcfg2_t ghwcfg2 = {.value = dwc2->ghwcfg2};
   // Scatter/Gather DMA mode is not yet supported. Buffer DMA only need 1 words per channel
   const bool is_dma = dma_host_enabled(dwc2);
   uint16_t dfifo_top = dwc2_controller->ep_fifo_size/4;
   if (is_dma) {
-    dfifo_top -= dwc2->ghwcfg2_bm.num_host_ch;
+    dfifo_top -= ghwcfg2.num_host_ch;
   }
   // fixed allocation for now, improve later:

@@ -319,7 +325,7 @@ static void dfifo_host_init(uint8_t rhport) {
   uint32_t ptx_largest = is_highspeed ? TUSB_EPSIZE_ISO_HS_MAX/4 : 256/4;
   uint16_t nptxfsiz = 2 * nptx_largest;
-  uint16_t rxfsiz = 2 * (ptx_largest + 2) + dwc2->ghwcfg2_bm.num_host_ch;
+  uint16_t rxfsiz = 2 * (ptx_largest + 2) + ghwcfg2.num_host_ch;
   TU_ASSERT(dfifo_top >= (nptxfsiz + rxfsiz),);
   uint16_t ptxfsiz = dfifo_top - (nptxfsiz + rxfsiz);
@@ -509,10 +515,11 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t*
 // clean up channel after part of transfer is done but the whole urb is not complete
 static void channel_xfer_out_wrapup(dwc2_regs_t* dwc2, uint8_t ch_id) {
   hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
-  dwc2_channel_t* channel = &dwc2->channel[ch_id];
+  const dwc2_channel_t* channel = &dwc2->channel[ch_id];
   hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
-  edpt->next_pid = channel->hctsiz_bm.pid; // save PID
+  const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
+  edpt->next_pid = hctsiz.pid; // save PID
   /* Since hctsiz.xfersize field reflects the number of bytes transferred via the AHB, not the USB)
    * For IN: we can use hctsiz.xfersize as remaining bytes.

@@ -520,9 +527,10 @@ static void channel_xfer_out_wrapup(dwc2_regs_t* dwc2, uint8_t ch_id) {
    * number of packets that have been transferred via the USB. This is always an integral number of packets if the
    * transfer was halted before its normal completion.
    */
-  const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
-  const uint16_t total_packets = cal_packet_count(edpt->buflen, channel->hcchar_bm.ep_size);
-  const uint16_t actual_bytes = (total_packets - remain_packets) * channel->hcchar_bm.ep_size;
+  const uint16_t remain_packets = hctsiz.packet_count;
+  const dwc2_channel_char_t hcchar = {.value = channel->hcchar};
+  const uint16_t total_packets = cal_packet_count(edpt->buflen, hcchar.ep_size);
+  const uint16_t actual_bytes = (total_packets - remain_packets) * hcchar.ep_size;
   xfer->fifo_bytes = 0;
   xfer->xferred_bytes += actual_bytes;

@@ -535,7 +543,7 @@ static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
   hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
   dwc2_channel_char_t* hcchar_bm = &edpt->hcchar_bm;
   dwc2_channel_t* channel = &dwc2->channel[ch_id];
-  bool const is_period = edpt_is_periodic(hcchar_bm->ep_type);
+  bool const is_period = channel_is_periodic(hcchar_bm->ep_type);
   // clear previous state
   xfer->fifo_bytes = 0;

@@ -548,12 +556,15 @@ static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
   // hctsiz: zero length packet still count as 1
   const uint16_t packet_count = cal_packet_count(edpt->buflen, hcchar_bm->ep_size);
-  uint32_t hctsiz = (edpt->next_pid << HCTSIZ_PID_Pos) | (packet_count << HCTSIZ_PKTCNT_Pos) | edpt->buflen;
+  dwc2_channel_tsize_t hctsiz = {.value = 0};
+  hctsiz.pid = edpt->next_pid; // next PID is set in transfer complete interrupt
+  hctsiz.packet_count = packet_count;
+  hctsiz.xfer_size = edpt->buflen;
   if (edpt->do_ping && edpt->speed == TUSB_SPEED_HIGH &&
       edpt->next_pid != HCTSIZ_PID_SETUP && hcchar_bm->ep_dir == TUSB_DIR_OUT) {
-    hctsiz |= HCTSIZ_DOPING;
+    hctsiz.do_ping = 1;
   }
-  channel->hctsiz = hctsiz;
+  channel->hctsiz = hctsiz.value;
   edpt->do_ping = 0;
   // pre-calculate next PID based on packet count, adjusted in transfer complete interrupt if short packet

@@ -699,13 +710,16 @@ static void channel_xfer_in_retry(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
   dwc2_channel_t* channel = &dwc2->channel[ch_id];
   hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
-  if (edpt_is_periodic(channel->hcchar_bm.ep_type)){
+  if (channel_is_periodic(channel->hcchar)){
+    const dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
     // retry immediately for periodic split NYET if we haven't reach max retry
-    if (channel->hcsplt_bm.split_en && channel->hcsplt_bm.split_compl && (hcint & HCINT_NYET || xfer->halted_nyet)) {
+    if (hcsplt.split_en && hcsplt.split_compl && (hcint & HCINT_NYET || xfer->halted_nyet)) {
      xfer->period_split_nyet_count++;
       xfer->halted_nyet = 0;
       if (xfer->period_split_nyet_count < HCD_XFER_PERIOD_SPLIT_NYET_MAX) {
-        channel->hcchar_bm.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
+        dwc2_channel_char_t hcchar = {.value = channel->hcchar};
+        hcchar.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
+        channel->hcchar = hcchar.value;
         channel_send_in_token(dwc2, channel);
         return;
       } else {

@@ -715,7 +729,8 @@ static void channel_xfer_in_retry(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
     }
     // for periodic, de-allocate channel, enable SOF set frame counter for later transfer
-    edpt->next_pid = channel->hctsiz_bm.pid; // save PID
+    const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
+    edpt->next_pid = hctsiz.pid; // save PID
     edpt->uframe_countdown = edpt->uframe_interval;
     dwc2->gintmsk |= GINTSTS_SOF;

@@ -756,13 +771,13 @@ static void handle_rxflvl_irq(uint8_t rhport) {
   dwc2_regs_t* dwc2 = DWC2_REG(rhport);
   // Pop control word off FIFO
-  const dwc2_grxstsp_t grxstsp_bm = dwc2->grxstsp_bm;
-  const uint8_t ch_id = grxstsp_bm.ep_ch_num;
+  const dwc2_grxstsp_t grxstsp = {.value= dwc2->grxstsp};
+  const uint8_t ch_id = grxstsp.ep_ch_num;
-  switch (grxstsp_bm.packet_status) {
+  switch (grxstsp.packet_status) {
     case GRXSTS_PKTSTS_RX_DATA: {
       // In packet received, pop this entry --> ACK interrupt
-      const uint16_t byte_count = grxstsp_bm.byte_count;
+      const uint16_t byte_count = grxstsp.byte_count;
       hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
       TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX,);
       hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];

@@ -796,25 +811,26 @@ static void handle_rxflvl_irq(uint8_t rhport) {
 // return true if there is still pending data and need more ISR
 static bool handle_txfifo_empty(dwc2_regs_t* dwc2, bool is_periodic) {
   // Use period txsts for both p/np to get request queue space available (1-bit difference, it is small enough)
-  volatile dwc2_hptxsts_t* txsts_bm = (volatile dwc2_hptxsts_t*) (is_periodic ? &dwc2->hptxsts : &dwc2->hnptxsts);
+  const dwc2_hptxsts_t txsts = {.value = (is_periodic ? dwc2->hptxsts : dwc2->hnptxsts)};
   const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
   for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
     dwc2_channel_t* channel = &dwc2->channel[ch_id];
+    const dwc2_channel_char_t hcchar = {.value = channel->hcchar};
     // skip writing to FIFO if channel is expecting halted.
-    if (!(channel->hcintmsk & HCINT_HALTED) && (channel->hcchar_bm.ep_dir == TUSB_DIR_OUT)) {
+    if (!(channel->hcintmsk & HCINT_HALTED) && (hcchar.ep_dir == TUSB_DIR_OUT)) {
       hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
       TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
       hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
-      const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
+      const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
+      const uint16_t remain_packets = hctsiz.packet_count;
       for (uint16_t i = 0; i < remain_packets; i++) {
         const uint16_t remain_bytes = edpt->buflen - xfer->fifo_bytes;
-        const uint16_t xact_bytes = tu_min16(remain_bytes, channel->hcchar_bm.ep_size);
+        const uint16_t xact_bytes = tu_min16(remain_bytes, hcchar.ep_size);
         // skip if there is not enough space in FIFO and RequestQueue.
         // Packet's last word written to FIFO will trigger a request queue
-        if ((xact_bytes > (txsts_bm->fifo_available << 2)) || (txsts_bm->req_queue_available == 0)) {
+        if ((xact_bytes > (txsts.fifo_available << 2)) || (txsts.req_queue_available == 0)) {
           return true;
         }
@@ -831,23 +847,27 @@ static bool handle_channel_in_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t h
   hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
   dwc2_channel_t* channel = &dwc2->channel[ch_id];
   hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
+  const dwc2_channel_char_t hcchar = {.value = channel->hcchar};
+  dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
+  const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
   bool is_done = false;
-  // if (channel->hcsplt_bm.split_en) {
+  // if (hcsplt.split_en) {
   //   if (edpt->hcchar_bm.ep_num == 1) {
-  //     TU_LOG1("Frame %u, ch %u: ep %u, hcint 0x%04lX ", dwc2->hfnum_bm.num, ch_id, channel->hcchar_bm.ep_num, hcint);
+  //     TU_LOG1("Frame %u, ch %u: ep %u, hcint 0x%04lX ", dwc2->hfnum_bm.num, ch_id, hcsplt.ep_num, hcint);
   //     print_hcint(hcint);
   //   }
   if (hcint & HCINT_XFER_COMPLETE) {
     if (edpt->hcchar_bm.ep_num != 0) {
-      edpt->next_pid = channel->hctsiz_bm.pid; // save pid (already toggled)
+      edpt->next_pid = hctsiz.pid; // save pid (already toggled)
     }
-    const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
-    if (channel->hcsplt_bm.split_en && remain_packets && xfer->fifo_bytes == edpt->hcchar_bm.ep_size) {
+    const uint16_t remain_packets = hctsiz.packet_count;
+    if (hcsplt.split_en && remain_packets && xfer->fifo_bytes == edpt->hcchar_bm.ep_size) {
       // Split can only complete 1 transaction (up to 1 packet) at a time, schedule more
-      channel->hcsplt_bm.split_compl = 0;
+      hcsplt.split_compl = 0;
+      channel->hcsplt = hcsplt.value;
     } else {
       xfer->result = XFER_RESULT_SUCCESS;
     }

@@ -866,34 +886,38 @@ static bool handle_channel_in_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t h
     channel_disable(dwc2, channel);
   } else if (hcint & HCINT_NYET) {
     // restart complete split
-    channel->hcsplt_bm.split_compl = 1;
+    hcsplt.split_compl = 1;
+    channel->hcsplt = hcsplt.value;
     xfer->halted_nyet = 1;
     channel_disable(dwc2, channel);
   } else if (hcint & HCINT_NAK) {
     // NAK received, re-enable channel if request queue is available
-    if (channel->hcsplt_bm.split_en) {
-      channel->hcsplt_bm.split_compl = 0; // restart with start-split
+    if (hcsplt.split_en) {
+      hcsplt.split_compl = 0; // restart with start-split
+      channel->hcsplt = hcsplt.value;
     }
     channel_disable(dwc2, channel);
   } else if (hcint & HCINT_ACK) {
     xfer->err_count = 0;
-    if (channel->hcsplt_bm.split_en) {
-      if (!channel->hcsplt_bm.split_compl) {
+    if (hcsplt.split_en) {
+      if (!hcsplt.split_compl) {
        // start split is ACK --> do complete split
        channel->hcintmsk |= HCINT_NYET;
-        channel->hcsplt_bm.split_compl = 1;
+        hcsplt.split_compl = 1;
+        channel->hcsplt = hcsplt.value;
        channel_send_in_token(dwc2, channel);
      } else {
        // do nothing for complete split with DATA, this will trigger XferComplete and handled there
      }
    } else {
      // ACK with data
-      const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
+      const uint16_t remain_packets = hctsiz.packet_count;
      if (remain_packets) {
        // still more packet to receive, also reset to start split
-        channel->hcsplt_bm.split_compl = 0;
+        hcsplt.split_compl = 0;
+        channel->hcsplt = hcsplt.value;
        channel_send_in_token(dwc2, channel);
      }
    }

@@ -922,6 +946,7 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
   hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
   dwc2_channel_t* channel = &dwc2->channel[ch_id];
   hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
+  dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
   bool is_done = false;
   if (hcint & HCINT_XFER_COMPLETE) {

@@ -933,9 +958,10 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
     channel_disable(dwc2, channel);
   } else if (hcint & HCINT_NYET) {
     xfer->err_count = 0;
-    if (channel->hcsplt_bm.split_en) {
+    if (hcsplt.split_en) {
       // retry complete split
-      channel->hcsplt_bm.split_compl = 1;
+      hcsplt.split_compl = 1;
+      channel->hcsplt = hcsplt.value;
       channel->hcchar |= HCCHAR_CHENA;
     } else {
       edpt->do_ping = 1;

@@ -968,9 +994,10 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
   } else if (hcint & HCINT_ACK) {
     xfer->err_count = 0;
     channel->hcintmsk &= ~HCINT_ACK;
-    if (channel->hcsplt_bm.split_en && !channel->hcsplt_bm.split_compl) {
+    if (hcsplt.split_en && !hcsplt.split_compl) {
       // start split is ACK --> do complete split
-      channel->hcsplt_bm.split_compl = 1;
+      hcsplt.split_compl = 1;
+      channel->hcsplt = hcsplt.value;
       channel->hcchar |= HCCHAR_CHENA;
     }
   }
@@ -989,6 +1016,9 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
   hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
   dwc2_channel_t* channel = &dwc2->channel[ch_id];
   hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
+  dwc2_channel_char_t hcchar = {.value = channel->hcchar};
+  dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
+  const dwc2_channel_tsize_t hctsiz = {.value = channel->hctsiz};
   bool is_done = false;

@@ -996,8 +1026,8 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
   if (hcint & HCINT_HALTED) {
     if (hcint & (HCINT_XFER_COMPLETE | HCINT_STALL | HCINT_BABBLE_ERR)) {
-      const uint16_t remain_bytes = (uint16_t) channel->hctsiz_bm.xfer_size;
-      const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
+      const uint16_t remain_bytes = (uint16_t) hctsiz.xfer_size;
+      const uint16_t remain_packets = hctsiz.packet_count;
       const uint16_t actual_len = edpt->buflen - remain_bytes;
       xfer->xferred_bytes += actual_len;

@@ -1007,13 +1037,14 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
        xfer->result = XFER_RESULT_STALLED;
      } else if (hcint & HCINT_BABBLE_ERR) {
        xfer->result = XFER_RESULT_FAILED;
-      } else if (channel->hcsplt_bm.split_en && remain_packets && actual_len == edpt->hcchar_bm.ep_size) {
+      } else if (hcsplt.split_en && remain_packets && actual_len == hcchar.ep_size) {
        // Split can only complete 1 transaction (up to 1 packet) at a time, schedule more
        is_done = false;
        edpt->buffer += actual_len;
        edpt->buflen -= actual_len;
-        channel->hcsplt_bm.split_compl = 0;
+        hcsplt.split_compl = 0;
+        channel->hcsplt = hcsplt.value;
        channel_xfer_in_retry(dwc2, ch_id, hcint);
      } else {
        xfer->result = XFER_RESULT_SUCCESS;

@@ -1028,33 +1059,38 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
      xfer->result = XFER_RESULT_FAILED;
    } else {
      channel->hcintmsk |= HCINT_ACK | HCINT_NAK | HCINT_DATATOGGLE_ERR;
-      channel->hcsplt_bm.split_compl = 0;
+      hcsplt.split_compl = 0;
+      channel->hcsplt = hcsplt.value;
      channel_xfer_in_retry(dwc2, ch_id, hcint);
    }
  } else if (hcint & HCINT_NYET) {
    // Must handle nyet before nak or ack. Could get a nyet at the same time as either of those on a BULK/CONTROL
    // OUT that started with a PING. The nyet takes precedence.
-    if (channel->hcsplt_bm.split_en) {
+    if (hcsplt.split_en) {
      // split not yet mean hub has no data, retry complete split
-      channel->hcsplt_bm.split_compl = 1;
+      hcsplt.split_compl = 1;
+      channel->hcsplt = hcsplt.value;
      channel_xfer_in_retry(dwc2, ch_id, hcint);
    }
  } else if (hcint & HCINT_ACK) {
    xfer->err_count = 0;
    channel->hcintmsk &= ~HCINT_ACK;
-    if (channel->hcsplt_bm.split_en) {
+    if (hcsplt.split_en) {
      // start split is ACK --> do complete split
      // TODO: for ISO must use xact_pos to plan complete split based on microframe (up to 187.5 bytes/uframe)
-      channel->hcsplt_bm.split_compl = 1;
-      if (edpt_is_periodic(channel->hcchar_bm.ep_type)) {
-        channel->hcchar_bm.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
+      hcsplt.split_compl = 1;
+      channel->hcsplt = hcsplt.value;
+      if (channel_is_periodic(channel->hcchar)) {
+        hcchar.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
+        channel->hcchar = hcchar.value;
      }
      channel_send_in_token(dwc2, channel);
    }
  } else if (hcint & (HCINT_NAK | HCINT_DATATOGGLE_ERR)) {
    xfer->err_count = 0;
    channel->hcintmsk &= ~(HCINT_NAK | HCINT_DATATOGGLE_ERR);
-    channel->hcsplt_bm.split_compl = 0; // restart with start-split
+    hcsplt.split_compl = 0; // restart with start-split
+    channel->hcsplt = hcsplt.value;
    channel_xfer_in_retry(dwc2, ch_id, hcint);
  } else if (hcint & HCINT_FARME_OVERRUN) {
    // retry start-split in next binterval

@@ -1069,6 +1105,8 @@ static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hc
   hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
   dwc2_channel_t* channel = &dwc2->channel[ch_id];
   hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
+  const dwc2_channel_char_t hcchar = {.value = channel->hcchar};
+  dwc2_channel_split_t hcsplt = {.value = channel->hcsplt};
   bool is_done = false;

@@ -1104,16 +1142,18 @@ static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hc
      }
    }
  } else if (hcint & HCINT_NYET) {
-    if (channel->hcsplt_bm.split_en && channel->hcsplt_bm.split_compl) {
+    if (hcsplt.split_en && hcsplt.split_compl) {
      // split not yet mean hub has no data, retry complete split
-      channel->hcsplt_bm.split_compl = 1;
+      hcsplt.split_compl = 1;
+      channel->hcsplt = hcsplt.value;
      channel->hcchar |= HCCHAR_CHENA;
    }
  } else if (hcint & HCINT_ACK) {
    xfer->err_count = 0;
-    if (channel->hcsplt_bm.split_en && !channel->hcsplt_bm.split_compl) {
+    if (hcsplt.split_en && !hcsplt.split_compl) {
      // start split is ACK --> do complete split
-      channel->hcsplt_bm.split_compl = 1;
+      hcsplt.split_compl = 1;
+      channel->hcsplt = hcsplt.value;
      channel->hcchar |= HCCHAR_CHENA;
    }
  }
@@ -1136,7 +1176,7 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
      dwc2_channel_t* channel = &dwc2->channel[ch_id];
      hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
      TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX,);
-      dwc2_channel_char_t hcchar_bm = channel->hcchar_bm;
+      dwc2_channel_char_t hcchar = {.value = channel->hcchar};
      const uint32_t hcint = channel->hcint;
      channel->hcint = hcint; // clear interrupt

@@ -1144,7 +1184,7 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
      bool is_done = false;
      if (is_dma) {
        #if CFG_TUH_DWC2_DMA_ENABLE
-        if (hcchar_bm.ep_dir == TUSB_DIR_OUT) {
+        if (hcchar.ep_dir == TUSB_DIR_OUT) {
          is_done = handle_channel_out_dma(dwc2, ch_id, hcint);
        } else {
          is_done = handle_channel_in_dma(dwc2, ch_id, hcint);

@@ -1156,7 +1196,7 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
        #endif
      } else {
        #if CFG_TUH_DWC2_SLAVE_ENABLE
-        if (hcchar_bm.ep_dir == TUSB_DIR_OUT) {
+        if (hcchar.ep_dir == TUSB_DIR_OUT) {
          is_done = handle_channel_out_slave(dwc2, ch_id, hcint);
        } else {
          is_done = handle_channel_in_slave(dwc2, ch_id, hcint);

@@ -1165,8 +1205,8 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
      }
      if (is_done) {
-        const uint8_t ep_addr = tu_edpt_addr(hcchar_bm.ep_num, hcchar_bm.ep_dir);
-        hcd_event_xfer_complete(hcchar_bm.dev_addr, ep_addr, xfer->xferred_bytes, xfer->result, in_isr);
+        const uint8_t ep_addr = tu_edpt_addr(hcchar.ep_num, hcchar.ep_dir);
+        hcd_event_xfer_complete(hcchar.dev_addr, ep_addr, xfer->xferred_bytes, (xfer_result_t)xfer->result, in_isr);
        channel_dealloc(dwc2, ch_id);
      }
    }

@@ -1185,7 +1225,7 @@ static bool handle_sof_irq(uint8_t rhport, bool in_isr) {
   for(uint8_t ep_id = 0; ep_id < CFG_TUH_DWC2_ENDPOINT_MAX; ep_id++) {
     hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
-    if (edpt->hcchar_bm.enable && edpt_is_periodic(edpt->hcchar_bm.ep_type) && edpt->uframe_countdown > 0) {
+    if (edpt->hcchar_bm.enable && channel_is_periodic(edpt->hcchar) && edpt->uframe_countdown > 0) {
       edpt->uframe_countdown -= tu_min32(ucount, edpt->uframe_countdown);
       if (edpt->uframe_countdown == 0) {
         if (!edpt_xfer_kickoff(dwc2, ep_id)) {

@@ -1204,10 +1244,10 @@ static bool handle_sof_irq(uint8_t rhport, bool in_isr) {
 static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
   uint32_t hcfg = dwc2->hcfg & ~HCFG_FSLS_PHYCLK_SEL;
-  const dwc2_gusbcfg_t gusbcfg_bm = dwc2->gusbcfg_bm;
+  const dwc2_gusbcfg_t gusbcfg = {.value = dwc2->gusbcfg};
   uint32_t phy_clock;
-  if (gusbcfg_bm.phy_sel) {
+  if (gusbcfg.phy_sel) {
     phy_clock = 48; // dedicated FS is 48Mhz
     if (speed == TUSB_SPEED_LOW) {
       hcfg |= HCFG_FSLS_PHYCLK_SEL_6MHZ;

@@ -1215,11 +1255,11 @@ static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
       hcfg |= HCFG_FSLS_PHYCLK_SEL_48MHZ;
     }
   } else {
-    if (gusbcfg_bm.ulpi_utmi_sel) {
+    if (gusbcfg.ulpi_utmi_sel) {
       phy_clock = 60; // ULPI 8-bit is 60Mhz
     } else {
       // UTMI+ 16-bit is 30Mhz, 8-bit is 60Mhz
-      phy_clock = gusbcfg_bm.phy_if16 ? 30 : 60;
+      phy_clock = gusbcfg.phy_if16 ? 30 : 60;
       // Enable UTMI+ low power mode 48Mhz external clock if not highspeed
       if (speed == TUSB_SPEED_HIGH) {

@@ -1252,7 +1292,7 @@ static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
 static void handle_hprt_irq(uint8_t rhport, bool in_isr) {
   dwc2_regs_t* dwc2 = DWC2_REG(rhport);
   uint32_t hprt = dwc2->hprt & ~HPRT_W1_MASK;
-  const dwc2_hprt_t hprt_bm = dwc2->hprt_bm;
+  const dwc2_hprt_t hprt_bm = {.value = hprt};
   if (dwc2->hprt & HPRT_CONN_DETECT) {
     // Port Connect Detect