Merge pull request #2543 from IngHK/max3421_nak_retry
MAX3421E NAK retry handling next frame
@@ -30,6 +30,7 @@

#include <stdatomic.h>
#include "host/hcd.h"
#include "host/usbh.h"

//--------------------------------------------------------------------+
//
@@ -166,6 +167,17 @@ enum {
  DEFAULT_HIEN = HIRQ_CONDET_IRQ | HIRQ_FRAME_IRQ | HIRQ_HXFRDN_IRQ | HIRQ_RCVDAV_IRQ
};

enum {
  MAX_NAK_DEFAULT = 1 // Number of NAK per endpoint per usb frame
};

enum {
  EP_STATE_IDLE = 0,
  EP_STATE_COMPLETE = 1,
  EP_STATE_ATTEMPT_1 = 2, // pending 1st attempt
  EP_STATE_ATTEMPT_MAX = 15
};

//--------------------------------------------------------------------+
//
//--------------------------------------------------------------------+
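The 4-bit state field introduced by this change doubles as a per-frame attempt counter. A minimal sketch of how it can be read, assuming the EP_STATE_* values above and the _max_nak limit defined further down in this diff (the helper names are illustrative, not part of the driver):

#include <stdbool.h>
#include <stdint.h>

// Illustrative helpers only. EP_STATE_ATTEMPT_1 marks a queued transfer waiting for its
// first attempt; every NAK received in the current frame bumps the state by one, capped
// at EP_STATE_ATTEMPT_MAX so the value always fits the 4-bit field.
static inline unsigned attempt_number(uint8_t state) {
  return (state >= EP_STATE_ATTEMPT_1) ? (unsigned) (state - EP_STATE_ATTEMPT_1 + 1u) : 0u;
}

// True while the endpoint may still be tried in this frame; max_nak == 0 means
// "retry forever", matching the is_ep_pending() check later in this diff.
static inline bool attempts_left(uint8_t state, uint8_t max_nak) {
  return (max_nak == 0) || (state < EP_STATE_ATTEMPT_1 + max_nak);
}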
@@ -173,18 +185,21 @@ enum {
typedef struct {
  uint8_t daddr;

  struct TU_ATTR_PACKED {
    uint8_t ep_dir : 1;
    uint8_t is_iso : 1;
    uint8_t is_setup : 1;
    uint8_t data_toggle : 1;
    uint8_t xfer_pending : 1;
    uint8_t xfer_complete : 1;
  union { ;
    struct TU_ATTR_PACKED {
      uint8_t ep_num : 4;
      uint8_t is_setup : 1;
      uint8_t is_out : 1;
      uint8_t is_iso : 1;
    }hxfr_bm;

    uint8_t hxfr;
  };

  struct TU_ATTR_PACKED {
    uint8_t ep_num : 4;
    uint16_t packet_size : 12;
    uint8_t state : 4;
    uint8_t data_toggle : 1;
    uint16_t packet_size : 11;
  };

  uint16_t total_len;
@@ -195,6 +210,8 @@ typedef struct {
TU_VERIFY_STATIC(sizeof(max3421_ep_t) == 12, "size is not correct");

typedef struct {
  volatile uint16_t frame_count;

  // cached register
  uint8_t sndbc;
  uint8_t hirq;
@@ -204,18 +221,20 @@ typedef struct {
  uint8_t hxfr;

  atomic_flag busy; // busy transferring
  volatile uint16_t frame_count;

  max3421_ep_t ep[CFG_TUH_MAX3421_ENDPOINT_TOTAL]; // [0] is reserved for addr0

  OSAL_MUTEX_DEF(spi_mutexdef);
#if OSAL_MUTEX_REQUIRED
  OSAL_MUTEX_DEF(spi_mutexdef);
  osal_mutex_t spi_mutex;
#endif

  max3421_ep_t ep[CFG_TUH_MAX3421_ENDPOINT_TOTAL]; // [0] is reserved for addr0
} max3421_data_t;

static max3421_data_t _hcd_data;

// max NAK before giving up in a frame. 0 means infinite NAKs
static uint8_t _max_nak = MAX_NAK_DEFAULT;

//--------------------------------------------------------------------+
// API: SPI transfer with MAX3421E
// - spi_cs_api(), spi_xfer_api(), int_api(): must be implemented by application
@@ -304,7 +323,6 @@ static void fifo_write(uint8_t rhport, uint8_t reg, uint8_t const * buffer, uint
  tuh_max3421_spi_xfer_api(rhport, buffer, NULL, len);

  max3421_spi_unlock(rhport, in_isr);

}

static void fifo_read(uint8_t rhport, uint8_t * buffer, uint16_t len, bool in_isr) {
@@ -359,10 +377,11 @@ TU_ATTR_ALWAYS_INLINE static inline void sndbc_write(uint8_t rhport, uint8_t dat
//--------------------------------------------------------------------+

static max3421_ep_t* find_ep_not_addr0(uint8_t daddr, uint8_t ep_num, uint8_t ep_dir) {
  uint8_t const is_out = 1-ep_dir;
  for(size_t i=1; i<CFG_TUH_MAX3421_ENDPOINT_TOTAL; i++) {
    max3421_ep_t* ep = &_hcd_data.ep[i];
    // for control endpoint, skip direction check
    if (daddr == ep->daddr && ep_num == ep->ep_num && (ep_dir == ep->ep_dir || ep_num == 0)) {
    // control endpoint is bi-direction (skip check)
    if (daddr == ep->daddr && ep_num == ep->hxfr_bm.ep_num && (ep_num == 0 || is_out == ep->hxfr_bm.is_out)) {
      return ep;
    }
  }
@@ -393,14 +412,23 @@ static void free_ep(uint8_t daddr) {
  }
}
// Check if endpoint has a queued transfer and has not reached max NAK
TU_ATTR_ALWAYS_INLINE static inline bool is_ep_pending(max3421_ep_t const * ep) {
  uint8_t const state = ep->state;
  return ep->packet_size && (state >= EP_STATE_ATTEMPT_1) &&
         (_max_nak == 0 || state < EP_STATE_ATTEMPT_1 + _max_nak);
}

// Find the next pending endpoint using round-robin scheduling, starting from next endpoint.
// return NULL if not found
// TODO respect interrupt endpoint's interval
static max3421_ep_t * find_next_pending_ep(max3421_ep_t * cur_ep) {
  size_t const idx = (size_t) (cur_ep - _hcd_data.ep);

  // starting from next endpoint
  for (size_t i = idx + 1; i < CFG_TUH_MAX3421_ENDPOINT_TOTAL; i++) {
    max3421_ep_t* ep = &_hcd_data.ep[i];
    if (ep->xfer_pending && ep->packet_size) {
      // TU_LOG3("next pending i = %u\r\n", i);
    if (is_ep_pending(ep)) {
      return ep;
    }
  }
@@ -408,8 +436,7 @@ static max3421_ep_t * find_next_pending_ep(max3421_ep_t * cur_ep) {
  // wrap around including current endpoint
  for (size_t i = 0; i <= idx; i++) {
    max3421_ep_t* ep = &_hcd_data.ep[i];
    if (ep->xfer_pending && ep->packet_size) {
      // TU_LOG3("next pending i = %u\r\n", i);
    if (is_ep_pending(ep)) {
      return ep;
    }
  }
@@ -424,10 +451,11 @@ static max3421_ep_t * find_next_pending_ep(max3421_ep_t * cur_ep) {
// optional hcd configuration, called by tuh_configure()
bool hcd_configure(uint8_t rhport, uint32_t cfg_id, const void* cfg_param) {
  (void) rhport;
  (void) cfg_id;
  (void) cfg_param;
  TU_VERIFY(cfg_id == TUH_CFGID_MAX3421);

  return false;
  tuh_configure_param_t const* cfg = (tuh_configure_param_t const*) cfg_param;
  _max_nak = cfg->max3421.max_nak;
  return true;
}
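With this change hcd_configure() actually consumes TUH_CFGID_MAX3421, so the per-frame NAK budget becomes tunable from the application. A possible usage sketch based on the parameter struct referenced above; the port number, the value 3, and calling it before tuh_init() are illustrative assumptions rather than requirements stated by this commit:

#include "tusb.h"

// Hypothetical application-side init: raise the per-frame NAK budget for the MAX3421E host port.
void app_usb_host_init(void) {
  tuh_configure_param_t cfg = {
    .max3421 = { .max_nak = 3 } // up to 3 NAKs per endpoint per USB frame; 0 = infinite NAKs
  };
  tuh_configure(0, TUH_CFGID_MAX3421, &cfg);
  tuh_init(0);
}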
// Initialize controller to host mode
@@ -438,6 +466,7 @@ bool hcd_init(uint8_t rhport) {

  TU_LOG2_INT(sizeof(max3421_ep_t));
  TU_LOG2_INT(sizeof(max3421_data_t));
  TU_LOG2_INT(offsetof(max3421_data_t, ep));

  tu_memclr(&_hcd_data, sizeof(_hcd_data));
  _hcd_data.peraddr = 0xff; // invalid
@@ -557,7 +586,6 @@ void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
// Open an endpoint
bool hcd_edpt_open(uint8_t rhport, uint8_t daddr, tusb_desc_endpoint_t const * ep_desc) {
  (void) rhport;
  (void) daddr;

  uint8_t const ep_num = tu_edpt_number(ep_desc->bEndpointAddress);
  tusb_dir_t const ep_dir = tu_edpt_dir(ep_desc->bEndpointAddress);
@@ -569,12 +597,9 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t daddr, tusb_desc_endpoint_t const * e
    ep = allocate_ep();
    TU_ASSERT(ep);
    ep->daddr = daddr;
    ep->ep_num = (uint8_t) (ep_num & 0x0f);
    ep->ep_dir = (ep_dir == TUSB_DIR_IN) ? 1 : 0;
  }

  if ( TUSB_XFER_ISOCHRONOUS == ep_desc->bmAttributes.xfer ) {
    ep->is_iso = 1;
  ep->hxfr_bm.ep_num = (uint8_t) (ep_num & 0x0f);
  ep->hxfr_bm.is_out = (ep_dir == TUSB_DIR_OUT) ? 1 : 0;
  ep->hxfr_bm.is_iso = (TUSB_XFER_ISOCHRONOUS == ep_desc->bmAttributes.xfer) ? 1 : 0;
  }

  ep->packet_size = (uint16_t) (tu_edpt_packet_size(ep_desc) & 0x7ff);
@@ -582,7 +607,7 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t daddr, tusb_desc_endpoint_t const * e
  return true;
}

void xact_out(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
static void xact_out(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
  // Page 12: Programming BULK-OUT Transfers
  // TODO double buffered
  if (switch_ep) {
@@ -598,12 +623,10 @@ void xact_out(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
    fifo_write(rhport, SNDFIFO_ADDR, ep->buf, xact_len, in_isr);
  }
  sndbc_write(rhport, xact_len, in_isr);

  uint8_t const hxfr = (uint8_t ) (ep->ep_num | HXFR_OUT_NIN | (ep->is_iso ? HXFR_ISO : 0));
  hxfr_write(rhport, hxfr, in_isr);
  hxfr_write(rhport, ep->hxfr, in_isr);
}

void xact_in(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
static void xact_in(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
  // Page 13: Programming BULK-IN Transfers
  if (switch_ep) {
    peraddr_write(rhport, ep->daddr, in_isr);
@@ -612,34 +635,36 @@ void xact_in(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
    reg_write(rhport, HCTL_ADDR, hctl, in_isr);
  }

  uint8_t const hxfr = (uint8_t) (ep->ep_num | (ep->is_iso ? HXFR_ISO : 0));
  hxfr_write(rhport, hxfr, in_isr);
  hxfr_write(rhport, ep->hxfr, in_isr);
}

TU_ATTR_ALWAYS_INLINE static inline
void xact_inout(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
  if (ep->ep_num == 0 ) {
static void xact_setup(uint8_t rhport, max3421_ep_t *ep, bool in_isr) {
  peraddr_write(rhport, ep->daddr, in_isr);
  fifo_write(rhport, SUDFIFO_ADDR, ep->buf, 8, in_isr);
  hxfr_write(rhport, HXFR_SETUP, in_isr);
}

static void xact_generic(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool in_isr) {
  if (ep->hxfr_bm.ep_num == 0 ) {
    // setup
    if (ep->is_setup) {
      peraddr_write(rhport, ep->daddr, in_isr);
      fifo_write(rhport, SUDFIFO_ADDR, ep->buf, 8, in_isr);
      hxfr_write(rhport, HXFR_SETUP, in_isr);
    if (ep->hxfr_bm.is_setup) {
      xact_setup(rhport, ep, in_isr);
      return;
    }

    // status
    if (ep->buf == NULL || ep->total_len == 0) {
      uint8_t const hxfr = HXFR_HS | (ep->ep_dir ? 0 : HXFR_OUT_NIN);
      uint8_t const hxfr = HXFR_HS | (ep->hxfr_bm.is_out ? HXFR_OUT_NIN : 0);
      peraddr_write(rhport, ep->daddr, in_isr);
      hxfr_write(rhport, hxfr, in_isr);
      return;
    }
  }

  if (ep->ep_dir) {
    xact_in(rhport, ep, switch_ep, in_isr);
  }else {
  if (ep->hxfr_bm.is_out) {
    xact_out(rhport, ep, switch_ep, in_isr);
  }else {
    xact_in(rhport, ep, switch_ep, in_isr);
  }
  }

@@ -652,24 +677,21 @@ bool hcd_edpt_xfer(uint8_t rhport, uint8_t daddr, uint8_t ep_addr, uint8_t * buf
  TU_VERIFY(ep);

  // control transfer can switch direction
  ep->ep_dir = ep_dir ? 1u : 0u;
  ep->hxfr_bm.is_out = ep_dir ? 0u : 1u;

  ep->buf = buffer;
  ep->total_len = buflen;
  ep->xferred_len = 0;
  ep->xfer_complete = 0;
  ep->xfer_pending = 1;
  ep->state = EP_STATE_ATTEMPT_1;

  if ( ep_num == 0 ) {
    ep->is_setup = 0;
  if (ep_num == 0) {
    ep->hxfr_bm.is_setup = 0;
    ep->data_toggle = 1;
  }

  // carry out transfer if not busy
  if ( !atomic_flag_test_and_set(&_hcd_data.busy) ) {
    xact_inout(rhport, ep, true, false);
  } else {
    return true;
  if (!atomic_flag_test_and_set(&_hcd_data.busy)) {
    xact_generic(rhport, ep, true, false);
  }

  return true;
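The atomic busy flag is what defers a transfer instead of dropping it: the endpoint bookkeeping above (xfer_pending = 1, state = EP_STATE_ATTEMPT_1) happens unconditionally, so a transfer that cannot start right away is still picked up later by find_next_pending_ep(). A standalone sketch of that pattern, with names that are illustrative rather than taken from the driver:

#include <stdatomic.h>
#include <stdbool.h>

// One-bit scheduler lock, mirroring the role of _hcd_data.busy in this diff.
static atomic_flag bus_busy = ATOMIC_FLAG_INIT;

// Returns true when the caller acquired the bus and must start the transaction itself;
// false when another transaction is in flight and the queued endpoint will be serviced
// from the completion/NAK path or at the next FRAME IRQ.
static bool try_start_now(void) {
  return !atomic_flag_test_and_set(&bus_busy);
}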
@@ -692,17 +714,16 @@ bool hcd_setup_send(uint8_t rhport, uint8_t daddr, uint8_t const setup_packet[8]
  max3421_ep_t* ep = find_opened_ep(daddr, 0, 0);
  TU_ASSERT(ep);

  ep->ep_dir = 0;
  ep->is_setup = 1;
  ep->hxfr_bm.is_out = 1;
  ep->hxfr_bm.is_setup = 1;
  ep->buf = (uint8_t*)(uintptr_t) setup_packet;
  ep->total_len = 8;
  ep->xferred_len = 0;
  ep->xfer_complete = 0;
  ep->xfer_pending = 1;
  ep->state = EP_STATE_ATTEMPT_1;

  // carry out transfer if not busy
  if ( !atomic_flag_test_and_set(&_hcd_data.busy) ) {
    xact_inout(rhport, ep, true, false);
  if (!atomic_flag_test_and_set(&_hcd_data.busy)) {
    xact_setup(rhport, ep, false);
  }

  return true;
@@ -767,22 +788,23 @@ static void handle_connect_irq(uint8_t rhport, bool in_isr) {
}

static void xfer_complete_isr(uint8_t rhport, max3421_ep_t *ep, xfer_result_t result, uint8_t hrsl, bool in_isr) {
  uint8_t const ep_addr = tu_edpt_addr(ep->ep_num, ep->ep_dir);
  uint8_t const ep_dir = 1-ep->hxfr_bm.is_out;
  uint8_t const ep_addr = tu_edpt_addr(ep->hxfr_bm.ep_num, ep_dir);

  // save data toggle
  if (ep->ep_dir) {
  if (ep_dir) {
    ep->data_toggle = (hrsl & HRSL_RCVTOGRD) ? 1u : 0u;
  }else {
    ep->data_toggle = (hrsl & HRSL_SNDTOGRD) ? 1u : 0u;
  }

  ep->xfer_pending = 0;
  ep->state = EP_STATE_IDLE;
  hcd_event_xfer_complete(ep->daddr, ep_addr, ep->xferred_len, result, in_isr);

  // Find next pending endpoint
  max3421_ep_t *next_ep = find_next_pending_ep(ep);
  max3421_ep_t * next_ep = find_next_pending_ep(ep);
  if (next_ep) {
    xact_inout(rhport, next_ep, true, in_isr);
    xact_generic(rhport, next_ep, true, in_isr);
  }else {
    // no more pending
    atomic_flag_clear(&_hcd_data.busy);
@@ -812,20 +834,23 @@ static void handle_xfer_done(uint8_t rhport, bool in_isr) {

    case HRSL_NAK:
      if (ep_num == 0) {
        // NAK on control, retry immediately
        // control endpoint -> retry immediately
        hxfr_write(rhport, _hcd_data.hxfr, in_isr);
      }else {
        // NAK on non-control, find next pending to switch
        max3421_ep_t *next_ep = find_next_pending_ep(ep);
      } else {
        if (ep->state < EP_STATE_ATTEMPT_MAX) {
          ep->state++;
        }

        max3421_ep_t * next_ep = find_next_pending_ep(ep);
        if (ep == next_ep) {
          // this endpoint is only one pending, retry immediately
          // this endpoint is only one pending -> retry immediately
          hxfr_write(rhport, _hcd_data.hxfr, in_isr);
        }else if (next_ep) {
          // switch to next pending TODO could have issue with double buffered if not clear previously out data
          xact_inout(rhport, next_ep, true, in_isr);
        }else {
          TU_ASSERT(false,);
        } else if (next_ep) {
          // switch to next pending endpoint TODO could have issue with double buffered if not clear previously out data
          xact_generic(rhport, next_ep, true, in_isr);
        } else {
          // no more pending in this frame -> clear busy
          atomic_flag_clear(&_hcd_data.busy);
        }
      }
      return;
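Outside the switch, the NAK branch above reduces to a small decision table. An illustrative restatement (the per-frame budget is consumed by the state increment before this decision, exactly as in the code above; the helper below is not part of the driver):

#include <stdbool.h>

typedef enum { NAK_RETRY_NOW, NAK_SWITCH_EP, NAK_WAIT_NEXT_FRAME } nak_action_t;

static nak_action_t on_nak(bool is_control, bool self_only_pending, bool other_pending) {
  if (is_control)        return NAK_RETRY_NOW;      // EP0: retry immediately
  if (self_only_pending) return NAK_RETRY_NOW;      // sole pending endpoint: retry immediately
  if (other_pending)     return NAK_SWITCH_EP;      // hand the bus to another pending endpoint
  return NAK_WAIT_NEXT_FRAME;                       // nothing left this frame: clear busy, wait for FRAME IRQ
}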
@@ -847,12 +872,14 @@ static void handle_xfer_done(uint8_t rhport, bool in_isr) {

  if (ep_dir) {
    // IN transfer: fifo data is already received in RCVDAV IRQ
    if ( hxfr_type & HXFR_HS ) {
      ep->xfer_complete = 1;

    // mark control handshake as complete
    if (hxfr_type & HXFR_HS) {
      ep->state = EP_STATE_COMPLETE;
    }

    // short packet or all bytes transferred
    if ( ep->xfer_complete ) {
    if (ep->state == EP_STATE_COMPLETE) {
      xfer_complete_isr(rhport, ep, xfer_result, hrsl, in_isr);
    }else {
      // more to transfer
@@ -886,13 +913,13 @@ static void handle_xfer_done(uint8_t rhport, bool in_isr) {
void print_hirq(uint8_t hirq) {
  TU_LOG3_HEX(hirq);

  if (hirq & HIRQ_HXFRDN_IRQ) TU_LOG3(" HXFRDN");
  if (hirq & HIRQ_FRAME_IRQ) TU_LOG3(" FRAME");
  if (hirq & HIRQ_CONDET_IRQ) TU_LOG3(" CONDET");
  if (hirq & HIRQ_SUSDN_IRQ) TU_LOG3(" SUSDN");
  if (hirq & HIRQ_SNDBAV_IRQ) TU_LOG3(" SNDBAV");
  if (hirq & HIRQ_RCVDAV_IRQ) TU_LOG3(" RCVDAV");
  if (hirq & HIRQ_RWU_IRQ) TU_LOG3(" RWU");
  if (hirq & HIRQ_HXFRDN_IRQ) TU_LOG3(" HXFRDN");
  if (hirq & HIRQ_FRAME_IRQ) TU_LOG3(" FRAME");
  if (hirq & HIRQ_CONDET_IRQ) TU_LOG3(" CONDET");
  if (hirq & HIRQ_SUSDN_IRQ) TU_LOG3(" SUSDN");
  if (hirq & HIRQ_SNDBAV_IRQ) TU_LOG3(" SNDBAV");
  if (hirq & HIRQ_RCVDAV_IRQ) TU_LOG3(" RCVDAV");
  if (hirq & HIRQ_RWU_IRQ) TU_LOG3(" RWU");
  if (hirq & HIRQ_BUSEVENT_IRQ) TU_LOG3(" BUSEVENT");

  TU_LOG3("\r\n");
@@ -909,6 +936,25 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {

  if (hirq & HIRQ_FRAME_IRQ) {
    _hcd_data.frame_count++;

    max3421_ep_t* ep_retry = NULL;

    // reset all endpoints attempt counter
    for (size_t i = 0; i < CFG_TUH_MAX3421_ENDPOINT_TOTAL; i++) {
      max3421_ep_t* ep = &_hcd_data.ep[i];
      if (ep->packet_size && ep->state > EP_STATE_ATTEMPT_1) {
        ep->state = EP_STATE_ATTEMPT_1;

        if (ep_retry == NULL) {
          ep_retry = ep;
        }
      }
    }

    // start usb transfer if not busy
    if (ep_retry != NULL && !atomic_flag_test_and_set(&_hcd_data.busy)) {
      xact_generic(rhport, ep_retry, true, in_isr);
    }
  }
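  // Illustrative timeline (not driver code): what the reset loop above means for an endpoint
  // whose device keeps NAKing, with the default _max_nak of 1:
  //   frame N   : state EP_STATE_ATTEMPT_1 -> transaction -> NAK -> state parked at 3
  //   FRAME IRQ : state reset to EP_STATE_ATTEMPT_1; ep_retry restarts it if the bus is idle
  //   frame N+1 : one more attempt, and so on -- at most _max_nak attempts per 1 ms frame
  //               instead of back-to-back retries saturating the SPI link.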
  if (hirq & HIRQ_CONDET_IRQ) {
@@ -917,17 +963,17 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
  // Queuing more transfers in handle_xfer_done() can cause hirq to be set again while the external IRQ may not
  // catch it and/or call this handler again. So we need to loop until all IRQs are cleared
  while ( hirq & (HIRQ_RCVDAV_IRQ | HIRQ_HXFRDN_IRQ) ) {
    if ( hirq & HIRQ_RCVDAV_IRQ ) {
  while (hirq & (HIRQ_RCVDAV_IRQ | HIRQ_HXFRDN_IRQ)) {
    if (hirq & HIRQ_RCVDAV_IRQ) {
      uint8_t const ep_num = _hcd_data.hxfr & HXFR_EPNUM_MASK;
      max3421_ep_t *ep = find_opened_ep(_hcd_data.peraddr, ep_num, 1);
      max3421_ep_t* ep = find_opened_ep(_hcd_data.peraddr, ep_num, 1);
      uint8_t xact_len = 0;

      // RCVDAV_IRQ can trigger 2 times (dual buffered)
      while ( hirq & HIRQ_RCVDAV_IRQ ) {
      while (hirq & HIRQ_RCVDAV_IRQ) {
        uint8_t rcvbc = reg_read(rhport, RCVBC_ADDR, in_isr);
        xact_len = (uint8_t) tu_min16(rcvbc, ep->total_len - ep->xferred_len);
        if ( xact_len ) {
        if (xact_len) {
          fifo_read(rhport, ep->buf, xact_len, in_isr);
          ep->buf += xact_len;
          ep->xferred_len += xact_len;
@@ -938,12 +984,12 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
        hirq = reg_read(rhport, HIRQ_ADDR, in_isr);
      }

      if ( xact_len < ep->packet_size || ep->xferred_len >= ep->total_len ) {
        ep->xfer_complete = 1;
      if (xact_len < ep->packet_size || ep->xferred_len >= ep->total_len) {
        ep->state = EP_STATE_COMPLETE;
      }
    }

    if ( hirq & HIRQ_HXFRDN_IRQ ) {
    if (hirq & HIRQ_HXFRDN_IRQ) {
      hirq_write(rhport, HIRQ_HXFRDN_IRQ, in_isr);
      handle_xfer_done(rhport, in_isr);
    }