add usbh_spin_lock/unlock(): use spinlock instead of atomic flag for hcd max3421

hathach committed 2025-05-21 15:27:18 +07:00
parent 3a042b37da
commit e41a63c60d
6 changed files with 129 additions and 17 deletions
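
In short, the MAX3421 host driver's busy flag changes from a C11 atomic_flag (claimed with atomic_flag_test_and_set() and released with atomic_flag_clear()) to a plain bool busy_lock that is only touched while holding a new USBH spinlock, exposed to HCD code as usbh_spin_lock()/usbh_spin_unlock() and backed by osal_spin_lock()/osal_spin_unlock(). A minimal sketch of the claim/release pattern the hunks below inline at each call site; the helper names busy_try_acquire()/busy_release() are hypothetical and used only for illustration:

#include <stdbool.h>

// Declarations added by this commit (see the host header hunk below).
void usbh_spin_lock(bool in_isr);
void usbh_spin_unlock(bool in_isr);

static bool busy_lock; // busy transferring (replaces: atomic_flag busy)

// Hypothetical helper: try to claim the bus.
// Returns true when nothing was in flight and the caller may start a transfer now.
static bool busy_try_acquire(bool in_isr) {
  bool has_xfer = false;
  usbh_spin_lock(in_isr);
  if (!busy_lock) {
    busy_lock = true;   // this caller takes ownership
    has_xfer = true;
  }
  usbh_spin_unlock(in_isr);
  return has_xfer;
}

// Hypothetical helper: release the claim once no transfer is pending.
static void busy_release(bool in_isr) {
  usbh_spin_lock(in_isr);
  busy_lock = false;
  usbh_spin_unlock(in_isr);
}

A caller that fails to acquire simply leaves its transfer pending on the endpoint; the transfer-complete path either starts the next pending endpoint or, when none remains, clears busy_lock under the same spinlock.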

View File

@@ -147,6 +147,9 @@ static osal_mutex_t _usbh_mutex;
#define _usbh_mutex NULL
#endif
+// Spinlock for interrupt handler
+static OSAL_SPINLOCK_DEF(_usbh_spin, usbh_int_set);
// Event queue: usbh_int_set() is used as mutex in OS NONE config
OSAL_QUEUE_DEF(usbh_int_set, _usbh_qdef, CFG_TUH_TASK_QUEUE_SZ, hcd_event_t);
static osal_queue_t _usbh_q;
@@ -424,6 +427,8 @@ bool tuh_rhport_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
TU_LOG_INT_USBH(sizeof(tu_fifo_t));
TU_LOG_INT_USBH(sizeof(tu_edpt_stream_t));
+osal_spin_init(&_usbh_spin);
// Event queue
_usbh_q = osal_queue_create(&_usbh_qdef);
TU_ASSERT(_usbh_q != NULL);
@@ -895,6 +900,14 @@ void usbh_int_set(bool enabled) {
}
}
+void usbh_spin_lock(bool in_isr) {
+  osal_spin_lock(&_usbh_spin, in_isr);
+}
+void usbh_spin_unlock(bool in_isr) {
+  osal_spin_unlock(&_usbh_spin, in_isr);
+}
void usbh_defer_func(osal_task_func_t func, void *param, bool in_isr) {
hcd_event_t event = { 0 };
event.event_id = USBH_EVENT_FUNC_CALL;

View File

@@ -71,6 +71,9 @@ void usbh_int_set(bool enabled);
void usbh_defer_func(osal_task_func_t func, void *param, bool in_isr);
+void usbh_spin_lock(bool in_isr);
+void usbh_spin_unlock(bool in_isr);
//--------------------------------------------------------------------+
// USBH Endpoint API
//--------------------------------------------------------------------+

View File

@@ -28,9 +28,9 @@
#if CFG_TUH_ENABLED && defined(CFG_TUH_MAX3421) && CFG_TUH_MAX3421
-#include <stdatomic.h>
#include "host/hcd.h"
#include "host/usbh.h"
#include "host/usbh_pvt.h"
//--------------------------------------------------------------------+
//
@@ -233,7 +233,7 @@ typedef struct {
uint8_t hxfr;
}sndfifo_owner;
-atomic_flag busy; // busy transferring
+bool busy_lock; // busy transferring
#if OSAL_MUTEX_REQUIRED
OSAL_MUTEX_DEF(spi_mutexdef);
@@ -327,7 +327,9 @@ TU_ATTR_ALWAYS_INLINE static inline void mode_write(uint8_t rhport, uint8_t data
}
TU_ATTR_ALWAYS_INLINE static inline void peraddr_write(uint8_t rhport, uint8_t data, bool in_isr) {
-if ( _hcd_data.peraddr == data ) return; // no need to change address
+if (_hcd_data.peraddr == data) {
+  return; // no need to change address
+}
_hcd_data.peraddr = data;
reg_write(rhport, PERADDR_ADDR, data, in_isr);
@@ -373,7 +375,7 @@ TU_ATTR_ALWAYS_INLINE static inline void hwfifo_setup(uint8_t rhport, const uint
static void hwfifo_receive(uint8_t rhport, uint8_t * buffer, uint16_t len, bool in_isr) {
uint8_t hirq;
-uint8_t const reg = RCVVFIFO_ADDR;
+const uint8_t reg = RCVVFIFO_ADDR;
max3421_spi_lock(rhport, in_isr);
@@ -389,7 +391,7 @@ static void hwfifo_receive(uint8_t rhport, uint8_t * buffer, uint16_t len, bool
//--------------------------------------------------------------------+
static max3421_ep_t* find_ep_not_addr0(uint8_t daddr, uint8_t ep_num, uint8_t ep_dir) {
-uint8_t const is_out = 1-ep_dir;
+const uint8_t is_out = 1-ep_dir;
for(size_t i=1; i<CFG_TUH_MAX3421_ENDPOINT_TOTAL; i++) {
max3421_ep_t* ep = &_hcd_data.ep[i];
// control endpoint is bi-direction (skip check)
@@ -727,8 +729,8 @@ static void xact_generic(uint8_t rhport, max3421_ep_t *ep, bool switch_ep, bool
// Submit a transfer, when complete hcd_event_xfer_complete() must be invoked
bool hcd_edpt_xfer(uint8_t rhport, uint8_t daddr, uint8_t ep_addr, uint8_t * buffer, uint16_t buflen) {
-uint8_t const ep_num = tu_edpt_number(ep_addr);
-uint8_t const ep_dir = (uint8_t) tu_edpt_dir(ep_addr);
+const uint8_t ep_num = tu_edpt_number(ep_addr);
+const uint8_t ep_dir = (uint8_t) tu_edpt_dir(ep_addr);
max3421_ep_t* ep = find_opened_ep(daddr, ep_num, ep_dir);
TU_VERIFY(ep);
@@ -744,8 +746,17 @@ bool hcd_edpt_xfer(uint8_t rhport, uint8_t daddr, uint8_t ep_addr, uint8_t * buf
ep->xferred_len = 0;
ep->state = EP_STATE_ATTEMPT_1;
+bool has_xfer = false;
+usbh_spin_lock(false);
+if (!_hcd_data.busy_lock) {
+  _hcd_data.busy_lock = true;
+  has_xfer = true;
+}
+usbh_spin_unlock(false);
// carry out transfer if not busy
-if (!atomic_flag_test_and_set(&_hcd_data.busy)) {
+if (has_xfer) {
xact_generic(rhport, ep, true, false);
}
@@ -781,8 +792,17 @@ bool hcd_setup_send(uint8_t rhport, uint8_t daddr, uint8_t const setup_packet[8]
ep->xferred_len = 0;
ep->state = EP_STATE_ATTEMPT_1;
+bool has_xfer = false;
+usbh_spin_lock(false);
+if (!_hcd_data.busy_lock) {
+  _hcd_data.busy_lock = true;
+  has_xfer = true;
+}
+usbh_spin_unlock(false);
// carry out transfer if not busy
-if (!atomic_flag_test_and_set(&_hcd_data.busy)) {
+if (has_xfer) {
xact_setup(rhport, ep, false);
}
@@ -848,8 +868,8 @@ static void handle_connect_irq(uint8_t rhport, bool in_isr) {
}
static void xfer_complete_isr(uint8_t rhport, max3421_ep_t *ep, xfer_result_t result, uint8_t hrsl, bool in_isr) {
-uint8_t const ep_dir = 1-ep->hxfr_bm.is_out;
-uint8_t const ep_addr = tu_edpt_addr(ep->hxfr_bm.ep_num, ep_dir);
+const uint8_t ep_dir = 1 - ep->hxfr_bm.is_out;
+const uint8_t ep_addr = tu_edpt_addr(ep->hxfr_bm.ep_num, ep_dir);
// save data toggle
if (ep_dir) {
@@ -867,7 +887,9 @@ static void xfer_complete_isr(uint8_t rhport, max3421_ep_t *ep, xfer_result_t re
xact_generic(rhport, next_ep, true, in_isr);
}else {
// no more pending
-atomic_flag_clear(&_hcd_data.busy);
+usbh_spin_lock(in_isr);
+_hcd_data.busy_lock = false;
+usbh_spin_unlock(in_isr);
}
}
@@ -906,7 +928,9 @@ static void handle_xfer_done(uint8_t rhport, bool in_isr) {
xact_generic(rhport, next_ep, true, in_isr);
} else {
// no more pending in this frame -> clear busy
-atomic_flag_clear(&_hcd_data.busy);
+usbh_spin_lock(in_isr);
+_hcd_data.busy_lock = false;
+usbh_spin_unlock(in_isr);
}
return;
@@ -997,8 +1021,8 @@ void print_hirq(uint8_t hirq) {
// Interrupt handler
void hcd_int_handler(uint8_t rhport, bool in_isr) {
uint8_t hirq = reg_read(rhport, HIRQ_ADDR, in_isr) & _hcd_data.hien;
-if (!hirq) return;
-// print_hirq(hirq);
+if (!hirq) { return; }
+// print_hirq(hirq);
if (hirq & HIRQ_FRAME_IRQ) {
_hcd_data.frame_count++;
@@ -1017,8 +1041,19 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
}
// start usb transfer if not busy
-if (ep_retry != NULL && !atomic_flag_test_and_set(&_hcd_data.busy)) {
-  xact_generic(rhport, ep_retry, true, in_isr);
+if (ep_retry != NULL) {
+  bool has_xfer = false;
+  usbh_spin_lock(in_isr);
+  if (!_hcd_data.busy_lock) {
+    _hcd_data.busy_lock = true;
+    has_xfer = true;
+  }
+  usbh_spin_unlock(in_isr);
+  if (has_xfer) {
+    xact_generic(rhport, ep_retry, true, in_isr);
+  }
+}
}