add keyboard led mask
make usbd control request a subtask
add get/set report via control pipe to hidd
enforce soft DMA on the control pipe for lpc11u (lpc17xx not yet)
temporarily add led_blinking_set_interval to change the LED blinking interval
refactor dcd_pipe_control_xfer to take an interrupt-on-complete option
add get/set report support to moused_app and keyboardd_app; a keyboard LED report makes the LED blink faster
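A minimal sketch of the keyboard-LED behaviour described above: how a SET_REPORT carrying the LED bitmask could be mapped to a faster blink via led_blinking_set_interval. The callback name, report layout handling, and interval values are illustrative assumptions, not code from this commit:

    #include <stdint.h>

    void led_blinking_set_interval(uint32_t ms); // helper added by this commit (declaration assumed)

    enum { BLINK_NORMAL_MS = 1000, BLINK_FAST_MS = 250 }; // assumed intervals

    // hypothetical callback invoked when the host sends a keyboard output (LED) report
    void keyboardd_app_set_report_sketch(uint8_t const* p_report, uint16_t length)
    {
      if (length < 1) return;

      uint8_t const led_mask = p_report[0]; // bit0 NumLock, bit1 CapsLock, bit2 ScrollLock

      // any LED lit -> blink faster, otherwise return to the normal interval
      led_blinking_set_interval(led_mask ? BLINK_FAST_MS : BLINK_NORMAL_MS);
    }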
@@ -81,7 +81,7 @@ void dcd_controller_set_address(uint8_t coreid, uint8_t dev_addr);
 void dcd_controller_set_configuration(uint8_t coreid);
 
 //------------- PIPE API -------------//
-tusb_error_t dcd_pipe_control_xfer(uint8_t coreid, tusb_direction_t dir, void * p_buffer, uint16_t length);
+tusb_error_t dcd_pipe_control_xfer(uint8_t coreid, tusb_direction_t dir, void * p_buffer, uint16_t length, bool int_on_complete);
 void dcd_pipe_control_stall(uint8_t coreid);
 
 endpoint_handle_t dcd_pipe_open(uint8_t coreid, tusb_descriptor_endpoint_t const * p_endpoint_desc, uint8_t class_code) ATTR_WARN_UNUSED_RESULT;
@@ -114,7 +114,7 @@ typedef struct {
   }next_td[DCD_11U_13U_QHD_COUNT];
 
   uint32_t current_ioc; ///< interrupt on complete mask for current TD
-  uint32_t next_ioc; ///< interrupt on complete mask for next TD
+  uint32_t next_ioc;    ///< interrupt on complete mask for next TD
 
   // should start from 128
   ATTR_ALIGNED(64) tusb_control_request_t setup_request;
@@ -200,6 +200,61 @@ static void bus_reset(void)
   LPC_USB->INTEN = INT_MASK_DEVICE_STATUS | BIT_(0) | BIT_(1); // enable device status & control endpoints
 }
 
+static void endpoint_non_control_isr(uint32_t int_status)
+{
+  for(uint8_t ep_id = 2; ep_id < DCD_11U_13U_QHD_COUNT; ep_id++ )
+  {
+    if ( BIT_TEST_(int_status, ep_id) )
+    {
+      dcd_11u_13u_qhd_t * const arr_qhd = dcd_data.qhd[ep_id];
+
+      // when double buffering, the complete buffer is opposed to the current active buffer in EPINUSE
+      uint8_t const buff_idx = LPC_USB->EPINUSE & BIT_(ep_id) ? 0 : 1;
+      uint16_t const xferred_bytes = dcd_data.current_td[ep_id].queued_bytes_in_buff[buff_idx] - arr_qhd[buff_idx].total_bytes;
+
+      dcd_data.current_td[ep_id].xferred_total += xferred_bytes;
+
+      // there are still data to transfer.
+      if ( (arr_qhd[buff_idx].total_bytes == 0) && (dcd_data.current_td[ep_id].remaining_bytes > 0) )
+      { // NOTE although buff_addr_offset has been increased when xfer is completed
+        // but we still need to increase it one more as we are using double buffering.
+        queue_xfer_to_buffer(ep_id, buff_idx, arr_qhd[buff_idx].buff_addr_offset+1, dcd_data.current_td[ep_id].remaining_bytes);
+      }
+      // short packet or (no more byte and both buffers are finished)
+      else if ( (arr_qhd[buff_idx].total_bytes > 0) || !arr_qhd[1-buff_idx].active )
+      { // current TD (request) is completed
+        LPC_USB->EPSKIP = BIT_SET_(LPC_USB->EPSKIP, ep_id); // skip other endpoint in case of short-package
+
+        dcd_data.current_td[ep_id].remaining_bytes = 0;
+
+        if ( BIT_TEST_(dcd_data.current_ioc, ep_id) )
+        {
+          endpoint_handle_t edpt_hdl =
+          {
+              .coreid     = 0,
+              .index      = ep_id,
+              .class_code = dcd_data.class_code[ep_id]
+          };
+
+          dcd_data.current_ioc = BIT_CLR_(dcd_data.current_ioc, edpt_hdl.index);
+
+          // TODO no way determine if the transfer is failed or not
+          usbd_xfer_isr(edpt_hdl, TUSB_EVENT_XFER_COMPLETE, dcd_data.current_td[ep_id].xferred_total);
+        }
+
+        //------------- Next TD is available -------------//
+        if ( dcd_data.next_td[ep_id].total_bytes != 0 )
+        {
+          queue_xfer_in_next_td(ep_id);
+        }
+      }else
+      {
+        // transfer complete, there is no more remaining bytes, but this buffer is not the last transaction (the other is)
+      }
+    }
+  }
+}
+
 void dcd_isr(uint8_t coreid)
 {
   (void) coreid;
@@ -248,7 +303,7 @@ void dcd_isr(uint8_t coreid)
   // }
 }
 
-  //------------- Control Endpoint -------------//
+  //------------- Setup Received -------------//
   if ( BIT_TEST_(int_status, 0) && (dev_cmd_stat & CMDSTAT_SETUP_RECEIVED_MASK) )
   { // received control request from host
     // copy setup request & acknowledge so that the next setup can be received by hw
@@ -260,88 +315,73 @@ void dcd_isr(uint8_t coreid)
     LPC_USB->DEVCMDSTAT |= CMDSTAT_SETUP_RECEIVED_MASK;
     dcd_data.qhd[0][1].buff_addr_offset = addr_offset(&dcd_data.setup_request);
   }
+  //------------- Control Endpoint -------------//
   else if ( int_status & 0x03 )
   { // either control endpoints
-    endpoint_handle_t edpt_hdl =
-    {
-        .coreid = coreid,
-        .index  = BIT_TEST_(int_status, 1) ? 1 : 0
-    };
-
-    // FIXME xferred_byte for control xfer is not needed now !!!
-    usbd_xfer_isr(edpt_hdl, TUSB_EVENT_XFER_COMPLETE, 0);
+    uint8_t const ep_id = ( int_status & BIT_(0) ) ? 0 : 1;
+
+    // there are still data to transfer.
+    if ( (dcd_data.qhd[ep_id][0].total_bytes == 0) && (dcd_data.current_td[ep_id].remaining_bytes > 0) )
+    {
+      queue_xfer_to_buffer(ep_id, 0, dcd_data.qhd[ep_id][0].buff_addr_offset, dcd_data.current_td[ep_id].remaining_bytes);
+    }else
+    {
+      dcd_data.current_td[ep_id].remaining_bytes = 0;
+
+      if ( BIT_TEST_(dcd_data.current_ioc, ep_id) )
+      {
+        endpoint_handle_t edpt_hdl =
+        {
+            .coreid = coreid,
+            .index  = 0
+        };
+
+        dcd_data.current_ioc = BIT_CLR_(dcd_data.current_ioc, edpt_hdl.index);
+
+        // FIXME xferred_byte for control xfer is not needed now !!!
+        usbd_xfer_isr(edpt_hdl, TUSB_EVENT_XFER_COMPLETE, 0);
+      }
+    }
   }
 
-  //------------- Non-Control Endpoints -------------//
-  for(uint8_t ep_id = 2; ep_id < DCD_11U_13U_QHD_COUNT; ep_id++ )
-  {
-    if ( BIT_TEST_(int_status, ep_id) )
-    {
-      dcd_11u_13u_qhd_t * const arr_qhd = dcd_data.qhd[ep_id];
-
-      // when double buffering, the complete buffer is opposed to the current active buffer in EPINUSE
-      uint8_t const buff_idx = LPC_USB->EPINUSE & BIT_(ep_id) ? 0 : 1;
-      uint16_t const xferred_bytes = dcd_data.current_td[ep_id].queued_bytes_in_buff[buff_idx] - arr_qhd[buff_idx].total_bytes;
-
-      dcd_data.current_td[ep_id].xferred_total += xferred_bytes;
-
-      // there are still data to transfer.
-      if ( (arr_qhd[buff_idx].total_bytes == 0) && (dcd_data.current_td[ep_id].remaining_bytes > 0) )
-      { // NOTE although buff_addr_offset has been increased when xfer is completed
-        // but we still need to increase it one more as we are using double buffering.
-        queue_xfer_to_buffer(ep_id, buff_idx, arr_qhd[buff_idx].buff_addr_offset+1, dcd_data.current_td[ep_id].remaining_bytes);
-      }
-      // short packet or (no more byte and both buffers are finished)
-      else if ( (arr_qhd[buff_idx].total_bytes > 0) || !arr_qhd[1-buff_idx].active )
-      { // current TD (request) is completed
-        LPC_USB->EPSKIP = BIT_SET_(LPC_USB->EPSKIP, ep_id); // skip other endpoint in case of short-package
-
-        dcd_data.current_td[ep_id].remaining_bytes = 0;
-
-        if ( BIT_TEST_(dcd_data.current_ioc, ep_id) )
-        {
-          endpoint_handle_t edpt_hdl =
-          {
-              .coreid     = coreid,
-              .index      = ep_id,
-              .class_code = dcd_data.class_code[ep_id]
-          };
-
-          dcd_data.current_ioc = BIT_CLR_(dcd_data.current_ioc, edpt_hdl.index);
-
-          // TODO no way determine if the transfer is failed or not
-          usbd_xfer_isr(edpt_hdl, TUSB_EVENT_XFER_COMPLETE, dcd_data.current_td[ep_id].xferred_total);
-        }
-
-        //------------- Next TD is available -------------//
-        if ( dcd_data.next_td[ep_id].total_bytes != 0 )
-        {
-          queue_xfer_in_next_td(ep_id);
-        }
-      }
-    }
-  }
+  //------------- Non-Control Endpoints -------------//
+  endpoint_non_control_isr(int_status);
 }
 
 //--------------------------------------------------------------------+
 // CONTROL PIPE API
 //--------------------------------------------------------------------+
 void dcd_pipe_control_stall(uint8_t coreid)
-{ // TODO cannot able to STALL Control OUT endpoint !!!!!
+{
   (void) coreid;
+
+  // TODO cannot able to STALL Control OUT endpoint !!!!! FIXME try some walk-around
   dcd_data.qhd[0][0].stall = dcd_data.qhd[1][0].stall = 1;
 }
 
-tusb_error_t dcd_pipe_control_xfer(uint8_t coreid, tusb_direction_t dir, void * p_buffer, uint16_t length)
+tusb_error_t dcd_pipe_control_xfer(uint8_t coreid, tusb_direction_t dir, void * p_buffer, uint16_t length, bool int_on_complete)
 {
   (void) coreid;
 
-  uint8_t const ep_id = dir; // IN : 1, OUT = 0
+  // determine Endpoint where Data & Status phase occurred (IN or OUT)
+  uint8_t const ep_data   = (dir == TUSB_DIR_DEV_TO_HOST) ? 1 : 0;
+  uint8_t const ep_status = 1 - ep_data;
 
-  dcd_data.qhd[ep_id][0].buff_addr_offset = (length ? addr_offset(p_buffer) : 0 );
-  dcd_data.qhd[ep_id][0].total_bytes      = length;
-  dcd_data.qhd[ep_id][0].active           = 1 ;
+  dcd_data.current_ioc = int_on_complete ? BIT_SET_(dcd_data.current_ioc, ep_status) : BIT_CLR_(dcd_data.current_ioc, ep_status);
+
+  //------------- Data Phase -------------//
+  if (length)
+  {
+    dcd_data.current_td[ep_data].remaining_bytes = length;
+    dcd_data.current_td[ep_data].xferred_total   = 0;
+
+    queue_xfer_to_buffer(ep_data, 0, addr_offset(p_buffer), length);
+  }
+
+  //------------- Status Phase -------------//
+  dcd_data.current_td[ep_status].remaining_bytes = 0;
+  dcd_data.current_td[ep_status].xferred_total   = 0;
+
+  queue_xfer_to_buffer(ep_status, 0, NULL, 0);
 
   return TUSB_ERROR_NONE;
 }
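A hedged usage sketch of the reworked dcd_pipe_control_xfer() above: the data stage is queued on the EP0 direction given by dir, a zero-length status stage is queued on the opposite EP0 endpoint, and int_on_complete marks the status endpoint in current_ioc so usbd_xfer_isr() fires when the whole transfer finishes. The wrapper function name and buffer are illustrative, not part of this commit:

    // assumed to live next to the USBD/DCD sources, with the pipe API already declared
    static tusb_error_t send_control_in_sketch(uint8_t coreid, void* p_data, uint16_t len)
    {
      // queue data stage on EP0 IN plus a zero-length status on EP0 OUT;
      // completion is reported later through usbd_xfer_isr() because int_on_complete = true
      return dcd_pipe_control_xfer(coreid, TUSB_DIR_DEV_TO_HOST, p_data, len, true);
    }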
@@ -49,13 +49,6 @@
 #include "tusb_descriptors.h" // TODO callback include
 #include "usbd_dcd.h"
 
-// Some MCUs cannot transfer more than 64 bytes each queue, thus require special task-alike treatment
-#if TUSB_CFG_MCU == MCU_LPC11UXX || TUSB_CFG_MCU == MCU_LPC175X_6X
-  #define USBD_CONTROL_ONE_PACKET_EACH_XFER // for each Transfer, cannot queue more than packet size
-  enum {
-    USBD_COTNROL_MAX_LENGTH_EACH_XFER = 64
-  };
-#endif
 //--------------------------------------------------------------------+
 // MACRO CONSTANT TYPEDEF
 //--------------------------------------------------------------------+
@@ -67,33 +60,33 @@ static usbd_class_driver_t const usbd_class_drivers[TUSB_CLASS_MAPPED_INDEX_STAR
 #if DEVICE_CLASS_HID
     [TUSB_CLASS_HID] =
     {
-      .init            = hidd_init,
-      .open            = hidd_open,
-      .control_request = hidd_control_request,
-      .xfer_cb         = hidd_xfer_cb,
-      .close           = hidd_close
+      .init                    = hidd_init,
+      .open                    = hidd_open,
+      .control_request_subtask = hidd_control_request_subtask,
+      .xfer_cb                 = hidd_xfer_cb,
+      .close                   = hidd_close
     },
 #endif
 
 #if TUSB_CFG_DEVICE_MSC
     [TUSB_CLASS_MSC] =
     {
-      .init            = mscd_init,
-      .open            = mscd_open,
-      .control_request = mscd_control_request,
-      .xfer_cb         = mscd_xfer_cb,
-      .close           = mscd_close
+      .init                    = mscd_init,
+      .open                    = mscd_open,
+      .control_request_subtask = mscd_control_request_subtask,
+      .xfer_cb                 = mscd_xfer_cb,
+      .close                   = mscd_close
     },
 #endif
 
 #if TUSB_CFG_DEVICE_CDC
     [TUSB_CLASS_CDC] =
     {
-      .init            = cdcd_init,
-      .open            = cdcd_open,
-      .control_request = cdcd_control_request,
-      .xfer_cb         = cdcd_xfer_cb,
-      .close           = cdcd_close
+      .init                    = cdcd_init,
+      .open                    = cdcd_open,
+      .control_request_subtask = cdcd_control_request_subtask,
+      .xfer_cb                 = cdcd_xfer_cb,
+      .close                   = cdcd_close
     },
 #endif
@@ -151,7 +144,7 @@ OSAL_QUEUE_DEF(usbd_queue_def, USBD_TASK_QUEUE_DEPTH, usbd_task_event_t);
 OSAL_SEM_DEF(usbd_control_xfer_semaphore_def);
 
 static osal_queue_handle_t usbd_queue_hdl;
-static osal_semaphore_handle_t usbd_control_xfer_sem_hdl;
+/*static*/ osal_semaphore_handle_t usbd_control_xfer_sem_hdl; // TODO may need to change to static with wrapper function
 
 tusb_error_t usbd_control_request_subtask(uint8_t coreid, tusb_control_request_t const * const p_request)
 {
@@ -165,29 +158,14 @@ tusb_error_t usbd_control_request_subtask(uint8_t coreid, tusb_control_request_t
   {
     if ( TUSB_REQUEST_GET_DESCRIPTOR == p_request->bRequest )
     {
-      OSAL_VAR uint8_t* p_buffer = NULL;
-      OSAL_VAR uint16_t length = 0;
+      uint8_t* p_buffer = NULL;
+      uint16_t length = 0;
 
       error = get_descriptor_subtask(coreid, p_request, &p_buffer, &length);
 
-      #ifdef USBD_CONTROL_ONE_PACKET_EACH_XFER
-      while ( length > USBD_COTNROL_MAX_LENGTH_EACH_XFER && error == TUSB_ERROR_NONE )
-      {
-        usbd_devices[coreid].is_waiting_control_xfer = true;
-
-        dcd_pipe_control_xfer(coreid, p_request->bmRequestType_bit.direction, p_buffer, USBD_COTNROL_MAX_LENGTH_EACH_XFER); // zero length
-        osal_semaphore_wait(usbd_control_xfer_sem_hdl, OSAL_TIMEOUT_NORMAL, &error);
-
-        length -= USBD_COTNROL_MAX_LENGTH_EACH_XFER;
-        p_buffer += USBD_COTNROL_MAX_LENGTH_EACH_XFER;
-
-        usbd_devices[coreid].is_waiting_control_xfer = false;
-      }
-      #endif
-
       if ( TUSB_ERROR_NONE == error )
       {
-        dcd_pipe_control_xfer(coreid, p_request->bmRequestType_bit.direction, p_buffer, length);
+        dcd_pipe_control_xfer(coreid, p_request->bmRequestType_bit.direction, p_buffer, length, false);
       }
     }
     else if ( TUSB_REQUEST_SET_ADDRESS == p_request->bRequest )
@@ -207,12 +185,14 @@ tusb_error_t usbd_control_request_subtask(uint8_t coreid, tusb_control_request_t
   //------------- Class/Interface Specific Request -------------//
   else if ( TUSB_REQUEST_RECIPIENT_INTERFACE == p_request->bmRequestType_bit.recipient)
   {
-    tusb_std_class_code_t class_code = usbd_devices[coreid].interface2class[ u16_low_u8(p_request->wIndex) ];
+    OSAL_VAR tusb_std_class_code_t class_code;
+
+    class_code = usbd_devices[coreid].interface2class[ u16_low_u8(p_request->wIndex) ];
 
     if ( (TUSB_CLASS_AUDIO <= class_code) && (class_code <= TUSB_CLASS_AUDIO_VIDEO) &&
-         usbd_class_drivers[class_code].control_request )
+         usbd_class_drivers[class_code].control_request_subtask )
     {
-      error = usbd_class_drivers[class_code].control_request(coreid, p_request);
+      OSAL_SUBTASK_INVOKED_AND_WAIT( usbd_class_drivers[class_code].control_request_subtask(coreid, p_request), error );
     }else
     {
       error = TUSB_ERROR_DCD_CONTROL_REQUEST_NOT_SUPPORT;
@@ -234,9 +214,9 @@ tusb_error_t usbd_control_request_subtask(uint8_t coreid, tusb_control_request_t
   { // Response with Protocol Stall if request is not supported
     dcd_pipe_control_stall(coreid);
     // ASSERT(error == TUSB_ERROR_NONE, VOID_RETURN);
-  }else
-  { // status phase
-    dcd_pipe_control_xfer(coreid, 1-p_request->bmRequestType_bit.direction, NULL, 0); // zero length
+  }else if (p_request->wLength == 0)
+  {
+    dcd_pipe_control_xfer(coreid, p_request->bmRequestType_bit.direction, NULL, 0, false); // zero length for non-data
   }
 
   OSAL_SUBTASK_END
@@ -418,10 +398,7 @@ void usbd_xfer_isr(endpoint_handle_t edpt_hdl, tusb_event_t event, uint32_t xfer
 {
   if (edpt_hdl.class_code == 0 ) // Control Transfer
   {
-    if (usbd_devices[edpt_hdl.coreid].is_waiting_control_xfer)
-    {
-      osal_semaphore_post( usbd_control_xfer_sem_hdl );
-    }
+    osal_semaphore_post( usbd_control_xfer_sem_hdl );
   }else
   {
     usbd_task_event_t task_event =
@@ -70,7 +70,7 @@
 typedef struct {
   void (* const init) (void);
   tusb_error_t (* const open)(uint8_t, tusb_descriptor_interface_t const *, uint16_t*);
-  tusb_error_t (* const control_request) (uint8_t, tusb_control_request_t const *);
+  tusb_error_t (* const control_request_subtask) (uint8_t, tusb_control_request_t const *);
   tusb_error_t (* const xfer_cb) (endpoint_handle_t, tusb_event_t, uint32_t);
   void (* const close) (uint8_t);
 } usbd_class_driver_t;
@@ -90,8 +90,11 @@ bool tusbd_is_configured(uint8_t coreid) ATTR_WARN_UNUSED_RESULT;
 //--------------------------------------------------------------------+
 #ifdef _TINY_USB_SOURCE_FILE_
+
+extern osal_semaphore_handle_t usbd_control_xfer_sem_hdl;
+
 tusb_error_t usbd_init(void);
 OSAL_TASK_FUNCTION (usbd_task) (void* p_task_para);
 
 #endif
 
 #ifdef __cplusplus
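A sketch of why usbd_control_xfer_sem_hdl is exported above: a class driver's control_request_subtask (for example hidd serving a SET_REPORT that carries the keyboard LED mask) can queue the data stage with int_on_complete = true and then block on this semaphore until usbd_xfer_isr() posts it. The function name and report buffer are assumptions for illustration, not code from this commit:

    static uint8_t report_buffer[64]; // hypothetical buffer for the incoming report

    tusb_error_t wait_for_control_data_sketch(uint8_t coreid, tusb_control_request_t const * p_request)
    {
      tusb_error_t error;

      // queue the data stage of the control transfer and ask for an interrupt on completion
      dcd_pipe_control_xfer(coreid, (tusb_direction_t) p_request->bmRequestType_bit.direction,
                            report_buffer, p_request->wLength, true);

      // usbd_xfer_isr() posts this semaphore when the control transfer completes
      osal_semaphore_wait(usbd_control_xfer_sem_hdl, OSAL_TIMEOUT_NORMAL, &error);

      return error;
    }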
@@ -68,7 +68,6 @@ typedef enum {
 
 typedef struct {
   volatile uint8_t state;
-  uint8_t is_waiting_control_xfer; // set if task is waiting for control xfer to complete to proceed
   uint8_t interface2class[USBD_INTERFACE_NUM_MAX]; // determine interface number belongs to which class
 }usbd_device_info_t;
 