USB HCD uses IRPs directly for transfer

This commit updates the HCD so that transfer requests are no longer used.
The USB IRP object is updated so that it can be directly passed to the HCD
to start a transfer. HCD API, tests, and documentation updated accordingly.
pull/6974/head
Darian Leung 2021-03-06 05:46:25 +08:00
rodzic 69309c88a0
commit 738af3fd38
5 zmienionych plików z 541 dodań i 664 usunięć

Wyświetl plik

@ -58,12 +58,22 @@
#define NUM_PORTS 1 //The controller only has one port.
typedef enum {
XFER_REQ_STATE_IDLE, //The transfer request is not enqueued
XFER_REQ_STATE_PENDING, //The transfer request is enqueued and pending execution
XFER_REQ_STATE_INFLIGHT, //The transfer request is currently being executed
XFER_REQ_STATE_DONE, //The transfer request has completed executed or is retired, and is waiting to be dequeued
} xfer_req_state_t;
// ------------------------ Flags --------------------------
/**
* @brief Bit masks for the HCD to use in the IRPs reserved_flags field
*
* The IRP object has a reserved_flags member for host stack's internal use. The following flags will be set in
* reserved_flags in order to keep track of state of an IRP within the HCD.
*/
#define IRP_STATE_IDLE 0x0 //The IRP is not enqueued in an HCD pipe
#define IRP_STATE_PENDING 0x1 //The IRP is enqueued and pending execution
#define IRP_STATE_INFLIGHT 0x2 //The IRP is currently in flight
#define IRP_STATE_DONE 0x3 //The IRP has completed execution or is retired, and is waiting to be dequeued
#define IRP_STATE_MASK 0x3 //Bit mask of all the IRP state flags
#define IRP_STATE_SET(reserved_flags, state) (reserved_flags = (reserved_flags & ~IRP_STATE_MASK) | state)
#define IRP_STATE_GET(reserved_flags) (reserved_flags & IRP_STATE_MASK)
// -------------------- Convenience ------------------------
@ -86,31 +96,19 @@ typedef enum {
// ------------------------------------------------------ Types --------------------------------------------------------
typedef struct xfer_req_obj xfer_req_t;
typedef struct pipe_obj pipe_t;
typedef struct port_obj port_t;
/**
* @brief Object representing an HCD transfer request
*/
struct xfer_req_obj {
TAILQ_ENTRY(xfer_req_obj) tailq_entry; //TailQ entry for pending or done tailq in pipe object
pipe_t *pipe; //Target pipe of transfer request
usb_irp_t *irp; //Target IRP
void *context; //Context variable of transfer request
xfer_req_state_t state; //Current state of the transfer request
};
/**
* @brief Object representing a pipe in the HCD layer
*/
struct pipe_obj {
//Transfer requests related
TAILQ_HEAD(tailhead_xfer_req_pend, xfer_req_obj) pend_xfer_req_tailq;
TAILQ_HEAD(tailhead_xfer_req_done, xfer_req_obj) done_xfer_req_tailq;
int num_xfer_req_pending;
int num_xfer_req_done;
xfer_req_t *inflight_xfer_req; //Pointer to the current transfer request being executed by the pipe. NULL if none.
//IRP queueing related
TAILQ_HEAD(tailhead_irp_pending, usb_irp_obj) pending_irp_tailq;
TAILQ_HEAD(tailhead_irp_done, usb_irp_obj) done_irp_tailq;
int num_irp_pending;
int num_irp_done;
usb_irp_t *inflight_irp; //Pointer to the in-flight IRP (i.e., the IRP currently being executed). NULL if none.
//Port related
port_t *port; //The port to which this pipe is routed through
TAILQ_ENTRY(pipe_obj) tailq_entry; //TailQ entry for port's list of pipes
@ -149,7 +147,7 @@ struct port_obj {
usbh_hal_context_t *hal;
//Pipes routed through this port
TAILQ_HEAD(tailhead_pipes_idle, pipe_obj) pipes_idle_tailq;
TAILQ_HEAD(tailhead_pipes_queued, pipe_obj) pipes_queued_tailq;
TAILQ_HEAD(tailhead_pipes_queued, pipe_obj) pipes_active_tailq;
int num_pipes_idle;
int num_pipes_queued;
//Port status, state, and events
@ -246,7 +244,7 @@ static bool _internal_pipe_event_notify(pipe_t *pipe, bool from_isr);
* Entry:
* - The port or its connected device is no longer valid. This guarantees that none of the pipes will be transferring
* Exit:
* - Each pipe will have any pending transfer request moved to their respective done tailq
* - Each pipe will have any pending IRPs moved to their respective done tailq
* - Each pipe will be put into the invalid state
* - Generate a HCD_PIPE_EVENT_INVALID event on each pipe and run their respective callbacks
*
@ -262,8 +260,8 @@ static void _port_invalidate_all_pipes(port_t *port);
* Entry:
* - The port is in the HCD_PORT_STATE_ENABLED state (i.e., there is a connected device which has been reset)
* Exit:
* - All pipes of the port have either paused, or are waiting to complete their inflight transfer request to pause
* - If waiting for one or more pipes, _internal_port_event_wait() must be called after this function returns
* - All pipes routed through the port have either paused, or are waiting to complete their in-flight IRPs before pausing
* - If waiting for one or more pipes to pause, _internal_port_event_wait() must be called after this function returns
*
* @param port Port object
* @return true All pipes have been paused
@ -280,7 +278,7 @@ static bool _port_pause_all_pipes(port_t *port);
* - The port is in the HCD_PORT_STATE_ENABLED state
* - All pipes are paused
* Exit:
* - All pipes un-paused. If those pipes have pending transfer requests, they will be started.
* - All pipes un-paused. If those pipes have pending IRPs, they will be started.
*
* @param port Port object
*/
@ -369,66 +367,66 @@ static bool _port_debounce(port_t *port);
// ------------------------ Pipe ---------------------------
/**
* @brief Get the next pending transfer request from the pending tailq
* @brief Get the next pending IRP from the pending tailq
*
* Entry:
* - The inflight transfer request must be set to NULL (indicating the pipe currently has no inflight transfer request)
* - The in-flight IRP must be set to NULL (indicating the pipe currently has no in-flight IRP)
* Exit:
* - If (num_xfer_req_pending > 0), the first transfer request is removed from pend_xfer_req_tailq and and
* inflight_xfer_req is set to that transfer request.
* - If there are no more queued transfer requests, inflight_xfer_req is left as NULL
 * - If (num_irp_pending > 0), the first IRP is removed from pending_irp_tailq and
* inflight_irp is set to that IRP.
* - If there are no more queued IRPs, inflight_irp is left as NULL
*
* @param pipe Pipe object
* @return true A pending transfer request is now set as the inflight transfer request
* @return false No more pending transfer requests
* @return true A pending IRP is now set as the in-flight IRP
* @return false No more pending IRPs
*/
static bool _pipe_get_next_xfer_req(pipe_t *pipe);
static bool _pipe_get_next_irp(pipe_t *pipe);
/**
* @brief Return the inflight transfer request to the done tailq
* @brief Return the pipe's current IRP (inflight_irp) to the done tailq
*
* Entry:
* - The inflight transfer request must already have been parsed (i.e., results have been checked)
* - The inflight_irp must already have been parsed (i.e., results have been checked)
* Exit:
* - The inflight transfer request is returned to the done tailq and inflight_xfer_req is set to NULL
* - The IRP is returned to the done tailq and inflight_irp is set to NULL
*
* @param pipe Pipe object
*/
static void _pipe_ret_cur_xfer_req(pipe_t *pipe);
static void _pipe_return_cur_irp(pipe_t *pipe);
/**
* @brief Wait until a pipe's inflight transfer request is done
* @brief Wait until a pipe's in-flight IRP is done
*
* If the pipe has an inflight transfer request, this function will block until it is done (via a internal pipe event).
* If the pipe has no inflight transfer request, this function do nothing and return immediately.
 * If the pipe has an in-flight IRP, this function will block until it is done (via an internal pipe event).
 * If the pipe has no in-flight IRP, this function does nothing and returns immediately.
 * If the pipe's state changes unexpectedly, this function will return false.
*
* @note This function is blocking (will exit and re-enter the critical section to do so)
*
* @param pipe Pipe object
* @return true Pipes inflight transfer request is done
* @return true Pipes in-flight IRP is done
* @return false Pipes state unexpectedly changed
*/
static bool _pipe_wait_done(pipe_t *pipe);
/**
* @brief Retires all transfer requests (those that were previously inflight or pending)
* @brief Retires all IRPs (those that were previously in-flight or pending)
*
* Retiring all transfer requests will result in any pending transfer request being moved to the done tailq. This
* function will update the IPR status of each transfer request.
 * Retiring all IRPs will result in any pending IRP being moved to the done tailq. This
 * function will update the IRP status of each IRP.
* - If the retiring is self-initiated (i.e., due to a pipe command), the IRP status will be set to USB_TRANSFER_STATUS_CANCELLED.
* - If the retiring is NOT self-initiated (i.e., the pipe is no longer valid), the IRP status will be set to USB_TRANSFER_STATUS_NO_DEVICE
*
* Entry:
* - There can be no inflight transfer request (must already be parsed and returned to done queue)
* - There can be no in-flight IRP (must already be parsed and returned to done queue)
* Exit:
* - If there was an inflight transfer request, it is parsed and returned to the done queue
* - If there are any pending transfer requests:
* - If there was an in-flight IRP, it is parsed and returned to the done queue
* - If there are any pending IRPs:
* - They are moved to the done tailq
*
* @param pipe Pipe object
* @param cancelled Are we actively Pipe retire is initialized by the user due to a command, thus transfer request are actively
* cancelled
 * @param self_initiated Whether the retire was initiated by the user via a pipe command, in which case the pending
 *                       IRPs are actively cancelled (status set to USB_TRANSFER_STATUS_CANCELLED).
*/
static void _pipe_retire(pipe_t *pipe, bool self_initiated);
@ -440,45 +438,45 @@ static void _pipe_retire(pipe_t *pipe, bool self_initiated);
*/
static inline hcd_pipe_event_t pipe_decode_error_event(usbh_hal_chan_error_t chan_error);
// ------------------ Transfer Requests --------------------
// ----------------- Transfer Descriptors ------------------
/**
* @brief Fill a transfer request into the pipe's transfer descriptor list
* @brief Fill the inflight_irp into the pipe's transfer descriptor list
*
* Entry:
* - The pipe's inflight_xfer_req must be set to the next transfer request
* - The pipe's inflight_irp must be set to the next IRP
* Exit:
* - inflight_xfer_req filled into the pipe's transfer descriptor list
* - inflight_irp filled into the pipe's transfer descriptor list
* - Starting PIDs and directions set
* - Channel slot acquired. Will need to call usbh_hal_chan_activate() to actually start execution
*
* @param pipe Pipe where inflight_xfer_req is already set to the next transfer request
* @param pipe Pipe where inflight_irp is already set to the next IRP
*/
static void _xfer_req_fill(pipe_t *pipe);
static void _xfer_desc_list_fill(pipe_t *pipe);
/**
* @brief Continue a transfer request
* @brief Continue the execution of the transfer descriptor list
*
* @note This is currently only used for control transfers
*
* @param pipe Pipe where inflight_xfer_req contains the transfer request to continue
* @param pipe Pipe object
*/
static void _xfer_req_continue(pipe_t *pipe);
static void _xfer_desc_list_continue(pipe_t *pipe);
/**
* @brief Parse the results of a pipe's transfer descriptor list into a transfer request
* @brief Parse the pipe's transfer descriptor list to fill the result of the transfers into the pipe's IRP
*
* Entry:
 * - The pipe must have stopped transferring, either due to a channel event or a port disconnection.
* - The pipe's state and last_event must be updated before parsing the transfer request as
* they will used to determine the resuult of the transfer request
 * - The pipe's state and last_event must be updated before parsing the IRP, as they will be used to determine the
 *   result of the IRP
* Exit:
* - The pipe's inflight_xfer_req is filled with result of the transfer request (i.e., the underlying IRP has its status set)
* - The pipe's inflight_irp is filled with result of the IRP (i.e., the underlying IRP has its status set)
*
* @param pipe Pipe where inflight_xfer_req contains the completed transfer request
* @param pipe Pipe object
* @param error_occurred Are we parsing after the pipe had an error (or has become invalid)
*/
static void _xfer_req_parse(pipe_t *pipe, bool error_occurred);
static void _xfer_desc_list_parse(pipe_t *pipe, bool error_occurred);
// ----------------------------------------------- Interrupt Handling --------------------------------------------------
@ -562,7 +560,7 @@ static hcd_port_event_t _intr_hdlr_hprt(port_t *port, usbh_hal_port_event_t hal_
}
case USBH_HAL_PORT_EVENT_DISCONN: {
if (port->flags.conn_devc_ena) {
//The port was previously enabled, so this is a sudden disconenction
//The port was previously enabled, so this is a sudden disconnection
port->state = HCD_PORT_STATE_RECOVERY;
port_event = HCD_PORT_EVENT_SUDDEN_DISCONN;
} else {
@ -638,10 +636,10 @@ static hcd_pipe_event_t _intr_hdlr_chan(pipe_t *pipe, usbh_hal_chan_t *chan_obj,
switch (chan_event) {
case USBH_HAL_CHAN_EVENT_SLOT_DONE: {
//An entire transfer descriptor list has completed execution
pipe->last_event = HCD_PIPE_EVENT_XFER_REQ_DONE;
event = HCD_PIPE_EVENT_XFER_REQ_DONE;
_xfer_req_parse(pipe, false); //Parse results of transfer request
_pipe_ret_cur_xfer_req(pipe); //Return the transfer request to the pipe's done tailq
pipe->last_event = HCD_PIPE_EVENT_IRP_DONE;
event = HCD_PIPE_EVENT_IRP_DONE;
_xfer_desc_list_parse(pipe, false); //Parse results of IRP
_pipe_return_cur_irp(pipe); //Return the IRP to the pipe's done tailq
if (pipe->flags.waiting_xfer_done) {
//A port/pipe command is waiting for this pipe to complete its transfer. So don't load the next transfer
pipe->flags.waiting_xfer_done = 0;
@ -658,9 +656,9 @@ static hcd_pipe_event_t _intr_hdlr_chan(pipe_t *pipe, usbh_hal_chan_t *chan_obj,
//Pipe command is waiting for transfer to complete
*yield |= _internal_pipe_event_notify(pipe, true);
}
} else if (_pipe_get_next_xfer_req(pipe)) {
//Fill the descriptor list with the transfer request and start the transfer
_xfer_req_fill(pipe);
} else if (_pipe_get_next_irp(pipe)) {
//Fill the descriptor list with the IRP and start the transfer
_xfer_desc_list_fill(pipe);
usbh_hal_chan_activate(chan_obj, 0); //Start with the first descriptor
}
break;
@ -668,7 +666,7 @@ static hcd_pipe_event_t _intr_hdlr_chan(pipe_t *pipe, usbh_hal_chan_t *chan_obj,
case USBH_HAL_CHAN_EVENT_SLOT_HALT: {
//A transfer descriptor list has partially completed. This currently only happens on control pipes
assert(pipe->ep_char.type == USB_PRIV_XFER_TYPE_CTRL);
_xfer_req_continue(pipe); //Continue the transfer request.
_xfer_desc_list_continue(pipe); //Continue the transfer request.
//We are continuing a transfer, so no event has occurred
break;
}
@ -679,9 +677,9 @@ static hcd_pipe_event_t _intr_hdlr_chan(pipe_t *pipe, usbh_hal_chan_t *chan_obj,
pipe->last_event = pipe_decode_error_event(chan_error);
event = pipe->last_event;
pipe->state = HCD_PIPE_STATE_HALTED;
//Parse the failed transfer request and update it's IRP status
_xfer_req_parse(pipe, true);
_pipe_ret_cur_xfer_req(pipe); //Return the transfer request to the pipe's done tailq
//Parse the failed IRP and update its IRP status
_xfer_desc_list_parse(pipe, true);
_pipe_return_cur_irp(pipe); //Return the IRP to the pipe's done tailq
break;
}
case USBH_HAL_CHAN_EVENT_HALT_REQ: //We currently don't halt request so this event should never occur
@ -705,7 +703,7 @@ static hcd_pipe_event_t _intr_hdlr_chan(pipe_t *pipe, usbh_hal_chan_t *chan_obj,
*/
static void intr_hdlr_main(void *arg)
{
port_t *port = (port_t *)arg;
port_t *port = (port_t *) arg;
bool yield = false;
HCD_ENTER_CRITICAL_ISR();
@ -792,7 +790,7 @@ esp_err_t hcd_install(const hcd_config_t *config)
//Allocate resources for each port (there's only one)
p_hcd_obj_dmy->port_obj = port_obj_alloc();
esp_err_t intr_alloc_ret = esp_intr_alloc(ETS_USB_INTR_SOURCE,
config->intr_flags | ESP_INTR_FLAG_INTRDISABLED, //The interruupt must be disabled until the port is initialized
config->intr_flags | ESP_INTR_FLAG_INTRDISABLED, //The interrupt must be disabled until the port is initialized
intr_hdlr_main,
(void *)p_hcd_obj_dmy->port_obj,
&p_hcd_obj_dmy->isr_hdl);
@ -811,7 +809,7 @@ esp_err_t hcd_install(const hcd_config_t *config)
goto err;
}
s_hcd_obj = p_hcd_obj_dmy;
//Set HW prereqs for each port (there's only one)
//Set HW prerequisites for each port (there's only one)
periph_module_enable(PERIPH_USB_MODULE);
periph_module_reset(PERIPH_USB_MODULE);
/*
@ -839,7 +837,7 @@ err:
esp_err_t hcd_uninstall(void)
{
HCD_ENTER_CRITICAL();
//Check that all ports have been disabled (theres only one)
//Check that all ports have been disabled (there's only one port)
if (s_hcd_obj == NULL || s_hcd_obj->port_obj->initialized) {
HCD_EXIT_CRITICAL();
return ESP_ERR_INVALID_STATE;
@ -865,17 +863,17 @@ static void _port_invalidate_all_pipes(port_t *port)
//This function should only be called when the port is invalid
assert(!port->flags.conn_devc_ena);
pipe_t *pipe;
//Process all pipes that have queued transfer requests
TAILQ_FOREACH(pipe, &port->pipes_queued_tailq, tailq_entry) {
//Process all pipes that have queued IRPs
TAILQ_FOREACH(pipe, &port->pipes_active_tailq, tailq_entry) {
//Mark the pipe as invalid and set an invalid event
pipe->state = HCD_PIPE_STATE_INVALID;
pipe->last_event = HCD_PIPE_EVENT_INVALID;
//If the pipe had an inflight transfer, parse and return it
if (pipe->inflight_xfer_req != NULL) {
_xfer_req_parse(pipe, true);
_pipe_ret_cur_xfer_req(pipe);
//If the pipe had an in-flight transfer, parse and return it
if (pipe->inflight_irp != NULL) {
_xfer_desc_list_parse(pipe, true);
_pipe_return_cur_irp(pipe);
}
//Retire any remaining transfer requests
//Retire any remaining IRPs
_pipe_retire(pipe, false);
if (pipe->task_waiting_pipe_notif != NULL) {
//Unblock the thread/task waiting for a notification from the pipe as the pipe is no longer valid.
@ -905,14 +903,14 @@ static bool _port_pause_all_pipes(port_t *port)
assert(port->state == HCD_PORT_STATE_ENABLED);
pipe_t *pipe;
int num_pipes_waiting_done = 0;
//Process all pipes that have queued transfer requests
TAILQ_FOREACH(pipe, &port->pipes_queued_tailq, tailq_entry) {
if (pipe->inflight_xfer_req != NULL) {
//Pipe has an inflight transfer. Indicate to the pipe we are waiting the transfer to complete
//Process all pipes that have queued IRPs
TAILQ_FOREACH(pipe, &port->pipes_active_tailq, tailq_entry) {
if (pipe->inflight_irp != NULL) {
//Pipe has an in-flight transfer. Indicate to the pipe that we are waiting for the transfer to complete
pipe->flags.waiting_xfer_done = 1;
num_pipes_waiting_done++;
} else {
//No inflight transfer so no need to wait
//No in-flight transfer so no need to wait
pipe->flags.paused = 1;
}
}
@ -937,12 +935,12 @@ static void _port_unpause_all_pipes(port_t *port)
TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
pipe->flags.paused = 0;
}
//Process all pipes that have queued transfer requests
TAILQ_FOREACH(pipe, &port->pipes_queued_tailq, tailq_entry) {
//Process all pipes that have queued IRPs
TAILQ_FOREACH(pipe, &port->pipes_active_tailq, tailq_entry) {
pipe->flags.paused = 0;
//If the pipe has more pending transfer request, start them.
if (_pipe_get_next_xfer_req(pipe)) {
_xfer_req_fill(pipe);
//If the pipe has more pending IRP, start them.
if (_pipe_get_next_irp(pipe)) {
_xfer_desc_list_fill(pipe);
usbh_hal_chan_activate(pipe->chan_obj, 0);
}
}
@ -983,7 +981,7 @@ static bool _port_bus_suspend(port_t *port)
//Need to wait for some pipes to pause. Wait for notification from ISR
_internal_port_event_wait(port);
if (port->state != HCD_PORT_STATE_ENABLED || !port->flags.conn_devc_ena) {
//Port state unexpectedley changed
//Port state unexpectedly changed
goto bailout;
}
}
@ -1082,10 +1080,10 @@ esp_err_t hcd_port_init(int port_number, hcd_port_config_t *port_config, hcd_por
HCD_ENTER_CRITICAL();
HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL && !s_hcd_obj->port_obj->initialized, ESP_ERR_INVALID_STATE);
//Port object memory and resources (such as mutex) already be allocated. Just need to initialize necessary fields only
//Port object memory and resources (such as the mutex) have already been allocated. Just initialize the necessary fields.
port_t *port_obj = s_hcd_obj->port_obj;
TAILQ_INIT(&port_obj->pipes_idle_tailq);
TAILQ_INIT(&port_obj->pipes_queued_tailq);
TAILQ_INIT(&port_obj->pipes_active_tailq);
port_obj->state = HCD_PORT_STATE_NOT_POWERED;
port_obj->last_event = HCD_PORT_EVENT_NONE;
port_obj->callback = port_config->callback;
@ -1292,17 +1290,18 @@ void *hcd_port_get_ctx(hcd_port_handle_t port_hdl)
// ----------------------- Private -------------------------
static bool _pipe_get_next_xfer_req(pipe_t *pipe)
static bool _pipe_get_next_irp(pipe_t *pipe)
{
assert(pipe->inflight_xfer_req == NULL);
assert(pipe->inflight_irp == NULL);
bool ret;
//This function assigns the next pending transfer request to the inflight_xfer_req
if (pipe->num_xfer_req_pending > 0) {
//Set inflight_xfer_req to the next pending transfer request
pipe->inflight_xfer_req = TAILQ_FIRST(&pipe->pend_xfer_req_tailq);
TAILQ_REMOVE(&pipe->pend_xfer_req_tailq, pipe->inflight_xfer_req, tailq_entry);
pipe->inflight_xfer_req->state = XFER_REQ_STATE_INFLIGHT;
pipe->num_xfer_req_pending--;
//This function assigns the next pending IRP to the inflight_irp
if (pipe->num_irp_pending > 0) {
//Set inflight_irp to the next pending IRP
pipe->inflight_irp = TAILQ_FIRST(&pipe->pending_irp_tailq);
TAILQ_REMOVE(&pipe->pending_irp_tailq, pipe->inflight_irp, tailq_entry);
pipe->num_irp_pending--;
//Update the IRP's current state
IRP_STATE_SET(pipe->inflight_irp->reserved_flags, IRP_STATE_INFLIGHT);
ret = true;
} else {
ret = false;
@ -1310,20 +1309,21 @@ static bool _pipe_get_next_xfer_req(pipe_t *pipe)
return ret;
}
static void _pipe_ret_cur_xfer_req(pipe_t *pipe)
static void _pipe_return_cur_irp(pipe_t *pipe)
{
assert(pipe->inflight_xfer_req != NULL);
//Add the transfer request to the pipe's done tailq
TAILQ_INSERT_TAIL(&pipe->done_xfer_req_tailq, pipe->inflight_xfer_req, tailq_entry);
pipe->inflight_xfer_req->state = XFER_REQ_STATE_DONE;
pipe->inflight_xfer_req = NULL;
pipe->num_xfer_req_done++;
assert(pipe->inflight_irp != NULL);
//Add the IRP to the pipe's done tailq
TAILQ_INSERT_TAIL(&pipe->done_irp_tailq, pipe->inflight_irp, tailq_entry);
//Update the IRP's current state
IRP_STATE_SET(pipe->inflight_irp->reserved_flags, IRP_STATE_DONE);
pipe->inflight_irp = NULL;
pipe->num_irp_done++;
}
static bool _pipe_wait_done(pipe_t *pipe)
{
//Check if there is a currently inflight transfer request
if (pipe->inflight_xfer_req != NULL) {
//Check if there is a currently in-flight IRP
if (pipe->inflight_irp != NULL) {
//Wait for pipe to complete its transfer
pipe->flags.waiting_xfer_done = 1;
_internal_pipe_event_wait(pipe);
@ -1341,20 +1341,21 @@ static bool _pipe_wait_done(pipe_t *pipe)
static void _pipe_retire(pipe_t *pipe, bool self_initiated)
{
//Cannot have any inflight transfer request
assert(pipe->inflight_xfer_req == NULL);
if (pipe->num_xfer_req_pending > 0) {
//Process all remaining pending transfer requests
xfer_req_t *xfer_req;
TAILQ_FOREACH(xfer_req, &pipe->pend_xfer_req_tailq, tailq_entry) {
xfer_req->state = XFER_REQ_STATE_DONE;
//If we are initiating the retire, mark the transfer request as cancelled
xfer_req->irp->status = (self_initiated) ? USB_TRANSFER_STATUS_CANCELLED : USB_TRANSFER_STATUS_NO_DEVICE;
//Cannot have any in-flight IRP
assert(pipe->inflight_irp == NULL);
if (pipe->num_irp_pending > 0) {
//Process all remaining pending IRPs
usb_irp_t *irp;
TAILQ_FOREACH(irp, &pipe->pending_irp_tailq, tailq_entry) {
//Update the IRP's current state
IRP_STATE_SET(irp->reserved_flags, IRP_STATE_DONE);
//If we are initiating the retire, mark the IRP as cancelled
irp->status = (self_initiated) ? USB_TRANSFER_STATUS_CANCELLED : USB_TRANSFER_STATUS_NO_DEVICE;
}
//Concatenated pending tailq to the done tailq
TAILQ_CONCAT(&pipe->done_xfer_req_tailq, &pipe->pend_xfer_req_tailq, tailq_entry);
pipe->num_xfer_req_done += pipe->num_xfer_req_pending;
pipe->num_xfer_req_pending = 0;
TAILQ_CONCAT(&pipe->done_irp_tailq, &pipe->pending_irp_tailq, tailq_entry);
pipe->num_irp_done += pipe->num_irp_pending;
pipe->num_irp_pending = 0;
}
}
@ -1366,7 +1367,7 @@ static inline hcd_pipe_event_t pipe_decode_error_event(usbh_hal_chan_error_t cha
event = HCD_PIPE_EVENT_ERROR_XFER;
break;
case USBH_HAL_CHAN_ERROR_BNA:
event = HCD_PIPE_EVENT_ERROR_XFER_NOT_AVAIL;
event = HCD_PIPE_EVENT_ERROR_IRP_NOT_AVAIL;
break;
case USBH_HAL_CHAN_ERROR_PKT_BBL:
event = HCD_PIPE_EVENT_ERROR_OVERFLOW;
@ -1379,13 +1380,13 @@ static inline hcd_pipe_event_t pipe_decode_error_event(usbh_hal_chan_error_t cha
}
// ----------------------- Public --------------------------
#include "esp_rom_sys.h"
esp_err_t hcd_pipe_alloc(hcd_port_handle_t port_hdl, const hcd_pipe_config_t *pipe_config, hcd_pipe_handle_t *pipe_hdl)
{
HCD_CHECK(port_hdl != NULL && pipe_config != NULL && pipe_hdl != NULL, ESP_ERR_INVALID_ARG);
port_t *port = (port_t *)port_hdl;
HCD_ENTER_CRITICAL();
//Can only allocate a pipe if the targetted port is initialized and conencted to an enabled device
//Can only allocate a pipe if the targeted port is initialized and connected to an enabled device
HCD_CHECK_FROM_CRIT(port->initialized && port->flags.conn_devc_ena, ESP_ERR_INVALID_STATE);
usb_speed_t port_speed = port->speed;
HCD_EXIT_CRITICAL();
@ -1432,8 +1433,8 @@ esp_err_t hcd_pipe_alloc(hcd_port_handle_t port_hdl, const hcd_pipe_config_t *pi
}
//Initialize pipe object
TAILQ_INIT(&pipe->pend_xfer_req_tailq);
TAILQ_INIT(&pipe->done_xfer_req_tailq);
TAILQ_INIT(&pipe->pending_irp_tailq);
TAILQ_INIT(&pipe->done_irp_tailq);
pipe->port = port;
pipe->xfer_desc_list = xfer_desc_list;
pipe->flags.xfer_desc_list_len = num_xfer_desc;
@ -1502,12 +1503,12 @@ esp_err_t hcd_pipe_free(hcd_pipe_handle_t pipe_hdl)
{
pipe_t *pipe = (pipe_t *)pipe_hdl;
HCD_ENTER_CRITICAL();
//Check that all transfer requests have been removed and pipe has no pending events
HCD_CHECK_FROM_CRIT(pipe->inflight_xfer_req == NULL
&& pipe->num_xfer_req_pending == 0
&& pipe->num_xfer_req_done == 0,
//Check that all IRPs have been removed and pipe has no pending events
HCD_CHECK_FROM_CRIT(pipe->inflight_irp == NULL
&& pipe->num_irp_pending == 0
&& pipe->num_irp_done == 0,
ESP_ERR_INVALID_STATE);
//Remove pipe from the list of idle pipes (it must be in the idle list because it should have no queued transfer requests)
//Remove pipe from the list of idle pipes (it must be in the idle list because it should have no queued IRPs)
TAILQ_REMOVE(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
pipe->port->num_pipes_idle--;
usbh_hal_chan_free(pipe->port->hal, pipe->chan_obj);
@ -1527,10 +1528,10 @@ esp_err_t hcd_pipe_update(hcd_pipe_handle_t pipe_hdl, uint8_t dev_addr, int mps)
//Check if pipe is in the correct state to be updated
HCD_CHECK_FROM_CRIT(pipe->state != HCD_PIPE_STATE_INVALID
&& !pipe->flags.pipe_cmd_processing
&& pipe->num_xfer_req_pending == 0
&& pipe->num_xfer_req_done == 0,
&& pipe->num_irp_pending == 0
&& pipe->num_irp_done == 0,
ESP_ERR_INVALID_STATE);
//Check that all transfer requests have been removed and pipe has no pending events
//Check that all IRPs have been removed and pipe has no pending events
pipe->ep_char.dev_addr = dev_addr;
pipe->ep_char.mps = mps;
usbh_hal_chan_set_ep_char(pipe->chan_obj, &pipe->ep_char);
@ -1540,7 +1541,7 @@ esp_err_t hcd_pipe_update(hcd_pipe_handle_t pipe_hdl, uint8_t dev_addr, int mps)
void *hcd_pipe_get_ctx(hcd_pipe_handle_t pipe_hdl)
{
pipe_t *pipe = (pipe_t *) pipe_hdl;
pipe_t *pipe = (pipe_t *)pipe_hdl;
void *ret;
HCD_ENTER_CRITICAL();
ret = pipe->context;
@ -1551,7 +1552,7 @@ void *hcd_pipe_get_ctx(hcd_pipe_handle_t pipe_hdl)
hcd_pipe_state_t hcd_pipe_get_state(hcd_pipe_handle_t pipe_hdl)
{
hcd_pipe_state_t ret;
pipe_t *pipe = (pipe_t *) pipe_hdl;
pipe_t *pipe = (pipe_t *)pipe_hdl;
HCD_ENTER_CRITICAL();
//If there is no enabled device, all existing pipes are invalid.
if (pipe->port->state != HCD_PORT_STATE_ENABLED
@ -1567,7 +1568,7 @@ hcd_pipe_state_t hcd_pipe_get_state(hcd_pipe_handle_t pipe_hdl)
esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command)
{
pipe_t *pipe = (pipe_t *) pipe_hdl;
pipe_t *pipe = (pipe_t *)pipe_hdl;
bool ret = ESP_OK;
HCD_ENTER_CRITICAL();
@ -1578,7 +1579,7 @@ esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command)
pipe->flags.pipe_cmd_processing = 1;
switch (command) {
case HCD_PIPE_CMD_ABORT: {
//Retire all scheduled transfer requests. Pipe's state remains unchanged
//Retire all scheduled IRPs. Pipe's state remains unchanged
if (!_pipe_wait_done(pipe)) { //Stop any on going transfers
ret = ESP_ERR_INVALID_RESPONSE;
break;
@ -1587,7 +1588,7 @@ esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command)
break;
}
case HCD_PIPE_CMD_RESET: {
//Retire all scheduled transfer requests. Pipe's state moves to active
//Retire all scheduled IRPs. Pipe's state moves to active
if (!_pipe_wait_done(pipe)) { //Stop any on going transfers
ret = ESP_ERR_INVALID_RESPONSE;
break;
@ -1601,9 +1602,9 @@ esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command)
if (pipe->state == HCD_PIPE_STATE_HALTED) {
pipe->state = HCD_PIPE_STATE_ACTIVE;
//Start the next pending transfer if it exists
if (_pipe_get_next_xfer_req(pipe)) {
//Fill the descriptor list with the transfer request and start the transfer
_xfer_req_fill(pipe);
if (_pipe_get_next_irp(pipe)) {
//Fill the descriptor list with the IRP and start the transfer
_xfer_desc_list_fill(pipe);
usbh_hal_chan_activate(pipe->chan_obj, 0); //Start with the first descriptor
}
}
@ -1627,7 +1628,7 @@ esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command)
hcd_pipe_event_t hcd_pipe_get_event(hcd_pipe_handle_t pipe_hdl)
{
pipe_t *pipe = (pipe_t *) pipe_hdl;
pipe_t *pipe = (pipe_t *)pipe_hdl;
hcd_pipe_event_t ret;
HCD_ENTER_CRITICAL();
ret = pipe->last_event;
@ -1636,19 +1637,19 @@ hcd_pipe_event_t hcd_pipe_get_event(hcd_pipe_handle_t pipe_hdl)
return ret;
}
// ----------------------------------------------- HCD Transfer Requests -----------------------------------------------
// ---------------------------------------------- HCD Transfer Descriptors ---------------------------------------------
// ----------------------- Private -------------------------
static void _xfer_req_fill(pipe_t *pipe)
static void _xfer_desc_list_fill(pipe_t *pipe)
{
//inflight_xfer_req of the pipe must already set to the target transfer request
assert(pipe->inflight_xfer_req != NULL);
//Fill transfer descriptor list with a single transfer request
usb_irp_t *usb_irp = pipe->inflight_xfer_req->irp;
//inflight_irp of the pipe must already set to the target IRP
assert(pipe->inflight_irp != NULL);
//Fill transfer descriptor list with a single IRP
usb_irp_t *usb_irp = pipe->inflight_irp;
switch (pipe->ep_char.type) {
case USB_XFER_TYPE_CTRL: {
//Get information about the contorl transfer by analyzing the setup packet (the first 8 bytes)
//Get information about the control transfer by analyzing the setup packet (the first 8 bytes)
usb_ctrl_req_t *ctrl_req = (usb_ctrl_req_t *)usb_irp->data_buffer;
pipe->flags.ctrl_data_stg_in = ((ctrl_req->bRequestType & USB_B_REQUEST_TYPE_DIR_IN) != 0);
pipe->flags.ctrl_data_stg_skip = (usb_irp->num_bytes == 0);
@ -1686,7 +1687,7 @@ static void _xfer_req_fill(pipe_t *pipe)
usbh_hal_chan_slot_acquire(pipe->chan_obj, pipe->xfer_desc_list, pipe->flags.xfer_desc_list_len, (void *)pipe);
}
static void _xfer_req_continue(pipe_t *pipe)
static void _xfer_desc_list_continue(pipe_t *pipe)
{
int next_idx = usbh_hal_chan_get_next_desc_index(pipe->chan_obj);
bool next_dir_is_in; //Next descriptor direction is IN
@ -1717,9 +1718,9 @@ static void _xfer_req_continue(pipe_t *pipe)
usbh_hal_chan_activate(pipe->chan_obj, num_to_skip); //Start the next stage
}
static void _xfer_req_parse(pipe_t *pipe, bool error_occurred)
static void _xfer_desc_list_parse(pipe_t *pipe, bool error_occurred)
{
assert(pipe->inflight_xfer_req != NULL);
assert(pipe->inflight_irp != NULL);
//Release the slot
void *xfer_desc_list;
int xfer_desc_len;
@ -1728,7 +1729,7 @@ static void _xfer_req_parse(pipe_t *pipe, bool error_occurred)
(void) xfer_desc_len;
//Parse the transfer descriptor list for the result of the transfer
usb_irp_t *usb_irp = pipe->inflight_xfer_req->irp;
usb_irp_t *irp = pipe->inflight_irp;
usb_transfer_status_t xfer_status;
int xfer_rem_len;
if (error_occurred) {
@ -1748,13 +1749,13 @@ static void _xfer_req_parse(pipe_t *pipe, bool error_occurred)
xfer_status = USB_TRANSFER_STATUS_STALL;
break;
default:
//HCD_PIPE_EVENT_ERROR_XFER_NOT_AVAIL should never occur
//HCD_PIPE_EVENT_ERROR_IRP_NOT_AVAIL should never occur
abort();
break;
}
}
//We assume no bytes transmitted because of an error.
xfer_rem_len = usb_irp->num_bytes;
xfer_rem_len = irp->num_bytes;
} else {
int desc_status;
switch (pipe->ep_char.type) {
@ -1786,144 +1787,104 @@ static void _xfer_req_parse(pipe_t *pipe, bool error_occurred)
assert(desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
}
//Write back results to IRP
usb_irp->actual_num_bytes = usb_irp->num_bytes - xfer_rem_len;
usb_irp->status = xfer_status;
irp->actual_num_bytes = irp->num_bytes - xfer_rem_len;
irp->status = xfer_status;
}
// ----------------------- Public --------------------------
hcd_xfer_req_handle_t hcd_xfer_req_alloc()
esp_err_t hcd_irp_enqueue(hcd_pipe_handle_t pipe_hdl, usb_irp_t *irp)
{
xfer_req_t *xfer_req = calloc(1, sizeof(xfer_req_t));
xfer_req->state = XFER_REQ_STATE_IDLE;
return (hcd_xfer_req_handle_t) xfer_req;
}
void hcd_xfer_req_free(hcd_xfer_req_handle_t req_hdl)
{
if (req_hdl == NULL) {
return;
}
xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
//Cannot free a transfer request that is still being used
assert(xfer_req->state == XFER_REQ_STATE_IDLE);
free(xfer_req);
}
void hcd_xfer_req_set_target(hcd_xfer_req_handle_t req_hdl, hcd_pipe_handle_t pipe_hdl, usb_irp_t *irp, void *context)
{
xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
//Can only set an transfer request's target when the transfer request is idl
assert(xfer_req->state == XFER_REQ_STATE_IDLE);
xfer_req->pipe = (pipe_t *) pipe_hdl;
xfer_req->irp = irp;
xfer_req->context = context;
}
void hcd_xfer_req_get_target(hcd_xfer_req_handle_t req_hdl, hcd_pipe_handle_t *pipe_hdl, usb_irp_t **irp, void **context)
{
xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
*pipe_hdl = (hcd_pipe_handle_t) xfer_req->pipe;
*irp = xfer_req->irp;
*context = xfer_req->context;
}
esp_err_t hcd_xfer_req_enqueue(hcd_xfer_req_handle_t req_hdl)
{
xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
HCD_CHECK(xfer_req->pipe != NULL && xfer_req->irp != NULL //The transfer request's target must be set
&& xfer_req->state == XFER_REQ_STATE_IDLE, //The transfer request cannot be already enqueued
//Check that IRP has not already been enqueued
HCD_CHECK(irp->reserved_ptr == NULL
&& IRP_STATE_GET(irp->reserved_flags) == IRP_STATE_IDLE,
ESP_ERR_INVALID_STATE);
pipe_t *pipe = xfer_req->pipe;
pipe_t *pipe = (pipe_t *)pipe_hdl;
HCD_ENTER_CRITICAL();
//Check that pipe and port are in the correct state to receive IRPs
HCD_CHECK_FROM_CRIT(pipe->port->state == HCD_PORT_STATE_ENABLED //The pipe's port must be in the correct state
&& pipe->state == HCD_PIPE_STATE_ACTIVE //The pipe must be in the correct state
&& !pipe->flags.pipe_cmd_processing, //Pipe cannot currently be processing a pipe command
ESP_ERR_INVALID_STATE);
//Use the IRP's reserved_ptr to store the pipe's
irp->reserved_ptr = (void *)pipe;
//Check if we can start execution on the pipe immediately
if (!pipe->flags.paused && pipe->num_xfer_req_pending == 0 && pipe->inflight_xfer_req == NULL) {
if (!pipe->flags.paused && pipe->num_irp_pending == 0 && pipe->inflight_irp == NULL) {
//Pipe isn't executing any transfers. Start immediately
pipe->inflight_xfer_req = xfer_req;
_xfer_req_fill(pipe);
pipe->inflight_irp = irp;
_xfer_desc_list_fill(pipe);
usbh_hal_chan_activate(pipe->chan_obj, 0); //Start with the first descriptor
xfer_req->state = XFER_REQ_STATE_INFLIGHT;
if (pipe->num_xfer_req_done == 0) {
//This is the first transfer request to be enqueued into the pipe. Move the pipe to the list of queued pipes
//use the IRP's reserved_flags to store the IRP's current state
IRP_STATE_SET(irp->reserved_flags, IRP_STATE_INFLIGHT);
if (pipe->num_irp_done == 0) {
//This is the first IRP to be enqueued into the pipe. Move the pipe to the list of active pipes
TAILQ_REMOVE(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
TAILQ_INSERT_TAIL(&pipe->port->pipes_queued_tailq, pipe, tailq_entry);
TAILQ_INSERT_TAIL(&pipe->port->pipes_active_tailq, pipe, tailq_entry);
pipe->port->num_pipes_idle--;
pipe->port->num_pipes_queued++;
}
} else {
//Add the transfer request to the pipe's pending tailq
TAILQ_INSERT_TAIL(&pipe->pend_xfer_req_tailq, xfer_req, tailq_entry);
pipe->num_xfer_req_pending++;
xfer_req->state = XFER_REQ_STATE_PENDING;
//Add the IRP to the pipe's pending tailq
TAILQ_INSERT_TAIL(&pipe->pending_irp_tailq, irp, tailq_entry);
pipe->num_irp_pending++;
//use the IRP's reserved_flags to store the IRP's current state
IRP_STATE_SET(irp->reserved_flags, IRP_STATE_PENDING);
}
HCD_EXIT_CRITICAL();
return ESP_OK;
}
hcd_xfer_req_handle_t hcd_xfer_req_dequeue(hcd_pipe_handle_t pipe_hdl)
usb_irp_t *hcd_irp_dequeue(hcd_pipe_handle_t pipe_hdl)
{
pipe_t *pipe = (pipe_t *)pipe_hdl;
hcd_xfer_req_handle_t ret;
usb_irp_t *irp;
HCD_ENTER_CRITICAL();
if (pipe->num_xfer_req_done > 0) {
xfer_req_t *xfer_req = TAILQ_FIRST(&pipe->done_xfer_req_tailq);
TAILQ_REMOVE(&pipe->done_xfer_req_tailq, xfer_req, tailq_entry);
pipe->num_xfer_req_done--;
assert(xfer_req->state == XFER_REQ_STATE_DONE);
xfer_req->state = XFER_REQ_STATE_IDLE;
ret = (hcd_xfer_req_handle_t) xfer_req;
if (pipe->num_xfer_req_done == 0 && pipe->num_xfer_req_pending == 0) {
//This pipe has no more enqueued transfers. Move the pipe to the list of idle pipes
TAILQ_REMOVE(&pipe->port->pipes_queued_tailq, pipe, tailq_entry);
if (pipe->num_irp_done > 0) {
irp = TAILQ_FIRST(&pipe->done_irp_tailq);
TAILQ_REMOVE(&pipe->done_irp_tailq, irp, tailq_entry);
pipe->num_irp_done--;
//Check the IRP's reserved fields then reset them
assert(irp->reserved_ptr == (void *)pipe && IRP_STATE_GET(irp->reserved_flags) == IRP_STATE_DONE); //The IRP's reserved field should have been set to this pipe
irp->reserved_ptr = NULL;
IRP_STATE_SET(irp->reserved_flags, IRP_STATE_IDLE);
if (pipe->num_irp_done == 0 && pipe->num_irp_pending == 0) {
//This pipe has no more enqueued IRPs. Move the pipe to the list of idle pipes
TAILQ_REMOVE(&pipe->port->pipes_active_tailq, pipe, tailq_entry);
TAILQ_INSERT_TAIL(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
pipe->port->num_pipes_idle++;
pipe->port->num_pipes_queued--;
}
} else {
ret = NULL;
//No more IRPs to dequeue from this pipe
irp = NULL;
}
HCD_EXIT_CRITICAL();
return ret;
return irp;
}
esp_err_t hcd_xfer_req_abort(hcd_xfer_req_handle_t req_hdl)
esp_err_t hcd_irp_abort(usb_irp_t *irp)
{
xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
esp_err_t ret;
HCD_ENTER_CRITICAL();
switch (xfer_req->state) {
case XFER_REQ_STATE_PENDING: {
//Transfer request has not been executed so it can be aborted
pipe_t *pipe = xfer_req->pipe;
//Remove it form the pending queue
TAILQ_REMOVE(&pipe->pend_xfer_req_tailq, xfer_req, tailq_entry);
pipe->num_xfer_req_pending--;
//Add it to the done queue
TAILQ_INSERT_TAIL(&pipe->done_xfer_req_tailq, xfer_req, tailq_entry);
pipe->num_xfer_req_done++;
//Update the transfer request and associated IRP's status
xfer_req->state = XFER_REQ_STATE_DONE;
xfer_req->irp->status = USB_TRANSFER_STATUS_CANCELLED;
ret = ESP_OK;
break;
}
case XFER_REQ_STATE_IDLE: {
//Cannot abort a transfer request that was never enqueued
ret = ESP_ERR_INVALID_STATE;
break;
}
default :{
//Transfer request is currently or has already been executed. Nothing to do.
ret = ESP_OK;
break;
}
}
//Check that the IRP was enqueued to begin with
HCD_CHECK_FROM_CRIT(irp->reserved_ptr != NULL
&& IRP_STATE_GET(irp->reserved_flags) != IRP_STATE_IDLE,
ESP_ERR_INVALID_STATE);
if (IRP_STATE_GET(irp->reserved_flags) == IRP_STATE_PENDING) {
//IRP has not been executed so it can be aborted
pipe_t *pipe = (pipe_t *)irp->reserved_ptr;
//Remove it from the pending queue
TAILQ_REMOVE(&pipe->pending_irp_tailq, irp, tailq_entry);
pipe->num_irp_pending--;
//Add it to the done queue
TAILQ_INSERT_TAIL(&pipe->done_irp_tailq, irp, tailq_entry);
pipe->num_irp_done++;
//Update the IRP's current state and status
IRP_STATE_SET(irp->reserved_flags, IRP_STATE_DONE);
irp->status = USB_TRANSFER_STATUS_CANCELLED;
}// Otherwise, the IRP is in-flight or already done thus cannot be aborted
HCD_EXIT_CRITICAL();
return ret;
return ESP_OK;
}

Wyświetl plik

@ -39,10 +39,10 @@ The HAL layer abstracts the DWC_OTG operating in Host Mode using Internal Scatte
# Host Controller Driver (HCD)
The HCD (Host Controller Driver) abstracts the DWC_OTG as N number of ports and an arbitrary number of pipes that can be routed through one of the ports to a device. However note that the underlying hardware controller only has one port, so technically only one port can ever be enabled.
The HCD (Host Controller Driver) abstracts the DWC_OTG as N number of ports and an arbitrary number of pipes that can be routed through one of the ports to a device. However note that **the underlying hardware controller only has a single port, so technically only one port can ever be enabled**.
- In other words, the HCD essentially implements a root hub (not fully behavioral compliant) that contains a single port.
- Pipes are "an association between an endpoint on a device and software on the host". Transfer requests (where each transfer request represents an entire USB transfer) can be enqueued into a pipe for transmission, and dequeued from a pipe when completed.
- Pipes are "an association between an endpoint on a device and software on the host". IRPs (I/O Request Packets) that each represent a USB transfer can be enqueued into a pipe for transmission, and dequeued from a pipe when completed.
The HCD currently has the following limitations:
@ -63,10 +63,11 @@ The HCD currently has the following limitations:
## HCD Pipes
- Pipes can be opened to a particular endpoint based on a descriptor provided on allocation. If opening a default pipe, a `NULL` descriptor can be provided.
- Transfer requests can be enqueued into a pipe. Pipes use a linked list internally, so there is in-theory no limit to the number of transfer requests that can be enqueued.
- Transfer requests need to be dequeued once they are completed.
- Transfer requests are essentially wrappers for USB IRPs (I/O Request Packets). Once allocated, transfer request need to have their target IRP and pipe set before being enqueued.
- Since the IRP is a `typedef` used throughout the entire Host stack, each layer simply needs to pass the pointer of the IRP to the next layer thus minimizing the amount of copying required.
- IRPs can be enqueued into a pipe. Pipes use a linked list internally, so there is in-theory no limit to the number of IRPs that can be enqueued.
- IRPs need to be dequeued once they are completed.
- IRPs need to have their transfer information (such as the data buffer and transfer length in bytes) filled in before they are enqueued.
- IRPs will be owned by the HCD until they are dequeued. Thus, users should not attempt to modify an IRP object (and the IRP's data buffer) until the IRP is dequeued.
- The IRP is defined in `usb.h` instead of `hcd.h` so that it can be used throughout the entire Host stack. Each layer simply needs to pass the pointer of the IRP to the next layer thus minimizing the amount of copying required.
## HCD SW Arch

Wyświetl plik

@ -51,14 +51,14 @@ typedef enum {
* @brief States of an HCD pipe
*
* Active:
* - Pipe is able to transmit data. Transfer request can be enqueued.
* - Event if pipe has no transfer requests enqueued, it can still be in the active state.
* - Pipe is able to transmit data. IRPs can be enqueued.
 * - Even if the pipe has no IRPs enqueued, it can still be in the active state.
* Halted:
* - An error has occurred on the pipe. Transfer request will no longer be executed.
* - An error has occurred on the pipe. IRPs will no longer be executed.
* - Halt should be cleared using the clear command
* Invalid:
* - The underlying device that the pipe connects is not longer valid, thus making the pipe invalid.
* - Pending transfer requests should be dequeued and the pipe should be freed.
* - Pending IRPs should be dequeued and the pipe should be freed.
*/
typedef enum {
HCD_PIPE_STATE_ACTIVE, /**< The pipe is active */
@ -91,10 +91,10 @@ typedef enum {
*/
typedef enum {
HCD_PIPE_EVENT_NONE, /**< The pipe has no events (used to indicate no events when polling) */
HCD_PIPE_EVENT_XFER_REQ_DONE, /**< The pipe has completed a transfer request and can be dequeued */
HCD_PIPE_EVENT_IRP_DONE, /**< The pipe has completed an IRP. The IRP can be dequeued */
HCD_PIPE_EVENT_INVALID, /**< The pipe is invalid because */
HCD_PIPE_EVENT_ERROR_XFER, /**< Excessive (three consecutive) transaction errors (e.g., no ACK, bad CRC etc) */
HCD_PIPE_EVENT_ERROR_XFER_NOT_AVAIL, /**< Transfer request was not available */
HCD_PIPE_EVENT_ERROR_IRP_NOT_AVAIL, /**< IRP was not available */
HCD_PIPE_EVENT_ERROR_OVERFLOW, /**< Received more data than requested. Usually a Packet babble error
(i.e., an IN packet has exceeded the endpoint's MPS) */
HCD_PIPE_EVENT_ERROR_STALL, /**< Pipe received a STALL response received */
@ -120,8 +120,8 @@ typedef enum {
* The pipe commands represent the list of pipe manipulations outlined in 10.5.2.2. of USB2.0 specification.
*/
typedef enum {
HCD_PIPE_CMD_ABORT, /**< Retire all scheduled transfer requests. Pipe's state remains unchanged */
HCD_PIPE_CMD_RESET, /**< Retire all scheduled transfer requests. Pipe's state moves to active */
HCD_PIPE_CMD_ABORT, /**< Retire all scheduled IRPs. Pipe's state remains unchanged */
HCD_PIPE_CMD_RESET, /**< Retire all scheduled IRPs. Pipe's state moves to active */
HCD_PIPE_CMD_CLEAR, /**< Pipe's state moves from halted to active */
HCD_PIPE_CMD_HALT /**< Pipe's state moves to halted */
} hcd_pipe_cmd_t;
@ -138,11 +138,6 @@ typedef void * hcd_port_handle_t;
*/
typedef void * hcd_pipe_handle_t;
/**
* @brief HCD transfer request handle type
*/
typedef void * hcd_xfer_req_handle_t;
/**
* @brief Port event callback type
*
@ -360,7 +355,7 @@ esp_err_t hcd_pipe_alloc(hcd_port_handle_t port_hdl, const hcd_pipe_config_t *pi
*
* Frees the resources used by an HCD pipe. The pipe's handle should be discarded after calling this function. The pipe
* must be in following condition before it can be freed:
* - All transfers have been dequeued
* - All IRPs have been dequeued
*
* @param pipe_hdl Pipe handle
*
@ -376,7 +371,7 @@ esp_err_t hcd_pipe_free(hcd_pipe_handle_t pipe_hdl);
* address and maximum packet size. This function can only be called on a pipe that has met the following conditions:
* - Pipe is still valid (i.e., not in the HCD_PIPE_STATE_INVALID state)
* - Pipe is not currently processing a command
* - All transfer request have been dequeued from the pipe
* - All IRPs have been dequeued from the pipe
*
* @param pipe_hdl Pipe handle
* @param dev_addr New device address
@ -406,12 +401,12 @@ hcd_pipe_state_t hcd_pipe_get_state(hcd_pipe_handle_t pipe_hdl);
/**
* @brief Execute a command on a particular pipe
*
* Pipe commands allow a pipe to be manipulated (such as clearing a halt, retiring all transfer requests etc). The
* following conditions must for a pipe command to be issued:
* Pipe commands allow a pipe to be manipulated (such as clearing a halt, retiring all IRPs etc). The following
 * conditions must be met for a pipe command to be issued:
* - Pipe is still valid (i.e., not in the HCD_PIPE_STATE_INVALID)
* - No other thread/task processing a command on the pipe concurrently (will return)
*
* @note Some pipe commands will block until the pipe's current inflight transfer is completed. If the pipe's state
* @note Some pipe commands will block until the pipe's current inflight IRP is complete. If the pipe's state
 * changes unexpectedly, this function will return ESP_ERR_INVALID_RESPONSE
*
* @param pipe_hdl Pipe handle
@ -433,92 +428,47 @@ esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command);
*/
hcd_pipe_event_t hcd_pipe_get_event(hcd_pipe_handle_t pipe_hdl);
// ----------------------------------------------- HCD Transfer Requests -----------------------------------------------
// ---------------------------------------------------- HCD IRPs -------------------------------------------------------
/**
* @brief Allocate a transfer request
* @brief Enqueue an IRP to a particular pipe
*
* @note The allocate transfer request will not have its target set (i.e., no target pipe and associated IRP). Call
* hcd_xfer_req_set_target() before enqueueing the transfer request
*
* @return hcd_xfer_req_handle_t Transfer request handle or NULL if failed.
*/
hcd_xfer_req_handle_t hcd_xfer_req_alloc(void);
/**
* @brief Free a transfer request
*
* @note The transfer request must be dequeued before it can be freed
*
* @param req_hdl Transfer request handle
*/
void hcd_xfer_req_free(hcd_xfer_req_handle_t req_hdl);
/**
* @brief Set a transfer request's target
*
* Setting a transfer request's target will associate a transfer request with a pipe and a USB IRP (i.e., the data). A
* transfer request's target must be set before it can be enqueued.
*
* @note This should only be called when a transfer requests that are not currently enqueued
*
* @param req_hdl Transfer request handle
* @param pipe_hdl Target pipe's handle
* @param irp Target IRP handle
* @param context Context variable to associate transfer request with upper layer object
*/
void hcd_xfer_req_set_target(hcd_xfer_req_handle_t req_hdl, hcd_pipe_handle_t pipe_hdl, usb_irp_t *irp, void *context);
/**
* @brief Get the target of a transfer request
*
* @note This should only be called when a transfer requests that are not currently enqueued
*
* @param[in] req_hdl Transfer request handle
* @param[out] pipe_hdl Target pipe's handle
* @param[out] irp Target IRP's handle
* @param[out] context Context variable
*/
void hcd_xfer_req_get_target(hcd_xfer_req_handle_t req_hdl, hcd_pipe_handle_t *pipe_hdl, usb_irp_t **irp, void **context);
/**
* @brief Enqueue a transfer request
*
* The following conditions must be met for a transfer request to be enqueued:
* - The transfer request's target must be set
* - Transfer request must not already be enqueued
* - The target pipe must be in the HCD_PIPE_STATE_ACTIVE state
*
* @param req_hdl Transfer request handle
* @retval ESP_OK: Transfer request enqueued successfully
* @retval ESP_ERR_INVALID_STATE: Conditions not met to enqueue transfer request
*/
esp_err_t hcd_xfer_req_enqueue(hcd_xfer_req_handle_t req_hdl);
/**
* @brief Dequeue a completed transfer request from a pipe
*
* This function should be called on a pipe after it receives an pipe event. If a pipe has multiple transfer requests
* that can be dequeued, this function must be called repeatedely until all transfer requests are dequeued. If a pipe
* has no more transfer requests to dequeue, this function will return NULL.
* The following conditions must be met before an IRP can be enqueued:
* - The IRP is properly initialized (data buffer and transfer length are set)
* - The IRP must not already be enqueued
* - The pipe must be in the HCD_PIPE_STATE_ACTIVE state
*
* @param pipe_hdl Pipe handle
* @return hcd_xfer_req_handle_t Transfer request handle or NULL if no more transfer requests to dequeue.
* @param irp I/O Request Packet to enqueue
* @retval ESP_OK: IRP enqueued successfully
* @retval ESP_ERR_INVALID_STATE: Conditions not met to enqueue IRP
*/
hcd_xfer_req_handle_t hcd_xfer_req_dequeue(hcd_pipe_handle_t pipe_hdl);
esp_err_t hcd_irp_enqueue(hcd_pipe_handle_t pipe_hdl, usb_irp_t *irp);
/**
* @brief Abort an ongoing transfer request
* @brief Dequeue an IRP from a particular pipe
*
* This function will attempt to abort an enqueued transfer request. If the transfer request has not yet been executed,
* it will be marked as "cancelled" and can be dequeued. If a transfer request is already in progress or has completed,
* it will not be affected by this function.
* This function should be called on a pipe after a pipe receives a HCD_PIPE_EVENT_IRP_DONE event. If a pipe has
 * multiple IRPs that can be dequeued, this function should be called repeatedly until all IRPs are dequeued. If a pipe
* has no more IRPs to dequeue, this function will return NULL.
*
* @param req_hdl Transfer request handle
* @retval ESP_OK: Transfer request successfully aborted, or did not need to be aborted
* @retval ESP_ERR_INVALID_STATE: Transfer request was never enqueued
* @param pipe_hdl Pipe handle
* @return usb_irp_t* Dequeued I/O Request Packet, or NULL if no more IRPs to dequeue
*/
esp_err_t hcd_xfer_req_abort(hcd_xfer_req_handle_t req_hdl);
usb_irp_t *hcd_irp_dequeue(hcd_pipe_handle_t pipe_hdl);
/**
* @brief Abort an enqueued IRP
*
* This function will attempt to abort an IRP that is already enqueued. If the IRP has yet to be executed, it will be
 * "cancelled" and can then be dequeued. If the IRP is currently in flight or has already completed, the IRP will not be
* affected by this function.
*
* @param irp I/O Request Packet to abort
* @retval ESP_OK: IRP successfully aborted, or was not affected by this function
* @retval ESP_ERR_INVALID_STATE: IRP was never enqueued
*/
esp_err_t hcd_irp_abort(usb_irp_t *irp);
#ifdef __cplusplus
}

Wyświetl plik

@ -28,6 +28,9 @@ extern "C"
{
#endif
#include <stdint.h>
#include <sys/queue.h>
#define USB_CTRL_REQ_ATTR __attribute__((packed))
#define USB_DESC_ATTR __attribute__((packed))
@ -64,7 +67,7 @@ typedef enum {
USB_TRANSFER_STATUS_COMPLETED, /**< The transfer was successful (but may be short) */
USB_TRANSFER_STATUS_ERROR, /**< The transfer failed because due to excessive errors (e.g. no response or CRC error) */
USB_TRANSFER_STATUS_TIMED_OUT, /**< The transfer failed due to a time out */
USB_TRANSFER_STATUS_CANCELLED, /**< The transfer was cancelled */
USB_TRANSFER_STATUS_CANCELLED, /**< The transfer was canceled */
USB_TRANSFER_STATUS_STALL, /**< The transfer was stalled */
USB_TRANSFER_STATUS_NO_DEVICE, /**< The transfer failed because the device is no longer valid (e.g., disconnected */
USB_TRANSFER_STATUS_OVERFLOW, /**< The transfer as more data was sent than was requested */
@ -87,33 +90,38 @@ typedef struct {
/**
* @brief USB IRP (I/O Request Packet). See USB2.0 Spec
*
* An identifiable request by a software client to move data between itself (on the
* host) and an endpoint of a device in an appropriate direction.
 * An IRP is used to represent a data transfer request from a software client to an endpoint over the USB bus. The same
* IRP object type is used at each layer of the USB stack. This minimizes copying/conversion across the different layers
* of the stack as each layer will pass a pointer to this type of object.
*
* This structure represents the bare-bones of the request. Different layers of
* USB drivers will wrap their own objects around this.
* See 10.5.3.1 os USB2.0 specification
* Bulk: Represents a single bulk transfer which a pipe will transparently split into multiple MPS transactions (until
* the last)
* Control: Represents a single control transfer with the setup packet at the first 8 bytes of the buffer.
* Interrupt: Represents a single interrupt transaction
* Isochronous: Represents a buffer of a stream of bytes which the pipe will transparently transfer the stream of bytes
* one or more service periods
*
* See 10.5.3.1 of USB2.0 specification for the full details regarding IRPs and their implications on each transfer type.
* - Bulk: Represents a single bulk transfer where a pipe will internally split the transfer into one or more MPS
* packets (except for the last packet) until all the bytes have been sent/received.
* Control: Represents a single control transfer with the setup packet at the first 8 bytes of the buffer. A pipe will
* internally split the transfer into its Setup, Data, and Status stages.
* Interrupt: Represents an interrupt transfer where a pipe will internally split the transfer into one or more MPS
* packets (except for the last packet). Each packet is transmitted at the pipes established period (i.e.,
* the period specified by bInterval).
* Isochronous: Represents an Isochronous transfer (i.e., buffer of a stream of bytes). The pipe will internally split
* the stream into one or more packets and transmit each packet at the pipe's established period (i.e., the
* period specified by bInterval). The size of each packet is specified in its respective Isochronous
* Packet Descriptor in the IRP.
* @note The tailq_entry and reserved variables are used by the USB Host stack internally. Users should not modify those fields.
* @note Once an IRP is submitted, users should not modify the IRP as the Host stack takes ownership of the IRP.
*/
typedef struct {
int num_bytes; /**< Number of bytes in IRP. Control should exclude size of setup. IN should be integer multiple of MPS */
int actual_num_bytes; /**< Actual number of bytes transmitted/received in the IRP */
uint8_t *data_buffer; /**< Pointer to data buffer. Must be DMA capable memory */
usb_transfer_status_t status; /**< Status of the transfer */
int num_iso_packets; /**< Only relevant to isochronous. Number of service periods to transfer data buffer over. Set to 0 for non-iso transfers */
struct usb_irp_obj {
//Internal members
TAILQ_ENTRY(usb_irp_obj) tailq_entry; /**< TAILQ entry that allows this object to be added to linked lists. Users should NOT modify this field */
void *reserved_ptr; /**< Reserved pointer variable for internal use in the stack. Users should set this to NULL on allocation and NOT modify this afterwards */
uint32_t reserved_flags; /**< Reserved variable for flags used internally in the stack. Users should set this to 0 on allocation and NOT modify this afterwards */
//Public members
uint8_t *data_buffer; /**< Pointer to data buffer. Must be DMA capable memory */
int num_bytes; /**< Number of bytes in IRP. Control should exclude size of setup. IN should be integer multiple of MPS */
    int actual_num_bytes;                   /**< Actual number of bytes transmitted/received in the IRP */
usb_transfer_status_t status; /**< Status of the transfer */
uint32_t timeout; /**< Timeout (in milliseconds) of the packet */
void *context; /**< Context variable used to associate the IRP object with another object */
int num_iso_packets; /**< Only relevant to Isochronous. Number of service periods to transfer data buffer over. Set to 0 for non-iso transfers */
usb_iso_packet_desc_t iso_packet_desc[0]; /**< Descriptors for each ISO packet */
} usb_irp_t;
};
typedef struct usb_irp_obj usb_irp_t;
// ---------------------------------------------------- Chapter 9 ------------------------------------------------------

Wyświetl plik

@ -17,13 +17,14 @@
#include "freertos/semphr.h"
#include "unity.h"
#include "test_utils.h"
#include "soc/gpio_pins.h"
#include "soc/gpio_sig_map.h"
#include "esp_intr_alloc.h"
#include "esp_err.h"
#include "esp_attr.h"
#include "esp_rom_gpio.h"
#include "soc/gpio_pins.h"
#include "soc/gpio_sig_map.h"
#include "hal/usbh_ll.h"
#include "usb.h"
#include "hcd.h"
// -------------------------------------------------- PHY Control ------------------------------------------------------
@ -33,10 +34,10 @@ static void phy_force_conn_state(bool connected, TickType_t delay_ticks)
vTaskDelay(delay_ticks);
usb_wrap_dev_t *wrap = &USB_WRAP;
if (connected) {
//Swap back to internal PHY that is connected to a devicee
//Swap back to internal PHY that is connected to a device
wrap->otg_conf.phy_sel = 0;
} else {
//Set externa PHY input signals to fixed voltage levels mimicing a disconnected state
//Set external PHY input signals to fixed voltage levels mimicking a disconnected state
esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ZERO_INPUT, USB_EXTPHY_VP_IDX, false);
esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ZERO_INPUT, USB_EXTPHY_VM_IDX, false);
esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ONE_INPUT, USB_EXTPHY_RCV_IDX, false);
@ -48,9 +49,10 @@ static void phy_force_conn_state(bool connected, TickType_t delay_ticks)
// ------------------------------------------------ Helper Functions ---------------------------------------------------
#define EVENT_QUEUE_LEN 5
#define NUM_XFER_REQS 3
#define XFER_DATA_MAX_LEN 256 //Just assume that will only IN/OUT 256 bytes for now
#define NUM_IRPS 3
#define TRANSFER_DATA_MAX_BYTES 256 //Just assume that will only IN/OUT 256 bytes for now
#define PORT_NUM 1
#define IRP_CONTEXT_VAL ((void *)0xDEADBEEF) //Context value for created IRPs
typedef struct {
hcd_port_handle_t port_hdl;
@ -214,13 +216,11 @@ static void wait_for_disconnection(hcd_port_handle_t port_hdl, QueueHandle_t por
TEST_ASSERT_EQUAL(HCD_PORT_STATE_NOT_POWERED, hcd_port_get_state(port_hdl));
}
static void alloc_pipe_and_xfer_reqs(hcd_port_handle_t port_hdl,
QueueHandle_t pipe_evt_queue,
hcd_pipe_handle_t *pipe_hdl,
hcd_xfer_req_handle_t *req_hdls,
uint8_t **data_buffers,
usb_irp_t **irps,
int num_xfers)
static void alloc_pipe_and_irp_list(hcd_port_handle_t port_hdl,
QueueHandle_t pipe_evt_queue,
int num_irps,
hcd_pipe_handle_t *pipe_hdl,
usb_irp_t ***irp_list)
{
//We don't support hubs yet. Just get the speed of the port to determine the speed of the device
usb_speed_t port_speed;
@ -238,38 +238,37 @@ static void alloc_pipe_and_xfer_reqs(hcd_port_handle_t port_hdl,
};
TEST_ASSERT_EQUAL(ESP_OK, hcd_pipe_alloc(port_hdl, &config, pipe_hdl));
TEST_ASSERT_NOT_EQUAL(NULL, *pipe_hdl);
//Create transfer requests (and other required objects such as IRPs and data buffers)
printf("Creating transfer requests\n");
for (int i = 0; i < num_xfers; i++) {
//Allocate transfer request object
req_hdls[i] = hcd_xfer_req_alloc();
TEST_ASSERT_NOT_EQUAL(NULL, req_hdls[i]);
//Allocate data buffers
data_buffers[i] = heap_caps_malloc(sizeof(usb_ctrl_req_t) + XFER_DATA_MAX_LEN, MALLOC_CAP_DMA);
TEST_ASSERT_NOT_EQUAL(NULL, data_buffers[i]);
//Allocate IRP object
irps[i] = heap_caps_malloc(sizeof(usb_irp_t), MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_EQUAL(NULL, irps[i]);
//Set the transfer request's target
hcd_xfer_req_set_target(req_hdls[i], *pipe_hdl, irps[i], NULL);
//Create IRPs and their data buffers
printf("Creating IRPs and IRP list\n");
*irp_list = heap_caps_malloc(sizeof(usb_irp_t *) * num_irps, MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_EQUAL(NULL, *irp_list);
for (int i = 0; i < num_irps; i++) {
//Allocate IRP
usb_irp_t *irp = heap_caps_calloc(1, sizeof(usb_irp_t), MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
//Allocate data buffer
uint8_t *data_buffer = heap_caps_malloc(sizeof(usb_ctrl_req_t) + TRANSFER_DATA_MAX_BYTES, MALLOC_CAP_DMA);
TEST_ASSERT_NOT_EQUAL(NULL, data_buffer);
//Initialize IRP and IRP list
irp->data_buffer = data_buffer;
irp->num_iso_packets = 0;
(*irp_list)[i] = irp;
}
}
static void free_pipe_and_xfer_reqs(hcd_pipe_handle_t pipe_hdl,
hcd_xfer_req_handle_t *req_hdls,
uint8_t **data_buffers,
usb_irp_t **irps,
int num_xfers)
static void free_pipe_and_irp_list(hcd_pipe_handle_t pipe_hdl,
int num_irps,
usb_irp_t **irp_list)
{
printf("Freeing transfer requets\n");
//Free transfer requests (and their associated objects such as IRPs and data buffers)
for (int i = 0; i < num_xfers; i++) {
heap_caps_free(irps[i]);
heap_caps_free(data_buffers[i]);
hcd_xfer_req_free(req_hdls[i]);
printf("Freeing IRPs and IRP list\n");
for (int i = 0; i < num_irps; i++) {
usb_irp_t *irp = irp_list[i] ;
//Free data buffer
heap_caps_free(irp->data_buffer);
heap_caps_free(irp);
}
heap_caps_free(irp_list);
printf("Freeing default pipe\n");
//Delete the pipe
TEST_ASSERT_EQUAL(ESP_OK, hcd_pipe_free(pipe_hdl));
}
@ -280,13 +279,13 @@ Test a port sudden disconnect and port recovery
Purpose: Test that when sudden disconnection happens on an HCD port, the port will
- Generate the HCD_PORT_EVENT_SUDDEN_DISCONN and be put into the HCD_PORT_STATE_RECOVERY state
- Ongoing transfers requests and pipes are handled correctly
- Ongoing IRPs and pipes are handled correctly
Procedure:
- Setup HCD, a default pipe, and multiple transfer requests
- Setup HCD, a default pipe, and multiple IRPs
- Start transfers but immediately trigger a disconnect
- Check that HCD_PORT_EVENT_SUDDEN_DISCONN event is generated
- Check that default pipe is invalid and transfer requests can be dequeued
- Check that default pipe is invalid and IRPs can be dequeued
- Recover the port and try to connect then disconnect again (to make sure the port works after recovery)
- Teardown HCD
*/
@ -299,24 +298,21 @@ TEST_CASE("Test HCD port sudden disconnect", "[hcd][ignore]")
wait_for_connection(port_hdl, port_evt_queue);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
//Allocate transfer requests
//Allocate default pipe and IRPs
hcd_pipe_handle_t default_pipe;
hcd_xfer_req_handle_t req_hdls[NUM_XFER_REQS];
uint8_t *data_buffers[NUM_XFER_REQS];
usb_irp_t *irps[NUM_XFER_REQS];
alloc_pipe_and_xfer_reqs(port_hdl, pipe_evt_queue, &default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
usb_irp_t **irp_list;
alloc_pipe_and_irp_list(port_hdl, pipe_evt_queue, NUM_IRPS, &default_pipe, &irp_list);
//Initialize transfer requests to send a "Get Device Descriptor" request
for (int i = 0; i < NUM_XFER_REQS; i++) {
irps[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) data_buffers[i], 0, XFER_DATA_MAX_LEN);
irps[i]->data_buffer = data_buffers[i];
irps[i]->num_iso_packets = 0;
//Initialize IRPs to send a "Get Config Descriptor" request
for (int i = 0; i < NUM_IRPS; i++) {
irp_list[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) irp_list[i]->data_buffer, 0, TRANSFER_DATA_MAX_BYTES);
irp_list[i]->context = IRP_CONTEXT_VAL;
}
//Enqueue those transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
//Enqueue those IRPs
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
phy_force_conn_state(false, 0); //Force disconnected state on PHY
@ -325,35 +321,30 @@ TEST_CASE("Test HCD port sudden disconnect", "[hcd][ignore]")
TEST_ASSERT_EQUAL(HCD_PORT_STATE_RECOVERY, hcd_port_get_state(port_hdl));
printf("Sudden disconnect\n");
//Handling the disconenction event should have invalidated all pipes.
//Pipe should have received (zero or more HCD_PIPE_EVENT_XFER_REQ_DONE) followed by a HCD_PIPE_EVENT_INVALID (MUST OCCUR)
//Handling the disconnection event should have invalidated all pipes.
//Pipe should have received (zero or more HCD_PIPE_EVENT_IRP_DONE) followed by a HCD_PIPE_EVENT_INVALID (MUST OCCUR)
int num_pipe_events = EVENT_QUEUE_LEN - uxQueueSpacesAvailable(pipe_evt_queue);
for (int i = 0; i < num_pipe_events - 1; i++) {
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_XFER_REQ_DONE);
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_IRP_DONE);
}
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_INVALID);
TEST_ASSERT_EQUAL(hcd_pipe_get_state(default_pipe), HCD_PIPE_STATE_INVALID);
//Dequeue transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
TEST_ASSERT_EQUAL(irps[i], irp);
//Dequeue IRPs
for (int i = 0; i < NUM_IRPS; i++) {
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED || irp->status == USB_TRANSFER_STATUS_NO_DEVICE);
TEST_ASSERT_EQUAL(NULL, context);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Free transfer requests
free_pipe_and_xfer_reqs(default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
//Free IRPs
free_pipe_and_irp_list(default_pipe, NUM_IRPS, irp_list);
//Recover the port should return to the to NOT POWERED state
TEST_ASSERT_EQUAL(ESP_OK, hcd_port_recover(port_hdl));
TEST_ASSERT_EQUAL(HCD_PORT_STATE_NOT_POWERED, hcd_port_get_state(port_hdl));
//Recovered port should be able to connect and disconenct again
//Recovered port should be able to connect and disconnect again
wait_for_connection(port_hdl, port_evt_queue);
wait_for_disconnection(port_hdl, port_evt_queue, false);
teardown(port_evt_queue, pipe_evt_queue, port_hdl);
@ -363,15 +354,15 @@ TEST_CASE("Test HCD port sudden disconnect", "[hcd][ignore]")
Test port suspend and resume with active pipes
Purpose:
- Test p[ort suspend and resume commands work correctly whilst there are active pipes with ongoing transfers
- Test port suspend and resume commands work correctly whilst there are active pipes with ongoing transfers
- When suspending, the pipes should be allowed to finish their current ongoing transfer before the bus is suspended.
- When resuming, pipes with pending transfer should be started after the bus is resumed.
Procedure:
- Setup HCD, a port, a default pipe, and multiple transfer requests
- Setup HCD, a port, a default pipe, and multiple IRPs
- Start transfers but immediately suspend the port
- Resume the port
- Check all transfer requests have also be resumed and completed on port resume
- Check all IRPs have also been resumed and completed on port resume
- Teardown
*/
TEST_CASE("Test HCD port suspend and resume", "[hcd][ignore]")
@ -383,23 +374,23 @@ TEST_CASE("Test HCD port suspend and resume", "[hcd][ignore]")
wait_for_connection(port_hdl, port_evt_queue);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
//Allocate transfer requests
//Allocate default pipe and IRPs
hcd_pipe_handle_t default_pipe;
hcd_xfer_req_handle_t req_hdls[NUM_XFER_REQS];
uint8_t *data_buffers[NUM_XFER_REQS];
usb_irp_t *irps[NUM_XFER_REQS];
alloc_pipe_and_xfer_reqs(port_hdl, pipe_evt_queue, &default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
//Initialize transfer requests to send a "Get Device Descriptor" request
for (int i = 0; i < NUM_XFER_REQS; i++) {
irps[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) data_buffers[i], 0, XFER_DATA_MAX_LEN);
irps[i]->data_buffer = data_buffers[i];
irps[i]->num_iso_packets = 0;
usb_irp_t **irp_list;
alloc_pipe_and_irp_list(port_hdl, pipe_evt_queue, NUM_IRPS, &default_pipe, &irp_list);
//Initialize IRPs to send a "Get Config Descriptor" request
for (int i = 0; i < NUM_IRPS; i++) {
irp_list[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *)irp_list[i]->data_buffer, 0, TRANSFER_DATA_MAX_BYTES);
irp_list[i]->context = IRP_CONTEXT_VAL;
}
//Enqueue those transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
//Enqueue those IRPs
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
//Immediately suspend the bus whilst pipes are active
TEST_ASSERT_EQUAL(ESP_OK, hcd_port_command(port_hdl, HCD_PORT_CMD_SUSPEND));
TEST_ASSERT_EQUAL(HCD_PORT_STATE_SUSPENDED, hcd_port_get_state(port_hdl));
@ -410,22 +401,18 @@ TEST_CASE("Test HCD port suspend and resume", "[hcd][ignore]")
TEST_ASSERT_EQUAL(HCD_PORT_STATE_ENABLED, hcd_port_get_state(port_hdl));
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time for resumed transfers to complete
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_XFER_REQ_DONE);
//Dequeue transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
TEST_ASSERT_EQUAL(irps[i], irp);
//Dequeue IRPs
for (int i = 0; i < NUM_IRPS; i++) {
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_IRP_DONE);
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED);
TEST_ASSERT_EQUAL(NULL, context);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Free IRPs
free_pipe_and_irp_list(default_pipe, NUM_IRPS, irp_list);
//Free transfer requests
free_pipe_and_xfer_reqs(default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
//Cleanup
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
wait_for_disconnection(port_hdl, port_evt_queue, false);
teardown(port_evt_queue, pipe_evt_queue, port_hdl);
@ -440,7 +427,7 @@ Purpose:
- After disabling the port, all pipes should become invalid.
Procedure:
- Setup HCD, a default pipe, and multiple transfer requests
- Setup HCD, a default pipe, and multiple IRPs
- Start transfers but immediately disable the port
- Check pipe received invalid event
- Check that transfers are either done or not executed
@ -455,47 +442,44 @@ TEST_CASE("Test HCD port disable", "[hcd][ignore]")
wait_for_connection(port_hdl, port_evt_queue);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
//Allocate transfer requests
//Allocate default pipe and IRPs
hcd_pipe_handle_t default_pipe;
hcd_xfer_req_handle_t req_hdls[NUM_XFER_REQS];
uint8_t *data_buffers[NUM_XFER_REQS];
usb_irp_t *irps[NUM_XFER_REQS];
alloc_pipe_and_xfer_reqs(port_hdl, pipe_evt_queue, &default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
//Initialize transfer requests to send a "Get Device Descriptor" request
for (int i = 0; i < NUM_XFER_REQS; i++) {
irps[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) data_buffers[i], 0, XFER_DATA_MAX_LEN);
irps[i]->data_buffer = data_buffers[i];
irps[i]->num_iso_packets = 0;
usb_irp_t **irp_list;
alloc_pipe_and_irp_list(port_hdl, pipe_evt_queue, NUM_IRPS, &default_pipe, &irp_list);
//Initialize IRPs to send a "Get Config Descriptor" request
for (int i = 0; i < NUM_IRPS; i++) {
irp_list[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) irp_list[i]->data_buffer, 0, TRANSFER_DATA_MAX_BYTES);
irp_list[i]->context = IRP_CONTEXT_VAL;
}
//Enqueue those transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
//Enqueue those IRPs
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
//Immediately disable port
TEST_ASSERT_EQUAL(ESP_OK, hcd_port_command(port_hdl, HCD_PORT_CMD_DISABLE));
TEST_ASSERT_EQUAL(HCD_PORT_STATE_DISABLED, hcd_port_get_state(port_hdl));
printf("Disabled\n");
//Pipe should have received (zero or more HCD_PIPE_EVENT_XFER_REQ_DONE) followed by a HCD_PIPE_EVENT_INVALID (MUST OCCUR)
//Pipe should have received (zero or more HCD_PIPE_EVENT_IRP_DONE) followed by a HCD_PIPE_EVENT_INVALID (MUST OCCUR)
int num_pipe_events = EVENT_QUEUE_LEN - uxQueueSpacesAvailable(pipe_evt_queue);
for (int i = 0; i < num_pipe_events - 1; i++) {
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_XFER_REQ_DONE);
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_IRP_DONE);
}
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_INVALID);
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
TEST_ASSERT_EQUAL(irps[i], irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED || irp->status == USB_TRANSFER_STATUS_NO_DEVICE);
TEST_ASSERT_EQUAL(NULL, context);
}
//Free transfer requests
free_pipe_and_xfer_reqs(default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
//Dequeue IRPs
for (int i = 0; i < NUM_IRPS; i++) {
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED || irp->status == USB_TRANSFER_STATUS_NO_DEVICE);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Free IRPs
free_pipe_and_irp_list(default_pipe, NUM_IRPS, irp_list);
//Already disabled. Disconnect and teardown
wait_for_disconnection(port_hdl, port_evt_queue, true);
teardown(port_evt_queue, pipe_evt_queue, port_hdl);
@ -519,7 +503,7 @@ static void concurrent_task(void *arg)
SemaphoreHandle_t sync_sem = (SemaphoreHandle_t) arg;
xSemaphoreTake(sync_sem, portMAX_DELAY);
vTaskDelay(pdMS_TO_TICKS(10)); //Give a short delay to let the reset command start in the main thread
//Forcibly a disconenction
//Forcibly trigger a disconnection
phy_force_conn_state(false, 0);
vTaskDelay(portMAX_DELAY); //Block forever and wait to be deleted
}
@ -562,24 +546,25 @@ TEST_CASE("Test HCD port command bailout", "[hcd][ignore]")
}
// --------------------------------------------------- Pipe Tests ------------------------------------------------------
/*
Test HCD Transfer Requests (normal completion and early abort)
Test HCD IRPs (normal completion and early abort)
Purpose:
- Test that pipes can be created
- Transfer requests can be created and enqueued
- Pipe returns HCD_PIPE_EVENT_XFER_REQ_DONE
- Test that transfer requests can be aborted when enqueued
- IRPs can be created and enqueued
- Pipe returns HCD_PIPE_EVENT_IRP_DONE
- Test that IRPs can be aborted when enqueued
Procedure:
- Setup
- Allocate transfer requests. Initialize as Get Device Descriptor request
- Enqueue transfer requests
- Expect HCD_PIPE_EVENT_XFER_REQ_DONE. Deallocate transfer requests
- Requeue transfer requests, but abort them immediately
- Allocate IRPs. Initialize as Get Device Descriptor request
- Enqueue IRPs
- Expect HCD_PIPE_EVENT_IRP_DONE. Deallocate IRPs
- Requeue IRPs, but abort them immediately
- Teardown
*/
TEST_CASE("Test HCD pipe transfer request", "[hcd][ignore]")
TEST_CASE("Test HCD IRP enqueue", "[hcd][ignore]")
{
QueueHandle_t port_evt_queue;
QueueHandle_t pipe_evt_queue;
@ -588,69 +573,56 @@ TEST_CASE("Test HCD pipe transfer request", "[hcd][ignore]")
wait_for_connection(port_hdl, port_evt_queue);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
//Allocate transfer requests
//Allocate default pipe and IRPs
hcd_pipe_handle_t default_pipe;
hcd_xfer_req_handle_t req_hdls[NUM_XFER_REQS];
uint8_t *data_buffers[NUM_XFER_REQS];
usb_irp_t *irps[NUM_XFER_REQS];
alloc_pipe_and_xfer_reqs(port_hdl, pipe_evt_queue, &default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
usb_irp_t **irp_list;
alloc_pipe_and_irp_list(port_hdl, pipe_evt_queue, NUM_IRPS, &default_pipe, &irp_list);
//Initialize transfer requests to send a "Get Device Descriptor" request
for (int i = 0; i < NUM_XFER_REQS; i++) {
irps[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_DEVC_DESC((usb_ctrl_req_t *) data_buffers[i]);
irps[i]->data_buffer = data_buffers[i];
irps[i]->num_iso_packets = 0;
//Initialize IRPs to send a "Get Config Descriptor 0" request
for (int i = 0; i < NUM_IRPS; i++) {
irp_list[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) irp_list[i]->data_buffer, 0, TRANSFER_DATA_MAX_BYTES);
irp_list[i]->context = IRP_CONTEXT_VAL;
}
//Enqueue those transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
}
//Wait for each done event of each transfer request
for (int i = 0; i < NUM_XFER_REQS; i++) {
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_XFER_REQ_DONE);
//Enqueue those IRPs
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
//Dequeue transfer requests and check results.
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
TEST_ASSERT_EQUAL(req_hdls[i], req_hdl);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
TEST_ASSERT_EQUAL(irp, irps[i]);
TEST_ASSERT_EQUAL(USB_TRANSFER_STATUS_COMPLETED, irp->status);
TEST_ASSERT_EQUAL(NULL, context);
//Wait for each done event of each IRP
for (int i = 0; i < NUM_IRPS; i++) {
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_IRP_DONE);
}
//Dequeue IRPs
for (int i = 0; i < NUM_IRPS; i++) {
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Enqueue them again but abort them short after
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_abort(req_hdls[i]);
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_abort(irp_list[i]));
}
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time for any inflight transfers to complete
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time for any in-flight transfers to complete
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_XFER_REQ_DONE);
//Dequeue transfer rqeuests and check results of aborted transfer request
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
//No need to check IRP order as the abort will cause them to dequeue out of order
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_IRP_DONE);
//Wait for the IRPs to complete and dequeue them, then check results
for (int i = 0; i < NUM_IRPS; i++) {
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED || irp->status == USB_TRANSFER_STATUS_CANCELLED);
TEST_ASSERT_EQUAL(NULL, context);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Free transfer requests
free_pipe_and_xfer_reqs(default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
//Free IRPs and default pipe
free_pipe_and_irp_list(default_pipe, NUM_IRPS, irp_list);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
wait_for_disconnection(port_hdl, port_evt_queue, false);
@ -662,13 +634,13 @@ Test HCD pipe STALL condition, abort, and clear
Purpose:
- Test that a pipe can react to a STALL (i.e., a HCD_PIPE_EVENT_HALTED event)
- The HCD_PIPE_CMD_ABORT can retire all transfer requests
- The HCD_PIPE_CMD_ABORT can retire all IRPs
- Pipe clear command can return the pipe to being active
Procedure:
- Setup HCD and a port, a default pipe, and multiple transfer requests
- Corrupt the first transfer request, then enqueue all of them.
- The corrupted transfer request should trigger a STALL response from the endpoint
- Setup HCD and a port, a default pipe, and multiple IRPs
- Corrupt the first IRP, then enqueue all of them.
- The corrupted IRP should trigger a STALL response from the endpoint
- Check that the correct pipe event, error, and state is returned from the pipe
- Check that the other transfers can be retired using the abort command
- Check that the halt can be cleared by using the clear command
@ -684,24 +656,23 @@ TEST_CASE("Test HCD pipe STALL", "[hcd][ignore]")
wait_for_connection(port_hdl, port_evt_queue);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
//Allocate transfer requests
//Allocate default pipe and IRPs
hcd_pipe_handle_t default_pipe;
hcd_xfer_req_handle_t req_hdls[NUM_XFER_REQS];
uint8_t *data_buffers[NUM_XFER_REQS];
usb_irp_t *irps[NUM_XFER_REQS];
alloc_pipe_and_xfer_reqs(port_hdl, pipe_evt_queue, &default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
//Initialize transfer requests to send a "Get Device Descriptor" request
for (int i = 0; i < NUM_XFER_REQS; i++) {
irps[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) data_buffers[i], 0, XFER_DATA_MAX_LEN);
irps[i]->data_buffer = data_buffers[i];
irps[i]->num_iso_packets = 0;
usb_irp_t **irp_list;
alloc_pipe_and_irp_list(port_hdl, pipe_evt_queue, NUM_IRPS, &default_pipe, &irp_list);
//Initialize IRPs to send a "Get Config Descriptor" request
for (int i = 0; i < NUM_IRPS; i++) {
irp_list[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) irp_list[i]->data_buffer, 0, TRANSFER_DATA_MAX_BYTES);
irp_list[i]->context = IRP_CONTEXT_VAL;
}
//Corrupt first transfer so that it triggers a STALL
((usb_ctrl_req_t *) data_buffers[0])->bRequest = 0xAA;
//Enqueue those transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
((usb_ctrl_req_t *)irp_list[0]->data_buffer)->bRequest = 0xAA;
//Enqueue those IRPs
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time for transfers to complete
@ -709,47 +680,39 @@ TEST_CASE("Test HCD pipe STALL", "[hcd][ignore]")
printf("Expecting STALL\n");
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_ERROR_STALL);
TEST_ASSERT_EQUAL(HCD_PIPE_STATE_HALTED, hcd_pipe_get_state(default_pipe));
//Call the pipe abort command to retire all transfers then dequeue all transfers
TEST_ASSERT_EQUAL(ESP_OK, hcd_pipe_command(default_pipe, HCD_PIPE_CMD_ABORT));
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
TEST_ASSERT_EQUAL(irp, irps[i]);
//Dequeue IRPs
for (int i = 0; i < NUM_IRPS; i++) {
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_STALL || irp->status == USB_TRANSFER_STATUS_CANCELLED);
TEST_ASSERT_EQUAL(NULL, context);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Call the clear command to un-stall the pipe
TEST_ASSERT_EQUAL(ESP_OK, hcd_pipe_command(default_pipe, HCD_PIPE_CMD_CLEAR));
TEST_ASSERT_EQUAL(HCD_PIPE_STATE_ACTIVE, hcd_pipe_get_state(default_pipe));
//Correct first transfer then requeue
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) data_buffers[0], 0, XFER_DATA_MAX_LEN);
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) irp_list[0]->data_buffer, 0, TRANSFER_DATA_MAX_BYTES);
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time for transfers to complete
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_XFER_REQ_DONE);
//Dequeue transfer requests and check results.
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
TEST_ASSERT_EQUAL(req_hdls[i], req_hdl);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
TEST_ASSERT_EQUAL(irp, irps[i]);
TEST_ASSERT_EQUAL(USB_TRANSFER_STATUS_COMPLETED, irp->status);
TEST_ASSERT_EQUAL(NULL, context);
//Wait for the IRPs to complete and dequeue them, then check results
for (int i = 0; i < NUM_IRPS; i++) {
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_IRP_DONE);
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Free IRPs
free_pipe_and_irp_list(default_pipe, NUM_IRPS, irp_list);
//Free transfer requests
free_pipe_and_xfer_reqs(default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
wait_for_disconnection(port_hdl, port_evt_queue, false);
teardown(port_evt_queue, pipe_evt_queue, port_hdl);
@ -759,16 +722,16 @@ TEST_CASE("Test HCD pipe STALL", "[hcd][ignore]")
Test Pipe runtime halt and clear
Purpose:
- Test that a pipe can be halted with a command whilst there are ongoing transfer requests
- Test that a pipe can be halted with a command whilst there are ongoing IRPs
- Test that a pipe can be un-halted with a HCD_PIPE_CMD_CLEAR
- Test that enqueued transfer requests are resumed when pipe is cleared
- Test that enqueued IRPs are resumed when pipe is cleared
Procedure:
- Setup HCD, a default pipe, and multiple transfer requests
- Enqueue transfer requests but execute a HCD_PIPE_CMD_HALT command immediately after. Halt command should let on
the current going transfer request finish before actually halting the pipe.
- Clear the pipe halt using a HCD_PIPE_CMD_HALT command. Enqueued transfer requests will be resumed
- Check that all transfer requests have completed successfully.
- Setup HCD, a default pipe, and multiple IRPs
- Enqueue IRPs but execute a HCD_PIPE_CMD_HALT command immediately after. The halt command should let
the currently ongoing IRP finish before actually halting the pipe.
- Clear the pipe halt using a HCD_PIPE_CMD_CLEAR command. Enqueued IRPs will be resumed
- Check that all IRPs have completed successfully.
- Teardown
*/
TEST_CASE("Test HCD pipe runtime halt and clear", "[hcd][ignore]")
@ -780,53 +743,47 @@ TEST_CASE("Test HCD pipe runtime halt and clear", "[hcd][ignore]")
wait_for_connection(port_hdl, port_evt_queue);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
//Allocate transfer requests
//Allocate default pipe and IRPs
hcd_pipe_handle_t default_pipe;
hcd_xfer_req_handle_t req_hdls[NUM_XFER_REQS];
uint8_t *data_buffers[NUM_XFER_REQS];
usb_irp_t *irps[NUM_XFER_REQS];
alloc_pipe_and_xfer_reqs(port_hdl, pipe_evt_queue, &default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
usb_irp_t **irp_list;
alloc_pipe_and_irp_list(port_hdl, pipe_evt_queue, NUM_IRPS, &default_pipe, &irp_list);
//Initialize transfer requests to send a "Get Device Descriptor" request
for (int i = 0; i < NUM_XFER_REQS; i++) {
irps[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *) data_buffers[i], 0, XFER_DATA_MAX_LEN);
irps[i]->data_buffer = data_buffers[i];
irps[i]->num_iso_packets = 0;
//Initialize IRPs to send a "Get Config Descriptor" request
for (int i = 0; i < NUM_IRPS; i++) {
irp_list[i]->num_bytes = 64; //1 worst case MPS
USB_CTRL_REQ_INIT_GET_CFG_DESC((usb_ctrl_req_t *)irp_list[i]->data_buffer, 0, TRANSFER_DATA_MAX_BYTES);
irp_list[i]->context = IRP_CONTEXT_VAL;
}
printf("Enqueuing transfer requests\n");
//Enqueue those transfer requests
for (int i = 0; i < NUM_XFER_REQS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_xfer_req_enqueue(req_hdls[i]));
printf("Enqueuing IRPs\n");
//Enqueue those IRPs
for (int i = 0; i < NUM_IRPS; i++) {
TEST_ASSERT_EQUAL(ESP_OK, hcd_irp_enqueue(default_pipe, irp_list[i]));
}
//Halt the pipe immediately
TEST_ASSERT_EQUAL(ESP_OK, hcd_pipe_command(default_pipe, HCD_PIPE_CMD_HALT));
TEST_ASSERT_EQUAL(HCD_PIPE_STATE_HALTED, hcd_pipe_get_state(default_pipe));
printf("Pipe halted\n");
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time for current inflight transfer to complete
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time for current in-flight transfer to complete
//Clear command to un-halt the pipe
TEST_ASSERT_EQUAL(ESP_OK, hcd_pipe_command(default_pipe, HCD_PIPE_CMD_CLEAR));
TEST_ASSERT_EQUAL(HCD_PIPE_STATE_ACTIVE, hcd_pipe_get_state(default_pipe));
printf("Pipe cleared\n");
vTaskDelay(pdMS_TO_TICKS(100)); //Give some time pending for transfers to restart and complete
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_XFER_REQ_DONE);
for (int i = 0; i < NUM_XFER_REQS; i++) {
hcd_xfer_req_handle_t req_hdl = hcd_xfer_req_dequeue(default_pipe);
TEST_ASSERT_EQUAL(req_hdls[i], req_hdl);
hcd_pipe_handle_t pipe_hdl;
usb_irp_t *irp;
void *context;
hcd_xfer_req_get_target(req_hdl, &pipe_hdl, &irp, &context);
TEST_ASSERT_EQUAL(default_pipe, pipe_hdl);
TEST_ASSERT_EQUAL(irp, irps[i]);
//Dequeue IRPs
for (int i = 0; i < NUM_IRPS; i++) {
expect_pipe_event(pipe_evt_queue, default_pipe, HCD_PIPE_EVENT_IRP_DONE);
usb_irp_t *irp = hcd_irp_dequeue(default_pipe);
TEST_ASSERT_NOT_EQUAL(NULL, irp);
TEST_ASSERT(irp->status == USB_TRANSFER_STATUS_COMPLETED);
TEST_ASSERT_EQUAL(NULL, context);
TEST_ASSERT(irp->context == IRP_CONTEXT_VAL);
}
//Free IRPs
free_pipe_and_irp_list(default_pipe, NUM_IRPS, irp_list);
//Free transfer requests
free_pipe_and_xfer_reqs(default_pipe, req_hdls, data_buffers, irps, NUM_XFER_REQS);
vTaskDelay(pdMS_TO_TICKS(100)); //Short delay send of SOF (for FS) or EOPs (for LS)
wait_for_disconnection(port_hdl, port_evt_queue, false);
teardown(port_evt_queue, pipe_evt_queue, port_hdl);