PERFORCE change 129934 for review
Hans Petter Selasky
hselasky at FreeBSD.org
Sat Dec 1 16:42:23 PST 2007
http://perforce.freebsd.org/chv.cgi?CH=129934
Change 129934 by hselasky at hselasky_laptop001 on 2007/12/02 00:42:00
This commit finishes change 129799 with regard to
"usb_transfer.c".
The allocated data buffer for each USB transfer
is now all in virtual memory.
Affected files ...
.. //depot/projects/usb/src/sys/dev/usb/usb_transfer.c#55 edit
Differences ...
==== //depot/projects/usb/src/sys/dev/usb/usb_transfer.c#55 (text+ko) ====
@@ -52,12 +52,13 @@
/* prototypes */
static void usbd_pipe_enter_wrapper(struct usbd_xfer *xfer);
-static void usbd_bdma_start_event(struct usbd_xfer *xfer);
static void usbd_compute_max_frame_size(struct usbd_xfer *xfer);
static void usbd_drop_refcount(struct usbd_memory_info *info);
static uint8_t usbd_start_hardware_sub(struct usbd_xfer *xfer);
static void usbd_premature_callback(struct usbd_xfer *xfer, usbd_status error);
static void usbd_delayed_transfer_start(void *arg);
+static void usbd_bdma_work_loop(struct usbd_memory_info *info);
+static void usbd_bdma_cancel_event(struct usbd_xfer *xfer);
#ifdef USB_DEBUG
void
@@ -549,22 +550,17 @@
}
xfer->max_data_length -= REQ_SIZE;
}
- /* align data */
- parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
+ /* setup "frlengths" */
- xfer->frlengths = USBD_ADD_BYTES(parm->buf, parm->size[0]);
+ xfer->frlengths = parm->xfer_length_ptr;
- parm->size[0] += (n_frlengths * sizeof(xfer->frlengths[0]));
+ parm->xfer_length_ptr += n_frlengths;
- /* align data */
- parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
+ /* setup "frbuffers" */
- xfer->frbuffers = USBD_ADD_BYTES(parm->buf, parm->size[0]);
+ xfer->frbuffers = parm->xfer_page_cache_ptr;
- parm->size[0] += (n_frbuffers * sizeof(xfer->frbuffers[0]));
-
- /* align data */
- parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
+ parm->xfer_page_cache_ptr += n_frbuffers;
/*
* check if we need to setup
@@ -574,40 +570,32 @@
if (!xfer->flags.ext_buffer) {
/* align data */
- parm->size[1] += ((-parm->size[1]) & (USB_HOST_ALIGN - 1));
+ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
if (parm->buf) {
- usbd_page_cache_init
- (xfer->frbuffers + 0, parm->page_ptr,
- parm->size[1], parm->bufsize);
+ xfer->local_buffer =
+ USBD_ADD_BYTES(parm->buf, parm->size[0]);
+
+ usbd_set_frame_offset(xfer, 0, 0);
if (type == UE_CONTROL) {
-
- usbd_page_cache_init
- (xfer->frbuffers + 1, parm->page_ptr,
- parm->size[1] + REQ_SIZE,
- parm->bufsize - REQ_SIZE);
+ usbd_set_frame_offset(xfer, REQ_SIZE, 1);
}
- /*
- * make a copy of the page cache that
- * starts at offset zero:
- */
-
- xfer->buf_data = xfer->frbuffers[0];
}
- parm->size[1] += parm->bufsize;
+ parm->size[0] += parm->bufsize;
/* align data again */
- parm->size[1] += ((-parm->size[1]) & (USB_HOST_ALIGN - 1));
+ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
}
/*
- * check if we need to allocate
- * temporary usbd_page structure(s)
- * for loading memory into DMA
+ * Compute maximum buffer size
*/
- if (xfer->flags.bdma_enable) {
+ if (parm->bufsize_max < parm->bufsize) {
+ parm->bufsize_max = parm->bufsize;
+ }
+ if (xfer->flags_int.bdma_enable) {
/*
* Setup "dma_page_ptr".
*
@@ -648,18 +636,32 @@
parm->err = USBD_INVAL;
goto done;
}
- /* initialize transfer backpointer */
+ /* initialize frame buffers */
if (parm->buf) {
- for (x = 0; x < n_frbuffers; x++) {
- xfer->frbuffers[x].p_xfer = xfer;
+ for (x = 0; x != n_frbuffers; x++) {
+ xfer->frbuffers[x].xfer = xfer;
+
+ if (xfer->flags_int.bdma_enable &&
+ (parm->bufsize_max > 0)) {
+
+ if (usbd_pc_dmamap_create(
+ xfer->frbuffers + x,
+ parm->bufsize_max)) {
+ parm->err = USBD_NOMEM;
+ goto done;
+ }
+ }
}
}
done:
if (parm->err) {
- xfer->max_usb_frame_size = 1; /* XXX avoid division by zero */
- xfer->max_frame_size = 1; /* XXX avoid division by zero */
- xfer->max_packet_size = 1; /* XXX avoid division by zero */
+ /*
+ * Set some dummy values so that we avoid division by zero:
+ */
+ xfer->max_usb_frame_size = 1;
+ xfer->max_frame_size = 1;
+ xfer->max_packet_size = 1;
xfer->max_data_length = 0;
xfer->nframes = 0;
}
@@ -736,31 +738,30 @@
while (1) {
- parm.size[0] = 0;
- parm.size[1] = 0;
- parm.buf = buf;
-
- /* align data to 8 byte boundary */
- parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
-
if (buf) {
- info = USBD_ADD_BYTES(buf, parm.size[0]);
+ info = USBD_ADD_BYTES(buf, 0);
info->memory_base = buf;
- info->memory_size = parm.total_size[0];
+ info->memory_size = parm.size[0];
- info->page_base = parm.page_ptr;
- info->page_size = parm.total_size[1];
+ info->dma_page_cache_start = USBD_ADD_BYTES(buf, parm.size[4]);
+ info->dma_page_cache_end = USBD_ADD_BYTES(buf, parm.size[5]);
+ info->xfer_page_cache_start = USBD_ADD_BYTES(buf, parm.size[5]);
+ info->xfer_page_cache_end = USBD_ADD_BYTES(buf, parm.size[2]);
info->usb_mtx = &(udev->bus->mtx);
+ info->dma_tag_p = parm.dma_tag_p;
+ info->dma_tag_max = parm.dma_tag_max;
- usbd_page_cache_init(&(parm.pc), parm.page_ptr,
- 0, parm.total_size[1] * USB_PAGE_SIZE);
} else {
info = NULL;
}
+ /* reset sizes */
+
+ parm.size[0] = 0;
+ parm.buf = buf;
parm.size[0] += sizeof(info[0]);
for (setup = setup_start, n = 0;
@@ -810,13 +811,6 @@
(udev->bus->methods->xfer_setup) (&parm);
- if (xfer->flags.bdma_enable &&
- xfer->flags_int.bdma_enable) {
- if (parm.err) {
- goto done;
- }
- usbd_dma_load_setup(&parm);
- }
if (parm.err) {
goto done;
}
@@ -825,18 +819,30 @@
if (buf || parm.err) {
goto done;
}
- /* compute number of USB pages required */
- parm.total_size[1] =
- (parm.size[1] + USB_PAGE_SIZE - 1) / USB_PAGE_SIZE;
-
/* align data to 8 byte boundary */
parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
/* store offset temporarily */
- parm.size[2] = parm.size[0];
+ parm.size[1] = parm.size[0];
+
+ /*
+ * UHCI needs DMA tags for fixup buffers. There
+ * is a maximum of one tag for each endpoint.
+ */
+ parm.dma_tag_max += MIN(n_setup, USB_MAX_ENDPOINTS);
+
+ /*
+ * DMA tags for QH, TD, Data and more.
+ */
+ parm.dma_tag_max += 8;
+
+ parm.dma_tag_p += parm.dma_tag_max;
+
+ parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
+ ((uint8_t *)0);
- parm.size[0] += (sizeof(parm.page_ptr[0]) *
- parm.total_size[1]);
+ /* align data to 8 byte boundary */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
/* store offset temporarily */
parm.size[3] = parm.size[0];
@@ -844,31 +850,51 @@
parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
((uint8_t *)0);
- /* store total buffer size */
- parm.total_size[0] = parm.size[0];
+ /* align data to 8 byte boundary */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ /* store offset temporarily */
+ parm.size[4] = parm.size[0];
+
+ parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
+ ((uint8_t *)0);
+
+ /* store end offset temporarily */
+ parm.size[5] = parm.size[0];
+
+ parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
+ ((uint8_t *)0);
+
+ /* store end offset temporarily */
+
+ parm.size[2] = parm.size[0];
+
+ /* align data to 8 byte boundary */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ parm.size[6] = parm.size[0];
+
+ parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
+ ((uint8_t *)0);
+
+ /* align data to 8 byte boundary */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
/* allocate zeroed memory */
- buf = malloc(parm.total_size[0], M_USB, M_WAITOK | M_ZERO);
+ buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
if (buf == NULL) {
parm.err = USBD_NOMEM;
PRINTFN(-1, ("cannot allocate memory block for "
"configuration (%d bytes)\n",
- parm.total_size[0]));
+ parm.size[0]));
goto done;
}
- parm.page_ptr = USBD_ADD_BYTES(buf, parm.size[2]);
+ parm.dma_tag_p = USBD_ADD_BYTES(buf, parm.size[1]);
parm.dma_page_ptr = USBD_ADD_BYTES(buf, parm.size[3]);
-
- if (usbd_page_alloc(udev->bus->dma_tag_ps,
- parm.page_ptr, parm.total_size[1])) {
- free(buf, M_USB);
- parm.err = USBD_NOMEM;
- PRINTFN(-1, ("cannot allocate memory block for "
- "configuration (%d USB pages)\n",
- parm.total_size[1]));
- goto done;
- }
+ parm.dma_page_cache_ptr = USBD_ADD_BYTES(buf, parm.size[4]);
+ parm.xfer_page_cache_ptr = USBD_ADD_BYTES(buf, parm.size[5]);
+ parm.xfer_length_ptr = USBD_ADD_BYTES(buf, parm.size[6]);
}
done:
@@ -912,6 +938,7 @@
{
struct usbd_xfer *xfer;
struct usbd_memory_info *info;
+ struct usbd_page_cache *pc;
int error;
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
@@ -963,8 +990,6 @@
usb_callout_drain(&(xfer->timeout_handle));
- usbd_dma_load_unsetup(xfer);
-
if (xfer->usb_root) {
info = xfer->usb_root;
@@ -996,8 +1021,25 @@
mtx_unlock(info->usb_mtx);
/* free DMA'able memory, if any */
+ pc = info->dma_page_cache_start;
+ while (pc != info->dma_page_cache_end) {
+ usbd_pc_free_mem(pc);
+ pc++;
+ }
+
+ /*
+ * free DMA maps in all
+ * "xfer->frbuffers"
+ */
+ pc = info->xfer_page_cache_start;
+ while (pc != info->xfer_page_cache_end) {
+ usbd_pc_dmamap_destroy(pc);
+ pc++;
+ }
- usbd_page_free(info->page_base, info->page_size);
+ /* free all DMA tags */
+ usbd_dma_tag_unsetup(info->dma_tag_p,
+ info->dma_tag_max);
/*
* free the "memory_base" last,
@@ -1282,6 +1324,7 @@
void
usbd_start_hardware(struct usbd_xfer *xfer)
{
+ struct usbd_memory_info *info;
uint32_t x;
PRINTFN(0, ("xfer=%p, pipe=%p, nframes=%d, dir=%s\n",
@@ -1377,8 +1420,31 @@
* buffers into DMA, if any:
*/
- if (xfer->flags.bdma_enable) {
- usbd_bdma_start_event(xfer);
+ if (xfer->flags_int.bdma_enable) {
+
+ /*
+ * If the transfer is not inserted, insert
+ * the transfer into the DMA queue
+ */
+ if (xfer->dma_list.le_prev == NULL) {
+ LIST_INSERT_HEAD(&(xfer->usb_root->dma_head),
+ xfer, dma_list);
+ }
+ info = xfer->usb_root;
+
+ /*
+ * If there is no current transfer, set one
+ */
+ if (info->dma_curr_xfer == NULL) {
+ info->dma_curr_xfer = xfer;
+ info->dma_no_callback = 1;
+ }
+ /*
+ * Only call the BUS-DMA work loop when it is not busy
+ */
+ if (info->dma_refcount == 0) {
+ usbd_bdma_work_loop(info);
+ }
return;
}
/*
@@ -1414,84 +1480,158 @@
}
/*------------------------------------------------------------------------*
- * usbd_bdma_start_event
+ * usbd_bdma_cancel_event
+ *
+ * This function will cancel any BUS-DMA operations.
+ *------------------------------------------------------------------------*/
+static void
+usbd_bdma_cancel_event(struct usbd_xfer *xfer)
+{
+ struct usbd_memory_info *info;
+
+ /* resolve the memory info before asserting on its mutex */
+ info = xfer->usb_root;
+
+ mtx_assert(info->priv_mtx, MA_OWNED);
+
+ if (info->dma_curr_xfer == xfer) {
+ /* prepare next USB transfer to load, if any */
+ info->dma_curr_xfer =
+ LIST_PREV(&(info->dma_head), xfer, dma_list);
+ info->dma_no_callback = 1;
+ }
+ if (xfer->dma_list.le_prev) {
+ LIST_REMOVE(xfer, dma_list);
+ xfer->dma_list.le_prev = NULL;
+ }
+ return;
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_bdma_work_loop
+ *
+ * This function handles loading of virtual buffers into DMA.
*------------------------------------------------------------------------*/
static void
-usbd_bdma_start_event(struct usbd_xfer *xfer)
+usbd_bdma_work_loop(struct usbd_memory_info *info)
{
- struct usbd_dma_load_mem_info info;
+ struct usbd_xfer *xfer;
+ struct usbd_page *pg;
uint32_t nframes;
+ uint32_t frlength_0;
uint8_t isread;
- uint8_t state;
+
+ mtx_assert(info->priv_mtx, MA_OWNED);
+
+ if (info->dma_draining) {
+ /* someone is waiting for us to drain */
+ info->dma_draining = 0;
+ wakeup(&(info->dma_draining));
+ }
+repeat:
+
+ xfer = info->dma_curr_xfer;
+
+ if (xfer) {
- mtx_assert(xfer->priv_mtx, MA_OWNED);
+ if (!info->dma_no_callback) {
- state = xfer->flags_int.bdma_state;
+ /* prepare next USB transfer to load, if any */
+ info->dma_curr_xfer =
+ LIST_PREV(&(info->dma_head), xfer, dma_list);
+ info->dma_no_callback = 1;
+ LIST_REMOVE(xfer, dma_list);
+ xfer->dma_list.le_prev = NULL;
- if (state == USBD_BDMA_ST_IDLE) {
+ /* check for DMA error */
- /* we are about to load the buffers */
- xfer->flags_int.bdma_state = USBD_BDMA_ST_LOADING;
+ if (info->dma_error) {
- isread = USBD_GET_DATA_ISREAD(xfer);
+ /* prevent recursion by increasing refcount */
- info.page_ptr = xfer->dma_page_ptr;
+ info->dma_refcount++;
- if (xfer->flags_int.isochronous_xfr) {
- /* only one frame buffer */
- nframes = 1;
- } else {
- /* can be multiple frame buffers */
- nframes = xfer->nframes;
- }
+ /* report error */
- /* reset BUS-DMA load state */
+ usbd_premature_callback(xfer,
+ USBD_DMA_LOAD_FAILED);
- xfer->dma_refcount = 1;
- xfer->flags_int.bdma_error = 0;
+ info->dma_refcount--;
- while (nframes--) {
- if (xfer->frbuffers[nframes].p_buffer == NULL) {
- /* nothing to load */
- continue;
+ goto repeat;
}
- /*
- * Set DMA direction first. This is needed to
- * select the correct cache invalidate and cache
- * flush operations.
- */
- if ((nframes == 0) &&
- xfer->flags_int.control_xfr &&
- xfer->flags_int.control_hdr) {
- /* special case */
+ /* go ahead */
+ usbd_bdma_pre_sync(xfer);
+
+ /* finally start the hardware */
+ usbd_pipe_enter_wrapper(xfer);
- if (xfer->flags_int.is_dci) {
- xfer->frbuffers[0].isread = 0;
- } else {
- xfer->frbuffers[0].isread = 1;
- }
- } else {
+ /* load next USB transfer, if any */
+ goto repeat;
+ }
+ } else {
+ /* nothing more to do */
+ return;
+ }
- /* default case */
+ /* reset BUS-DMA load state */
- xfer->frbuffers[nframes].isread = isread;
- }
+ info->dma_refcount = 1;
+ info->dma_error = 0;
+ info->dma_no_callback = 0;
- info.page_cache = xfer->frbuffers + nframes;
- info.frame_length = xfer->frlengths[nframes];
+ if (xfer->flags_int.isochronous_xfr) {
+ /* only one frame buffer */
+ nframes = 1;
+ frlength_0 = xfer->sumlen;
+ } else {
+ /* can be multiple frame buffers */
+ nframes = xfer->nframes;
+ frlength_0 = xfer->frlengths[0];
+ }
- /* check if the hardware supports DMA */
+ /*
+ * Set DMA direction first. This is needed to
+ * select the correct cache invalidate and cache
+ * flush operations.
+ */
+ isread = USBD_GET_DATA_ISREAD(xfer);
+ pg = xfer->dma_page_ptr;
- if (xfer->flags_int.bdma_enable)
- usbd_dma_load_mem(xfer, &info);
- else
- usbd_pio_load_mem(xfer, &info);
+ if (xfer->flags_int.control_xfr &&
+ xfer->flags_int.control_hdr) {
+ /* special case */
+ if (xfer->flags_int.is_dci) {
+ xfer->frbuffers[0].isread = 0;
+ } else {
+ xfer->frbuffers[0].isread = 1;
}
- usbd_bdma_done_event(xfer);
+ } else {
+ /* default case */
+ xfer->frbuffers[0].isread = isread;
+ }
+
+ xfer->frbuffers[0].page_start = pg;
+
+ usbd_pc_load_mem(xfer->frbuffers + 0, frlength_0);
+
+ pg += (frlength_0 / USB_PAGE_SIZE);
+ pg += 2;
+
+ while (--nframes > 0) {
+ xfer->frbuffers[nframes].isread = isread;
+
+ xfer->frbuffers[nframes].page_start = pg;
+
+ usbd_pc_load_mem(xfer->frbuffers + nframes,
+ xfer->frlengths[nframes]);
+
+ pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
+ pg += 2;
+ }
- } else if (state == USBD_BDMA_ST_CANCELLING) {
- /* we have to postphone the load until later */
- xfer->flags_int.bdma_state = USBD_BDMA_ST_CANCEL_AND_LOAD;
+ if (--(info->dma_refcount) == 0) {
+ /* we are complete */
+ goto repeat;
}
return;
}
@@ -1503,51 +1643,77 @@
* into DMA, if any
*------------------------------------------------------------------------*/
void
-usbd_bdma_done_event(struct usbd_xfer *xfer)
+usbd_bdma_done_event(struct usbd_memory_info *info)
{
- uint8_t state;
+ mtx_assert(info->priv_mtx, MA_OWNED);
- mtx_assert(xfer->priv_mtx, MA_OWNED);
+ if (--(info->dma_refcount) == 0) {
+ usbd_bdma_work_loop(info);
+ }
+ return;
+}
- xfer->dma_refcount--;
+/*------------------------------------------------------------------------*
+ * usbd_bdma_pre_sync
+ *------------------------------------------------------------------------*/
+void
+usbd_bdma_pre_sync(struct usbd_xfer *xfer)
+{
+ struct usbd_page_cache *pc;
+ uint32_t nframes;
- if (xfer->dma_refcount != 0) {
- return;
+ if (xfer->flags_int.isochronous_xfr) {
+ /* only one frame buffer */
+ nframes = 1;
+ } else {
+ /* can be multiple frame buffers */
+ nframes = xfer->nframes;
}
- /* get current BUS-DMA state */
- state = xfer->flags_int.bdma_state;
- /* set new state first */
- xfer->flags_int.bdma_state = USBD_BDMA_ST_IDLE;
+ pc = xfer->frbuffers;
- if (state == USBD_BDMA_ST_LOADING) {
+ while (nframes--) {
- if (xfer->flags_int.bdma_error) {
- /* report error */
- usbd_premature_callback(xfer, USBD_DMA_LOAD_FAILED);
- } else {
- if (xfer->flags_int.bdma_enable) {
- usbd_dma_load_pre_sync(xfer);
+ if (pc->page_offset_buf != pc->page_offset_end) {
+ if (pc->isread) {
+ usbd_pc_cpu_invalidate(pc);
+ } else {
+ usbd_pc_cpu_flush(pc);
}
- /* finally start the hardware */
- usbd_pipe_enter_wrapper(xfer);
}
+ pc++;
+ }
+
+ return;
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_bdma_post_sync
+ *------------------------------------------------------------------------*/
+void
+usbd_bdma_post_sync(struct usbd_xfer *xfer)
+{
+ struct usbd_page_cache *pc;
+ uint32_t nframes;
- } else if (state == USBD_BDMA_ST_CANCEL_AND_LOAD) {
+ if (xfer->flags_int.isochronous_xfr) {
+ /* only one frame buffer */
+ nframes = 1;
+ } else {
+ /* can be multiple frame buffers */
+ nframes = xfer->nframes;
+ }
- /* we are about to load new buffers */
- usbd_bdma_start_event(xfer);
+ pc = xfer->frbuffers;
- } else if (state == USBD_BDMA_ST_CANCELLING) {
+ while (nframes--) {
- /*
- * Check if we need to wakeup someone
- * that is waiting for DMA to cancel
- */
- if (xfer->flags_int.bdma_draining) {
- xfer->flags_int.bdma_draining = 0;
- wakeup(&(xfer->dma_page_ptr));
+ if (pc->page_offset_buf != pc->page_offset_end) {
+ if (pc->isread) {
+ usbd_pc_cpu_invalidate(pc);
+ }
}
+ pc++;
}
return;
}
@@ -1624,8 +1790,6 @@
void
usbd_transfer_stop(struct usbd_xfer *xfer)
{
- uint8_t state;
-
if (xfer == NULL) {
/* transfer is gone */
return;
@@ -1662,16 +1826,10 @@
/* always set error */
xfer->error = USBD_CANCELLED;
- state = xfer->flags_int.bdma_state;
+ /* cancel loading of virtual buffers, if any */
+ usbd_bdma_cancel_event(xfer);
/*
- * Update BUS-DMA state to cancelling, if needed
- */
- if ((state == USBD_BDMA_ST_LOADING) ||
- (state == USBD_BDMA_ST_CANCEL_AND_LOAD)) {
- xfer->flags_int.bdma_state = USBD_BDMA_ST_CANCELLING;
- }
- /*
* Check if we are doing a transfer and if so
* do a Cancel Callback
*/
@@ -1695,6 +1853,8 @@
void
usbd_transfer_drain(struct usbd_xfer *xfer)
{
+ struct usbd_memory_info *info;
+
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"usbd_transfer_drain can sleep!");
@@ -1708,16 +1868,23 @@
usbd_transfer_stop(xfer);
- while (xfer->dma_refcount > 0) {
+ info = xfer->usb_root;
+
+ if (info->dma_refcount > 0) {
- xfer->flags_int.bdma_draining = 1;
+ /*
+ * Wait until the current outstanding DMA
+ * operation is complete before we return.
+ * That way we are sure that no DMA operation
+ * belonging to our USB transfer is pending!
+ */
+ info->dma_draining = 1;
- if (mtx_sleep(&(xfer->dma_page_ptr), xfer->priv_mtx,
+ if (mtx_sleep(&(info->dma_draining), xfer->priv_mtx,
0, "usbdrain", 0)) {
/* should not happen */
}
}
-
mtx_unlock(xfer->priv_mtx);
return;
@@ -1726,15 +1893,17 @@
/*------------------------------------------------------------------------*
* usbd_set_frame_data
*
- * This function sets the pointer and length of data that should loaded
- * directly into DMA for the given USB frame.
+ * This function sets the pointer and length of data that should be
+ * loaded directly into DMA for the given USB frame. Passing "ptr"
+ * equal to NULL while "len" is greater than zero gives undefined
+ * results!
*------------------------------------------------------------------------*/
void
usbd_set_frame_data(struct usbd_xfer *xfer, void *ptr, uint32_t len,
uint32_t frindex)
{
- xfer->frbuffers[frindex].p_buffer = ptr; /* enable loading of
- * memory into DMA */
+ /* set virtual address to load and length */
+ xfer->frbuffers[frindex].buffer = ptr;
xfer->frlengths[frindex] = len;
return;
}
@@ -1749,19 +1918,12 @@
usbd_set_frame_offset(struct usbd_xfer *xfer, uint32_t offset,
uint32_t frindex)
{
- struct usbd_page *page = xfer->buf_data.page_start;
- struct usbd_page_cache *pc = xfer->frbuffers + frindex;
-
__KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
- " when the USB buffer is external!\n"));
+ "when the USB buffer is external!\n"));
- pc->page_offset_end = xfer->buf_data.page_offset_end - offset;
-
- offset += xfer->buf_data.page_offset_buf;
- page += (offset / USB_PAGE_SIZE);
- pc->page_start = page;
- pc->page_offset_buf = (offset % USB_PAGE_SIZE);
- pc->p_buffer = NULL; /* disable loading of memory into DMA */
+ /* set virtual address to load */
+ xfer->frbuffers[frindex].buffer =
+ USBD_ADD_BYTES(xfer->local_buffer, offset);
return;
}
@@ -1793,9 +1955,10 @@
} else {
xfer->usb_state = USBD_ST_TRANSFERRED;
- if (xfer->flags.bdma_enable &&
- xfer->flags_int.bdma_enable) {
- usbd_dma_load_post_sync(xfer);
+ /* sync any DMA memory */
+ if (xfer->flags_int.bdma_enable &&
+ (!xfer->flags_int.bdma_no_post_sync)) {
+ usbd_bdma_post_sync(xfer);
}
}
}
@@ -1940,12 +2103,6 @@
* the pipe queue, but remain first. To enforce USB transfer removal call
* this function passing the error code "USBD_CANCELLED".
*------------------------------------------------------------------------*/
-#undef LIST_PREV
-#define LIST_PREV(head,elm,field) \
- (((elm) == LIST_FIRST(head)) ? ((__typeof(elm))0) : \
- ((__typeof(elm))(((uint8_t *)((elm)->field.le_prev)) - \
- ((uint8_t *)&LIST_NEXT((__typeof(elm))0,field)))))
-
void
usbd_transfer_dequeue(struct usbd_xfer *xfer, usbd_status error)
{
More information about the p4-projects
mailing list