Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c   | 94
-rw-r--r--  drivers/usb/host/xhci-debugfs.c  |  5
-rw-r--r--  drivers/usb/host/xhci-mem.c      | 74
-rw-r--r--  drivers/usb/host/xhci.c          | 24
-rw-r--r--  drivers/usb/host/xhci.h          |  9
5 files changed, 113 insertions(+), 93 deletions(-)
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index bdc664ad6a93..1fcc9348dd43 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -101,13 +101,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
return string_length;
}
+static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
+{
+ struct xhci_ep_ctx *ep_ctx;
+ unsigned int max_burst;
+ dma_addr_t deq;
+
+ max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+
+ /* Populate bulk out endpoint context: */
+ ep_ctx = dbc_bulkout_ctx(dbc);
+ deq = dbc_bulkout_enq(dbc);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+
+ /* Populate bulk in endpoint context: */
+ ep_ctx = dbc_bulkin_ctx(dbc);
+ deq = dbc_bulkin_enq(dbc);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+}
+
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
struct dbc_info_context *info;
- struct xhci_ep_ctx *ep_ctx;
u32 dev_info;
- dma_addr_t deq, dma;
- unsigned int max_burst;
+ dma_addr_t dma;
if (!dbc)
return;
@@ -121,20 +142,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
info->length = cpu_to_le32(string_length);
- /* Populate bulk out endpoint context: */
- ep_ctx = dbc_bulkout_ctx(dbc);
- max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
- deq = dbc_bulkout_enq(dbc);
- ep_ctx->ep_info = 0;
- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
-
- /* Populate bulk in endpoint context: */
- ep_ctx = dbc_bulkin_ctx(dbc);
- deq = dbc_bulkin_enq(dbc);
- ep_ctx->ep_info = 0;
- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+ /* Populate bulk in and out endpoint contexts: */
+ xhci_dbc_init_ep_contexts(dbc);
/* Set DbC context and info registers: */
lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
@@ -435,6 +444,42 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
return ctx;
}
+static void xhci_dbc_ring_init(struct xhci_ring *ring)
+{
+ struct xhci_segment *seg = ring->first_seg;
+
+ /* clear all trbs on ring in case of old ring */
+ memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
+
+ /* Only event ring does not use link TRB */
+ if (ring->type != TYPE_EVENT) {
+ union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+ trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
+ trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+ }
+ xhci_initialize_ring_info(ring);
+}
+
+static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
+{
+ struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
+ struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
+
+ if (!in_ring || !out_ring || !dbc->ctx) {
+ dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
+ return -ENODEV;
+ }
+
+ xhci_dbc_ring_init(in_ring);
+ xhci_dbc_ring_init(out_ring);
+
+ /* set ep context enqueue, dequeue, and cycle to initial values */
+ xhci_dbc_init_ep_contexts(dbc);
+
+ return 0;
+}
+
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
@@ -463,15 +508,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
seg->dma = dma;
- /* Only event ring does not use link TRB */
- if (type != TYPE_EVENT) {
- union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
-
- trb->link.segment_ptr = cpu_to_le64(dma);
- trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
- }
INIT_LIST_HEAD(&ring->td_list);
- xhci_initialize_ring_info(ring, 1);
+
+ xhci_dbc_ring_init(ring);
+
return ring;
dma_fail:
kfree(seg);
@@ -863,7 +903,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
dev_info(dbc->dev, "DbC cable unplugged\n");
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
-
+ xhci_dbc_reinit_ep_rings(dbc);
return EVT_DISC;
}
@@ -873,7 +913,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
writel(portsc, &dbc->regs->portsc);
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
-
+ xhci_dbc_reinit_ep_rings(dbc);
return EVT_DISC;
}
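Note on the two reconnect hunks above: after a cable unplug the DbC bulk rings are now reset in place, so no stale TRBs or advanced enqueue/dequeue pointers survive into the next session. The following is a self-contained userspace sketch of what xhci_dbc_ring_init() plus xhci_initialize_ring_info() amount to for the single-segment DbC case. The struct layout and the ring_reinit name are illustrative, not the kernel's; only TRBS_PER_SEGMENT = 256 and the link-TRB bit encoding (type 6 << 10, toggle bit 1) follow xhci.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TRBS_PER_SEGMENT 256              /* same value the kernel uses */
#define TRB_TYPE_LINK    (6u << 10)       /* TRB_TYPE(TRB_LINK) encoding */
#define LINK_TOGGLE      (1u << 1)

struct trb { uint64_t ptr; uint32_t status; uint32_t control; };

struct ring {
	struct trb trbs[TRBS_PER_SEGMENT];
	uint64_t dma;                     /* pretend bus address of the segment */
	struct trb *enqueue, *dequeue;
	unsigned int cycle_state;
};

static void ring_reinit(struct ring *ring)
{
	/* wipe whatever the previous session left in the segment */
	memset(ring->trbs, 0, sizeof(ring->trbs));

	/* the last TRB links the single segment back to its own start */
	struct trb *link = &ring->trbs[TRBS_PER_SEGMENT - 1];
	link->ptr = ring->dma;
	link->control = LINK_TOGGLE | TRB_TYPE_LINK;

	/* empty ring: enqueue == dequeue, producer cycle restarts at 1 */
	ring->enqueue = ring->dequeue = ring->trbs;
	ring->cycle_state = 1;
}

int main(void)
{
	static struct ring r = { .dma = 0x1000 };

	r.trbs[3].control = 0xdeadbeef;   /* stale TRB left from before the unplug */
	ring_reinit(&r);
	printf("trb3=%#x link=%#x cycle=%u\n",
	       (unsigned)r.trbs[3].control,
	       (unsigned)r.trbs[TRBS_PER_SEGMENT - 1].control,
	       r.cycle_state);            /* prints trb3=0 link=0x1802 cycle=1 */
	return 0;
}

Once the rings look like this again, xhci_dbc_init_ep_contexts() can rewrite the endpoint contexts' dequeue pointers and cycle bits to match, which is exactly what xhci_dbc_reinit_ep_rings() does above.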
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index f8ba15e7c225..570210e8a8e8 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -214,14 +214,11 @@ static void xhci_ring_dump_segment(struct seq_file *s,
static int xhci_ring_trb_show(struct seq_file *s, void *unused)
{
- int i;
struct xhci_ring *ring = *(struct xhci_ring **)s->private;
struct xhci_segment *seg = ring->first_seg;
- for (i = 0; i < ring->num_segs; i++) {
+ xhci_for_each_ring_seg(ring->first_seg, seg)
xhci_ring_dump_segment(s, seg);
- seg = seg->next;
- }
return 0;
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 91b47f9573cd..c9694526b157 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -27,14 +27,12 @@
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
- unsigned int cycle_state,
unsigned int max_packet,
unsigned int num,
gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
- int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
@@ -56,11 +54,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
return NULL;
}
}
- /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
- if (cycle_state == 0) {
- for (i = 0; i < TRBS_PER_SEGMENT; i++)
- seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
- }
seg->num = num;
seg->dma = dma;
seg->next = NULL;
@@ -138,6 +131,14 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
chain_links = xhci_link_chain_quirk(xhci, ring->type);
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+ if (ring->cycle_state == 0) {
+ xhci_for_each_ring_seg(first, seg) {
+ for (int i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
+ }
+
next = ring->enq_seg->next;
xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
xhci_link_segments(last, next, ring->type, chain_links);
@@ -224,7 +225,6 @@ static int xhci_update_stream_segment_mapping(
struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *first_seg,
- struct xhci_segment *last_seg,
gfp_t mem_flags)
{
struct xhci_segment *seg;
@@ -234,28 +234,22 @@ static int xhci_update_stream_segment_mapping(
if (WARN_ON_ONCE(trb_address_map == NULL))
return 0;
- seg = first_seg;
- do {
+ xhci_for_each_ring_seg(first_seg, seg) {
ret = xhci_insert_segment_mapping(trb_address_map,
ring, seg, mem_flags);
if (ret)
goto remove_streams;
- if (seg == last_seg)
- return 0;
- seg = seg->next;
- } while (seg != first_seg);
+ }
return 0;
remove_streams:
failed_seg = seg;
- seg = first_seg;
- do {
+ xhci_for_each_ring_seg(first_seg, seg) {
xhci_remove_segment_mapping(trb_address_map, seg);
if (seg == failed_seg)
return ret;
- seg = seg->next;
- } while (seg != first_seg);
+ }
return ret;
}
@@ -267,17 +261,14 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
if (WARN_ON_ONCE(ring->trb_address_map == NULL))
return;
- seg = ring->first_seg;
- do {
+ xhci_for_each_ring_seg(ring->first_seg, seg)
xhci_remove_segment_mapping(ring->trb_address_map, seg);
- seg = seg->next;
- } while (seg != ring->first_seg);
}
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
- ring->first_seg, ring->last_seg, mem_flags);
+ ring->first_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
@@ -297,8 +288,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
kfree(ring);
}
-void xhci_initialize_ring_info(struct xhci_ring *ring,
- unsigned int cycle_state)
+void xhci_initialize_ring_info(struct xhci_ring *ring)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
@@ -312,7 +302,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring.
*/
- ring->cycle_state = cycle_state;
+ ring->cycle_state = 1;
/*
* Each segment has a link TRB, and leave an extra TRB for SW
@@ -327,7 +317,6 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first,
struct xhci_segment **last,
unsigned int num_segs,
- unsigned int cycle_state,
enum xhci_ring_type type,
unsigned int max_packet,
gfp_t flags)
@@ -338,7 +327,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
chain_links = xhci_link_chain_quirk(xhci, type);
- prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
+ prev = xhci_segment_alloc(xhci, max_packet, num, flags);
if (!prev)
return -ENOMEM;
num++;
@@ -347,8 +336,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
while (num < num_segs) {
struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
- flags);
+ next = xhci_segment_alloc(xhci, max_packet, num, flags);
if (!next)
goto free_segments;
@@ -373,9 +361,8 @@ free_segments:
* Set the end flag and the cycle toggle bit on the last segment.
* See section 4.9.1 and figures 15 and 16.
*/
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
@@ -393,7 +380,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs,
- cycle_state, type, max_packet, flags);
+ type, max_packet, flags);
if (ret)
goto fail;
@@ -403,7 +390,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
}
- xhci_initialize_ring_info(ring, cycle_state);
+ xhci_initialize_ring_info(ring);
trace_xhci_ring_alloc(ring);
return ring;
@@ -431,14 +418,14 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *last;
int ret;
- ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state,
- ring->type, ring->bounce_buf_len, flags);
+ ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type,
+ ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
if (ring->type == TYPE_STREAM) {
ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
- ring, first, last, flags);
+ ring, first, flags);
if (ret)
goto free_segments;
}
@@ -642,8 +629,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
- mem_flags);
+ xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -984,7 +970,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -1467,7 +1453,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
@@ -2276,7 +2262,7 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
if (!ir)
return NULL;
- ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
+ ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
kfree(ir);
@@ -2482,7 +2468,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
/* Set up the command ring to have one segment for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
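Dropping the cycle_state argument throughout xhci-mem.c works because every freshly allocated ring now starts at cycle state 1, and the one caller that could pass something else, ring expansion, is handled in xhci_link_rings() by setting the cycle bit of the brand-new TRBs when the ring has toggled to cycle state 0. A standalone sketch of why that flip is needed; the trb_owned_by_consumer helper is a toy stand-in for the xHC's ownership check, not a kernel function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRB_CYCLE (1u << 0)

/* the consumer treats a TRB as valid when its cycle bit matches
 * the ring's current cycle state */
static bool trb_owned_by_consumer(uint32_t control, unsigned int cycle_state)
{
	return (control & TRB_CYCLE) == cycle_state;
}

int main(void)
{
	uint32_t zeroed_trb = 0;          /* fresh segments are memset to 0 */

	/* cycle state 1 (a new ring): a zeroed TRB is correctly "empty" */
	printf("cycle=1, zeroed TRB consumed? %d\n",
	       trb_owned_by_consumer(zeroed_trb, 1));   /* 0: fine */

	/* cycle state 0 (ring has wrapped an odd number of times): a zeroed
	 * TRB would be consumed as if it carried real work */
	printf("cycle=0, zeroed TRB consumed? %d\n",
	       trb_owned_by_consumer(zeroed_trb, 0));   /* 1: bad */

	/* so xhci_link_rings() ORs TRB_CYCLE into every new TRB first */
	uint32_t fixed_trb = zeroed_trb | TRB_CYCLE;
	printf("cycle=0, fixed TRB consumed? %d\n",
	       trb_owned_by_consumer(fixed_trb, 0));    /* 0: fine */
	return 0;
}

Note the loop must walk the newly allocated chain (first .. last), not the live ring: at that point the new segments are not linked in yet, and ORing TRB_CYCLE into TRBs the hardware already owns would corrupt them.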
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d5bcd5475b72..3970ec831b8c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -41,15 +41,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
- struct xhci_segment *seg = ring->first_seg;
+ struct xhci_segment *seg;
if (!td || !td->start_seg)
return false;
- do {
+
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
if (seg == td->start_seg)
return true;
- seg = seg->next;
- } while (seg && seg != ring->first_seg);
+ }
return false;
}
@@ -764,16 +764,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
struct xhci_segment *seg;
ring = xhci->cmd_ring;
- seg = ring->deq_seg;
- do {
- memset(seg->trbs, 0,
- sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
- seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
- cpu_to_le32(~TRB_CYCLE);
- seg = seg->next;
- } while (seg != ring->deq_seg);
-
- xhci_initialize_ring_info(ring, 1);
+ xhci_for_each_ring_seg(ring->deq_seg, seg) {
+ memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
+
+ xhci_initialize_ring_info(ring);
/*
* Reset the hardware dequeue pointer.
* Yes, this will need to be re-written after resume, but we're paranoid
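The rewritten xhci_clear_command_ring() above keeps each segment's link TRB (pointer, type and toggle bit) and strips only its cycle bit, so the ring topology survives the wipe. A minimal sketch of that clear-but-keep-link pattern; the clear_segment_keep_link name and flat TRB layout are illustrative, with 0x1800 standing in for TRB_TYPE(TRB_LINK) and bit 1 for LINK_TOGGLE per the xHCI encoding (the kernel additionally wraps the mask in cpu_to_le32(), which the toy omits):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TRBS_PER_SEGMENT 256
#define TRB_CYCLE (1u << 0)

struct trb { uint64_t ptr; uint32_t status; uint32_t control; };

static void clear_segment_keep_link(struct trb trbs[TRBS_PER_SEGMENT])
{
	/* every "normal" TRB goes back to zero... */
	memset(trbs, 0, sizeof(struct trb) * (TRBS_PER_SEGMENT - 1));
	/* ...but the link TRB keeps pointer/type/toggle, losing only TRB_CYCLE */
	trbs[TRBS_PER_SEGMENT - 1].control &= ~TRB_CYCLE;
}

int main(void)
{
	static struct trb seg[TRBS_PER_SEGMENT];

	seg[0].control = 0x1401;                     /* some stale command TRB */
	seg[TRBS_PER_SEGMENT - 1].control = 0x1803;  /* link: type|toggle|cycle */
	clear_segment_keep_link(seg);
	printf("trb0=%#x link=%#x\n",
	       (unsigned)seg[0].control,
	       (unsigned)seg[TRBS_PER_SEGMENT - 1].control); /* 0 and 0x1802 */
	return 0;
}

Starting the walk at ring->deq_seg rather than ring->first_seg is fine here: the segment list is circular, so the new iterator still visits every segment exactly once.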
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 67ee2e049943..b2aeb444daaf 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1263,6 +1263,9 @@ static inline const char *xhci_trb_type_string(u8 type)
#define AVOID_BEI_INTERVAL_MIN 8
#define AVOID_BEI_INTERVAL_MAX 32
+#define xhci_for_each_ring_seg(head, seg) \
+ for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL))
+
struct xhci_segment {
union xhci_trb *trbs;
/* private to HCD */
@@ -1800,14 +1803,12 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, unsigned int cycle_state,
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags);
-void xhci_initialize_ring_info(struct xhci_ring *ring,
- unsigned int cycle_state);
+void xhci_initialize_ring_info(struct xhci_ring *ring);
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
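The xhci_for_each_ring_seg() iterator added above turns the circular segment list into a bounded traversal: start at any head, stop once ->next wraps back to it. A quick standalone check of that termination rule on a three-segment toy list (the struct and the for_each_ring_seg name mirror the kernel macro but are illustrative):

#include <stdio.h>

struct seg { int num; struct seg *next; };

#define for_each_ring_seg(head, s) \
	for (s = (head); s != NULL; s = (s->next != (head) ? s->next : NULL))

int main(void)
{
	struct seg a = { .num = 0 }, b = { .num = 1 }, c = { .num = 2 };
	struct seg *s;

	a.next = &b; b.next = &c; c.next = &a;  /* circular, like an xhci ring */

	/* starting at the first segment... */
	for_each_ring_seg(&a, s)
		printf("%d ", s->num);          /* 0 1 2 */
	printf("\n");

	/* ...or mid-ring, e.g. from a dequeue segment, still visits each once */
	for_each_ring_seg(&b, s)
		printf("%d ", s->num);          /* 1 2 0 */
	printf("\n");
	return 0;
}

This is what lets the callers in xhci.c, xhci-mem.c and xhci-debugfs.c drop their hand-rolled do/while loops and per-loop bookkeeping, including the off-by-design num_segs counter in the debugfs dump.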