forked from Qortal/Brooklyn
Fix for USB 3 rare dcd
This commit is contained in:
parent f01da3e636
commit d3a21b579a
@@ -1,13 +1,3 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
@@ -20,10 +10,11 @@

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_AUTHOR "Qortal Project"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
@@ -52,19 +43,6 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
        return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
        u32 result;
@@ -80,9 +58,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
        return ret;
}

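The polling loop this comment describes is elided by the hunk above; a minimal sketch of its semantics, assuming the readl_poll_timeout_atomic() helper from the <linux/iopoll.h> include added earlier (mainline implements the body this way, but treat the exact code as illustrative):

static int handshake_sketch(void __iomem *ptr, u32 mask, u32 done, int usec)
{
        u32 result;
        int ret;

        /* Re-read the register every microsecond until the masked bits
         * equal "done", the register reads all-ones, or usec expires. */
        ret = readl_poll_timeout_atomic(ptr, result,
                                        (result & mask) == done ||
                                        result == U32_MAX,
                                        1, usec);
        if (result == U32_MAX)          /* all-ones: hardware removed */
                return -ENODEV;

        return ret;     /* 0 on success, -ETIMEDOUT on timeout */
}
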
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
@@ -99,14 +74,6 @@ void xhci_quiesce(struct xhci_hcd *xhci)
        writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;
@@ -124,9 +91,6 @@ int xhci_halt(struct xhci_hcd *xhci)
        return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
@@ -155,13 +119,6 @@ int xhci_start(struct xhci_hcd *xhci)
        return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
@@ -185,13 +142,6 @@ int xhci_reset(struct xhci_hcd *xhci)
        command |= CMD_RESET;
        writel(command, &xhci->op_regs->command);

        /* Existing Intel xHCI controllers require a delay of 1 ms
         * after setting the CMD_RESET bit, and before accessing any
         * HC registers. This allows the HC to complete the
         * reset operation and be ready for HC register access.
         * Without this delay, the subsequent HC register access
         * may result in a system hang very rarely.
         */
        if (xhci->quirks & XHCI_INTEL_HOST)
                udelay(1000);

@@ -205,10 +155,7 @@ int xhci_reset(struct xhci_hcd *xhci)

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Wait for controller to be ready for doorbell rings");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */

        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_CNR, 0, 10 * 1000 * 1000);

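For scale: the 10 * 1000 * 1000 passed to xhci_handshake() is in microseconds, so the driver waits up to ten seconds for the controller to clear the CNR (Controller Not Ready) status bit before touching the doorbells.
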
@@ -229,26 +176,11 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
        u64 val;
        u32 intrs;

        /*
         * Some Renesas controllers get into a weird state if they are
         * reset while programmed with 64bit addresses (they will preserve
         * the top half of the address in internal, non visible
         * registers). You end up with half the address coming from the
         * kernel, and the other half coming from the firmware. Also,
         * changing the programming leads to extra accesses even if the
         * controller is supposed to be halted. The controller ends up with
         * a fatal fault, and is then ripe for being properly reset.
         *
         * Special care is taken to only apply this if the device is behind
         * an iommu. Doing anything when there is no iommu is definitely
         * unsafe...
         */
        if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
                return;

        xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

        /* Clear HSEIE so that faults do not get signaled */
        val = readl(&xhci->op_regs->command);
        val &= ~CMD_HSEIE;
        writel(val, &xhci->op_regs->command);
@@ -258,7 +190,6 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
        val |= STS_FATAL;
        writel(val, &xhci->op_regs->status);

        /* Now zero the registers, and brace for impact */
        val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        if (upper_32_bits(val))
                xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
@@ -296,9 +227,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        /*
         * TODO: Check with MSI SoC for sysdev
         */

        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
@@ -328,13 +257,6 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * Calculate the number of MSI-X vectors supported.
         * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
         *   with the max number of interrupters based on the xhci HCSPARAMS1.
         * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
         *   vector to ensure an interrupt is always available.
         */
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));

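As a worked example of the vector count: on an 8-core machine whose HCSPARAMS1 advertises 8 interrupters, min(num_online_cpus() + 1, HCS_MAX_INTRS(...)) = min(9, 8) = 8 MSI-X vectors; the spare "+1" only survives when the controller supports more interrupters than there are online CPUs.
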
@@ -364,7 +286,6 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
        return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
@@ -512,16 +433,6 @@ static void compliance_mode_recovery(struct timer_list *t)
                        jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
 * that causes ports behind that hardware to enter compliance mode sometimes.
 * The quirk creates a timer that polls every 2 seconds the link state of
 * each host controller's port and recovers it by issuing a Warm reset
 * if Compliance mode is detected, otherwise the port will become "dead" (no
 * device connections or disconnections will be detected anymore). Because no
 * status event is generated when entering compliance mode (per xhci spec),
 * this quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
        xhci->port_status_u0 = 0;
@@ -535,12 +446,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
                        "Compliance mode recovery timer initialized");
}

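The elided middle of compliance_mode_recovery_timer_init() arms that 2-second poll. A hedged sketch of the arming step, assuming the standard timer_setup()/add_timer() pattern and the COMP_MODE_RCVRY_MSECS interval used by compliance_mode_recovery() above:

static void comp_mode_timer_init_sketch(struct xhci_hcd *xhci)
{
        /* compliance_mode_recovery() re-arms itself via the mod_timer()
         * call visible above, so this first arming yields a periodic
         * poll of every port's link state. */
        timer_setup(&xhci->comp_mode_recovery_timer,
                    compliance_mode_recovery, 0);
        xhci->comp_mode_recovery_timer.expires =
                jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
        add_timer(&xhci->comp_mode_recovery_timer);
}
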
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
        const char *dmi_product_name, *dmi_sys_vendor;
@@ -567,14 +472,6 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
        return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -602,8 +499,6 @@ static int xhci_init(struct usb_hcd *hcd)
        return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
@@ -622,18 +517,6 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
        return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
@@ -641,10 +524,6 @@ int xhci_run(struct usb_hcd *hcd)
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0 roothub
         * is set up.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);
@@ -703,15 +582,6 @@ int xhci_run(struct usb_hcd *hcd)
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
static void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
@@ -764,15 +634,6 @@ static void xhci_stop(struct usb_hcd *hcd)
        mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -839,15 +700,6 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
@@ -876,25 +728,9 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}

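The 64-byte alignment constraint described before xhci_clear_command_ring() is easier to see against the register layout: the command ring control register reserves its low six bits for flags (ring cycle state, command stop/abort), leaving only bits 63:6 for the dequeue pointer. A sketch of the re-programming step xhci_set_cmd_ring_deq() performs, assuming the driver's CMD_RING_RSVD_BITS mask covers those low bits (illustrative, mirroring the mainline helper):

static void set_cmd_ring_deq_sketch(struct xhci_hcd *xhci)
{
        u64 val_64;

        /* Keep the reserved/flag bits, replace only the pointer bits. */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                       xhci->cmd_ring->dequeue) &
                  (u64) ~CMD_RING_RSVD_BITS) |
                 xhci->cmd_ring->cycle_state;
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
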
/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to 0U).
 * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done
 * at enumeration clears this wake, force one here as well for unconnected ports
 */

static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
                struct xhci_hub *rhub,
                bool do_wakeup)
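The body of this function is elided by the next hunk; a condensed sketch of the two steps the comment promises, assuming rhub->ports[i]->addr is each port's PORTSC register and mirroring the mainline loop:

static void disable_port_wake_sketch(struct xhci_hcd *xhci,
                struct xhci_hub *rhub, bool do_wakeup)
{
        u32 portsc, t1, t2;
        int i;

        for (i = 0; i < rhub->num_ports; i++) {
                portsc = readl(rhub->ports[i]->addr);
                t1 = xhci_port_state_to_neutral(portsc);
                t2 = t1;

                /* Step 1: strip the wake-enable bits unless wakeup is on. */
                if (!do_wakeup)
                        t2 &= ~PORT_WAKE_BITS;

                /* Step 2: clear a stale connect-status change on ports that
                 * never enumerated, so it can't wake us right back up. */
                if (!(portsc & PORT_CONNECT))
                        t2 |= PORT_CSC;

                if (t1 != t2)
                        writel(t2, rhub->ports[i]->addr);
        }
}
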
@@ -1270,113 +1106,6 @@ EXPORT_SYMBOL_GPL(xhci_resume);

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
        void *temp;
        int ret = 0;
        unsigned int buf_len;
        enum dma_data_direction dir;

        dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf_len = urb->transfer_buffer_length;

        temp = kzalloc_node(buf_len, GFP_ATOMIC,
                            dev_to_node(hcd->self.sysdev));

        if (usb_urb_dir_out(urb))
                sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
                                   temp, buf_len, 0);

        urb->transfer_buffer = temp;
        urb->transfer_dma = dma_map_single(hcd->self.sysdev,
                                           urb->transfer_buffer,
                                           urb->transfer_buffer_length,
                                           dir);

        if (dma_mapping_error(hcd->self.sysdev,
                              urb->transfer_dma)) {
                ret = -EAGAIN;
                kfree(temp);
        } else {
                urb->transfer_flags |= URB_DMA_MAP_SINGLE;
        }

        return ret;
}

static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
                                          struct urb *urb)
{
        bool ret = false;
        unsigned int i;
        unsigned int len = 0;
        unsigned int trb_size;
        unsigned int max_pkt;
        struct scatterlist *sg;
        struct scatterlist *tail_sg;

        tail_sg = urb->sg;
        max_pkt = usb_endpoint_maxp(&urb->ep->desc);

        if (!urb->num_sgs)
                return ret;

        if (urb->dev->speed >= USB_SPEED_SUPER)
                trb_size = TRB_CACHE_SIZE_SS;
        else
                trb_size = TRB_CACHE_SIZE_HS;

        if (urb->transfer_buffer_length != 0 &&
            !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
                for_each_sg(urb->sg, sg, urb->num_sgs, i) {
                        len = len + sg->length;
                        if (i > trb_size - 2) {
                                len = len - tail_sg->length;
                                if (len < max_pkt) {
                                        ret = true;
                                        break;
                                }

                                tail_sg = sg_next(tail_sg);
                        }
                }
        }
        return ret;
}

static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
        unsigned int len;
        unsigned int buf_len;
        enum dma_data_direction dir;

        dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        buf_len = urb->transfer_buffer_length;

        if (IS_ENABLED(CONFIG_HAS_DMA) &&
            (urb->transfer_flags & URB_DMA_MAP_SINGLE))
                dma_unmap_single(hcd->self.sysdev,
                                 urb->transfer_dma,
                                 urb->transfer_buffer_length,
                                 dir);

        if (usb_urb_dir_in(urb)) {
                len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
                                           urb->transfer_buffer,
                                           buf_len,
                                           0);
                if (len != buf_len) {
                        xhci_dbg(hcd_to_xhci(hcd),
                                 "Copy from tmp buf to urb sg list failed\n");
                        urb->actual_length = len;
                }
        }
        urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
        kfree(urb->transfer_buffer);
        urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
 * we'll copy the actual data into the TRB address register. This is limited to
@@ -1386,37 +1115,13 @@ static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
                                gfp_t mem_flags)
{
        struct xhci_hcd *xhci;

        xhci = hcd_to_xhci(hcd);

        if (xhci_urb_suitable_for_idt(urb))
                return 0;

        if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
                if (xhci_urb_temp_buffer_required(hcd, urb))
                        return xhci_map_temp_buffer(hcd, urb);
        }
        return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
        struct xhci_hcd *xhci;
        bool unmap_temp_buf = false;

        xhci = hcd_to_xhci(hcd);

        if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
                unmap_temp_buf = true;

        if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
                xhci_unmap_temp_buf(hcd, urb);
        else
                usb_hcd_unmap_urb_for_dma(hcd, urb);
}

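Taken together, the three helpers above implement a bounce buffer for hosts with XHCI_SG_TRB_CACHE_SIZE_QUIRK: when a scatter-gather list has more entries than the controller's TRB cache can absorb and the tail would split a packet, xhci_map_urb_for_dma() consolidates the URB into one contiguous temporary allocation, and xhci_unmap_temp_buf() copies the data back into the sg list for IN transfers before freeing it.
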
/**
/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
@@ -1436,7 +1141,6 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
                (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
@@ -1457,6 +1161,15 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}

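A short worked example of the index and flag arithmetic above (the function is hypothetical, for illustration only):

static void ep_index_flag_example(void)
{
        /* xhci_get_endpoint_index(): index = (ep_num * 2) + dir_in - 1;
         * control endpoints use ep_num * 2, so ep0 is index 0. */
        unsigned int ep1_out_index = (1 * 2) + 0 - 1;   /* = 1 */
        unsigned int ep1_in_index  = (1 * 2) + 1 - 1;   /* = 2 */

        /* Control-context flags: slot context = bit 0, EP context N = bit
         * N + 1, so EP1 IN's flag is 1 << (2 + 1) = 0x08. */
        u32 ep1_in_flag = 1 << (ep1_in_index + 1);

        (void)ep1_out_index;
        (void)ep1_in_flag;
}
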
/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
@@ -1595,12 +1308,6 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 * - force an endpoint configure command
 * XXX: bandwidth is not recalculated. We should probably do that.
 */

static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}

static void xhci_fixup_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int interval)
{
@@ -1713,9 +1420,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

        if (!HCD_HW_ACCESSIBLE(hcd))
        if (!HCD_HW_ACCESSIBLE(hcd)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
                return -ESHUTDOWN;

        }
        if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
                xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
                return -ENODEV;
@@ -1921,12 +1630,7 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)

        for (; i < urb_priv->num_tds; i++) {
                td = &urb_priv->td[i];
                /* TD can already be on cancelled list if ep halted on it */
                if (list_empty(&td->cancelled_td_list)) {
                        td->cancel_status = TD_DIRTY;
                        list_add_tail(&td->cancelled_td_list,
                                      &ep->cancelled_td_list);
                }
                list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
        }

        /* Queue a stop endpoint command, but only if this is
@@ -1972,8 +1676,8 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
@@ -2033,6 +1737,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,

        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

        if (xhci->quirks & XHCI_MTK_HOST)
                xhci_mtk_drop_ep_quirk(hcd, udev, ep);

        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
@@ -2040,7 +1747,6 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                        (unsigned int) new_add_flags);
        return 0;
}
EXPORT_SYMBOL_GPL(xhci_drop_endpoint);

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
@@ -2055,8 +1761,8 @@ EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx;
@@ -2130,6 +1836,15 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                return -ENOMEM;
        }

        if (xhci->quirks & XHCI_MTK_HOST) {
                ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
                if (ret < 0) {
                        xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
                        virt_dev->eps[ep_index].new_ring = NULL;
                        return ret;
                }
        }

        ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

@@ -2154,7 +1869,6 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                        (unsigned int) new_add_flags);
        return 0;
}
EXPORT_SYMBOL_GPL(xhci_add_endpoint);

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
@@ -3188,7 +2902,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)

        return ret;
}
EXPORT_SYMBOL_GPL(xhci_check_bandwidth);

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
@@ -3213,7 +2926,6 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
        }
        xhci_zero_in_ctx(xhci, virt_dev);
}
EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
@@ -3227,6 +2939,84 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
}

static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state)
{
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_container_ctx *in_ctx;
        struct xhci_ep_ctx *ep_ctx;
        u32 added_ctxs;
        dma_addr_t addr;

        in_ctx = xhci->devs[slot_id]->in_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                __func__);
                return;
        }

        xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                        xhci->devs[slot_id]->out_ctx, ep_index);
        ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
                        deq_state->new_deq_ptr);
        if (addr == 0) {
                xhci_warn(xhci, "WARN Cannot submit config ep after "
                                "reset ep command\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
                                deq_state->new_deq_seg,
                                deq_state->new_deq_ptr);
                return;
        }
        ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

        added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
        xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
                        xhci->devs[slot_id]->out_ctx, ctrl_ctx,
                        added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
                        unsigned int ep_index, unsigned int stream_id,
                        struct xhci_td *td)
{
        struct xhci_dequeue_state deq_state;

        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                        "Cleaning up stalled endpoint ring");
        /* We need to move the HW's dequeue pointer past this TD,
         * or it will attempt to resend it on the next doorbell ring.
         */
        xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
                        &deq_state);

        if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
                return;

        /* HW with the reset endpoint quirk will use the saved dequeue state to
         * issue a configure endpoint command later.
         */
        if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                                "Queueing new dequeue state");
                xhci_queue_new_dequeue_state(xhci, slot_id,
                                ep_index, &deq_state);
        } else {
                /* Better hope no one uses the input context between now and the
                 * reset endpoint completion!
                 * XXX: No idea how this hardware will react when stream rings
                 * are enabled.
                 */
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Setting up input context for "
                                "configure endpoint command");
                xhci_setup_input_ctx_for_quirk(xhci, slot_id,
                                ep_index, &deq_state);
        }
}

static void xhci_endpoint_disable(struct usb_hcd *hcd,
                struct usb_host_endpoint *host_ep)
{
@@ -3314,10 +3104,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                return;

        /* Bail out if toggle is already being cleared by an endpoint reset */
        spin_lock_irqsave(&xhci->lock, flags);
        if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
                ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
        /* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */
        if (usb_endpoint_xfer_control(&host_ep->desc) ||
            usb_endpoint_xfer_isoc(&host_ep->desc))
@@ -3403,8 +3196,10 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        xhci_free_command(xhci, cfg_cmd);
cleanup:
        xhci_free_command(xhci, stop_cmd);
        spin_lock_irqsave(&xhci->lock, flags);
        if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
                ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
        spin_unlock_irqrestore(&xhci->lock, flags);
}

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
@@ -4948,6 +4743,7 @@ static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
                if (xhci_update_timeout_for_endpoint(xhci, udev,
                                &alt->endpoint[j].desc, state, timeout))
                        return -E2BIG;
                continue;
        }
        return 0;
}
@@ -5345,12 +5141,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
                hcd->self.root_hub->rx_lanes = 2;
                hcd->self.root_hub->tx_lanes = 2;
                hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
                break;
        case 1:
                hcd->speed = HCD_USB31;
                hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
                hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
                break;
        }
        xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
@@ -5492,7 +5286,6 @@ static const struct hc_driver xhci_hc_driver = {
         * managing i/o requests and associated device resources
         */
        .map_urb_for_dma =      xhci_map_urb_for_dma,
        .unmap_urb_for_dma =    xhci_unmap_urb_for_dma,
        .urb_enqueue =          xhci_urb_enqueue,
        .urb_dequeue =          xhci_urb_dequeue,
        .alloc_dev =            xhci_alloc_dev,
@@ -5550,10 +5343,6 @@ void xhci_init_driver(struct hc_driver *drv,
                drv->reset = over->reset;
        if (over->start)
                drv->start = over->start;
        if (over->add_endpoint)
                drv->add_endpoint = over->add_endpoint;
        if (over->drop_endpoint)
                drv->drop_endpoint = over->drop_endpoint;
        if (over->check_bandwidth)
                drv->check_bandwidth = over->check_bandwidth;
        if (over->reset_bandwidth)