Mirror of https://github.com/Qortal/Brooklyn.git, synced 2025-02-01 07:42:18 +00:00
Mike is as important as a white crayon.
parent fbee1514cd
commit 3ee9a3d6da
@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
 static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
 {
 	struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+	void __iomem *src = priv->mram_base + offset;
 
-	ioread32_rep(priv->mram_base + offset, val, val_count);
+	while (val_count--) {
+		*(unsigned int *)val = ioread32(src);
+		val += 4;
+		src += 4;
+	}
 
 	return 0;
 }
@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
 			    const void *val, size_t val_count)
 {
 	struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+	void __iomem *dst = priv->mram_base + offset;
 
-	iowrite32_rep(priv->base + offset, val, val_count);
+	while (val_count--) {
+		iowrite32(*(unsigned int *)val, dst);
+		val += 4;
+		dst += 4;
+	}
 
 	return 0;
 }
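Note: the two hunks above replace the string MMIO accessors with open-coded loops. ioread32_rep()/iowrite32_rep() issue repeated accesses to one fixed register address (FIFO semantics), while the M_CAN Message RAM is a linear buffer that needs an advancing pointer; the old write path also targeted priv->base instead of priv->mram_base. A minimal sketch of the two access patterns, where mmio_read32() is a hypothetical stand-in for ioread32():

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for ioread32(): reads one 32-bit MMIO word. */
extern uint32_t mmio_read32(const volatile uint32_t *addr);

/* FIFO semantics: every word is read from the SAME address. */
static void read_rep(const volatile uint32_t *reg, uint32_t *dst, size_t n)
{
	while (n--)
		*dst++ = mmio_read32(reg);	/* reg never advances */
}

/* Linear-RAM semantics: the source advances each word, which is what
 * reading a message out of the Message RAM actually requires.
 */
static void read_linear(const volatile uint32_t *src, uint32_t *dst, size_t n)
{
	while (n--)
		*dst++ = mmio_read32(src++);	/* src advances */
}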
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
 	struct rcar_can_priv *priv = netdev_priv(ndev);
 	u16 ctlr;
 
-	if (netif_running(ndev)) {
-		netif_stop_queue(ndev);
-		netif_device_detach(ndev);
-	}
+	if (!netif_running(ndev))
+		return 0;
+
+	netif_stop_queue(ndev);
+	netif_device_detach(ndev);
+
 	ctlr = readw(&priv->regs->ctlr);
 	ctlr |= RCAR_CAN_CTLR_CANM_HALT;
 	writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
 	u16 ctlr;
 	int err;
 
+	if (!netif_running(ndev))
+		return 0;
+
 	err = clk_enable(priv->clk);
 	if (err) {
 		netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
 	writew(ctlr, &priv->regs->ctlr);
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-	if (netif_running(ndev)) {
-		netif_device_attach(ndev);
-		netif_start_queue(ndev);
-	}
+	netif_device_attach(ndev);
+	netif_start_queue(ndev);
 
 	return 0;
 }
@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
 		struct net_device *prev_dev = chan->prev_dev;
 
 		dev_info(&pdev->dev, "removing device %s\n", dev->name);
+		/* do that only for first channel */
+		if (!prev_dev && chan->pciec_card)
+			peak_pciec_remove(chan->pciec_card);
 		unregister_sja1000dev(dev);
 		free_sja1000dev(dev);
 		dev = prev_dev;
 
-		if (!dev) {
-			/* do that only for first channel */
-			if (chan->pciec_card)
-				peak_pciec_remove(chan->pciec_card);
+		if (!dev)
 			break;
-		}
 		priv = netdev_priv(dev);
 		chan = priv->priv;
 	}
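Note: the reordering above is a use-after-free fix. chan points into the private area of the netdev that free_sja1000dev() releases, so the old code's final-iteration check read chan->pciec_card from freed memory. Reading the card pointer before the free (gated on !prev_dev to keep the "first channel only" behavior) closes the window. A compact sketch of the bug shape, with hypothetical types:

#include <stdlib.h>

struct chan { struct chan *prev; void *card; };

/* Buggy shape: 'c' lives inside the object being freed,
 * and is dereferenced afterwards.
 */
static void remove_buggy(struct chan *c)
{
	free(c);			/* releases the memory 'c' points into */
	if (!c->prev && c->card)	/* use-after-free */
		;
}

/* Fixed shape: snapshot everything needed from 'c' before the free. */
static void remove_fixed(struct chan *c)
{
	void *card = !c->prev ? c->card : NULL;	/* first channel only */

	free(c);
	(void)card;	/* safe: the read happened before the free */
}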
@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 	} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
 		new_state = CAN_STATE_ERROR_WARNING;
 	} else {
-		/* no error bit (so, no error skb, back to active state) */
-		dev->can.state = CAN_STATE_ERROR_ACTIVE;
+		/* back to (or still in) ERROR_ACTIVE state */
+		new_state = CAN_STATE_ERROR_ACTIVE;
 		pdev->bec.txerr = 0;
 		pdev->bec.rxerr = 0;
-		return 0;
 	}
 
 	/* state hasn't changed */
@@ -568,8 +567,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 
 	/* allocate an skb to store the error frame */
 	skb = alloc_can_err_skb(netdev, &cf);
-	if (skb)
-		can_change_state(netdev, cf, tx_state, rx_state);
+	can_change_state(netdev, cf, tx_state, rx_state);
 
 	/* things must be done even in case of OOM */
 	if (new_state == CAN_STATE_BUS_OFF)
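Note: both hunks lean on the same contract: can_change_state() accepts cf == NULL and still advances the device state and counters, only skipping the error-frame fill-in. Gating the call behind if (skb) meant an allocation failure silently skipped the state transition; routing the "back to active" case through new_state keeps the bookkeeping uniform. A sketch of the OOM-safe pattern, with hypothetical names:

struct frame;
extern struct frame *alloc_err_frame(void);		/* may return NULL */
extern void change_state(struct frame *cf, int tx, int rx);

static void report_state(int tx, int rx)
{
	struct frame *cf = alloc_err_frame();

	/* Must run even when cf is NULL: the state machine advances
	 * regardless of whether an error frame can be queued.
	 */
	change_state(cf, tx, rx);
}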
@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
 {
 	struct mt7530_priv *priv = ds->priv;
 
-	if (!dsa_is_user_port(ds, port))
-		return 0;
-
 	mutex_lock(&priv->reg_mutex);
 
 	/* Allow the user port gets connected to the cpu port and also
@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
 {
 	struct mt7530_priv *priv = ds->priv;
 
-	if (!dsa_is_user_port(ds, port))
-		return;
-
 	mutex_lock(&priv->reg_mutex);
 
 	/* Clear up all port matrix which could be restored in the next
@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
 		return -ENOMEM;
 
 	priv->ds->dev = &mdiodev->dev;
-	priv->ds->num_ports = DSA_MAX_PORTS;
+	priv->ds->num_ports = MT7530_NUM_PORTS;
 
 	/* Use medatek,mcm property to distinguish hardware type that would
 	 * casues a little bit differences on power-on sequence.
@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
 		dev_err(&nic->pdev->dev,
 			"Request for #%d msix vectors failed, returned %d\n",
 			nic->num_vec, ret);
-		return 1;
+		return ret;
 	}
 
 	/* Register mailbox interrupt handler */
@@ -1224,7 +1224,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
 	if (ret < 0) {
 		netdev_err(nic->netdev,
 			   "Req for #%d msix vectors failed\n", nic->num_vec);
-		return 1;
+		return ret;
 	}
 
 	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1243,7 +1243,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
 	if (!nicvf_check_pf_ready(nic)) {
 		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
 		nicvf_unregister_interrupts(nic);
-		return 1;
+		return -EIO;
 	}
 
 	return 0;
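Note: all three hunks fix the same smell: returning the literal 1 from functions whose callers test for a negative errno, so the failure read as success. Propagating ret (or a concrete -EIO) keeps the error visible. Sketch:

#include <errno.h>

extern int request_vectors(int n);	/* returns < 0 on failure */

static int register_interrupts(int num_vec)
{
	int ret = request_vectors(num_vec);

	if (ret < 0)
		return ret;	/* not 1: callers check for ret < 0 */
	return 0;
}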
@@ -157,7 +157,7 @@ static const struct {
 	{ ENETC_PM0_TFRM, "MAC tx frames" },
 	{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
 	{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
-	{ ENETC_PM0_TERR, "MAC tx frames" },
+	{ ENETC_PM0_TERR, "MAC tx frame errors" },
 	{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
 	{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
 	{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
 
 static void enetc_configure_port_mac(struct enetc_hw *hw)
 {
+	int tc;
+
 	enetc_port_wr(hw, ENETC_PM0_MAXFRM,
 		      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
 
-	enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+	for (tc = 0; tc < 8; tc++)
+		enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
 
 	enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
 		      ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
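Note: the hardware keeps one max-SDU register per traffic class; programming only PTCMSDUR(0) leaves classes 1-7 at their reset default, capping frames queued on those classes. The loop writes all eight. A sketch of the pattern (the register offsets here are illustrative, not the real ENETC map):

#include <stdint.h>

#define NUM_TC 8
#define PTCMSDUR(tc) (0x0014 + (tc) * 0x10)	/* illustrative offset only */

extern void port_wr(uint32_t reg, uint32_t val);	/* hypothetical MMIO write */

static void configure_max_sdu(uint32_t max_frame)
{
	int tc;

	/* every traffic class has its own MSDU limit; cover them all */
	for (tc = 0; tc < NUM_TC; tc++)
		port_wr(PTCMSDUR(tc), max_frame);
}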
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
 static LIST_HEAD(hnae3_client_list);
 static LIST_HEAD(hnae3_ae_dev_list);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+	const struct pci_device_id *pci_id;
+	struct hnae3_ae_dev *ae_dev;
+
+	if (!ae_algo)
+		return;
+
+	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+			continue;
+
+		pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+		if (!pci_id)
+			continue;
+		if (IS_ENABLED(CONFIG_PCI_IOV))
+			pci_disable_sriov(ae_dev->pdev);
+	}
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
 /* we are keeping things simple and using single lock for all the
  * list. This is a non-critical code so other updations, if happen
  * in parallel, can wait.
@@ -853,6 +853,7 @@ struct hnae3_handle {
 int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
 void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
 void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
@@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
 		.name = "uc",
 		.cmd = HNAE3_DBG_CMD_MAC_UC,
 		.dentry = HNS3_DBG_DENTRY_MAC,
-		.buf_len = HNS3_DBG_READ_LEN,
+		.buf_len = HNS3_DBG_READ_LEN_128KB,
 		.init = hns3_dbg_common_file_init,
 	},
 	{
@@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
 		.name = "tqp",
 		.cmd = HNAE3_DBG_CMD_REG_TQP,
 		.dentry = HNS3_DBG_DENTRY_REG,
-		.buf_len = HNS3_DBG_READ_LEN,
+		.buf_len = HNS3_DBG_READ_LEN_128KB,
 		.init = hns3_dbg_common_file_init,
 	},
 	{
@@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
 		.name = "fd_tcam",
 		.cmd = HNAE3_DBG_CMD_FD_TCAM,
 		.dentry = HNS3_DBG_DENTRY_FD,
-		.buf_len = HNS3_DBG_READ_LEN,
+		.buf_len = HNS3_DBG_READ_LEN_1MB,
 		.init = hns3_dbg_common_file_init,
 	},
 	{
@@ -462,7 +462,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
 	{ "TAIL", 2 },
 	{ "HEAD", 2 },
 	{ "FBDNUM", 2 },
-	{ "PKTNUM", 2 },
+	{ "PKTNUM", 5 },
 	{ "COPYBREAK", 2 },
-	{ "RING_EN", 2 },
+	{ "RX_RING_EN", 2 },
@@ -565,7 +565,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
 	{ "HEAD", 2 },
 	{ "FBDNUM", 2 },
 	{ "OFFSET", 2 },
-	{ "PKTNUM", 2 },
+	{ "PKTNUM", 5 },
-	{ "RING_EN", 2 },
+	{ "TX_RING_EN", 2 },
 	{ "BASE_ADDR", 10 },
@@ -790,13 +790,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
 }
 
 static const struct hns3_dbg_item tx_bd_info_items[] = {
-	{ "BD_IDX", 5 },
-	{ "ADDRESS", 2 },
+	{ "BD_IDX", 2 },
+	{ "ADDRESS", 13 },
 	{ "VLAN_TAG", 2 },
 	{ "SIZE", 2 },
 	{ "T_CS_VLAN_TSO", 2 },
 	{ "OT_VLAN_TAG", 3 },
-	{ "TV", 2 },
+	{ "TV", 5 },
 	{ "OLT_VLAN_LEN", 2 },
 	{ "PAYLEN_OL4CS", 2 },
 	{ "BD_FE_SC_VLD", 2 },
@@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
 
 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 			      struct sk_buff *skb,
-			      u8 max_non_tso_bd_num,
 			      unsigned int bd_num)
 {
 	/* 'bd_num == UINT_MAX' means the skb' fraglist has a
@@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 	 * will not help.
 	 */
 	if (skb->len > HNS3_MAX_TSO_SIZE ||
-	    (!skb_is_gso(skb) && skb->len >
-	     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+	    (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.hw_limitation++;
 		u64_stats_update_end(&ring->syncp);
@@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 			goto out;
 		}
 
-		if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
-				       bd_num))
+		if (hns3_skb_linearize(ring, skb, bd_num))
 			return -ENOMEM;
 
 		bd_num = hns3_tx_bd_count(skb->len);
@@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
 {
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc[i].addr = 0;
+	ring->desc_cb[i].refill = 0;
 }
 
 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
 
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
 					 ring->desc_cb[i].page_offset);
+	ring->desc_cb[i].refill = 1;
 
 	return 0;
 }
@@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 {
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc_cb[i] = *res_cb;
+	ring->desc_cb[i].refill = 1;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
 					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
@@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
 	ring->desc_cb[i].reuse_flag = 0;
+	ring->desc_cb[i].refill = 1;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
 					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
@@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
 	int ntc = ring->next_to_clean;
 	int ntu = ring->next_to_use;
 
+	if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+		return ring->desc_num;
+
 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }
 
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 				      int cleand_count)
 {
 	struct hns3_desc_cb *desc_cb;
@@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 			hns3_rl_err(ring_to_netdev(ring),
 				    "alloc rx buffer failed: %d\n",
 				    ret);
-			break;
+
+			writel(i, ring->tqp->io_base +
+			       HNS3_RING_RX_RING_HEAD_REG);
+			return true;
 		}
 		hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
 
@@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 	}
 
 	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+	return false;
 }
 
 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
 {
 	ring->desc[ring->next_to_clean].rx.bd_base_info &=
 		cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+	ring->desc_cb[ring->next_to_clean].refill = 0;
 	ring->next_to_clean += 1;
 
 	if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	int unused_count = hns3_desc_unused(ring);
+	bool failure = false;
 	int recv_pkts = 0;
 	int err;
 
@@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 	while (recv_pkts < budget) {
 		/* Reuse or realloc buffers */
 		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-			hns3_nic_alloc_rx_buffers(ring, unused_count);
-			unused_count = hns3_desc_unused(ring) -
-					ring->pending_buf;
+			failure = failure ||
+				  hns3_nic_alloc_rx_buffers(ring, unused_count);
+			unused_count = 0;
 		}
 
 		/* Poll one pkt */
@@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 	}
 
 out:
-	/* Make all data has been write before submit */
-	if (unused_count > 0)
-		hns3_nic_alloc_rx_buffers(ring, unused_count);
-
-	return recv_pkts;
+	return failure ? budget : recv_pkts;
 }
 
 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
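Note: the hns3_enet.c hunks all serve one idea: with only a producer index (ntu) and a consumer index (ntc), ntc == ntu cannot distinguish "ring empty" from "ring full", which starts to matter once buffer allocation can fail and leave slots unposted. The per-descriptor refill flag resolves the ambiguity, and the rx poll path now propagates allocation failure (returning budget keeps NAPI scheduled so the refill is retried). A standalone sketch of the occupancy calculation:

#include <stdbool.h>

#define RING_SIZE 512

struct ring {
	int ntc;			/* next to clean (consumer) */
	int ntu;			/* next to use (producer) */
	bool refill[RING_SIZE];		/* slot has a posted buffer */
};

static int desc_unused(const struct ring *r)
{
	/* equal indices with no posted buffer means everything is free */
	if (r->ntc == r->ntu && !r->refill[r->ntc])
		return RING_SIZE;

	return ((r->ntc >= r->ntu) ? 0 : RING_SIZE) + r->ntc - r->ntu;
}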
@@ -186,11 +186,9 @@ enum hns3_nic_state {
 
 #define HNS3_MAX_BD_SIZE 65535
 #define HNS3_MAX_TSO_BD_NUM 63U
-#define HNS3_MAX_TSO_SIZE \
-	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
 
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
-	(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
-
 #define HNS3_VECTOR_GL0_OFFSET 0x100
 #define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -332,6 +330,7 @@ struct hns3_desc_cb {
 	u32 length;	/* length of the buffer */
 
 	u16 reuse_flag;
+	u16 refill;
 
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
 			*changed = true;
 		break;
 	case IEEE_8021QAZ_TSA_ETS:
+		/* The hardware will switch to sp mode if bandwidth is
+		 * 0, so limit ets bandwidth must be greater than 0.
+		 */
+		if (!ets->tc_tx_bw[i]) {
+			dev_err(&hdev->pdev->dev,
+				"tc%u ets bw cannot be 0\n", i);
+			return -EINVAL;
+		}
+
 		if (hdev->tm_info.tc_info[i].tc_sch_mode !=
 		    HCLGE_SCH_MODE_DWRR)
 			*changed = true;
@@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
 				   int *pos)
 {
-	struct hclge_dbg_bitmap_cmd *bitmap;
+	struct hclge_dbg_bitmap_cmd req;
 	struct hclge_desc desc;
 	u16 qset_id, qset_num;
 	int ret;
@@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
 		if (ret)
 			return ret;
 
-		bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
 
 		*pos += scnprintf(buf + *pos, len - *pos,
 				  "%04u %#x %#x %#x %#x\n",
-				  qset_id, bitmap->bit0, bitmap->bit1,
-				  bitmap->bit2, bitmap->bit3);
+				  qset_id, req.bit0, req.bit1, req.bit2,
+				  req.bit3);
 	}
 
 	return 0;
@@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
 				  int *pos)
 {
-	struct hclge_dbg_bitmap_cmd *bitmap;
+	struct hclge_dbg_bitmap_cmd req;
 	struct hclge_desc desc;
 	u8 pri_id, pri_num;
 	int ret;
@@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
 		if (ret)
 			return ret;
 
-		bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
 
 		*pos += scnprintf(buf + *pos, len - *pos,
 				  "%03u %#x %#x %#x\n",
-				  pri_id, bitmap->bit0, bitmap->bit1,
-				  bitmap->bit2);
+				  pri_id, req.bit0, req.bit1, req.bit2);
 	}
 
 	return 0;
@@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
 				 int *pos)
 {
-	struct hclge_dbg_bitmap_cmd *bitmap;
+	struct hclge_dbg_bitmap_cmd req;
 	struct hclge_desc desc;
 	u8 pg_id;
 	int ret;
@@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
 		if (ret)
 			return ret;
 
-		bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
 
 		*pos += scnprintf(buf + *pos, len - *pos,
 				  "%03u %#x %#x %#x\n",
-				  pg_id, bitmap->bit0, bitmap->bit1,
-				  bitmap->bit2);
+				  pg_id, req.bit0, req.bit1, req.bit2);
 	}
 
 	return 0;
@@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
 				   int *pos)
 {
-	struct hclge_dbg_bitmap_cmd *bitmap;
+	struct hclge_dbg_bitmap_cmd req;
 	struct hclge_desc desc;
 	u8 port_id = 0;
 	int ret;
@@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
 	if (ret)
 		return ret;
 
-	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
 
 	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
-			  bitmap->bit0);
+			  req.bit0);
 	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
-			  bitmap->bit1);
+			  req.bit1);
 
 	return 0;
 }
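Note: the four debugfs hunks drop the pointer cast into the command descriptor. desc.data[] holds little-endian words written by the device, so interpreting &desc.data[1] through a host bitfield struct is only correct on little-endian machines; copying through le32_to_cpu() into a local makes the byte order explicit. A sketch of the safe decode (C11 anonymous struct; le32_to_host() is a hypothetical stand-in for le32_to_cpu()):

#include <stdint.h>

union bitmap_cmd {
	uint8_t bitmap;
	struct {
		uint8_t bit0 : 1;
		uint8_t bit1 : 1;
		uint8_t bit2 : 1;
		uint8_t bit3 : 1;
	};
};

extern uint32_t le32_to_host(uint32_t le);

static union bitmap_cmd decode(uint32_t le_word)
{
	union bitmap_cmd req = { 0 };

	/* convert first, then take the low byte: no pointer cast into
	 * device-endian descriptor memory
	 */
	req.bitmap = (uint8_t)le32_to_host(le_word);
	return req;
}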
@@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
 
 	/* configure TM QCN hw errors */
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
-	if (en)
+	desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+	if (en) {
+		desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
 		desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+	}
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
@@ -50,6 +50,8 @@
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
 #define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
 #define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
 #define HCLGE_NCSI_ERR_INT_EN 0x3
 #define HCLGE_NCSI_ERR_INT_TYPE 0x9
@@ -2847,33 +2847,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
-		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-				    hclge_wq, &hdev->service_task, 0);
+		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }
 
 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 {
 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
 	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
-		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-				    hclge_wq, &hdev->service_task, 0);
+		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }
 
 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
 {
 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
-		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-				    hclge_wq, &hdev->service_task, 0);
+		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }
 
 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
 {
 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
-		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-				    hclge_wq, &hdev->service_task,
-				    delay_time);
+		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
 }
 
 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
@@ -3491,33 +3487,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
 	hdev->num_msi_used += 1;
 }
 
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
-				      const cpumask_t *mask)
-{
-	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
-					      affinity_notify);
-
-	cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
 {
 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
 			      &hdev->affinity_mask);
-
-	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
-	hdev->affinity_notify.release = hclge_irq_affinity_release;
-	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
-				  &hdev->affinity_notify);
 }
 
 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
 {
-	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
 }
 
@@ -13052,7 +13029,7 @@ static int hclge_init(void)
 {
 	pr_info("%s is initializing\n", HCLGE_NAME);
 
-	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
 	if (!hclge_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
 		return -ENOMEM;
@@ -13065,6 +13042,7 @@ static int hclge_init(void)
 
 static void hclge_exit(void)
 {
+	hnae3_unregister_ae_algo_prepare(&ae_algo);
 	hnae3_unregister_ae_algo(&ae_algo);
 	destroy_workqueue(hclge_wq);
 }
@@ -944,7 +944,6 @@ struct hclge_dev {
 
 	/* affinity mask and notify for misc interrupt */
 	cpumask_t affinity_mask;
-	struct irq_affinity_notify affinity_notify;
 	struct hclge_ptp *ptp;
 	struct devlink *devlink;
 };
@@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
 		for (k = 0; k < hdev->tm_info.num_tc; k++)
 			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+		for (; k < HNAE3_MAX_TC; k++)
+			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
 	}
 }
 
@@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
 {
 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
 			      &hdev->state))
 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@@ -2273,9 +2274,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 			hdev->reset_attempts = 0;
 
 		hdev->last_reset_time = jiffies;
-		while ((hdev->reset_type =
-			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
-		       != HNAE3_NONE_RESET)
+		hdev->reset_type =
+			hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+		if (hdev->reset_type != HNAE3_NONE_RESET)
 			hclgevf_reset(hdev);
 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
 				      &hdev->reset_state)) {
@@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 
 	hclgevf_init_rxd_adv_layout(hdev);
 
+	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
 	hdev->last_reset_time = jiffies;
 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
 		 HCLGEVF_DRIVER_NAME);
@@ -3899,7 +3902,7 @@ static int hclgevf_init(void)
 {
 	pr_info("%s is initializing\n", HCLGEVF_NAME);
 
-	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
 	if (!hclgevf_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
 		return -ENOMEM;
@@ -146,6 +146,7 @@ enum hclgevf_states {
 	HCLGEVF_STATE_REMOVING,
 	HCLGEVF_STATE_NIC_REGISTERED,
 	HCLGEVF_STATE_ROCE_REGISTERED,
+	HCLGEVF_STATE_SERVICE_INITED,
 	/* task states */
 	HCLGEVF_STATE_RST_SERVICE_SCHED,
 	HCLGEVF_STATE_RST_HANDLING,
@@ -113,7 +113,8 @@ enum e1000_boards {
 	board_pch2lan,
 	board_pch_lpt,
 	board_pch_spt,
-	board_pch_cnp
+	board_pch_cnp,
+	board_pch_tgp
 };
 
 struct e1000_ps_page {
@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_pch_spt_info;
 extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
 extern const struct e1000_info e1000_es2_info;
 
 void e1000e_ptp_init(struct e1000_adapter *adapter);
@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
-	u32 ctrl_ext, txdctl, snoop;
+	u32 ctrl_ext, txdctl, snoop, fflt_dbg;
 	s32 ret_val;
 	u16 i;
 
@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	snoop = (u32)~(PCIE_NO_SNOOP_ALL);
 	e1000e_set_pcie_no_snoop(hw, snoop);
 
+	/* Enable workaround for packet loss issue on TGP PCH
+	 * Do not gate DMA clock from the modPHY block
+	 */
+	if (mac->type >= e1000_pch_tgp) {
+		fflt_dbg = er32(FFLT_DBG);
+		fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+		ew32(FFLT_DBG, fflt_dbg);
+	}
+
 	ctrl_ext = er32(CTRL_EXT);
 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
 	ew32(CTRL_EXT, ctrl_ext);
@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
 	.phy_ops = &ich8_phy_ops,
 	.nvm_ops = &spt_nvm_ops,
 };
+
+const struct e1000_info e1000_pch_tgp_info = {
+	.mac = e1000_pch_tgp,
+	.flags = FLAG_IS_ICH
+		  | FLAG_HAS_WOL
+		  | FLAG_HAS_HW_TIMESTAMP
+		  | FLAG_HAS_CTRLEXT_ON_LOAD
+		  | FLAG_HAS_AMT
+		  | FLAG_HAS_FLASH
+		  | FLAG_HAS_JUMBO_FRAMES
+		  | FLAG_APME_IN_WUC,
+	.flags2 = FLAG2_HAS_PHY_STATS
+		  | FLAG2_HAS_EEE,
+	.pba = 26,
+	.max_hw_frame_size = 9022,
+	.get_variants = e1000_get_variants_ich8lan,
+	.mac_ops = &ich8_mac_ops,
+	.phy_ops = &ich8_phy_ops,
+	.nvm_ops = &spt_nvm_ops,
+};
@@ -289,6 +289,9 @@
 /* Proprietary Latency Tolerance Reporting PCI Capability */
 #define E1000_PCI_LTR_CAP_LPT		0xA8
 
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK	0x1000
+
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 						  bool state);
@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
 	[board_pch_lpt] = &e1000_pch_lpt_info,
 	[board_pch_spt] = &e1000_pch_spt_info,
 	[board_pch_cnp] = &e1000_pch_cnp_info,
+	[board_pch_tgp] = &e1000_pch_tgp_info,
 };
 
 struct e1000_reg_info {
@@ -7896,28 +7897,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
 
 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 	case ICE_DEV_ID_E810C_BACKPLANE:
 	case ICE_DEV_ID_E810C_QSFP:
 	case ICE_DEV_ID_E810C_SFP:
+	case ICE_DEV_ID_E810_XXV_BACKPLANE:
+	case ICE_DEV_ID_E810_XXV_QSFP:
 	case ICE_DEV_ID_E810_XXV_SFP:
 		hw->mac_type = ICE_MAC_E810;
 		break;
@@ -21,6 +21,10 @@
 #define ICE_DEV_ID_E810C_QSFP		0x1592
 /* Intel(R) Ethernet Controller E810-C for SFP */
 #define ICE_DEV_ID_E810C_SFP		0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE	0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP	0x159A
 /* Intel(R) Ethernet Controller E810-XXV for SFP */
 #define ICE_DEV_ID_E810_XXV_SFP		0x159B
 /* Intel(R) Ethernet Connection E823-C for backplane */
@@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
 {
 	struct ice_hw *hw = &pf->hw;
 
-	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
+		 hw->api_min_ver, hw->api_patch);
 
 	return 0;
 }
@@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
 		if (hw->tnl.tbl[i].valid &&
 		    hw->tnl.tbl[i].type == type &&
-		    idx--)
+		    idx-- == 0)
 			return i;
 
 	WARN_ON_ONCE(1);
@@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
 	u16 index;
 
 	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
-	index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
 
 	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
 	if (status) {
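Note: two bugs in one call chain: the arguments to ice_tunnel_idx_to_entry() were passed in the wrong order, and idx-- as a bare condition is true for every nonzero idx, so the loop returned the first valid entry of the requested type instead of the idx-th. A self-contained demo of the post-decrement pitfall:

#include <stdio.h>

static int nth_buggy(const int *valid, int n, int idx)
{
	for (int i = 0; i < n; i++)
		if (valid[i] && idx--)		/* true for any idx != 0 */
			return i;
	return -1;
}

static int nth_fixed(const int *valid, int n, int idx)
{
	for (int i = 0; i < n; i++)
		if (valid[i] && idx-- == 0)	/* true on the idx-th match */
			return i;
	return -1;
}

int main(void)
{
	int valid[] = { 1, 1, 1, 1 };

	/* prints "0 2": the buggy form ignores idx entirely */
	printf("%d %d\n", nth_buggy(valid, 4, 2), nth_fixed(valid, 4, 2));
	return 0;
}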
@@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
  */
 static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
 {
-	struct net_device *event_netdev, *netdev_tmp;
 	struct netdev_notifier_bonding_info *info;
 	struct netdev_bonding_info *bonding_info;
+	struct net_device *event_netdev;
 	const char *lag_netdev_name;
 
 	event_netdev = netdev_notifier_info_to_dev(ptr);
@@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
 		goto lag_out;
 	}
 
-	rcu_read_lock();
-	for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
-		if (!netif_is_ice(netdev_tmp))
-			continue;
-
-		if (netdev_tmp && netdev_tmp != lag->netdev &&
-		    lag->peer_netdev != netdev_tmp) {
-			dev_hold(netdev_tmp);
-			lag->peer_netdev = netdev_tmp;
-		}
-	}
-	rcu_read_unlock();
-
 	if (bonding_info->slave.state)
 		ice_lag_set_backup(lag);
 	else
@@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
 	case NETDEV_BONDING_INFO:
 		ice_lag_info_event(lag, ptr);
 		break;
+	case NETDEV_UNREGISTER:
+		ice_lag_unlink(lag, ptr);
+		break;
 	default:
 		break;
 	}
@@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)
  */
 int ice_vsi_release(struct ice_vsi *vsi)
 {
+	enum ice_status err;
 	struct ice_pf *pf;
 
 	if (!vsi->back)
@@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
 
 	ice_fltr_remove_all(vsi);
 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+	if (err)
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+			vsi->vsi_num, err);
 	ice_vsi_delete(vsi);
 	ice_vsi_free_q_vectors(vsi);
 
@@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 
 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+	ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+	if (ret)
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+			vsi->vsi_num, ret);
 	ice_vsi_free_q_vectors(vsi);
 
 	/* SR-IOV determines needed MSIX resources all at once instead of per
@@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	if (!pf)
 		return -ENOMEM;
 
+	/* initialize Auxiliary index to invalid value */
+	pf->aux_idx = -1;
+
 	/* set up for high or low DMA */
 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 	if (err)
@@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)
 
 	ice_aq_cancel_waiting_tasks(pf);
 	ice_unplug_aux_dev(pf);
-	ida_free(&ice_aux_ida, pf->aux_idx);
+	if (pf->aux_idx >= 0)
+		ida_free(&ice_aux_ida, pf->aux_idx);
 	set_bit(ICE_DOWN, pf->state);
 
 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
+	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
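Note: pf->aux_idx previously defaulted to 0, which is a valid IDA id, so an error path that never allocated an id could still call ida_free() on id 0 and corrupt the allocator's state. Initializing to -1 and guarding the free is the standard sentinel pattern; a sketch with hypothetical helpers:

struct pf_state { int aux_idx; };

extern int  id_alloc(void);	/* >= 0 on success; stands in for ida_alloc() */
extern void id_free(int id);	/* stands in for ida_free() */

static void pf_init(struct pf_state *pf)
{
	pf->aux_idx = -1;	/* invalid until id_alloc() succeeds */
}

static void pf_remove(struct pf_state *pf)
{
	if (pf->aux_idx >= 0)	/* never free an id that was never taken */
		id_free(pf->aux_idx);
}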
@@ -1571,6 +1571,9 @@ void ice_ptp_init(struct ice_pf *pf)
  */
 void ice_ptp_release(struct ice_pf *pf)
 {
+	if (!test_bit(ICE_FLAG_PTP, pf->flags))
+		return;
+
 	/* Disable timestamping for both Tx and Rx */
 	ice_ptp_cfg_timestamp(pf, false);
 
@@ -2070,6 +2070,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
 	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
 }
 
+/**
+ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its RDMA children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
+}
+
 /**
  * ice_get_agg_info - get the aggregator ID
  * @hw: pointer to the hardware structure
@@ -89,6 +89,7 @@ enum ice_status
 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
 		  u8 owner, bool enable);
 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
 
 /* Tx scheduler rate limiter functions */
 enum ice_status
@@ -226,18 +226,85 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
 
 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
 
+static void get_lf_str_list(struct rvu_block block, int pcifunc,
+			    char *lfs)
+{
+	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+		if (lf >= block.lf.max)
+			break;
+
+		if (block.fn_map[lf] != pcifunc)
+			continue;
+
+		if (lf == prev_lf + 1) {
+			prev_lf = lf;
+			seq = 1;
+			continue;
+		}
+
+		if (seq)
+			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+		else
+			len += (len ? sprintf(lfs + len, ",%d", lf) :
+				      sprintf(lfs + len, "%d", lf));
+
+		prev_lf = lf;
+		seq = 0;
+	}
+
+	if (seq)
+		len += sprintf(lfs + len, "-%d", prev_lf);
+
+	lfs[len] = '\0';
+}
+
+static int get_max_column_width(struct rvu *rvu)
+{
+	int index, pf, vf, lf_str_size = 12, buf_size = 256;
+	struct rvu_block block;
+	u16 pcifunc;
+	char *buf;
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+			pcifunc = pf << 10 | vf;
+			if (!pcifunc)
+				continue;
+
+			for (index = 0; index < BLK_COUNT; index++) {
+				block = rvu->hw->block[index];
+				if (!strlen(block.name))
+					continue;
+
+				get_lf_str_list(block, pcifunc, buf);
+				if (lf_str_size <= strlen(buf))
+					lf_str_size = strlen(buf) + 1;
+			}
+		}
+	}
+
+	kfree(buf);
+	return lf_str_size;
+}
+
 /* Dumps current provisioning status of all RVU block LFs */
 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 					  char __user *buffer,
 					  size_t count, loff_t *ppos)
 {
-	int index, off = 0, flag = 0, go_back = 0, len = 0;
+	int index, off = 0, flag = 0, len = 0, i = 0;
 	struct rvu *rvu = filp->private_data;
-	int lf, pf, vf, pcifunc;
+	int bytes_not_copied = 0;
 	struct rvu_block block;
-	int bytes_not_copied;
-	int lf_str_size = 12;
+	int pf, vf, pcifunc;
 	int buf_size = 2048;
+	int lf_str_size;
 	char *lfs;
 	char *buf;
 
@@ -249,6 +316,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 	if (!buf)
 		return -ENOSPC;
 
+	/* Get the maximum width of a column */
+	lf_str_size = get_max_column_width(rvu);
+
 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
 	if (!lfs) {
 		kfree(buf);
@@ -262,65 +332,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 				 "%-*s", lf_str_size,
 				 rvu->hw->block[index].name);
 	}
 
 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+	if (bytes_not_copied)
+		goto out;
+
+	i++;
+	*ppos += off;
 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+			off = 0;
+			flag = 0;
 			pcifunc = pf << 10 | vf;
 			if (!pcifunc)
 				continue;
 
 			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
-				go_back = scnprintf(&buf[off],
-						    buf_size - 1 - off,
-						    "%-*s", lf_str_size, lfs);
+				off = scnprintf(&buf[off],
+						buf_size - 1 - off,
+						"%-*s", lf_str_size, lfs);
 			} else {
 				sprintf(lfs, "PF%d", pf);
-				go_back = scnprintf(&buf[off],
-						    buf_size - 1 - off,
-						    "%-*s", lf_str_size, lfs);
+				off = scnprintf(&buf[off],
+						buf_size - 1 - off,
+						"%-*s", lf_str_size, lfs);
 			}
 
-			off += go_back;
-			for (index = 0; index < BLKTYPE_MAX; index++) {
+			for (index = 0; index < BLK_COUNT; index++) {
 				block = rvu->hw->block[index];
 				if (!strlen(block.name))
 					continue;
-				len = 0;
-				lfs[len] = '\0';
-				for (lf = 0; lf < block.lf.max; lf++) {
-					if (block.fn_map[lf] != pcifunc)
-						continue;
+				get_lf_str_list(block, pcifunc, lfs);
+				if (strlen(lfs))
 					flag = 1;
-					len += sprintf(&lfs[len], "%d,", lf);
-				}
 
-				if (flag)
-					len--;
-				lfs[len] = '\0';
 				off += scnprintf(&buf[off], buf_size - 1 - off,
 						 "%-*s", lf_str_size, lfs);
-				if (!strlen(lfs))
-					go_back += lf_str_size;
 			}
-			if (!flag)
-				off -= go_back;
-			else
-				flag = 0;
-			off--;
-			off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+			if (flag) {
+				off += scnprintf(&buf[off],
+						 buf_size - 1 - off, "\n");
+				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
+				if (bytes_not_copied)
+					goto out;
+
+				i++;
+				*ppos += off;
+			}
 		}
 	}
 
-	bytes_not_copied = copy_to_user(buffer, buf, off);
+out:
 	kfree(lfs);
 	kfree(buf);
-
 	if (bytes_not_copied)
 		return -EFAULT;
 
-	*ppos = off;
-	return off;
+	return *ppos;
 }
 
 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
@@ -504,7 +578,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
 	if (cmd_buf)
 		ret = -EINVAL;
 
-	if (!strncmp(subtoken, "help", 4) || ret < 0) {
+	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
 		goto qsize_write_done;
 	}
@@ -1719,6 +1793,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
 	u16 pcifunc;
 	char *str;
 
+	/* Ingress policers do not exist on all platforms */
+	if (!nix_hw->ipolicer)
+		return 0;
+
 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
 		if (layer == BAND_PROF_INVAL_LAYER)
 			continue;
@@ -1768,6 +1846,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
 	int layer;
 	char *str;
 
+	/* Ingress policers do not exist on all platforms */
+	if (!nix_hw->ipolicer)
+		return 0;
+
 	seq_puts(m, "\nBandwidth profile resource free count\n");
 	seq_puts(m, "=====================================\n");
 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
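Note: the debugfs rework above is a classic two-pass table print: get_max_column_width() first measures the widest LF list over every PF/VF/block combination (with a floor of 12, the old fixed width), then rsrc_attach_status renders each cell padded to that width and copies the buffer to userspace row by row. A compilable sketch of the measure-then-render idea:

#include <stdio.h>
#include <string.h>

static int max_width(const char *const *cells, int n)
{
	int w = 12;	/* floor, mirroring the old fixed lf_str_size */

	for (int i = 0; i < n; i++)
		if ((int)strlen(cells[i]) >= w)
			w = (int)strlen(cells[i]) + 1;
	return w;
}

int main(void)
{
	const char *cells[] = { "0", "1-3,7", "10-15,20-25,30" };
	int w = max_width(cells, 3);

	for (int i = 0; i < 3; i++)
		printf("%-*s", w, cells[i]);	/* pad every cell alike */
	putchar('\n');
	return 0;
}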
@@ -2507,6 +2507,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
 		return;
 
 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return;
+
 	vlan = &nix_hw->txvlan;
 
 	mutex_lock(&vlan->rsrc_lock);
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
@@ -10,6 +10,8 @@
 #include "en_tc.h"
 #include "rep/tc.h"
 #include "rep/neigh.h"
+#include "lag.h"
+#include "lag_mp.h"
 
 struct mlx5e_tc_tun_route_attr {
 	struct net_device *out_dev;
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
  *    Pkt: MAC  IP     ESP  IP    L4
  *
  * Transport Mode:
- *    SWP:      OutL3       InL4
- *              InL3
+ *    SWP:      OutL3       OutL4
  *    Pkt: MAC  IP     ESP  L4
 *
 * Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 		return;
 
 	if (!xo->inner_ipproto) {
-		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
-		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
-		if (skb->protocol == htons(ETH_P_IPV6))
-			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-		if (xo->proto == IPPROTO_UDP)
-			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-		return;
+		switch (xo->proto) {
+		case IPPROTO_UDP:
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+			fallthrough;
+		case IPPROTO_TCP:
+			/* IP | ESP | TCP */
+			eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+			break;
+		default:
+			break;
+		}
+	} else {
+		/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+		switch (xo->inner_ipproto) {
+		case IPPROTO_UDP:
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+			fallthrough;
+		case IPPROTO_TCP:
+			eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+			eseg->swp_inner_l4_offset =
+				(skb->csum_start + skb->head - skb->data) / 2;
+			if (skb->protocol == htons(ETH_P_IPV6))
+				eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+			break;
+		default:
+			break;
+		}
 	}
-
-	/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
-	switch (xo->inner_ipproto) {
-	case IPPROTO_UDP:
-		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-		fallthrough;
-	case IPPROTO_TCP:
-		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
-		eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
-		if (skb->protocol == htons(ETH_P_IPV6))
-			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-		break;
-	default:
-		break;
-	}
-
-	return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
 	struct mlx5e_flow_table *ft;
 	int err;
 
-	priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
-	if (!priv->fs.vlan)
-		return -ENOMEM;
-
 	ft = &priv->fs.vlan->ft;
 	ft->num_groups = 0;
 
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
 	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
-	if (IS_ERR(ft->t)) {
-		err = PTR_ERR(ft->t);
-		goto err_free_t;
-	}
+	if (IS_ERR(ft->t))
+		return PTR_ERR(ft->t);
 
 	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
 	if (!ft->g) {
@@ -1221,9 +1215,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
 	kfree(ft->g);
 err_destroy_vlan_table:
 	mlx5_destroy_flow_table(ft->t);
-err_free_t:
-	kvfree(priv->fs.vlan);
-	priv->fs.vlan = NULL;
 
 	return err;
 }
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
 	mlx5e_del_vlan_rules(priv);
 	mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
-	kvfree(priv->fs.vlan);
 }
 
 static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
 	mlx5e_arfs_destroy_tables(priv);
 	mlx5e_ethtool_cleanup_steering(priv);
 }
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+	priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+	if (!priv->fs.vlan)
+		return -ENOMEM;
+	return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+	kvfree(priv->fs.vlan);
+	priv->fs.vlan = NULL;
+}
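Note: these en_fs.c hunks move the priv->fs.vlan allocation out of table create/destroy and into a dedicated init/cleanup pair, so the object now spans the whole NIC profile lifetime instead of dying with the flow table; code that runs between table teardown and re-creation no longer touches freed memory. A minimal sketch of the split lifetime:

#include <stdlib.h>

struct vlan_tbl { int groups; };
struct fs { struct vlan_tbl *vlan; };

/* allocate once at profile init ... */
static int fs_init(struct fs *fs)
{
	fs->vlan = calloc(1, sizeof(*fs->vlan));
	return fs->vlan ? 0 : -1;
}

/* ... and free once at profile cleanup, not per table rebuild */
static void fs_cleanup(struct fs *fs)
{
	free(fs->vlan);
	fs->vlan = NULL;
}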
@@ -4578,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
 	mlx5e_timestamp_init(priv);
 
+	err = mlx5e_fs_init(priv);
+	if (err) {
+		mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+		return err;
+	}
+
 	err = mlx5e_ipsec_init(priv);
 	if (err)
 		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4595,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 	mlx5e_health_destroy_reporters(priv);
 	mlx5e_tls_cleanup(priv);
 	mlx5e_ipsec_cleanup(priv);
+	mlx5e_fs_cleanup(priv);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -67,6 +67,8 @@
 #include "lib/fs_chains.h"
 #include "diag/en_tc_tracepoint.h"
 #include <asm/div64.h>
+#include "lag.h"
+#include "lag_mp.h"
 
 #define nic_chains(priv) ((priv)->fs.tc.chains)
 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
 	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }
 
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
 static void
 ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			    struct mlx5_wqe_eth_seg *eseg)
 {
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
 	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-	if (skb->encapsulation) {
-		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+	if (xo->inner_ipproto) {
+		eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 		sq->stats->csum_partial_inner++;
 	} else {
 		sq->stats->csum_partial++;
 	}
 }
@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			 struct mlx5e_accel_tx_state *accel,
 			 struct mlx5_wqe_eth_seg *eseg)
 {
+	if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
+		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+		return;
+	}
+
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
 		if (skb->encapsulation) {
@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
 		sq->stats->csum_partial++;
 #endif
-	} else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
-		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
 	} else
 		sq->stats->csum_none++;
 }
@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)

err_min_rate:
	list_del(&group->list);
	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  group->tsar_ix);
	if (err)
	if (mlx5_destroy_scheduling_element_cmd(esw->dev,
						SCHEDULING_HIERARCHY_E_SWITCH,
						group->tsar_ix))
		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
err_sched_elem:
	kfree(group);
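
The hunk above drops the intermediate err assignment: on this failure path the teardown command's own status is only reported, while the caller still sees the original error. A compilable sketch of that convention (illustrative names):

#include <stdio.h>

static int destroy_elem(int ix)
{
	(void)ix;
	return 0;	/* pretend the teardown command succeeded */
}

static int create_group(void)
{
	int tsar_ix = 42;
	int err = -22;	/* say the min-rate setup failed (-EINVAL) */

	/* A secondary failure on the unwind path is only reported ... */
	if (destroy_elem(tsar_ix))
		fprintf(stderr, "destroy TSAR for group failed\n");
	/* ... so the caller still sees the original error. */
	return err;
}

int main(void)
{
	return create_group() ? 1 : 0;
}
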
@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
	if (!mlx5_lag_is_ready(ldev)) {
		do_bond = false;
	} else {
		/* VF LAG is in multipath mode, ignore bond change requests */
		if (mlx5_lag_is_multipath(dev0))
			return;

		tracker = ldev->tracker;

		do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@ -9,20 +9,23 @@
#include "eswitch.h"
#include "lib/mlx5.h"

static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
}

static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
	if (!mlx5_lag_is_ready(ldev))
		return false;

	if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
		return false;

	return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
					 ldev->pf[MLX5_LAG_P2].dev);
}

static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
}

bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
@ -24,12 +24,14 @@ struct lag_mp {
void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
int mlx5_lag_mp_init(struct mlx5_lag *ldev);
void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);

#else /* CONFIG_MLX5_ESWITCH */

static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }

#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_MP_H__ */
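
Note that the `#else` stub above is a plain non-static definition in a header; if the header is included from more than one translation unit with CONFIG_MLX5_ESWITCH disabled, that links as a duplicate symbol. The conventional form is a static inline stub, as in this hypothetical sketch (hypo_* names are ours, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct hypo_dev;	/* stand-in for struct mlx5_core_dev */

#ifndef CONFIG_HYPO_FEATURE
static inline bool hypo_feature_enabled(struct hypo_dev *dev)
{
	(void)dev;
	return false;	/* feature compiled out */
}
#endif

int main(void)
{
	printf("%d\n", hypo_feature_enabled(0));
	return 0;
}
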
@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char *wqe;
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err) {
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
out:
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
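
The rework above snapshots the WQE into an on-stack buffer before the ring slot is re-armed with a fresh skb, so the unmap of the old buffer uses the saved descriptor rather than the reused slot. A minimal model of that snapshot-then-recycle idea (sizes and helpers illustrative):

#include <string.h>

#define WQE_SIZE 32	/* stands in for MLXSW_PCI_WQE_SIZE */

struct elem_info { char elem[WQE_SIZE]; };

static void handle_completion(struct elem_info *slot)
{
	char wqe[WQE_SIZE];

	memcpy(wqe, slot->elem, sizeof(wqe)); /* snapshot before slot reuse */
	/* slot->elem may now be rewritten for a new buffer, while the
	 * copied descriptor in `wqe` remains valid for the unmap.
	 */
}

int main(void)
{
	struct elem_info slot = { { 0 } };

	handle_completion(&slot);
	return 0;
}
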
@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
					      DMA_BIT_MASK(32))) {
			dev_warn(&tx->adapter->pdev->dev,
				 "lan743x_: No suitable DMA available\n");
			ret = -ENOMEM;
			goto cleanup;
		}
	}
	ring_allocation_size = ALIGN(tx->ring_size *
				     sizeof(struct lan743x_tx_descriptor),
				     PAGE_SIZE);
@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
				  index);
}

static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
					gfp_t gfp)
{
	struct net_device *netdev = rx->adapter->netdev;
	struct device *dev = &rx->adapter->pdev->dev;
@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
	if (!skb)
		return -ENOMEM;
	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)

	/* save existing skb, allocate new skb and map to dma */
	skb = buffer_info->skb;
	if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
	if (lan743x_rx_init_ring_element(rx, rx->last_head,
					 GFP_ATOMIC | GFP_DMA)) {
		/* failed to allocate next skb.
		 * Memory is very low.
		 * Drop this packet and reuse buffer.
@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
					      DMA_BIT_MASK(32))) {
			dev_warn(&rx->adapter->pdev->dev,
				 "lan743x_: No suitable DMA available\n");
			ret = -ENOMEM;
			goto cleanup;
		}
	}
	ring_allocation_size = ALIGN(rx->ring_size *
				     sizeof(struct lan743x_rx_descriptor),
				     PAGE_SIZE);
@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)

	rx->last_head = 0;
	for (index = 0; index < rx->ring_size; index++) {
		ret = lan743x_rx_init_ring_element(rx, index);
		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
		   "Error allocating memory for LAN743x\n");

	lan743x_rx_ring_cleanup(rx);
	return ret;
}
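
Both ring-init hunks add the same 64-then-32-bit DMA mask negotiation. A hedged sketch of the general shape of that pattern (kernel-style fragment, not the driver's exact code; dma_set_mask_and_coherent() returns 0 on success):

#include <linux/dma-mapping.h>

/* Prefer 64-bit DMA addressing, fall back to 32-bit, and treat failure
 * of both as fatal for the device being probed.
 */
static int example_set_dma_mask(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit addressing available */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 0;	/* constrained to 32-bit addresses */
	return -ENOMEM;		/* no usable DMA configuration */
}
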
@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
	if (ret) {
		netif_err(adapter, probe, adapter->netdev,
			  "lan743x_hardware_init returned %d\n", ret);
		lan743x_pci_cleanup(adapter);
		return ret;
	}

	/* open netdev when netdev is at running state while resume.
@ -758,6 +758,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
			err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
					    "port %u: missing serdes\n",
					    portno);
			of_node_put(portnp);
			goto cleanup_config;
		}
		config->portno = portno;
@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
		target = ocelot_regmap_init(ocelot, res);
		if (IS_ERR(target)) {
			err = PTR_ERR(target);
			of_node_put(portnp);
			goto out_teardown;
		}
@ -182,15 +182,21 @@ static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int max_mtu;
	struct nfp_bpf_vnic *bv;
	struct bpf_prog *prog;

	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return 0;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (new_mtu > max_mtu) {
		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
			max_mtu);
	if (nn->xdp_hw.prog) {
		prog = nn->xdp_hw.prog;
	} else {
		bv = nn->app_priv;
		prog = bv->tc_prog;
	}

	if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
		nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
		return -EBUSY;
	}
	return 0;
@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
			       unsigned int mtu);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
	return 0;
}

bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
			       unsigned int mtu)
{
	unsigned int fw_mtu, pkt_off;

	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	pkt_off = min(prog->aux->max_pkt_offset, mtu);

	return fw_mtu < pkt_off;
}

static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
	unsigned int max_stack, max_prog_len;
	dma_addr_t dma_addr;
	void *img;
	int err;

	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
	if (fw_mtu < pkt_off) {
	if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
		return -EOPNOTSUPP;
	}
@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
	}

	reg->dst_lmextn = swreg_lmextn(dst);
	reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
	reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);

	return 0;
}
@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
	}

	reg->dst_lmextn = swreg_lmextn(dst);
	reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
	reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);

	return 0;
}
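
swreg_lmextn() yields a boolean here, so `|` and `||` compute the same value; the switch to `||` reads as the clang -Wbitwise-instead-of-logical style cleanup (an assumption on our part). The one observable difference between the operators is short-circuiting, shown in this self-contained demo:

#include <stdbool.h>
#include <stdio.h>

static bool flag_a(void) { puts("a evaluated"); return true; }
static bool flag_b(void) { puts("b evaluated"); return true; }

int main(void)
{
	/* Bitwise: both operands always run (evaluation order unspecified). */
	bool bitwise = flag_a() | flag_b();
	/* Logical: flag_b() is skipped once flag_a() returns true. */
	bool logical = flag_a() || flag_b();

	printf("%d %d\n", bitwise, logical);
	return 0;
}
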
|
@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
|
||||
napi_disable(&pldat->napi);
|
||||
netif_stop_queue(ndev);
|
||||
|
||||
if (ndev->phydev)
|
||||
phy_stop(ndev->phydev);
|
||||
|
||||
spin_lock_irqsave(&pldat->lock, flags);
|
||||
__lpc_eth_reset(pldat);
|
||||
netif_carrier_off(ndev);
|
||||
@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
|
||||
writel(0, LPC_ENET_MAC2(pldat->net_base));
|
||||
spin_unlock_irqrestore(&pldat->lock, flags);
|
||||
|
||||
if (ndev->phydev)
|
||||
phy_stop(ndev->phydev);
|
||||
clk_disable_unprepare(pldat->clk);
|
||||
|
||||
return 0;
|
||||
|
@ -157,6 +157,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
	{ PCI_VDEVICE(REALTEK, 0x8129) },
	{ PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
	{ PCI_VDEVICE(REALTEK, 0x8161) },
	{ PCI_VDEVICE(REALTEK, 0x8162) },
	{ PCI_VDEVICE(REALTEK, 0x8167) },
	{ PCI_VDEVICE(REALTEK, 0x8168) },
	{ PCI_VDEVICE(NCUBE, 0x8168) },
@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
	case MC_CMD_MEDIA_SFP_PLUS:
	case MC_CMD_MEDIA_QSFP_PLUS:
		SET_BIT(FIBRE);
		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
			SET_BIT(1000baseT_Full);
		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
			SET_BIT(10000baseT_Full);
		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
			SET_BIT(1000baseX_Full);
		}
		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
			SET_BIT(10000baseCR_Full);
			SET_BIT(10000baseLR_Full);
			SET_BIT(10000baseSR_Full);
		}
		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
			SET_BIT(40000baseCR4_Full);
		if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
			SET_BIT(40000baseSR4_Full);
		}
		if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
			SET_BIT(100000baseCR4_Full);
		if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
			SET_BIT(100000baseSR4_Full);
		}
		if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
			SET_BIT(25000baseCR_Full);
			SET_BIT(25000baseSR_Full);
		}
		if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
			SET_BIT(50000baseCR2_Full);
		break;
@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
		result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
	if (TEST_BIT(1000baseT_Half))
		result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
	if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
	if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
	    TEST_BIT(1000baseX_Full))
		result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
	if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
	if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
	    TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
	    TEST_BIT(10000baseSR_Full))
		result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
	if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
	if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
	    TEST_BIT(40000baseSR4_Full))
		result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
	if (TEST_BIT(100000baseCR4_Full))
	if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
		result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
	if (TEST_BIT(25000baseCR_Full))
	if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
		result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
	if (TEST_BIT(50000baseCR2_Full))
		result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
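
The two hunks are paired: every link mode the cap-to-linkset direction can now report must also be accepted by the linkset-to-cap direction, or a get-then-set round trip would silently drop modes. A toy invariant check (illustrative bit values, not the MCDI encoding):

#include <stdio.h>

#define CAP_10G (1u << 3)	/* illustrative capability bit */

static unsigned int cap_to_modes(unsigned int cap)
{
	return (cap & CAP_10G) ? 0x7u : 0;	/* 10000base{CR,LR,SR}, say */
}

static unsigned int modes_to_cap(unsigned int modes)
{
	return (modes & 0x7u) ? CAP_10G : 0;
}

int main(void)
{
	unsigned int cap = CAP_10G;

	printf("round trip ok: %d\n", modes_to_cap(cap_to_modes(cap)) == cap);
	return 0;
}
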
@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
	} else if (rc == -EINVAL) {
		fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
	} else if (rc == -EPERM) {
		netif_info(efx, probe, efx->net_dev, "no PTP support\n");
		pci_info(efx->pci_dev, "no PTP support\n");
		return rc;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
	 * should only have been called during probe.
	 */
	if (rc == -ENOSYS || rc == -EPERM)
		netif_info(efx, probe, efx->net_dev, "no PTP support\n");
		pci_info(efx->pci_dev, "no PTP support\n");
	else if (rc)
		efx_mcdi_display_error(efx, MC_CMD_PTP,
				       MC_CMD_PTP_IN_DISABLE_LEN,
@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
		return;

	if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
		netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
		pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
		return;
	}
	if (count > 0 && count > max_vfs)
@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		ptp_v2 = PTP_TCR_TSVER2ENA;
		snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
		if (priv->synopsys_id != DWMAC_CORE_5_10)
		if (priv->synopsys_id < DWMAC_CORE_4_10)
			ts_event_en = PTP_TCR_TSEVNTENA;
		ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)

/* --------------------------------------------------------------------- */

#ifdef __i386__
#if defined(__i386__) && !defined(CONFIG_UML)
#include <asm/msr.h>
#define GETTICK(x) \
({ \
	if (boot_cpu_has(X86_FEATURE_TSC)) \
		x = (unsigned int)rdtsc(); \
})
#else /* __i386__ */
#else /* __i386__ && !CONFIG_UML */
#define GETTICK(x)
#endif /* __i386__ */
#endif /* __i386__ && !CONFIG_UML */

static void epp_bh(struct work_struct *work)
{
@ -243,62 +243,10 @@ static void phy_sanitize_settings(struct phy_device *phydev)
	}
}

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	linkmode_copy(advertising, cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	linkmode_and(advertising, advertising, phydev->supported);

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = autoneg;

	if (autoneg == AUTONEG_DISABLE) {
		phydev->speed = speed;
		phydev->duplex = duplex;
	}

	linkmode_copy(phydev->advertising, advertising);

	linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
			 phydev->advertising, autoneg == AUTONEG_ENABLE);

	phydev->master_slave_set = cmd->base.master_slave_cfg;
	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

void phy_ethtool_ksettings_get(struct phy_device *phydev,
			       struct ethtool_link_ksettings *cmd)
{
	mutex_lock(&phydev->lock);
	linkmode_copy(cmd->link_modes.supported, phydev->supported);
	linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
	linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
@ -317,6 +265,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
	cmd->base.autoneg = phydev->autoneg;
	cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
	cmd->base.eth_tp_mdix = phydev->mdix;
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);

@ -750,6 +699,37 @@ static int phy_check_link_status(struct phy_device *phydev)
	return 0;
}

/**
 * _phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
static int _phy_start_aneg(struct phy_device *phydev)
{
	int err;

	lockdep_assert_held(&phydev->lock);

	if (!phydev->drv)
		return -EIO;

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phy_config_aneg(phydev);
	if (err < 0)
		return err;

	if (phy_is_started(phydev))
		err = phy_check_link_status(phydev);

	return err;
}

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
@ -763,21 +743,8 @@ int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	if (!phydev->drv)
		return -EIO;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phy_config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phy_is_started(phydev))
		err = phy_check_link_status(phydev);
out_unlock:
	err = _phy_start_aneg(phydev);
	mutex_unlock(&phydev->lock);

	return err;
@ -800,6 +767,61 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
	return ret < 0 ? ret : 0;
}

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	linkmode_copy(advertising, cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	linkmode_and(advertising, advertising, phydev->supported);

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	mutex_lock(&phydev->lock);
	phydev->autoneg = autoneg;

	if (autoneg == AUTONEG_DISABLE) {
		phydev->speed = speed;
		phydev->duplex = duplex;
	}

	linkmode_copy(phydev->advertising, advertising);

	linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
			 phydev->advertising, autoneg == AUTONEG_ENABLE);

	phydev->master_slave_set = cmd->base.master_slave_cfg;
	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	_phy_start_aneg(phydev);

	mutex_unlock(&phydev->lock);
	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

/**
 * phy_speed_down - set speed to lowest speed supported by both link partners
 * @phydev: the phy_device struct
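
The refactor above is the common locked/unlocked helper split: _phy_start_aneg() asserts that phydev->lock is held, and both phy_start_aneg() and phy_ethtool_ksettings_set() take the mutex once around the shared body. A userspace model of that convention (pthread standing in for mutex_lock/lockdep_assert_held; names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold dev_lock; in kernel code a lockdep_assert_held()
 * would document and check this.
 */
static int _start_aneg_locked(void)
{
	return 0;	/* shared body: sanitize, configure, check link */
}

static int start_aneg(void)
{
	int err;

	pthread_mutex_lock(&dev_lock);
	err = _start_aneg_locked();
	pthread_mutex_unlock(&dev_lock);
	return err;
}

int main(void)
{
	printf("%d\n", start_aneg());
	return 0;
}
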
@ -117,6 +117,7 @@ config USB_LAN78XX
	select PHYLIB
	select MICROCHIP_PHY
	select FIXED_PHY
	select CRC32
	help
	  This option adds support for Microchip LAN78XX based USB 2
	  & USB 3 10/100/1000 Ethernet adapters.
@ -4179,6 +4179,12 @@ static int lan78xx_probe(struct usb_interface *intf,

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;
@ -1788,6 +1788,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
	if (dev->maxpacket == 0) {
		/* that is a broken device */
		status = -ENODEV;
		goto out4;
	}

	/* let userspace know we have a random address */
	if (ether_addr_equal(net->dev_addr, node_id))
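
Both this hunk and the lan78xx one add the same guard: a broken descriptor can report a zero max packet size, and later TX logic takes a remainder modulo that value, so probe has to bail out first. A toy model of the guard (values illustrative):

#include <stdio.h>

static int probe_model(unsigned int maxpacket, unsigned int len)
{
	if (maxpacket == 0)
		return -19;	/* -ENODEV: reject the broken device */
	/* Later transmit logic divides by maxpacket, e.g. zero-length
	 * packet decisions of the form (len % maxpacket) == 0.
	 */
	printf("needs ZLP: %d\n", len % maxpacket == 0);
	return 0;
}

int main(void)
{
	return probe_model(0, 512) == -19 ? 0 : 1;
}
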
@ -3833,7 +3833,6 @@ vmxnet3_suspend(struct device *device)
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
@ -1360,8 +1360,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	bool is_ndisc = ipv6_ndisc_frame(skb);

	nf_reset_ct(skb);

	/* loopback, multicast & non-ND link-local traffic; do not push through
	 * packet taps again. Reset pkt_type for upper layers to process skb.
	 * For strict packets with a source LLA, determine the dst using the
@ -1424,8 +1422,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	nf_reset_ct(skb);

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;
@ -1730,6 +1730,10 @@ static int netfront_resume(struct xenbus_device *dev)

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	netif_tx_lock_bh(info->netdev);
	netif_device_detach(info->netdev);
	netif_tx_unlock_bh(info->netdev);

	xennet_disconnect_backend(info);
	return 0;
}
@ -2349,6 +2353,10 @@ static int xennet_connect(struct net_device *dev)
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_tx_lock_bh(np->netdev);
	netif_device_attach(np->netdev);
	netif_tx_unlock_bh(np->netdev);

	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];
@ -926,12 +926,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
@ -958,7 +960,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req->data_sent + ret < req->data_len)
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
@ -1048,10 +1050,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

@ -1064,7 +1067,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}
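
The kvec fix is pure pointer arithmetic: &req->ddgst points at a 4-byte value, so adding a byte offset to it without a cast advances in 4-byte strides. A self-contained demonstration (an array gives both pointers room to stay in bounds):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t digest[4] = { 0 };
	unsigned int offset = 2;	/* bytes already sent */

	/* Steps in 4-byte units: lands 8 bytes in, past the byte meant. */
	printf("no cast:   +%td bytes\n",
	       (char *)(&digest[0] + offset) - (char *)digest);
	/* Steps in bytes, which is what resuming a partial send needs. */
	printf("with cast: +%td bytes\n",
	       (char *)((uint8_t *)&digest[0] + offset) - (char *)digest);
	return 0;
}
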
@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = &cmd->exp_ddgst + cmd->offset,
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
	};
	int ret;
@ -1096,7 +1096,7 @@ static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}
|
||||
|
||||
static void nvmet_tcp_release_queue_work(struct work_struct *w)
|
||||
{
|
||||
struct page *page;
|
||||
struct nvmet_tcp_queue *queue =
|
||||
container_of(w, struct nvmet_tcp_queue, release_work);
|
||||
|
||||
@ -1447,6 +1448,8 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
|
||||
nvmet_tcp_free_crypto(queue);
|
||||
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
|
||||
|
||||
page = virt_to_head_page(queue->pf_cache.va);
|
||||
__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
|
||||
kfree(queue);
|
||||
}
|
||||
|
||||
|
@ -2510,6 +2510,14 @@ static int bcm2835_codec_create_component(struct bcm2835_codec_ctx *ctx)
|
||||
MMAL_PARAMETER_VIDEO_STOP_ON_PAR_COLOUR_CHANGE,
|
||||
&enable,
|
||||
sizeof(enable));
|
||||
|
||||
enable = (unsigned int)-5;
|
||||
vchiq_mmal_port_parameter_set(dev->instance,
|
||||
&ctx->component->control,
|
||||
MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS,
|
||||
&enable,
|
||||
sizeof(enable));
|
||||
|
||||
} else if (dev->role == DEINTERLACE) {
|
||||
/* Select the default deinterlace algorithm. */
|
||||
int half_framerate = 0;
|
||||
|
@ -1032,7 +1032,9 @@ static int bcm2835_isp_node_try_fmt(struct file *file, void *priv,
|
||||
/* In all cases, we only support the defaults for these: */
|
||||
f->fmt.pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(f->fmt.pix.colorspace);
|
||||
f->fmt.pix.xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(f->fmt.pix.colorspace);
|
||||
is_rgb = f->fmt.pix.colorspace == V4L2_COLORSPACE_SRGB;
|
||||
/* RAW counts as sRGB here so that we get full range. */
|
||||
is_rgb = f->fmt.pix.colorspace == V4L2_COLORSPACE_SRGB ||
|
||||
f->fmt.pix.colorspace == V4L2_COLORSPACE_RAW;
|
||||
f->fmt.pix.quantization = V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, f->fmt.pix.colorspace,
|
||||
f->fmt.pix.ycbcr_enc);
|
||||
|
||||
|