Raziel K. Crowe 2022-09-09 14:10:02 +05:00
parent 5b9ff8c28c
commit 9c4c3c24a5
553 changed files with 245404 additions and 0 deletions


@@ -0,0 +1,435 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
unsigned int i)
{
efv->parent = efx;
efv->idx = i;
INIT_LIST_HEAD(&efv->list);
efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
INIT_LIST_HEAD(&efv->dflt.acts.list);
INIT_LIST_HEAD(&efv->rx_list);
spin_lock_init(&efv->rx_lock);
efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR | NETIF_MSG_HW;
return 0;
}
static int efx_ef100_rep_open(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
NAPI_POLL_WEIGHT);
napi_enable(&efv->napi);
return 0;
}
static int efx_ef100_rep_close(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
napi_disable(&efv->napi);
netif_napi_del(&efv->napi);
return 0;
}
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct efx_rep *efv = netdev_priv(dev);
struct efx_nic *efx = efv->parent;
netdev_tx_t rc;
/* __ef100_hard_start_xmit() will always return success even in the
* case of TX drops, where it will increment efx's tx_dropped. The
* efv stats really only count attempted TX, not success/failure.
*/
atomic64_inc(&efv->stats.tx_packets);
atomic64_add(skb->len, &efv->stats.tx_bytes);
netif_tx_lock(efx->net_dev);
rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
netif_tx_unlock(efx->net_dev);
return rc;
}
static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
struct efx_rep *efv = netdev_priv(dev);
struct efx_nic *efx = efv->parent;
struct ef100_nic_data *nic_data;
nic_data = efx->nic_data;
/* nic_data->port_id is a u8[] */
ppid->id_len = sizeof(nic_data->port_id);
memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
return 0;
}
static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
struct efx_rep *efv = netdev_priv(dev);
struct efx_nic *efx = efv->parent;
struct ef100_nic_data *nic_data;
int ret;
nic_data = efx->nic_data;
ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
nic_data->pf_index, efv->idx);
if (ret >= len)
return -EOPNOTSUPP;
return 0;
}
static void efx_ef100_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct efx_rep *efv = netdev_priv(dev);
stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}
static const struct net_device_ops efx_ef100_rep_netdev_ops = {
.ndo_open = efx_ef100_rep_open,
.ndo_stop = efx_ef100_rep_close,
.ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
.ndo_get_stats64 = efx_ef100_rep_get_stats64,
};
static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}
static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
return efv->msg_enable;
}
static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
u32 msg_enable)
{
struct efx_rep *efv = netdev_priv(net_dev);
efv->msg_enable = msg_enable;
}
static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
struct efx_rep *efv = netdev_priv(net_dev);
ring->rx_max_pending = U32_MAX;
ring->rx_pending = efv->rx_pring_size;
}
static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
struct efx_rep *efv = netdev_priv(net_dev);
if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
return -EINVAL;
efv->rx_pring_size = ring->rx_pending;
return 0;
}
static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
.get_drvinfo = efx_ef100_rep_get_drvinfo,
.get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
.set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
.get_ringparam = efx_ef100_rep_ethtool_get_ringparam,
.set_ringparam = efx_ef100_rep_ethtool_set_ringparam,
};
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
unsigned int i)
{
struct net_device *net_dev;
struct efx_rep *efv;
int rc;
net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
if (!net_dev)
return ERR_PTR(-ENOMEM);
efv = netdev_priv(net_dev);
rc = efx_ef100_rep_init_struct(efx, efv, i);
if (rc)
goto fail1;
efv->net_dev = net_dev;
rtnl_lock();
spin_lock_bh(&efx->vf_reps_lock);
list_add_tail(&efv->list, &efx->vf_reps);
spin_unlock_bh(&efx->vf_reps_lock);
if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
netif_device_attach(net_dev);
netif_carrier_on(net_dev);
} else {
netif_carrier_off(net_dev);
netif_tx_stop_all_queues(net_dev);
}
rtnl_unlock();
net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
net_dev->features |= NETIF_F_LLTX;
net_dev->hw_features |= NETIF_F_LLTX;
return efv;
fail1:
free_netdev(net_dev);
return ERR_PTR(rc);
}
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
u32 selector;
int rc;
efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
/* Construct mport selector for corresponding VF */
efx_mae_mport_vf(efx, efv->idx, &selector);
/* Look up actual mport ID */
rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
if (rc)
return rc;
pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
/* mport label should fit in 16 bits */
WARN_ON(efv->mport >> 16);
return efx_tc_configure_default_rule_rep(efv);
}
static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}
static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
rtnl_lock();
spin_lock_bh(&efx->vf_reps_lock);
list_del(&efv->list);
spin_unlock_bh(&efx->vf_reps_lock);
rtnl_unlock();
synchronize_rcu();
free_netdev(efv->net_dev);
}
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
struct efx_rep *efv;
int rc;
efv = efx_ef100_rep_create_netdev(efx, i);
if (IS_ERR(efv)) {
rc = PTR_ERR(efv);
pci_err(efx->pci_dev,
"Failed to create representor for VF %d, rc %d\n", i,
rc);
return rc;
}
rc = efx_ef100_configure_rep(efv);
if (rc) {
pci_err(efx->pci_dev,
"Failed to configure representor for VF %d, rc %d\n",
i, rc);
goto fail1;
}
rc = register_netdev(efv->net_dev);
if (rc) {
pci_err(efx->pci_dev,
"Failed to register representor for VF %d, rc %d\n",
i, rc);
goto fail2;
}
pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
efv->net_dev->name);
return 0;
fail2:
efx_ef100_deconfigure_rep(efv);
fail1:
efx_ef100_rep_destroy_netdev(efv);
return rc;
}
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
struct net_device *rep_dev;
rep_dev = efv->net_dev;
if (!rep_dev)
return;
netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
unregister_netdev(rep_dev);
efx_ef100_deconfigure_rep(efv);
efx_ef100_rep_destroy_netdev(efv);
}
void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
struct efx_rep *efv, *next;
if (!nic_data->grp_mae)
return;
list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
efx_ef100_vfrep_destroy(efx, efv);
}
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
unsigned int read_index;
struct list_head head;
struct sk_buff *skb;
bool need_resched;
int spent = 0;
INIT_LIST_HEAD(&head);
/* Grab up to 'weight' pending SKBs */
spin_lock_bh(&efv->rx_lock);
read_index = efv->write_index;
while (spent < weight && !list_empty(&efv->rx_list)) {
skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
list_del(&skb->list);
list_add_tail(&skb->list, &head);
spent++;
}
spin_unlock_bh(&efv->rx_lock);
/* Receive them */
netif_receive_skb_list(&head);
if (spent < weight)
if (napi_complete_done(napi, spent)) {
spin_lock_bh(&efv->rx_lock);
efv->read_index = read_index;
/* If write_index advanced while we were doing the
* RX, then storing our read_index won't re-prime the
* fake-interrupt. In that case, we need to schedule
* NAPI again to consume the additional packet(s).
*/
need_resched = efv->write_index != read_index;
spin_unlock_bh(&efv->rx_lock);
if (need_resched)
napi_schedule(&efv->napi);
}
return spent;
}
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
u8 *eh = efx_rx_buf_va(rx_buf);
struct sk_buff *skb;
bool primed;
/* Don't allow too many queued SKBs to build up, as they consume
* GFP_ATOMIC memory. If we overrun, just start dropping.
*/
if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
atomic64_inc(&efv->stats.rx_dropped);
if (net_ratelimit())
netif_dbg(efv->parent, rx_err, efv->net_dev,
"nodesc-dropped packet of length %u\n",
rx_buf->len);
return;
}
skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
if (!skb) {
atomic64_inc(&efv->stats.rx_dropped);
if (net_ratelimit())
netif_dbg(efv->parent, rx_err, efv->net_dev,
"noskb-dropped packet of length %u\n",
rx_buf->len);
return;
}
memcpy(skb->data, eh, rx_buf->len);
__skb_put(skb, rx_buf->len);
skb_record_rx_queue(skb, 0); /* rep is single-queue */
/* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efv->net_dev);
skb_checksum_none_assert(skb);
atomic64_inc(&efv->stats.rx_packets);
atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
/* Add it to the rx list */
spin_lock_bh(&efv->rx_lock);
primed = efv->read_index == efv->write_index;
list_add_tail(&skb->list, &efv->rx_list);
efv->write_index++;
spin_unlock_bh(&efv->rx_lock);
/* Trigger rx work */
if (primed)
napi_schedule(&efv->napi);
}
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
struct efx_rep *efv, *out = NULL;
/* spinlock guards against list mutation while we're walking it;
* but caller must also hold rcu_read_lock() to ensure the netdev
* isn't freed after we drop the spinlock.
*/
spin_lock_bh(&efx->vf_reps_lock);
list_for_each_entry(efv, &efx->vf_reps, list)
if (efv->mport == mport) {
out = efv;
break;
}
spin_unlock_bh(&efx->vf_reps_lock);
return out;
}
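The RX path in this file has no hardware interrupt behind it: efx_ef100_rep_rx_packet() is the producer and efx_ef100_rep_poll() the consumer, with read_index/write_index acting as a software doorbell that is "primed" whenever the two counters are equal. The standalone sketch below models only that hand-off, using invented names (toy_rep, toy_rx_packet and toy_poll are not driver symbols), to show when napi_schedule() would fire and when the poller must re-arm itself.

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the representor's software doorbell: idle ("primed")
 * when read_index == write_index; the producer kicks the poller only on
 * the empty->non-empty transition, and the poller re-arms itself if the
 * producer advanced write_index while it was consuming.
 */
struct toy_rep {
	unsigned int read_index, write_index;
	bool napi_scheduled;
};

static void toy_rx_packet(struct toy_rep *r)
{
	bool primed = r->read_index == r->write_index;

	r->write_index++;			/* queue one packet */
	if (primed)
		r->napi_scheduled = true;	/* would be napi_schedule() */
}

static void toy_poll(struct toy_rep *r)
{
	unsigned int snapshot = r->write_index;

	/* ...consume every queued packet up to 'snapshot' here... */
	r->read_index = snapshot;
	/* re-arm only if more packets arrived while we were consuming */
	r->napi_scheduled = r->write_index != snapshot;
}

int main(void)
{
	struct toy_rep r = { 0, 0, false };

	toy_rx_packet(&r);	/* primes the doorbell, schedules the poll */
	toy_rx_packet(&r);	/* already scheduled, no second kick */
	toy_poll(&r);		/* drains both packets, goes idle again */
	printf("scheduled=%d read=%u write=%u\n",
	       r.napi_scheduled, r.read_index, r.write_index);
	return 0;
}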


@@ -0,0 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/* Handling for ef100 representor netdevs */
#ifndef EF100_REP_H
#define EF100_REP_H
#include "net_driver.h"
#include "tc.h"
struct efx_rep_sw_stats {
atomic64_t rx_packets, tx_packets;
atomic64_t rx_bytes, tx_bytes;
atomic64_t rx_dropped, tx_errors;
};
/**
* struct efx_rep - Private data for an Efx representor
*
* @parent: the efx PF which manages this representor
* @net_dev: representor netdevice
* @msg_enable: log message enable flags
* @mport: m-port ID of corresponding VF
* @idx: VF index
* @write_index: number of packets enqueued to @rx_list
* @read_index: number of packets consumed from @rx_list
* @rx_pring_size: max length of RX list
* @dflt: default-rule for MAE switching
* @list: entry on efx->vf_reps
* @rx_list: list of SKBs queued for receive in NAPI poll
* @rx_lock: protects @rx_list
* @napi: NAPI control structure
* @stats: software traffic counters for netdev stats
*/
struct efx_rep {
struct efx_nic *parent;
struct net_device *net_dev;
u32 msg_enable;
u32 mport;
unsigned int idx;
unsigned int write_index, read_index;
unsigned int rx_pring_size;
struct efx_tc_flow_rule dflt;
struct list_head list;
struct list_head rx_list;
spinlock_t rx_lock;
struct napi_struct napi;
struct efx_rep_sw_stats stats;
};
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
void efx_ef100_fini_vfreps(struct efx_nic *efx);
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
/* Returns the representor corresponding to a VF m-port, or NULL
* @mport is an m-port label, *not* an m-port ID!
* Caller must hold rcu_read_lock().
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
#endif /* EF100_REP_H */
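The lookup helper above has two easy-to-miss requirements: the caller passes an m-port label (not an m-port ID) and must hold rcu_read_lock() across any use of the returned representor. A hedged sketch of a caller follows; example_deliver_to_rep is a hypothetical name, and the real RX-prefix parsing that produces the label lives elsewhere in the driver.

/* Hypothetical caller: hand an RX buffer to the representor that owns
 * the given m-port label.  rcu_read_lock() keeps the rep netdev from
 * being freed between the lookup and the delivery, as required above.
 */
static void example_deliver_to_rep(struct efx_nic *efx, u16 mport_label,
				   struct efx_rx_buffer *rx_buf)
{
	struct efx_rep *efv;

	rcu_read_lock();
	efv = efx_ef100_find_rep_by_mport(efx, mport_label);
	if (efv)
		efx_ef100_rep_rx_packet(efv, rx_buf);
	rcu_read_unlock();
}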


@@ -0,0 +1,72 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "ef100_sriov.h"
#include "ef100_nic.h"
#include "ef100_rep.h"
static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
{
struct ef100_nic_data *nic_data = efx->nic_data;
struct pci_dev *dev = efx->pci_dev;
struct efx_rep *efv, *next;
int rc, i;
efx->vf_count = num_vfs;
rc = pci_enable_sriov(dev, num_vfs);
if (rc)
goto fail1;
if (!nic_data->grp_mae)
return 0;
for (i = 0; i < num_vfs; i++) {
rc = efx_ef100_vfrep_create(efx, i);
if (rc)
goto fail2;
}
return 0;
fail2:
list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
efx_ef100_vfrep_destroy(efx, efv);
pci_disable_sriov(dev);
fail1:
netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n");
efx->vf_count = 0;
return rc;
}
int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
unsigned int vfs_assigned;
vfs_assigned = pci_vfs_assigned(dev);
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
"please detach them before disabling SR-IOV\n");
return -EBUSY;
}
efx_ef100_fini_vfreps(efx);
if (!vfs_assigned)
pci_disable_sriov(dev);
return 0;
}
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs)
{
if (num_vfs == 0)
return efx_ef100_pci_sriov_disable(efx, false);
else
return efx_ef100_pci_sriov_enable(efx, num_vfs);
}
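efx_ef100_sriov_configure() maps directly onto the PCI core's sriov_configure hook: zero disables SR-IOV, any other value enables that many VFs. The glue below is a sketch only; how the efx_nic pointer is recovered from the pci_dev (pci_get_drvdata() here) is an assumption for illustration, not necessarily how this driver stores its state.

/* Illustrative glue: route writes to the standard sriov_numvfs sysfs
 * attribute into the helper above.
 */
static int example_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	return efx_ef100_sriov_configure(efx, num_vfs);
}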


@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "net_driver.h"
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs);
int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force);


@@ -0,0 +1,346 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "mae.h"
#include "mcdi.h"
#include "mcdi_pcol_mae.h"
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
size_t outlen;
int rc;
if (WARN_ON_ONCE(!id))
return -EINVAL;
if (WARN_ON_ONCE(!label))
return -EINVAL;
MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
MAE_MPORT_SELECTOR_ASSIGNED);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
*id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
*label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
return 0;
}
int efx_mae_free_mport(struct efx_nic *efx, u32 id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN);
BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN);
MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id);
return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
void efx_mae_mport_wire(struct efx_nic *efx, u32 *out)
{
efx_dword_t mport;
EFX_POPULATE_DWORD_2(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num);
*out = EFX_DWORD_VAL(mport);
}
void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
{
efx_dword_t mport;
EFX_POPULATE_DWORD_3(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
*out = EFX_DWORD_VAL(mport);
}
void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
{
efx_dword_t mport;
EFX_POPULATE_DWORD_3(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
*out = EFX_DWORD_VAL(mport);
}
/* Constructs an mport selector from an mport ID, because they're not the same */
void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
{
efx_dword_t mport;
EFX_POPULATE_DWORD_2(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID,
MAE_MPORT_SELECTOR_MPORT_ID, mport_id);
*out = EFX_DWORD_VAL(mport);
}
/* id is really only 24 bits wide */
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
*id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
return 0;
}
static bool efx_mae_asl_id(u32 id)
{
return !!(id & BIT(31));
}
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
if (act->deliver)
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
act->dest_mport);
BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
/* We rely on the high bit of AS IDs always being clear.
* The firmware API guarantees this, but let's check it ourselves.
*/
if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) {
efx_mae_free_action_set(efx, act->fw_id);
return -EIO;
}
return 0;
}
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1));
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
/* FW freed a different ID than we asked for, should never happen.
* Warn because it means we've now got a different idea to the FW of
* what action-sets exist, which could cause mayhem later.
*/
if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id))
return -EIO;
return 0;
}
int efx_mae_alloc_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
struct efx_tc_action_set *act;
size_t inlen, outlen, i = 0;
efx_dword_t *inbuf;
int rc;
list_for_each_entry(act, &acts->list, list)
i++;
if (i == 0)
return -EINVAL;
if (i == 1) {
/* Don't wrap an ASL around a single AS, just use the AS_ID
* directly. ASLs are a more limited resource.
*/
act = list_first_entry(&acts->list, struct efx_tc_action_set, list);
acts->fw_id = act->fw_id;
return 0;
}
if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2)
return -EOPNOTSUPP; /* Too many actions */
inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i);
inbuf = kzalloc(inlen, GFP_KERNEL);
if (!inbuf)
return -ENOMEM;
i = 0;
list_for_each_entry(act, &acts->list, list) {
MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
i, act->fw_id);
i++;
}
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto out_free;
if (outlen < sizeof(outbuf)) {
rc = -EIO;
goto out_free;
}
acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
/* We rely on the high bit of ASL IDs always being set.
* The firmware API guarantees this, but let's check it ourselves.
*/
if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) {
efx_mae_free_action_set_list(efx, acts);
rc = -EIO;
}
out_free:
kfree(inbuf);
return rc;
}
int efx_mae_free_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1));
size_t outlen;
int rc;
/* If this is just an AS_ID with no ASL wrapper, then there is
* nothing for us to free. (The AS will be freed later.)
*/
if (efx_mae_asl_id(acts->fw_id)) {
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID,
acts->fw_id);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf,
sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
/* FW freed a different ID than we asked for, should never happen.
* Warn because it means we've now got a different idea to the FW of
* what action-set-lists exist, which could cause mayhem later.
*/
if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id))
return -EIO;
}
/* We're probably about to free @acts, but let's just make sure its
* fw_id is blatted so that it won't look valid if it leaks out.
*/
acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL;
return 0;
}
static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
const struct efx_tc_match *match)
{
if (match->mask.ingress_port) {
if (~match->mask.ingress_port)
return -EOPNOTSUPP;
MCDI_STRUCT_SET_DWORD(match_crit,
MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
match->value.ingress_port);
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
return 0;
}
int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
MCDI_DECLARE_STRUCT_PTR(match_crit);
MCDI_DECLARE_STRUCT_PTR(response);
size_t outlen;
int rc;
if (!id)
return -EINVAL;
match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
if (efx_mae_asl_id(acts_id)) {
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id);
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
} else {
/* We only had one AS, so we didn't wrap it in an ASL */
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id);
}
MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
rc = efx_mae_populate_match_criteria(match_crit, match);
if (rc)
return rc;
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
*id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
return 0;
}
int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1));
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
/* FW freed a different ID than we asked for, should also never happen.
* Warn because it means we've now got a different idea to the FW of
* what rules exist, which could cause mayhem later.
*/
if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id))
return -EIO;
return 0;
}
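The helpers above are used in a fixed sequence: build an m-port selector, allocate an action set that delivers to it, wrap the action set(s) in a list, then insert a rule matching on the ingress m-port. The sketch below strings them together for a "deliver everything arriving from VF n to the PF uplink" rule; it is illustrative only (error unwinding is omitted, example_default_rule and the prio argument are not driver symbols, and the real equivalent lives in the TC code that consumes this API).

/* Sketch, not the driver's actual default-rule path: match all traffic
 * entering from VF 'vf_idx' and deliver it to the PF's own m-port.
 */
static int example_default_rule(struct efx_nic *efx, unsigned int vf_idx,
				u32 prio, struct efx_tc_action_set *act,
				struct efx_tc_action_set_list *acts,
				struct efx_tc_match *match, u32 *rule_id)
{
	int rc;

	/* Match on the VF's m-port selector, all bits significant */
	efx_mae_mport_vf(efx, vf_idx, &match->value.ingress_port);
	match->mask.ingress_port = ~0;
	/* Single action: deliver to the PF uplink m-port */
	efx_mae_mport_uplink(efx, &act->dest_mport);
	act->deliver = 1;
	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		return rc;
	/* A one-entry "list" just reuses the AS_ID, see the alloc above */
	INIT_LIST_HEAD(&acts->list);
	list_add_tail(&act->list, &acts->list);
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		return rc;
	return efx_mae_insert_rule(efx, match, prio, acts->fw_id, rule_id);
}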


@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF100_MAE_H
#define EF100_MAE_H
/* MCDI interface for the ef100 Match-Action Engine */
#include "net_driver.h"
#include "tc.h"
#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
int efx_mae_free_mport(struct efx_nic *efx, u32 id);
void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
int efx_mae_alloc_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts);
int efx_mae_free_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts);
int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
#endif /* EF100_MAE_H */


@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2019-2022 Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef MCDI_PCOL_MAE_H
#define MCDI_PCOL_MAE_H
/* MCDI definitions for Match-Action Engine functionality, that are
* missing from the main mcdi_pcol.h
*/
/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
* following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
*/
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
#endif /* MCDI_PCOL_MAE_H */


@@ -0,0 +1,46 @@
# SPDX-License-Identifier: GPL-2.0-only
config SFC_SIENA
tristate "Solarflare SFC9000 support"
depends on PCI
depends on PTP_1588_CLOCK
select MDIO
select CRC32
help
This driver supports 10-gigabit Ethernet cards based on
the Solarflare SFC9000 controller.
To compile this driver as a module, choose M here. The module
will be called sfc-siena.
config SFC_SIENA_MTD
bool "Solarflare SFC9000-family MTD support"
depends on SFC_SIENA && MTD && !(SFC_SIENA=y && MTD=m)
default y
help
This exposes the on-board flash and/or EEPROM as MTD devices
(e.g. /dev/mtd1). This is required to update the firmware or
the boot configuration under Linux.
config SFC_SIENA_MCDI_MON
bool "Solarflare SFC9000-family hwmon support"
depends on SFC_SIENA && HWMON && !(SFC_SIENA=y && HWMON=m)
default y
help
This exposes the on-board firmware-managed sensors as a
hardware monitor device.
config SFC_SIENA_SRIOV
bool "Solarflare SFC9000-family SR-IOV support"
depends on SFC_SIENA && PCI_IOV
default n
help
This enables support for the Single Root I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
config SFC_SIENA_MCDI_LOGGING
bool "Solarflare SFC9000-family MCDI logging support"
depends on SFC_SIENA
default y
help
This enables support for tracing of MCDI (Management-Controller-to-
Driver-Interface) commands and responses, allowing debugging of
driver/firmware interaction. The tracing is actually enabled by
a sysfs file 'mcdi_logging' under the PCI device, or via module
parameter mcdi_logging_default.


@@ -0,0 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
sfc-siena-y += farch.o siena.o \
efx.o efx_common.o efx_channels.o nic.o \
tx.o tx_common.o rx.o rx_common.o \
selftest.o ethtool.o ethtool_common.o ptp.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
mcdi_mon.o
sfc-siena-$(CONFIG_SFC_SIENA_MTD) += mtd.o
sfc-siena-$(CONFIG_SFC_SIENA_SRIOV) += siena_sriov.o
obj-$(CONFIG_SFC_SIENA) += sfc-siena.o


@@ -0,0 +1,614 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#ifndef EFX_BITFIELD_H
#define EFX_BITFIELD_H
/*
* Efx bitfield access
*
* Efx NICs make extensive use of bitfields up to 128 bits
* wide. Since there is no native 128-bit datatype on most systems,
* and since 64-bit datatypes are inefficient on 32-bit systems and
* vice versa, we wrap accesses in a way that uses the most efficient
* datatype.
*
* The NICs are PCI devices and therefore little-endian. Since most
* of the quantities that we deal with are DMAed to/from host memory,
* we define our datatypes (efx_oword_t, efx_qword_t and
* efx_dword_t) to be little-endian.
*/
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
#define EFX_WORD_0_LBN 0
#define EFX_WORD_0_WIDTH 16
#define EFX_WORD_1_LBN 16
#define EFX_WORD_1_WIDTH 16
#define EFX_DWORD_0_LBN 0
#define EFX_DWORD_0_WIDTH 32
#define EFX_DWORD_1_LBN 32
#define EFX_DWORD_1_WIDTH 32
#define EFX_DWORD_2_LBN 64
#define EFX_DWORD_2_WIDTH 32
#define EFX_DWORD_3_LBN 96
#define EFX_DWORD_3_WIDTH 32
#define EFX_QWORD_0_LBN 0
#define EFX_QWORD_0_WIDTH 64
/* Specified attribute (e.g. LBN) of the specified field */
#define EFX_VAL(field, attribute) field ## _ ## attribute
/* Low bit number of the specified field */
#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
/* Bit width of the specified field */
#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
/* High bit number of the specified field */
#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 64 bits.
*/
#define EFX_MASK64(width) \
((width) == 64 ? ~((u64) 0) : \
(((((u64) 1) << (width))) - 1))
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 32 bits. Use
* EFX_MASK64 for higher width fields.
*/
#define EFX_MASK32(width) \
((width) == 32 ? ~((u32) 0) : \
(((((u32) 1) << (width))) - 1))
/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
typedef union efx_dword {
__le32 u32[1];
} efx_dword_t;
/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
typedef union efx_qword {
__le64 u64[1];
__le32 u32[2];
efx_dword_t dword[2];
} efx_qword_t;
/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
typedef union efx_oword {
__le64 u64[2];
efx_qword_t qword[2];
__le32 u32[4];
efx_dword_t dword[4];
} efx_oword_t;
/* Format string and value expanders for printk */
#define EFX_DWORD_FMT "%08x"
#define EFX_QWORD_FMT "%08x:%08x"
#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
#define EFX_DWORD_VAL(dword) \
((unsigned int) le32_to_cpu((dword).u32[0]))
#define EFX_QWORD_VAL(qword) \
((unsigned int) le32_to_cpu((qword).u32[1])), \
((unsigned int) le32_to_cpu((qword).u32[0]))
#define EFX_OWORD_VAL(oword) \
((unsigned int) le32_to_cpu((oword).u32[3])), \
((unsigned int) le32_to_cpu((oword).u32[2])), \
((unsigned int) le32_to_cpu((oword).u32[1])), \
((unsigned int) le32_to_cpu((oword).u32[0]))
/*
* Extract bit field portion [low,high) from the native-endian element
* which contains bits [min,max).
*
* For example, suppose "element" represents the high 32 bits of a
* 64-bit value, and we wish to extract the bits belonging to the bit
* field occupying bits 28-45 of this 64-bit value.
*
* Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
*
* ( element ) << 4
*
* The result will contain the relevant bits filled in in the range
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
((low) > (max) || (high) < (min) ? 0 : \
(low) > (min) ? \
(native_element) >> ((low) - (min)) : \
(native_element) << ((min) - (low)))
/*
* Extract bit field portion [low,high) from the 64-bit little-endian
* element which contains bits [min,max)
*/
#define EFX_EXTRACT64(element, min, max, low, high) \
EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
/*
* Extract bit field portion [low,high) from the 32-bit little-endian
* element which contains bits [min,max)
*/
#define EFX_EXTRACT32(element, min, max, low, high) \
EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
#define EFX_EXTRACT_OWORD64(oword, low, high) \
((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
EFX_MASK64((high) + 1 - (low)))
#define EFX_EXTRACT_QWORD64(qword, low, high) \
(EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
EFX_MASK64((high) + 1 - (low)))
#define EFX_EXTRACT_OWORD32(oword, low, high) \
((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
EFX_MASK32((high) + 1 - (low)))
#define EFX_EXTRACT_QWORD32(qword, low, high) \
((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
EFX_MASK32((high) + 1 - (low)))
#define EFX_EXTRACT_DWORD(dword, low, high) \
(EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
EFX_MASK32((high) + 1 - (low)))
#define EFX_OWORD_FIELD64(oword, field) \
EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_QWORD_FIELD64(qword, field) \
EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_OWORD_FIELD32(oword, field) \
EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_QWORD_FIELD32(qword, field) \
EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_DWORD_FIELD(dword, field) \
EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_OWORD_IS_ZERO64(oword) \
(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
#define EFX_QWORD_IS_ZERO64(qword) \
(((qword).u64[0]) == (__force __le64) 0)
#define EFX_OWORD_IS_ZERO32(oword) \
(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
== (__force __le32) 0)
#define EFX_QWORD_IS_ZERO32(qword) \
(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
#define EFX_DWORD_IS_ZERO(dword) \
(((dword).u32[0]) == (__force __le32) 0)
#define EFX_OWORD_IS_ALL_ONES64(oword) \
(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
#define EFX_QWORD_IS_ALL_ONES64(qword) \
((qword).u64[0] == ~((__force __le64) 0))
#define EFX_OWORD_IS_ALL_ONES32(oword) \
(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
== ~((__force __le32) 0))
#define EFX_QWORD_IS_ALL_ONES32(qword) \
(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
#define EFX_DWORD_IS_ALL_ONES(dword) \
((dword).u32[0] == ~((__force __le32) 0))
#if BITS_PER_LONG == 64
#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
#else
#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
#endif
/*
* Construct bit field portion
*
* Creates the portion of the bit field [low,high) that lies within
* the range [min,max).
*/
#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u64) (value)) << (low - min)) : \
(((u64) (value)) >> (min - low))))
#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u32) (value)) << (low - min)) : \
(((u32) (value)) >> (min - low))))
#define EFX_INSERT_NATIVE(min, max, low, high, value) \
((((max - min) >= 32) || ((high - low) >= 32)) ? \
EFX_INSERT_NATIVE64(min, max, low, high, value) : \
EFX_INSERT_NATIVE32(min, max, low, high, value))
/*
* Construct bit field portion
*
* Creates the portion of the named bit field that lies within the
* range [min,max).
*/
#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
/*
* Construct bit field
*
* Creates the portion of the named bit fields that lie within the
* range [min,max).
*/
#define EFX_INSERT_FIELDS_NATIVE(min, max, \
field1, value1, \
field2, value2, \
field3, value3, \
field4, value4, \
field5, value5, \
field6, value6, \
field7, value7, \
field8, value8, \
field9, value9, \
field10, value10, \
field11, value11, \
field12, value12, \
field13, value13, \
field14, value14, \
field15, value15, \
field16, value16, \
field17, value17, \
field18, value18, \
field19, value19) \
(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19)))
#define EFX_INSERT_FIELDS64(...) \
cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EFX_INSERT_FIELDS32(...) \
cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EFX_POPULATE_OWORD64(oword, ...) do { \
(oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
(oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_QWORD64(qword, ...) do { \
(qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_OWORD32(oword, ...) do { \
(oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
(oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
(oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_QWORD32(qword, ...) do { \
(qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_DWORD(dword, ...) do { \
(dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
} while (0)
#if BITS_PER_LONG == 64
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
#else
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
#endif
/* Populate an octword field with various numbers of arguments */
#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD
#define EFX_POPULATE_OWORD_18(oword, ...) \
EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_17(oword, ...) \
EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_16(oword, ...) \
EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_15(oword, ...) \
EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_14(oword, ...) \
EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_13(oword, ...) \
EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_12(oword, ...) \
EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_11(oword, ...) \
EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_10(oword, ...) \
EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_9(oword, ...) \
EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_8(oword, ...) \
EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_7(oword, ...) \
EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_6(oword, ...) \
EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_5(oword, ...) \
EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_4(oword, ...) \
EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_3(oword, ...) \
EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_2(oword, ...) \
EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_1(oword, ...) \
EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_ZERO_OWORD(oword) \
EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
#define EFX_SET_OWORD(oword) \
EFX_POPULATE_OWORD_4(oword, \
EFX_DWORD_0, 0xffffffff, \
EFX_DWORD_1, 0xffffffff, \
EFX_DWORD_2, 0xffffffff, \
EFX_DWORD_3, 0xffffffff)
/* Populate a quadword field with various numbers of arguments */
#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD
#define EFX_POPULATE_QWORD_18(qword, ...) \
EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_17(qword, ...) \
EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_16(qword, ...) \
EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_15(qword, ...) \
EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_14(qword, ...) \
EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_13(qword, ...) \
EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_12(qword, ...) \
EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_11(qword, ...) \
EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_10(qword, ...) \
EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_9(qword, ...) \
EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_8(qword, ...) \
EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_7(qword, ...) \
EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_6(qword, ...) \
EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_5(qword, ...) \
EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_4(qword, ...) \
EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_3(qword, ...) \
EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_2(qword, ...) \
EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_1(qword, ...) \
EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_ZERO_QWORD(qword) \
EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
#define EFX_SET_QWORD(qword) \
EFX_POPULATE_QWORD_2(qword, \
EFX_DWORD_0, 0xffffffff, \
EFX_DWORD_1, 0xffffffff)
/* Populate a dword field with various numbers of arguments */
#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD
#define EFX_POPULATE_DWORD_18(dword, ...) \
EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_17(dword, ...) \
EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_16(dword, ...) \
EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_15(dword, ...) \
EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_14(dword, ...) \
EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_13(dword, ...) \
EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_12(dword, ...) \
EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_11(dword, ...) \
EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_10(dword, ...) \
EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_9(dword, ...) \
EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_8(dword, ...) \
EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_7(dword, ...) \
EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_6(dword, ...) \
EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_5(dword, ...) \
EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_4(dword, ...) \
EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_3(dword, ...) \
EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_2(dword, ...) \
EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_1(dword, ...) \
EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_ZERO_DWORD(dword) \
EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
#define EFX_SET_DWORD(dword) \
EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
/*
* Modify a named field within an already-populated structure. Used
* for read-modify-write operations.
*
*/
#define EFX_INVERT_OWORD(oword) do { \
(oword).u64[0] = ~((oword).u64[0]); \
(oword).u64[1] = ~((oword).u64[1]); \
} while (0)
#define EFX_AND_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
} while (0)
#define EFX_AND_QWORD(qword, from, mask) \
(qword).u64[0] = (from).u64[0] & (mask).u64[0]
#define EFX_OR_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
} while (0)
#define EFX_INSERT64(min, max, low, high, value) \
cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
#define EFX_INSERT32(min, max, low, high, value) \
cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
#define EFX_INPLACE_MASK64(min, max, low, high) \
EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
#define EFX_INPLACE_MASK32(min, max, low, high) \
EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
#define EFX_SET_OWORD64(oword, low, high, value) do { \
(oword).u64[0] = (((oword).u64[0] \
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
| EFX_INSERT64(0, 63, low, high, value)); \
(oword).u64[1] = (((oword).u64[1] \
& ~EFX_INPLACE_MASK64(64, 127, low, high)) \
| EFX_INSERT64(64, 127, low, high, value)); \
} while (0)
#define EFX_SET_QWORD64(qword, low, high, value) do { \
(qword).u64[0] = (((qword).u64[0] \
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
| EFX_INSERT64(0, 63, low, high, value)); \
} while (0)
#define EFX_SET_OWORD32(oword, low, high, value) do { \
(oword).u32[0] = (((oword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
(oword).u32[1] = (((oword).u32[1] \
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
| EFX_INSERT32(32, 63, low, high, value)); \
(oword).u32[2] = (((oword).u32[2] \
& ~EFX_INPLACE_MASK32(64, 95, low, high)) \
| EFX_INSERT32(64, 95, low, high, value)); \
(oword).u32[3] = (((oword).u32[3] \
& ~EFX_INPLACE_MASK32(96, 127, low, high)) \
| EFX_INSERT32(96, 127, low, high, value)); \
} while (0)
#define EFX_SET_QWORD32(qword, low, high, value) do { \
(qword).u32[0] = (((qword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
(qword).u32[1] = (((qword).u32[1] \
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
| EFX_INSERT32(32, 63, low, high, value)); \
} while (0)
#define EFX_SET_DWORD32(dword, low, high, value) do { \
(dword).u32[0] = (((dword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
} while (0)
#define EFX_SET_OWORD_FIELD64(oword, field, value) \
EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_QWORD_FIELD64(qword, field, value) \
EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_OWORD_FIELD32(oword, field, value) \
EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_QWORD_FIELD32(qword, field, value) \
EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_DWORD_FIELD(dword, field, value) \
EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#if BITS_PER_LONG == 64
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
#else
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
#endif
/* Used to avoid compiler warnings about shift range exceeding width
* of the data types when dma_addr_t is only 32 bits wide.
*/
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
#define EFX_DMA_TYPE_WIDTH(width) \
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
/* Static initialiser */
#define EFX_OWORD32(a, b, c, d) \
{ .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
cpu_to_le32(c), cpu_to_le32(d) } }
#endif /* EFX_BITFIELD_H */
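As a quick worked example of the population and extraction macros, the fragment below packs two made-up fields into an efx_dword_t and reads one back. EXAMPLE_VF_ID and EXAMPLE_PF_ID are invented for illustration and do not correspond to any real register layout.

/* Illustrative field definitions only */
#define EXAMPLE_VF_ID_LBN 0
#define EXAMPLE_VF_ID_WIDTH 16
#define EXAMPLE_PF_ID_LBN 16
#define EXAMPLE_PF_ID_WIDTH 4

static inline unsigned int example_pack_and_extract(void)
{
	efx_dword_t reg;

	/* reg becomes cpu_to_le32(0x00020007): VF_ID=7 in bits 0..15,
	 * PF_ID=2 in bits 16..19, all other bits zeroed.
	 */
	EFX_POPULATE_DWORD_2(reg,
			     EXAMPLE_VF_ID, 7,
			     EXAMPLE_PF_ID, 2);
	return EFX_DWORD_FIELD(reg, EXAMPLE_PF_ID);	/* yields 2 */
}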

File diff suppressed because it is too large.


@@ -0,0 +1,218 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#ifndef EFX_EFX_H
#define EFX_EFX_H
#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "filter.h"
/* TX */
void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
struct sk_buff *skb);
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue,
__efx_siena_enqueue_skb, tx_queue, skb);
}
int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
/* RX */
void __efx_siena_rx_packet(struct efx_channel *channel);
void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
__efx_siena_rx_packet(channel);
}
/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS 100
/* The smallest [rt]xq_entries that the driver supports. RX minimum
* is a bit arbitrary. For TX, we must have space for at least 2
* TSO skbs.
*/
#define EFX_RXQ_MIN_ENT 128U
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_siena_tx_max_skb_descs(efx))
/* All EF10 architecture NICs steal one bit of the DMAQ size for various
* other purposes when counting TxQ entries, so we halve the queue size.
*/
#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? \
EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
static inline bool efx_rss_enabled(struct efx_nic *efx)
{
return efx->rss_spread > 1;
}
/* Filters */
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
* @replace_equal: Flag for whether the specified filter may replace an
* existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
*
* If existing filters have equal match values to the new filter spec,
* then the new filter might replace them or the function might fail,
* as follows.
*
* 1. If the existing filters have lower priority, or @replace_equal
* is set and they have equal priority, replace them.
*
* 2. If the existing filters have higher priority, return -%EPERM.
*
* 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not
* support delivery to multiple recipients, return -%EEXIST.
*
* This implies that filters for multiple multicast recipients must
* all be inserted with the same priority and @replace_equal = %false.
*/
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace_equal)
{
return efx->type->filter_insert(efx, spec, replace_equal);
}
/**
* efx_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
return efx->type->filter_remove_safe(efx, priority, filter_id);
}
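/* Editor's illustrative sketch (not part of the original file): the typical
 * insert/remove lifecycle described above. The helper name, queue number and
 * port are hypothetical; assumes IPPROTO_TCP and htons() are visible to the
 * includer.
 */
static inline int efx_example_filter_lifecycle(struct efx_nic *efx,
					       __be32 local_ip)
{
	struct efx_filter_spec spec;
	s32 filter_id;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, /* rxq_id */ 0);
	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, local_ip, htons(80));
	filter_id = efx_filter_insert_filter(efx, &spec, false);
	if (filter_id < 0)		/* e.g. -EPERM or -EEXIST, see above */
		return filter_id;
	return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
					 filter_id);
}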
/**
* efx_filter_get_filter_safe - retrieve a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
* @spec: Buffer in which to store filter specification
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec)
{
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size)
{
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
/* RSS contexts */
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
/* Ethtool support */
extern const struct ethtool_ops efx_siena_ethtool_ops;
/* Global */
unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive);
/* Update the generic software stats in the passed stats array */
void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats);
/* MTD */
#ifdef CONFIG_SFC_SIENA_MTD
int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
return efx->type->mtd_probe(efx);
}
void efx_siena_mtd_rename(struct efx_nic *efx);
void efx_siena_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_siena_mtd_rename(struct efx_nic *efx) {}
static inline void efx_siena_mtd_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_SIENA_SRIOV
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
return 1 << efx->vi_scale;
}
#endif
static inline void efx_device_detach_sync(struct efx_nic *efx)
{
struct net_device *dev = efx->net_dev;
/* Lock/freeze all TX queues so that we can be sure the
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
*/
netif_tx_lock_bh(dev);
netif_device_detach(dev);
netif_tx_unlock_bh(dev);
}
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
netif_device_attach(efx->net_dev);
}
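/* Editor's note: best-effort check that @sem is held for write. If a read
 * trylock succeeds, the semaphore cannot have been write-locked, so we WARN
 * and return false; otherwise assume the caller holds the write lock.
 */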
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
if (WARN_ON(down_read_trylock(sem))) {
up_read(sem);
return false;
}
return true;
}
int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n,
struct xdp_frame **xdpfs, bool flush);
#endif /* EFX_EFX_H */

File diff suppressed because it is too large


@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_SIENA_CHANNELS_H
#define EFX_SIENA_CHANNELS_H
extern unsigned int efx_siena_interrupt_mode;
extern unsigned int efx_siena_rss_cpus;
int efx_siena_probe_interrupts(struct efx_nic *efx);
void efx_siena_remove_interrupts(struct efx_nic *efx);
int efx_siena_enable_interrupts(struct efx_nic *efx);
void efx_siena_disable_interrupts(struct efx_nic *efx);
void efx_siena_set_interrupt_affinity(struct efx_nic *efx);
void efx_siena_clear_interrupt_affinity(struct efx_nic *efx);
void efx_siena_start_eventq(struct efx_channel *channel);
void efx_siena_stop_eventq(struct efx_channel *channel);
int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
u32 txq_entries);
void efx_siena_set_channel_names(struct efx_nic *efx);
int efx_siena_init_channels(struct efx_nic *efx);
int efx_siena_probe_channels(struct efx_nic *efx);
int efx_siena_set_channels(struct efx_nic *efx);
void efx_siena_remove_channel(struct efx_channel *channel);
void efx_siena_remove_channels(struct efx_nic *efx);
void efx_siena_fini_channels(struct efx_nic *efx);
void efx_siena_start_channels(struct efx_nic *efx);
void efx_siena_stop_channels(struct efx_nic *efx);
void efx_siena_init_napi(struct efx_nic *efx);
void efx_siena_fini_napi(struct efx_nic *efx);
void efx_siena_channel_dummy_op_void(struct efx_channel *channel);
#endif

File diff suppressed because it is too large


@ -0,0 +1,118 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_COMMON_H
#define EFX_COMMON_H
int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
unsigned int mem_map_size);
void efx_siena_fini_io(struct efx_nic *efx);
int efx_siena_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
struct net_device *net_dev);
void efx_siena_fini_struct(struct efx_nic *efx);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL
#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL
void efx_siena_link_clear_advertising(struct efx_nic *efx);
void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc);
void efx_siena_start_all(struct efx_nic *efx);
void efx_siena_stop_all(struct efx_nic *efx);
void efx_siena_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats);
int efx_siena_create_reset_workqueue(void);
void efx_siena_queue_reset_work(struct efx_nic *efx);
void efx_siena_flush_reset_workqueue(struct efx_nic *efx);
void efx_siena_destroy_reset_workqueue(void);
void efx_siena_start_monitor(struct efx_nic *efx);
int __efx_siena_reconfigure_port(struct efx_nic *efx);
int efx_siena_reconfigure_port(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_READY) || \
(efx->state == STATE_RECOVERY) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
int efx_siena_try_recovery(struct efx_nic *efx);
void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method);
void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue);
int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
int efx_siena_reset(struct efx_nic *efx, enum reset_type method);
void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type);
/* Dummy PHY ops for PHY drivers */
int efx_siena_port_dummy_op_int(struct efx_nic *efx);
void efx_siena_port_dummy_op_void(struct efx_nic *efx);
static inline int efx_check_disabled(struct efx_nic *efx)
{
if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
}
return 0;
}
static inline void efx_schedule_channel(struct efx_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
napi_schedule(&channel->napi_str);
}
static inline void efx_schedule_channel_irq(struct efx_channel *channel)
{
channel->event_test_cpu = raw_smp_processor_id();
efx_schedule_channel(channel);
}
#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
void efx_siena_init_mcdi_logging(struct efx_nic *efx);
void efx_siena_fini_mcdi_logging(struct efx_nic *efx);
#else
static inline void efx_siena_init_mcdi_logging(struct efx_nic *efx) {}
static inline void efx_siena_fini_mcdi_logging(struct efx_nic *efx) {}
#endif
void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
int efx_siena_set_mac_address(struct net_device *net_dev, void *data);
void efx_siena_set_rx_mode(struct net_device *net_dev);
int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data);
void efx_siena_link_status_changed(struct efx_nic *efx);
unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx);
int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu);
extern const struct pci_error_handlers efx_siena_err_handlers;
netdev_features_t efx_siena_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
int efx_siena_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid);
int efx_siena_get_phys_port_name(struct net_device *net_dev,
char *name, size_t len);
#endif


@ -0,0 +1,176 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2007-2013 Solarflare Communications Inc.
*/
#ifndef EFX_ENUM_H
#define EFX_ENUM_H
/**
* enum efx_loopback_mode - loopback modes
* @LOOPBACK_NONE: no loopback
* @LOOPBACK_DATA: data path loopback
* @LOOPBACK_GMAC: loopback within GMAC
* @LOOPBACK_XGMII: loopback after XMAC
* @LOOPBACK_XGXS: loopback within BPX after XGXS
* @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
* @LOOPBACK_GMII: loopback within BPX after GMAC
* @LOOPBACK_SGMII: loopback within BPX within SGMII
* @LOOPBACK_XGBR: loopback within BPX within XGBR
* @LOOPBACK_XFI: loopback within BPX before XFI serdes
* @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
* @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
* @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
* @LOOPBACK_XFI_FAR: loopback after XFI serdes
* @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
* @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
* @LOOPBACK_PCS: loopback within 10G PHY at PCS level
* @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
* @LOOPBACK_XPORT: cross port loopback
* @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
* @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
* @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
* @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
* @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
* @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
* @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
* @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
*/
/* Please keep up-to-date w.r.t the following two #defines */
enum efx_loopback_mode {
LOOPBACK_NONE = 0,
LOOPBACK_DATA = 1,
LOOPBACK_GMAC = 2,
LOOPBACK_XGMII = 3,
LOOPBACK_XGXS = 4,
LOOPBACK_XAUI = 5,
LOOPBACK_GMII = 6,
LOOPBACK_SGMII = 7,
LOOPBACK_XGBR = 8,
LOOPBACK_XFI = 9,
LOOPBACK_XAUI_FAR = 10,
LOOPBACK_GMII_FAR = 11,
LOOPBACK_SGMII_FAR = 12,
LOOPBACK_XFI_FAR = 13,
LOOPBACK_GPHY = 14,
LOOPBACK_PHYXS = 15,
LOOPBACK_PCS = 16,
LOOPBACK_PMAPMD = 17,
LOOPBACK_XPORT = 18,
LOOPBACK_XGMII_WS = 19,
LOOPBACK_XAUI_WS = 20,
LOOPBACK_XAUI_WS_FAR = 21,
LOOPBACK_XAUI_WS_NEAR = 22,
LOOPBACK_GMII_WS = 23,
LOOPBACK_XFI_WS = 24,
LOOPBACK_XFI_WS_FAR = 25,
LOOPBACK_PHYXS_WS = 26,
LOOPBACK_MAX
};
#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
/* These loopbacks occur within the controller */
#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
(1 << LOOPBACK_GMAC) | \
(1 << LOOPBACK_XGMII)| \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI) | \
(1 << LOOPBACK_GMII) | \
(1 << LOOPBACK_SGMII) | \
(1 << LOOPBACK_XGBR) | \
(1 << LOOPBACK_XFI) | \
(1 << LOOPBACK_XAUI_FAR) | \
(1 << LOOPBACK_GMII_FAR) | \
(1 << LOOPBACK_SGMII_FAR) | \
(1 << LOOPBACK_XFI_FAR) | \
(1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR))
#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR) | \
(1 << LOOPBACK_PHYXS_WS))
#define LOOPBACKS_EXTERNAL(_efx) \
((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
~(1 << LOOPBACK_NONE))
#define LOOPBACK_MASK(_efx) \
(1 << (_efx)->loopback_mode)
#define LOOPBACK_INTERNAL(_efx) \
(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
#define LOOPBACK_EXTERNAL(_efx) \
(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
#define LOOPBACK_CHANGED(_from, _to, _mask) \
(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
#define LOOPBACK_OUT_OF(_from, _to, _mask) \
((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
/*****************************************************************************/
/**
* enum reset_type - reset types
*
* %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The
* other values specify reasons, which efx_siena_schedule_reset() will choose
* a method for.
*
* Reset methods are numbered in order of increasing scope.
*
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
* @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
* if unsuccessful.
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
* @RESET_TYPE_DATAPATH: Reset datapath only.
* @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
* @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
*/
enum reset_type {
RESET_TYPE_INVISIBLE,
RESET_TYPE_RECOVER_OR_ALL,
RESET_TYPE_ALL,
RESET_TYPE_WORLD,
RESET_TYPE_RECOVER_OR_DISABLE,
RESET_TYPE_DATAPATH,
RESET_TYPE_MC_BIST,
RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
* it doesn't fit the scope hierarchy (not well-ordered by inclusion).
* We encode this by having its enum value be greater than
* RESET_TYPE_MAX_METHOD.
*/
RESET_TYPE_MCDI_TIMEOUT,
RESET_TYPE_MAX,
};
#endif /* EFX_ENUM_H */


@ -0,0 +1,282 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "efx_channels.h"
#include "rx_common.h"
#include "tx_common.h"
#include "ethtool_common.h"
#include "filter.h"
#include "nic.h"
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
/**************************************************************************
*
* Ethtool operations
*
**************************************************************************
*/
/* Identify device by flashing LEDs */
static int efx_ethtool_phys_id(struct net_device *net_dev,
enum ethtool_phys_id_state state)
{
struct efx_nic *efx = netdev_priv(net_dev);
enum efx_led_mode mode = EFX_LED_DEFAULT;
switch (state) {
case ETHTOOL_ID_ON:
mode = EFX_LED_ON;
break;
case ETHTOOL_ID_OFF:
mode = EFX_LED_OFF;
break;
case ETHTOOL_ID_INACTIVE:
mode = EFX_LED_DEFAULT;
break;
case ETHTOOL_ID_ACTIVE:
return 1; /* cycle on/off once per second */
}
return efx_siena_mcdi_set_id_led(efx, mode);
}
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
return efx_siena_get_regs_len(netdev_priv(net_dev));
}
static void efx_ethtool_get_regs(struct net_device *net_dev,
struct ethtool_regs *regs, void *buf)
{
struct efx_nic *efx = netdev_priv(net_dev);
regs->version = efx->type->revision;
efx_siena_get_regs(efx, buf);
}
/*
* Each channel has a single IRQ and moderation timer, started by any
* completion (or other event). Unless the module parameter
* separate_tx_channels is set, IRQs and moderation are therefore
* shared between RX and TX completions. In this case, when RX IRQ
* moderation is explicitly changed then TX IRQ moderation is
* automatically changed too, but otherwise we fail if the two values
* are requested to be different.
*
* The hardware does not support a limit on the number of completions
* before an IRQ, so we do not use the max_frames fields. We should
* report and require that max_frames == (usecs != 0), but this would
* invalidate existing user documentation.
*
* The hardware does not have distinct settings for interrupt
* moderation while the previous IRQ is being handled, so we should
* not use the 'irq' fields. However, an earlier developer
* misunderstood the meaning of the 'irq' fields and the driver did
* not support the standard fields. To avoid invalidating existing
* user documentation, we report and accept changes through either the
* standard or 'irq' fields. If both are changed at the same time, we
* prefer the standard field.
*
* We implement adaptive IRQ moderation, but use a different algorithm
* from that assumed in the definition of struct ethtool_coalesce.
* Therefore we do not use any of the adaptive moderation parameters
* in it.
*/
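/* Editor's illustration (not in the original source): a command such as
 * "ethtool -C ethX rx-usecs 60 adaptive-rx on" reaches
 * efx_ethtool_set_coalesce() below with rx_coalesce_usecs == 60 and
 * use_adaptive_rx_coalesce set; when channels are shared, TX moderation is
 * quietly updated to the same value unless it was explicitly changed too.
 */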
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
unsigned int tx_usecs, rx_usecs;
bool rx_adaptive;
efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
coalesce->tx_coalesce_usecs = tx_usecs;
coalesce->tx_coalesce_usecs_irq = tx_usecs;
coalesce->rx_coalesce_usecs = rx_usecs;
coalesce->rx_coalesce_usecs_irq = rx_usecs;
coalesce->use_adaptive_rx_coalesce = rx_adaptive;
return 0;
}
static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
unsigned int tx_usecs, rx_usecs;
bool adaptive, rx_may_override_tx;
int rc;
efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs)
rx_usecs = coalesce->rx_coalesce_usecs;
else
rx_usecs = coalesce->rx_coalesce_usecs_irq;
adaptive = coalesce->use_adaptive_rx_coalesce;
/* If channels are shared, TX IRQ moderation can be quietly
* overridden unless it is changed from its old value.
*/
rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
coalesce->tx_coalesce_usecs_irq == tx_usecs);
if (coalesce->tx_coalesce_usecs != tx_usecs)
tx_usecs = coalesce->tx_coalesce_usecs;
else
tx_usecs = coalesce->tx_coalesce_usecs_irq;
rc = efx_siena_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
rx_may_override_tx);
if (rc != 0)
return rc;
efx_for_each_channel(channel, efx)
efx->type->push_irq_moderation(channel);
return 0;
}
static void
efx_ethtool_get_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
ring->rx_pending = efx->rxq_entries;
ring->tx_pending = efx->txq_entries;
}
static int
efx_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
return -EINVAL;
if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
netif_err(efx, drv, efx->net_dev,
"RX queues cannot be smaller than %u\n",
EFX_RXQ_MIN_ENT);
return -EINVAL;
}
txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
if (txq_entries != ring->tx_pending)
netif_warn(efx, drv, efx->net_dev,
"increasing TX queue size to minimum of %u\n",
txq_entries);
return efx_siena_realloc_channels(efx, ring->rx_pending, txq_entries);
}
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
struct efx_nic *efx = netdev_priv(net_dev);
return efx->type->get_wol(efx, wol);
}
static int efx_ethtool_set_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
struct efx_nic *efx = netdev_priv(net_dev);
return efx->type->set_wol(efx, wol->wolopts);
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
struct ethtool_fec_stats *fec_stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->get_fec_stats)
efx->type->get_fec_stats(efx, fec_stats);
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
struct ethtool_ts_info *ts_info)
{
struct efx_nic *efx = netdev_priv(net_dev);
/* Software capabilities */
ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE);
ts_info->phc_index = -1;
efx_siena_ptp_get_ts_info(efx, ts_info);
return 0;
}
const struct ethtool_ops efx_siena_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = efx_siena_ethtool_get_drvinfo,
.get_regs_len = efx_ethtool_get_regs_len,
.get_regs = efx_ethtool_get_regs,
.get_msglevel = efx_siena_ethtool_get_msglevel,
.set_msglevel = efx_siena_ethtool_set_msglevel,
.get_link = ethtool_op_get_link,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
.get_ringparam = efx_ethtool_get_ringparam,
.set_ringparam = efx_ethtool_set_ringparam,
.get_pauseparam = efx_siena_ethtool_get_pauseparam,
.set_pauseparam = efx_siena_ethtool_set_pauseparam,
.get_sset_count = efx_siena_ethtool_get_sset_count,
.self_test = efx_siena_ethtool_self_test,
.get_strings = efx_siena_ethtool_get_strings,
.set_phys_id = efx_ethtool_phys_id,
.get_ethtool_stats = efx_siena_ethtool_get_stats,
.get_wol = efx_ethtool_get_wol,
.set_wol = efx_ethtool_set_wol,
.reset = efx_siena_ethtool_reset,
.get_rxnfc = efx_siena_ethtool_get_rxnfc,
.set_rxnfc = efx_siena_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_siena_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
.get_rxfh_context = efx_siena_ethtool_get_rxfh_context,
.set_rxfh_context = efx_siena_ethtool_set_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
.get_link_ksettings = efx_siena_ethtool_get_link_ksettings,
.set_link_ksettings = efx_siena_ethtool_set_link_ksettings,
.get_fec_stats = efx_ethtool_get_fec_stats,
.get_fecparam = efx_siena_ethtool_get_fecparam,
.set_fecparam = efx_siena_ethtool_set_fecparam,
};

File diff suppressed because it is too large


@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_ETHTOOL_COMMON_H
#define EFX_ETHTOOL_COMMON_H
void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info);
u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev);
void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
void efx_siena_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data);
void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause);
int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause);
int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_siena_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
u8 *strings);
void efx_siena_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats __always_unused,
u64 *data);
int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *out);
int efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *settings);
int efx_siena_ethtool_get_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam);
int efx_siena_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam);
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs);
int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev);
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc);
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
const u32 *indir, const u8 *key, const u8 hfunc);
int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context);
int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
const u32 *indir, const u8 *key,
const u8 hfunc, u32 *rss_context,
bool delete);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
u8 *data);
int efx_siena_ethtool_get_module_info(struct net_device *net_dev,
struct ethtool_modinfo *modinfo);
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,309 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2013 Solarflare Communications Inc.
*/
#ifndef EFX_FILTER_H
#define EFX_FILTER_H
#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
/**
* enum efx_filter_match_flags - Flags for hardware filter match type
* @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
* @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
* @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
* @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
* @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
* @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
* @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
* @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
* @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
* @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
* @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
* Used for RX default unicast and multicast/broadcast filters.
* @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
*
* Only some combinations are supported, depending on NIC type:
*
* - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
* local 2-tuple (only implemented for Falcon B0)
*
* - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
* or local 2-tuple, or local MAC with or without outer VID, and RX
* default filters
*
* - Huntington supports filter matching controlled by firmware, potentially
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
* with or without outer and inner VID
*/
enum efx_filter_match_flags {
EFX_FILTER_MATCH_REM_HOST = 0x0001,
EFX_FILTER_MATCH_LOC_HOST = 0x0002,
EFX_FILTER_MATCH_REM_MAC = 0x0004,
EFX_FILTER_MATCH_REM_PORT = 0x0008,
EFX_FILTER_MATCH_LOC_MAC = 0x0010,
EFX_FILTER_MATCH_LOC_PORT = 0x0020,
EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
EFX_FILTER_MATCH_INNER_VID = 0x0080,
EFX_FILTER_MATCH_OUTER_VID = 0x0100,
EFX_FILTER_MATCH_IP_PROTO = 0x0200,
EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800,
};
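/* Editor's note: flags combine to describe a match; for example a TCP/IPv4
 * 4-tuple RX filter sets ETHER_TYPE, IP_PROTO, LOC_HOST, LOC_PORT, REM_HOST
 * and REM_PORT, exactly as efx_filter_set_ipv4_full() below does.
 */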
/**
* enum efx_filter_priority - priority of a hardware filter specification
* @EFX_FILTER_PRI_HINT: Performance hint
* @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
* or hardware requirements. This may only be used by the filter
* implementation for each NIC type.
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
* networking and SR-IOV)
*/
enum efx_filter_priority {
EFX_FILTER_PRI_HINT = 0,
EFX_FILTER_PRI_AUTO,
EFX_FILTER_PRI_MANUAL,
EFX_FILTER_PRI_REQUIRED,
};
/**
* enum efx_filter_flags - flags for hardware filter specifications
* @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
* By default, matching packets will be delivered only to the
* specified queue. If this flag is set, they will be delivered
* to a range of queues offset from the specified queue number
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
* @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
* overriding an automatic filter (priority
* %EFX_FILTER_PRI_AUTO). This may only be set by the filter
* implementation for each type. A removal request will restore
* the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
/** enum efx_encap_type - types of encapsulation
* @EFX_ENCAP_TYPE_NONE: no encapsulation
* @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
* @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
* @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
* @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
*
* Contains both enumerated types and flags.
* To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
*/
enum efx_encap_type {
EFX_ENCAP_TYPE_NONE = 0,
EFX_ENCAP_TYPE_VXLAN = 1,
EFX_ENCAP_TYPE_NVGRE = 2,
EFX_ENCAP_TYPE_GENEVE = 3,
EFX_ENCAP_TYPES_MASK = 7,
EFX_ENCAP_FLAG_IPV6 = 8,
};
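/* Editor's note: a VXLAN-over-IPv6 match stores
 * EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6 (i.e. 9); the bare type is
 * recovered as (value & EFX_ENCAP_TYPES_MASK) == EFX_ENCAP_TYPE_VXLAN.
 */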
/**
* struct efx_filter_spec - specification for a hardware filter
* @match_flags: Match type flags, from &enum efx_filter_match_flags
* @priority: Priority of the filter, from &enum efx_filter_priority
* @flags: Miscellaneous flags, from &enum efx_filter_flags
* @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set. This
* is a user_id (with 0 meaning the driver/default RSS context), not an
* MCFW context_id.
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
* %EFX_FILTER_MATCH_LOC_MAC_IG is set
* @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
* @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
* @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
* is set
* @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
* @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
* @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
* @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
* @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
* %EFX_FILTER_MATCH_ENCAP_TYPE is set
*
* The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
* used to initialise the structure. The efx_filter_set_*() functions
* may then be used to set @rss_context, @match_flags and related
* fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
* depends on which fields are matched.
*/
struct efx_filter_spec {
u32 match_flags:12;
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
__be16 ether_type;
u8 ip_proto;
__be32 loc_host[4];
__be32 rem_host[4];
__be16 loc_port;
__be16 rem_port;
u32 encap_type:4;
/* total 65 bytes */
};
enum {
EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
enum efx_filter_priority priority,
enum efx_filter_flags flags,
unsigned rxq_id)
{
memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EFX_FILTER_FLAG_RX | flags;
spec->rss_context = 0;
spec->dmaq_id = rxq_id;
}
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
unsigned txq_id)
{
memset(spec, 0, sizeof(*spec));
spec->priority = EFX_FILTER_PRI_REQUIRED;
spec->flags = EFX_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
/**
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
*/
static inline int
efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port)
{
spec->match_flags |=
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = host;
spec->loc_port = port;
return 0;
}
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @lhost: Local host address (network byte order)
* @lport: Local port (network byte order)
* @rhost: Remote host address (network byte order)
* @rport: Remote port (network byte order)
*/
static inline int
efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 lhost, __be16 lport,
__be32 rhost, __be16 rport)
{
spec->match_flags |=
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = lhost;
spec->loc_port = lport;
spec->rem_host[0] = rhost;
spec->rem_port = rport;
return 0;
}
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
/**
* efx_filter_set_eth_local - specify local Ethernet address and/or VID
* @spec: Specification to initialise
* @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
* @addr: Local Ethernet MAC address, or %NULL
*/
static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr)
{
if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
return -EINVAL;
if (vid != EFX_FILTER_VID_UNSPEC) {
spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
spec->outer_vid = htons(vid);
}
if (addr != NULL) {
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
ether_addr_copy(spec->loc_mac, addr);
}
return 0;
}
/**
* efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
* @spec: Specification to initialise
*/
static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
{
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
return 0;
}
/**
* efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
* @spec: Specification to initialise
*/
static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
{
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
spec->loc_mac[0] = 1;
return 0;
}
static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
enum efx_encap_type encap_type)
{
spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
spec->encap_type = encap_type;
}
static inline enum efx_encap_type efx_filter_get_encap_type(
const struct efx_filter_spec *spec)
{
if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
return spec->encap_type;
return EFX_ENCAP_TYPE_NONE;
}
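/* Editor's illustrative sketch (not part of the original file): build an RX
 * drop filter for a given MAC address on VLAN 10. The helper name and the
 * VLAN ID are hypothetical; the spec would still be handed to the NIC via
 * the efx_filter_insert_filter() wrapper in efx.h.
 */
static inline int efx_example_build_drop_filter(struct efx_filter_spec *spec,
						const u8 *addr)
{
	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, 0,
			   EFX_FILTER_RX_DMAQ_ID_DROP);
	return efx_filter_set_eth_local(spec, 10, addr);
}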
#endif /* EFX_FILTER_H */


@ -0,0 +1,310 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#ifndef EFX_IO_H
#define EFX_IO_H
#include <linux/io.h>
#include <linux/spinlock.h>
/**************************************************************************
*
* NIC register I/O
*
**************************************************************************
*
* Notes on locking strategy for the Falcon architecture:
*
* Many CSRs are very wide and cannot be read or written atomically.
* Writes from the host are buffered by the Bus Interface Unit (BIU)
* up to 128 bits. Whenever the host writes part of such a register,
* the BIU collects the written value and does not write to the
* underlying register until all 4 dwords have been written. A
* similar buffering scheme applies to host access to the NIC's 64-bit
* SRAM.
*
* Writes to different CSRs and 64-bit SRAM words must be serialised,
* since interleaved access can result in lost writes. We use
* efx_nic::biu_lock for this.
*
* We also serialise reads from 128-bit CSRs and SRAM with the same
* spinlock. This may not be necessary, but it doesn't really matter
* as there are no such reads on the fast path.
*
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
* 128-bit but are special-cased in the BIU to avoid the need for
* locking in the host:
*
* - They are write-only.
* - The semantics of writing to these registers are such that
* replacing the low 96 bits with zero does not affect functionality.
* - If the host writes to the last dword address of such a register
* (i.e. the high 32 bits) the underlying register will always be
* written. If the collector and the current write together do not
* provide values for all 128 bits of the register, the low 96 bits
* will be written as zero.
* - If the host writes to the address of any other part of such a
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
*
* The EF10 architecture exposes very few registers to the host and
* most of them are only 32 bits wide. The only exceptions are the MC
* doorbell register pair, which has its own latching, and
* TX_DESC_UPD, which works in a similar way to the Falcon
* architecture.
*/
#if BITS_PER_LONG == 64
#define EFX_USE_QWORD_IO 1
#endif
/* Hardware issue requires that only 64-bit naturally aligned writes
* are seen by hardware. It's not strictly necessary to restrict this to
* the x86_64 arch, but it is done for safety since unusual write-combining behaviour
* can break PIO.
*/
#ifdef CONFIG_X86_64
/* PIO is a win only if write-combining is possible */
#ifdef ARCH_HAS_IOREMAP_WC
#define EFX_USE_PIO 1
#endif
#endif
static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
{
return efx->reg_base + reg;
}
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
unsigned int reg)
{
__raw_writeq((__force u64)value, efx->membase + reg);
}
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
{
return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif
static inline void _efx_writed(struct efx_nic *efx, __le32 value,
unsigned int reg)
{
__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
{
return (__force __le32)__raw_readl(efx->membase + reg);
}
/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EFX_OWORD_FMT "\n", reg,
EFX_OWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
_efx_writeq(efx, value->u64[0], reg + 0);
_efx_writeq(efx, value->u64[1], reg + 8);
#else
_efx_writed(efx, value->u32[0], reg + 0);
_efx_writed(efx, value->u32[1], reg + 4);
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
const efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing SRAM address %x with " EFX_QWORD_FMT "\n",
addr, EFX_QWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with "EFX_DWORD_FMT"\n",
reg, EFX_DWORD_VAL(*value));
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
}
/* Read a 128-bit CSR, locking as appropriate. */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got " EFX_OWORD_FMT "\n", reg,
EFX_OWORD_VAL(*value));
}
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from SRAM address %x, got "EFX_QWORD_FMT"\n",
addr, EFX_QWORD_VAL(*value));
}
/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
value->u32[0] = _efx_readd(efx, reg);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got "EFX_DWORD_FMT"\n",
reg, EFX_DWORD_VAL(*value));
}
/* Write a 128-bit CSR forming part of a table */
static inline void
efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
/* Read a 128-bit CSR forming part of a table */
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
/* default VI stride (step between per-VI registers) is 8K on EF10 and
* 64K on EF100
*/
#define EFX_DEFAULT_VI_STRIDE 0x2000
#define EF100_DEFAULT_VI_STRIDE 0x10000
/* Calculate offset to page-mapped register */
static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
unsigned int reg)
{
return page * efx->vi_stride + reg;
}
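/* Editor's note: with the EF10 default stride of 0x2000, page 3 of a register
 * at offset 0x830 maps to 3 * 0x2000 + 0x830 = 0x6830.
 */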
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int page)
{
reg = efx_paged_reg(efx, page, reg);
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EFX_OWORD_FMT "\n", reg,
EFX_OWORD_VAL(*value));
#ifdef EFX_USE_QWORD_IO
_efx_writeq(efx, value->u64[0], reg + 0);
_efx_writeq(efx, value->u64[1], reg + 8);
#else
_efx_writed(efx, value->u32[0], reg + 0);
_efx_writed(efx, value->u32[1], reg + 4);
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
}
#define efx_writeo_page(efx, value, reg, page) \
_efx_writeo_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
* high bits of RX_DESC_UPD or TX_DESC_UPD)
*/
static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg, unsigned int page)
{
efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x180 && \
(reg) != 0x200 && \
(reg) != 0x400 && \
(reg) != 0x420 && \
(reg) != 0x830 && \
(reg) != 0x83c && \
(reg) != 0xa18 && \
(reg) != 0xa1c), \
page)
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
* in the BIU means that writes to TIMER_COMMAND[0] invalidate the
* collector register.
*/
static inline void _efx_writed_page_locked(struct efx_nic *efx,
const efx_dword_t *value,
unsigned int reg,
unsigned int page)
{
unsigned long flags __attribute__ ((unused));
if (page == 0) {
spin_lock_irqsave(&efx->biu_lock, flags);
efx_writed(efx, value, efx_paged_reg(efx, page, reg));
spin_unlock_irqrestore(&efx->biu_lock, flags);
} else {
efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
}
#define efx_writed_page_locked(efx, value, reg, page) \
_efx_writed_page_locked(efx, value, \
reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
page)
#endif /* EFX_IO_H */

File diff suppressed because it is too large


@ -0,0 +1,386 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2008-2013 Solarflare Communications Inc.
*/
#ifndef EFX_MCDI_H
#define EFX_MCDI_H
/**
* enum efx_mcdi_state - MCDI request handling state
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
* mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
* @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
* Only the thread that moved into this state is allowed to move out of it.
* @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
* @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
* indicates we must wait for a proxy "try again" message.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
* %MCDI_STATE_RUNNING.
*/
enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
MCDI_STATE_RUNNING_SYNC,
MCDI_STATE_RUNNING_ASYNC,
MCDI_STATE_PROXY_WAIT,
MCDI_STATE_COMPLETED,
};
/**
* enum efx_mcdi_mode - MCDI transaction mode
* @MCDI_MODE_POLL: poll for MCDI completion, until timeout
* @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
* @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
*/
enum efx_mcdi_mode {
MCDI_MODE_POLL,
MCDI_MODE_EVENTS,
MCDI_MODE_FAIL,
};
/**
* struct efx_mcdi_iface - MCDI protocol context
* @efx: The associated NIC.
* @state: Request handling state. Waited for by @wq.
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
* @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
* @new_epoch: Indicates start of day or start of MC reboot recovery
* @iface_lock: Serialises access to @seqno, @credits and response metadata
* @seqno: The next sequence number to use for mcdi requests.
* @credits: Number of spurious MCDI completion events allowed before we
* trigger a fatal error
* @resprc: Response error/success code (Linux numbering)
* @resprc_raw: Raw response error/success code (MCDI numbering)
* @resp_hdr_len: Response header length
* @resp_data_len: Response data (SDU or error) length
* @async_lock: Serialises access to @async_list while event processing is
* enabled
* @async_list: Queue of asynchronous requests
* @async_timer: Timer for asynchronous request timeout
* @logging_buffer: buffer that may be used to build MCDI tracing messages
* @logging_enabled: whether to trace MCDI
* @proxy_rx_handle: Most recently received proxy authorisation handle
* @proxy_rx_status: Status of most recent proxy authorisation
* @proxy_rx_wq: Wait queue for updates to proxy_rx_handle
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
enum efx_mcdi_state state;
enum efx_mcdi_mode mode;
wait_queue_head_t wq;
spinlock_t iface_lock;
bool new_epoch;
unsigned int credits;
unsigned int seqno;
int resprc;
int resprc_raw;
size_t resp_hdr_len;
size_t resp_data_len;
spinlock_t async_lock;
struct list_head async_list;
struct timer_list async_timer;
#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
char *logging_buffer;
bool logging_enabled;
#endif
unsigned int proxy_rx_handle;
int proxy_rx_status;
wait_queue_head_t proxy_rx_wq;
};
struct efx_mcdi_mon {
struct efx_buffer dma_buf;
struct mutex update_lock;
unsigned long last_update;
struct device *device;
struct efx_mcdi_mon_attribute *attrs;
struct attribute_group group;
const struct attribute_group *groups[2];
unsigned int n_attrs;
};
struct efx_mcdi_mtd_partition {
struct efx_mtd_partition common;
bool updating;
u16 nvram_type;
u16 fw_subtype;
};
#define to_efx_mcdi_mtd_partition(mtd) \
container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
/**
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
* @hwmon: Hardware monitor state
* @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
*/
struct efx_mcdi_data {
struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_SIENA_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
u32 fn_flags;
};
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
EFX_WARN_ON_PARANOID(!efx->mcdi);
return &efx->mcdi->iface;
}
#ifdef CONFIG_SFC_SIENA_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
EFX_WARN_ON_PARANOID(!efx->mcdi);
return &efx->mcdi->hwmon;
}
#endif
int efx_siena_mcdi_init(struct efx_nic *efx);
void efx_siena_mcdi_detach(struct efx_nic *efx);
void efx_siena_mcdi_fini(struct efx_nic *efx);
int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen);
int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
size_t inlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd,
size_t inlen, efx_dword_t *outbuf,
size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
efx_dword_t *outbuf,
size_t outlen_actual);
int efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen,
size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen,
size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd,
size_t inlen, efx_dword_t *outbuf,
size_t outlen, int rc);
int efx_siena_mcdi_poll_reboot(struct efx_nic *efx);
void efx_siena_mcdi_mode_poll(struct efx_nic *efx);
void efx_siena_mcdi_mode_event(struct efx_nic *efx);
void efx_siena_mcdi_flush_async(struct efx_nic *efx);
void efx_siena_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
/* We expect that 16- and 32-bit fields in MCDI requests and responses
* are appropriately aligned, but 64-bit fields are only
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
* memory strictly 32 bits at a time, so add any necessary padding.
*/
#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4)
#define _MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
#define MCDI_DECLARE_BUF(_name, _len) \
_MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
#define MCDI_DECLARE_BUF_ERR(_name) \
MCDI_DECLARE_BUF(_name, 8)
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
#define _MCDI_CHECK_ALIGN(_ofst, _align) \
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1)
#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
_name2, _value2) \
EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2)
#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3) \
EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3)
#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4) \
EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4)
#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5) \
EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5)
#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6) \
EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6)
#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6, _name7, _value7) \
EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6, \
MC_CMD_ ## _name7, _value7)
#define MCDI_SET_QWORD(_buf, _field, _value) \
do { \
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
EFX_DWORD_0, (u32)(_value)); \
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
EFX_DWORD_0, (u64)(_value) >> 32); \
} while (0)
#define MCDI_QWORD(_buf, _field) \
(EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
(u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
#define MCDI_FIELD(_ptr, _type, _field) \
EFX_EXTRACT_DWORD( \
*(efx_dword_t *) \
_MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
(MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
(_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+ (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
#define MCDI_DECLARE_STRUCT_PTR(_name) \
efx_dword_t *_name
#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
#define MCDI_VAR_ARRAY_LEN(_len, _field) \
min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
#define MCDI_ARRAY_WORD(_buf, _field, _index) \
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *) \
_MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
(efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
EFX_DWORD_0, _value)
#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
(efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
do { \
EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
EFX_DWORD_0, (u32)(_value)); \
EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
EFX_DWORD_0, (u64)(_value) >> 32); \
} while (0)
#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
_type ## _TYPEDEF, _field2)
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
#define MCDI_CAPABILITY(field) \
MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN
#define MCDI_CAPABILITY_OFST(field) \
MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST
#define efx_has_cap(efx, field) \
efx->type->check_caps(efx, \
MCDI_CAPABILITY(field), \
MCDI_CAPABILITY_OFST(field))
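/* Usage sketch for the buffer macros above (illustrative only; it reuses
 * the MC_CMD_GET_LINK definitions from mcdi_pcol.h).  A caller declares
 * zero-initialised MCDI buffers (MCDI_SET_DWORD() and friends fill request
 * fields by name when the command takes inputs), issues the command, and
 * decodes the response with the matching accessors:
 *
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
 *	size_t outlen;
 *	u32 mac_fault;
 *	int rc;
 *
 *	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
 *	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 *				outbuf, sizeof(outbuf), &outlen);
 *	if (rc)
 *		efx_siena_mcdi_display_error(efx, MC_CMD_GET_LINK, 0,
 *					     outbuf, outlen, rc);
 *	else
 *		mac_fault = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT);
 *
 * Optional MC capabilities can likewise be tested with
 * efx_has_cap(efx, <field>) before relying on them.
 */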
void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
u32 dest_evq);
int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_siena_mcdi_handle_assertion(struct efx_nic *efx);
int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
int *id_out);
int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx);
int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx);
void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx);
void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx);
enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason);
int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method);
#ifdef CONFIG_SFC_SIENA_MCDI_MON
int efx_siena_mcdi_mon_probe(struct efx_nic *efx);
void efx_siena_mcdi_mon_remove(struct efx_nic *efx);
#else
static inline int efx_siena_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_siena_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_SIENA_MTD
int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, const u8 *buffer);
int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd);
void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part);
#endif
#endif /* EFX_MCDI_H */

View File

@@ -0,0 +1,531 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2011-2013 Solarflare Communications Inc.
*/
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/hwmon.h>
#include <linux/stat.h>
#include "net_driver.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
enum efx_hwmon_type {
EFX_HWMON_UNKNOWN,
EFX_HWMON_TEMP, /* temperature */
EFX_HWMON_COOL, /* cooling device, probably a heatsink */
EFX_HWMON_IN, /* voltage */
EFX_HWMON_CURR, /* current */
EFX_HWMON_POWER, /* power */
EFX_HWMON_TYPES_COUNT
};
static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
[EFX_HWMON_TEMP] = " degC",
[EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
[EFX_HWMON_IN] = " mV",
[EFX_HWMON_CURR] = " mA",
[EFX_HWMON_POWER] = " W",
};
static const struct {
const char *label;
enum efx_hwmon_type hwmon_type;
int port;
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port) \
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
SENSOR(IN_1V0, "1.0V supply", IN, -1),
SENSOR(IN_1V2, "1.2V supply", IN, -1),
SENSOR(IN_1V8, "1.8V supply", IN, -1),
SENSOR(IN_2V5, "2.5V supply", IN, -1),
SENSOR(IN_3V3, "3.3V supply", IN, -1),
SENSOR(IN_12V0, "12.0V supply", IN, -1),
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
SENSOR(IN_VREF, "Ref. voltage", IN, -1),
SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
SENSOR(PSU_TEMP, "Controller regulator temp.",
TEMP, -1),
SENSOR(FAN_0, "Fan 0", COOL, -1),
SENSOR(FAN_1, "Fan 1", COOL, -1),
SENSOR(FAN_2, "Fan 2", COOL, -1),
SENSOR(FAN_3, "Fan 3", COOL, -1),
SENSOR(FAN_4, "Fan 4", COOL, -1),
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
SENSOR(NIC_POWER, "Board power use", POWER, -1),
SENSOR(IN_0V9, "0.9V supply", IN, -1),
SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
SENSOR(CONTROLLER_VPTAT,
"Controller PTAT voltage (int. ADC)", IN, -1),
SENSOR(CONTROLLER_INTERNAL_TEMP,
"Controller die temp. (int. ADC)", TEMP, -1),
SENSOR(CONTROLLER_VPTAT_EXTADC,
"Controller PTAT voltage (ext. ADC)", IN, -1),
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
"Controller die temp. (ext. ADC)", TEMP, -1),
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
#undef SENSOR
};
static const char *const sensor_status_names[] = {
[MC_CMD_SENSOR_STATE_OK] = "OK",
[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
[MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
unsigned int type, state, value;
enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
const char *name = NULL, *state_txt, *unit;
type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
/* Deal gracefully with the board having more sensors than we
* know about, but do not expect new sensor states. */
if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
name = efx_mcdi_sensor_type[type].label;
hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
}
if (!name)
name = "No sensor name available";
EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
unit = efx_hwmon_unit[hwmon_type];
if (!unit)
unit = "";
netif_err(efx, hw, efx->net_dev,
"Sensor %d (%s) reports condition '%s' for value %d%s\n",
type, name, state_txt, value, unit);
}
#ifdef CONFIG_SFC_SIENA_MCDI_MON
struct efx_mcdi_mon_attribute {
struct device_attribute dev_attr;
unsigned int index;
unsigned int type;
enum efx_hwmon_type hwmon_type;
unsigned int limit_value;
char name[12];
};
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
int rc;
MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
hwmon->dma_buf.dma_addr);
MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
rc = efx_siena_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
inbuf, sizeof(inbuf), NULL, 0, NULL);
if (rc == 0)
hwmon->last_update = jiffies;
return rc;
}
static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
efx_dword_t *entry)
{
struct efx_nic *efx = dev_get_drvdata(dev->parent);
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
int rc;
BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
mutex_lock(&hwmon->update_lock);
/* Use cached value if last update was < 1 s ago */
if (time_before(jiffies, hwmon->last_update + HZ))
rc = 0;
else
rc = efx_mcdi_mon_update(efx);
/* Copy out the requested entry */
*entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
mutex_unlock(&hwmon->update_lock);
return rc;
}
static ssize_t efx_mcdi_mon_show_value(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
efx_dword_t entry;
unsigned int value, state;
int rc;
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
if (rc)
return rc;
state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
if (state == MC_CMD_SENSOR_STATE_NO_READING)
return -EBUSY;
value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
switch (mon_attr->hwmon_type) {
case EFX_HWMON_TEMP:
/* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
break;
case EFX_HWMON_POWER:
/* Convert power from watts to microwatts */
value *= 1000000;
break;
default:
/* No conversion needed */
break;
}
return sprintf(buf, "%u\n", value);
}
static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
unsigned int value;
value = mon_attr->limit_value;
switch (mon_attr->hwmon_type) {
case EFX_HWMON_TEMP:
/* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
break;
case EFX_HWMON_POWER:
/* Convert power from watts to microwatts */
value *= 1000000;
break;
default:
/* No conversion needed */
break;
}
return sprintf(buf, "%u\n", value);
}
static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
efx_dword_t entry;
int state;
int rc;
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
if (rc)
return rc;
state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
}
static ssize_t efx_mcdi_mon_show_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
return sprintf(buf, "%s\n",
efx_mcdi_sensor_type[mon_attr->type].label);
}
static void
efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
ssize_t (*reader)(struct device *,
struct device_attribute *, char *),
unsigned int index, unsigned int type,
unsigned int limit_value)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
strlcpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
else
attr->hwmon_type = EFX_HWMON_UNKNOWN;
attr->limit_value = limit_value;
sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
attr->dev_attr.attr.mode = 0444;
attr->dev_attr.show = reader;
hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
}
int efx_siena_mcdi_mon_probe(struct efx_nic *efx)
{
unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
unsigned int n_pages, n_sensors, n_attrs, page;
size_t outlen;
char name[12];
u32 mask;
int rc, i, j, type;
/* Find out how many sensors are present */
n_sensors = 0;
page = 0;
do {
MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf,
sizeof(inbuf), outbuf, sizeof(outbuf),
&outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
return -EIO;
mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
++page;
} while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
n_pages = page;
/* Don't create a device if there are none */
if (n_sensors == 0)
return 0;
rc = efx_siena_alloc_buffer(efx, &hwmon->dma_buf,
n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
GFP_KERNEL);
if (rc)
return rc;
mutex_init(&hwmon->update_lock);
efx_mcdi_mon_update(efx);
/* Allocate space for the maximum possible number of
* attributes for this set of sensors:
* value, min, max, crit, alarm and label for each sensor.
*/
n_attrs = 6 * n_sensors;
hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
if (!hwmon->attrs) {
rc = -ENOMEM;
goto fail;
}
hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
GFP_KERNEL);
if (!hwmon->group.attrs) {
rc = -ENOMEM;
goto fail;
}
for (i = 0, j = -1, type = -1; ; i++) {
enum efx_hwmon_type hwmon_type;
const char *hwmon_prefix;
unsigned hwmon_index;
u16 min1, max1, min2, max2;
/* Find next sensor type or exit if there is none */
do {
type++;
if ((type % 32) == 0) {
page = type / 32;
j = -1;
if (page == n_pages)
goto hwmon_register;
MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
page);
rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf),
&outlen);
if (rc)
goto fail;
if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
rc = -EIO;
goto fail;
}
mask = (MCDI_DWORD(outbuf,
SENSOR_INFO_OUT_MASK) &
~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
/* Check again for short response */
if (outlen <
MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
rc = -EIO;
goto fail;
}
}
} while (!(mask & (1 << type % 32)));
j++;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
/* Skip sensors specific to a different port */
if (hwmon_type != EFX_HWMON_UNKNOWN &&
efx_mcdi_sensor_type[type].port >= 0 &&
efx_mcdi_sensor_type[type].port !=
efx_port_num(efx))
continue;
} else {
hwmon_type = EFX_HWMON_UNKNOWN;
}
switch (hwmon_type) {
case EFX_HWMON_TEMP:
hwmon_prefix = "temp";
hwmon_index = ++n_temp; /* 1-based */
break;
case EFX_HWMON_COOL:
/* This is likely to be a heatsink, but there
* is no convention for representing cooling
* devices other than fans.
*/
hwmon_prefix = "fan";
hwmon_index = ++n_cool; /* 1-based */
break;
default:
hwmon_prefix = "in";
hwmon_index = n_in++; /* 0-based */
break;
case EFX_HWMON_CURR:
hwmon_prefix = "curr";
hwmon_index = ++n_curr; /* 1-based */
break;
case EFX_HWMON_POWER:
hwmon_prefix = "power";
hwmon_index = ++n_power; /* 1-based */
break;
}
min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, j, MIN1);
max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, j, MAX1);
min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, j, MIN2);
max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, j, MAX2);
if (min1 != max1) {
snprintf(name, sizeof(name), "%s%u_input",
hwmon_prefix, hwmon_index);
efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_value, i, type, 0);
if (hwmon_type != EFX_HWMON_POWER) {
snprintf(name, sizeof(name), "%s%u_min",
hwmon_prefix, hwmon_index);
efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_limit,
i, type, min1);
}
snprintf(name, sizeof(name), "%s%u_max",
hwmon_prefix, hwmon_index);
efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_limit,
i, type, max1);
if (min2 != max2) {
/* Assume max2 is critical value.
* But we have no good way to expose min2.
*/
snprintf(name, sizeof(name), "%s%u_crit",
hwmon_prefix, hwmon_index);
efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_limit,
i, type, max2);
}
}
snprintf(name, sizeof(name), "%s%u_alarm",
hwmon_prefix, hwmon_index);
efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
efx_mcdi_sensor_type[type].label) {
snprintf(name, sizeof(name), "%s%u_label",
hwmon_prefix, hwmon_index);
efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_label, i, type, 0);
}
}
hwmon_register:
hwmon->groups[0] = &hwmon->group;
hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
KBUILD_MODNAME, NULL,
hwmon->groups);
if (IS_ERR(hwmon->device)) {
rc = PTR_ERR(hwmon->device);
goto fail;
}
return 0;
fail:
efx_siena_mcdi_mon_remove(efx);
return rc;
}
void efx_siena_mcdi_mon_remove(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
if (hwmon->device)
hwmon_device_unregister(hwmon->device);
kfree(hwmon->attrs);
kfree(hwmon->group.attrs);
efx_siena_free_buffer(efx, &hwmon->dma_buf);
}
#endif /* CONFIG_SFC_SIENA_MCDI_MON */

File diff suppressed because it is too large

View File

@@ -0,0 +1,110 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2009-2013 Solarflare Communications Inc.
*/
/*
* Driver for PHY related operations via MCDI.
*/
#include <linux/slab.h>
#include "efx.h"
#include "mcdi_port.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"
#include "mcdi_port_common.h"
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
struct efx_nic *efx = netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
}
static int efx_mcdi_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
struct efx_nic *efx = netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
}
bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
size_t outlength;
int rc;
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc)
return true;
return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
int efx_siena_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
/* Set up MDIO structure for PHY */
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->mdio.mdio_read = efx_mcdi_mdio_read;
efx->mdio.mdio_write = efx_mcdi_mdio_write;
/* Fill out MDIO structure, loopback modes, and initial link state */
rc = efx_siena_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
return efx_siena_mcdi_mac_init_stats(efx);
}
void efx_siena_mcdi_port_remove(struct efx_nic *efx)
{
efx_siena_mcdi_phy_remove(efx);
efx_siena_mcdi_mac_fini_stats(efx);
}

View File

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2008-2013 Solarflare Communications Inc.
* Copyright 2019-2020 Xilinx Inc.
*/
#ifndef EFX_MCDI_PORT_H
#define EFX_MCDI_PORT_H
#include "net_driver.h"
bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx);
int efx_siena_mcdi_port_probe(struct efx_nic *efx);
void efx_siena_mcdi_port_remove(struct efx_nic *efx);
#endif /* EFX_MCDI_PORT_H */

File diff suppressed because it is too large

View File

@@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_MCDI_PORT_COMMON_H
#define EFX_MCDI_PORT_COMMON_H
#include "net_driver.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
struct efx_mcdi_phy_data {
u32 flags;
u32 type;
u32 supported_cap;
u32 channel;
u32 port;
u32 stats_mask;
u8 name[20];
u32 media;
u32 mmd_mask;
u8 revision[20];
u32 forced_cap;
};
void efx_siena_link_set_advertising(struct efx_nic *efx,
const unsigned long *advertising);
bool efx_siena_mcdi_phy_poll(struct efx_nic *efx);
int efx_siena_mcdi_phy_probe(struct efx_nic *efx);
void efx_siena_mcdi_phy_remove(struct efx_nic *efx);
void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
struct ethtool_link_ksettings *cmd);
int efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
const struct ethtool_link_ksettings *cmd);
int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx,
struct ethtool_fecparam *fec);
int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx,
const struct ethtool_fecparam *fec);
int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx);
int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx);
int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
unsigned int flags);
const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx,
unsigned int index);
int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
struct ethtool_eeprom *ee, u8 *data);
int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx,
struct ethtool_modinfo *modinfo);
int efx_siena_mcdi_set_mac(struct efx_nic *efx);
int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx);
void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx);
#endif

View File

@@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "efx.h"
#define to_efx_mtd_partition(mtd) \
container_of(mtd, struct efx_mtd_partition, mtd)
/* MTD interface */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
struct efx_nic *efx = mtd->priv;
return efx->type->mtd_erase(mtd, erase->addr, erase->len);
}
static void efx_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
int rc;
rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
part->name, part->dev_type_name, rc);
}
static void efx_siena_mtd_remove_partition(struct efx_mtd_partition *part)
{
int rc;
for (;;) {
rc = mtd_device_unregister(&part->mtd);
if (rc != -EBUSY)
break;
ssleep(1);
}
WARN_ON(rc);
list_del(&part->node);
}
int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
size_t n_parts, size_t sizeof_part)
{
struct efx_mtd_partition *part;
size_t i;
for (i = 0; i < n_parts; i++) {
part = (struct efx_mtd_partition *)((char *)parts +
i * sizeof_part);
part->mtd.writesize = 1;
if (!(part->mtd.flags & MTD_NO_ERASE))
part->mtd.flags |= MTD_WRITEABLE;
part->mtd.owner = THIS_MODULE;
part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = efx_mtd_erase;
part->mtd._read = efx->type->mtd_read;
part->mtd._write = efx->type->mtd_write;
part->mtd._sync = efx_mtd_sync;
efx->type->mtd_rename(part);
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
/* Add to list in order - efx_siena_mtd_remove() depends on this */
list_add_tail(&part->node, &efx->mtd_list);
}
return 0;
fail:
while (i--) {
part = (struct efx_mtd_partition *)((char *)parts +
i * sizeof_part);
efx_siena_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
return -ENOMEM;
}
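/* Illustrative caller layout (the struct name is hypothetical): each
 * implementation embeds struct efx_mtd_partition at the start of its own
 * per-partition structure and passes the outer size as @sizeof_part, which
 * is why the loop above steps through the array by sizeof_part rather than
 * by sizeof(struct efx_mtd_partition):
 *
 *	struct example_mtd_partition {
 *		struct efx_mtd_partition common;
 *		unsigned int nvram_type;
 *	} parts[N_PARTS];
 *
 *	rc = efx_siena_mtd_add(efx, &parts[0].common, N_PARTS,
 *			       sizeof(parts[0]));
 */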
void efx_siena_mtd_remove(struct efx_nic *efx)
{
struct efx_mtd_partition *parts, *part, *next;
WARN_ON(efx_dev_registered(efx));
if (list_empty(&efx->mtd_list))
return;
parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
node);
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
efx_siena_mtd_remove_partition(part);
kfree(parts);
}
void efx_siena_mtd_rename(struct efx_nic *efx)
{
struct efx_mtd_partition *part;
ASSERT_RTNL();
list_for_each_entry(part, &efx->mtd_list, node)
efx->type->mtd_rename(part);
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,530 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi_pcol.h"
/**************************************************************************
*
* Generic buffer handling
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
&buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
return 0;
}
void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
if (buffer->addr) {
dma_free_coherent(&efx->pci_dev->dev, buffer->len,
buffer->addr, buffer->dma_addr);
buffer->addr = NULL;
}
}
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
bool efx_siena_event_present(struct efx_channel *channel)
{
return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
void efx_siena_event_test_start(struct efx_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
channel->efx->type->ev_test_generate(channel);
}
int efx_siena_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
return efx->type->irq_test_generate(efx);
}
/* Hook interrupt handler(s)
* Try MSI and then legacy interrupts.
*/
int efx_siena_init_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
unsigned int n_irqs;
int rc;
if (!EFX_INT_MODE_USE_MSI(efx)) {
rc = request_irq(efx->legacy_irq,
efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook legacy IRQ %d\n",
efx->pci_dev->irq);
goto fail1;
}
efx->irqs_hooked = true;
return 0;
}
#ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
efx->net_dev->rx_cpu_rmap =
alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap) {
rc = -ENOMEM;
goto fail1;
}
}
#endif
/* Hook MSI or MSI-X interrupt */
n_irqs = 0;
efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */
efx->msi_context[channel->channel].name,
&efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
goto fail2;
}
++n_irqs;
#ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
channel->channel < efx->n_rx_channels) {
rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
channel->irq);
if (rc)
goto fail2;
}
#endif
}
efx->irqs_hooked = true;
return 0;
fail2:
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
}
void efx_siena_fini_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
if (!efx->irqs_hooked)
return;
if (EFX_INT_MODE_USE_MSI(efx)) {
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
free_irq(channel->irq,
&efx->msi_context[channel->channel]);
} else {
/* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
}
efx->irqs_hooked = false;
}
/* Register dump */
#define REGISTER_REVISION_FA 1
#define REGISTER_REVISION_FB 2
#define REGISTER_REVISION_FC 3
#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
#define REGISTER_REVISION_ED 4
#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
struct efx_nic_reg {
u32 offset:24;
u32 min_revision:3, max_revision:3;
};
#define REGISTER(name, arch, min_rev, max_rev) { \
arch ## R_ ## min_rev ## max_rev ## _ ## name, \
REGISTER_REVISION_ ## arch ## min_rev, \
REGISTER_REVISION_ ## arch ## max_rev \
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
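/* For example (macro expansion only, shown for clarity):
 *
 *	REGISTER_AB(NIC_STAT)
 *
 * expands to
 *
 *	{ FR_AB_NIC_STAT, REGISTER_REVISION_FA, REGISTER_REVISION_FB }
 *
 * i.e. register FR_AB_NIC_STAT is dumped only for Falcon-architecture
 * revisions A (1) through B (2).
 */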
static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
REGISTER_AZ(INT_EN_KER),
REGISTER_BZ(INT_EN_CHAR),
REGISTER_AZ(INT_ADR_KER),
REGISTER_BZ(INT_ADR_CHAR),
/* INT_ACK_KER is WO */
/* INT_ISR0 is RC */
REGISTER_AZ(HW_INIT),
REGISTER_CZ(USR_EV_CFG),
REGISTER_AB(EE_SPI_HCMD),
REGISTER_AB(EE_SPI_HADR),
REGISTER_AB(EE_SPI_HDATA),
REGISTER_AB(EE_BASE_PAGE),
REGISTER_AB(EE_VPD_CFG0),
/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
/* PCIE_CORE_INDIRECT is indirect */
REGISTER_AB(NIC_STAT),
REGISTER_AB(GPIO_CTL),
REGISTER_AB(GLB_CTL),
/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
REGISTER_BZ(DP_CTRL),
REGISTER_AZ(MEM_STAT),
REGISTER_AZ(CS_DEBUG),
REGISTER_AZ(ALTERA_BUILD),
REGISTER_AZ(CSR_SPARE),
REGISTER_AB(PCIE_SD_CTL0123),
REGISTER_AB(PCIE_SD_CTL45),
REGISTER_AB(PCIE_PCS_CTL_STAT),
/* DEBUG_DATA_OUT is not used */
/* DRV_EV is WO */
REGISTER_AZ(EVQ_CTL),
REGISTER_AZ(EVQ_CNT1),
REGISTER_AZ(EVQ_CNT2),
REGISTER_AZ(BUF_TBL_CFG),
REGISTER_AZ(SRM_RX_DC_CFG),
REGISTER_AZ(SRM_TX_DC_CFG),
REGISTER_AZ(SRM_CFG),
/* BUF_TBL_UPD is WO */
REGISTER_AZ(SRM_UPD_EVQ),
REGISTER_AZ(SRAM_PARITY),
REGISTER_AZ(RX_CFG),
REGISTER_BZ(RX_FILTER_CTL),
/* RX_FLUSH_DESCQ is WO */
REGISTER_AZ(RX_DC_CFG),
REGISTER_AZ(RX_DC_PF_WM),
REGISTER_BZ(RX_RSS_TKEY),
/* RX_NODESC_DROP is RC */
REGISTER_AA(RX_SELF_RST),
/* RX_DEBUG, RX_PUSH_DROP are not used */
REGISTER_CZ(RX_RSS_IPV6_REG1),
REGISTER_CZ(RX_RSS_IPV6_REG2),
REGISTER_CZ(RX_RSS_IPV6_REG3),
/* TX_FLUSH_DESCQ is WO */
REGISTER_AZ(TX_DC_CFG),
REGISTER_AA(TX_CHKSM_CFG),
REGISTER_AZ(TX_CFG),
/* TX_PUSH_DROP is not used */
REGISTER_AZ(TX_RESERVED),
REGISTER_BZ(TX_PACE),
/* TX_PACE_DROP_QID is RC */
REGISTER_BB(TX_VLAN),
REGISTER_BZ(TX_IPFIL_PORTEN),
REGISTER_AB(MD_TXD),
REGISTER_AB(MD_RXD),
REGISTER_AB(MD_CS),
REGISTER_AB(MD_PHY_ADR),
REGISTER_AB(MD_ID),
/* MD_STAT is RC */
REGISTER_AB(MAC_STAT_DMA),
REGISTER_AB(MAC_CTRL),
REGISTER_BB(GEN_MODE),
REGISTER_AB(MAC_MC_HASH_REG0),
REGISTER_AB(MAC_MC_HASH_REG1),
REGISTER_AB(GM_CFG1),
REGISTER_AB(GM_CFG2),
/* GM_IPG and GM_HD are not used */
REGISTER_AB(GM_MAX_FLEN),
/* GM_TEST is not used */
REGISTER_AB(GM_ADR1),
REGISTER_AB(GM_ADR2),
REGISTER_AB(GMF_CFG0),
REGISTER_AB(GMF_CFG1),
REGISTER_AB(GMF_CFG2),
REGISTER_AB(GMF_CFG3),
REGISTER_AB(GMF_CFG4),
REGISTER_AB(GMF_CFG5),
REGISTER_BB(TX_SRC_MAC_CTL),
REGISTER_AB(XM_ADR_LO),
REGISTER_AB(XM_ADR_HI),
REGISTER_AB(XM_GLB_CFG),
REGISTER_AB(XM_TX_CFG),
REGISTER_AB(XM_RX_CFG),
REGISTER_AB(XM_MGT_INT_MASK),
REGISTER_AB(XM_FC),
REGISTER_AB(XM_PAUSE_TIME),
REGISTER_AB(XM_TX_PARAM),
REGISTER_AB(XM_RX_PARAM),
/* XM_MGT_INT_MSK (note no 'A') is RC */
REGISTER_AB(XX_PWR_RST),
REGISTER_AB(XX_SD_CTL),
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
};
struct efx_nic_reg_table {
u32 offset:24;
u32 min_revision:3, max_revision:3;
u32 step:6, rows:21;
};
#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
offset, \
REGISTER_REVISION_ ## arch ## min_rev, \
REGISTER_REVISION_ ## arch ## max_rev, \
step, rows \
}
#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
arch, min_rev, max_rev, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name) \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
FR_BZ_ ## name ## _STEP, \
FR_BB_ ## name ## _ROWS), \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* DRIVER is not used */
/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
REGISTER_TABLE_BB(TX_IPFIL_TBL),
REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
/* We can't reasonably read all of the buffer table (up to 8MB!).
* However this driver will only use a few entries. Reading
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
F, A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
F, B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
/* TX_FILTER_TBL0 is huge and not used by this driver */
REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
REGISTER_TABLE_CZ(MC_TREG_SMEM),
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t efx_siena_get_regs_len(struct efx_nic *efx)
{
const struct efx_nic_reg *reg;
const struct efx_nic_reg_table *table;
size_t len = 0;
for (reg = efx_nic_regs;
reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
reg++)
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision)
len += sizeof(efx_oword_t);
for (table = efx_nic_reg_tables;
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
table++)
if (efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision)
len += table->rows * min_t(size_t, table->step, 16);
return len;
}
void efx_siena_get_regs(struct efx_nic *efx, void *buf)
{
const struct efx_nic_reg *reg;
const struct efx_nic_reg_table *table;
for (reg = efx_nic_regs;
reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
reg++) {
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision) {
efx_reado(efx, (efx_oword_t *)buf, reg->offset);
buf += sizeof(efx_oword_t);
}
}
for (table = efx_nic_reg_tables;
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
table++) {
size_t size, i;
if (!(efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision))
continue;
size = min_t(size_t, table->step, 16);
for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit SRAM */
efx_readd(efx, buf, table->offset + 4 * i);
break;
case 8: /* 64-bit SRAM */
efx_sram_readq(efx,
efx->membase + table->offset,
buf, i);
break;
case 16: /* 128-bit-readable register */
efx_reado_table(efx, buf, table->offset, i);
break;
case 32: /* 128-bit register, interleaved */
efx_reado_table(efx, buf, table->offset, 2 * i);
break;
default:
WARN_ON(1);
return;
}
buf += size;
}
}
}
/**
* efx_siena_describe_stats - Describe supported statistics for ethtool
* @desc: Array of &struct efx_hw_stat_desc describing the statistics
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @names: Buffer to copy names to, or %NULL. The names are copied
* starting at intervals of %ETH_GSTRING_LEN bytes.
*
* Returns the number of visible statistics, i.e. the number of set
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
strlcpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
++visible;
}
}
return visible;
}
/**
* efx_siena_update_stats - Convert statistics DMA buffer to array of u64
* @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
* layout. DMA widths of 0, 16, 32 and 64 are supported; where
* the width is specified as 0 the corresponding element of
* @stats is not updated.
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @stats: Buffer to update with the converted statistics. The length
* of this array must be at least @count.
* @dma_buf: DMA buffer containing hardware statistics
* @accumulate: If set, the converted values will be added rather than
* directly stored to the corresponding elements of @stats
*/
void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask,
u64 *stats, const void *dma_buf, bool accumulate)
{
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].dma_width) {
const void *addr = dma_buf + desc[index].offset;
u64 val;
switch (desc[index].dma_width) {
case 16:
val = le16_to_cpup((__le16 *)addr);
break;
case 32:
val = le32_to_cpup((__le32 *)addr);
break;
case 64:
val = le64_to_cpup((__le64 *)addr);
break;
default:
WARN_ON(1);
val = 0;
break;
}
if (accumulate)
stats[index] += val;
else
stats[index] = val;
}
}
}
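/* Illustrative pairing of the two helpers above (the EX_STAT_* names are
 * hypothetical): a NIC type keeps one descriptor table plus an enable
 * mask, reports stat names to ethtool with efx_siena_describe_stats()
 * and converts the statistics DMA buffer with efx_siena_update_stats():
 *
 *	static const struct efx_hw_stat_desc ex_stat_desc[EX_STAT_COUNT];
 *	static DECLARE_BITMAP(ex_stat_mask, EX_STAT_COUNT);
 *
 *	n_names = efx_siena_describe_stats(ex_stat_desc, EX_STAT_COUNT,
 *					   ex_stat_mask, strings);
 *	efx_siena_update_stats(ex_stat_desc, EX_STAT_COUNT, ex_stat_mask,
 *			       nic_data->stats, efx->stats_buffer.addr,
 *			       false);
 */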
void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
{
/* if down, or this is the first update after coming up */
if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
efx->rx_nodesc_drops_while_down +=
*rx_nodesc_drops - efx->rx_nodesc_drops_total;
efx->rx_nodesc_drops_total = *rx_nodesc_drops;
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}

View File

@@ -0,0 +1,206 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#ifndef EFX_NIC_H
#define EFX_NIC_H
#include "nic_common.h"
#include "efx.h"
u32 efx_farch_fpga_ver(struct efx_nic *efx);
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
PHY_TYPE_88E1111 = 2,
PHY_TYPE_SFX7101 = 3,
PHY_TYPE_QT2022C2 = 4,
PHY_TYPE_PM8358 = 6,
PHY_TYPE_SFT9001A = 8,
PHY_TYPE_QT2025C = 9,
PHY_TYPE_SFT9001B = 10,
};
enum {
SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
SIENA_STAT_tx_good_bytes,
SIENA_STAT_tx_bad_bytes,
SIENA_STAT_tx_packets,
SIENA_STAT_tx_bad,
SIENA_STAT_tx_pause,
SIENA_STAT_tx_control,
SIENA_STAT_tx_unicast,
SIENA_STAT_tx_multicast,
SIENA_STAT_tx_broadcast,
SIENA_STAT_tx_lt64,
SIENA_STAT_tx_64,
SIENA_STAT_tx_65_to_127,
SIENA_STAT_tx_128_to_255,
SIENA_STAT_tx_256_to_511,
SIENA_STAT_tx_512_to_1023,
SIENA_STAT_tx_1024_to_15xx,
SIENA_STAT_tx_15xx_to_jumbo,
SIENA_STAT_tx_gtjumbo,
SIENA_STAT_tx_collision,
SIENA_STAT_tx_single_collision,
SIENA_STAT_tx_multiple_collision,
SIENA_STAT_tx_excessive_collision,
SIENA_STAT_tx_deferred,
SIENA_STAT_tx_late_collision,
SIENA_STAT_tx_excessive_deferred,
SIENA_STAT_tx_non_tcpudp,
SIENA_STAT_tx_mac_src_error,
SIENA_STAT_tx_ip_src_error,
SIENA_STAT_rx_bytes,
SIENA_STAT_rx_good_bytes,
SIENA_STAT_rx_bad_bytes,
SIENA_STAT_rx_packets,
SIENA_STAT_rx_good,
SIENA_STAT_rx_bad,
SIENA_STAT_rx_pause,
SIENA_STAT_rx_control,
SIENA_STAT_rx_unicast,
SIENA_STAT_rx_multicast,
SIENA_STAT_rx_broadcast,
SIENA_STAT_rx_lt64,
SIENA_STAT_rx_64,
SIENA_STAT_rx_65_to_127,
SIENA_STAT_rx_128_to_255,
SIENA_STAT_rx_256_to_511,
SIENA_STAT_rx_512_to_1023,
SIENA_STAT_rx_1024_to_15xx,
SIENA_STAT_rx_15xx_to_jumbo,
SIENA_STAT_rx_gtjumbo,
SIENA_STAT_rx_bad_gtjumbo,
SIENA_STAT_rx_overflow,
SIENA_STAT_rx_false_carrier,
SIENA_STAT_rx_symbol_error,
SIENA_STAT_rx_align_error,
SIENA_STAT_rx_length_error,
SIENA_STAT_rx_internal_error,
SIENA_STAT_rx_nodesc_drop_cnt,
SIENA_STAT_COUNT
};
/**
* struct siena_nic_data - Siena NIC state
* @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
* @vf: Array of &struct siena_vf objects
* @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
* @vfdi_status: Common VFDI status page to be DMAed to VF address space.
* @local_addr_list: List of local addresses. Protected by %local_lock.
* @local_page_list: List of DMA addressable pages used to broadcast
* %local_addr_list. Protected by %local_lock.
* @local_lock: Mutex protecting %local_addr_list and %local_page_list.
* @peer_work: Work item to broadcast peer addresses to VMs.
*/
struct siena_nic_data {
struct efx_nic *efx;
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SIENA_SRIOV
struct siena_vf *vf;
struct efx_channel *vfdi_channel;
unsigned vf_buftbl_base;
struct efx_buffer vfdi_status;
struct list_head local_addr_list;
struct list_head local_page_list;
struct mutex local_lock;
struct work_struct peer_work;
#endif
};
extern const struct efx_nic_type siena_a0_nic_type;
int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* Falcon/Siena queue operations */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
int efx_farch_ev_probe(struct efx_channel *channel);
int efx_farch_ev_init(struct efx_channel *channel);
void efx_farch_ev_fini(struct efx_channel *channel);
void efx_farch_ev_remove(struct efx_channel *channel);
int efx_farch_ev_process(struct efx_channel *channel, int quota);
void efx_farch_ev_read_ack(struct efx_channel *channel);
void efx_farch_ev_test_generate(struct efx_channel *channel);
/* Falcon/Siena filter operations */
int efx_farch_filter_table_probe(struct efx_nic *efx);
void efx_farch_filter_table_restore(struct efx_nic *efx);
void efx_farch_filter_table_remove(struct efx_nic *efx);
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace);
int efx_farch_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id);
int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority, u32 filter_id,
struct efx_filter_spec *);
int efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority, u32 *buf,
u32 size);
#ifdef CONFIG_RFS_ACCEL
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
/* Falcon/Siena interrupts */
void efx_farch_irq_enable_master(struct efx_nic *efx);
int efx_farch_irq_test_generate(struct efx_nic *efx);
void efx_farch_irq_disable_master(struct efx_nic *efx);
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
/* Global Resources */
void efx_siena_prepare_flush(struct efx_nic *efx);
int efx_farch_fini_dmaq(struct efx_nic *efx);
void efx_farch_finish_flr(struct efx_nic *efx);
void siena_finish_flush(struct efx_nic *efx);
void falcon_start_nic_stats(struct efx_nic *efx);
void falcon_stop_nic_stats(struct efx_nic *efx);
int falcon_reset_xaui(struct efx_nic *efx);
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
/* Tests */
struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs);
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
efx_qword_t *event);
#endif /* EFX_NIC_H */

View File

@@ -0,0 +1,251 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
* Copyright 2019-2020 Xilinx Inc.
*/
#ifndef EFX_NIC_COMMON_H
#define EFX_NIC_COMMON_H
#include "net_driver.h"
#include "efx_common.h"
#include "mcdi.h"
#include "ptp.h"
enum {
/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
* They are not supported by this driver but these revision numbers
* form part of the ethtool API for register dumping.
*/
EFX_REV_SIENA_A0 = 3,
EFX_REV_HUNT_A0 = 4,
EFX_REV_EF100 = 5,
};
static inline int efx_nic_rev(struct efx_nic *efx)
{
return efx->type->revision;
}
/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
return ((efx_qword_t *) (channel->eventq.buf.addr)) +
(index & channel->eventq_mask);
}
/* See if an event is present
*
* We check both the high and low dword of the event for all ones. We
* wrote all ones when we cleared the event, and no valid event can
* have all ones in either its high or low dwords. This approach is
* robust against reordering.
*
* Note that using a single 64-bit comparison is incorrect; even
* though the CPU read will be atomic, the DMA write may not be.
*/
static inline int efx_event_present(efx_qword_t *event)
{
return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
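/* Illustrative poll loop built from the helpers above (a sketch, not the
 * driver's actual ev_process implementation):
 *
 *	unsigned int read_ptr = channel->eventq_read_ptr;
 *	efx_qword_t *p_event = efx_event(channel, read_ptr);
 *
 *	while (efx_event_present(p_event)) {
 *		(decode and handle *p_event here)
 *		EFX_SET_QWORD(*p_event);	(consume: write all ones)
 *		p_event = efx_event(channel, ++read_ptr);
 *	}
 *	channel->eventq_read_ptr = read_ptr;
 */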
/* Returns a pointer to the specified transmit descriptor in the TX
* descriptor queue belonging to the specified channel.
*/
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
/* Report whether this TX queue would be empty for the given write_count.
* May return false negative.
*/
static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,
* Falcon and Siena have hardware bugs (SF bug 33851) that may be
* triggered if we don't check this.
* We use the write_count used for the last doorbell push, to get the
* NIC's view of the tx queue.
*/
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
return was_empty && tx_queue->write_count - write_count == 1;
}
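/* Sketch of how a tx_write implementation is expected to use the helper
 * above (illustrative; the Falcon/Siena version lives in
 * efx_farch_tx_write()):
 *
 *	unsigned int old_write_count = tx_queue->write_count;
 *
 *	(fill descriptors, advancing tx_queue->write_count)
 *
 *	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count))
 *		(push the first new descriptor along with the doorbell)
 *	else
 *		(just write the updated pointer to the doorbell register)
 */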
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
#define EFX_BUF_SIZE EFX_PAGE_SIZE
/* NIC-generic software stats */
enum {
GENERIC_STAT_rx_noskb_drops,
GENERIC_STAT_rx_nodesc_trunc,
GENERIC_STAT_COUNT
};
#define EFX_GENERIC_SW_STAT(ext_name) \
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
if (tx_queue->efx->type->tx_remove)
tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_write(tx_queue);
}
/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_write(rx_queue);
}
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_defer_refill(rx_queue);
}
/* Event data path */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
return channel->efx->type->ev_probe(channel);
}
static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
return channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
channel->efx->type->ev_remove(channel);
}
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
return channel->efx->type->ev_process(channel, quota);
}
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
void efx_siena_event_test_start(struct efx_channel *channel);
bool efx_siena_event_present(struct efx_channel *channel);
static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
if (efx->type->sensor_event)
efx->type->sensor_event(efx, ev);
}
static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
{
return efx->type->rx_recycle_ring_size(efx);
}
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
* after those contributing to A, the computed value may be lower than
* the true value by some variable amount, and may decrease between
* subsequent computations.
*
* We should never allow statistics to decrease or to exceed the true
* value. Since the computed value will never be greater than the
* true value, we can achieve this by only storing the computed value
* when it increases.
*/
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
if ((s64)(diff - *stat) > 0)
*stat = diff;
}
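/* Worked example for the helper above (a sketch of how the Siena stats
 * code might use it; see the SIENA_STAT_* indices in nic.h):
 *
 *	efx_update_diff_stat(&stats[SIENA_STAT_rx_good],
 *			     stats[SIENA_STAT_rx_packets] -
 *			     stats[SIENA_STAT_rx_bad]);
 *
 * If rx_packets is sampled before rx_bad, a burst of bad packets between
 * the two reads makes the difference dip temporarily:
 *
 *	sample 1: rx_packets = 1000, rx_bad = 10  ->  diff =  990 (stored)
 *	sample 2: rx_packets = 1000, rx_bad = 14  ->  diff =  986 (discarded)
 *	sample 3: rx_packets = 1020, rx_bad = 14  ->  diff = 1006 (stored)
 *
 * Only increases are stored, so the reported rx_good never goes backwards.
 */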
/* Interrupts */
int efx_siena_init_interrupt(struct efx_nic *efx);
int efx_siena_irq_test_start(struct efx_nic *efx);
void efx_siena_fini_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
size_t efx_siena_get_regs_len(struct efx_nic *efx);
void efx_siena_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names);
void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
#define EFX_MAX_FLUSH_TIME 5000
#endif /* EFX_NIC_COMMON_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
* Copyright 2019-2020 Xilinx Inc.
*/
#ifndef EFX_PTP_H
#define EFX_PTP_H
#include <linux/net_tstamp.h>
#include "net_driver.h"
struct ethtool_ts_info;
void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
struct ethtool_ts_info *ts_info);
bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_siena_ptp_get_mode(struct efx_nic *efx);
int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode);
int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
struct sk_buff *skb);
static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
struct sk_buff *skb)
{
if (channel->sync_events_state == SYNC_EVENTS_VALID)
__efx_siena_rx_skb_attach_timestamp(channel, skb);
}
void efx_siena_ptp_start_datapath(struct efx_nic *efx);
void efx_siena_ptp_stop_datapath(struct efx_nic *efx);
bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
#endif /* EFX_PTP_H */

View File

@ -0,0 +1,400 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2013 Solarflare Communications Inc.
*/
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/xdp.h>
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U
/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
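/* Editor's illustration (numbers assumed, not taken from this file): if
 * the maximum frame length were 9280 bytes and EFX_RX_USR_BUF_SIZE were
 * 1792 bytes, a single packet could span DIV_ROUND_UP(9280, 1792) = 6
 * receive buffers, which is the scatter limit enforced below.
 */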
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
int len)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
if (likely(len <= max_len))
return;
/* The packet must be discarded, but this is only a fatal error
* if the caller indicated it was
*/
rx_buf->flags |= EFX_RX_PKT_DISCARD;
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"RX queue %d overlength RX event (%#x > %#x)\n",
efx_rx_queue_index(rx_queue), len, max_len);
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags,
u8 *eh, int hdr_len)
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
skb = netdev_alloc_skb(efx->net_dev,
efx->rx_ip_align + efx->rx_prefix_size +
hdr_len);
if (unlikely(skb == NULL)) {
atomic_inc(&efx->n_rx_noskb_drops);
return NULL;
}
EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
efx->rx_prefix_size + hdr_len);
skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
__skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
rx_buf->page_offset += hdr_len;
rx_buf->len -= hdr_len;
for (;;) {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rx_buf->page, rx_buf->page_offset,
rx_buf->len, efx->rx_buffer_truesize);
rx_buf->page = NULL;
if (skb_shinfo(skb)->nr_frags == n_frags)
break;
rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
}
} else {
__free_pages(rx_buf->page, efx->rx_buffer_order);
rx_buf->page = NULL;
n_frags = 0;
}
/* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efx->net_dev);
skb_mark_napi_id(skb, &channel->napi_str);
return skb;
}
void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
rx_queue->rx_packets++;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
if (!(flags & EFX_RX_PKT_PREFIX_LEN))
efx_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either
* the hardware or the driver is broken.
*/
WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
rx_buf->flags |= EFX_RX_PKT_DISCARD;
}
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received ids %x-%x len %d %s%s\n",
efx_rx_queue_index(rx_queue), index,
(index + n_frags - 1) & rx_queue->ptr_mask, len,
(rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
(rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
/* Discard packet, if instructed to do so. Process the
* previous receive first.
*/
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
efx_rx_flush_packet(channel);
efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
return;
}
if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
* consumed in-order per RX queue.
*/
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
prefetch(efx_rx_buf_va(rx_buf));
rx_buf->page_offset += efx->rx_prefix_size;
rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
* Fix length for last fragment.
*/
unsigned int tail_frags = n_frags - 1;
for (;;) {
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0)
break;
efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
/* All fragments have been DMA-synced, so recycle pages. */
rx_buf = efx_rx_buffer(rx_queue, index);
efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
efx_rx_flush_packet(channel);
channel->rx_pkt_n_frags = n_frags;
channel->rx_pkt_index = index;
}
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct sk_buff *skb;
u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
if (unlikely(skb == NULL)) {
struct efx_rx_queue *rx_queue;
rx_queue = efx_channel_get_rx_queue(channel);
efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
skb_record_rx_queue(skb, channel->rx_queue.core_index);
/* Set the SKB flags */
skb_checksum_none_assert(skb);
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
}
efx_rx_skb_attach_timestamp(channel, skb);
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
/* Pass the packet up */
if (channel->rx_list != NULL)
/* Add to list, will pass up later */
list_add_tail(&skb->list, channel->rx_list);
else
/* No list, so pass it up now */
netif_receive_skb(skb);
}
/** efx_do_xdp: perform XDP processing on a received packet
*
* Returns true if packet should still be delivered.
*/
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, u8 **ehp)
{
u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
struct efx_rx_queue *rx_queue;
struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
struct xdp_buff xdp;
u32 xdp_act;
s16 offset;
int err;
xdp_prog = rcu_dereference_bh(efx->xdp_prog);
if (!xdp_prog)
return true;
rx_queue = efx_channel_get_rx_queue(channel);
if (unlikely(channel->rx_pkt_n_frags > 1)) {
/* We can't do XDP on fragmented packets - drop. */
efx_siena_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"XDP is not possible with multiple receive fragments (%d)\n",
channel->rx_pkt_n_frags);
channel->n_rx_xdp_bad_drops++;
return false;
}
dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
rx_buf->len, DMA_FROM_DEVICE);
/* Save the rx prefix. */
EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
efx->rx_prefix_size);
xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
/* No support yet for XDP metadata */
xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
rx_buf->len, false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
offset = (u8 *)xdp.data - *ehp;
switch (xdp_act) {
case XDP_PASS:
/* Fix up rx prefix. */
if (offset) {
*ehp += offset;
rx_buf->page_offset += offset;
rx_buf->len -= offset;
memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
efx->rx_prefix_size);
}
break;
case XDP_TX:
/* Buffer ownership passes to tx on success. */
xdpf = xdp_convert_buff_to_frame(&xdp);
err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) {
efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"XDP TX failed (%d)\n", err);
channel->n_rx_xdp_bad_drops++;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
} else {
channel->n_rx_xdp_tx++;
}
break;
case XDP_REDIRECT:
err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
if (unlikely(err)) {
efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"XDP redirect failed (%d)\n", err);
channel->n_rx_xdp_bad_drops++;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
} else {
channel->n_rx_xdp_redirect++;
}
break;
default:
bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_bad_drops++;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
break;
case XDP_ABORTED:
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_DROP:
efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_drops++;
break;
}
return xdp_act == XDP_PASS;
}
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_siena_rx_packet(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
struct efx_rx_buffer *rx_buf =
efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
/* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself.
*/
if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
struct efx_rx_queue *rx_queue;
efx_siena_loopback_rx_packet(efx, eh, rx_buf->len);
rx_queue = efx_channel_get_rx_queue(channel);
efx_siena_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
goto out;
}
if (!efx_do_xdp(efx, channel, rx_buf, &eh))
goto out;
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
efx_siena_rx_packet_gro(channel, rx_buf,
channel->rx_pkt_n_frags, eh, 0);
else
efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
channel->rx_pkt_n_frags = 0;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_RX_COMMON_H
#define EFX_RX_COMMON_H
/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
* ring, this number is divided by the number of buffers per page to calculate
* the number of pages to store in the RX page recycle ring.
*/
#define EFX_RECYCLE_RING_SIZE_10G 256
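/* Editor's illustration: in the common case of two RX buffers per page
 * (efx->rx_bufs_per_page == 2), a 10G recycle ring of 256 buffers is
 * backed by 256 / 2 = 128 recycled pages.
 */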
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
}
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
(u32)data[3] << 24;
#endif
}
void efx_siena_rx_slow_fill(struct timer_list *t);
void efx_siena_recycle_rx_pages(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags);
void efx_siena_discard_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags);
int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf,
unsigned int len)
{
dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
DMA_FROM_DEVICE);
}
void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
unsigned int num_bufs);
void efx_siena_rx_config_page_split(struct efx_nic *efx);
void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
bool atomic);
void
efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
u32 id);
void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right);
u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);
#ifdef CONFIG_RFS_ACCEL
bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
unsigned int filter_idx, bool *force);
struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec);
void efx_siena_rps_hash_del(struct efx_nic *efx,
const struct efx_filter_spec *spec);
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
unsigned int quota);
#endif
int efx_siena_probe_filters(struct efx_nic *efx);
void efx_siena_remove_filters(struct efx_nic *efx);
#endif

View File

@ -0,0 +1,807 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2012 Solarflare Communications Inc.
*/
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"
/* IRQ latency can be enormous because:
* - All IRQs may be disabled on a CPU for a *long* time by e.g. a
* slow serial console or an old IDE driver doing error recovery
* - The PREEMPT_RT patches mostly deal with this, but also allow a
* tasklet or normal task to be given higher priority than our IRQ
* threads
* Try to avoid blaming the hardware for this.
*/
#define IRQ_TIMEOUT HZ
/*
* Loopback test packet structure
*
* The self-test should stress every RSS vector, and unfortunately
* Falcon only performs RSS on TCP/UDP packets.
*/
struct efx_loopback_payload {
struct ethhdr header;
struct iphdr ip;
struct udphdr udp;
__be16 iteration;
char msg[64];
} __packed;
/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
static const char payload_msg[] =
"Hello world! This is an Efx loopback test in progress!";
/* Interrupt mode names */
static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_siena_interrupt_mode_names[] = {
[EFX_INT_MODE_MSIX] = "MSI-X",
[EFX_INT_MODE_MSI] = "MSI",
[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)
/**
* struct efx_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in efx_siena_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
* @offload_csum: Checksums are being offloaded
* @rx_good: RX good packet count
* @rx_bad: RX bad packet count
* @payload: Payload used in tests
*/
struct efx_loopback_state {
bool flush;
int packet_count;
struct sk_buff **skbs;
bool offload_csum;
atomic_t rx_good;
atomic_t rx_bad;
struct efx_loopback_payload payload;
};
/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000
/**************************************************************************
*
* MII, NVRAM and register tests
*
**************************************************************************/
static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
rc = efx_siena_mcdi_phy_test_alive(efx);
tests->phy_alive = rc ? -1 : 1;
return rc;
}
static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
if (efx->type->test_nvram) {
rc = efx->type->test_nvram(efx);
if (rc == -EPERM)
rc = 0;
else
tests->nvram = rc ? -1 : 1;
}
return rc;
}
/**************************************************************************
*
* Interrupt and event queue testing
*
**************************************************************************/
/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
unsigned long timeout, wait;
int cpu;
int rc;
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
rc = efx_siena_irq_test_start(efx);
if (rc == -ENOTSUPP) {
netif_dbg(efx, drv, efx->net_dev,
"direct interrupt testing not supported\n");
tests->interrupt = 0;
return 0;
}
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
/* Wait for arrival of test interrupt. */
netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
do {
schedule_timeout_uninterruptible(wait);
cpu = efx_nic_irq_test_irq_cpu(efx);
if (cpu >= 0)
goto success;
wait *= 2;
} while (time_before(jiffies, timeout));
netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
success:
netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
INT_MODE(efx), cpu);
tests->interrupt = 1;
return 0;
}
/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_nic *efx,
struct efx_self_tests *tests)
{
struct efx_channel *channel;
unsigned int read_ptr[EFX_MAX_CHANNELS];
unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
unsigned long timeout, wait;
BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
efx_for_each_channel(channel, efx) {
read_ptr[channel->channel] = channel->eventq_read_ptr;
set_bit(channel->channel, &dma_pend);
set_bit(channel->channel, &int_pend);
efx_siena_event_test_start(channel);
}
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
/* Wait for arrival of interrupts. NAPI processing may or may
* not complete in time, but we can cope in any case.
*/
do {
schedule_timeout_uninterruptible(wait);
efx_for_each_channel(channel, efx) {
efx_siena_stop_eventq(channel);
if (channel->eventq_read_ptr !=
read_ptr[channel->channel]) {
set_bit(channel->channel, &napi_ran);
clear_bit(channel->channel, &dma_pend);
clear_bit(channel->channel, &int_pend);
} else {
if (efx_siena_event_present(channel))
clear_bit(channel->channel, &dma_pend);
if (efx_nic_event_test_irq_cpu(channel) >= 0)
clear_bit(channel->channel, &int_pend);
}
efx_siena_start_eventq(channel);
}
wait *= 2;
} while ((dma_pend || int_pend) && time_before(jiffies, timeout));
efx_for_each_channel(channel, efx) {
bool dma_seen = !test_bit(channel->channel, &dma_pend);
bool int_seen = !test_bit(channel->channel, &int_pend);
tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
if (dma_seen && int_seen) {
netif_dbg(efx, drv, efx->net_dev,
"channel %d event queue passed (with%s NAPI)\n",
channel->channel,
test_bit(channel->channel, &napi_ran) ?
"" : "out");
} else {
/* Report failure and whether either interrupt or DMA
* worked
*/
netif_err(efx, drv, efx->net_dev,
"channel %d timed out waiting for event queue\n",
channel->channel);
if (int_seen)
netif_err(efx, drv, efx->net_dev,
"channel %d saw interrupt "
"during event queue test\n",
channel->channel);
if (dma_seen)
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n",
channel->channel);
}
}
return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned flags)
{
int rc;
mutex_lock(&efx->mac_lock);
rc = efx_siena_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
if (rc == -EPERM)
rc = 0;
else
netif_info(efx, drv, efx->net_dev,
"%s phy selftest\n", rc ? "Failed" : "Passed");
return rc;
}
/**************************************************************************
*
* Loopback testing
* NB Only one loopback test can be executing concurrently.
*
**************************************************************************/
/* Loopback test RX callback
* This is called for each received packet during loopback testing.
*/
void efx_siena_loopback_rx_packet(struct efx_nic *efx,
const char *buf_ptr, int pkt_len)
{
struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *received;
struct efx_loopback_payload *payload;
BUG_ON(!buf_ptr);
/* If we are just flushing, then drop the packet */
if ((state == NULL) || state->flush)
return;
payload = &state->payload;
received = (struct efx_loopback_payload *) buf_ptr;
received->ip.saddr = payload->ip.saddr;
if (state->offload_csum)
received->ip.check = payload->ip.check;
/* Check that header exists */
if (pkt_len < sizeof(received->header)) {
netif_err(efx, drv, efx->net_dev,
"saw runt RX packet (length %d) in %s loopback "
"test\n", pkt_len, LOOPBACK_MODE(efx));
goto err;
}
/* Check that the ethernet header exists */
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw non-loopback RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check packet length */
if (pkt_len != sizeof(*payload)) {
netif_err(efx, drv, efx->net_dev,
"saw incorrect RX packet length %d (wanted %d) in "
"%s loopback test\n", pkt_len, (int)sizeof(*payload),
LOOPBACK_MODE(efx));
goto err;
}
/* Check that IP header matches */
if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted IP header in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that msg and padding matches */
if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that iteration matches */
if (received->iteration != payload->iteration) {
netif_err(efx, drv, efx->net_dev,
"saw RX packet from iteration %d (wanted %d) in "
"%s loopback test\n", ntohs(received->iteration),
ntohs(payload->iteration), LOOPBACK_MODE(efx));
goto err;
}
/* Increase correct RX count */
netif_vdbg(efx, drv, efx->net_dev,
"got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
atomic_inc(&state->rx_good);
return;
err:
#ifdef DEBUG
if (atomic_read(&state->rx_bad) == 0) {
netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
buf_ptr, pkt_len, 0);
netif_err(efx, drv, efx->net_dev, "expected packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
&state->payload, sizeof(state->payload), 0);
}
#endif
atomic_inc(&state->rx_bad);
}
/* Initialise an efx_siena_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
struct net_device *net_dev = efx->net_dev;
struct efx_loopback_payload *payload = &state->payload;
/* Initialise the layerII header */
ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
payload->header.h_proto = htons(ETH_P_IP);
/* saddr set later and used as incrementing count */
payload->ip.daddr = htonl(INADDR_LOOPBACK);
payload->ip.ihl = 5;
payload->ip.check = (__force __sum16) htons(0xdead);
payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
payload->ip.version = IPVERSION;
payload->ip.protocol = IPPROTO_UDP;
/* Initialise udp header */
payload->udp.source = 0;
payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
sizeof(struct iphdr));
payload->udp.check = 0; /* checksum ignored */
/* Fill out payload */
payload->iteration = htons(ntohs(payload->iteration) + 1);
memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
/* Fill out remaining state members */
atomic_set(&state->rx_good, 0);
atomic_set(&state->rx_bad, 0);
smp_wmb();
}
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *payload;
struct sk_buff *skb;
int i;
netdev_tx_t rc;
/* Transmit N copies of buffer */
for (i = 0; i < state->packet_count; i++) {
/* Allocate an skb, holding an extra reference for
* transmit completion counting */
skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
if (!skb)
return -ENOMEM;
state->skbs[i] = skb;
skb_get(skb);
/* Copy the payload in, incrementing the source address to
* exercise the rss vectors */
payload = skb_put(skb, sizeof(state->payload));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
/* Ensure everything we've written is visible to the
* interrupt handler. */
smp_wmb();
netif_tx_lock_bh(efx->net_dev);
rc = efx_enqueue_skb(tx_queue, skb);
netif_tx_unlock_bh(efx->net_dev);
if (rc != NETDEV_TX_OK) {
netif_err(efx, drv, efx->net_dev,
"TX queue %d could not transmit packet %d of "
"%d in %s loopback test\n", tx_queue->label,
i + 1, state->packet_count,
LOOPBACK_MODE(efx));
/* Defer cleaning up the other skbs for the caller */
kfree_skb(skb);
return -EPIPE;
}
}
return 0;
}
static int efx_poll_loopback(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
return atomic_read(&state->rx_good) == state->packet_count;
}
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
struct efx_loopback_self_tests *lb_tests)
{
struct efx_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest;
struct sk_buff *skb;
int tx_done = 0, rx_good, rx_bad;
int i, rc = 0;
netif_tx_lock_bh(efx->net_dev);
/* Count the number of tx completions, and decrement the refcnt. Any
* skbs not already completed will be free'd when the queue is flushed */
for (i = 0; i < state->packet_count; i++) {
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
dev_kfree_skb(skb);
}
netif_tx_unlock_bh(efx->net_dev);
/* Check TX completion and received packet counts */
rx_good = atomic_read(&state->rx_good);
rx_bad = atomic_read(&state->rx_bad);
if (tx_done != state->packet_count) {
/* Don't free the skbs; they will be picked up on TX
* overflow or channel teardown.
*/
netif_err(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"TX completion events in %s loopback test\n",
tx_queue->label, tx_done, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Allow to fall through so we see the RX errors as well */
}
/* We may always be up to a flush away from our desired packet total */
if (rx_good != state->packet_count) {
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"received packets in %s loopback test\n",
tx_queue->label, rx_good, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Fall through */
}
/* Update loopback test structure */
lb_tests->tx_sent[tx_queue->label] += state->packet_count;
lb_tests->tx_done[tx_queue->label] += tx_done;
lb_tests->rx_good += rx_good;
lb_tests->rx_bad += rx_bad;
return rc;
}
static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
struct efx_loopback_self_tests *lb_tests)
{
struct efx_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest;
int i, begin_rc, end_rc;
for (i = 0; i < 3; i++) {
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
state->skbs = kcalloc(state->packet_count,
sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d (hw %d) testing %s loopback with %d packets\n",
tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
efx_iterate_state(efx);
begin_rc = efx_begin_loopback(tx_queue);
/* This will normally complete very quickly, but be
* prepared to wait much longer. */
msleep(1);
if (!efx_poll_loopback(efx)) {
msleep(LOOPBACK_TIMEOUT_MS);
efx_poll_loopback(efx);
}
end_rc = efx_end_loopback(tx_queue, lb_tests);
kfree(state->skbs);
if (begin_rc || end_rc) {
/* Wait a while to ensure there are no packets
* floating around after a failure. */
schedule_timeout_uninterruptible(HZ / 10);
return begin_rc ? begin_rc : end_rc;
}
}
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d passed %s loopback test with a burst length "
"of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
state->packet_count);
return 0;
}
/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
* any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
* to delay and retry. Therefore, it's safer to just poll directly. Wait
* for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
int count, link_up_count = 0;
bool link_up;
for (count = 0; count < 40; count++) {
schedule_timeout_uninterruptible(HZ / 10);
if (efx->type->monitor != NULL) {
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
}
mutex_lock(&efx->mac_lock);
link_up = link_state->up;
if (link_up)
link_up = !efx->type->check_mac_fault(efx);
mutex_unlock(&efx->mac_lock);
if (link_up) {
if (++link_up_count == 2)
return 0;
} else {
link_up_count = 0;
}
}
return -ETIMEDOUT;
}
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned int loopback_modes)
{
enum efx_loopback_mode mode;
struct efx_loopback_state *state;
struct efx_channel *channel =
efx_get_channel(efx, efx->tx_channel_offset);
struct efx_tx_queue *tx_queue;
int rc = 0;
/* Set the port loopback_selftest member. From this point on
* all received packets will be dropped. Mark the state as
* "flushing" so all inflight packets are dropped */
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
BUG_ON(efx->loopback_selftest);
state->flush = true;
efx->loopback_selftest = state;
/* Test all supported loopback modes */
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(loopback_modes & (1 << mode)))
continue;
/* Move the port into the specified loopback mode. */
state->flush = true;
mutex_lock(&efx->mac_lock);
efx->loopback_mode = mode;
rc = __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"unable to move into %s loopback\n",
LOOPBACK_MODE(efx));
goto out;
}
rc = efx_wait_for_link(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"loopback %s never came up\n",
LOOPBACK_MODE(efx));
goto out;
}
/* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->type &
EFX_TXQ_TYPE_OUTER_CSUM);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
goto out;
}
}
out:
/* Remove the flush. The caller will remove the loopback setting */
state->flush = true;
efx->loopback_selftest = NULL;
wmb();
kfree(state);
if (rc == -EPERM)
rc = 0;
return rc;
}
/**************************************************************************
*
* Entry point
*
*************************************************************************/
int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned int flags)
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
int rc_test = 0, rc_reset, rc;
efx_siena_selftest_async_cancel(efx);
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
rc = efx_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = efx_test_nvram(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = efx_test_interrupts(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = efx_test_eventq_irq(efx, tests);
if (rc && !rc_test)
rc_test = rc;
if (rc_test)
return rc_test;
if (!(flags & ETH_TEST_FL_OFFLINE))
return efx_test_phy(efx, tests, flags);
/* Offline (i.e. disruptive) testing
* This checks MAC and PHY loopback on the specified port. */
/* Detach the device so the kernel doesn't transmit during the
* loopback test and the watchdog timeout doesn't fire.
*/
efx_device_detach_sync(efx);
if (efx->type->test_chip) {
rc_reset = efx->type->test_chip(efx, tests);
if (rc_reset) {
netif_err(efx, hw, efx->net_dev,
"Unable to recover from chip test\n");
efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset;
}
if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
__efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
rc = efx_test_phy(efx, tests, flags);
if (rc && !rc_test)
rc_test = rc;
rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
if (rc && !rc_test)
rc_test = rc;
/* restore the PHY to the previous state */
mutex_lock(&efx->mac_lock);
efx->phy_mode = phy_mode;
efx->loopback_mode = loopback_mode;
__efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
efx_device_attach_if_not_resetting(efx);
return rc_test;
}
void efx_siena_selftest_async_start(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_siena_event_test_start(channel);
schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}
void efx_siena_selftest_async_cancel(struct efx_nic *efx)
{
cancel_delayed_work_sync(&efx->selftest_work);
}
static void efx_siena_selftest_async_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
selftest_work.work);
struct efx_channel *channel;
int cpu;
efx_for_each_channel(channel, efx) {
cpu = efx_nic_event_test_irq_cpu(channel);
if (cpu < 0)
netif_err(efx, ifup, efx->net_dev,
"channel %d failed to trigger an interrupt\n",
channel->channel);
else
netif_dbg(efx, ifup, efx->net_dev,
"channel %d triggered interrupt on CPU %d\n",
channel->channel, cpu);
}
}
void efx_siena_selftest_async_init(struct efx_nic *efx)
{
INIT_DELAYED_WORK(&efx->selftest_work, efx_siena_selftest_async_work);
}

View File

@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2012 Solarflare Communications Inc.
*/
#ifndef EFX_SELFTEST_H
#define EFX_SELFTEST_H
#include "net_driver.h"
/*
* Self tests
*/
struct efx_loopback_self_tests {
int tx_sent[EFX_MAX_TXQ_PER_CHANNEL];
int tx_done[EFX_MAX_TXQ_PER_CHANNEL];
int rx_good;
int rx_bad;
};
#define EFX_MAX_PHY_TESTS 20
/* Efx self test results
* For fields which are not counters, 1 indicates success and -1
* indicates failure; 0 indicates test could not be run.
*/
struct efx_self_tests {
/* online tests */
int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
/* offline tests */
int memory;
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
void efx_siena_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
int pkt_len);
int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned int flags);
void efx_siena_selftest_async_init(struct efx_nic *efx);
void efx_siena_selftest_async_start(struct efx_nic *efx);
void efx_siena_selftest_async_cancel(struct efx_nic *efx);
#endif /* EFX_SELFTEST_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,79 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2015 Solarflare Communications Inc.
*/
#ifndef SIENA_SRIOV_H
#define SIENA_SRIOV_H
#include "net_driver.h"
/* On the SFC9000 family each port is associated with 1 PCI physical
* function (PF) handled by sfc and a configurable number of virtual
* functions (VFs) that may be handled by some other driver, often in
* a VM guest. The queue pointer registers are mapped in both PF and
* VF BARs such that an 8K region provides access to a single RX, TX
* and event queue (collectively a Virtual Interface, VI or VNIC).
*
* The PF has access to all 1024 VIs while VFs are mapped to VIs
* according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
* in the range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
* The number of VIs and the VI_SCALE value are configurable but must
* be established at boot time by firmware.
*/
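/* Worked example (editor's illustration; the actual VI_SCALE is set by
 * firmware): with VI_BASE = 128 and VI_SCALE = 6, each VF owns
 * 1 << 6 = 64 VIs, so VF 0 maps to VIs [128, 192), VF 1 to [192, 256),
 * and in general VF i to [128 + 64 * i, 128 + 64 * (i + 1)).
 */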
/* Maximum VI_SCALE parameter supported by Siena */
#define EFX_VI_SCALE_MAX 6
/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
* so this is the smallest allowed value.
*/
#define EFX_VI_BASE 128U
/* Maximum number of VFs allowed */
#define EFX_VF_COUNT_MAX 127
/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
#define EFX_MAX_VF_EVQ_SIZE 8192UL
/* The number of buffer table entries reserved for each VI on a VF */
#define EFX_VF_BUFTBL_PER_VI \
((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
sizeof(efx_qword_t) / EFX_BUF_SIZE)
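/* Editor's illustration of the arithmetic above (constant values assumed,
 * as they are defined elsewhere): with EFX_MAX_VF_EVQ_SIZE = 8192,
 * EFX_MAX_DMAQ_SIZE = 4096, an 8-byte efx_qword_t and EFX_BUF_SIZE = 4096,
 * this evaluates to (8192 + 2 * 4096) * 8 / 4096 = 32 buffer table
 * entries per VI.
 */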
int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
int efx_siena_sriov_init(struct efx_nic *efx);
void efx_siena_sriov_fini(struct efx_nic *efx);
int efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
bool efx_siena_sriov_wanted(struct efx_nic *efx);
void efx_siena_sriov_reset(struct efx_nic *efx);
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
u16 vlan, u8 qos);
int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
bool spoofchk);
int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
struct ifla_vf_info *ivf);
#ifdef CONFIG_SFC_SIENA_SRIOV
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return efx->vf_init_count != 0;
}
int efx_init_sriov(void);
void efx_fini_sriov(void);
#else /* !CONFIG_SFC_SIENA_SRIOV */
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return false;
}
#endif /* CONFIG_SFC_SIENA_SRIOV */
void efx_siena_sriov_probe(struct efx_nic *efx);
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
#endif /* SIENA_SRIOV_H */

View File

@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2014-2015 Solarflare Communications Inc.
*/
#ifndef EFX_SRIOV_H
#define EFX_SRIOV_H
#include "net_driver.h"
#ifdef CONFIG_SFC_SIENA_SRIOV
static inline
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_set_vf_mac)
return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
else
return -EOPNOTSUPP;
}
static inline
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
u8 qos, __be16 vlan_proto)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_set_vf_vlan) {
if ((vlan & ~VLAN_VID_MASK) ||
(qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
return -EINVAL;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
} else {
return -EOPNOTSUPP;
}
}
static inline
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_set_vf_spoofchk)
return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
else
return -EOPNOTSUPP;
}
static inline
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_get_vf_config)
return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
else
return -EOPNOTSUPP;
}
static inline
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
int link_state)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_set_vf_link_state)
return efx->type->sriov_set_vf_link_state(efx, vf_i,
link_state);
else
return -EOPNOTSUPP;
}
#endif /* CONFIG_SFC_SIENA_SRIOV */
#endif /* EFX_SRIOV_H */

View File

@ -0,0 +1,392 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2013 Solarflare Communications Inc.
*/
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer)
{
unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
struct efx_buffer *page_buf =
&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
unsigned int offset =
((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
if (unlikely(!page_buf->addr) &&
efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
GFP_ATOMIC))
return NULL;
buffer->dma_addr = page_buf->dma_addr + offset;
buffer->unmap_len = 0;
return (u8 *)page_buf->addr + offset;
}
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider all queues that the net core sees as one */
struct efx_nic *efx = txq1->efx;
struct efx_tx_queue *txq2;
unsigned int fill_level;
fill_level = efx_channel_tx_old_fill_level(txq1->channel);
if (likely(fill_level < efx->txq_stop_thresh))
return;
/* We used the stale old_read_count above, which gives us a
* pessimistic estimate of the fill level (which may even
* validly be >= efx->txq_entries). Now try again using
* read_count (more likely to be a cache miss).
*
* If we read read_count and then conditionally stop the
* queue, it is possible for the completion path to race with
* us and complete all outstanding descriptors in the middle,
* after which there will be no more completions to wake it.
* Therefore we stop the queue first, then read read_count
* (with a memory barrier to ensure the ordering), then
* restart the queue if the fill level turns out to be low
* enough.
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
efx_for_each_channel_tx_queue(txq2, txq1->channel)
txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = efx_channel_tx_old_fill_level(txq1->channel);
EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
if (likely(fill_level < efx->txq_stop_thresh)) {
smp_mb();
if (likely(!efx->loopback_selftest))
netif_tx_start_queue(txq1->core_txq);
}
}
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{
unsigned int copy_len = skb->len;
struct efx_tx_buffer *buffer;
u8 *copy_buffer;
int rc;
EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
if (unlikely(!copy_buffer))
return -ENOMEM;
rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
EFX_WARN_ON_PARANOID(rc);
buffer->len = copy_len;
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB;
++tx_queue->insert_count;
return rc;
}
/* Send any pending traffic for a channel. xmit_more is shared across all
* queues for a channel, so we must check all of them.
*/
static void efx_tx_send_pending(struct efx_channel *channel)
{
struct efx_tx_queue *q;
efx_for_each_channel_tx_queue(q, channel) {
if (q->xmit_pending)
efx_nic_push_buffers(q);
}
}
/*
* Add a socket buffer to a TX queue
*
* This maps all fragments of a socket buffer for DMA and adds them to
* the TX queue. The queue's insert pointer will be incremented by
* the number of fragments in the socket buffer.
*
* If any DMA mapping fails, any mapped fragments will be unmapped and
* the queue's insert pointer will be restored to its original value.
*
* This function is split out from efx_siena_hard_start_xmit to allow the
* loopback test to direct packets via specific TX queues.
*
* Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function.
*/
netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
bool xmit_more = netdev_xmit_more();
bool data_mapped = false;
unsigned int segments;
unsigned int skb_len;
int rc;
skb_len = skb->len;
segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
if (segments == 1)
segments = 0; /* Don't use TSO for a single segment. */
/* Handle TSO first - it's *possible* (although unlikely) that we might
* be passed a packet to segment that's smaller than the copybreak/PIO
* size limit.
*/
if (segments) {
rc = efx_siena_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++;
if (rc == 0)
return 0;
goto err;
} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
/* Pad short packets or coalesce short fragmented packets. */
if (efx_enqueue_skb_copy(tx_queue, skb))
goto err;
tx_queue->cb_packets++;
data_mapped = true;
}
/* Map for DMA and create descriptors if we haven't done so already. */
if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
goto err;
efx_tx_maybe_stop_queue(tx_queue);
tx_queue->xmit_pending = true;
/* Pass off to hardware */
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
efx_tx_send_pending(tx_queue->channel);
tx_queue->tx_packets++;
return NETDEV_TX_OK;
err:
efx_siena_enqueue_unwind(tx_queue, old_insert_count);
dev_kfree_skb_any(skb);
/* If we're not expecting another transmit and we had something to push
* on this queue or a partner queue then we need to push here to get the
* previous packets out.
*/
if (!xmit_more)
efx_tx_send_pending(tx_queue->channel);
return NETDEV_TX_OK;
}
/* Transmit a packet from an XDP buffer
*
* Returns number of packets sent on success, error code otherwise.
* Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
* (for XDP redirect).
*/
int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
bool flush)
{
struct efx_tx_buffer *tx_buffer;
struct efx_tx_queue *tx_queue;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
unsigned int len;
int space;
int cpu;
int i = 0;
if (unlikely(n && !xdpfs))
return -EINVAL;
if (unlikely(!n))
return 0;
cpu = raw_smp_processor_id();
if (unlikely(cpu >= efx->xdp_tx_queue_count))
return -EINVAL;
tx_queue = efx->xdp_tx_queues[cpu];
if (unlikely(!tx_queue))
return -EINVAL;
if (!tx_queue->initialised)
return -EINVAL;
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
/* If we're borrowing net stack queues we have to handle stop-restart
* or we might block the queue and it will be considered as frozen
*/
if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
if (netif_tx_queue_stopped(tx_queue->core_txq))
goto unlock;
efx_tx_maybe_stop_queue(tx_queue);
}
/* Check for available space. We should never need multiple
* descriptors per frame.
*/
space = efx->txq_entries +
tx_queue->read_count - tx_queue->insert_count;
for (i = 0; i < n; i++) {
xdpf = xdpfs[i];
if (i >= space)
break;
/* We'll want a descriptor for this tx. */
prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
len = xdpf->len;
/* Map for DMA. */
dma_addr = dma_map_single(&efx->pci_dev->dev,
xdpf->data, len,
DMA_TO_DEVICE);
if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
break;
/* Create descriptor and set up for unmapping DMA. */
tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
tx_buffer->xdpf = xdpf;
tx_buffer->flags = EFX_TX_BUF_XDP |
EFX_TX_BUF_MAP_SINGLE;
tx_buffer->dma_offset = 0;
tx_buffer->unmap_len = len;
tx_queue->tx_packets++;
}
/* Pass mapped frames to hardware. */
if (flush && i > 0)
efx_nic_push_buffers(tx_queue);
unlock:
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
return i == 0 ? -EIO : i;
}
/* Initiate a packet transmission. We use one channel per CPU
* (sharing when we have more CPUs than channels).
*
* Context: non-blocking.
* Should always return NETDEV_TX_OK and consume the skb.
*/
netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
unsigned index, type;
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
index = skb_get_queue_mapping(skb);
type = efx_tx_csum_type_skb(skb);
if (index >= efx->n_tx_channels) {
index -= efx->n_tx_channels;
type |= EFX_TXQ_TYPE_HIGHPRI;
}
/* PTP "event" packet */
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
((efx_siena_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
unlikely(efx_siena_ptp_is_ptp_tx(efx, skb)))) {
/* There may be existing transmits on the channel that are
* waiting for this packet to trigger the doorbell write.
* We need to send the packets at this point.
*/
efx_tx_send_pending(efx_get_tx_channel(efx, index));
return efx_siena_ptp_tx(efx, skb);
}
tx_queue = efx_get_tx_queue(efx, index, type);
if (WARN_ON_ONCE(!tx_queue)) {
/* We don't have a TXQ of the right type.
* This should never happen, as we don't advertise offload
* features unless we can support them.
*/
dev_kfree_skb_any(skb);
/* If we're not expecting another transmit and we had something to push
* on this queue or a partner queue then we need to push here to get the
* previous packets out.
*/
if (!netdev_xmit_more())
/* tx_queue is NULL in this branch, so look the channel up by index */
efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
return __efx_siena_enqueue_skb(tx_queue, skb);
}
void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
/* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */
tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev,
tx_queue->channel->channel +
((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
efx->n_tx_channels : 0));
}
int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct tc_mqprio_qopt *mqprio = type_data;
unsigned tc, num_tc;
if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
/* Only Siena supported highpri queues */
if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
return -EOPNOTSUPP;
num_tc = mqprio->num_tc;
if (num_tc > EFX_MAX_TX_TC)
return -EINVAL;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
if (num_tc == net_dev->num_tc)
return 0;
for (tc = 0; tc < num_tc; tc++) {
net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
}
net_dev->num_tc = num_tc;
return netif_set_real_num_tx_queues(net_dev,
max_t(int, num_tc, 1) *
efx->n_tx_channels);
}

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2015 Solarflare Communications Inc.
*/
#ifndef EFX_TX_H
#define EFX_TX_H
#include <linux/types.h>
/* Driver internal tx-path related declarations. */
/* What TXQ type will satisfy the checksum offloads required for this skb? */
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
{
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0; /* no checksum offload */
if (skb->encapsulation &&
skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
/* we only advertise features for IPv4 and IPv6 checksums on
* encapsulated packets, so if the checksum is for the inner
* packet, it must be one of them; no further checking required.
*/
/* Do we also need to offload the outer header checksum? */
if (skb_shinfo(skb)->gso_segs > 1 &&
!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
return EFX_TXQ_TYPE_INNER_CSUM;
}
/* similarly, we only advertise features for IPv4 and IPv6 checksums,
* so it must be one of them. No need for further checks.
*/
return EFX_TXQ_TYPE_OUTER_CSUM;
}
#endif /* EFX_TX_H */

View File

@ -0,0 +1,448 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
PAGE_SIZE >> EFX_TX_CB_ORDER);
}
int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int entries;
int rc;
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
tx_queue->ptr_mask = entries - 1;
netif_dbg(efx, probe, efx->net_dev,
"creating TX queue %d size %#x mask %#x\n",
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
if (!tx_queue->cb_page) {
rc = -ENOMEM;
goto fail1;
}
/* Allocate hardware ring, determine TXQ type */
rc = efx_nic_probe_tx(tx_queue);
if (rc)
goto fail2;
tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
return 0;
fail2:
kfree(tx_queue->cb_page);
tx_queue->cb_page = NULL;
fail1:
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
return rc;
}
void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
netif_dbg(efx, drv, efx->net_dev,
"initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
tx_queue->notify_count = 0;
tx_queue->write_count = 0;
tx_queue->packet_write_count = 0;
tx_queue->old_write_count = 0;
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
tx_queue->xmit_pending = false;
tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) &&
tx_queue->channel == efx_siena_ptp_channel(efx));
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
tx_queue->tso_version = 0;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
tx_queue->initialised = true;
}
void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
int i;
if (!tx_queue->buffer)
return;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
if (tx_queue->cb_page) {
for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
efx_siena_free_buffer(tx_queue->efx,
&tx_queue->cb_page[i]);
kfree(tx_queue->cb_page);
tx_queue->cb_page = NULL;
}
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
else
dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
buffer->unmap_len = 0;
}
if (buffer->flags & EFX_TX_BUF_SKB) {
struct sk_buff *skb = (struct sk_buff *)buffer->skb;
EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
(*pkts_compl)++;
(*bytes_compl) += skb->len;
if (tx_queue->timestamping &&
(tx_queue->completed_timestamp_major ||
tx_queue->completed_timestamp_minor)) {
struct skb_shared_hwtstamps hwtstamp;
hwtstamp.hwtstamp =
efx_siena_ptp_nic_to_kernel_time(tx_queue);
skb_tstamp_tx(skb, &hwtstamp);
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
}
dev_consume_skb_any((struct sk_buff *)buffer->skb);
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
} else if (buffer->flags & EFX_TX_BUF_XDP) {
xdp_return_frame_rx_napi(buffer->xdpf);
}
buffer->len = 0;
buffer->flags = 0;
}
void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
if (!tx_queue->buffer)
return;
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
++tx_queue->read_count;
}
tx_queue->xmit_pending = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
* specified index.
*/
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
stop_index = (index + 1) & tx_queue->ptr_mask;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (!efx_tx_buffer_in_use(buffer)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %d\n",
tx_queue->queue, read_ptr);
efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
}
void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
/* Ensure that read_count is flushed. */
smp_mb();
tx_queue->empty_read_count =
tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
}
}
}
void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.
*/
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
fill_level = efx_channel_tx_fill_level(tx_queue->channel);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
efx_siena_xmit_done_check_empty(tx_queue);
}
/* Remove buffers put into a tx_queue for the current packet.
* None of the buffers must have an skb attached.
*/
void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
}
}
struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, size_t len)
{
const struct efx_nic_type *nic_type = tx_queue->efx->type;
struct efx_tx_buffer *buffer;
unsigned int dma_len;
/* Map the fragment taking account of NIC-dependent DMA limits. */
do {
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
if (nic_type->tx_limit_len)
dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
else
dma_len = len;
buffer->len = dma_len;
buffer->dma_addr = dma_addr;
buffer->flags = EFX_TX_BUF_CONT;
len -= dma_len;
dma_addr += dma_len;
++tx_queue->insert_count;
} while (len);
return buffer;
}
static int efx_tx_tso_header_length(struct sk_buff *skb)
{
size_t header_len;
if (skb->encapsulation)
header_len = skb_inner_transport_header(skb) -
skb->data +
(inner_tcp_hdr(skb)->doff << 2u);
else
header_len = skb_transport_header(skb) - skb->data +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
unsigned int segment_count)
{
struct efx_nic *efx = tx_queue->efx;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int frag_index, nr_frags;
dma_addr_t dma_addr, unmap_addr;
unsigned short dma_flags;
size_t len, unmap_len;
nr_frags = skb_shinfo(skb)->nr_frags;
frag_index = 0;
/* Map header data. */
len = skb_headlen(skb);
dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
dma_flags = EFX_TX_BUF_MAP_SINGLE;
unmap_len = len;
unmap_addr = dma_addr;
if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
return -EIO;
if (segment_count) {
/* For TSO we need to put the header in to a separate
* descriptor. Map this separately if necessary.
*/
size_t header_len = efx_tx_tso_header_length(skb);
if (header_len != len) {
tx_queue->tso_long_headers++;
efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
len -= header_len;
dma_addr += header_len;
}
}
/* Add descriptors for each fragment. */
do {
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
/* The final descriptor for a fragment is responsible for
* unmapping the whole fragment.
*/
buffer->flags = EFX_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len;
buffer->dma_offset = buffer->dma_addr - unmap_addr;
if (frag_index >= nr_frags) {
/* Store SKB details with the final buffer for
* the completion.
*/
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
return 0;
}
/* Move on to the next fragment. */
fragment = &skb_shinfo(skb)->frags[frag_index++];
len = skb_frag_size(fragment);
dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
DMA_TO_DEVICE);
dma_flags = 0;
unmap_len = len;
unmap_addr = dma_addr;
if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
return -EIO;
} while (1);
}
unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
{
/* Header and payload descriptor for each output segment, plus
* one for every input fragment boundary within a segment
*/
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
/* Possibly one more per segment for option descriptors */
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
max_descs += EFX_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EFX_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
return max_descs;
}
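/* Worked example (illustrative only; the macro values below are assumptions,
 * not taken from this file): if EFX_TSO_MAX_SEGS were 100 and MAX_SKB_FRAGS
 * were 17, the baseline above would be 100 * 2 + 17 = 217 descriptors,
 * rising to 317 on EF10-class NICs that may emit an option descriptor per
 * segment.  The final term only contributes when PAGE_SIZE exceeds
 * EFX_PAGE_SIZE.
 */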
/*
* Fallback to software TSO.
*
* This is used if we are unable to send a GSO packet through hardware TSO.
* This should only ever happen due to per-queue restrictions - unsupported
* packets should first be filtered by the feature flags.
*
* Returns 0 on success, error code otherwise.
*/
int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{
struct sk_buff *segments, *next;
segments = skb_gso_segment(skb, 0);
if (IS_ERR(segments))
return PTR_ERR(segments);
dev_consume_skb_any(skb);
skb_list_walk_safe(segments, skb, next) {
skb_mark_not_on_list(skb);
efx_enqueue_skb(tx_queue, skb);
}
return 0;
}

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_TX_COMMON_H
#define EFX_TX_COMMON_H
int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
}
void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count);
struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, size_t len);
int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
unsigned int segment_count);
unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx);
int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern bool efx_siena_separate_tx_channels;
#endif

View File

@ -0,0 +1,252 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2010-2012 Solarflare Communications Inc.
*/
#ifndef _VFDI_H
#define _VFDI_H
/**
* DOC: Virtual Function Driver Interface
*
* This file contains software structures used to form a two way
* communication channel between the VF driver and the PF driver,
* named Virtual Function Driver Interface (VFDI).
*
* For the purposes of VFDI, a page is a memory region with size and
* alignment of 4K. All addresses are DMA addresses to be used within
* the domain of the relevant VF.
*
* The only hardware-defined channels for a VF driver to communicate
* with the PF driver are the event mailboxes (%FR_CZ_USR_EV
* registers). Writing to these registers generates an event with
* EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
* and USER_EV_REG_VALUE set to the value written. The PF driver may
* direct or disable delivery of these events by setting
* %FR_CZ_USR_EV_CFG.
*
* The PF driver can send arbitrary events to arbitrary event queues.
* However, for consistency, VFDI events from the PF are defined to
* follow the same form and be sent to the first event queue assigned
* to the VF while that queue is enabled by the VF driver.
*
* The general form of the variable bits of VFDI events is:
*
* 0 16 24 31
* | DATA | TYPE | SEQ |
*
* SEQ is a sequence number which should be incremented by 1 (modulo
* 256) for each event. The sequence numbers used in each direction
* are independent.
*
* The VF submits requests of type &struct vfdi_req by sending the
* address of the request (ADDR) in a series of 4 events:
*
* 0 16 24 31
* | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ |
* | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
* | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
* | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
*
* The address must be page-aligned. After receiving such a valid
* series of events, the PF driver will attempt to read the request
* and write a response to the same address. In case of an invalid
* sequence of events or a DMA error, there will be no response.
*
* The VF driver may request that the PF driver writes status
* information into its domain asynchronously. After writing the
* status, the PF driver will send an event of the form:
*
* 0 16 24 31
* | reserved | VFDI_EV_TYPE_STATUS | SEQ |
*
* In case the VF must be reset for any reason, the PF driver will
* send an event of the form:
*
* 0 16 24 31
* | reserved | VFDI_EV_TYPE_RESET | SEQ |
*
* It is then the responsibility of the VF driver to request
* reinitialisation of its queues.
*/
#define VFDI_EV_SEQ_LBN 24
#define VFDI_EV_SEQ_WIDTH 8
#define VFDI_EV_TYPE_LBN 16
#define VFDI_EV_TYPE_WIDTH 8
#define VFDI_EV_TYPE_REQ_WORD0 0
#define VFDI_EV_TYPE_REQ_WORD1 1
#define VFDI_EV_TYPE_REQ_WORD2 2
#define VFDI_EV_TYPE_REQ_WORD3 3
#define VFDI_EV_TYPE_STATUS 4
#define VFDI_EV_TYPE_RESET 5
#define VFDI_EV_DATA_LBN 0
#define VFDI_EV_DATA_WIDTH 16
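/* Illustrative sketch, not part of the VFDI ABI: the helpers below show how
 * the SEQ/TYPE/DATA fields described above can be pulled out of the 32-bit
 * user-event value using the LBN/WIDTH definitions.  The driver has its own
 * bitfield accessor macros for this; these open-coded versions exist purely
 * to document the layout.
 */
static inline u8 vfdi_ev_example_seq(u32 ev)
{
	return (ev >> VFDI_EV_SEQ_LBN) & ((1u << VFDI_EV_SEQ_WIDTH) - 1);
}

static inline u8 vfdi_ev_example_type(u32 ev)
{
	return (ev >> VFDI_EV_TYPE_LBN) & ((1u << VFDI_EV_TYPE_WIDTH) - 1);
}

static inline u16 vfdi_ev_example_data(u32 ev)
{
	return (ev >> VFDI_EV_DATA_LBN) & ((1u << VFDI_EV_DATA_WIDTH) - 1);
}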
struct vfdi_endpoint {
u8 mac_addr[ETH_ALEN];
__be16 tci;
};
/**
* enum vfdi_op - VFDI operation enumeration
* @VFDI_OP_RESPONSE: Indicates a response to the request.
* @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
* @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
* @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
* @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
* finalize the SRAM entries.
* @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
* @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
* @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
* from PF and write the initial status.
* @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
* updates from PF.
*/
enum vfdi_op {
VFDI_OP_RESPONSE = 0,
VFDI_OP_INIT_EVQ = 1,
VFDI_OP_INIT_RXQ = 2,
VFDI_OP_INIT_TXQ = 3,
VFDI_OP_FINI_ALL_QUEUES = 4,
VFDI_OP_INSERT_FILTER = 5,
VFDI_OP_REMOVE_ALL_FILTERS = 6,
VFDI_OP_SET_STATUS_PAGE = 7,
VFDI_OP_CLEAR_STATUS_PAGE = 8,
VFDI_OP_LIMIT,
};
/* Response codes for VFDI operations. Other values may be used in future. */
#define VFDI_RC_SUCCESS 0
#define VFDI_RC_ENOMEM (-12)
#define VFDI_RC_EINVAL (-22)
#define VFDI_RC_EOPNOTSUPP (-95)
#define VFDI_RC_ETIMEDOUT (-110)
/**
* struct vfdi_req - Request from VF driver to PF driver
* @op: Operation code or response indicator, taken from &enum vfdi_op.
* @rc: Response code. Set to 0 on success or a negative error code on failure.
* @u.init_evq.index: Index of event queue to create.
* @u.init_evq.buf_count: Number of 4k buffers backing event queue.
* @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
* address of each page backing the event queue.
* @u.init_rxq.index: Index of receive queue to create.
* @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
* @u.init_rxq.evq: Instance of event queue to target receive events at.
* @u.init_rxq.label: Label used in receive events.
* @u.init_rxq.flags: Unused.
* @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
* address of each page backing the receive queue.
* @u.init_txq.index: Index of transmit queue to create.
* @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
* @u.init_txq.evq: Instance of event queue to target transmit completion
* events at.
* @u.init_txq.label: Label used in transmit completion events.
* @u.init_txq.flags: Checksum offload flags.
* @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
* address of each page backing the transmit queue.
* @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
* all traffic at this receive queue.
* @u.mac_filter.flags: MAC filter flags.
* @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
* This address must be page-aligned and the PF may write up to a
* whole page (allowing for extension of the structure).
* @u.set_status_page.peer_page_count: Number of additional pages the VF
* has provided into which peer addresses may be DMAd.
* @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
* If the number of peers exceeds 256, then the VF must provide
* additional pages in this array. The PF will then DMA up to
* 512 vfdi_endpoint structures into each page. These addresses
* must be page-aligned.
*/
struct vfdi_req {
u32 op;
u32 reserved1;
s32 rc;
u32 reserved2;
union {
struct {
u32 index;
u32 buf_count;
u64 addr[];
} init_evq;
struct {
u32 index;
u32 buf_count;
u32 evq;
u32 label;
u32 flags;
#define VFDI_RXQ_FLAG_SCATTER_EN 1
u32 reserved;
u64 addr[];
} init_rxq;
struct {
u32 index;
u32 buf_count;
u32 evq;
u32 label;
u32 flags;
#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
u32 reserved;
u64 addr[];
} init_txq;
struct {
u32 rxq;
u32 flags;
#define VFDI_MAC_FILTER_FLAG_RSS 1
#define VFDI_MAC_FILTER_FLAG_SCATTER 2
} mac_filter;
struct {
u64 dma_addr;
u64 peer_page_count;
u64 peer_page_addr[];
} set_status_page;
} u;
};
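/* Illustrative sketch, not part of the VFDI ABI: how a VF driver might
 * populate an INIT_EVQ request in its page-aligned request buffer before
 * signalling the buffer's DMA address to the PF with the four REQ_WORD
 * events described above.  The caller, @evq_index, @addrs and @buf_count
 * are all hypothetical.
 */
static inline void vfdi_req_example_init_evq(struct vfdi_req *req, u32 evq_index,
					     const u64 *addrs, u32 buf_count)
{
	u32 i;

	req->op = VFDI_OP_INIT_EVQ;
	req->u.init_evq.index = evq_index;
	req->u.init_evq.buf_count = buf_count;
	for (i = 0; i < buf_count; i++)
		req->u.init_evq.addr[i] = addrs[i];	/* one DMA address per 4K buffer */
}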
/**
* struct vfdi_status - Status provided by PF driver to VF driver
* @generation_start: A generation count DMA'd to VF *before* the
* rest of the structure.
* @generation_end: A generation count DMA'd to VF *after* the
* rest of the structure.
* @version: Version of this structure; currently set to 1. Later
* versions must either be layout-compatible or only be sent to VFs
* that specifically request them.
* @length: Total length of this structure including embedded tables
* @vi_scale: log2 the number of VIs available on this VF. This quantity
* is used by the hardware for register decoding.
* @max_tx_channels: The maximum number of transmit queues the VF can use.
* @rss_rxq_count: The number of receive queues present in the shared RSS
* indirection table.
* @peer_count: Total number of peers in the complete peer list. If larger
* than ARRAY_SIZE(%peers), then the VF must provide sufficient
* additional pages each of which is filled with vfdi_endpoint structures.
* @local: The MAC address and outer VLAN tag of *this* VF
* @peers: Table of peer addresses. The @tci fields in these structures
* are currently unused and must be ignored. Additional peers are
* written into any additional pages provided by the VF.
* @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
* for interrupt moderation timers, in nanoseconds. This member is only
* present if @length is sufficiently large.
*/
struct vfdi_status {
u32 generation_start;
u32 generation_end;
u32 version;
u32 length;
u8 vi_scale;
u8 max_tx_channels;
u8 rss_rxq_count;
u8 reserved1;
u16 peer_count;
u16 reserved2;
struct vfdi_endpoint local;
struct vfdi_endpoint peers[256];
/* Members below here extend version 1 of this structure */
u32 timer_quantum_ns;
};
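/* Illustrative sketch, not part of the VFDI ABI: since @generation_start is
 * DMA'd before the body of the structure and @generation_end after it, a VF
 * can detect a torn update by re-reading until the two counts match.  A real
 * consumer would also need suitable read barriers, omitted here for brevity.
 */
static inline bool vfdi_status_example_is_consistent(const struct vfdi_status *status)
{
	return status->generation_start == status->generation_end;
}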
#endif

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2006-2013 Solarflare Communications Inc.
*/
#ifndef EFX_WORKAROUNDS_H
#define EFX_WORKAROUNDS_H
/*
* Hardware workarounds.
* Bug numbers are from Solarflare's Bugzilla.
*/
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
#define EFX_WORKAROUND_10G(efx) 1
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
/* Moderation timer access must go through MCDI */
#define EFX_EF10_WORKAROUND_61265(efx) \
(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)
#endif /* EFX_WORKAROUNDS_H */

View File

@ -0,0 +1,252 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "tc.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
/* Failure paths calling this on the 'running action' set in_hw=false,
* because if the alloc had succeeded we'd've put it in acts.list and
* not still have it in act.
*/
if (in_hw) {
efx_mae_free_action_set(efx, act->fw_id);
/* in_hw is true iff we are on an acts.list; make sure to
* remove ourselves from that list before we are freed.
*/
list_del(&act->list);
}
kfree(act);
}
static void efx_tc_free_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts,
bool in_hw)
{
struct efx_tc_action_set *act, *next;
/* Failure paths set in_hw=false, because usually the acts didn't get
* to efx_mae_alloc_action_set_list(); if they did, the failure tree
* has a separate efx_mae_free_action_set_list() before calling us.
*/
if (in_hw)
efx_mae_free_action_set_list(efx, acts);
/* Any act that's on the list will be in_hw even if the list isn't */
list_for_each_entry_safe(act, next, &acts->list, list)
efx_tc_free_action_set(efx, act, true);
/* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
}
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
efx_mae_delete_rule(efx, rule->fw_id);
/* Release entries in subsidiary tables */
efx_tc_free_action_set_list(efx, &rule->acts, true);
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
u32 eg_port, struct efx_tc_flow_rule *rule)
{
struct efx_tc_action_set_list *acts = &rule->acts;
struct efx_tc_match *match = &rule->match;
struct efx_tc_action_set *act;
int rc;
match->value.ingress_port = ing_port;
match->mask.ingress_port = ~0;
act = kzalloc(sizeof(*act), GFP_KERNEL);
if (!act)
return -ENOMEM;
act->deliver = 1;
act->dest_mport = eg_port;
rc = efx_mae_alloc_action_set(efx, act);
if (rc)
goto fail1;
EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
list_add_tail(&act->list, &acts->list);
rc = efx_mae_alloc_action_set_list(efx, acts);
if (rc)
goto fail2;
rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
acts->fw_id, &rule->fw_id);
if (rc)
goto fail3;
return 0;
fail3:
efx_mae_free_action_set_list(efx, acts);
fail2:
list_del(&act->list);
efx_mae_free_action_set(efx, act->fw_id);
fail1:
kfree(act);
return rc;
}
static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
{
struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
u32 ing_port, eg_port;
efx_mae_mport_uplink(efx, &ing_port);
efx_mae_mport_wire(efx, &eg_port);
return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}
static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
{
struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
u32 ing_port, eg_port;
efx_mae_mport_wire(efx, &ing_port);
efx_mae_mport_uplink(efx, &eg_port);
return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}
int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
{
struct efx_tc_flow_rule *rule = &efv->dflt;
struct efx_nic *efx = efv->parent;
u32 ing_port, eg_port;
efx_mae_mport_mport(efx, efv->mport, &ing_port);
efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
struct efx_tc_flow_rule *rule)
{
if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
efx_tc_delete_rule(efx, rule);
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}
static int efx_tc_configure_rep_mport(struct efx_nic *efx)
{
u32 rep_mport_label;
int rc;
rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
if (rc)
return rc;
pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
efx->tc->reps_mport_id, rep_mport_label);
/* Use mport *selector* as vport ID */
efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
&efx->tc->reps_mport_vport_id);
return 0;
}
static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
{
efx_mae_free_mport(efx, efx->tc->reps_mport_id);
efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
}
int efx_tc_insert_rep_filters(struct efx_nic *efx)
{
struct efx_filter_spec promisc, allmulti;
int rc;
if (efx->type->is_vf)
return 0;
if (!efx->tc)
return 0;
efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
efx_filter_set_uc_def(&promisc);
efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
rc = efx_filter_insert_filter(efx, &promisc, false);
if (rc < 0)
return rc;
efx->tc->reps_filter_uc = rc;
efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
efx_filter_set_mc_def(&allmulti);
efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
rc = efx_filter_insert_filter(efx, &allmulti, false);
if (rc < 0)
return rc;
efx->tc->reps_filter_mc = rc;
return 0;
}
void efx_tc_remove_rep_filters(struct efx_nic *efx)
{
if (efx->type->is_vf)
return;
if (!efx->tc)
return;
if (efx->tc->reps_filter_mc >= 0)
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
efx->tc->reps_filter_mc = -1;
if (efx->tc->reps_filter_uc >= 0)
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
efx->tc->reps_filter_uc = -1;
}
int efx_init_tc(struct efx_nic *efx)
{
int rc;
rc = efx_tc_configure_default_rule_pf(efx);
if (rc)
return rc;
rc = efx_tc_configure_default_rule_wire(efx);
if (rc)
return rc;
return efx_tc_configure_rep_mport(efx);
}
void efx_fini_tc(struct efx_nic *efx)
{
/* We can get called even if efx_init_struct_tc() failed */
if (!efx->tc)
return;
efx_tc_deconfigure_rep_mport(efx);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
}
int efx_init_struct_tc(struct efx_nic *efx)
{
if (efx->type->is_vf)
return 0;
efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
if (!efx->tc)
return -ENOMEM;
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
return 0;
}
void efx_fini_struct_tc(struct efx_nic *efx)
{
if (!efx->tc)
return;
EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
kfree(efx->tc);
efx->tc = NULL;
}

View File

@ -0,0 +1,85 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_TC_H
#define EFX_TC_H
#include "net_driver.h"
struct efx_tc_action_set {
u16 deliver:1;
u32 dest_mport;
u32 fw_id; /* index of this entry in firmware actions table */
struct list_head list;
};
struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
};
struct efx_tc_match {
struct efx_tc_match_fields value;
struct efx_tc_match_fields mask;
};
struct efx_tc_action_set_list {
struct list_head list;
u32 fw_id;
};
struct efx_tc_flow_rule {
struct efx_tc_match match;
struct efx_tc_action_set_list acts;
u32 fw_id;
};
enum efx_tc_rule_prios {
EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
EFX_TC_PRIO__NUM
};
/**
* struct efx_tc_state - control plane data for TC offload
*
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
* @reps_mport_vport_id: vport_id for representor RX filters
* @dflt: Match-action rules for default switching; at priority
* %EFX_TC_PRIO_DFLT. Named by *ingress* port
* @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
* @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
*/
struct efx_tc_state {
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
struct {
struct efx_tc_flow_rule pf;
struct efx_tc_flow_rule wire;
} dflt;
};
struct efx_rep;
int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
struct efx_tc_flow_rule *rule);
int efx_tc_insert_rep_filters(struct efx_nic *efx);
void efx_tc_remove_rep_filters(struct efx_nic *efx);
int efx_init_tc(struct efx_nic *efx);
void efx_fini_tc(struct efx_nic *efx);
int efx_init_struct_tc(struct efx_nic *efx);
void efx_fini_struct_tc(struct efx_nic *efx);
#endif /* EFX_TC_H */

View File

@ -0,0 +1,32 @@
# SPDX-License-Identifier: GPL-2.0
#
# Sunplus network device configuration
#
config NET_VENDOR_SUNPLUS
bool "Sunplus devices"
default y
depends on ARCH_SUNPLUS || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this
class, say Y here.
Note that the answer to this question doesn't directly
affect the kernel: saying N will just cause the configurator
to skip all the questions about Sunplus cards. If you say Y,
you will be asked for your specific card in the following
questions.
if NET_VENDOR_SUNPLUS
config SP7021_EMAC
tristate "Sunplus Dual 10M/100M Ethernet devices"
depends on SOC_SP7021 || COMPILE_TEST
select PHYLIB
help
If you have Sunplus dual 10M/100M Ethernet devices, say Y.
The network device creates two net-device interfaces.
To compile this driver as a module, choose M here. The
module will be called sp7021_emac.
endif # NET_VENDOR_SUNPLUS

View File

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Sunplus network device drivers.
#
obj-$(CONFIG_SP7021_EMAC) += sp7021_emac.o
sp7021_emac-objs := spl2sw_driver.o spl2sw_int.o spl2sw_desc.o spl2sw_mac.o spl2sw_mdio.o spl2sw_phy.o

View File

@ -0,0 +1,270 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#ifndef __SPL2SW_DEFINE_H__
#define __SPL2SW_DEFINE_H__
#define MAX_NETDEV_NUM 2 /* Maximum # of net-device */
/* Interrupt status */
#define MAC_INT_DAISY_MODE_CHG BIT(31) /* Daisy Mode Change */
#define MAC_INT_IP_CHKSUM_ERR BIT(23) /* IP Checksum Append Error */
#define MAC_INT_WDOG_TIMER1_EXP BIT(22) /* Watchdog Timer1 Expired */
#define MAC_INT_WDOG_TIMER0_EXP BIT(21) /* Watchdog Timer0 Expired */
#define MAC_INT_INTRUDER_ALERT BIT(20) /* Intruder Alert */
#define MAC_INT_PORT_ST_CHG BIT(19) /* Port Status Change */
#define MAC_INT_BC_STORM BIT(18) /* Broadcast Storm */
#define MAC_INT_MUST_DROP_LAN BIT(17) /* Global Queue Exhausted */
#define MAC_INT_GLOBAL_QUE_FULL BIT(16) /* Global Queue Full */
#define MAC_INT_TX_SOC_PAUSE_ON BIT(15) /* Soc Port TX Pause On */
#define MAC_INT_RX_SOC_QUE_FULL BIT(14) /* Soc Port Out Queue Full */
#define MAC_INT_TX_LAN1_QUE_FULL BIT(9) /* Port 1 Out Queue Full */
#define MAC_INT_TX_LAN0_QUE_FULL BIT(8) /* Port 0 Out Queue Full */
#define MAC_INT_RX_L_DESCF BIT(7) /* Low Priority Descriptor Full */
#define MAC_INT_RX_H_DESCF BIT(6) /* High Priority Descriptor Full */
#define MAC_INT_RX_DONE_L BIT(5) /* RX Low Priority Done */
#define MAC_INT_RX_DONE_H BIT(4) /* RX High Priority Done */
#define MAC_INT_TX_DONE_L BIT(3) /* TX Low Priority Done */
#define MAC_INT_TX_DONE_H BIT(2) /* TX High Priority Done */
#define MAC_INT_TX_DES_ERR BIT(1) /* TX Descriptor Error */
#define MAC_INT_RX_DES_ERR BIT(0) /* Rx Descriptor Error */
#define MAC_INT_RX (MAC_INT_RX_DONE_H | MAC_INT_RX_DONE_L | \
MAC_INT_RX_DES_ERR)
#define MAC_INT_TX (MAC_INT_TX_DONE_L | MAC_INT_TX_DONE_H | \
MAC_INT_TX_DES_ERR)
#define MAC_INT_MASK_DEF (MAC_INT_DAISY_MODE_CHG | MAC_INT_IP_CHKSUM_ERR | \
MAC_INT_WDOG_TIMER1_EXP | MAC_INT_WDOG_TIMER0_EXP | \
MAC_INT_INTRUDER_ALERT | MAC_INT_PORT_ST_CHG | \
MAC_INT_BC_STORM | MAC_INT_MUST_DROP_LAN | \
MAC_INT_GLOBAL_QUE_FULL | MAC_INT_TX_SOC_PAUSE_ON | \
MAC_INT_RX_SOC_QUE_FULL | MAC_INT_TX_LAN1_QUE_FULL | \
MAC_INT_TX_LAN0_QUE_FULL | MAC_INT_RX_L_DESCF | \
MAC_INT_RX_H_DESCF)
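/* Note: MAC_INT_MASK_DEF covers every source above except the MAC_INT_RX and
 * MAC_INT_TX groups (bits 0-5), so RX/TX completion and descriptor-error
 * interrupts are the only ones left unmasked by default; spl2sw_ethernet_open()
 * additionally clears those bits in the mask register when a port is opened.
 */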
/* Address table search */
#define MAC_ADDR_LOOKUP_IDLE BIT(2)
#define MAC_SEARCH_NEXT_ADDR BIT(1)
#define MAC_BEGIN_SEARCH_ADDR BIT(0)
/* Address table status */
#define MAC_HASH_LOOKUP_ADDR GENMASK(31, 22)
#define MAC_R_PORT_MAP GENMASK(13, 12)
#define MAC_R_CPU_PORT GENMASK(11, 10)
#define MAC_R_VID GENMASK(9, 7)
#define MAC_R_AGE GENMASK(6, 4)
#define MAC_R_PROXY BIT(3)
#define MAC_R_MC_INGRESS BIT(2)
#define MAC_AT_TABLE_END BIT(1)
#define MAC_AT_DATA_READY BIT(0)
/* Wt mac ad0 */
#define MAC_W_PORT_MAP GENMASK(13, 12)
#define MAC_W_LAN_PORT_1 BIT(13)
#define MAC_W_LAN_PORT_0 BIT(12)
#define MAC_W_CPU_PORT GENMASK(11, 10)
#define MAC_W_CPU_PORT_1 BIT(11)
#define MAC_W_CPU_PORT_0 BIT(10)
#define MAC_W_VID GENMASK(9, 7)
#define MAC_W_AGE GENMASK(6, 4)
#define MAC_W_PROXY BIT(3)
#define MAC_W_MC_INGRESS BIT(2)
#define MAC_W_MAC_DONE BIT(1)
#define MAC_W_MAC_CMD BIT(0)
/* W mac 15_0 bus */
#define MAC_W_MAC_15_0 GENMASK(15, 0)
/* W mac 47_16 bus */
#define MAC_W_MAC_47_16 GENMASK(31, 0)
/* PVID config 0 */
#define MAC_P1_PVID GENMASK(6, 4)
#define MAC_P0_PVID GENMASK(2, 0)
/* VLAN member config 0 */
#define MAC_VLAN_MEMSET_3 GENMASK(27, 24)
#define MAC_VLAN_MEMSET_2 GENMASK(19, 16)
#define MAC_VLAN_MEMSET_1 GENMASK(11, 8)
#define MAC_VLAN_MEMSET_0 GENMASK(3, 0)
/* VLAN member config 1 */
#define MAC_VLAN_MEMSET_5 GENMASK(11, 8)
#define MAC_VLAN_MEMSET_4 GENMASK(3, 0)
/* Port ability */
#define MAC_PORT_ABILITY_LINK_ST GENMASK(25, 24)
/* CPU control */
#define MAC_EN_SOC1_AGING BIT(15)
#define MAC_EN_SOC0_AGING BIT(14)
#define MAC_DIS_LRN_SOC1 BIT(13)
#define MAC_DIS_LRN_SOC0 BIT(12)
#define MAC_EN_CRC_SOC1 BIT(9)
#define MAC_EN_CRC_SOC0 BIT(8)
#define MAC_DIS_SOC1_CPU BIT(7)
#define MAC_DIS_SOC0_CPU BIT(6)
#define MAC_DIS_BC2CPU_P1 BIT(5)
#define MAC_DIS_BC2CPU_P0 BIT(4)
#define MAC_DIS_MC2CPU GENMASK(3, 2)
#define MAC_DIS_MC2CPU_P1 BIT(3)
#define MAC_DIS_MC2CPU_P0 BIT(2)
#define MAC_DIS_UN2CPU GENMASK(1, 0)
/* Port control 0 */
#define MAC_DIS_PORT GENMASK(25, 24)
#define MAC_DIS_PORT1 BIT(25)
#define MAC_DIS_PORT0 BIT(24)
#define MAC_DIS_RMC2CPU_P1 BIT(17)
#define MAC_DIS_RMC2CPU_P0 BIT(16)
#define MAC_EN_FLOW_CTL_P1 BIT(9)
#define MAC_EN_FLOW_CTL_P0 BIT(8)
#define MAC_EN_BACK_PRESS_P1 BIT(1)
#define MAC_EN_BACK_PRESS_P0 BIT(0)
/* Port control 1 */
#define MAC_DIS_SA_LRN_P1 BIT(9)
#define MAC_DIS_SA_LRN_P0 BIT(8)
/* Port control 2 */
#define MAC_EN_AGING_P1 BIT(9)
#define MAC_EN_AGING_P0 BIT(8)
/* Switch Global control */
#define MAC_RMC_TB_FAULT_RULE GENMASK(26, 25)
#define MAC_LED_FLASH_TIME GENMASK(24, 23)
#define MAC_BC_STORM_PREV GENMASK(5, 4)
/* LED port 0 */
#define MAC_LED_ACT_HI BIT(28)
/* PHY control register 0 */
#define MAC_CPU_PHY_WT_DATA GENMASK(31, 16)
#define MAC_CPU_PHY_CMD GENMASK(14, 13)
#define MAC_CPU_PHY_REG_ADDR GENMASK(12, 8)
#define MAC_CPU_PHY_ADDR GENMASK(4, 0)
/* PHY control register 1 */
#define MAC_CPU_PHY_RD_DATA GENMASK(31, 16)
#define MAC_PHY_RD_RDY BIT(1)
#define MAC_PHY_WT_DONE BIT(0)
/* MAC force mode */
#define MAC_EXT_PHY1_ADDR GENMASK(28, 24)
#define MAC_EXT_PHY0_ADDR GENMASK(20, 16)
#define MAC_FORCE_RMII_LINK GENMASK(9, 8)
#define MAC_FORCE_RMII_EN_1 BIT(7)
#define MAC_FORCE_RMII_EN_0 BIT(6)
#define MAC_FORCE_RMII_FC GENMASK(5, 4)
#define MAC_FORCE_RMII_DPX GENMASK(3, 2)
#define MAC_FORCE_RMII_SPD GENMASK(1, 0)
/* CPU transmit trigger */
#define MAC_TRIG_L_SOC0 BIT(1)
#define MAC_TRIG_H_SOC0 BIT(0)
/* Config descriptor queue */
#define TX_DESC_NUM 16 /* # of descriptors in TX queue */
#define MAC_GUARD_DESC_NUM 2 /* # of descriptors of gap 0 */
#define RX_QUEUE0_DESC_NUM 16 /* # of descriptors in RX queue 0 */
#define RX_QUEUE1_DESC_NUM 16 /* # of descriptors in RX queue 1 */
#define TX_DESC_QUEUE_NUM 1 /* # of TX queues */
#define RX_DESC_QUEUE_NUM 2 /* # of RX queues */
#define MAC_RX_LEN_MAX 2047 /* Size of RX buffer */
/* Tx descriptor */
/* cmd1 */
#define TXD_OWN BIT(31)
#define TXD_ERR_CODE GENMASK(29, 26)
#define TXD_SOP BIT(25) /* start of a packet */
#define TXD_EOP BIT(24) /* end of a packet */
#define TXD_VLAN GENMASK(17, 12)
#define TXD_PKT_LEN GENMASK(10, 0) /* packet length */
/* cmd2 */
#define TXD_EOR BIT(31) /* end of ring */
#define TXD_BUF_LEN2 GENMASK(22, 12)
#define TXD_BUF_LEN1 GENMASK(10, 0)
/* Rx descriptor */
/* cmd1 */
#define RXD_OWN BIT(31)
#define RXD_ERR_CODE GENMASK(29, 26)
#define RXD_TCP_UDP_CHKSUM BIT(23)
#define RXD_PROXY BIT(22)
#define RXD_PROTOCOL GENMASK(21, 20)
#define RXD_VLAN_TAG BIT(19)
#define RXD_IP_CHKSUM BIT(18)
#define RXD_ROUTE_TYPE GENMASK(17, 16)
#define RXD_PKT_SP GENMASK(14, 12) /* packet source port */
#define RXD_PKT_LEN GENMASK(10, 0) /* packet length */
/* cmd2 */
#define RXD_EOR BIT(31) /* end of ring */
#define RXD_BUF_LEN2 GENMASK(22, 12)
#define RXD_BUF_LEN1 GENMASK(10, 0)
/* structure of descriptor */
struct spl2sw_mac_desc {
u32 cmd1;
u32 cmd2;
u32 addr1;
u32 addr2;
};
struct spl2sw_skb_info {
struct sk_buff *skb;
u32 mapping;
u32 len;
};
struct spl2sw_common {
void __iomem *l2sw_reg_base;
struct platform_device *pdev;
struct reset_control *rstc;
struct clk *clk;
void *desc_base;
dma_addr_t desc_dma;
s32 desc_size;
struct spl2sw_mac_desc *rx_desc[RX_DESC_QUEUE_NUM];
struct spl2sw_skb_info *rx_skb_info[RX_DESC_QUEUE_NUM];
u32 rx_pos[RX_DESC_QUEUE_NUM];
u32 rx_desc_num[RX_DESC_QUEUE_NUM];
u32 rx_desc_buff_size;
struct spl2sw_mac_desc *tx_desc;
struct spl2sw_skb_info tx_temp_skb_info[TX_DESC_NUM];
u32 tx_done_pos;
u32 tx_pos;
u32 tx_desc_full;
struct net_device *ndev[MAX_NETDEV_NUM];
struct mii_bus *mii_bus;
struct napi_struct rx_napi;
struct napi_struct tx_napi;
spinlock_t tx_lock; /* spinlock for accessing tx buffer */
spinlock_t mdio_lock; /* spinlock for mdio commands */
spinlock_t int_mask_lock; /* spinlock for accessing int mask reg. */
u8 enable;
};
struct spl2sw_mac {
struct net_device *ndev;
struct spl2sw_common *comm;
u8 mac_addr[ETH_ALEN];
phy_interface_t phy_mode;
struct device_node *phy_node;
u8 lan_port;
u8 to_vlan;
u8 vlan_id;
};
#endif

View File

@ -0,0 +1,228 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include "spl2sw_define.h"
#include "spl2sw_desc.h"
void spl2sw_rx_descs_flush(struct spl2sw_common *comm)
{
struct spl2sw_skb_info *rx_skbinfo;
struct spl2sw_mac_desc *rx_desc;
u32 i, j;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
rx_desc = comm->rx_desc[i];
rx_skbinfo = comm->rx_skb_info[i];
for (j = 0; j < comm->rx_desc_num[i]; j++) {
rx_desc[j].addr1 = rx_skbinfo[j].mapping;
rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
RXD_EOR | comm->rx_desc_buff_size :
comm->rx_desc_buff_size;
wmb(); /* Set RXD_OWN after other fields are ready. */
rx_desc[j].cmd1 = RXD_OWN;
}
}
}
void spl2sw_tx_descs_clean(struct spl2sw_common *comm)
{
u32 i;
if (!comm->tx_desc)
return;
for (i = 0; i < TX_DESC_NUM; i++) {
comm->tx_desc[i].cmd1 = 0;
wmb(); /* Clear TXD_OWN and then set other fields. */
comm->tx_desc[i].cmd2 = 0;
comm->tx_desc[i].addr1 = 0;
comm->tx_desc[i].addr2 = 0;
if (comm->tx_temp_skb_info[i].mapping) {
dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping,
comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE);
comm->tx_temp_skb_info[i].mapping = 0;
}
if (comm->tx_temp_skb_info[i].skb) {
dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb);
comm->tx_temp_skb_info[i].skb = NULL;
}
}
}
void spl2sw_rx_descs_clean(struct spl2sw_common *comm)
{
struct spl2sw_skb_info *rx_skbinfo;
struct spl2sw_mac_desc *rx_desc;
u32 i, j;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
if (!comm->rx_skb_info[i])
continue;
rx_desc = comm->rx_desc[i];
rx_skbinfo = comm->rx_skb_info[i];
for (j = 0; j < comm->rx_desc_num[i]; j++) {
rx_desc[j].cmd1 = 0;
wmb(); /* Clear RXD_OWN and then set other fields. */
rx_desc[j].cmd2 = 0;
rx_desc[j].addr1 = 0;
if (rx_skbinfo[j].skb) {
dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping,
comm->rx_desc_buff_size, DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_skbinfo[j].skb);
rx_skbinfo[j].skb = NULL;
rx_skbinfo[j].mapping = 0;
}
}
kfree(rx_skbinfo);
comm->rx_skb_info[i] = NULL;
}
}
void spl2sw_descs_clean(struct spl2sw_common *comm)
{
spl2sw_rx_descs_clean(comm);
spl2sw_tx_descs_clean(comm);
}
void spl2sw_descs_free(struct spl2sw_common *comm)
{
u32 i;
spl2sw_descs_clean(comm);
comm->tx_desc = NULL;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_desc[i] = NULL;
/* Free descriptor area */
if (comm->desc_base) {
dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base,
comm->desc_dma);
comm->desc_base = NULL;
comm->desc_dma = 0;
comm->desc_size = 0;
}
}
void spl2sw_tx_descs_init(struct spl2sw_common *comm)
{
memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) *
(TX_DESC_NUM + MAC_GUARD_DESC_NUM));
}
int spl2sw_rx_descs_init(struct spl2sw_common *comm)
{
struct spl2sw_skb_info *rx_skbinfo;
struct spl2sw_mac_desc *rx_desc;
struct sk_buff *skb;
u32 mapping;
u32 i, j;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo),
GFP_KERNEL | GFP_DMA);
if (!comm->rx_skb_info[i])
goto mem_alloc_fail;
rx_skbinfo = comm->rx_skb_info[i];
rx_desc = comm->rx_desc[i];
for (j = 0; j < comm->rx_desc_num[i]; j++) {
skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
if (!skb)
goto mem_alloc_fail;
rx_skbinfo[j].skb = skb;
mapping = dma_map_single(&comm->pdev->dev, skb->data,
comm->rx_desc_buff_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(&comm->pdev->dev, mapping))
goto mem_alloc_fail;
rx_skbinfo[j].mapping = mapping;
rx_desc[j].addr1 = mapping;
rx_desc[j].addr2 = 0;
rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
RXD_EOR | comm->rx_desc_buff_size :
comm->rx_desc_buff_size;
wmb(); /* Set RXD_OWN after other fields are effective. */
rx_desc[j].cmd1 = RXD_OWN;
}
}
return 0;
mem_alloc_fail:
spl2sw_rx_descs_clean(comm);
return -ENOMEM;
}
int spl2sw_descs_alloc(struct spl2sw_common *comm)
{
s32 desc_size;
u32 i;
/* Alloc descriptor area */
desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc);
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc);
comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma,
GFP_KERNEL);
if (!comm->desc_base)
return -ENOMEM;
comm->desc_size = desc_size;
/* Setup Tx descriptor */
comm->tx_desc = comm->desc_base;
/* Setup Rx descriptor */
comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM];
for (i = 1; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1];
return 0;
}
int spl2sw_descs_init(struct spl2sw_common *comm)
{
u32 i, ret;
/* Initialize rx descriptor's data */
comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM;
comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
comm->rx_desc[i] = NULL;
comm->rx_skb_info[i] = NULL;
comm->rx_pos[i] = 0;
}
comm->rx_desc_buff_size = MAC_RX_LEN_MAX;
/* Initialize tx descriptor's data */
comm->tx_done_pos = 0;
comm->tx_desc = NULL;
comm->tx_pos = 0;
comm->tx_desc_full = 0;
for (i = 0; i < TX_DESC_NUM; i++)
comm->tx_temp_skb_info[i].skb = NULL;
/* Allocate tx & rx descriptors. */
ret = spl2sw_descs_alloc(comm);
if (ret)
return ret;
spl2sw_tx_descs_init(comm);
return spl2sw_rx_descs_init(comm);
}

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#ifndef __SPL2SW_DESC_H__
#define __SPL2SW_DESC_H__
void spl2sw_rx_descs_flush(struct spl2sw_common *comm);
void spl2sw_tx_descs_clean(struct spl2sw_common *comm);
void spl2sw_rx_descs_clean(struct spl2sw_common *comm);
void spl2sw_descs_clean(struct spl2sw_common *comm);
void spl2sw_descs_free(struct spl2sw_common *comm);
void spl2sw_tx_descs_init(struct spl2sw_common *comm);
int spl2sw_rx_descs_init(struct spl2sw_common *comm);
int spl2sw_descs_alloc(struct spl2sw_common *comm);
int spl2sw_descs_init(struct spl2sw_common *comm);
#endif

View File

@ -0,0 +1,565 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/nvmem-consumer.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/of_net.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/of.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_desc.h"
#include "spl2sw_mdio.h"
#include "spl2sw_phy.h"
#include "spl2sw_int.h"
#include "spl2sw_mac.h"
/* net device operations */
static int spl2sw_ethernet_open(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
u32 mask;
netdev_dbg(ndev, "Open port = %x\n", mac->lan_port);
comm->enable |= mac->lan_port;
spl2sw_mac_hw_start(comm);
/* Enable TX and RX interrupts */
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~(MAC_INT_TX | MAC_INT_RX);
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
phy_start(ndev->phydev);
netif_start_queue(ndev);
return 0;
}
static int spl2sw_ethernet_stop(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
netif_stop_queue(ndev);
comm->enable &= ~mac->lan_port;
phy_stop(ndev->phydev);
spl2sw_mac_hw_stop(comm);
return 0;
}
static int spl2sw_ethernet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
struct spl2sw_skb_info *skbinfo;
struct spl2sw_mac_desc *txdesc;
unsigned long flags;
u32 mapping;
u32 tx_pos;
u32 cmd1;
u32 cmd2;
if (unlikely(comm->tx_desc_full == 1)) {
/* No TX descriptors left. Wait for tx interrupt. */
netdev_dbg(ndev, "TX descriptor queue full when xmit!\n");
return NETDEV_TX_BUSY;
}
/* If skb size is shorter than ETH_ZLEN (60), pad it with 0. */
if (unlikely(skb->len < ETH_ZLEN)) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
skb_put(skb, ETH_ZLEN - skb->len);
}
mapping = dma_map_single(&comm->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&comm->pdev->dev, mapping)) {
ndev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
spin_lock_irqsave(&comm->tx_lock, flags);
tx_pos = comm->tx_pos;
txdesc = &comm->tx_desc[tx_pos];
skbinfo = &comm->tx_temp_skb_info[tx_pos];
skbinfo->mapping = mapping;
skbinfo->len = skb->len;
skbinfo->skb = skb;
/* Set up a TX descriptor */
cmd1 = TXD_OWN | TXD_SOP | TXD_EOP | (mac->to_vlan << 12) |
(skb->len & TXD_PKT_LEN);
cmd2 = skb->len & TXD_BUF_LEN1;
if (tx_pos == (TX_DESC_NUM - 1))
cmd2 |= TXD_EOR;
txdesc->addr1 = skbinfo->mapping;
txdesc->cmd2 = cmd2;
wmb(); /* Set TXD_OWN after other fields are effective. */
txdesc->cmd1 = cmd1;
/* Move tx_pos to next position */
tx_pos = ((tx_pos + 1) == TX_DESC_NUM) ? 0 : tx_pos + 1;
if (unlikely(tx_pos == comm->tx_done_pos)) {
netif_stop_queue(ndev);
comm->tx_desc_full = 1;
}
comm->tx_pos = tx_pos;
wmb(); /* make sure settings are effective. */
/* Trigger mac to transmit */
writel(MAC_TRIG_L_SOC0, comm->l2sw_reg_base + L2SW_CPU_TX_TRIG);
spin_unlock_irqrestore(&comm->tx_lock, flags);
return NETDEV_TX_OK;
}
static void spl2sw_ethernet_set_rx_mode(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
spl2sw_mac_rx_mode_set(mac);
}
static int spl2sw_ethernet_set_mac_address(struct net_device *ndev, void *addr)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
int err;
err = eth_mac_addr(ndev, addr);
if (err)
return err;
/* Delete the old MAC address */
netdev_dbg(ndev, "Old Ethernet (MAC) address = %pM\n", mac->mac_addr);
if (is_valid_ether_addr(mac->mac_addr)) {
err = spl2sw_mac_addr_del(mac);
if (err)
return err;
}
/* Set the MAC address */
ether_addr_copy(mac->mac_addr, ndev->dev_addr);
return spl2sw_mac_addr_add(mac);
}
static void spl2sw_ethernet_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
unsigned long flags;
int i;
netdev_err(ndev, "TX timed out!\n");
ndev->stats.tx_errors++;
spin_lock_irqsave(&comm->tx_lock, flags);
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
netif_stop_queue(comm->ndev[i]);
spl2sw_mac_soft_reset(comm);
/* Accept TX packets again. */
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
netif_trans_update(comm->ndev[i]);
netif_wake_queue(comm->ndev[i]);
}
spin_unlock_irqrestore(&comm->tx_lock, flags);
}
static const struct net_device_ops netdev_ops = {
.ndo_open = spl2sw_ethernet_open,
.ndo_stop = spl2sw_ethernet_stop,
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
.ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
static void spl2sw_check_mac_vendor_id_and_convert(u8 *mac_addr)
{
	/* The byte order of the MAC address is reversed on some samples.
* Check vendor id and convert byte order if it is wrong.
* OUI of Sunplus: fc:4b:bc
*/
if (mac_addr[5] == 0xfc && mac_addr[4] == 0x4b && mac_addr[3] == 0xbc &&
(mac_addr[0] != 0xfc || mac_addr[1] != 0x4b || mac_addr[2] != 0xbc)) {
swap(mac_addr[0], mac_addr[5]);
swap(mac_addr[1], mac_addr[4]);
swap(mac_addr[2], mac_addr[3]);
}
}
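/* For example, an address stored in nvmem as 12:34:56:bc:4b:fc has the
 * Sunplus OUI in its last three bytes but not its first three, so the swaps
 * above convert it to fc:4b:bc:56:34:12.
 */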
static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *np,
void *addrbuf)
{
struct nvmem_cell *cell;
ssize_t len;
u8 *mac;
/* Get nvmem cell of mac-address from dts. */
cell = of_nvmem_cell_get(np, "mac-address");
if (IS_ERR(cell))
return PTR_ERR(cell);
/* Read mac address from nvmem cell. */
mac = nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
if (IS_ERR(mac))
return PTR_ERR(mac);
if (len != ETH_ALEN) {
kfree(mac);
dev_info(dev, "Invalid length of mac address in nvmem!\n");
return -EINVAL;
}
	/* The byte order is reversed on some samples.
	 * Convert it here if necessary.
*/
spl2sw_check_mac_vendor_id_and_convert(mac);
/* Check if mac address is valid */
if (!is_valid_ether_addr(mac)) {
		dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac);
		kfree(mac);
return -EINVAL;
}
ether_addr_copy(addrbuf, mac);
kfree(mac);
return 0;
}
static u32 spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr,
struct net_device **r_ndev)
{
struct net_device *ndev;
struct spl2sw_mac *mac;
int ret;
/* Allocate the devices, and also allocate spl2sw_mac,
* we can get it by netdev_priv().
*/
ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*mac));
if (!ndev) {
*r_ndev = NULL;
return -ENOMEM;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &netdev_ops;
mac = netdev_priv(ndev);
mac->ndev = ndev;
ether_addr_copy(mac->mac_addr, mac_addr);
eth_hw_addr_set(ndev, mac_addr);
dev_info(&pdev->dev, "Ethernet (MAC) address = %pM\n", mac_addr);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n",
ndev->name);
free_netdev(ndev);
*r_ndev = NULL;
return ret;
}
netdev_dbg(ndev, "Registered net device \"%s\" successfully.\n", ndev->name);
*r_ndev = ndev;
return 0;
}
static struct device_node *spl2sw_get_eth_child_node(struct device_node *ether_np, int id)
{
struct device_node *port_np;
int port_id;
for_each_child_of_node(ether_np, port_np) {
/* It is not a 'port' node, continue. */
if (strcmp(port_np->name, "port"))
continue;
if (of_property_read_u32(port_np, "reg", &port_id) < 0)
continue;
if (port_id == id)
return port_np;
}
/* Not found! */
return NULL;
}
static int spl2sw_probe(struct platform_device *pdev)
{
struct device_node *eth_ports_np;
struct device_node *port_np;
struct spl2sw_common *comm;
struct device_node *phy_np;
phy_interface_t phy_mode;
struct net_device *ndev;
struct spl2sw_mac *mac;
u8 mac_addr[ETH_ALEN];
int irq, i, ret;
if (platform_get_drvdata(pdev))
return -ENODEV;
/* Allocate memory for 'spl2sw_common' area. */
comm = devm_kzalloc(&pdev->dev, sizeof(*comm), GFP_KERNEL);
if (!comm)
return -ENOMEM;
comm->pdev = pdev;
platform_set_drvdata(pdev, comm);
spin_lock_init(&comm->tx_lock);
spin_lock_init(&comm->mdio_lock);
spin_lock_init(&comm->int_mask_lock);
/* Get memory resource 0 from dts. */
comm->l2sw_reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(comm->l2sw_reg_base))
return PTR_ERR(comm->l2sw_reg_base);
/* Get irq resource from dts. */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
irq = ret;
/* Get clock controller. */
comm->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(comm->clk)) {
dev_err_probe(&pdev->dev, PTR_ERR(comm->clk),
"Failed to retrieve clock controller!\n");
return PTR_ERR(comm->clk);
}
/* Get reset controller. */
comm->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(comm->rstc)) {
dev_err_probe(&pdev->dev, PTR_ERR(comm->rstc),
"Failed to retrieve reset controller!\n");
return PTR_ERR(comm->rstc);
}
/* Enable clock. */
ret = clk_prepare_enable(comm->clk);
if (ret)
return ret;
udelay(1);
/* Reset MAC */
reset_control_assert(comm->rstc);
udelay(1);
reset_control_deassert(comm->rstc);
usleep_range(1000, 2000);
/* Request irq. */
ret = devm_request_irq(&pdev->dev, irq, spl2sw_ethernet_interrupt, 0,
dev_name(&pdev->dev), comm);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq #%d!\n", irq);
goto out_clk_disable;
}
/* Initialize TX and RX descriptors. */
ret = spl2sw_descs_init(comm);
if (ret) {
dev_err(&pdev->dev, "Fail to initialize mac descriptors!\n");
spl2sw_descs_free(comm);
goto out_clk_disable;
}
/* Initialize MAC. */
spl2sw_mac_init(comm);
/* Initialize mdio bus */
ret = spl2sw_mdio_init(comm);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize mdio bus!\n");
goto out_clk_disable;
}
/* Get child node ethernet-ports. */
eth_ports_np = of_get_child_by_name(pdev->dev.of_node, "ethernet-ports");
if (!eth_ports_np) {
dev_err(&pdev->dev, "No ethernet-ports child node found!\n");
ret = -ENODEV;
goto out_free_mdio;
}
for (i = 0; i < MAX_NETDEV_NUM; i++) {
/* Get port@i of node ethernet-ports. */
port_np = spl2sw_get_eth_child_node(eth_ports_np, i);
if (!port_np)
continue;
/* Get phy-mode. */
if (of_get_phy_mode(port_np, &phy_mode)) {
dev_err(&pdev->dev, "Failed to get phy-mode property of port@%d!\n",
i);
continue;
}
/* Get phy-handle. */
phy_np = of_parse_phandle(port_np, "phy-handle", 0);
if (!phy_np) {
dev_err(&pdev->dev, "Failed to get phy-handle property of port@%d!\n",
i);
continue;
}
/* Get mac-address from nvmem. */
ret = spl2sw_nvmem_get_mac_address(&pdev->dev, port_np, mac_addr);
if (ret == -EPROBE_DEFER) {
goto out_unregister_dev;
} else if (ret) {
dev_info(&pdev->dev, "Generate a random mac address!\n");
eth_random_addr(mac_addr);
}
/* Initialize the net device. */
ret = spl2sw_init_netdev(pdev, mac_addr, &ndev);
if (ret)
goto out_unregister_dev;
ndev->irq = irq;
comm->ndev[i] = ndev;
mac = netdev_priv(ndev);
mac->phy_node = phy_np;
mac->phy_mode = phy_mode;
mac->comm = comm;
mac->lan_port = 0x1 << i; /* forward to port i */
mac->to_vlan = 0x1 << i; /* vlan group: i */
mac->vlan_id = i; /* vlan group: i */
/* Set MAC address */
ret = spl2sw_mac_addr_add(mac);
if (ret)
goto out_unregister_dev;
spl2sw_mac_rx_mode_set(mac);
}
/* Find first valid net device. */
for (i = 0; i < MAX_NETDEV_NUM; i++) {
if (comm->ndev[i])
break;
}
if (i >= MAX_NETDEV_NUM) {
dev_err(&pdev->dev, "No valid ethernet port!\n");
ret = -ENODEV;
goto out_free_mdio;
}
/* Save first valid net device */
ndev = comm->ndev[i];
ret = spl2sw_phy_connect(comm);
if (ret) {
netdev_err(ndev, "Failed to connect phy!\n");
goto out_unregister_dev;
}
/* Add and enable napi. */
netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT);
napi_enable(&comm->rx_napi);
netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
napi_enable(&comm->tx_napi);
return 0;
out_unregister_dev:
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
unregister_netdev(comm->ndev[i]);
out_free_mdio:
spl2sw_mdio_remove(comm);
out_clk_disable:
clk_disable_unprepare(comm->clk);
return ret;
}
static int spl2sw_remove(struct platform_device *pdev)
{
struct spl2sw_common *comm;
int i;
comm = platform_get_drvdata(pdev);
spl2sw_phy_remove(comm);
/* Unregister and free net device. */
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
unregister_netdev(comm->ndev[i]);
comm->enable = 0;
spl2sw_mac_hw_stop(comm);
spl2sw_descs_free(comm);
/* Disable and delete napi. */
napi_disable(&comm->rx_napi);
netif_napi_del(&comm->rx_napi);
napi_disable(&comm->tx_napi);
netif_napi_del(&comm->tx_napi);
spl2sw_mdio_remove(comm);
clk_disable_unprepare(comm->clk);
return 0;
}
static const struct of_device_id spl2sw_of_match[] = {
{.compatible = "sunplus,sp7021-emac"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spl2sw_of_match);
static struct platform_driver spl2sw_driver = {
.probe = spl2sw_probe,
.remove = spl2sw_remove,
.driver = {
.name = "sp7021_emac",
.of_match_table = spl2sw_of_match,
},
};
module_platform_driver(spl2sw_driver);
MODULE_AUTHOR("Wells Lu <wellslutw@gmail.com>");
MODULE_DESCRIPTION("Sunplus Dual 10M/100M Ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,273 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_int.h"
int spl2sw_rx_poll(struct napi_struct *napi, int budget)
{
struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
struct spl2sw_mac_desc *desc, *h_desc;
struct net_device_stats *stats;
struct sk_buff *skb, *new_skb;
struct spl2sw_skb_info *sinfo;
int budget_left = budget;
unsigned long flags;
u32 rx_pos, pkg_len;
u32 num, rx_count;
s32 queue;
u32 mask;
int port;
u32 cmd;
u32 len;
/* Process high-priority queue and then low-priority queue. */
for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
rx_pos = comm->rx_pos[queue];
rx_count = comm->rx_desc_num[queue];
for (num = 0; num < rx_count && budget_left; num++) {
sinfo = comm->rx_skb_info[queue] + rx_pos;
desc = comm->rx_desc[queue] + rx_pos;
cmd = desc->cmd1;
if (cmd & RXD_OWN)
break;
port = FIELD_GET(RXD_PKT_SP, cmd);
if (port < MAX_NETDEV_NUM && comm->ndev[port])
stats = &comm->ndev[port]->stats;
else
goto spl2sw_rx_poll_rec_err;
pkg_len = FIELD_GET(RXD_PKT_LEN, cmd);
if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) {
stats->rx_length_errors++;
stats->rx_dropped++;
goto spl2sw_rx_poll_rec_err;
}
dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
comm->rx_desc_buff_size, DMA_FROM_DEVICE);
skb = sinfo->skb;
skb_put(skb, pkg_len - 4); /* Minus FCS */
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = eth_type_trans(skb, comm->ndev[port]);
len = skb->len;
netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += len;
/* Allocate a new skb for receiving. */
new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
if (unlikely(!new_skb)) {
desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
RXD_EOR : 0;
sinfo->skb = NULL;
sinfo->mapping = 0;
desc->addr1 = 0;
goto spl2sw_rx_poll_alloc_err;
}
sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
comm->rx_desc_buff_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
dev_kfree_skb_irq(new_skb);
desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
RXD_EOR : 0;
sinfo->skb = NULL;
sinfo->mapping = 0;
desc->addr1 = 0;
goto spl2sw_rx_poll_alloc_err;
}
sinfo->skb = new_skb;
desc->addr1 = sinfo->mapping;
spl2sw_rx_poll_rec_err:
desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
RXD_EOR | comm->rx_desc_buff_size :
comm->rx_desc_buff_size;
wmb(); /* Set RXD_OWN after other fields are effective. */
desc->cmd1 = RXD_OWN;
spl2sw_rx_poll_alloc_err:
/* Move rx_pos to next position */
rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1;
budget_left--;
/* If there are packets in high-priority queue,
* stop processing low-priority queue.
*/
if (queue == 1 && !(h_desc->cmd1 & RXD_OWN))
break;
}
comm->rx_pos[queue] = rx_pos;
/* Save pointer to last rx descriptor of high-priority queue. */
if (queue == 0)
h_desc = comm->rx_desc[queue] + rx_pos;
}
spin_lock_irqsave(&comm->int_mask_lock, flags);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~MAC_INT_RX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock_irqrestore(&comm->int_mask_lock, flags);
napi_complete(napi);
return budget - budget_left;
}
int spl2sw_tx_poll(struct napi_struct *napi, int budget)
{
struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi);
struct spl2sw_skb_info *skbinfo;
struct net_device_stats *stats;
int budget_left = budget;
unsigned long flags;
u32 tx_done_pos;
u32 mask;
u32 cmd;
int i;
spin_lock(&comm->tx_lock);
tx_done_pos = comm->tx_done_pos;
while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) {
cmd = comm->tx_desc[tx_done_pos].cmd1;
if (cmd & TXD_OWN)
break;
skbinfo = &comm->tx_temp_skb_info[tx_done_pos];
if (unlikely(!skbinfo->skb))
goto spl2sw_tx_poll_next;
i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1;
if (i < MAX_NETDEV_NUM && comm->ndev[i])
stats = &comm->ndev[i]->stats;
else
goto spl2sw_tx_poll_unmap;
if (unlikely(cmd & (TXD_ERR_CODE))) {
stats->tx_errors++;
} else {
stats->tx_packets++;
stats->tx_bytes += skbinfo->len;
}
spl2sw_tx_poll_unmap:
dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
DMA_TO_DEVICE);
skbinfo->mapping = 0;
dev_kfree_skb_irq(skbinfo->skb);
skbinfo->skb = NULL;
spl2sw_tx_poll_next:
/* Move tx_done_pos to next position */
tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 0 : tx_done_pos + 1;
if (comm->tx_desc_full == 1)
comm->tx_desc_full = 0;
budget_left--;
}
comm->tx_done_pos = tx_done_pos;
if (!comm->tx_desc_full)
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
if (netif_queue_stopped(comm->ndev[i]))
netif_wake_queue(comm->ndev[i]);
spin_unlock(&comm->tx_lock);
spin_lock_irqsave(&comm->int_mask_lock, flags);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~MAC_INT_TX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock_irqrestore(&comm->int_mask_lock, flags);
napi_complete(napi);
return budget - budget_left;
}
irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id)
{
struct spl2sw_common *comm = (struct spl2sw_common *)dev_id;
u32 status;
u32 mask;
int i;
status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
if (unlikely(!status)) {
dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n");
goto spl2sw_ethernet_int_out;
}
writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
if (status & MAC_INT_RX) {
/* Disable RX interrupts. */
spin_lock(&comm->int_mask_lock);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask |= MAC_INT_RX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock(&comm->int_mask_lock);
if (unlikely(status & MAC_INT_RX_DES_ERR)) {
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
comm->ndev[i]->stats.rx_fifo_errors++;
break;
}
dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
}
napi_schedule(&comm->rx_napi);
}
if (status & MAC_INT_TX) {
/* Disable TX interrupts. */
spin_lock(&comm->int_mask_lock);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask |= MAC_INT_TX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock(&comm->int_mask_lock);
if (unlikely(status & MAC_INT_TX_DES_ERR)) {
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
comm->ndev[i]->stats.tx_fifo_errors++;
break;
}
dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n");
spin_lock(&comm->int_mask_lock);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~MAC_INT_TX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock(&comm->int_mask_lock);
} else {
napi_schedule(&comm->tx_napi);
}
}
spl2sw_ethernet_int_out:
return IRQ_HANDLED;
}

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#ifndef __SPL2SW_INT_H__
#define __SPL2SW_INT_H__
int spl2sw_rx_poll(struct napi_struct *napi, int budget);
int spl2sw_tx_poll(struct napi_struct *napi, int budget);
irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id);
#endif

View File

@ -0,0 +1,274 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_desc.h"
#include "spl2sw_mac.h"
void spl2sw_mac_hw_stop(struct spl2sw_common *comm)
{
u32 reg;
if (comm->enable == 0) {
/* Mask and clear all interrupts. */
writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
/* Disable cpu 0 and cpu 1. */
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
}
/* Disable LAN ports. */
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
reg |= FIELD_PREP(MAC_DIS_PORT, ~comm->enable);
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
}
void spl2sw_mac_hw_start(struct spl2sw_common *comm)
{
u32 reg;
/* Enable cpu port 0 (6) & CRC padding (8) */
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg &= ~MAC_DIS_SOC0_CPU;
reg |= MAC_EN_CRC_SOC0;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
/* Enable port 0 & port 1 */
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
reg &= FIELD_PREP(MAC_DIS_PORT, ~comm->enable) | ~MAC_DIS_PORT;
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
}
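
The FIELD_PREP(MAC_DIS_PORT, ~comm->enable) expressions above are easier to follow with concrete numbers. A minimal sketch, assuming MAC_DIS_PORT is the two-bit per-LAN-port disable field at GENMASK(1, 0) (the bit position is an assumption here, as is the helper name):

static inline u32 spl2sw_port_disable_bits(u32 enable)
{
	/* Bits set here correspond to LAN ports that must stay disabled. */
	return FIELD_PREP(MAC_DIS_PORT, ~enable);
}

/* With enable == 0x1 (only port 0 up) this returns 0x2, so hw_stop()'s
 * "reg |= ..." keeps port 1 disabled, while hw_start()'s
 * "reg &= ... | ~MAC_DIS_PORT" reduces to "reg &= ~0x1" and re-enables
 * only port 0 without touching any other bit of L2SW_PORT_CNTL0.
 */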
int spl2sw_mac_addr_add(struct spl2sw_mac *mac)
{
struct spl2sw_common *comm = mac->comm;
u32 reg;
int ret;
/* Write 6-octet MAC address. */
writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8),
comm->l2sw_reg_base + L2SW_W_MAC_15_0);
writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) +
(mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24),
comm->l2sw_reg_base + L2SW_W_MAC_47_16);
/* Set learn port = cpu_port, aging = 1 */
reg = MAC_W_CPU_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) |
FIELD_PREP(MAC_W_AGE, 1) | MAC_W_MAC_CMD;
writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
/* Wait for completion. */
ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true,
comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
if (ret) {
netdev_err(mac->ndev, "Failed to add address to table!\n");
return ret;
}
netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n",
readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0),
(u32)FIELD_GET(MAC_W_MAC_47_16,
readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)),
(u32)FIELD_GET(MAC_W_MAC_15_0,
readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0)));
return 0;
}
int spl2sw_mac_addr_del(struct spl2sw_mac *mac)
{
struct spl2sw_common *comm = mac->comm;
u32 reg;
int ret;
/* Write 6-octet MAC address. */
writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8),
comm->l2sw_reg_base + L2SW_W_MAC_15_0);
writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) +
(mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24),
comm->l2sw_reg_base + L2SW_W_MAC_47_16);
/* Set learn port = lan_port0 and aging = 0
* to wipe (age) out the entry.
*/
reg = MAC_W_LAN_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) | MAC_W_MAC_CMD;
writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
/* Wait for completion. */
ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true,
comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
if (ret) {
netdev_err(mac->ndev, "Failed to delete address from table!\n");
return ret;
}
netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n",
readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0),
(u32)FIELD_GET(MAC_W_MAC_47_16,
readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)),
(u32)FIELD_GET(MAC_W_MAC_15_0,
readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0)));
return 0;
}
void spl2sw_mac_hw_init(struct spl2sw_common *comm)
{
u32 reg;
/* Disable cpu port 0 and cpu port 1. */
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
/* Set base addresses of TX and RX queues. */
writel(comm->desc_dma, comm->l2sw_reg_base + L2SW_TX_LBASE_ADDR_0);
writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * TX_DESC_NUM,
comm->l2sw_reg_base + L2SW_TX_HBASE_ADDR_0);
writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM +
MAC_GUARD_DESC_NUM), comm->l2sw_reg_base + L2SW_RX_HBASE_ADDR_0);
writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM +
MAC_GUARD_DESC_NUM + RX_QUEUE0_DESC_NUM),
comm->l2sw_reg_base + L2SW_RX_LBASE_ADDR_0);
/* Fc_rls_th=0x4a, Fc_set_th=0x3a, Drop_rls_th=0x2d, Drop_set_th=0x1d */
writel(0x4a3a2d1d, comm->l2sw_reg_base + L2SW_FL_CNTL_TH);
/* Cpu_rls_th=0x4a, Cpu_set_th=0x3a, Cpu_th=0x12, Port_th=0x12 */
writel(0x4a3a1212, comm->l2sw_reg_base + L2SW_CPU_FL_CNTL_TH);
/* mtcc_lmt=0xf, Pri_th_l=6, Pri_th_h=6, weigh_8x_en=1 */
writel(0xf6680000, comm->l2sw_reg_base + L2SW_PRI_FL_CNTL);
/* High-active LED */
reg = readl(comm->l2sw_reg_base + L2SW_LED_PORT0);
reg |= MAC_LED_ACT_HI;
writel(reg, comm->l2sw_reg_base + L2SW_LED_PORT0);
/* Disable aging of cpu port 0 & 1.
* Disable SA learning of cpu port 0 & 1.
* Enable UC and MC packets
*/
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg &= ~(MAC_EN_SOC1_AGING | MAC_EN_SOC0_AGING |
MAC_DIS_BC2CPU_P1 | MAC_DIS_BC2CPU_P0 |
MAC_DIS_MC2CPU_P1 | MAC_DIS_MC2CPU_P0);
reg |= MAC_DIS_LRN_SOC1 | MAC_DIS_LRN_SOC0;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
/* Enable RMC2CPU for port 0 & 1
* Enable Flow control for port 0 & 1
* Enable Back pressure for port 0 & 1
*/
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
reg &= ~(MAC_DIS_RMC2CPU_P1 | MAC_DIS_RMC2CPU_P0);
reg |= MAC_EN_FLOW_CTL_P1 | MAC_EN_FLOW_CTL_P0 |
MAC_EN_BACK_PRESS_P1 | MAC_EN_BACK_PRESS_P0;
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
/* Disable LAN port SA learning. */
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL1);
reg |= MAC_DIS_SA_LRN_P1 | MAC_DIS_SA_LRN_P0;
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL1);
/* Enable rmii force mode and
* set both external phy-address to 31.
*/
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
reg &= ~(MAC_EXT_PHY1_ADDR | MAC_EXT_PHY0_ADDR);
reg |= FIELD_PREP(MAC_EXT_PHY1_ADDR, 31) | FIELD_PREP(MAC_EXT_PHY0_ADDR, 31);
reg |= MAC_FORCE_RMII_EN_1 | MAC_FORCE_RMII_EN_0;
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
/* Port 0: VLAN group 0
* Port 1: VLAN group 1
*/
reg = FIELD_PREP(MAC_P1_PVID, 1) | FIELD_PREP(MAC_P0_PVID, 0);
writel(reg, comm->l2sw_reg_base + L2SW_PVID_CONFIG0);
/* VLAN group 0: cpu0 (bit3) + port0 (bit0) = 1001 = 0x9
* VLAN group 1: cpu0 (bit3) + port1 (bit1) = 1010 = 0xa
*/
reg = FIELD_PREP(MAC_VLAN_MEMSET_1, 0xa) | FIELD_PREP(MAC_VLAN_MEMSET_0, 9);
writel(reg, comm->l2sw_reg_base + L2SW_VLAN_MEMSET_CONFIG0);
/* RMC forward: to_cpu (1)
* LED: 60mS (1)
* BC storm prev: 31 BC (1)
*/
reg = readl(comm->l2sw_reg_base + L2SW_SW_GLB_CNTL);
reg &= ~(MAC_RMC_TB_FAULT_RULE | MAC_LED_FLASH_TIME | MAC_BC_STORM_PREV);
reg |= FIELD_PREP(MAC_RMC_TB_FAULT_RULE, 1) |
FIELD_PREP(MAC_LED_FLASH_TIME, 1) |
FIELD_PREP(MAC_BC_STORM_PREV, 1);
writel(reg, comm->l2sw_reg_base + L2SW_SW_GLB_CNTL);
writel(MAC_INT_MASK_DEF, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
}
void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac)
{
struct spl2sw_common *comm = mac->comm;
struct net_device *ndev = mac->ndev;
u32 mask, reg, rx_mode;
netdev_dbg(ndev, "ndev->flags = %08x\n", ndev->flags);
mask = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) |
FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port);
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
if (ndev->flags & IFF_PROMISC) {
/* Allow MC and unknown UC packets */
rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) |
FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port);
} else if ((!netdev_mc_empty(ndev) && (ndev->flags & IFF_MULTICAST)) ||
(ndev->flags & IFF_ALLMULTI)) {
/* Allow MC packets */
rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port);
} else {
/* Disable MC and unknown UC packets */
rx_mode = 0;
}
writel((reg & (~mask)) | ((~rx_mode) & mask), comm->l2sw_reg_base + L2SW_CPU_CNTL);
netdev_dbg(ndev, "cpu_cntl = %08x\n", readl(comm->l2sw_reg_base + L2SW_CPU_CNTL));
}
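
The read-modify-write at the end of spl2sw_mac_rx_mode_set() hides a double negation that is easy to misread: mask selects this port's MC2CPU/UN2CPU disable bits, while rx_mode holds the traffic classes that should be allowed, so allowed classes must end up as zero bits in the register. A small sketch of the same computation with the intermediate steps spelled out (the helper name is illustrative only):

static inline u32 spl2sw_rx_mode_bits(u32 reg, u32 mask, u32 rx_mode)
{
	u32 keep = reg & ~mask;		/* bits of the other port, untouched */
	u32 disable = ~rx_mode & mask;	/* disable every class not allowed  */

	return keep | disable;		/* value written to L2SW_CPU_CNTL   */
}

For IFF_PROMISC, rx_mode equals mask, so disable is 0 and both multicast and unknown-unicast delivery are enabled for this port; with neither multicast nor promiscuous mode set, rx_mode is 0 and both disable bits are written back as 1.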
void spl2sw_mac_init(struct spl2sw_common *comm)
{
u32 i;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_pos[i] = 0;
mb(); /* make sure settings are effective. */
spl2sw_mac_hw_init(comm);
}
void spl2sw_mac_soft_reset(struct spl2sw_common *comm)
{
u32 i;
spl2sw_mac_hw_stop(comm);
spl2sw_rx_descs_flush(comm);
comm->tx_pos = 0;
comm->tx_done_pos = 0;
comm->tx_desc_full = 0;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_pos[i] = 0;
mb(); /* make sure settings are effective. */
spl2sw_mac_hw_init(comm);
spl2sw_mac_hw_start(comm);
}

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#ifndef __SPL2SW_MAC_H__
#define __SPL2SW_MAC_H__
void spl2sw_mac_hw_stop(struct spl2sw_common *comm);
void spl2sw_mac_hw_start(struct spl2sw_common *comm);
int spl2sw_mac_addr_add(struct spl2sw_mac *mac);
int spl2sw_mac_addr_del(struct spl2sw_mac *mac);
void spl2sw_mac_hw_init(struct spl2sw_common *comm);
void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac);
void spl2sw_mac_init(struct spl2sw_common *comm);
void spl2sw_mac_soft_reset(struct spl2sw_common *comm);
#endif

View File

@ -0,0 +1,131 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_mdio.h"
#define SPL2SW_MDIO_READ_CMD 0x02
#define SPL2SW_MDIO_WRITE_CMD 0x01
static int spl2sw_mdio_access(struct spl2sw_common *comm, u8 cmd, u8 addr, u8 regnum, u16 wdata)
{
u32 reg, reg2;
u32 val;
int ret;
/* Note that addr (of phy) should match either ext_phy0_addr
* or ext_phy1_addr, or mdio commands won't be sent out.
*/
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
reg &= ~MAC_EXT_PHY0_ADDR;
reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, addr);
reg2 = FIELD_PREP(MAC_CPU_PHY_WT_DATA, wdata) | FIELD_PREP(MAC_CPU_PHY_CMD, cmd) |
FIELD_PREP(MAC_CPU_PHY_REG_ADDR, regnum) | FIELD_PREP(MAC_CPU_PHY_ADDR, addr);
/* Set ext_phy0_addr and then issue mdio command.
* No interrupt is allowed in between.
*/
spin_lock_irq(&comm->mdio_lock);
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
writel(reg2, comm->l2sw_reg_base + L2SW_PHY_CNTL_REG0);
spin_unlock_irq(&comm->mdio_lock);
ret = read_poll_timeout(readl, val, val & cmd, 1, 1000, true,
comm->l2sw_reg_base + L2SW_PHY_CNTL_REG1);
/* Set ext_phy0_addr back to 31 to prevent the hardware
 * auto-mdio function from sending mdio commands to the phy.
 */
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
reg &= ~MAC_EXT_PHY0_ADDR;
reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, 31);
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
if (ret == 0)
return val >> 16;
else
return ret;
}
static int spl2sw_mii_read(struct mii_bus *bus, int addr, int regnum)
{
struct spl2sw_common *comm = bus->priv;
if (regnum & MII_ADDR_C45)
return -EOPNOTSUPP;
return spl2sw_mdio_access(comm, SPL2SW_MDIO_READ_CMD, addr, regnum, 0);
}
static int spl2sw_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
struct spl2sw_common *comm = bus->priv;
int ret;
if (regnum & MII_ADDR_C45)
return -EOPNOTSUPP;
ret = spl2sw_mdio_access(comm, SPL2SW_MDIO_WRITE_CMD, addr, regnum, val);
if (ret < 0)
return ret;
return 0;
}
int spl2sw_mdio_init(struct spl2sw_common *comm)
{
struct device_node *mdio_np;
struct mii_bus *mii_bus;
int ret;
/* Get mdio child node. */
mdio_np = of_get_child_by_name(comm->pdev->dev.of_node, "mdio");
if (!mdio_np) {
dev_err(&comm->pdev->dev, "No mdio child node found!\n");
return -ENODEV;
}
/* Allocate and register mdio bus. */
mii_bus = devm_mdiobus_alloc(&comm->pdev->dev);
if (!mii_bus) {
ret = -ENOMEM;
goto out;
}
mii_bus->name = "sunplus_mii_bus";
mii_bus->parent = &comm->pdev->dev;
mii_bus->priv = comm;
mii_bus->read = spl2sw_mii_read;
mii_bus->write = spl2sw_mii_write;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&comm->pdev->dev));
ret = of_mdiobus_register(mii_bus, mdio_np);
if (ret) {
dev_err(&comm->pdev->dev, "Failed to register mdiobus!\n");
goto out;
}
comm->mii_bus = mii_bus;
out:
of_node_put(mdio_np);
return ret;
}
void spl2sw_mdio_remove(struct spl2sw_common *comm)
{
if (comm->mii_bus) {
mdiobus_unregister(comm->mii_bus);
comm->mii_bus = NULL;
}
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#ifndef __SPL2SW_MDIO_H__
#define __SPL2SW_MDIO_H__
int spl2sw_mdio_init(struct spl2sw_common *comm);
void spl2sw_mdio_remove(struct spl2sw_common *comm);
#endif

View File

@ -0,0 +1,92 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_phy.h"
static void spl2sw_mii_link_change(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
struct spl2sw_common *comm = mac->comm;
u32 reg;
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
if (phydev->link) {
reg |= FIELD_PREP(MAC_FORCE_RMII_LINK, mac->lan_port);
if (phydev->speed == 100) {
reg |= FIELD_PREP(MAC_FORCE_RMII_SPD, mac->lan_port);
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_SPD, ~mac->lan_port) |
~MAC_FORCE_RMII_SPD;
}
if (phydev->duplex) {
reg |= FIELD_PREP(MAC_FORCE_RMII_DPX, mac->lan_port);
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_DPX, ~mac->lan_port) |
~MAC_FORCE_RMII_DPX;
}
if (phydev->pause) {
reg |= FIELD_PREP(MAC_FORCE_RMII_FC, mac->lan_port);
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_FC, ~mac->lan_port) |
~MAC_FORCE_RMII_FC;
}
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_LINK, ~mac->lan_port) |
~MAC_FORCE_RMII_LINK;
}
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
phy_print_status(phydev);
}
int spl2sw_phy_connect(struct spl2sw_common *comm)
{
struct phy_device *phydev;
struct net_device *ndev;
struct spl2sw_mac *mac;
int i;
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
ndev = comm->ndev[i];
mac = netdev_priv(ndev);
phydev = of_phy_connect(ndev, mac->phy_node, spl2sw_mii_link_change,
0, mac->phy_mode);
if (!phydev)
return -ENODEV;
phy_support_asym_pause(phydev);
phy_attached_info(phydev);
}
return 0;
}
void spl2sw_phy_remove(struct spl2sw_common *comm)
{
struct net_device *ndev;
int i;
for (i = 0; i < MAX_NETDEV_NUM; i++) {
ndev = comm->ndev[i];
if (!ndev)
continue;
phy_disconnect(ndev->phydev);
ndev->phydev = NULL;
}
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#ifndef __SPL2SW_PHY_H__
#define __SPL2SW_PHY_H__
int spl2sw_phy_connect(struct spl2sw_common *comm);
void spl2sw_phy_remove(struct spl2sw_common *comm);
#endif

View File

@ -0,0 +1,86 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#ifndef __SPL2SW_REGISTER_H__
#define __SPL2SW_REGISTER_H__
/* Register L2SW */
#define L2SW_SW_INT_STATUS_0 0x0
#define L2SW_SW_INT_MASK_0 0x4
#define L2SW_FL_CNTL_TH 0x8
#define L2SW_CPU_FL_CNTL_TH 0xc
#define L2SW_PRI_FL_CNTL 0x10
#define L2SW_VLAN_PRI_TH 0x14
#define L2SW_EN_TOS_BUS 0x18
#define L2SW_TOS_MAP0 0x1c
#define L2SW_TOS_MAP1 0x20
#define L2SW_TOS_MAP2 0x24
#define L2SW_TOS_MAP3 0x28
#define L2SW_TOS_MAP4 0x2c
#define L2SW_TOS_MAP5 0x30
#define L2SW_TOS_MAP6 0x34
#define L2SW_TOS_MAP7 0x38
#define L2SW_GLOBAL_QUE_STATUS 0x3c
#define L2SW_ADDR_TBL_SRCH 0x40
#define L2SW_ADDR_TBL_ST 0x44
#define L2SW_MAC_AD_SER0 0x48
#define L2SW_MAC_AD_SER1 0x4c
#define L2SW_WT_MAC_AD0 0x50
#define L2SW_W_MAC_15_0 0x54
#define L2SW_W_MAC_47_16 0x58
#define L2SW_PVID_CONFIG0 0x5c
#define L2SW_PVID_CONFIG1 0x60
#define L2SW_VLAN_MEMSET_CONFIG0 0x64
#define L2SW_VLAN_MEMSET_CONFIG1 0x68
#define L2SW_PORT_ABILITY 0x6c
#define L2SW_PORT_ST 0x70
#define L2SW_CPU_CNTL 0x74
#define L2SW_PORT_CNTL0 0x78
#define L2SW_PORT_CNTL1 0x7c
#define L2SW_PORT_CNTL2 0x80
#define L2SW_SW_GLB_CNTL 0x84
#define L2SW_L2SW_SW_RESET 0x88
#define L2SW_LED_PORT0 0x8c
#define L2SW_LED_PORT1 0x90
#define L2SW_LED_PORT2 0x94
#define L2SW_LED_PORT3 0x98
#define L2SW_LED_PORT4 0x9c
#define L2SW_WATCH_DOG_TRIG_RST 0xa0
#define L2SW_WATCH_DOG_STOP_CPU 0xa4
#define L2SW_PHY_CNTL_REG0 0xa8
#define L2SW_PHY_CNTL_REG1 0xac
#define L2SW_MAC_FORCE_MODE 0xb0
#define L2SW_VLAN_GROUP_CONFIG0 0xb4
#define L2SW_VLAN_GROUP_CONFIG1 0xb8
#define L2SW_FLOW_CTRL_TH3 0xbc
#define L2SW_QUEUE_STATUS_0 0xc0
#define L2SW_DEBUG_CNTL 0xc4
#define L2SW_RESERVED_1 0xc8
#define L2SW_MEM_TEST_INFO 0xcc
#define L2SW_SW_INT_STATUS_1 0xd0
#define L2SW_SW_INT_MASK_1 0xd4
#define L2SW_SW_GLOBAL_SIGNAL 0xd8
#define L2SW_CPU_TX_TRIG 0x208
#define L2SW_TX_HBASE_ADDR_0 0x20c
#define L2SW_TX_LBASE_ADDR_0 0x210
#define L2SW_RX_HBASE_ADDR_0 0x214
#define L2SW_RX_LBASE_ADDR_0 0x218
#define L2SW_TX_HW_ADDR_0 0x21c
#define L2SW_TX_LW_ADDR_0 0x220
#define L2SW_RX_HW_ADDR_0 0x224
#define L2SW_RX_LW_ADDR_0 0x228
#define L2SW_CPU_PORT_CNTL_REG_0 0x22c
#define L2SW_TX_HBASE_ADDR_1 0x230
#define L2SW_TX_LBASE_ADDR_1 0x234
#define L2SW_RX_HBASE_ADDR_1 0x238
#define L2SW_RX_LBASE_ADDR_1 0x23c
#define L2SW_TX_HW_ADDR_1 0x240
#define L2SW_TX_LW_ADDR_1 0x244
#define L2SW_RX_HW_ADDR_1 0x248
#define L2SW_RX_LW_ADDR_1 0x24c
#define L2SW_CPU_PORT_CNTL_REG_1 0x250
#endif

View File

@ -0,0 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Wangxun network device configuration
#
config NET_VENDOR_WANGXUN
bool "Wangxun devices"
default y
help
If you have a network (Ethernet) card from Wangxun(R), say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Wangxun(R) cards. If you say Y, you will
be asked for your specific card in the following questions.
if NET_VENDOR_WANGXUN
config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
help
This driver supports Wangxun(R) 10GbE PCI Express family of
adapters.
More specific information on configuring the driver is in
<file:Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst>.
To compile this driver as a module, choose M here. The module
will be called txgbe.
endif # NET_VENDOR_WANGXUN

View File

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Wangxun network device drivers.
#
obj-$(CONFIG_TXGBE) += txgbe/

View File

@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
#
# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver
#
obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o

View File

@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
#ifndef _TXGBE_H_
#define _TXGBE_H_
#include "txgbe_type.h"
#define TXGBE_MAX_FDIR_INDICES 63
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
/* board specific private data structure */
struct txgbe_adapter {
u8 __iomem *io_addr; /* Mainly for iounmap use */
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
};
extern char txgbe_driver_name[];
#endif /* _TXGBE_H_ */

View File

@ -0,0 +1,166 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include "txgbe.h"
char txgbe_driver_name[] = "txgbe";
/* txgbe_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id txgbe_pci_tbl[] = {
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
/* required last entry */
{ .device = 0 }
};
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
pci_disable_device(pdev);
}
static void txgbe_shutdown(struct pci_dev *pdev)
{
bool wake = false;
txgbe_dev_shutdown(pdev, &wake);
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, wake);
pci_set_power_state(pdev, PCI_D3hot);
}
}
/**
* txgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in txgbe_pci_tbl
*
* Returns 0 on success, negative on failure
*
* txgbe_probe initializes an adapter identified by a pci_dev structure.
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
static int txgbe_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
struct txgbe_adapter *adapter = NULL;
struct net_device *netdev;
int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev,
"No usable DMA configuration, aborting\n");
goto err_pci_disable_dev;
}
err = pci_request_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM),
txgbe_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed 0x%x\n", err);
goto err_pci_disable_dev;
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
sizeof(struct txgbe_adapter),
TXGBE_MAX_TX_QUEUES,
TXGBE_MAX_RX_QUEUES);
if (!netdev) {
err = -ENOMEM;
goto err_pci_release_regions;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->io_addr = devm_ioremap(&pdev->dev,
pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!adapter->io_addr) {
err = -EIO;
goto err_pci_release_regions;
}
netdev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, adapter);
return 0;
err_pci_release_regions:
pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
pci_disable_device(pdev);
return err;
}
/**
* txgbe_remove - Device Removal Routine
* @pdev: PCI device information struct
*
* txgbe_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
static void txgbe_remove(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
static struct pci_driver txgbe_driver = {
.name = txgbe_driver_name,
.id_table = txgbe_pci_tbl,
.probe = txgbe_probe,
.remove = txgbe_remove,
.shutdown = txgbe_shutdown,
};
module_pci_driver(txgbe_driver);
MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
#ifndef _TXGBE_TYPE_H_
#define _TXGBE_TYPE_H_
#include <linux/types.h>
#include <linux/netdevice.h>
/************ txgbe_register.h ************/
/* Vendor ID */
#ifndef PCI_VENDOR_ID_WANGXUN
#define PCI_VENDOR_ID_WANGXUN 0x8088
#endif
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
#define TXGBE_DEV_ID_WX1820 0x2001
/* Subsystem IDs */
/* SFP */
#define TXGBE_ID_SP1000_SFP 0x0000
#define TXGBE_ID_WX1820_SFP 0x2000
#define TXGBE_ID_SFP 0x00
/* copper */
#define TXGBE_ID_SP1000_XAUI 0x1010
#define TXGBE_ID_WX1820_XAUI 0x2010
#define TXGBE_ID_XAUI 0x10
#define TXGBE_ID_SP1000_SGMII 0x1020
#define TXGBE_ID_WX1820_SGMII 0x2020
#define TXGBE_ID_SGMII 0x20
/* backplane */
#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030
#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030
#define TXGBE_ID_KR_KX_KX4 0x30
/* MAC Interface */
#define TXGBE_ID_SP1000_MAC_XAUI 0x1040
#define TXGBE_ID_WX1820_MAC_XAUI 0x2040
#define TXGBE_ID_MAC_XAUI 0x40
#define TXGBE_ID_SP1000_MAC_SGMII 0x1060
#define TXGBE_ID_WX1820_MAC_SGMII 0x2060
#define TXGBE_ID_MAC_SGMII 0x60
#define TXGBE_NCSI_SUP 0x8000
#define TXGBE_NCSI_MASK 0x8000
#define TXGBE_WOL_SUP 0x4000
#define TXGBE_WOL_MASK 0x4000
#define TXGBE_DEV_MASK 0xf0
/* Combined interface*/
#define TXGBE_ID_SFI_XAUI 0x50
/* Revision ID */
#define TXGBE_SP_MPW 1
#endif /* _TXGBE_TYPE_H_ */
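
The subsystem-ID constants above encode capability and media-type information rather than one ID per board. A rough sketch of how they might be decoded elsewhere in the driver (the helper names are assumptions, not part of this commit):

static inline bool txgbe_has_ncsi(u16 ssid)
{
	return (ssid & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP;
}

static inline bool txgbe_has_wol(u16 ssid)
{
	return (ssid & TXGBE_WOL_MASK) == TXGBE_WOL_SUP;
}

static inline u8 txgbe_media_type(u16 ssid)
{
	/* The low byte, masked with TXGBE_DEV_MASK, selects the variant:
	 * 0x00 SFP, 0x10 XAUI, 0x20 SGMII, 0x30 KR/KX/KX4, 0x40 MAC_XAUI,
	 * 0x50 SFI_XAUI, 0x60 MAC_SGMII.
	 */
	return ssid & TXGBE_DEV_MASK;
}

For example, TXGBE_ID_WX1820_SGMII (0x2020) masked with TXGBE_DEV_MASK yields TXGBE_ID_SGMII (0x20).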

View File

@ -0,0 +1,537 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_HDR_SECTORS,
IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v3.1 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL = 0,
IPA_RSRC_GROUP_SRC_DL,
IPA_RSRC_GROUP_SRC_DIAG,
IPA_RSRC_GROUP_SRC_DMA,
IPA_RSRC_GROUP_SRC_UNUSED,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL = 0,
IPA_RSRC_GROUP_DST_DL,
IPA_RSRC_GROUP_DST_DIAG_DPL,
IPA_RSRC_GROUP_DST_DMA,
IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL,
IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v3.1 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 8,
},
[IPA_QSB_MASTER_PCIE] = {
.max_writes = 2,
.max_reads = 8,
},
};
/* Endpoint data for an SoC having IPA v3.1 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 6,
.endpoint_id = 22,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 18,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 15,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 5,
.endpoint_id = 3,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 8,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_DL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_LAN_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 4,
.endpoint_id = 9,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 5,
.endpoint_id = 18,
.toward_ipa = false,
},
};
/* Source resource configuration data for an SoC having IPA v3.1 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 3, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 3, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 1, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 1, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 2, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_HDR_SECTORS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 16, .max = 16,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 19, .max = 19,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 26, .max = 26,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 5, .max = 5, /* 3 downstream */
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 5, .max = 5, /* 7 downstream */
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 19, .max = 19,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 26, .max = 26,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
};
/* Destination resource configuration data for an SoC having IPA v3.1 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 3, .max = 3, /* 2 downstream */
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 3, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
.min = 1, .max = 1, /* 0 downstream */
},
/* IPA_RSRC_GROUP_DST_DMA uses 2 downstream */
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
.min = 3, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
.min = 1, .max = 1,
},
},
};
/* Resource configuration data for an SoC having IPA v3.1 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v3.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1424,
.canary_count = 0,
},
{
.id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
.canary_count = 1,
},
};
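
In these tables, canary_count is the number of 32-bit canary words written immediately before a region's offset, which is what produces the small gaps between consecutive regions. A quick worked check against the values above (no new data, just the arithmetic):

/* IPA_MEM_UC_INFO ends at 0x0080 + 0x0200 = 0x0280.
 * IPA_MEM_V4_FILTER_HASHED starts at 0x0288 with canary_count = 2,
 * so its two 4-byte canaries occupy 0x0280 and 0x0284 -- exactly the
 * 8-byte gap between the two regions.  The same pattern repeats for
 * each filter/route region that follows.
 */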
/* Memory configuration data for an SoC having IPA v3.1 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00002000,
};
/* Interconnect bandwidths are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 640000, /* 640 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
{
.name = "imem",
.peak_bandwidth = 640000, /* 640 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
/* Average bandwidth is unused for the next interconnect */
{
.name = "config",
.peak_bandwidth = 80000, /* 80 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v3.1 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 16 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
.version = IPA_VERSION_3_1,
.backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};

View File

@ -0,0 +1,422 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v3.5.1 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_LWA_DL = 0,
IPA_RSRC_GROUP_SRC_UL_DL,
IPA_RSRC_GROUP_SRC_MHI_DMA,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_LWA_DL = 0,
IPA_RSRC_GROUP_DST_UL_DL_DPL,
IPA_RSRC_GROUP_DST_UNUSED_2,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 8,
},
[IPA_QSB_MASTER_PCIE] = {
.max_writes = 4,
.max_reads = 12,
},
};
/* Endpoint configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 4,
.endpoint_id = 5,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 5,
.endpoint_id = 9,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 3,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 6,
.endpoint_id = 10,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_LAN_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 3,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 4,
.endpoint_id = 6,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 12,
.toward_ipa = false,
},
};
/* Source resource configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 1, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 10, .max = 10,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 10, .max = 10,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 12, .max = 12,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_MHI_DMA] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 20, .max = 20,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 14, .max = 14,
},
},
};
/* Destination resource configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
.min = 4, .max = 4,
},
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 4, .max = 4,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 3, .max = 3,
}
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
.min = 2, .max = 63,
},
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 1, .max = 2,
}
},
};
/* Resource configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1024,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x1c00,
.size = 0x0400,
.canary_count = 1,
},
};
/* Memory configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00002000,
};
/* Interconnect bandwidths are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
/* Average bandwidth is unused for the next two interconnects */
{
.name = "imem",
.peak_bandwidth = 350000, /* 350 MBps */
.average_bandwidth = 0, /* unused */
},
{
.name = "config",
.peak_bandwidth = 40000, /* 40 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
.version = IPA_VERSION_3_5_1,
.backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
BCR_TX_NOT_USING_BRESP_FMASK |
BCR_SUSPEND_L2_IRQ_FMASK |
BCR_HOLB_DROP_L2_IRQ_FMASK |
BCR_DUAL_TX_FMASK,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};

View File

@ -0,0 +1,405 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.11 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_UNUSED_2,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
IPA_RSRC_GROUP_DST_UNUSED_1,
IPA_RSRC_GROUP_DST_DRB_IP,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.11 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 12,
.max_reads = 13,
.max_reads_beats = 120,
},
};
/* Endpoint configuration data for an SoC having IPA v4.11 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 5,
.endpoint_id = 7,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 14,
.endpoint_id = 9,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 2,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 32768,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 14,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v4.11 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 6, .max = 6,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 18, .max = 18,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 2, .max = 2,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 15, .max = 15,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.11 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 3, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
.min = 25, .max = 25,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 2, .max = 2,
},
},
};
/* Resource configuration data for an SoC having IPA v4.11 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.11 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x0cd0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x0ee0,
.size = 0x0d00,
.canary_count = 4,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x1be8,
.size = 0x0050,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x1c40,
.size = 0x0030,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x1c70,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x1cb8,
.size = 0x0238,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x1ef0,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x1f18,
.size = 0x100c,
.canary_count = 2,
},
{
.id = IPA_MEM_END_MARKER,
.offset = 0x3000,
.size = 0x0000,
.canary_count = 1,
},
};
/* Memory configuration data for an SoC having IPA v4.11 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
/* Average rate is unused for the next interconnect */
{
.name = "config",
.peak_bandwidth = 74000, /* 74 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.11 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 60 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.11 */
const struct ipa_data ipa_data_v4_11 = {
.version = IPA_VERSION_4_11,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};


@ -0,0 +1,384 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.2 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.2 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 12,
/* no outstanding read byte (beat) limit */
},
};
/* Endpoint configuration data for an SoC having IPA v4.2 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 1,
.endpoint_id = 6,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 6,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 0,
.endpoint_id = 1,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 8,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_1_PASS_SKIP_LAST_UC,
.seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 3,
.endpoint_id = 9,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 6,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_COMMAND_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 1,
.endpoint_id = 5,
.toward_ipa = true,
},
[IPA_ENDPOINT_MODEM_LAN_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 3,
.endpoint_id = 11,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 4,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 10,
.toward_ipa = false,
},
};
/* Source resource configuration data for an SoC having IPA v4.2 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 3, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 10, .max = 10,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 1,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 5, .max = 5,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.2 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 1, .max = 63,
},
},
};
/* Resource configuration data for an SoC having IPA v4.2 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.2 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0290,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0310,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0318,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0398,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x03a0,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0420,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0428,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x04a8,
.size = 0x0140,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x05f0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x07f0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x09f8,
.size = 0x0050,
.canary_count = 2,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x0a50,
.size = 0x0060,
.canary_count = 2,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x0ab0,
.size = 0x0140,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x0bf0,
.size = 0x140c,
.canary_count = 0,
},
{
.id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
.canary_count = 1,
},
};
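/* Illustrative note: the zero-size hashed filter/route regions above appear
* to reflect that IPA v4.2 does not support hashed tables; the entries keep
* their canaries and offsets so the non-hashed tables still line up.
*/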
/* Memory configuration data for an SoC having IPA v4.2 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00002000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 465000, /* 465 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
/* Average bandwidth is unused for the next two interconnects */
{
.name = "imem",
.peak_bandwidth = 68570, /* 68.570 MBps */
.average_bandwidth = 0, /* unused */
},
{
.name = "config",
.peak_bandwidth = 30000, /* 30 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.2 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.2 */
const struct ipa_data ipa_data_v4_2 = {
.version = IPA_VERSION_4_2,
/* backward_compat value is 0 */
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};


@ -0,0 +1,461 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.5 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UNUSED_0 = 0,
IPA_RSRC_GROUP_SRC_UL_DL,
IPA_RSRC_GROUP_SRC_UNUSED_2,
IPA_RSRC_GROUP_SRC_UNUSED_3,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UNUSED_0 = 0,
IPA_RSRC_GROUP_DST_UL_DL_DPL,
IPA_RSRC_GROUP_DST_UNUSED_2,
IPA_RSRC_GROUP_DST_UNUSED_3,
IPA_RSRC_GROUP_DST_UC,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.5 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 0, /* no limit (hardware max) */
.max_reads_beats = 120,
},
[IPA_QSB_MASTER_PCIE] = {
.max_writes = 8,
.max_reads = 12,
/* no outstanding read byte (beat) limit */
},
};
/* Endpoint configuration data for an SoC having IPA v4.5 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 9,
.endpoint_id = 7,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 10,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 1,
.endpoint_id = 14,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 21,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v4.5 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 11,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 18, .max = 18,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UNUSED_0] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UNUSED_2] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UNUSED_3] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 24, .max = 24,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.5 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 16, .max = 16,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 2, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
.min = 2, .max = 2,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 2, .max = 63,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 1, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
.min = 1, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UC] = {
.min = 0, .max = 2,
},
},
};
/* Resource configuration data for an SoC having IPA v4.5 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.5 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
},
};
/* Memory configuration data for an SoC having IPA v4.5 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
/* Average rate is unused for the next two interconnects */
{
.name = "imem",
.peak_bandwidth = 450000, /* 450 MBps */
.average_bandwidth = 75000, /* 75 MBps (unused?) */
},
{
.name = "config",
.peak_bandwidth = 171400, /* 171.4 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.5 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.5 */
const struct ipa_data ipa_data_v4_5 = {
.version = IPA_VERSION_4_5,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};


@ -0,0 +1,455 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.9 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
IPA_RSRC_GROUP_SRC_DMA,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
IPA_RSRC_GROUP_DST_DMA,
IPA_RSRC_GROUP_DST_UC,
IPA_RSRC_GROUP_DST_DRB_IP,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.9 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 0, /* no limit (hardware max) */
.max_reads_beats = 120,
},
};
/* Endpoint configuration data for an SoC having IPA v4.9 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 6,
.endpoint_id = 7,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 11,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 2,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 12,
.endpoint_id = 20,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 16,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v4.9 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 12,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 12,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 20, .max = 20,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 2, .max = 2,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 38, .max = 38,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 4, .max = 4,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 0, .max = 4,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 4,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 4,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 30, .max = 30,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 8, .max = 8,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.9 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 9, .max = 9,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_UC] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
.min = 39, .max = 39,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 2, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 1, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UC] = {
.min = 0, .max = 2,
},
},
};
/* Resource configuration data for an SoC having IPA v4.9 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.9 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
},
};
/* Memory configuration data for an SoC having IPA v4.9 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
/* Average rate is unused for the next interconnect */
{
.name = "config",
.peak_bandwidth = 74000, /* 74 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.9 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 60 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.9. */
const struct ipa_data ipa_data_v4_9 = {
.version = IPA_VERSION_4_9,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};


@ -0,0 +1,531 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Schneider Electric
*
* Clément Léger <clement.leger@bootlin.com>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/mdio.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pcs-rzn1-miic.h>
#include <linux/phylink.h>
#include <linux/pm_runtime.h>
#include <dt-bindings/net/pcs-rzn1-miic.h>
#define MIIC_PRCMD 0x0
#define MIIC_ESID_CODE 0x4
#define MIIC_MODCTRL 0x20
#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
#define MIIC_CONVCTRL_CONV_SPEED GENMASK(1, 0)
#define CONV_MODE_10MBPS 0
#define CONV_MODE_100MBPS 1
#define CONV_MODE_1000MBPS 2
#define MIIC_CONVCTRL_CONV_MODE GENMASK(3, 2)
#define CONV_MODE_MII 0
#define CONV_MODE_RMII 1
#define CONV_MODE_RGMII 2
#define MIIC_CONVCTRL_FULLD BIT(8)
#define MIIC_CONVCTRL_RGMII_LINK BIT(12)
#define MIIC_CONVCTRL_RGMII_DUPLEX BIT(13)
#define MIIC_CONVCTRL_RGMII_SPEED GENMASK(15, 14)
#define MIIC_CONVRST 0x114
#define MIIC_CONVRST_PHYIF_RST(port) BIT(port)
#define MIIC_CONVRST_PHYIF_RST_MASK GENMASK(4, 0)
#define MIIC_SWCTRL 0x304
#define MIIC_SWDUPC 0x308
#define MIIC_MAX_NR_PORTS 5
#define MIIC_MODCTRL_CONF_CONV_NUM 6
#define MIIC_MODCTRL_CONF_NONE -1
/**
* struct modctrl_match - Matching table entry for convctrl configuration
* See section 8.2.1 of the manual.
* @mode_cfg: Configuration value for convctrl
* @conv: Configuration of the Ethernet port muxes. Index 0 is SWITCH_PORTIN,
* indexes 1 - 5 are CONV1 - CONV5.
*/
struct modctrl_match {
u32 mode_cfg;
u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
};
static struct modctrl_match modctrl_match_table[] = {
{0x0, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
{0x1, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
{0x2, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
{0x3, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
{0x8, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
{0x9, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
{0xA, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
{0xB, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
{0x10, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
{0x11, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
{0x12, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
{0x13, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}}
};
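/* Reading one entry as an example: mode_cfg 0x1 routes SWITCH_PORTIN to the
* RTOS port, CONV1 to GMAC1, CONV2/CONV3 to switch ports D/C, and
* CONV4/CONV5 to EtherCAT ports B/A, per the conv[] ordering documented for
* struct modctrl_match above.
*/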
static const char * const conf_to_string[] = {
[MIIC_GMAC1_PORT] = "GMAC1_PORT",
[MIIC_GMAC2_PORT] = "GMAC2_PORT",
[MIIC_RTOS_PORT] = "RTOS_PORT",
[MIIC_SERCOS_PORTA] = "SERCOS_PORTA",
[MIIC_SERCOS_PORTB] = "SERCOS_PORTB",
[MIIC_ETHERCAT_PORTA] = "ETHERCAT_PORTA",
[MIIC_ETHERCAT_PORTB] = "ETHERCAT_PORTB",
[MIIC_ETHERCAT_PORTC] = "ETHERCAT_PORTC",
[MIIC_SWITCH_PORTA] = "SWITCH_PORTA",
[MIIC_SWITCH_PORTB] = "SWITCH_PORTB",
[MIIC_SWITCH_PORTC] = "SWITCH_PORTC",
[MIIC_SWITCH_PORTD] = "SWITCH_PORTD",
[MIIC_HSR_PORTA] = "HSR_PORTA",
[MIIC_HSR_PORTB] = "HSR_PORTB",
};
static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
"SWITCH_PORTIN",
"CONV1",
"CONV2",
"CONV3",
"CONV4",
"CONV5",
};
/**
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
* @clks: Clocks used for this device
* @nclk: Number of clocks
* @lock: Lock used for read-modify-write access
*/
struct miic {
void __iomem *base;
struct device *dev;
struct clk_bulk_data *clks;
int nclk;
spinlock_t lock;
};
/**
* struct miic_port - Per port MII converter struct
* @miic: back-reference to the MII converter structure
* @pcs: PCS structure associated to the port
* @port: port number
* @interface: interface mode of the port
*/
struct miic_port {
struct miic *miic;
struct phylink_pcs pcs;
int port;
phy_interface_t interface;
};
static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
{
return container_of(pcs, struct miic_port, pcs);
}
static void miic_reg_writel(struct miic *miic, int offset, u32 value)
{
writel(value, miic->base + offset);
}
static u32 miic_reg_readl(struct miic *miic, int offset)
{
return readl(miic->base + offset);
}
static void miic_reg_rmw(struct miic *miic, int offset, u32 mask, u32 val)
{
u32 reg;
spin_lock(&miic->lock);
reg = miic_reg_readl(miic, offset);
reg &= ~mask;
reg |= val;
miic_reg_writel(miic, offset, reg);
spin_unlock(&miic->lock);
}
static void miic_converter_enable(struct miic *miic, int port, int enable)
{
u32 val = 0;
if (enable)
val = MIIC_CONVRST_PHYIF_RST(port);
miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
}
static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising, bool permit)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
struct miic *miic = miic_port->miic;
u32 speed, conv_mode, val, mask;
int port = miic_port->port;
switch (interface) {
case PHY_INTERFACE_MODE_RMII:
conv_mode = CONV_MODE_RMII;
speed = CONV_MODE_100MBPS;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
conv_mode = CONV_MODE_RGMII;
speed = CONV_MODE_1000MBPS;
break;
case PHY_INTERFACE_MODE_MII:
conv_mode = CONV_MODE_MII;
/* When in MII mode, speed should be set to 0 (which is actually
* CONV_MODE_10MBPS)
*/
speed = CONV_MODE_10MBPS;
break;
default:
return -EOPNOTSUPP;
}
val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, conv_mode);
mask = MIIC_CONVCTRL_CONV_MODE;
/* Update speed only if we are going to change the interface because
* the link might already be up and it would break it if the speed is
* changed.
*/
if (interface != miic_port->interface) {
val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, speed);
mask |= MIIC_CONVCTRL_CONV_SPEED;
miic_port->interface = interface;
}
miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val);
miic_converter_enable(miic_port->miic, miic_port->port, 1);
return 0;
}
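/* Worked example of the value built by miic_config() above (assuming RGMII
* on a port whose interface is changing): conv_mode = CONV_MODE_RGMII (2) in
* MIIC_CONVCTRL_CONV_MODE (bits 3:2) and speed = CONV_MODE_1000MBPS (2) in
* MIIC_CONVCTRL_CONV_SPEED (bits 1:0) give val = 0xa, written to
* MIIC_CONVCTRL(port) under the read-modify-write lock.
*/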
static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed, int duplex)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
struct miic *miic = miic_port->miic;
u32 conv_speed = 0, val = 0;
int port = miic_port->port;
if (duplex == DUPLEX_FULL)
val |= MIIC_CONVCTRL_FULLD;
/* No speed in MII through-mode */
if (interface != PHY_INTERFACE_MODE_MII) {
switch (speed) {
case SPEED_1000:
conv_speed = CONV_MODE_1000MBPS;
break;
case SPEED_100:
conv_speed = CONV_MODE_100MBPS;
break;
case SPEED_10:
conv_speed = CONV_MODE_10MBPS;
break;
default:
return;
}
}
val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, conv_speed);
miic_reg_rmw(miic, MIIC_CONVCTRL(port),
(MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
}
static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state)
{
if (phy_interface_mode_is_rgmii(state->interface) ||
state->interface == PHY_INTERFACE_MODE_RMII ||
state->interface == PHY_INTERFACE_MODE_MII)
return 1;
return -EINVAL;
}
static const struct phylink_pcs_ops miic_phylink_ops = {
.pcs_validate = miic_validate,
.pcs_config = miic_config,
.pcs_link_up = miic_link_up,
};
struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
{
struct platform_device *pdev;
struct miic_port *miic_port;
struct device_node *pcs_np;
struct miic *miic;
u32 port;
if (!of_device_is_available(np))
return ERR_PTR(-ENODEV);
if (of_property_read_u32(np, "reg", &port))
return ERR_PTR(-EINVAL);
if (port > MIIC_MAX_NR_PORTS || port < 1)
return ERR_PTR(-EINVAL);
/* The PCS pdev is attached to the parent node */
pcs_np = of_get_parent(np);
if (!pcs_np)
return ERR_PTR(-ENODEV);
if (!of_device_is_available(pcs_np)) {
of_node_put(pcs_np);
return ERR_PTR(-ENODEV);
}
pdev = of_find_device_by_node(pcs_np);
of_node_put(pcs_np);
if (!pdev || !platform_get_drvdata(pdev))
return ERR_PTR(-EPROBE_DEFER);
miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
if (!miic_port)
return ERR_PTR(-ENOMEM);
miic = platform_get_drvdata(pdev);
device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
miic_port->miic = miic;
miic_port->port = port - 1;
miic_port->pcs.ops = &miic_phylink_ops;
return &miic_port->pcs;
}
EXPORT_SYMBOL(miic_create);
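/* Typical (hypothetical) consumer usage: a phylink-based MAC driver looks up
* the converter node in its own DT, calls miic_create(dev, np) once the MIIC
* platform device has probed (otherwise it gets -EPROBE_DEFER), and hands
* the returned phylink_pcs to phylink, e.g. from its mac_select_pcs()
* callback; miic_destroy() releases it on teardown.
*/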
void miic_destroy(struct phylink_pcs *pcs)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
miic_converter_enable(miic_port->miic, miic_port->port, 0);
kfree(miic_port);
}
EXPORT_SYMBOL(miic_destroy);
static int miic_init_hw(struct miic *miic, u32 cfg_mode)
{
int port;
/* Unlock write access to accessory registers (cf. datasheet). If this
* is going to be used in conjunction with the Cortex-M3, this sequence
* will have to be moved into the register write path itself
*/
miic_reg_writel(miic, MIIC_PRCMD, 0x00A5);
miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE);
miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
miic_reg_writel(miic, MIIC_MODCTRL,
FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode));
for (port = 0; port < MIIC_MAX_NR_PORTS; port++) {
miic_converter_enable(miic, port, 0);
/* Disable speed/duplex control from these registers; the datasheet
* says the switch registers should be used to set up the switch port
* speed and duplex.
*/
miic_reg_writel(miic, MIIC_SWCTRL, 0x0);
miic_reg_writel(miic, MIIC_SWDUPC, 0x0);
}
return 0;
}
static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM])
{
int i;
for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
if (dt_val[i] == MIIC_MODCTRL_CONF_NONE)
continue;
if (dt_val[i] != table_val[i])
return false;
}
return true;
}
static void miic_dump_conf(struct device *dev,
s8 conf[MIIC_MODCTRL_CONF_CONV_NUM])
{
const char *conf_name;
int i;
for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
if (conf[i] != MIIC_MODCTRL_CONF_NONE)
conf_name = conf_to_string[conf[i]];
else
conf_name = "NONE";
dev_err(dev, "%s: %s\n", index_to_string[i], conf_name);
}
}
static int miic_match_dt_conf(struct device *dev,
s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM],
u32 *mode_cfg)
{
struct modctrl_match *table_entry;
int i;
for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) {
table_entry = &modctrl_match_table[i];
if (miic_modctrl_match(table_entry->conv, dt_val)) {
*mode_cfg = table_entry->mode_cfg;
return 0;
}
}
dev_err(dev, "Failed to apply requested configuration\n");
miic_dump_conf(dev, dt_val);
return -EINVAL;
}
static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
{
s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM];
struct device_node *np = dev->of_node;
struct device_node *conv;
u32 conf;
int port;
memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val));
if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
dt_val[0] = conf;
for_each_child_of_node(np, conv) {
if (of_property_read_u32(conv, "reg", &port))
continue;
if (!of_device_is_available(conv))
continue;
if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
dt_val[port] = conf;
}
return miic_match_dt_conf(dev, dt_val, mode_cfg);
}
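/* Hypothetical DT fragment matching table entry 0x1, for illustration (node
* names are made up; the properties are the ones parsed above):
*
*   eth-miic {
*       renesas,miic-switch-portin = <MIIC_RTOS_PORT>;
*       mii-conv@4 {
*           reg = <4>;
*           renesas,miic-input = <MIIC_ETHERCAT_PORTB>;
*       };
*       mii-conv@5 {
*           reg = <5>;
*           renesas,miic-input = <MIIC_ETHERCAT_PORTA>;
*       };
*   };
*
* Converters not listed stay at MIIC_MODCTRL_CONF_NONE and act as wildcards
* in miic_modctrl_match(), so this resolves to mode_cfg 0x1, the first entry
* whose specified fields all match.
*/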
static int miic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct miic *miic;
u32 mode_cfg;
int ret;
ret = miic_parse_dt(dev, &mode_cfg);
if (ret < 0)
return ret;
miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
if (!miic)
return -ENOMEM;
spin_lock_init(&miic->lock);
miic->dev = dev;
miic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(miic->base))
return PTR_ERR(miic->base);
ret = devm_pm_runtime_enable(dev);
if (ret < 0)
return ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
ret = miic_init_hw(miic, mode_cfg);
if (ret)
goto disable_runtime_pm;
/* miic_create() relies on the fact that data is attached to the
* platform device to determine if the driver is ready, so this needs to
* be the last thing done after everything is initialized
* properly.
*/
platform_set_drvdata(pdev, miic);
return 0;
disable_runtime_pm:
pm_runtime_put(dev);
return ret;
}
static int miic_remove(struct platform_device *pdev)
{
pm_runtime_put(&pdev->dev);
return 0;
}
static const struct of_device_id miic_of_mtable[] = {
{ .compatible = "renesas,rzn1-miic" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, miic_of_mtable);
static struct platform_driver miic_driver = {
.driver = {
.name = "rzn1_miic",
.suppress_bind_attrs = true,
.of_match_table = miic_of_mtable,
},
.probe = miic_probe,
.remove = miic_remove,
};
module_platform_driver(miic_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas MII converter PCS driver");
MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");

292
drivers/net/phy/adin1100.c Normal file

@ -0,0 +1,292 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Driver for Analog Devices Industrial Ethernet T1L PHYs
*
* Copyright 2020 Analog Devices Inc.
*/
#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/property.h>
#define PHY_ID_ADIN1100 0x0283bc81
#define ADIN_FORCED_MODE 0x8000
#define ADIN_FORCED_MODE_EN BIT(0)
#define ADIN_CRSM_SFT_RST 0x8810
#define ADIN_CRSM_SFT_RST_EN BIT(0)
#define ADIN_CRSM_SFT_PD_CNTRL 0x8812
#define ADIN_CRSM_SFT_PD_CNTRL_EN BIT(0)
#define ADIN_AN_PHY_INST_STATUS 0x8030
#define ADIN_IS_CFG_SLV BIT(2)
#define ADIN_IS_CFG_MST BIT(3)
#define ADIN_CRSM_STAT 0x8818
#define ADIN_CRSM_SFT_PD_RDY BIT(1)
#define ADIN_CRSM_SYS_RDY BIT(0)
#define ADIN_MSE_VAL 0x830B
#define ADIN_SQI_MAX 7
struct adin_mse_sqi_range {
u16 start;
u16 end;
};
static const struct adin_mse_sqi_range adin_mse_sqi_map[] = {
{ 0x0A74, 0xFFFF },
{ 0x084E, 0x0A74 },
{ 0x0698, 0x084E },
{ 0x053D, 0x0698 },
{ 0x0429, 0x053D },
{ 0x034E, 0x0429 },
{ 0x02A0, 0x034E },
{ 0x0000, 0x02A0 },
};
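/* Example of the lookup done by adin_get_sqi() below: an MSE reading of
* 0x0500 falls in the { 0x0429, 0x053D } range at index 4, so the reported
* SQI is 4; lower MSE values land at higher indices, up to ADIN_SQI_MAX.
*/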
/**
* struct adin_priv - ADIN PHY driver private data
* @tx_level_2v4_able: set if the PHY supports 2.4V TX levels (10BASE-T1L)
* @tx_level_2v4: set if the PHY requests 2.4V TX levels (10BASE-T1L)
* @tx_level_prop_present: set if the TX level is specified in DT
*/
struct adin_priv {
unsigned int tx_level_2v4_able:1;
unsigned int tx_level_2v4:1;
unsigned int tx_level_prop_present:1;
};
static int adin_read_status(struct phy_device *phydev)
{
int ret;
ret = genphy_c45_read_status(phydev);
if (ret)
return ret;
ret = phy_read_mmd(phydev, MDIO_MMD_AN, ADIN_AN_PHY_INST_STATUS);
if (ret < 0)
return ret;
if (ret & ADIN_IS_CFG_SLV)
phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
if (ret & ADIN_IS_CFG_MST)
phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
return 0;
}
static int adin_config_aneg(struct phy_device *phydev)
{
struct adin_priv *priv = phydev->priv;
int ret;
if (phydev->autoneg == AUTONEG_DISABLE) {
ret = genphy_c45_pma_setup_forced(phydev);
if (ret < 0)
return ret;
if (priv->tx_level_prop_present && priv->tx_level_2v4)
ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL,
MDIO_PMA_10T1L_CTRL_2V4_EN);
else
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL,
MDIO_PMA_10T1L_CTRL_2V4_EN);
if (ret < 0)
return ret;
/* Force PHY to use above configurations */
return phy_set_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN);
}
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN);
if (ret < 0)
return ret;
/* Request increased transmit level from LP. */
if (priv->tx_level_prop_present && priv->tx_level_2v4) {
ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H,
MDIO_AN_T1_ADV_H_10L_TX_HI |
MDIO_AN_T1_ADV_H_10L_TX_HI_REQ);
if (ret < 0)
return ret;
}
/* Disable 2.4 Vpp transmit level. */
if ((priv->tx_level_prop_present && !priv->tx_level_2v4) || !priv->tx_level_2v4_able) {
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H,
MDIO_AN_T1_ADV_H_10L_TX_HI |
MDIO_AN_T1_ADV_H_10L_TX_HI_REQ);
if (ret < 0)
return ret;
}
return genphy_c45_config_aneg(phydev);
}
static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
{
int ret;
int val;
val = en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
ADIN_CRSM_SFT_PD_CNTRL, val);
if (ret < 0)
return ret;
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
(ret & ADIN_CRSM_SFT_PD_RDY) == val,
1000, 30000, true);
}
static int adin_suspend(struct phy_device *phydev)
{
return adin_set_powerdown_mode(phydev, true);
}
static int adin_resume(struct phy_device *phydev)
{
return adin_set_powerdown_mode(phydev, false);
}
static int adin_set_loopback(struct phy_device *phydev, bool enable)
{
if (enable)
return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
BMCR_LOOPBACK);
/* PCS loopback (according to 10BASE-T1L spec) */
return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
BMCR_LOOPBACK);
}
static int adin_soft_reset(struct phy_device *phydev)
{
int ret;
ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN_CRSM_SFT_RST, ADIN_CRSM_SFT_RST_EN);
if (ret < 0)
return ret;
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
(ret & ADIN_CRSM_SYS_RDY),
10000, 30000, true);
}
static int adin_get_features(struct phy_device *phydev)
{
struct adin_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
int ret;
u8 val;
ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT);
if (ret < 0)
return ret;
/* This depends on the voltage level from the power source */
priv->tx_level_2v4_able = !!(ret & MDIO_PMA_10T1L_STAT_2V4_ABLE);
phydev_dbg(phydev, "PHY supports 2.4V TX level: %s\n",
priv->tx_level_2v4_able ? "yes" : "no");
priv->tx_level_prop_present = device_property_present(dev, "phy-10base-t1l-2.4vpp");
if (priv->tx_level_prop_present) {
ret = device_property_read_u8(dev, "phy-10base-t1l-2.4vpp", &val);
if (ret < 0)
return ret;
priv->tx_level_2v4 = val;
if (!priv->tx_level_2v4 && priv->tx_level_2v4_able)
phydev_info(phydev,
"PHY supports 2.4V TX level, but disabled via config\n");
}
linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array),
phydev->supported);
return genphy_c45_pma_read_abilities(phydev);
}
static int adin_get_sqi(struct phy_device *phydev)
{
u16 mse_val;
int sqi;
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1);
if (ret < 0)
return ret;
else if (!(ret & MDIO_STAT1_LSTATUS))
return 0;
ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, ADIN_MSE_VAL);
if (ret < 0)
return ret;
mse_val = 0xFFFF & ret;
for (sqi = 0; sqi < ARRAY_SIZE(adin_mse_sqi_map); sqi++) {
if (mse_val >= adin_mse_sqi_map[sqi].start && mse_val <= adin_mse_sqi_map[sqi].end)
return sqi;
}
return -EINVAL;
}
static int adin_get_sqi_max(struct phy_device *phydev)
{
return ADIN_SQI_MAX;
}
static int adin_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct adin_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
phydev->priv = priv;
return 0;
}
static struct phy_driver adin_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100),
.name = "ADIN1100",
.get_features = adin_get_features,
.soft_reset = adin_soft_reset,
.probe = adin_probe,
.config_aneg = adin_config_aneg,
.read_status = adin_read_status,
.set_loopback = adin_set_loopback,
.suspend = adin_suspend,
.resume = adin_resume,
.get_sqi = adin_get_sqi,
.get_sqi_max = adin_get_sqi_max,
},
};
module_phy_driver(adin_driver);
static struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
{ }
};
MODULE_DEVICE_TABLE(mdio, adin_tbl);
MODULE_DESCRIPTION("Analog Devices Industrial Ethernet T1L PHY driver");
MODULE_LICENSE("Dual BSD/GPL");


@ -0,0 +1,947 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Meta Platforms Inc.
* Copyright (C) 2022 Jonathan Lemon <jonathan.lemon@gmail.com>
*/
#include <asm/unaligned.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include "bcm-phy-lib.h"
/* IEEE 1588 Expansion registers */
#define SLICE_CTRL 0x0810
#define SLICE_TX_EN BIT(0)
#define SLICE_RX_EN BIT(8)
#define TX_EVENT_MODE 0x0811
#define MODE_TX_UPDATE_CF BIT(0)
#define MODE_TX_REPLACE_TS_CF BIT(1)
#define MODE_TX_REPLACE_TS GENMASK(1, 0)
#define RX_EVENT_MODE 0x0819
#define MODE_RX_UPDATE_CF BIT(0)
#define MODE_RX_INSERT_TS_48 BIT(1)
#define MODE_RX_INSERT_TS_64 GENMASK(1, 0)
#define MODE_EVT_SHIFT_SYNC 0
#define MODE_EVT_SHIFT_DELAY_REQ 2
#define MODE_EVT_SHIFT_PDELAY_REQ 4
#define MODE_EVT_SHIFT_PDELAY_RESP 6
#define MODE_SEL_SHIFT_PORT 0
#define MODE_SEL_SHIFT_CPU 8
#define RX_MODE_SEL(sel, evt, act) \
(((MODE_RX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
#define TX_MODE_SEL(sel, evt, act) \
(((MODE_TX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
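/* Example expansion: TX_MODE_SEL(CPU, DELAY_REQ, REPLACE_TS) is
* (MODE_TX_REPLACE_TS << MODE_EVT_SHIFT_DELAY_REQ) << MODE_SEL_SHIFT_CPU,
* i.e. (0x3 << 2) << 8 = 0x0c00, selecting timestamp replacement for
* Delay_Req messages on the CPU side of the slice.
*/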
/* needs global TS capture first */
#define TX_TS_CAPTURE 0x0821
#define TX_TS_CAP_EN BIT(0)
#define RX_TS_CAPTURE 0x0822
#define RX_TS_CAP_EN BIT(0)
#define TIME_CODE_0 0x0854
#define TIME_CODE_1 0x0855
#define TIME_CODE_2 0x0856
#define TIME_CODE_3 0x0857
#define TIME_CODE_4 0x0858
#define DPLL_SELECT 0x085b
#define DPLL_HB_MODE2 BIT(6)
#define SHADOW_CTRL 0x085c
#define SHADOW_LOAD 0x085d
#define TIME_CODE_LOAD BIT(10)
#define SYNC_OUT_LOAD BIT(9)
#define NCO_TIME_LOAD BIT(7)
#define FREQ_LOAD BIT(6)
#define INTR_MASK 0x085e
#define INTR_STATUS 0x085f
#define INTC_FSYNC BIT(0)
#define INTC_SOP BIT(1)
#define NCO_FREQ_LSB 0x0873
#define NCO_FREQ_MSB 0x0874
#define NCO_TIME_0 0x0875
#define NCO_TIME_1 0x0876
#define NCO_TIME_2_CTRL 0x0877
#define FREQ_MDIO_SEL BIT(14)
#define SYNC_OUT_0 0x0878
#define SYNC_OUT_1 0x0879
#define SYNC_OUT_2 0x087a
#define SYNC_IN_DIVIDER 0x087b
#define SYNOUT_TS_0 0x087c
#define SYNOUT_TS_1 0x087d
#define SYNOUT_TS_2 0x087e
#define NSE_CTRL 0x087f
#define NSE_GMODE_EN GENMASK(15, 14)
#define NSE_CAPTURE_EN BIT(13)
#define NSE_INIT BIT(12)
#define NSE_CPU_FRAMESYNC BIT(5)
#define NSE_SYNC1_FRAMESYNC BIT(3)
#define NSE_FRAMESYNC_MASK GENMASK(5, 2)
#define NSE_PEROUT_EN BIT(1)
#define NSE_ONESHOT_EN BIT(0)
#define NSE_SYNC_OUT_MASK GENMASK(1, 0)
#define TS_READ_CTRL 0x0885
#define TS_READ_START BIT(0)
#define TS_READ_END BIT(1)
#define HB_REG_0 0x0886
#define HB_REG_1 0x0887
#define HB_REG_2 0x0888
#define HB_REG_3 0x08ec
#define HB_REG_4 0x08ed
#define HB_STAT_CTRL 0x088e
#define HB_READ_START BIT(10)
#define HB_READ_END BIT(11)
#define HB_READ_MASK GENMASK(11, 10)
#define TS_REG_0 0x0889
#define TS_REG_1 0x088a
#define TS_REG_2 0x088b
#define TS_REG_3 0x08c4
#define TS_INFO_0 0x088c
#define TS_INFO_1 0x088d
#define TIMECODE_CTRL 0x08c3
#define TX_TIMECODE_SEL GENMASK(7, 0)
#define RX_TIMECODE_SEL GENMASK(15, 8)
#define TIME_SYNC 0x0ff5
#define TIME_SYNC_EN BIT(0)
struct bcm_ptp_private {
struct phy_device *phydev;
struct mii_timestamper mii_ts;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
struct ptp_pin_desc pin;
struct mutex mutex;
struct sk_buff_head tx_queue;
int tx_type;
bool hwts_rx;
u16 nse_ctrl;
bool pin_active;
struct delayed_work pin_work;
};
struct bcm_ptp_skb_cb {
unsigned long timeout;
u16 seq_id;
u8 msgtype;
bool discard;
};
struct bcm_ptp_capture {
ktime_t hwtstamp;
u16 seq_id;
u8 msgtype;
bool tx_dir;
};
#define BCM_SKB_CB(skb) ((struct bcm_ptp_skb_cb *)(skb)->cb)
#define SKB_TS_TIMEOUT 10 /* jiffies */
#define BCM_MAX_PULSE_8NS ((1U << 9) - 1)
#define BCM_MAX_PERIOD_8NS ((1U << 30) - 1)
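/* Editor's note, derived from the two constants above and assuming the units
 * are 8 ns hardware ticks as the names suggest:
 *   BCM_MAX_PULSE_8NS  = 2^9  - 1 = 511         -> max pulse width ~4.088 us
 *   BCM_MAX_PERIOD_8NS = 2^30 - 1 = 1073741823  -> max period      ~8.59 s
 */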
#define BRCM_PHY_MODEL(phydev) \
((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
static struct bcm_ptp_private *mii2priv(struct mii_timestamper *mii_ts)
{
return container_of(mii_ts, struct bcm_ptp_private, mii_ts);
}
static struct bcm_ptp_private *ptp2priv(struct ptp_clock_info *info)
{
return container_of(info, struct bcm_ptp_private, ptp_info);
}
static void bcm_ptp_get_framesync_ts(struct phy_device *phydev,
struct timespec64 *ts)
{
u16 hb[4];
bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_START);
hb[0] = bcm_phy_read_exp(phydev, HB_REG_0);
hb[1] = bcm_phy_read_exp(phydev, HB_REG_1);
hb[2] = bcm_phy_read_exp(phydev, HB_REG_2);
hb[3] = bcm_phy_read_exp(phydev, HB_REG_3);
bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_END);
bcm_phy_write_exp(phydev, HB_STAT_CTRL, 0);
ts->tv_sec = (hb[3] << 16) | hb[2];
ts->tv_nsec = (hb[1] << 16) | hb[0];
}
static u16 bcm_ptp_framesync_disable(struct phy_device *phydev, u16 orig_ctrl)
{
u16 ctrl = orig_ctrl & ~(NSE_FRAMESYNC_MASK | NSE_CAPTURE_EN);
bcm_phy_write_exp(phydev, NSE_CTRL, ctrl);
return ctrl;
}
static void bcm_ptp_framesync_restore(struct phy_device *phydev, u16 orig_ctrl)
{
if (orig_ctrl & NSE_FRAMESYNC_MASK)
bcm_phy_write_exp(phydev, NSE_CTRL, orig_ctrl);
}
static void bcm_ptp_framesync(struct phy_device *phydev, u16 ctrl)
{
/* trigger framesync - must have 0->1 transition. */
bcm_phy_write_exp(phydev, NSE_CTRL, ctrl | NSE_CPU_FRAMESYNC);
}
static int bcm_ptp_framesync_ts(struct phy_device *phydev,
struct ptp_system_timestamp *sts,
struct timespec64 *ts,
u16 orig_ctrl)
{
u16 ctrl, reg;
int i;
ctrl = bcm_ptp_framesync_disable(phydev, orig_ctrl);
ptp_read_system_prets(sts);
/* trigger framesync + capture */
bcm_ptp_framesync(phydev, ctrl | NSE_CAPTURE_EN);
ptp_read_system_postts(sts);
/* poll for FSYNC interrupt from TS capture */
for (i = 0; i < 10; i++) {
reg = bcm_phy_read_exp(phydev, INTR_STATUS);
if (reg & INTC_FSYNC) {
bcm_ptp_get_framesync_ts(phydev, ts);
break;
}
}
bcm_ptp_framesync_restore(phydev, orig_ctrl);
return reg & INTC_FSYNC ? 0 : -ETIMEDOUT;
}
static int bcm_ptp_gettimex(struct ptp_clock_info *info,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct bcm_ptp_private *priv = ptp2priv(info);
int err;
mutex_lock(&priv->mutex);
err = bcm_ptp_framesync_ts(priv->phydev, sts, ts, priv->nse_ctrl);
mutex_unlock(&priv->mutex);
return err;
}
static int bcm_ptp_settime_locked(struct bcm_ptp_private *priv,
const struct timespec64 *ts)
{
struct phy_device *phydev = priv->phydev;
u16 ctrl;
u64 ns;
ctrl = bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
/* set up time code */
bcm_phy_write_exp(phydev, TIME_CODE_0, ts->tv_nsec);
bcm_phy_write_exp(phydev, TIME_CODE_1, ts->tv_nsec >> 16);
bcm_phy_write_exp(phydev, TIME_CODE_2, ts->tv_sec);
bcm_phy_write_exp(phydev, TIME_CODE_3, ts->tv_sec >> 16);
bcm_phy_write_exp(phydev, TIME_CODE_4, ts->tv_sec >> 32);
/* set NCO counter to match */
ns = timespec64_to_ns(ts);
bcm_phy_write_exp(phydev, NCO_TIME_0, ns >> 4);
bcm_phy_write_exp(phydev, NCO_TIME_1, ns >> 20);
bcm_phy_write_exp(phydev, NCO_TIME_2_CTRL, (ns >> 36) & 0xfff);
/* set up load on next frame sync (auto-clears due to NSE_INIT) */
bcm_phy_write_exp(phydev, SHADOW_LOAD, TIME_CODE_LOAD | NCO_TIME_LOAD);
/* must have NSE_INIT in order to write time code */
bcm_ptp_framesync(phydev, ctrl | NSE_INIT);
bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
return 0;
}
static int bcm_ptp_settime(struct ptp_clock_info *info,
const struct timespec64 *ts)
{
struct bcm_ptp_private *priv = ptp2priv(info);
int err;
mutex_lock(&priv->mutex);
err = bcm_ptp_settime_locked(priv, ts);
mutex_unlock(&priv->mutex);
return err;
}
static int bcm_ptp_adjtime_locked(struct bcm_ptp_private *priv,
s64 delta_ns)
{
struct timespec64 ts;
int err;
s64 ns;
err = bcm_ptp_framesync_ts(priv->phydev, NULL, &ts, priv->nse_ctrl);
if (!err) {
ns = timespec64_to_ns(&ts) + delta_ns;
ts = ns_to_timespec64(ns);
err = bcm_ptp_settime_locked(priv, &ts);
}
return err;
}
static int bcm_ptp_adjtime(struct ptp_clock_info *info, s64 delta_ns)
{
struct bcm_ptp_private *priv = ptp2priv(info);
int err;
mutex_lock(&priv->mutex);
err = bcm_ptp_adjtime_locked(priv, delta_ns);
mutex_unlock(&priv->mutex);
return err;
}
/* A 125 MHz clock should adjust 8 ns per pulse.
* The frequency adjustment base is 0x8000 0000, or 8*2^28.
*
* Frequency adjustment is
* adj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
* which simplifies to:
* adj = scaled_ppm * 2^9 / 5^6
*/
static int bcm_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct bcm_ptp_private *priv = ptp2priv(info);
int neg_adj = 0;
u32 diff, freq;
u16 ctrl;
u64 adj;
if (scaled_ppm < 0) {
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
adj = scaled_ppm << 9;
diff = div_u64(adj, 15625);
freq = (8 << 28) + (neg_adj ? -diff : diff);
mutex_lock(&priv->mutex);
ctrl = bcm_ptp_framesync_disable(priv->phydev, priv->nse_ctrl);
bcm_phy_write_exp(priv->phydev, NCO_FREQ_LSB, freq);
bcm_phy_write_exp(priv->phydev, NCO_FREQ_MSB, freq >> 16);
bcm_phy_write_exp(priv->phydev, NCO_TIME_2_CTRL, FREQ_MDIO_SEL);
/* load on next framesync */
bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, FREQ_LOAD);
bcm_ptp_framesync(priv->phydev, ctrl);
/* clear load */
bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, 0);
bcm_ptp_framesync_restore(priv->phydev, priv->nse_ctrl);
mutex_unlock(&priv->mutex);
return 0;
}
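/* Worked example for the conversion above (editor's illustration):
 * scaled_ppm = 65536, i.e. +1 ppm, since scaled_ppm is ppm * 2^16:
 *   adj  = 65536 << 9       = 33554432
 *   diff = 33554432 / 15625 = 2147 (integer division)
 *   freq = (8 << 28) + 2147 = 0x80000863
 * which nudges the 125 MHz NCO up by roughly one part per million.
 */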
static bool bcm_ptp_rxtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
struct skb_shared_hwtstamps *hwts;
struct ptp_header *header;
u32 sec, nsec;
u8 *data;
int off;
if (!priv->hwts_rx)
return false;
header = ptp_parse_header(skb, type);
if (!header)
return false;
data = (u8 *)(header + 1);
sec = get_unaligned_be32(data);
nsec = get_unaligned_be32(data + 4);
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = ktime_set(sec, nsec);
off = data - skb->data + 8;
if (off < skb->len) {
memmove(data, data + 8, skb->len - off);
__pskb_trim(skb, skb->len - 8);
}
return false;
}
static bool bcm_ptp_get_tstamp(struct bcm_ptp_private *priv,
struct bcm_ptp_capture *capts)
{
struct phy_device *phydev = priv->phydev;
u16 ts[4], reg;
u32 sec, nsec;
mutex_lock(&priv->mutex);
reg = bcm_phy_read_exp(phydev, INTR_STATUS);
if ((reg & INTC_SOP) == 0) {
mutex_unlock(&priv->mutex);
return false;
}
bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_START);
ts[0] = bcm_phy_read_exp(phydev, TS_REG_0);
ts[1] = bcm_phy_read_exp(phydev, TS_REG_1);
ts[2] = bcm_phy_read_exp(phydev, TS_REG_2);
ts[3] = bcm_phy_read_exp(phydev, TS_REG_3);
/* not in be32 format for some reason */
capts->seq_id = bcm_phy_read_exp(priv->phydev, TS_INFO_0);
reg = bcm_phy_read_exp(phydev, TS_INFO_1);
capts->msgtype = reg >> 12;
capts->tx_dir = !!(reg & BIT(11));
bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_END);
bcm_phy_write_exp(phydev, TS_READ_CTRL, 0);
mutex_unlock(&priv->mutex);
sec = (ts[3] << 16) | ts[2];
nsec = (ts[1] << 16) | ts[0];
capts->hwtstamp = ktime_set(sec, nsec);
return true;
}
static void bcm_ptp_match_tstamp(struct bcm_ptp_private *priv,
struct bcm_ptp_capture *capts)
{
struct skb_shared_hwtstamps hwts;
struct sk_buff *skb, *ts_skb;
unsigned long flags;
bool first = false;
ts_skb = NULL;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
skb_queue_walk(&priv->tx_queue, skb) {
if (BCM_SKB_CB(skb)->seq_id == capts->seq_id &&
BCM_SKB_CB(skb)->msgtype == capts->msgtype) {
first = skb_queue_is_first(&priv->tx_queue, skb);
__skb_unlink(skb, &priv->tx_queue);
ts_skb = skb;
break;
}
}
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
/* TX captures one-step packets, discard them if needed. */
if (ts_skb) {
if (BCM_SKB_CB(ts_skb)->discard) {
kfree_skb(ts_skb);
} else {
memset(&hwts, 0, sizeof(hwts));
hwts.hwtstamp = capts->hwtstamp;
skb_complete_tx_timestamp(ts_skb, &hwts);
}
}
/* not the first match, so try to expire stale entries */
if (!first) {
while ((skb = skb_dequeue(&priv->tx_queue))) {
if (!time_after(jiffies, BCM_SKB_CB(skb)->timeout)) {
skb_queue_head(&priv->tx_queue, skb);
break;
}
kfree_skb(skb);
}
}
}
static long bcm_ptp_do_aux_work(struct ptp_clock_info *info)
{
struct bcm_ptp_private *priv = ptp2priv(info);
struct bcm_ptp_capture capts;
bool reschedule = false;
while (!skb_queue_empty_lockless(&priv->tx_queue)) {
if (!bcm_ptp_get_tstamp(priv, &capts)) {
reschedule = true;
break;
}
bcm_ptp_match_tstamp(priv, &capts);
}
return reschedule ? 1 : -1;
}
static int bcm_ptp_cancel_func(struct bcm_ptp_private *priv)
{
if (!priv->pin_active)
return 0;
priv->pin_active = false;
priv->nse_ctrl &= ~(NSE_SYNC_OUT_MASK | NSE_SYNC1_FRAMESYNC |
NSE_CAPTURE_EN);
bcm_phy_write_exp(priv->phydev, NSE_CTRL, priv->nse_ctrl);
cancel_delayed_work_sync(&priv->pin_work);
return 0;
}
static void bcm_ptp_perout_work(struct work_struct *pin_work)
{
struct bcm_ptp_private *priv =
container_of(pin_work, struct bcm_ptp_private, pin_work.work);
struct phy_device *phydev = priv->phydev;
struct timespec64 ts;
u64 ns, next;
u16 ctrl;
mutex_lock(&priv->mutex);
/* no longer running */
if (!priv->pin_active) {
mutex_unlock(&priv->mutex);
return;
}
bcm_ptp_framesync_ts(phydev, NULL, &ts, priv->nse_ctrl);
/* this is 1PPS only */
next = NSEC_PER_SEC - ts.tv_nsec;
ts.tv_sec += next < NSEC_PER_MSEC ? 2 : 1;
ts.tv_nsec = 0;
ns = timespec64_to_ns(&ts);
/* force 0->1 transition for ONESHOT */
ctrl = bcm_ptp_framesync_disable(phydev,
priv->nse_ctrl & ~NSE_ONESHOT_EN);
bcm_phy_write_exp(phydev, SYNOUT_TS_0, ns & 0xfff0);
bcm_phy_write_exp(phydev, SYNOUT_TS_1, ns >> 16);
bcm_phy_write_exp(phydev, SYNOUT_TS_2, ns >> 32);
/* load values on next framesync */
bcm_phy_write_exp(phydev, SHADOW_LOAD, SYNC_OUT_LOAD);
bcm_ptp_framesync(phydev, ctrl | NSE_ONESHOT_EN | NSE_INIT);
priv->nse_ctrl |= NSE_ONESHOT_EN;
bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
mutex_unlock(&priv->mutex);
next = next + NSEC_PER_MSEC;
schedule_delayed_work(&priv->pin_work, nsecs_to_jiffies(next));
}
static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
struct ptp_perout_request *req, int on)
{
struct phy_device *phydev = priv->phydev;
u64 period, pulse;
u16 val;
if (!on)
return bcm_ptp_cancel_func(priv);
/* 1PPS */
if (req->period.sec != 1 || req->period.nsec != 0)
return -EINVAL;
period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
if (req->flags & PTP_PEROUT_PHASE)
return -EOPNOTSUPP;
if (req->flags & PTP_PEROUT_DUTY_CYCLE)
pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec));
else
pulse = (u64)BCM_MAX_PULSE_8NS << 3;
/* convert to 8ns units */
pulse >>= 3;
if (!pulse || pulse > period || pulse > BCM_MAX_PULSE_8NS)
return -EINVAL;
bcm_phy_write_exp(phydev, SYNC_OUT_0, period);
val = ((pulse & 0x3) << 14) | ((period >> 16) & 0x3fff);
bcm_phy_write_exp(phydev, SYNC_OUT_1, val);
val = ((pulse >> 2) & 0x7f) | (pulse << 7);
bcm_phy_write_exp(phydev, SYNC_OUT_2, val);
if (priv->pin_active)
cancel_delayed_work_sync(&priv->pin_work);
priv->pin_active = true;
INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_perout_work);
schedule_delayed_work(&priv->pin_work, 0);
return 0;
}
static void bcm_ptp_extts_work(struct work_struct *pin_work)
{
struct bcm_ptp_private *priv =
container_of(pin_work, struct bcm_ptp_private, pin_work.work);
struct phy_device *phydev = priv->phydev;
struct ptp_clock_event event;
struct timespec64 ts;
u16 reg;
mutex_lock(&priv->mutex);
/* no longer running */
if (!priv->pin_active) {
mutex_unlock(&priv->mutex);
return;
}
reg = bcm_phy_read_exp(phydev, INTR_STATUS);
if ((reg & INTC_FSYNC) == 0)
goto out;
bcm_ptp_get_framesync_ts(phydev, &ts);
event.index = 0;
event.type = PTP_CLOCK_EXTTS;
event.timestamp = timespec64_to_ns(&ts);
ptp_clock_event(priv->ptp_clock, &event);
out:
mutex_unlock(&priv->mutex);
schedule_delayed_work(&priv->pin_work, HZ / 4);
}
static int bcm_ptp_extts_locked(struct bcm_ptp_private *priv, int on)
{
struct phy_device *phydev = priv->phydev;
if (!on)
return bcm_ptp_cancel_func(priv);
if (priv->pin_active)
cancel_delayed_work_sync(&priv->pin_work);
bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
priv->nse_ctrl |= NSE_SYNC1_FRAMESYNC | NSE_CAPTURE_EN;
bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
priv->pin_active = true;
INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_extts_work);
schedule_delayed_work(&priv->pin_work, 0);
return 0;
}
static int bcm_ptp_enable(struct ptp_clock_info *info,
struct ptp_clock_request *rq, int on)
{
struct bcm_ptp_private *priv = ptp2priv(info);
int err = -EBUSY;
mutex_lock(&priv->mutex);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
if (priv->pin.func == PTP_PF_PEROUT)
err = bcm_ptp_perout_locked(priv, &rq->perout, on);
break;
case PTP_CLK_REQ_EXTTS:
if (priv->pin.func == PTP_PF_EXTTS)
err = bcm_ptp_extts_locked(priv, on);
break;
default:
err = -EOPNOTSUPP;
break;
}
mutex_unlock(&priv->mutex);
return err;
}
static int bcm_ptp_verify(struct ptp_clock_info *info, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
case PTP_PF_PEROUT:
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static const struct ptp_clock_info bcm_ptp_clock_info = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.max_adj = 100000000,
.gettimex64 = bcm_ptp_gettimex,
.settime64 = bcm_ptp_settime,
.adjtime = bcm_ptp_adjtime,
.adjfine = bcm_ptp_adjfine,
.enable = bcm_ptp_enable,
.verify = bcm_ptp_verify,
.do_aux_work = bcm_ptp_do_aux_work,
.n_pins = 1,
.n_per_out = 1,
.n_ext_ts = 1,
};
static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
struct ptp_header *hdr;
bool discard = false;
int msgtype;
hdr = ptp_parse_header(skb, type);
if (!hdr)
goto out;
msgtype = ptp_get_msgtype(hdr, type);
switch (priv->tx_type) {
case HWTSTAMP_TX_ONESTEP_P2P:
if (msgtype == PTP_MSGTYPE_PDELAY_RESP)
discard = true;
fallthrough;
case HWTSTAMP_TX_ONESTEP_SYNC:
if (msgtype == PTP_MSGTYPE_SYNC)
discard = true;
fallthrough;
case HWTSTAMP_TX_ON:
BCM_SKB_CB(skb)->timeout = jiffies + SKB_TS_TIMEOUT;
BCM_SKB_CB(skb)->seq_id = be16_to_cpu(hdr->sequence_id);
BCM_SKB_CB(skb)->msgtype = msgtype;
BCM_SKB_CB(skb)->discard = discard;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_queue_tail(&priv->tx_queue, skb);
ptp_schedule_worker(priv->ptp_clock, 0);
return;
default:
break;
}
out:
kfree_skb(skb);
}
static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
struct ifreq *ifr)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
struct hwtstamp_config cfg;
u16 mode, ctrl;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->hwts_rx = false;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
priv->hwts_rx = true;
break;
default:
return -ERANGE;
}
priv->tx_type = cfg.tx_type;
ctrl = priv->hwts_rx ? SLICE_RX_EN : 0;
ctrl |= priv->tx_type != HWTSTAMP_TX_OFF ? SLICE_TX_EN : 0;
mode = TX_MODE_SEL(PORT, SYNC, REPLACE_TS) |
TX_MODE_SEL(PORT, DELAY_REQ, REPLACE_TS) |
TX_MODE_SEL(PORT, PDELAY_REQ, REPLACE_TS) |
TX_MODE_SEL(PORT, PDELAY_RESP, REPLACE_TS);
bcm_phy_write_exp(priv->phydev, TX_EVENT_MODE, mode);
mode = RX_MODE_SEL(PORT, SYNC, INSERT_TS_64) |
RX_MODE_SEL(PORT, DELAY_REQ, INSERT_TS_64) |
RX_MODE_SEL(PORT, PDELAY_REQ, INSERT_TS_64) |
RX_MODE_SEL(PORT, PDELAY_RESP, INSERT_TS_64);
bcm_phy_write_exp(priv->phydev, RX_EVENT_MODE, mode);
bcm_phy_write_exp(priv->phydev, SLICE_CTRL, ctrl);
if (ctrl & SLICE_TX_EN)
bcm_phy_write_exp(priv->phydev, TX_TS_CAPTURE, TX_TS_CAP_EN);
else
ptp_cancel_worker_sync(priv->ptp_clock);
/* purge existing data */
skb_queue_purge(&priv->tx_queue);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
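/* Example configuration accepted by the handler above (editor's note):
 * a SIOCSHWTSTAMP request with cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC and
 * cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT enables both the TX and RX
 * slices, selects one-step timestamp replacement for SYNC on TX, and reports
 * the filter back to user space coerced to HWTSTAMP_FILTER_PTP_V2_EVENT.
 */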
static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
struct ethtool_ts_info *ts_info)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
ts_info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
ts_info->tx_types =
BIT(HWTSTAMP_TX_ON) |
BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ONESTEP_SYNC) |
BIT(HWTSTAMP_TX_ONESTEP_P2P);
ts_info->rx_filters =
BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
void bcm_ptp_stop(struct bcm_ptp_private *priv)
{
ptp_cancel_worker_sync(priv->ptp_clock);
bcm_ptp_cancel_func(priv);
}
EXPORT_SYMBOL_GPL(bcm_ptp_stop);
void bcm_ptp_config_init(struct phy_device *phydev)
{
/* init network sync engine */
bcm_phy_write_exp(phydev, NSE_CTRL, NSE_GMODE_EN | NSE_INIT);
/* enable time sync (TX/RX SOP capture) */
bcm_phy_write_exp(phydev, TIME_SYNC, TIME_SYNC_EN);
/* use sec.nsec heartbeat capture */
bcm_phy_write_exp(phydev, DPLL_SELECT, DPLL_HB_MODE2);
/* use 64 bit timecode for TX */
bcm_phy_write_exp(phydev, TIMECODE_CTRL, TX_TIMECODE_SEL);
/* always allow FREQ_LOAD on framesync */
bcm_phy_write_exp(phydev, SHADOW_CTRL, FREQ_LOAD);
bcm_phy_write_exp(phydev, SYNC_IN_DIVIDER, 1);
}
EXPORT_SYMBOL_GPL(bcm_ptp_config_init);
static void bcm_ptp_init(struct bcm_ptp_private *priv)
{
priv->nse_ctrl = NSE_GMODE_EN;
mutex_init(&priv->mutex);
skb_queue_head_init(&priv->tx_queue);
priv->mii_ts.rxtstamp = bcm_ptp_rxtstamp;
priv->mii_ts.txtstamp = bcm_ptp_txtstamp;
priv->mii_ts.hwtstamp = bcm_ptp_hwtstamp;
priv->mii_ts.ts_info = bcm_ptp_ts_info;
priv->phydev->mii_ts = &priv->mii_ts;
}
struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
{
struct bcm_ptp_private *priv;
struct ptp_clock *clock;
switch (BRCM_PHY_MODEL(phydev)) {
case PHY_ID_BCM54210E:
#ifdef PHY_ID_BCM54213PE
case PHY_ID_BCM54213PE:
#endif
break;
default:
return NULL;
}
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->ptp_info = bcm_ptp_clock_info;
snprintf(priv->pin.name, sizeof(priv->pin.name), "SYNC_OUT");
priv->ptp_info.pin_config = &priv->pin;
clock = ptp_clock_register(&priv->ptp_info, &phydev->mdio.dev);
if (IS_ERR(clock))
return ERR_CAST(clock);
priv->ptp_clock = clock;
priv->phydev = phydev;
bcm_ptp_init(priv);
return priv;
}
EXPORT_SYMBOL_GPL(bcm_ptp_probe);
MODULE_LICENSE("GPL");

drivers/net/phy/dp83td510.c Normal file
View File

@ -0,0 +1,258 @@
// SPDX-License-Identifier: GPL-2.0
/* Driver for the Texas Instruments DP83TD510 PHY
* Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
#define DP83TD510E_PHY_ID 0x20000181
/* MDIO_MMD_VEND2 registers */
#define DP83TD510E_PHY_STS 0x10
#define DP83TD510E_STS_MII_INT BIT(7)
#define DP83TD510E_LINK_STATUS BIT(0)
#define DP83TD510E_GEN_CFG 0x11
#define DP83TD510E_GENCFG_INT_POLARITY BIT(3)
#define DP83TD510E_GENCFG_INT_EN BIT(1)
#define DP83TD510E_GENCFG_INT_OE BIT(0)
#define DP83TD510E_INTERRUPT_REG_1 0x12
#define DP83TD510E_INT1_LINK BIT(13)
#define DP83TD510E_INT1_LINK_EN BIT(5)
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
#define DP83TD510E_MSE_DETECT 0xa85
#define DP83TD510_SQI_MAX 7
/* Register values are converted to SNR(dB) as suggested by
* "Application Report - DP83TD510E Cable Diagnostics Toolkit":
* SNR(dB) = -10 * log10 (VAL/2^17) - 1.76 dB.
* SQI ranges are implemented according to "OPEN ALLIANCE - Advanced diagnostic
* features for 100BASE-T1 automotive Ethernet PHYs"
*/
static const u16 dp83td510_mse_sqi_map[] = {
0x0569, /* < 18dB */
0x044c, /* 18dB =< SNR < 19dB */
0x0369, /* 19dB =< SNR < 20dB */
0x02b6, /* 20dB =< SNR < 21dB */
0x0227, /* 21dB =< SNR < 22dB */
0x01b6, /* 22dB =< SNR < 23dB */
0x015b, /* 23dB =< SNR < 24dB */
0x0000 /* 24dB =< SNR */
};
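/* Worked example of the mapping above (editor's illustration): an MSE_DETECT
 * reading of 0x0300 (768) gives
 *   SNR = -10 * log10(768 / 131072) - 1.76 ~= 20.6 dB
 * and the lookup loop in dp83td510_get_sqi() returns the first index whose
 * threshold is <= 768, i.e. index 3 (0x02b6), matching the 20..21 dB bin.
 */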
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
/* Clear any pending interrupts */
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
0x0);
if (ret)
return ret;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
DP83TD510E_INTERRUPT_REG_1,
DP83TD510E_INT1_LINK_EN);
if (ret)
return ret;
ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
DP83TD510E_GEN_CFG,
DP83TD510E_GENCFG_INT_POLARITY |
DP83TD510E_GENCFG_INT_EN |
DP83TD510E_GENCFG_INT_OE);
} else {
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
DP83TD510E_INTERRUPT_REG_1, 0x0);
if (ret)
return ret;
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
DP83TD510E_GEN_CFG,
DP83TD510E_GENCFG_INT_EN);
if (ret)
return ret;
/* Clear any pending interrupts */
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
0x0);
}
return ret;
}
static irqreturn_t dp83td510_handle_interrupt(struct phy_device *phydev)
{
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS);
if (ret < 0) {
phy_error(phydev);
return IRQ_NONE;
} else if (!(ret & DP83TD510E_STS_MII_INT)) {
return IRQ_NONE;
}
/* Read the current enabled interrupts */
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1);
if (ret < 0) {
phy_error(phydev);
return IRQ_NONE;
} else if (!(ret & DP83TD510E_INT1_LINK_EN) ||
!(ret & DP83TD510E_INT1_LINK)) {
return IRQ_NONE;
}
phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
static int dp83td510_read_status(struct phy_device *phydev)
{
u16 phy_sts;
int ret;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
linkmode_zero(phydev->lp_advertising);
phy_sts = phy_read(phydev, DP83TD510E_PHY_STS);
phydev->link = !!(phy_sts & DP83TD510E_LINK_STATUS);
if (phydev->link) {
/* This PHY supports only one link mode: 10BaseT1L_Full */
phydev->duplex = DUPLEX_FULL;
phydev->speed = SPEED_10;
if (phydev->autoneg == AUTONEG_ENABLE) {
ret = genphy_c45_read_lpa(phydev);
if (ret)
return ret;
phy_resolve_aneg_linkmode(phydev);
}
}
if (phydev->autoneg == AUTONEG_ENABLE) {
ret = genphy_c45_baset1_read_status(phydev);
if (ret < 0)
return ret;
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
DP83TD510E_AN_STAT_1);
if (ret < 0)
return ret;
if (ret & DP83TD510E_MASTER_SLAVE_RESOL_FAIL)
phydev->master_slave_state = MASTER_SLAVE_STATE_ERR;
} else {
return genphy_c45_pma_baset1_read_master_slave(phydev);
}
return 0;
}
static int dp83td510_config_aneg(struct phy_device *phydev)
{
bool changed = false;
int ret;
ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
if (ret < 0)
return ret;
if (phydev->autoneg == AUTONEG_DISABLE)
return genphy_c45_an_disable_aneg(phydev);
ret = genphy_c45_an_config_aneg(phydev);
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
return genphy_c45_check_and_restart_aneg(phydev, changed);
}
static int dp83td510_get_sqi(struct phy_device *phydev)
{
int sqi, ret;
u16 mse_val;
if (!phydev->link)
return 0;
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_MSE_DETECT);
if (ret < 0)
return ret;
mse_val = 0xFFFF & ret;
for (sqi = 0; sqi < ARRAY_SIZE(dp83td510_mse_sqi_map); sqi++) {
if (mse_val >= dp83td510_mse_sqi_map[sqi])
return sqi;
}
return -EINVAL;
}
static int dp83td510_get_sqi_max(struct phy_device *phydev)
{
return DP83TD510_SQI_MAX;
}
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on the MDIO bus if no RMII clock is enabled.
 * If RMII mode is used (the most meaningful mode for this PHY), the PHY
 * has no XTAL of its own, and the clock-providing MAC has not been probed
 * yet, we won't be able to read all the needed ability registers.
 * So provide them manually.
 */
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported);
return 0;
}
static struct phy_driver dp83td510_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
.name = "TI DP83TD510E",
.config_aneg = dp83td510_config_aneg,
.read_status = dp83td510_read_status,
.get_features = dp83td510_get_features,
.config_intr = dp83td510_config_intr,
.handle_interrupt = dp83td510_handle_interrupt,
.get_sqi = dp83td510_get_sqi,
.get_sqi_max = dp83td510_get_sqi_max,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83td510_driver);
static struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83td510_tbl);
MODULE_DESCRIPTION("Texas Instruments DP83TD510E PHY driver");
MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,733 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "pcic.h"
#include "debug.h"
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"bhi",
"mhi-er0",
"mhi-er1",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
static const struct ath11k_msi_config ath11k_msi_config[] = {
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_QCA6390_HW20,
},
{
.total_vectors = 16,
.total_users = 3,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
},
.hw_rev = ATH11K_HW_QCN9074_HW10,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW20,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW21,
},
{
.total_vectors = 28,
.total_users = 2,
.users = (struct ath11k_msi_user[]) {
{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
};
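/* Example of how one entry above is consumed (editor's note): for
 * ATH11K_HW_QCA6390_HW20 the 32 MSI vectors are split as MHI -> 0..2,
 * CE -> 3..12, WAKE -> 13 and DP -> 14..31; callers recover their slice
 * via ath11k_pcic_get_user_msi_assignment() below.
 */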
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
{
const struct ath11k_msi_config *msi_config;
int i;
for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
msi_config = &ath11k_msi_config[i];
if (msi_config->hw_rev == ab->hw_rev)
break;
}
if (i == ARRAY_SIZE(ath11k_msi_config)) {
ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
ab->hw_rev);
return -EINVAL;
}
ab->pci.msi.config = msi_config;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
int ret = 0;
/* For offsets beyond BAR + 4K - 32 the device may
 * need to be woken up before it can be accessed.
 */
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
if (offset < ATH11K_PCI_WINDOW_START)
iowrite32(value, ab->mem + offset);
else
ab->pci.ops->window_write32(ab, offset, value);
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
!ret)
ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
int ret = 0;
u32 val;
/* For offsets beyond BAR + 4K - 32 the device may
 * need to be woken up before it can be accessed.
 */
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
if (offset < ATH11K_PCI_WINDOW_START)
val = ioread32(ab->mem + offset);
else
val = ab->pci.ops->window_read32(ab, offset);
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
!ret)
ab->pci.ops->release(ab);
return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi)
{
*msi_addr_lo = ab->pci.msi.addr_lo;
*msi_addr_hi = ab->pci.msi.addr_hi;
}
EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector)
{
const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
int idx;
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
*base_vector = msi_config->users[idx].base_vector;
*user_base_data = *base_vector + ab->pci.msi.ep_base_data;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
return 0;
}
}
ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
return -EINVAL;
}
EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
{
u32 i, msi_data_idx;
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
if (ce_id == i)
break;
msi_data_idx++;
}
*msi_idx = msi_data_idx;
}
EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
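/* Worked example (editor's illustration): if CE2 has CE_ATTR_DIS_INTR set and
 * all other pipes take interrupts, then ce_id 0 -> msi_idx 0, ce_id 1 ->
 * msi_idx 1, and ce_id 3 -> msi_idx 2, because pipes without interrupts are
 * skipped when counting MSI data indices.
 */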
static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
{
int i, j;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
void ath11k_pcic_free_irq(struct ath11k_base *ab)
{
int i, irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
}
ath11k_pcic_free_ext_irq(ab);
}
EXPORT_SYMBOL(ath11k_pcic_free_irq);
static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pcic_ce_irq_disable(ab, i);
}
}
static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
{
int i;
int irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
synchronize_irq(ab->irq_num[irq_idx]);
}
}
static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
struct ath11k_base *ab = ce_pipe->ab;
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
disable_irq_nosync(ab->irq_num[irq_idx]);
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
}
static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_base *ab = irq_grp->ab;
int i;
/* In case of one MSI vector, we handle irq enable/disable
* in a uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc)
{
int i;
clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
ath11k_pcic_ext_grp_disable(irq_grp);
if (irq_grp->napi_enabled) {
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
irq_grp->napi_enabled = false;
}
}
}
static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_base *ab = irq_grp->ab;
int i;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
ath11k_pcic_ext_grp_enable(irq_grp);
}
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
{
int i, j, irq_idx;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++) {
irq_idx = irq_grp->irqs[j];
synchronize_irq(ab->irq_num[irq_idx]);
}
}
}
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
__ath11k_pcic_ext_irq_disable(ab);
ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
struct ath11k_ext_irq_grp,
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
int i;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
work_done = budget;
return work_done;
}
static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
struct ath11k_base *ab = irq_grp->ab;
int i;
if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
napi_schedule(&irq_grp->napi);
return IRQ_HANDLED;
}
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
return ab->pci.ops->get_msi_irq(ab, vector);
}
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
int i, j, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
&user_base_data,
&base_vector);
if (ret < 0)
return ret;
irq_flags = IRQF_SHARED;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
irq_flags |= IRQF_NOBALANCING;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
if (ab->hw_params.ring_mask->tx[i] ||
ab->hw_params.ring_mask->rx[i] ||
ab->hw_params.ring_mask->rx_err[i] ||
ab->hw_params.ring_mask->rx_wbm_rel[i] ||
ab->hw_params.ring_mask->reo_status[i] ||
ab->hw_params.ring_mask->rxdma2host[i] ||
ab->hw_params.ring_mask->host2rxdma[i] ||
ab->hw_params.ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
irq_grp->num_irq = num_irq;
irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
int vector = (i % num_vectors) + base_vector;
int irq = ath11k_pcic_get_msi_irq(ab, vector);
if (irq < 0)
return irq;
ab->irq_num[irq_idx] = irq;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"irq:%d group:%d\n", irq, i);
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
irq_flags, "DP_EXT_IRQ", irq_grp);
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
}
ath11k_pcic_ext_grp_disable(irq_grp);
}
return 0;
}
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
unsigned int msi_data;
int irq, i, ret, irq_idx;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
&msi_data_start, &msi_irq_start);
if (ret)
return ret;
irq_flags = IRQF_SHARED;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
irq_flags |= IRQF_NOBALANCING;
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
irq = ath11k_pcic_get_msi_irq(ab, msi_data);
if (irq < 0)
return irq;
ce_pipe = &ab->ce.ce_pipe[i];
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
irq_flags, irq_name[irq_idx], ce_pipe);
if (ret) {
ath11k_err(ab, "failed to request irq %d: %d\n",
irq_idx, ret);
return ret;
}
ab->irq_num[irq_idx] = irq;
msi_data_idx++;
ath11k_pcic_ce_irq_disable(ab, i);
}
ret = ath11k_pcic_ext_irq_config(ab);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pcic_ce_irq_enable(ab, i);
}
}
EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
}
}
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
ath11k_pcic_ce_irqs_disable(ab);
ath11k_pcic_sync_ce_irqs(ab);
ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
void ath11k_pcic_stop(struct ath11k_base *ab)
{
ath11k_pcic_ce_irq_disable_sync(ab);
ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
int ath11k_pcic_start(struct ath11k_base *ab)
{
set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pcic_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
entry = &ab->hw_params.svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
switch (__le32_to_cpu(entry->pipedir)) {
case PIPEDIR_NONE:
break;
case PIPEDIR_IN:
WARN_ON(dl_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
break;
case PIPEDIR_OUT:
WARN_ON(ul_set);
*ul_pipe = __le32_to_cpu(entry->pipenum);
ul_set = true;
break;
case PIPEDIR_INOUT:
WARN_ON(dl_set);
WARN_ON(ul_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
*ul_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
ul_set = true;
break;
}
}
if (WARN_ON(!ul_set || !dl_set))
return -ENOENT;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
const struct ath11k_pci_ops *pci_ops)
{
if (!pci_ops)
return 0;
/* Return error if mandatory pci_ops callbacks are missing */
if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
!pci_ops->window_read32)
return -EINVAL;
ab->pci.ops = pci_ops;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);

View File

@ -0,0 +1,48 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_CMN_H
#define _ATH11K_PCI_CMN_H
#include "core.h"
#define ATH11K_PCI_IRQ_CE0_OFFSET 3
#define ATH11K_PCI_IRQ_DP_OFFSET 14
#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
#define ATH11K_PCI_WINDOW_START 0x80000
#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0)
/* BAR0 + 4k is always accessible, and no
* need to force wakeup.
* 4K - 32 = 0xFE0
*/
#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector);
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value);
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset);
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi);
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
void ath11k_pcic_free_irq(struct ath11k_base *ab);
int ath11k_pcic_config_irq(struct ath11k_base *ab);
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
void ath11k_pcic_stop(struct ath11k_base *ab);
int ath11k_pcic_start(struct ath11k_base *ab);
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
const struct ath11k_pci_ops *pci_ops);
#endif

View File

@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0-only
#include "wcn36xx.h"
#include "firmware.h"
#define DEFINE(s) [s] = #s
static const char * const wcn36xx_firmware_caps_names[] = {
DEFINE(MCC),
DEFINE(P2P),
DEFINE(DOT11AC),
DEFINE(SLM_SESSIONIZATION),
DEFINE(DOT11AC_OPMODE),
DEFINE(SAP32STA),
DEFINE(TDLS),
DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
DEFINE(WLANACTIVE_OFFLOAD),
DEFINE(BEACON_OFFLOAD),
DEFINE(SCAN_OFFLOAD),
DEFINE(ROAM_OFFLOAD),
DEFINE(BCN_MISS_OFFLOAD),
DEFINE(STA_POWERSAVE),
DEFINE(STA_ADVANCED_PWRSAVE),
DEFINE(AP_UAPSD),
DEFINE(AP_DFS),
DEFINE(BLOCKACK),
DEFINE(PHY_ERR),
DEFINE(BCN_FILTER),
DEFINE(RTT),
DEFINE(RATECTRL),
DEFINE(WOW),
DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
DEFINE(SPECULATIVE_PS_POLL),
DEFINE(SCAN_SCH),
DEFINE(IBSS_HEARTBEAT_OFFLOAD),
DEFINE(WLAN_SCAN_OFFLOAD),
DEFINE(WLAN_PERIODIC_TX_PTRN),
DEFINE(ADVANCE_TDLS),
DEFINE(BATCH_SCAN),
DEFINE(FW_IN_TX_PATH),
DEFINE(EXTENDED_NSOFFLOAD_SLOT),
DEFINE(CH_SWITCH_V1),
DEFINE(HT40_OBSS_SCAN),
DEFINE(UPDATE_CHANNEL_LIST),
DEFINE(WLAN_MCADDR_FLT),
DEFINE(WLAN_CH144),
DEFINE(NAN),
DEFINE(TDLS_SCAN_COEXISTENCE),
DEFINE(LINK_LAYER_STATS_MEAS),
DEFINE(MU_MIMO),
DEFINE(EXTENDED_SCAN),
DEFINE(DYNAMIC_WMM_PS),
DEFINE(MAC_SPOOFED_SCAN),
DEFINE(BMU_ERROR_GENERIC_RECOVERY),
DEFINE(DISA),
DEFINE(FW_STATS),
DEFINE(WPS_PRBRSP_TMPL),
DEFINE(BCN_IE_FLT_DELTA),
DEFINE(TDLS_OFF_CHANNEL),
DEFINE(RTT3),
DEFINE(MGMT_FRAME_LOGGING),
DEFINE(ENHANCED_TXBD_COMPLETION),
DEFINE(LOGGING_ENHANCEMENT),
DEFINE(EXT_SCAN_ENHANCED),
DEFINE(MEMORY_DUMP_SUPPORTED),
DEFINE(PER_PKT_STATS_SUPPORTED),
DEFINE(EXT_LL_STAT),
DEFINE(WIFI_CONFIG),
DEFINE(ANTENNA_DIVERSITY_SELECTION),
};
#undef DEFINE
const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x)
{
if (x >= ARRAY_SIZE(wcn36xx_firmware_caps_names))
return "UNKNOWN";
return wcn36xx_firmware_caps_names[x];
}
void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap)
{
int arr_idx, bit_idx;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
return;
}
arr_idx = cap / 32;
bit_idx = cap % 32;
bitmap[arr_idx] |= (1 << bit_idx);
}
int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap)
{
int arr_idx, bit_idx;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
return -EINVAL;
}
arr_idx = cap / 32;
bit_idx = cap % 32;
return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
}
void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap)
{
int arr_idx, bit_idx;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
return;
}
arr_idx = cap / 32;
bit_idx = cap % 32;
bitmap[arr_idx] &= ~(1 << bit_idx);
}
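/* Worked example for the bitmap helpers above (editor's illustration):
 *   cap = DOT11AC (2)       -> arr_idx 0, bit_idx 2 -> bitmap[0] bit 0x00000004
 *   cap = CH_SWITCH_V1 (33) -> arr_idx 1, bit_idx 1 -> bitmap[1] bit 0x00000002
 * so the 128 possible capability bits span a u32 bitmap[4].
 */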

View File

@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _FIRMWARE_H_
#define _FIRMWARE_H_
/* Capability bitmap exchange definitions and macros starts */
enum wcn36xx_firmware_feat_caps {
MCC = 0,
P2P = 1,
DOT11AC = 2,
SLM_SESSIONIZATION = 3,
DOT11AC_OPMODE = 4,
SAP32STA = 5,
TDLS = 6,
P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
WLANACTIVE_OFFLOAD = 8,
BEACON_OFFLOAD = 9,
SCAN_OFFLOAD = 10,
ROAM_OFFLOAD = 11,
BCN_MISS_OFFLOAD = 12,
STA_POWERSAVE = 13,
STA_ADVANCED_PWRSAVE = 14,
AP_UAPSD = 15,
AP_DFS = 16,
BLOCKACK = 17,
PHY_ERR = 18,
BCN_FILTER = 19,
RTT = 20,
RATECTRL = 21,
WOW = 22,
WLAN_ROAM_SCAN_OFFLOAD = 23,
SPECULATIVE_PS_POLL = 24,
SCAN_SCH = 25,
IBSS_HEARTBEAT_OFFLOAD = 26,
WLAN_SCAN_OFFLOAD = 27,
WLAN_PERIODIC_TX_PTRN = 28,
ADVANCE_TDLS = 29,
BATCH_SCAN = 30,
FW_IN_TX_PATH = 31,
EXTENDED_NSOFFLOAD_SLOT = 32,
CH_SWITCH_V1 = 33,
HT40_OBSS_SCAN = 34,
UPDATE_CHANNEL_LIST = 35,
WLAN_MCADDR_FLT = 36,
WLAN_CH144 = 37,
NAN = 38,
TDLS_SCAN_COEXISTENCE = 39,
LINK_LAYER_STATS_MEAS = 40,
MU_MIMO = 41,
EXTENDED_SCAN = 42,
DYNAMIC_WMM_PS = 43,
MAC_SPOOFED_SCAN = 44,
BMU_ERROR_GENERIC_RECOVERY = 45,
DISA = 46,
FW_STATS = 47,
WPS_PRBRSP_TMPL = 48,
BCN_IE_FLT_DELTA = 49,
TDLS_OFF_CHANNEL = 51,
RTT3 = 52,
MGMT_FRAME_LOGGING = 53,
ENHANCED_TXBD_COMPLETION = 54,
LOGGING_ENHANCEMENT = 55,
EXT_SCAN_ENHANCED = 56,
MEMORY_DUMP_SUPPORTED = 57,
PER_PKT_STATS_SUPPORTED = 58,
EXT_LL_STAT = 60,
WIFI_CONFIG = 61,
ANTENNA_DIVERSITY_SELECTION = 62,
MAX_FEATURE_SUPPORTED = 128,
};
void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap);
int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap);
void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap);
const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x);
#endif /* _FIRMWARE_H_ */

View File

@ -0,0 +1,323 @@
/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2022 MediaTek Inc. */
#ifndef __MT76_CONNAC2_MAC_H
#define __MT76_CONNAC2_MAC_H
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
MT_HDR_FORMAT_802_11,
MT_HDR_FORMAT_802_11_EXT,
};
enum tx_pkt_type {
MT_TX_TYPE_CT,
MT_TX_TYPE_SF,
MT_TX_TYPE_CMD,
MT_TX_TYPE_FW,
};
enum {
MT_CTX0,
MT_HIF0 = 0x0,
MT_LMAC_AC00 = 0x0,
MT_LMAC_AC01,
MT_LMAC_AC02,
MT_LMAC_AC03,
MT_LMAC_ALTX0 = 0x10,
MT_LMAC_BMC0,
MT_LMAC_BCN0,
MT_LMAC_PSMP0,
};
#define MT_TXD0_Q_IDX GENMASK(31, 25)
#define MT_TXD0_PKT_FMT GENMASK(24, 23)
#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
#define MT_TXD0_TX_BYTES GENMASK(15, 0)
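/* Editor's sketch (not from the original file) of how the DW0 fields above
 * are typically composed with the bitfield helpers from <linux/bitfield.h>;
 * "txwi" and "skb" are hypothetical names used only for illustration:
 *
 *   u32 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
 *             FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT) |
 *             FIELD_PREP(MT_TXD0_Q_IDX, MT_LMAC_AC00);
 *   txwi[0] = cpu_to_le32(val);
 */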
#define MT_TXD1_LONG_FORMAT BIT(31)
#define MT_TXD1_TGID BIT(30)
#define MT_TXD1_OWN_MAC GENMASK(29, 24)
#define MT_TXD1_AMSDU BIT(23)
#define MT_TXD1_TID GENMASK(22, 20)
#define MT_TXD1_HDR_PAD GENMASK(19, 18)
#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
#define MT_TXD1_HDR_INFO GENMASK(15, 11)
#define MT_TXD1_ETH_802_3 BIT(15)
#define MT_TXD1_VTA BIT(10)
#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
#define MT_TXD2_FIX_RATE BIT(31)
#define MT_TXD2_FIXED_RATE BIT(30)
#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
#define MT_TXD2_FRAG GENMASK(15, 14)
#define MT_TXD2_HTC_VLD BIT(13)
#define MT_TXD2_DURATION BIT(12)
#define MT_TXD2_BIP BIT(11)
#define MT_TXD2_MULTICAST BIT(10)
#define MT_TXD2_RTS BIT(9)
#define MT_TXD2_SOUNDING BIT(8)
#define MT_TXD2_NDPA BIT(7)
#define MT_TXD2_NDP BIT(6)
#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
#define MT_TXD3_SN_VALID BIT(31)
#define MT_TXD3_PN_VALID BIT(30)
#define MT_TXD3_SW_POWER_MGMT BIT(29)
#define MT_TXD3_BA_DISABLE BIT(28)
#define MT_TXD3_SEQ GENMASK(27, 16)
#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
#define MT_TXD3_TX_COUNT GENMASK(10, 6)
#define MT_TXD3_TIMING_MEASURE BIT(5)
#define MT_TXD3_DAS BIT(4)
#define MT_TXD3_EEOSP BIT(3)
#define MT_TXD3_EMRD BIT(2)
#define MT_TXD3_PROTECT_FRAME BIT(1)
#define MT_TXD3_NO_ACK BIT(0)
#define MT_TXD4_PN_LOW GENMASK(31, 0)
#define MT_TXD5_PN_HIGH GENMASK(31, 16)
#define MT_TXD5_MD BIT(15)
#define MT_TXD5_ADD_BA BIT(14)
#define MT_TXD5_TX_STATUS_HOST BIT(10)
#define MT_TXD5_TX_STATUS_MCU BIT(9)
#define MT_TXD5_TX_STATUS_FMT BIT(8)
#define MT_TXD5_PID GENMASK(7, 0)
#define MT_TXD6_TX_IBF BIT(31)
#define MT_TXD6_TX_EBF BIT(30)
#define MT_TXD6_TX_RATE GENMASK(29, 16)
#define MT_TXD6_SGI GENMASK(15, 14)
#define MT_TXD6_HELTF GENMASK(13, 12)
#define MT_TXD6_LDPC BIT(11)
#define MT_TXD6_SPE_ID_IDX BIT(10)
#define MT_TXD6_ANT_ID GENMASK(7, 4)
#define MT_TXD6_DYN_BW BIT(3)
#define MT_TXD6_FIXED_BW BIT(2)
#define MT_TXD6_BW GENMASK(1, 0)
#define MT_TXD7_TXD_LEN GENMASK(31, 30)
#define MT_TXD7_UDP_TCP_SUM BIT(29)
#define MT_TXD7_IP_SUM BIT(28)
#define MT_TXD7_TYPE GENMASK(21, 20)
#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
#define MT_TXD7_PSE_FID GENMASK(27, 16)
#define MT_TXD7_SPE_IDX GENMASK(15, 11)
#define MT_TXD7_HW_AMSDU BIT(10)
#define MT_TXD7_TX_TIME GENMASK(9, 0)
#define MT_TXD8_L_TYPE GENMASK(5, 4)
#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
#define MT_TX_RATE_STBC BIT(13)
#define MT_TX_RATE_NSS GENMASK(12, 10)
#define MT_TX_RATE_MODE GENMASK(9, 6)
#define MT_TX_RATE_SU_EXT_TONE BIT(5)
#define MT_TX_RATE_DCM BIT(4)
/* VHT/HE only use bits 0-3 */
#define MT_TX_RATE_IDX GENMASK(5, 0)
#define MT_TXS0_FIXED_RATE BIT(31)
#define MT_TXS0_BW GENMASK(30, 29)
#define MT_TXS0_TID GENMASK(28, 26)
#define MT_TXS0_AMPDU BIT(25)
#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
#define MT_TXS0_BA_ERROR BIT(22)
#define MT_TXS0_PS_FLAG BIT(21)
#define MT_TXS0_TXOP_TIMEOUT BIT(20)
#define MT_TXS0_BIP_ERROR BIT(19)
#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
#define MT_TXS0_RTS_TIMEOUT BIT(17)
#define MT_TXS0_ACK_TIMEOUT BIT(16)
#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
#define MT_TXS0_TX_STATUS_HOST BIT(15)
#define MT_TXS0_TX_STATUS_MCU BIT(14)
#define MT_TXS0_TX_RATE GENMASK(13, 0)
#define MT_TXS1_SEQNO GENMASK(31, 20)
#define MT_TXS1_RESP_RATE GENMASK(19, 16)
#define MT_TXS1_RXV_SEQNO GENMASK(15, 8)
#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0)
#define MT_TXS2_BF_STATUS GENMASK(31, 30)
#define MT_TXS2_LAST_TX_RATE GENMASK(29, 27)
#define MT_TXS2_SHARED_ANTENNA BIT(26)
#define MT_TXS2_WCID GENMASK(25, 16)
#define MT_TXS2_TX_DELAY GENMASK(15, 0)
#define MT_TXS3_PID GENMASK(31, 24)
#define MT_TXS3_ANT_ID GENMASK(23, 0)
#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
/* RXD DW1 */
#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
#define MT_RXD1_NORMAL_CM BIT(23)
#define MT_RXD1_NORMAL_CLM BIT(24)
#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
#define MT_RXD1_NORMAL_SPP_EN BIT(29)
#define MT_RXD1_NORMAL_ADD_OM BIT(30)
#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
/* RXD DW2 */
#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
#define MT_RXD2_NORMAL_CO_ANT BIT(6)
#define MT_RXD2_NORMAL_BF_CQI BIT(7)
#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
#define MT_RXD2_NORMAL_MU_BAR BIT(21)
#define MT_RXD2_NORMAL_SW_BIT BIT(22)
#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
#define MT_RXD2_NORMAL_FRAG BIT(27)
#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
#define MT_RXD2_NORMAL_NDATA BIT(29)
#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
/* RXD DW4 */
#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
#define MT_RXD4_NORMAL_CLS BIT(10)
#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
#define MT_RXD3_NORMAL_PF_MODE BIT(29)
#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
#define MT_RXV_HDR_BAND_IDX BIT(24)
/* RXD DW3 */
#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
#define MT_RXD3_NORMAL_U2M BIT(0)
#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
#define MT_RXD3_NORMAL_AMSDU BIT(22)
#define MT_RXD3_NORMAL_MESH BIT(23)
#define MT_RXD3_NORMAL_MHCP BIT(24)
#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
#define MT_RXD3_NORMAL_MORE BIT(28)
#define MT_RXD3_NORMAL_UNWANT BIT(29)
#define MT_RXD3_NORMAL_RX_DROP BIT(30)
#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
/* RXD GROUP4 */
#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
#define MT_RXD6_TA_LO GENMASK(31, 16)
#define MT_RXD7_TA_HI GENMASK(31, 0)
#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
#define MT_RXD8_QOS_CTL GENMASK(31, 16)
#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
/* P-RXV DW0 */
#define MT_PRXV_TX_RATE GENMASK(6, 0)
#define MT_PRXV_TX_DCM BIT(4)
#define MT_PRXV_TX_ER_SU_106T BIT(5)
#define MT_PRXV_NSTS GENMASK(9, 7)
#define MT_PRXV_TXBF BIT(10)
#define MT_PRXV_HT_AD_CODE BIT(11)
#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
#define MT_PRXV_HT_SGI GENMASK(16, 15)
#define MT_PRXV_HT_STBC GENMASK(23, 22)
#define MT_PRXV_TX_MODE GENMASK(27, 24)
#define MT_PRXV_DCM BIT(17)
#define MT_PRXV_NUM_RX GENMASK(20, 18)
/* P-RXV DW1 */
#define MT_PRXV_RCPI3 GENMASK(31, 24)
#define MT_PRXV_RCPI2 GENMASK(23, 16)
#define MT_PRXV_RCPI1 GENMASK(15, 8)
#define MT_PRXV_RCPI0 GENMASK(7, 0)
#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
/* C-RXV */
#define MT_CRXV_HT_STBC GENMASK(1, 0)
#define MT_CRXV_TX_MODE GENMASK(7, 4)
#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
#define MT_CRXV_HE_UPLINK BIT(31)
#define MT_CRXV_HE_RU0 GENMASK(7, 0)
#define MT_CRXV_HE_RU1 GENMASK(15, 8)
#define MT_CRXV_HE_RU2 GENMASK(23, 16)
#define MT_CRXV_HE_RU3 GENMASK(31, 24)
#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
#define MT_CRXV_HE_BEAM_CHNG BIT(13)
#define MT_CRXV_HE_DOPPLER BIT(16)
#define MT_CRXV_SNR GENMASK(18, 13)
#define MT_CRXV_FOE_LO GENMASK(31, 19)
#define MT_CRXV_FOE_HI GENMASK(6, 0)
#define MT_CRXV_FOE_SHIFT 13
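/* Illustrative sketch (not part of the original header): the frequency
 * offset estimate (FOE) is split across two C-RXV words, so the low and
 * high halves are recombined using MT_CRXV_FOE_SHIFT. The rxv[] word
 * indices below are assumptions made for the sake of the example.
 */
#if 0	/* example only */
static u32 example_crxv_foe(const __le32 *rxv)
{
	return FIELD_GET(MT_CRXV_FOE_LO, le32_to_cpu(rxv[14])) |
	       (FIELD_GET(MT_CRXV_FOE_HI, le32_to_cpu(rxv[15])) <<
		MT_CRXV_FOE_SHIFT);
}
#endif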
#define MT_CT_INFO_APPLY_TXD BIT(0)
#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
#define MT_CT_INFO_MGMT_FRAME BIT(2)
#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
#define MT_CT_INFO_HSR2_TX BIT(4)
#define MT_CT_INFO_FROM_HOST BIT(7)
enum tx_mcu_port_q_idx {
MT_TX_MCU_PORT_RX_Q0 = 0x20,
MT_TX_MCU_PORT_RX_Q1,
MT_TX_MCU_PORT_RX_Q2,
MT_TX_MCU_PORT_RX_Q3,
MT_TX_MCU_PORT_RX_FWDL = 0x3e
};
enum tx_port_idx {
MT_TX_PORT_IDX_LMAC,
MT_TX_PORT_IDX_MCU
};
#endif /* __MT76_CONNAC2_MAC_H */

View File

@ -0,0 +1,279 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2022 MediaTek Inc. */
#include <linux/acpi.h>
#include "mt7921.h"
static int
mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *sar_root, *sar_unit;
struct mt76_dev *mdev = &dev->mt76;
acpi_handle root, handle;
acpi_status status;
u32 i = 0;
int ret;
root = ACPI_HANDLE(mdev->dev);
if (!root)
return -EOPNOTSUPP;
status = acpi_get_handle(root, method, &handle);
if (ACPI_FAILURE(status))
return -EIO;
status = acpi_evaluate_object(handle, NULL, NULL, &buf);
if (ACPI_FAILURE(status))
return -EIO;
sar_root = buf.pointer;
if (sar_root->type != ACPI_TYPE_PACKAGE ||
sar_root->package.count < 4 ||
sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
dev_err(mdev->dev, "sar cnt = %d\n",
sar_root->package.count);
goto free;
}
if (!*tbl) {
*tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
GFP_KERNEL);
if (!*tbl)
goto free;
}
if (len)
*len = sar_root->package.count;
for (i = 0; i < sar_root->package.count; i++) {
sar_unit = &sar_root->package.elements[i];
if (sar_unit->type != ACPI_TYPE_INTEGER)
break;
*(*tbl + i) = (u8)sar_unit->integer.value;
}
free:
/* evaluate the result before freeing the ACPI buffer to avoid a
 * use-after-free on sar_root
 */
ret = (i == sar_root->package.count) ? 0 : -EINVAL;
kfree(sar_root);
return ret;
}
/* MTCL : Country List Table for 6G band */
static int
mt7921_asar_acpi_read_mtcl(struct mt7921_dev *dev, u8 **table, u8 *version)
{
*version = (mt7921_acpi_read(dev, MT7921_ACPI_MTCL, table, NULL) < 0)
? 1 : 2;
return 0;
}
/* MTDS : Dynamic SAR Power Table */
static int
mt7921_asar_acpi_read_mtds(struct mt7921_dev *dev, u8 **table, u8 version)
{
int len, ret, sarlen, prelen, tblcnt;
bool enable;
ret = mt7921_acpi_read(dev, MT7921_ACPI_MTDS, table, &len);
if (ret)
return ret;
/* Table content validation */
switch (version) {
case 1:
enable = ((struct mt7921_asar_dyn *)*table)->enable;
sarlen = sizeof(struct mt7921_asar_dyn_limit);
prelen = sizeof(struct mt7921_asar_dyn);
break;
case 2:
enable = ((struct mt7921_asar_dyn_v2 *)*table)->enable;
sarlen = sizeof(struct mt7921_asar_dyn_limit_v2);
prelen = sizeof(struct mt7921_asar_dyn_v2);
break;
default:
return -EINVAL;
}
tblcnt = (len - prelen) / sarlen;
if (!enable ||
tblcnt > MT7921_ASAR_MAX_DYN || tblcnt < MT7921_ASAR_MIN_DYN)
ret = -EINVAL;
return ret;
}
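/* Worked example (added for illustration, not in the original source):
 * for a v1 MTDS package the header (struct mt7921_asar_dyn) is 6 bytes and
 * each entry (struct mt7921_asar_dyn_limit) is 6 bytes, so a 30-byte table
 * yields tblcnt = (30 - 6) / 6 = 4, which falls inside the accepted
 * MT7921_ASAR_MIN_DYN..MT7921_ASAR_MAX_DYN range of 1..8.
 */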
/* MTGS : Geo SAR Power Table */
static int
mt7921_asar_acpi_read_mtgs(struct mt7921_dev *dev, u8 **table, u8 version)
{
int len, ret = 0, sarlen, prelen, tblcnt;
ret = mt7921_acpi_read(dev, MT7921_ACPI_MTGS, table, &len);
if (ret)
return ret;
/* Table content validation */
switch (version) {
case 1:
sarlen = sizeof(struct mt7921_asar_geo_limit);
prelen = sizeof(struct mt7921_asar_geo);
break;
case 2:
sarlen = sizeof(struct mt7921_asar_geo_limit_v2);
prelen = sizeof(struct mt7921_asar_geo_v2);
break;
default:
return -EINVAL;
}
tblcnt = (len - prelen) / sarlen;
if (tblcnt > MT7921_ASAR_MAX_GEO || tblcnt < MT7921_ASAR_MIN_GEO)
ret = -EINVAL;
return ret;
}
int mt7921_init_acpi_sar(struct mt7921_dev *dev)
{
struct mt7921_acpi_sar *asar;
int ret;
asar = devm_kzalloc(dev->mt76.dev, sizeof(*asar), GFP_KERNEL);
if (!asar)
return -ENOMEM;
mt7921_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
/* MTDS is mandatory. Return error if table is invalid */
ret = mt7921_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->dyn);
devm_kfree(dev->mt76.dev, asar->countrylist);
devm_kfree(dev->mt76.dev, asar);
return ret;
}
/* MTGS is optional */
ret = mt7921_asar_acpi_read_mtgs(dev, (u8 **)&asar->geo, asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->geo);
asar->geo = NULL;
}
dev->phy.acpisar = asar;
return 0;
}
static s8
mt7921_asar_get_geo_pwr(struct mt7921_phy *phy,
enum nl80211_band band, s8 dyn_power)
{
struct mt7921_acpi_sar *asar = phy->acpisar;
struct mt7921_asar_geo_band *band_pwr;
s8 geo_power;
u8 idx, max;
if (!asar->geo)
return dyn_power;
switch (phy->mt76->dev->region) {
case NL80211_DFS_FCC:
idx = 0;
break;
case NL80211_DFS_ETSI:
idx = 1;
break;
default: /* WW */
idx = 2;
break;
}
if (asar->ver == 1) {
band_pwr = &asar->geo->tbl[idx].band[0];
max = ARRAY_SIZE(asar->geo->tbl[idx].band);
} else {
band_pwr = &asar->geo_v2->tbl[idx].band[0];
max = ARRAY_SIZE(asar->geo_v2->tbl[idx].band);
}
switch (band) {
case NL80211_BAND_2GHZ:
idx = 0;
break;
case NL80211_BAND_5GHZ:
idx = 1;
break;
case NL80211_BAND_6GHZ:
idx = 2;
break;
default:
return dyn_power;
}
if (idx >= max)
return dyn_power;
geo_power = (band_pwr + idx)->pwr;
dyn_power += (band_pwr + idx)->offset;
return min(geo_power, dyn_power);
}
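/* Worked example (added for illustration, not in the original source):
 * in the ETSI region (idx 1) on the 5 GHz band, a dynamic SAR limit of 20
 * with a geo offset of 2 and a geo cap of 18 gives min(18, 20 + 2) = 18,
 * i.e. the geo table caps the adjusted dynamic power.
 */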
static s8
mt7921_asar_range_pwr(struct mt7921_phy *phy,
const struct cfg80211_sar_freq_ranges *range,
u8 idx)
{
const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
struct mt7921_acpi_sar *asar = phy->acpisar;
u8 *limit, band, max;
if (!capa)
return 127;
if (asar->ver == 1) {
limit = &asar->dyn->tbl[0].frp[0];
max = ARRAY_SIZE(asar->dyn->tbl[0].frp);
} else {
limit = &asar->dyn_v2->tbl[0].frp[0];
max = ARRAY_SIZE(asar->dyn_v2->tbl[0].frp);
}
if (idx >= max)
return 127;
if (range->start_freq >= 5945)
band = NL80211_BAND_6GHZ;
else if (range->start_freq >= 5150)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_2GHZ;
return mt7921_asar_get_geo_pwr(phy, band, limit[idx]);
}
int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
{
const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
int i;
if (!phy->acpisar)
return 0;
/* When ACPI SAR is enabled in HW, apply these rules to .frp:
 * 1. w/o .sar_specs : use the ACPI SAR power as the default value
 * 2. w/  .sar_specs : set the power to min(.sar_specs, ACPI_SAR)
 * (see the usage sketch after this function)
 */
for (i = 0; i < capa->num_freq_ranges; i++) {
struct mt76_freq_range_power *frp = &phy->mt76->frp[i];
frp->range = set_default ? &capa->freq_ranges[i] : frp->range;
if (!frp->range)
continue;
frp->power = min_t(s8, set_default ? 127 : frp->power,
mt7921_asar_range_pwr(phy, frp->range, i));
}
return 0;
}
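The two entry points above are intended to be called once at driver setup and again whenever SAR limits are (re)applied. A minimal usage sketch, assuming a probe-time hook and a cfg80211 set_sar_specs handler; both caller names are hypothetical, since the real callers live in other files of this commit:

static int example_register_acpi_sar(struct mt7921_dev *dev)
{
	int ret;

	/* parse MTCL/MTDS/MTGS once at probe time */
	ret = mt7921_init_acpi_sar(dev);
	if (ret)
		return ret;

	/* seed .frp[] with the ACPI defaults before the interface is up */
	return mt7921_init_acpi_sar_power(&dev->phy, true);
}

static int example_set_sar_specs(struct mt7921_phy *phy)
{
	/* after userspace provides .sar_specs, clamp it against ACPI SAR */
	return mt7921_init_acpi_sar_power(phy, false);
}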

View File

@ -0,0 +1,93 @@
/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2022 MediaTek Inc. */
#ifndef __MT7921_ACPI_SAR_H
#define __MT7921_ACPI_SAR_H
#define MT7921_ASAR_MIN_DYN 1
#define MT7921_ASAR_MAX_DYN 8
#define MT7921_ASAR_MIN_GEO 3
#define MT7921_ASAR_MAX_GEO 8
#define MT7921_ACPI_MTCL "MTCL"
#define MT7921_ACPI_MTDS "MTDS"
#define MT7921_ACPI_MTGS "MTGS"
struct mt7921_asar_dyn_limit {
u8 idx;
u8 frp[5];
} __packed;
struct mt7921_asar_dyn {
u8 names[4];
u8 enable;
u8 nr_tbl;
struct mt7921_asar_dyn_limit tbl[0];
} __packed;
struct mt7921_asar_dyn_limit_v2 {
u8 idx;
u8 frp[11];
} __packed;
struct mt7921_asar_dyn_v2 {
u8 names[4];
u8 enable;
u8 rsvd;
u8 nr_tbl;
struct mt7921_asar_dyn_limit_v2 tbl[0];
} __packed;
struct mt7921_asar_geo_band {
u8 pwr;
u8 offset;
} __packed;
struct mt7921_asar_geo_limit {
u8 idx;
/* 0:2G, 1:5G */
struct mt7921_asar_geo_band band[2];
} __packed;
struct mt7921_asar_geo {
u8 names[4];
u8 version;
u8 nr_tbl;
struct mt7921_asar_geo_limit tbl[0];
} __packed;
struct mt7921_asar_geo_limit_v2 {
u8 idx;
/* 0:2G, 1:5G, 2:6G */
struct mt7921_asar_geo_band band[3];
} __packed;
struct mt7921_asar_geo_v2 {
u8 names[4];
u8 version;
u8 rsvd;
u8 nr_tbl;
struct mt7921_asar_geo_limit_v2 tbl[0];
} __packed;
struct mt7921_asar_cl {
u8 names[4];
u8 version;
u8 mode_6g;
u8 cl6g[6];
} __packed;
struct mt7921_acpi_sar {
u8 ver;
union {
struct mt7921_asar_dyn *dyn;
struct mt7921_asar_dyn_v2 *dyn_v2;
};
union {
struct mt7921_asar_geo *geo;
struct mt7921_asar_geo_v2 *geo_v2;
};
struct mt7921_asar_cl *countrylist;
};
#endif
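For reference, the packed layouts above map directly onto the byte stream returned by mt7921_acpi_read(). A hedged illustration of a v1 MTDS blob with two entries; the byte values are made up for the example:

#if 0	/* example only */
static const u8 example_mtds_v1[] = {
	'M', 'T', 'D', 'S',	/* names[4] */
	1,			/* enable   */
	2,			/* nr_tbl   */
	/* tbl[0]: idx + frp[5] */
	0, 60, 58, 56, 54, 52,
	/* tbl[1]: idx + frp[5] */
	1, 50, 48, 46, 44, 42,
};
/* (sizeof(example_mtds_v1) - sizeof(struct mt7921_asar_dyn)) /
 * sizeof(struct mt7921_asar_dyn_limit) == 2 table entries
 */
#endif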

View File

@ -0,0 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
config WLAN_VENDOR_PURELIFI
bool "pureLiFi devices"
default y
help
If you have a pureLiFi device, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all the
questions about these cards. If you say Y, you will be asked for
your specific card in the following questions.
if WLAN_VENDOR_PURELIFI
source "drivers/net/wireless/purelifi/plfxlc/Kconfig"
endif # WLAN_VENDOR_PURELIFI

View File

@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PLFXLC) := plfxlc/

View File

@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config PLFXLC
tristate "pureLiFi X, XL, XC device support"
depends on CFG80211 && MAC80211 && USB
help
This option adds support for pureLiFi LiFi wireless USB
adapters. The pureLiFi X, XL, XC USB devices are based on
802.11 OFDM PHY but use light as the transmission medium.
The driver supports common 802.11 encryption/authentication
methods including Open, WPA, WPA2-Personal and
WPA2-Enterprise (802.1X).
To compile this driver as a module, choose M here. The module will
be called plfxlc.

View File

@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PLFXLC) := plfxlc.o
plfxlc-objs += chip.o firmware.o usb.o mac.o

View File

@ -0,0 +1,98 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 pureLiFi
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include "chip.h"
#include "mac.h"
#include "usb.h"
void plfxlc_chip_init(struct plfxlc_chip *chip,
struct ieee80211_hw *hw,
struct usb_interface *intf)
{
memset(chip, 0, sizeof(*chip));
mutex_init(&chip->mutex);
plfxlc_usb_init(&chip->usb, hw, intf);
}
void plfxlc_chip_release(struct plfxlc_chip *chip)
{
plfxlc_usb_release(&chip->usb);
mutex_destroy(&chip->mutex);
}
int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval,
u8 dtim_period, int type)
{
if (!interval ||
(chip->beacon_set && chip->beacon_interval == interval))
return 0;
chip->beacon_interval = interval;
chip->beacon_set = true;
return plfxlc_usb_wreq(chip->usb.ez_usb,
&chip->beacon_interval,
sizeof(chip->beacon_interval),
USB_REQ_BEACON_INTERVAL_WR);
}
int plfxlc_chip_init_hw(struct plfxlc_chip *chip)
{
unsigned char *addr = plfxlc_mac_get_perm_addr(plfxlc_chip_to_mac(chip));
struct usb_device *udev = interface_to_usbdev(chip->usb.intf);
pr_info("plfxlc chip %04x:%04x v%02x %pM %s\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
le16_to_cpu(udev->descriptor.bcdDevice),
addr,
plfxlc_speed(udev->speed));
return plfxlc_set_beacon_interval(chip, 100, 0, 0);
}
int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value)
{
int r;
__le16 radio_on = cpu_to_le16(value);
r = plfxlc_usb_wreq(chip->usb.ez_usb, &radio_on,
sizeof(value), USB_REQ_POWER_WR);
if (r)
dev_err(plfxlc_chip_dev(chip), "POWER_WR failed (%d)\n", r);
return r;
}
int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip)
{
plfxlc_usb_enable_tx(&chip->usb);
return plfxlc_usb_enable_rx(&chip->usb);
}
void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip)
{
u8 value = 0;
plfxlc_usb_wreq(chip->usb.ez_usb,
&value, sizeof(value), USB_REQ_RXTX_WR);
plfxlc_usb_disable_rx(&chip->usb);
plfxlc_usb_disable_tx(&chip->usb);
}
int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate)
{
int r;
if (!chip)
return -EINVAL;
r = plfxlc_usb_wreq(chip->usb.ez_usb,
&rate, sizeof(rate), USB_REQ_RATE_WR);
if (r)
dev_err(plfxlc_chip_dev(chip), "RATE_WR failed (%d)\n", r);
return r;
}
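A plausible bring-up sequence for this chip layer, as suggested by the functions above; the surrounding mac/usb glue and the caller name are assumptions and not part of this file:

static int example_bring_up(struct plfxlc_chip *chip)
{
	int r;

	/* prints device IDs and programs the default beacon interval */
	r = plfxlc_chip_init_hw(chip);
	if (r)
		return r;

	r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON);
	if (r)
		return r;

	/* enables USB TX, then RX */
	return plfxlc_chip_enable_rxtx(chip);
}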

View File

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 pureLiFi
*/
#ifndef PLFXLC_CHIP_H
#define PLFXLC_CHIP_H
#include <net/mac80211.h>
#include "usb.h"
enum unit_type {
STA = 0,
AP = 1,
};
enum {
PLFXLC_RADIO_OFF = 0,
PLFXLC_RADIO_ON = 1,
};
struct plfxlc_chip {
struct plfxlc_usb usb;
struct mutex mutex; /* lock to protect chip data */
enum unit_type unit_type;
u16 link_led;
u8 beacon_set;
u16 beacon_interval;
};
struct plfxlc_mc_hash {
u32 low;
u32 high;
};
#define plfxlc_chip_dev(chip) (&(chip)->usb.intf->dev)
void plfxlc_chip_init(struct plfxlc_chip *chip,
struct ieee80211_hw *hw,
struct usb_interface *intf);
void plfxlc_chip_release(struct plfxlc_chip *chip);
void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip);
int plfxlc_chip_init_hw(struct plfxlc_chip *chip);
int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip);
int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate);
int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval,
u8 dtim_period, int type);
int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value);
static inline struct plfxlc_chip *plfxlc_usb_to_chip(struct plfxlc_usb
*usb)
{
return container_of(usb, struct plfxlc_chip, usb);
}
static inline void plfxlc_mc_add_all(struct plfxlc_mc_hash *hash)
{
hash->low = 0xffffffff;
hash->high = 0xffffffff;
}
#endif /* PLFXLC_CHIP_H */
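The container_of helper above lets USB-layer callbacks recover the owning chip. A hedged sketch; the callback name and the reason for resetting beacon_set are assumptions:

#if 0	/* example only */
static void example_usb_event(struct plfxlc_usb *usb)
{
	struct plfxlc_chip *chip = plfxlc_usb_to_chip(usb);

	mutex_lock(&chip->mutex);
	/* force plfxlc_set_beacon_interval() to rewrite the interval */
	chip->beacon_set = false;
	mutex_unlock(&chip->mutex);
}
#endif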

Some files were not shown because too many files have changed in this diff.