diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c new file mode 100644 index 0000000000..73ae4656a6 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_rep.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "ef100_rep.h" +#include "ef100_netdev.h" +#include "ef100_nic.h" +#include "mae.h" +#include "rx_common.h" + +#define EFX_EF100_REP_DRIVER "efx_ef100_rep" + +#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64 + +static int efx_ef100_rep_poll(struct napi_struct *napi, int weight); + +static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv, + unsigned int i) +{ + efv->parent = efx; + efv->idx = i; + INIT_LIST_HEAD(&efv->list); + efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + INIT_LIST_HEAD(&efv->dflt.acts.list); + INIT_LIST_HEAD(&efv->rx_list); + spin_lock_init(&efv->rx_lock); + efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW; + return 0; +} + +static int efx_ef100_rep_open(struct net_device *net_dev) +{ + struct efx_rep *efv = netdev_priv(net_dev); + + netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll, + NAPI_POLL_WEIGHT); + napi_enable(&efv->napi); + return 0; +} + +static int efx_ef100_rep_close(struct net_device *net_dev) +{ + struct efx_rep *efv = netdev_priv(net_dev); + + napi_disable(&efv->napi); + netif_napi_del(&efv->napi); + return 0; +} + +static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct efx_rep *efv = netdev_priv(dev); + struct efx_nic *efx = efv->parent; + netdev_tx_t rc; + + /* __ef100_hard_start_xmit() will always return success even in the + * case of TX drops, where it will increment efx's tx_dropped. The + * efv stats really only count attempted TX, not success/failure. 
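+ * (Hence the unconditional tx_packets/tx_bytes increments just below, + * taken before the TX is attempted; failures show up only in efx's own + * counters.)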
+ */ + atomic64_inc(&efv->stats.tx_packets); + atomic64_add(skb->len, &efv->stats.tx_bytes); + netif_tx_lock(efx->net_dev); + rc = __ef100_hard_start_xmit(skb, efx, dev, efv); + netif_tx_unlock(efx->net_dev); + return rc; +} + +static int efx_ef100_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_rep *efv = netdev_priv(dev); + struct efx_nic *efx = efv->parent; + struct ef100_nic_data *nic_data; + + nic_data = efx->nic_data; + /* nic_data->port_id is a u8[] */ + ppid->id_len = sizeof(nic_data->port_id); + memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id)); + return 0; +} + +static int efx_ef100_rep_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct efx_rep *efv = netdev_priv(dev); + struct efx_nic *efx = efv->parent; + struct ef100_nic_data *nic_data; + int ret; + + nic_data = efx->nic_data; + ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num, + nic_data->pf_index, efv->idx); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + +static void efx_ef100_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct efx_rep *efv = netdev_priv(dev); + + stats->rx_packets = atomic64_read(&efv->stats.rx_packets); + stats->tx_packets = atomic64_read(&efv->stats.tx_packets); + stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes); + stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes); + stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped); + stats->tx_errors = atomic64_read(&efv->stats.tx_errors); +} + +static const struct net_device_ops efx_ef100_rep_netdev_ops = { + .ndo_open = efx_ef100_rep_open, + .ndo_stop = efx_ef100_rep_close, + .ndo_start_xmit = efx_ef100_rep_xmit, + .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id, + .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name, + .ndo_get_stats64 = efx_ef100_rep_get_stats64, +}; + +static void efx_ef100_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver)); +} + +static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev) +{ + struct efx_rep *efv = netdev_priv(net_dev); + + return efv->msg_enable; +} + +static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev, + u32 msg_enable) +{ + struct efx_rep *efv = netdev_priv(net_dev); + + efv->msg_enable = msg_enable; +} + +static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +{ + struct efx_rep *efv = netdev_priv(net_dev); + + ring->rx_max_pending = U32_MAX; + ring->rx_pending = efv->rx_pring_size; +} + +static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +{ + struct efx_rep *efv = netdev_priv(net_dev); + + if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending) + return -EINVAL; + + efv->rx_pring_size = ring->rx_pending; + return 0; +} + +static const struct ethtool_ops efx_ef100_rep_ethtool_ops = { + .get_drvinfo = efx_ef100_rep_get_drvinfo, + .get_msglevel = efx_ef100_rep_ethtool_get_msglevel, + .set_msglevel = efx_ef100_rep_ethtool_set_msglevel, + .get_ringparam = efx_ef100_rep_ethtool_get_ringparam, + .set_ringparam = efx_ef100_rep_ethtool_set_ringparam, +}; + +static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx, + unsigned int i) 
+{ + struct net_device *net_dev; + struct efx_rep *efv; + int rc; + + net_dev = alloc_etherdev_mq(sizeof(*efv), 1); + if (!net_dev) + return ERR_PTR(-ENOMEM); + + efv = netdev_priv(net_dev); + rc = efx_ef100_rep_init_struct(efx, efv, i); + if (rc) + goto fail1; + efv->net_dev = net_dev; + rtnl_lock(); + spin_lock_bh(&efx->vf_reps_lock); + list_add_tail(&efv->list, &efx->vf_reps); + spin_unlock_bh(&efx->vf_reps_lock); + if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) { + netif_device_attach(net_dev); + netif_carrier_on(net_dev); + } else { + netif_carrier_off(net_dev); + netif_tx_stop_all_queues(net_dev); + } + rtnl_unlock(); + + net_dev->netdev_ops = &efx_ef100_rep_netdev_ops; + net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops; + net_dev->min_mtu = EFX_MIN_MTU; + net_dev->max_mtu = EFX_MAX_MTU; + net_dev->features |= NETIF_F_LLTX; + net_dev->hw_features |= NETIF_F_LLTX; + return efv; +fail1: + free_netdev(net_dev); + return ERR_PTR(rc); +} + +static int efx_ef100_configure_rep(struct efx_rep *efv) +{ + struct efx_nic *efx = efv->parent; + u32 selector; + int rc; + + efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE; + /* Construct mport selector for corresponding VF */ + efx_mae_mport_vf(efx, efv->idx, &selector); + /* Look up actual mport ID */ + rc = efx_mae_lookup_mport(efx, selector, &efv->mport); + if (rc) + return rc; + pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport); + /* mport label should fit in 16 bits */ + WARN_ON(efv->mport >> 16); + + return efx_tc_configure_default_rule_rep(efv); +} + +static void efx_ef100_deconfigure_rep(struct efx_rep *efv) +{ + struct efx_nic *efx = efv->parent; + + efx_tc_deconfigure_default_rule(efx, &efv->dflt); +} + +static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv) +{ + struct efx_nic *efx = efv->parent; + + rtnl_lock(); + spin_lock_bh(&efx->vf_reps_lock); + list_del(&efv->list); + spin_unlock_bh(&efx->vf_reps_lock); + rtnl_unlock(); + synchronize_rcu(); + free_netdev(efv->net_dev); +} + +int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i) +{ + struct efx_rep *efv; + int rc; + + efv = efx_ef100_rep_create_netdev(efx, i); + if (IS_ERR(efv)) { + rc = PTR_ERR(efv); + pci_err(efx->pci_dev, + "Failed to create representor for VF %d, rc %d\n", i, + rc); + return rc; + } + rc = efx_ef100_configure_rep(efv); + if (rc) { + pci_err(efx->pci_dev, + "Failed to configure representor for VF %d, rc %d\n", + i, rc); + goto fail1; + } + rc = register_netdev(efv->net_dev); + if (rc) { + pci_err(efx->pci_dev, + "Failed to register representor for VF %d, rc %d\n", + i, rc); + goto fail2; + } + pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i, + efv->net_dev->name); + return 0; +fail2: + efx_ef100_deconfigure_rep(efv); +fail1: + efx_ef100_rep_destroy_netdev(efv); + return rc; +} + +void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv) +{ + struct net_device *rep_dev; + + rep_dev = efv->net_dev; + if (!rep_dev) + return; + netif_dbg(efx, drv, rep_dev, "Removing VF representor\n"); + unregister_netdev(rep_dev); + efx_ef100_deconfigure_rep(efv); + efx_ef100_rep_destroy_netdev(efv); +} + +void efx_ef100_fini_vfreps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct efx_rep *efv, *next; + + if (!nic_data->grp_mae) + return; + + list_for_each_entry_safe(efv, next, &efx->vf_reps, list) + efx_ef100_vfrep_destroy(efx, efv); +} + +static int efx_ef100_rep_poll(struct napi_struct *napi, int weight) +{ + struct efx_rep *efv = container_of(napi, struct 
efx_rep, napi); + unsigned int read_index; + struct list_head head; + struct sk_buff *skb; + bool need_resched; + int spent = 0; + + INIT_LIST_HEAD(&head); + /* Grab up to 'weight' pending SKBs */ + spin_lock_bh(&efv->rx_lock); + read_index = efv->write_index; + while (spent < weight && !list_empty(&efv->rx_list)) { + skb = list_first_entry(&efv->rx_list, struct sk_buff, list); + list_del(&skb->list); + list_add_tail(&skb->list, &head); + spent++; + } + spin_unlock_bh(&efv->rx_lock); + /* Receive them */ + netif_receive_skb_list(&head); + if (spent < weight) + if (napi_complete_done(napi, spent)) { + spin_lock_bh(&efv->rx_lock); + efv->read_index = read_index; + /* If write_index advanced while we were doing the + * RX, then storing our read_index won't re-prime the + * fake-interrupt. In that case, we need to schedule + * NAPI again to consume the additional packet(s). + */ + need_resched = efv->write_index != read_index; + spin_unlock_bh(&efv->rx_lock); + if (need_resched) + napi_schedule(&efv->napi); + } + return spent; +} + +void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf) +{ + u8 *eh = efx_rx_buf_va(rx_buf); + struct sk_buff *skb; + bool primed; + + /* Don't allow too many queued SKBs to build up, as they consume + * GFP_ATOMIC memory. If we overrun, just start dropping. + */ + if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) { + atomic64_inc(&efv->stats.rx_dropped); + if (net_ratelimit()) + netif_dbg(efv->parent, rx_err, efv->net_dev, + "nodesc-dropped packet of length %u\n", + rx_buf->len); + return; + } + + skb = netdev_alloc_skb(efv->net_dev, rx_buf->len); + if (!skb) { + atomic64_inc(&efv->stats.rx_dropped); + if (net_ratelimit()) + netif_dbg(efv->parent, rx_err, efv->net_dev, + "noskb-dropped packet of length %u\n", + rx_buf->len); + return; + } + memcpy(skb->data, eh, rx_buf->len); + __skb_put(skb, rx_buf->len); + + skb_record_rx_queue(skb, 0); /* rep is single-queue */ + + /* Move past the ethernet header */ + skb->protocol = eth_type_trans(skb, efv->net_dev); + + skb_checksum_none_assert(skb); + + atomic64_inc(&efv->stats.rx_packets); + atomic64_add(rx_buf->len, &efv->stats.rx_bytes); + + /* Add it to the rx list */ + spin_lock_bh(&efv->rx_lock); + primed = efv->read_index == efv->write_index; + list_add_tail(&skb->list, &efv->rx_list); + efv->write_index++; + spin_unlock_bh(&efv->rx_lock); + /* Trigger rx work */ + if (primed) + napi_schedule(&efv->napi); +} + +struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport) +{ + struct efx_rep *efv, *out = NULL; + + /* spinlock guards against list mutation while we're walking it; + * but caller must also hold rcu_read_lock() to ensure the netdev + * isn't freed after we drop the spinlock. + */ + spin_lock_bh(&efx->vf_reps_lock); + list_for_each_entry(efv, &efx->vf_reps, list) + if (efv->mport == mport) { + out = efv; + break; + } + spin_unlock_bh(&efx->vf_reps_lock); + return out; +} diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h new file mode 100644 index 0000000000..070f700893 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_rep.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* Handling for ef100 representor netdevs */ +#ifndef EF100_REP_H +#define EF100_REP_H + +#include "net_driver.h" +#include "tc.h" + +struct efx_rep_sw_stats { + atomic64_t rx_packets, tx_packets; + atomic64_t rx_bytes, tx_bytes; + atomic64_t rx_dropped, tx_errors; +}; + +/** + * struct efx_rep - Private data for an Efx representor + * + * @parent: the efx PF which manages this representor + * @net_dev: representor netdevice + * @msg_enable: log message enable flags + * @mport: m-port ID of corresponding VF + * @idx: VF index + * @write_index: number of packets enqueued to @rx_list + * @read_index: number of packets consumed from @rx_list + * @rx_pring_size: max length of RX list + * @dflt: default-rule for MAE switching + * @list: entry on efx->vf_reps + * @rx_list: list of SKBs queued for receive in NAPI poll + * @rx_lock: protects @rx_list + * @napi: NAPI control structure + * @stats: software traffic counters for netdev stats + */ +struct efx_rep { + struct efx_nic *parent; + struct net_device *net_dev; + u32 msg_enable; + u32 mport; + unsigned int idx; + unsigned int write_index, read_index; + unsigned int rx_pring_size; + struct efx_tc_flow_rule dflt; + struct list_head list; + struct list_head rx_list; + spinlock_t rx_lock; + struct napi_struct napi; + struct efx_rep_sw_stats stats; +}; + +int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i); +void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv); +void efx_ef100_fini_vfreps(struct efx_nic *efx); + +void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf); +/* Returns the representor corresponding to a VF m-port, or NULL + * @mport is an m-port label, *not* an m-port ID! + * Caller must hold rcu_read_lock(). + */ +struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport); +#endif /* EF100_REP_H */ diff --git a/drivers/net/ethernet/sfc/ef100_sriov.c b/drivers/net/ethernet/sfc/ef100_sriov.c new file mode 100644 index 0000000000..94bdbfcb47 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_sriov.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "ef100_sriov.h" +#include "ef100_nic.h" +#include "ef100_rep.h" + +static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct pci_dev *dev = efx->pci_dev; + struct efx_rep *efv, *next; + int rc, i; + + efx->vf_count = num_vfs; + rc = pci_enable_sriov(dev, num_vfs); + if (rc) + goto fail1; + + if (!nic_data->grp_mae) + return 0; + + for (i = 0; i < num_vfs; i++) { + rc = efx_ef100_vfrep_create(efx, i); + if (rc) + goto fail2; + } + return 0; + +fail2: + list_for_each_entry_safe(efv, next, &efx->vf_reps, list) + efx_ef100_vfrep_destroy(efx, efv); + pci_disable_sriov(dev); +fail1: + netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n"); + efx->vf_count = 0; + return rc; +} + +int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force) +{ + struct pci_dev *dev = efx->pci_dev; + unsigned int vfs_assigned; + + vfs_assigned = pci_vfs_assigned(dev); + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } + + efx_ef100_fini_vfreps(efx); + if (!vfs_assigned) + pci_disable_sriov(dev); + return 0; +} + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + if (num_vfs == 0) + return efx_ef100_pci_sriov_disable(efx, false); + else + return efx_ef100_pci_sriov_enable(efx, num_vfs); +} diff --git a/drivers/net/ethernet/sfc/ef100_sriov.h b/drivers/net/ethernet/sfc/ef100_sriov.h new file mode 100644 index 0000000000..8ffdf464dd --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_sriov.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "net_driver.h" + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force); diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c new file mode 100644 index 0000000000..97627f5e36 --- /dev/null +++ b/drivers/net/ethernet/sfc/mae.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "mae.h" +#include "mcdi.h" +#include "mcdi_pcol_mae.h" + +int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN); + size_t outlen; + int rc; + + if (WARN_ON_ONCE(!id)) + return -EINVAL; + if (WARN_ON_ONCE(!label)) + return -EINVAL; + + MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE, + MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS); + MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT, + MAE_MPORT_SELECTOR_ASSIGNED); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + *id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID); + *label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL); + return 0; +} + +int efx_mae_free_mport(struct efx_nic *efx, u32 id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN); + + BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN); + MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id); + return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +void efx_mae_mport_wire(struct efx_nic *efx, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_2(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT, + MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num); + *out = EFX_DWORD_VAL(mport); +} + +void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_3(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC, + MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER, + MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL); + *out = EFX_DWORD_VAL(mport); +} + +void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_3(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC, + MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER, + MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id); + *out = EFX_DWORD_VAL(mport); +} + +/* Constructs an mport selector from an mport ID, because they're not the same */ +void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_2(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID, + MAE_MPORT_SELECTOR_MPORT_ID, mport_id); + *out = EFX_DWORD_VAL(mport); +} + +/* id is really only 24 bits wide */ +int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + *id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID); + return 0; +} + +static bool efx_mae_asl_id(u32 id) +{ + return !!(id & BIT(31)); +} + +int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID, + 
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID, + MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID, + MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID, + MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID, + MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL); + if (act->deliver) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER, + act->dest_mport); + BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID); + /* We rely on the high bit of AS IDs always being clear. + * The firmware API guarantees this, but let's check it ourselves. + */ + if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) { + efx_mae_free_action_set(efx, act->fw_id); + return -EIO; + } + return 0; +} + +int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should never happen. + * Warn because it means we've now got a different idea to the FW of + * what action-sets exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id)) + return -EIO; + return 0; +} + +int efx_mae_alloc_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN); + struct efx_tc_action_set *act; + size_t inlen, outlen, i = 0; + efx_dword_t *inbuf; + int rc; + + list_for_each_entry(act, &acts->list, list) + i++; + if (i == 0) + return -EINVAL; + if (i == 1) { + /* Don't wrap an ASL around a single AS, just use the AS_ID + * directly. ASLs are a more limited resource. + */ + act = list_first_entry(&acts->list, struct efx_tc_action_set, list); + acts->fw_id = act->fw_id; + return 0; + } + if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2) + return -EOPNOTSUPP; /* Too many actions */ + inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i); + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + i = 0; + list_for_each_entry(act, &acts->list, list) { + MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS, + i, act->fw_id); + i++; + } + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto out_free; + if (outlen < sizeof(outbuf)) { + rc = -EIO; + goto out_free; + } + acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID); + /* We rely on the high bit of ASL IDs always being set. + * The firmware API guarantees this, but let's check it ourselves. 
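+ * (efx_mae_asl_id() keys off this bit, and efx_mae_insert_rule() relies + * on it to tell an ASL_ID apart from a lone AS_ID.)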
+ */ + if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) { + efx_mae_free_action_set_list(efx, acts); + rc = -EIO; + } +out_free: + kfree(inbuf); + return rc; +} + +int efx_mae_free_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1)); + size_t outlen; + int rc; + + /* If this is just an AS_ID with no ASL wrapper, then there is + * nothing for us to free. (The AS will be freed later.) + */ + if (efx_mae_asl_id(acts->fw_id)) { + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID, + acts->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should never happen. + * Warn because it means we've now got a different idea to the FW of + * what action-set-lists exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id)) + return -EIO; + } + /* We're probably about to free @acts, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. + */ + acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL; + return 0; +} + +static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), + const struct efx_tc_match *match) +{ + if (match->mask.ingress_port) { + if (~match->mask.ingress_port) + return -EOPNOTSUPP; + MCDI_STRUCT_SET_DWORD(match_crit, + MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR, + match->value.ingress_port); + } + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK, + match->mask.ingress_port); + return 0; +} + +int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match, + u32 prio, u32 acts_id, u32 *id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN); + MCDI_DECLARE_STRUCT_PTR(match_crit); + MCDI_DECLARE_STRUCT_PTR(response); + size_t outlen; + int rc; + + if (!id) + return -EINVAL; + + match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA); + response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE); + if (efx_mae_asl_id(acts_id)) { + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, + MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL); + } else { + /* We only had one AS, so we didn't wrap it in an ASL */ + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id); + } + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio); + rc = efx_mae_populate_match_criteria(match_crit, match); + if (rc) + return rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + *id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID); + return 0; +} + +int efx_mae_delete_rule(struct efx_nic *efx, u32 id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, 
MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what rules exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id)) + return -EIO; + return 0; +} diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h new file mode 100644 index 0000000000..0369be4d89 --- /dev/null +++ b/drivers/net/ethernet/sfc/mae.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EF100_MAE_H +#define EF100_MAE_H +/* MCDI interface for the ef100 Match-Action Engine */ + +#include "net_driver.h" +#include "tc.h" +#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */ + +int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label); +int efx_mae_free_mport(struct efx_nic *efx, u32 id); + +void efx_mae_mport_wire(struct efx_nic *efx, u32 *out); +void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out); +void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out); +void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out); + +int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id); + +int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act); +int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id); + +int efx_mae_alloc_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts); +int efx_mae_free_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts); + +int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match, + u32 prio, u32 acts_id, u32 *id); +int efx_mae_delete_rule(struct efx_nic *efx, u32 id); + +#endif /* EF100_MAE_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_pcol_mae.h b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h new file mode 100644 index 0000000000..ff6d80c8e4 --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2019-2022 Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef MCDI_PCOL_MAE_H +#define MCDI_PCOL_MAE_H +/* MCDI definitions for Match-Action Engine functionality, that are + * missing from the main mcdi_pcol.h + */ + +/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the + * following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC. + */ +/* enum: A counter ID that is guaranteed never to represent a real counter */ +#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff + +#endif /* MCDI_PCOL_MAE_H */ diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig new file mode 100644 index 0000000000..c6ea097698 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/Kconfig @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SFC_SIENA + tristate "Solarflare SFC9000 support" + depends on PCI + depends on PTP_1588_CLOCK + select MDIO + select CRC32 + help + This driver supports 10-gigabit Ethernet cards based on + the Solarflare SFC9000 controller. + + To compile this driver as a module, choose M here. The module + will be called sfc-siena. +config SFC_SIENA_MTD + bool "Solarflare SFC9000-family MTD support" + depends on SFC_SIENA && MTD && !(SFC_SIENA=y && MTD=m) + default y + help + This exposes the on-board flash and/or EEPROM as MTD devices + (e.g. /dev/mtd1). This is required to update the firmware or + the boot configuration under Linux. +config SFC_SIENA_MCDI_MON + bool "Solarflare SFC9000-family hwmon support" + depends on SFC_SIENA && HWMON && !(SFC_SIENA=y && HWMON=m) + default y + help + This exposes the on-board firmware-managed sensors as a + hardware monitor device. +config SFC_SIENA_SRIOV + bool "Solarflare SFC9000-family SR-IOV support" + depends on SFC_SIENA && PCI_IOV + default n + help + This enables support for the Single Root I/O Virtualization + features, allowing accelerated network performance in + virtualized environments. +config SFC_SIENA_MCDI_LOGGING + bool "Solarflare SFC9000-family MCDI logging support" + depends on SFC_SIENA + default y + help + This enables support for tracing of MCDI (Management-Controller-to- + Driver-Interface) commands and responses, allowing debugging of + driver/firmware interaction. The tracing is actually enabled by + a sysfs file 'mcdi_logging' under the PCI device, or via module + parameter mcdi_logging_default. diff --git a/drivers/net/ethernet/sfc/siena/Makefile b/drivers/net/ethernet/sfc/siena/Makefile new file mode 100644 index 0000000000..f738429966 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +sfc-siena-y += farch.o siena.o \ + efx.o efx_common.o efx_channels.o nic.o \ + tx.o tx_common.o rx.o rx_common.o \ + selftest.o ethtool.o ethtool_common.o ptp.o \ + mcdi.o mcdi_port.o mcdi_port_common.o \ + mcdi_mon.o +sfc-siena-$(CONFIG_SFC_SIENA_MTD) += mtd.o +sfc-siena-$(CONFIG_SFC_SIENA_SRIOV) += siena_sriov.o + +obj-$(CONFIG_SFC_SIENA) += sfc-siena.o diff --git a/drivers/net/ethernet/sfc/siena/bitfield.h b/drivers/net/ethernet/sfc/siena/bitfield.h new file mode 100644 index 0000000000..1f981dfe4b --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/bitfield.h @@ -0,0 +1,614 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ */ + +#ifndef EFX_BITFIELD_H +#define EFX_BITFIELD_H + +/* + * Efx bitfield access + * + * Efx NICs make extensive use of bitfields up to 128 bits + * wide. Since there is no native 128-bit datatype on most systems, + * and since 64-bit datatypes are inefficient on 32-bit systems and + * vice versa, we wrap accesses in a way that uses the most efficient + * datatype. + * + * The NICs are PCI devices and therefore little-endian. Since most + * of the quantities that we deal with are DMAed to/from host memory, + * we define our datatypes (efx_oword_t, efx_qword_t and + * efx_dword_t) to be little-endian. + */ + +/* Lowest bit numbers and widths */ +#define EFX_DUMMY_FIELD_LBN 0 +#define EFX_DUMMY_FIELD_WIDTH 0 +#define EFX_WORD_0_LBN 0 +#define EFX_WORD_0_WIDTH 16 +#define EFX_WORD_1_LBN 16 +#define EFX_WORD_1_WIDTH 16 +#define EFX_DWORD_0_LBN 0 +#define EFX_DWORD_0_WIDTH 32 +#define EFX_DWORD_1_LBN 32 +#define EFX_DWORD_1_WIDTH 32 +#define EFX_DWORD_2_LBN 64 +#define EFX_DWORD_2_WIDTH 32 +#define EFX_DWORD_3_LBN 96 +#define EFX_DWORD_3_WIDTH 32 +#define EFX_QWORD_0_LBN 0 +#define EFX_QWORD_0_WIDTH 64 + +/* Specified attribute (e.g. LBN) of the specified field */ +#define EFX_VAL(field, attribute) field ## _ ## attribute +/* Low bit number of the specified field */ +#define EFX_LOW_BIT(field) EFX_VAL(field, LBN) +/* Bit width of the specified field */ +#define EFX_WIDTH(field) EFX_VAL(field, WIDTH) +/* High bit number of the specified field */ +#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1) +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 64 bits. + */ +#define EFX_MASK64(width) \ + ((width) == 64 ? ~((u64) 0) : \ + (((((u64) 1) << (width))) - 1)) + +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 32 bits. Use + * EFX_MASK64 for higher width fields. + */ +#define EFX_MASK32(width) \ + ((width) == 32 ? ~((u32) 0) : \ + (((((u32) 1) << (width))) - 1)) + +/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ +typedef union efx_dword { + __le32 u32[1]; +} efx_dword_t; + +/* A quadword (i.e. 8 byte) datatype - little-endian in HW */ +typedef union efx_qword { + __le64 u64[1]; + __le32 u32[2]; + efx_dword_t dword[2]; +} efx_qword_t; + +/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */ +typedef union efx_oword { + __le64 u64[2]; + efx_qword_t qword[2]; + __le32 u32[4]; + efx_dword_t dword[4]; +} efx_oword_t; + +/* Format string and value expanders for printk */ +#define EFX_DWORD_FMT "%08x" +#define EFX_QWORD_FMT "%08x:%08x" +#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x" +#define EFX_DWORD_VAL(dword) \ + ((unsigned int) le32_to_cpu((dword).u32[0])) +#define EFX_QWORD_VAL(qword) \ + ((unsigned int) le32_to_cpu((qword).u32[1])), \ + ((unsigned int) le32_to_cpu((qword).u32[0])) +#define EFX_OWORD_VAL(oword) \ + ((unsigned int) le32_to_cpu((oword).u32[3])), \ + ((unsigned int) le32_to_cpu((oword).u32[2])), \ + ((unsigned int) le32_to_cpu((oword).u32[1])), \ + ((unsigned int) le32_to_cpu((oword).u32[0])) + +/* + * Extract bit field portion [low,high) from the native-endian element + * which contains bits [min,max). 
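+ * (The bounds are inclusive bit numbers in practice: the callers below + * pass e.g. 0,63 for the low qword of an oword and 64,127 for the high.)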
+ * + * For example, suppose "element" represents the high 32 bits of a + * 64-bit value, and we wish to extract the bits belonging to the bit + * field occupying bits 28-45 of this 64-bit value. + * + * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give + * + * ( element ) << 4 + * + * The result will contain the relevant bits filled in in the range + * [0,high-low), with garbage in bits [high-low+1,...). + */ +#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ + ((low) > (max) || (high) < (min) ? 0 : \ + (low) > (min) ? \ + (native_element) >> ((low) - (min)) : \ + (native_element) << ((min) - (low))) + +/* + * Extract bit field portion [low,high) from the 64-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT64(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high) + +/* + * Extract bit field portion [low,high) from the 32-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT32(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) + +#define EFX_EXTRACT_OWORD64(oword, low, high) \ + ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ + EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD64(qword, low, high) \ + (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_OWORD32(oword, low, high) \ + ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ + EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ + EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD32(qword, low, high) \ + ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_DWORD(dword, low, high) \ + (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_OWORD_FIELD64(oword, field) \ + EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD64(qword, field) \ + EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_FIELD32(oword, field) \ + EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD32(qword, field) \ + EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_DWORD_FIELD(dword, field) \ + EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_IS_ZERO64(oword) \ + (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) + +#define EFX_QWORD_IS_ZERO64(qword) \ + (((qword).u64[0]) == (__force __le64) 0) + +#define EFX_OWORD_IS_ZERO32(oword) \ + (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \ + == (__force __le32) 0) + +#define EFX_QWORD_IS_ZERO32(qword) \ + (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0) + +#define EFX_DWORD_IS_ZERO(dword) \ + (((dword).u32[0]) == (__force __le32) 0) + +#define EFX_OWORD_IS_ALL_ONES64(oword) \ + (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0)) + +#define EFX_QWORD_IS_ALL_ONES64(qword) \ + ((qword).u64[0] == ~((__force __le64) 0)) + +#define EFX_OWORD_IS_ALL_ONES32(oword) \ + (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \ + == ~((__force __le32) 0)) + +#define 
EFX_QWORD_IS_ALL_ONES32(qword) \ + (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0)) + +#define EFX_DWORD_IS_ALL_ONES(dword) \ + ((dword).u32[0] == ~((__force __le32) 0)) + +#if BITS_PER_LONG == 64 +#define EFX_OWORD_FIELD EFX_OWORD_FIELD64 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD64 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64 +#else +#define EFX_OWORD_FIELD EFX_OWORD_FIELD32 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD32 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32 +#endif + +/* + * Construct bit field portion + * + * Creates the portion of the bit field [low,high) that lies within + * the range [min,max). + */ +#define EFX_INSERT_NATIVE64(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u64) (value)) << (low - min)) : \ + (((u64) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE32(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u32) (value)) << (low - min)) : \ + (((u32) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE(min, max, low, high, value) \ + ((((max - min) >= 32) || ((high - low) >= 32)) ? \ + EFX_INSERT_NATIVE64(min, max, low, high, value) : \ + EFX_INSERT_NATIVE32(min, max, low, high, value)) + +/* + * Construct bit field portion + * + * Creates the portion of the named bit field that lies within the + * range [min,max). + */ +#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \ + EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +/* + * Construct bit field + * + * Creates the portion of the named bit fields that lie within the + * range [min,max). 
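+ * (All 19 field/value pairs must be supplied; the EFX_POPULATE_*_n + * wrappers below pad the unused slots with the zero-width EFX_DUMMY_FIELD.)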
+ */ +#define EFX_INSERT_FIELDS_NATIVE(min, max, \ + field1, value1, \ + field2, value2, \ + field3, value3, \ + field4, value4, \ + field5, value5, \ + field6, value6, \ + field7, value7, \ + field8, value8, \ + field9, value9, \ + field10, value10, \ + field11, value11, \ + field12, value12, \ + field13, value13, \ + field14, value14, \ + field15, value15, \ + field16, value16, \ + field17, value17, \ + field18, value18, \ + field19, value19) \ + (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19))) + +#define EFX_INSERT_FIELDS64(...) \ + cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_INSERT_FIELDS32(...) \ + cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_POPULATE_OWORD64(oword, ...) do { \ + (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD64(qword, ...) do { \ + (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_OWORD32(oword, ...) do { \ + (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \ + (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD32(qword, ...) do { \ + (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_DWORD(dword, ...) do { \ + (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + } while (0) + +#if BITS_PER_LONG == 64 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 +#else +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 +#endif + +/* Populate an octword field with various numbers of arguments */ +#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_18(oword, ...) \ + EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_17(oword, ...) \ + EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_16(oword, ...) \ + EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_15(oword, ...) 
\ + EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_14(oword, ...) \ + EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_13(oword, ...) \ + EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_12(oword, ...) \ + EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_11(oword, ...) \ + EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_10(oword, ...) \ + EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_9(oword, ...) \ + EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_8(oword, ...) \ + EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_7(oword, ...) \ + EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_6(oword, ...) \ + EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_5(oword, ...) \ + EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_4(oword, ...) \ + EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_3(oword, ...) \ + EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_2(oword, ...) \ + EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_1(oword, ...) \ + EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_OWORD(oword) \ + EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_OWORD(oword) \ + EFX_POPULATE_OWORD_4(oword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff, \ + EFX_DWORD_2, 0xffffffff, \ + EFX_DWORD_3, 0xffffffff) + +/* Populate a quadword field with various numbers of arguments */ +#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_18(qword, ...) \ + EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_17(qword, ...) \ + EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_16(qword, ...) \ + EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_15(qword, ...) \ + EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_14(qword, ...) \ + EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_13(qword, ...) \ + EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_12(qword, ...) \ + EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_11(qword, ...) \ + EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_10(qword, ...) \ + EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_9(qword, ...) \ + EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_8(qword, ...) \ + EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_7(qword, ...) \ + EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_6(qword, ...) \ + EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_5(qword, ...) \ + EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_4(qword, ...) 
\ + EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_3(qword, ...) \ + EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_2(qword, ...) \ + EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_1(qword, ...) \ + EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_QWORD(qword) \ + EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_QWORD(qword) \ + EFX_POPULATE_QWORD_2(qword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff) + +/* Populate a dword field with various numbers of arguments */ +#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_18(dword, ...) \ + EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_17(dword, ...) \ + EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_16(dword, ...) \ + EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_15(dword, ...) \ + EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_14(dword, ...) \ + EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_13(dword, ...) \ + EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_12(dword, ...) \ + EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_11(dword, ...) \ + EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_10(dword, ...) \ + EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_9(dword, ...) \ + EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_8(dword, ...) \ + EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_7(dword, ...) \ + EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_6(dword, ...) \ + EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_5(dword, ...) \ + EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_4(dword, ...) \ + EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_3(dword, ...) \ + EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_2(dword, ...) \ + EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_1(dword, ...) \ + EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff) + +/* + * Modify a named field within an already-populated structure. Used + * for read-modify-write operations. 
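+ * (e.g. EFX_SET_OWORD_FIELD(reg, SOME_FIELD, 1), where SOME_FIELD stands + * in for any real field name, rewrites just that field's bits and leaves + * the rest of reg unchanged.)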
+ * + */ +#define EFX_INVERT_OWORD(oword) do { \ + (oword).u64[0] = ~((oword).u64[0]); \ + (oword).u64[1] = ~((oword).u64[1]); \ + } while (0) + +#define EFX_AND_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ + } while (0) + +#define EFX_AND_QWORD(qword, from, mask) \ + (qword).u64[0] = (from).u64[0] & (mask).u64[0] + +#define EFX_OR_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ + } while (0) + +#define EFX_INSERT64(min, max, low, high, value) \ + cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INSERT32(min, max, low, high, value) \ + cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INPLACE_MASK64(min, max, low, high) \ + EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low))) + +#define EFX_INPLACE_MASK32(min, max, low, high) \ + EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low))) + +#define EFX_SET_OWORD64(oword, low, high, value) do { \ + (oword).u64[0] = (((oword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + (oword).u64[1] = (((oword).u64[1] \ + & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ + | EFX_INSERT64(64, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD64(qword, low, high, value) do { \ + (qword).u64[0] = (((qword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD32(oword, low, high, value) do { \ + (oword).u32[0] = (((oword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (oword).u32[1] = (((oword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + (oword).u32[2] = (((oword).u32[2] \ + & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ + | EFX_INSERT32(64, 95, low, high, value)); \ + (oword).u32[3] = (((oword).u32[3] \ + & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ + | EFX_INSERT32(96, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD32(qword, low, high, value) do { \ + (qword).u32[0] = (((qword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (qword).u32[1] = (((qword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_DWORD32(dword, low, high, value) do { \ + (dword).u32[0] = (((dword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD_FIELD64(oword, field, value) \ + EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD64(qword, field, value) \ + EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_OWORD_FIELD32(oword, field, value) \ + EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD32(qword, field, value) \ + EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_DWORD_FIELD(dword, field, value) \ + EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + + + +#if BITS_PER_LONG == 64 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 +#else +#define 
EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 +#endif + +/* Used to avoid compiler warnings about shift range exceeding width + * of the data types when dma_addr_t is only 32 bits wide. + */ +#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) +#define EFX_DMA_TYPE_WIDTH(width) \ + (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) + + +/* Static initialiser */ +#define EFX_OWORD32(a, b, c, d) \ + { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ + cpu_to_le32(c), cpu_to_le32(d) } } + +#endif /* EFX_BITFIELD_H */ diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c new file mode 100644 index 0000000000..63d999e639 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -0,0 +1,1325 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include +#include +#include "efx.h" +#include "efx_common.h" +#include "efx_channels.h" +#include "rx_common.h" +#include "tx_common.h" +#include "nic.h" +#include "io.h" +#include "selftest.h" +#include "sriov.h" +#ifdef CONFIG_SFC_SIENA_SRIOV +#include "siena_sriov.h" +#endif + +#include "mcdi_port_common.h" +#include "mcdi_pcol.h" +#include "workarounds.h" + +/************************************************************************** + * + * Configurable values + * + *************************************************************************/ + +module_param_named(interrupt_mode, efx_siena_interrupt_mode, uint, 0444); +MODULE_PARM_DESC(interrupt_mode, + "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); + +module_param_named(rss_cpus, efx_siena_rss_cpus, uint, 0444); +MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); + +/* + * Use separate channels for TX and RX events + * + * Set this to 1 to use separate channels for TX and RX. It allows us + * to control interrupt affinity separately for TX and RX. + * + * This is only used in MSI-X interrupt mode + */ +bool efx_siena_separate_tx_channels; +module_param_named(efx_separate_tx_channels, efx_siena_separate_tx_channels, + bool, 0444); +MODULE_PARM_DESC(efx_separate_tx_channels, + "Use separate channels for TX and RX"); + +/* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * The default for RX should strike a balance between increasing the + * round-trip latency and reducing overhead. + */ +static unsigned int rx_irq_mod_usec = 60; + +/* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * This default is chosen to ensure that a 10G link does not go idle + * while a TX queue is stopped after it has become full. A queue is + * restarted when it drops below half full. The time this takes (assuming + * worst case 3 descriptors per packet and 1024 descriptors) is + * 512 / 3 * 1.2 = 205 usec. 
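+ * (512 descriptors is half of the 1024-entry queue; at 3 descriptors
+ * per packet that is ~170 packets, and a 1500-byte frame takes about
+ * 1.2 usec on the wire at 10Gbps, giving roughly 205 usec to drain.)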
+ */ +static unsigned int tx_irq_mod_usec = 150; + +static bool phy_flash_cfg; +module_param(phy_flash_cfg, bool, 0644); +MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); + +static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW); +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); + +/************************************************************************** + * + * Utility functions and prototypes + * + *************************************************************************/ + +static void efx_remove_port(struct efx_nic *efx); +static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog); +static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp); +static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, + u32 flags); + +#define EFX_ASSERT_RESET_SERIALISED(efx) \ + do { \ + if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ + (efx->state == STATE_DISABLED)) \ + ASSERT_RTNL(); \ + } while (0) + +/************************************************************************** + * + * Port handling + * + **************************************************************************/ + +static void efx_fini_port(struct efx_nic *efx); + +static int efx_probe_port(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, probe, efx->net_dev, "create port\n"); + + if (phy_flash_cfg) + efx->phy_mode = PHY_MODE_SPECIAL; + + /* Connect up MAC/PHY operations table */ + rc = efx->type->probe_port(efx); + if (rc) + return rc; + + /* Initialise MAC address to permanent address */ + eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); + + return 0; +} + +static int efx_init_port(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, drv, efx->net_dev, "init port\n"); + + mutex_lock(&efx->mac_lock); + + efx->port_initialized = true; + + /* Ensure the PHY advertises the correct flow control settings */ + rc = efx_siena_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + goto fail; + + mutex_unlock(&efx->mac_lock); + return 0; + +fail: + mutex_unlock(&efx->mac_lock); + return rc; +} + +static void efx_fini_port(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); + + if (!efx->port_initialized) + return; + + efx->port_initialized = false; + + efx->link_state.up = false; + efx_siena_link_status_changed(efx); +} + +static void efx_remove_port(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); + + efx->type->remove_port(efx); +} + +/************************************************************************** + * + * NIC handling + * + **************************************************************************/ + +static LIST_HEAD(efx_primary_list); +static LIST_HEAD(efx_unassociated_list); + +static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right) +{ + return left->type == right->type && + left->vpd_sn && right->vpd_sn && + !strcmp(left->vpd_sn, right->vpd_sn); +} + +static void efx_associate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + if (efx->primary == efx) { + /* Adding primary function; look for secondaries */ + + netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); + list_add_tail(&efx->node, &efx_primary_list); + + list_for_each_entry_safe(other, next, &efx_unassociated_list, + node) { + if (efx_same_controller(efx, other)) { + list_del(&other->node); + 
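/* detach from the unassociated list before re-parenting */
+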
netif_dbg(other, probe, other->net_dev, + "moving to secondary list of %s %s\n", + pci_name(efx->pci_dev), + efx->net_dev->name); + list_add_tail(&other->node, + &efx->secondary_list); + other->primary = efx; + } + } + } else { + /* Adding secondary function; look for primary */ + + list_for_each_entry(other, &efx_primary_list, node) { + if (efx_same_controller(efx, other)) { + netif_dbg(efx, probe, efx->net_dev, + "adding to secondary list of %s %s\n", + pci_name(other->pci_dev), + other->net_dev->name); + list_add_tail(&efx->node, + &other->secondary_list); + efx->primary = other; + return; + } + } + + netif_dbg(efx, probe, efx->net_dev, + "adding to unassociated list\n"); + list_add_tail(&efx->node, &efx_unassociated_list); + } +} + +static void efx_dissociate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + list_del(&efx->node); + efx->primary = NULL; + + list_for_each_entry_safe(other, next, &efx->secondary_list, node) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to unassociated list\n"); + list_add_tail(&other->node, &efx_unassociated_list); + other->primary = NULL; + } +} + +static int efx_probe_nic(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); + + /* Carry out hardware-type specific initialisation */ + rc = efx->type->probe(efx); + if (rc) + return rc; + + do { + if (!efx->max_channels || !efx->max_tx_channels) { + netif_err(efx, drv, efx->net_dev, + "Insufficient resources to allocate" + " any channels\n"); + rc = -ENOSPC; + goto fail1; + } + + /* Determine the number of channels and queues by trying + * to hook in MSI-X interrupts. + */ + rc = efx_siena_probe_interrupts(efx); + if (rc) + goto fail1; + + rc = efx_siena_set_channels(efx); + if (rc) + goto fail1; + + /* dimension_resources can fail with EAGAIN */ + rc = efx->type->dimension_resources(efx); + if (rc != 0 && rc != -EAGAIN) + goto fail2; + + if (rc == -EAGAIN) + /* try again with new max_channels */ + efx_siena_remove_interrupts(efx); + + } while (rc == -EAGAIN); + + if (efx->n_channels > 1) + netdev_rss_key_fill(efx->rss_context.rx_hash_key, + sizeof(efx->rss_context.rx_hash_key)); + efx_siena_set_default_rx_indir_table(efx, &efx->rss_context); + + /* Initialise the interrupt moderation settings */ + efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); + efx_siena_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, + true, true); + + return 0; + +fail2: + efx_siena_remove_interrupts(efx); +fail1: + efx->type->remove(efx); + return rc; +} + +static void efx_remove_nic(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); + + efx_siena_remove_interrupts(efx); + efx->type->remove(efx); +} + +/************************************************************************** + * + * NIC startup/shutdown + * + *************************************************************************/ + +static int efx_probe_all(struct efx_nic *efx) +{ + int rc; + + rc = efx_probe_nic(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); + goto fail1; + } + + rc = efx_probe_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create port\n"); + goto fail2; + } + + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } + +#ifdef CONFIG_SFC_SIENA_SRIOV + rc = efx->type->vswitching_probe(efx); + if (rc) /* not fatal; the PF will still work fine */ + netif_warn(efx, probe, 
efx->net_dev, + "failed to setup vswitching rc=%d;" + " VFs may not function\n", rc); +#endif + + rc = efx_siena_probe_filters(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create filter tables\n"); + goto fail4; + } + + rc = efx_siena_probe_channels(efx); + if (rc) + goto fail5; + + return 0; + + fail5: + efx_siena_remove_filters(efx); + fail4: +#ifdef CONFIG_SFC_SIENA_SRIOV + efx->type->vswitching_remove(efx); +#endif + fail3: + efx_remove_port(efx); + fail2: + efx_remove_nic(efx); + fail1: + return rc; +} + +static void efx_remove_all(struct efx_nic *efx) +{ + rtnl_lock(); + efx_xdp_setup_prog(efx, NULL); + rtnl_unlock(); + + efx_siena_remove_channels(efx); + efx_siena_remove_filters(efx); +#ifdef CONFIG_SFC_SIENA_SRIOV + efx->type->vswitching_remove(efx); +#endif + efx_remove_port(efx); + efx_remove_nic(efx); +} + +/************************************************************************** + * + * Interrupt moderation + * + **************************************************************************/ +unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) +{ + if (usecs == 0) + return 0; + if (usecs * 1000 < efx->timer_quantum_ns) + return 1; /* never round down to 0 */ + return usecs * 1000 / efx->timer_quantum_ns; +} + +/* Set interrupt moderation parameters */ +int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx) +{ + struct efx_channel *channel; + unsigned int timer_max_us; + + EFX_ASSERT_RESET_SERIALISED(efx); + + timer_max_us = efx->timer_max_ns / 1000; + + if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) + return -EINVAL; + + if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && + !rx_may_override_tx) { + netif_err(efx, drv, efx->net_dev, "Channels are shared. " + "RX and TX IRQ moderation must be equal\n"); + return -EINVAL; + } + + efx->irq_rx_adaptive = rx_adaptive; + efx->irq_rx_moderation_us = rx_usecs; + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) + channel->irq_moderation_us = rx_usecs; + else if (efx_channel_has_tx_queues(channel)) + channel->irq_moderation_us = tx_usecs; + else if (efx_channel_is_xdp_tx(channel)) + channel->irq_moderation_us = tx_usecs; + } + + return 0; +} + +void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive) +{ + *rx_adaptive = efx->irq_rx_adaptive; + *rx_usecs = efx->irq_rx_moderation_us; + + /* If channels are shared between RX and TX, so is IRQ + * moderation. Otherwise, IRQ moderation is the same for all + * TX channels and is not adaptive. + */ + if (efx->tx_channel_offset == 0) { + *tx_usecs = *rx_usecs; + } else { + struct efx_channel *tx_channel; + + tx_channel = efx->channel[efx->tx_channel_offset]; + *tx_usecs = tx_channel->irq_moderation_us; + } +} + +/************************************************************************** + * + * ioctls + * + *************************************************************************/ + +/* Net device ioctl + * Context: process, rtnl_lock() held. 
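+ * Hardware timestamping ioctls are handed to the PTP code; MII ioctls
+ * go to mdio_mii_ioctl(), after converting a phy_id in the old
+ * PRTAD/DEVAD encoding into the clause 45 form it expects.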
+ */ +static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (cmd == SIOCSHWTSTAMP) + return efx_siena_ptp_set_ts_config(efx, ifr); + if (cmd == SIOCGHWTSTAMP) + return efx_siena_ptp_get_ts_config(efx, ifr); + + /* Convert phy_id from older PRTAD/DEVAD format */ + if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && + (data->phy_id & 0xfc00) == 0x0400) + data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; + + return mdio_mii_ioctl(&efx->mdio, data, cmd); +} + +/************************************************************************** + * + * Kernel net device interface + * + *************************************************************************/ + +/* Context: process, rtnl_lock() held. */ +static int efx_net_open(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); + + rc = efx_check_disabled(efx); + if (rc) + return rc; + if (efx->phy_mode & PHY_MODE_SPECIAL) + return -EBUSY; + if (efx_siena_mcdi_poll_reboot(efx) && efx_siena_reset(efx, RESET_TYPE_ALL)) + return -EIO; + + /* Notify the kernel of the link state polled during driver load, + * before the monitor starts running */ + efx_siena_link_status_changed(efx); + + efx_siena_start_all(efx); + if (efx->state == STATE_DISABLED || efx->reset_pending) + netif_device_detach(efx->net_dev); + efx_siena_selftest_async_start(efx); + return 0; +} + +/* Context: process, rtnl_lock() held. + * Note that the kernel will ignore our return code; this method + * should really be a void. + */ +static int efx_net_stop(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", + raw_smp_processor_id()); + + /* Stop the device and flush all the channels */ + efx_siena_stop_all(efx); + + return 0; +} + +static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_add_vid) + return efx->type->vlan_rx_add_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + +static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_kill_vid) + return efx->type->vlan_rx_kill_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + +static const struct net_device_ops efx_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, + .ndo_get_stats64 = efx_siena_net_stats, + .ndo_tx_timeout = efx_siena_watchdog, + .ndo_start_xmit = efx_siena_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_eth_ioctl = efx_ioctl, + .ndo_change_mtu = efx_siena_change_mtu, + .ndo_set_mac_address = efx_siena_set_mac_address, + .ndo_set_rx_mode = efx_siena_set_rx_mode, + .ndo_set_features = efx_siena_set_features, + .ndo_features_check = efx_siena_features_check, + .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, +#ifdef CONFIG_SFC_SIENA_SRIOV + .ndo_set_vf_mac = efx_sriov_set_vf_mac, + .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, + .ndo_get_vf_config = efx_sriov_get_vf_config, + .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, +#endif + .ndo_get_phys_port_id = efx_siena_get_phys_port_id, + .ndo_get_phys_port_name = efx_siena_get_phys_port_name, + .ndo_setup_tc 
= efx_siena_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_siena_filter_rfs, +#endif + .ndo_xdp_xmit = efx_xdp_xmit, + .ndo_bpf = efx_xdp +}; + +static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + + if (efx->xdp_rxq_info_failed) { + netif_err(efx, drv, efx->net_dev, + "Unable to bind XDP program due to previous failure of rxq_info\n"); + return -EINVAL; + } + + if (prog && efx->net_dev->mtu > efx_siena_xdp_max_mtu(efx)) { + netif_err(efx, drv, efx->net_dev, + "Unable to configure XDP with MTU of %d (max: %d)\n", + efx->net_dev->mtu, efx_siena_xdp_max_mtu(efx)); + return -EINVAL; + } + + old_prog = rtnl_dereference(efx->xdp_prog); + rcu_assign_pointer(efx->xdp_prog, prog); + /* Release the reference that was originally passed by the caller. */ + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +/* Context: process, rtnl_lock() held. */ +static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct efx_nic *efx = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return efx_xdp_setup_prog(efx, xdp->prog); + default: + return -EINVAL; + } +} + +static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, + u32 flags) +{ + struct efx_nic *efx = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return efx_siena_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); +} + +static void efx_update_name(struct efx_nic *efx) +{ + strcpy(efx->name, efx->net_dev->name); + efx_siena_mtd_rename(efx); + efx_siena_set_channel_names(efx); +} + +static int efx_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); + + if ((net_dev->netdev_ops == &efx_netdev_ops) && + event == NETDEV_CHANGENAME) + efx_update_name(netdev_priv(net_dev)); + + return NOTIFY_DONE; +} + +static struct notifier_block efx_netdev_notifier = { + .notifier_call = efx_netdev_event, +}; + +static ssize_t phy_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", efx->phy_type); +} +static DEVICE_ATTR_RO(phy_type); + +static int efx_register_netdev(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct efx_channel *channel; + int rc; + + net_dev->watchdog_timeo = 5 * HZ; + net_dev->irq = efx->pci_dev->irq; + net_dev->netdev_ops = &efx_netdev_ops; + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + net_dev->priv_flags |= IFF_UNICAST_FLT; + net_dev->ethtool_ops = &efx_siena_ethtool_ops; + netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS); + net_dev->min_mtu = EFX_MIN_MTU; + net_dev->max_mtu = EFX_MAX_MTU; + + rtnl_lock(); + + /* Enable resets to be scheduled and check whether any were + * already requested. If so, the NIC is probably hosed so we + * abort. 
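+ * (Setting STATE_READY before testing reset_pending, with the
+ * smp_mb() below between the two, means a concurrently scheduled
+ * reset is either seen here or will find us READY and run normally.)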
+ */ + efx->state = STATE_READY; + smp_mb(); /* ensure we change state before checking reset_pending */ + if (efx->reset_pending) { + pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n"); + rc = -EIO; + goto fail_locked; + } + + rc = dev_alloc_name(net_dev, net_dev->name); + if (rc < 0) + goto fail_locked; + efx_update_name(efx); + + /* Always start with carrier off; PHY events will detect the link */ + netif_carrier_off(net_dev); + + rc = register_netdevice(net_dev); + if (rc) + goto fail_locked; + + efx_for_each_channel(channel, efx) { + struct efx_tx_queue *tx_queue; + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_siena_init_tx_queue_core_txq(tx_queue); + } + + efx_associate(efx); + + rtnl_unlock(); + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail_registered; + } + + efx_siena_init_mcdi_logging(efx); + + return 0; + +fail_registered: + rtnl_lock(); + efx_dissociate(efx); + unregister_netdevice(net_dev); +fail_locked: + efx->state = STATE_UNINIT; + rtnl_unlock(); + netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); + return rc; +} + +static void efx_unregister_netdev(struct efx_nic *efx) +{ + if (!efx->net_dev) + return; + + BUG_ON(netdev_priv(efx->net_dev) != efx); + + if (efx_dev_registered(efx)) { + strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); + efx_siena_fini_mcdi_logging(efx); + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + unregister_netdev(efx->net_dev); + } +} + +/************************************************************************** + * + * List of NICs we support + * + **************************************************************************/ + +/* PCI device ID table */ +static const struct pci_device_id efx_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ + .driver_data = (unsigned long)&siena_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ + .driver_data = (unsigned long)&siena_a0_nic_type}, + {0} /* end of list */ +}; + +/************************************************************************** + * + * Data housekeeping + * + **************************************************************************/ + +void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats) +{ + u64 n_rx_nodesc_trunc = 0; + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); +} + +/************************************************************************** + * + * PCI interface + * + **************************************************************************/ + +/* Main body of final NIC shutdown code + * This is called only at module unload (or hotplug removal). + */ +static void efx_pci_remove_main(struct efx_nic *efx) +{ + /* Flush reset_work. It can no longer be scheduled since we + * are not READY. + */ + BUG_ON(efx->state == STATE_READY); + efx_siena_flush_reset_workqueue(efx); + + efx_siena_disable_interrupts(efx); + efx_siena_clear_interrupt_affinity(efx); + efx_siena_fini_interrupt(efx); + efx_fini_port(efx); + efx->type->fini(efx); + efx_siena_fini_napi(efx); + efx_remove_all(efx); +} + +/* Final NIC shutdown + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. 
+ */ +static void efx_pci_remove(struct pci_dev *pci_dev) +{ + struct efx_nic *efx; + + efx = pci_get_drvdata(pci_dev); + if (!efx) + return; + + /* Mark the NIC as fini, then stop the interface */ + rtnl_lock(); + efx_dissociate(efx); + dev_close(efx->net_dev); + efx_siena_disable_interrupts(efx); + efx->state = STATE_UNINIT; + rtnl_unlock(); + + if (efx->type->sriov_fini) + efx->type->sriov_fini(efx); + + efx_unregister_netdev(efx); + + efx_siena_mtd_remove(efx); + + efx_pci_remove_main(efx); + + efx_siena_fini_io(efx); + netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); + + efx_siena_fini_struct(efx); + free_netdev(efx->net_dev); + + pci_disable_pcie_error_reporting(pci_dev); +}; + +/* NIC VPD information + * Called during probe to display the part number of the + * installed NIC. + */ +static void efx_probe_vpd_strings(struct efx_nic *efx) +{ + struct pci_dev *dev = efx->pci_dev; + unsigned int vpd_size, kw_len; + u8 *vpd_data; + int start; + + vpd_data = pci_vpd_alloc(dev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_warn(dev, "Unable to read VPD\n"); + return; + } + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); + if (start < 0) + pci_err(dev, "Part number not found or incomplete\n"); + else + pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start); + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); + if (start < 0) + pci_err(dev, "Serial number not found or incomplete\n"); + else + efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL); + + kfree(vpd_data); +} + + +/* Main body of NIC initialisation + * This is called at module load (or hotplug insertion, theoretically). + */ +static int efx_pci_probe_main(struct efx_nic *efx) +{ + int rc; + + /* Do start-of-day initialisation */ + rc = efx_probe_all(efx); + if (rc) + goto fail1; + + efx_siena_init_napi(efx); + + down_write(&efx->filter_sem); + rc = efx->type->init(efx); + up_write(&efx->filter_sem); + if (rc) { + pci_err(efx->pci_dev, "failed to initialise NIC\n"); + goto fail3; + } + + rc = efx_init_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise port\n"); + goto fail4; + } + + rc = efx_siena_init_interrupt(efx); + if (rc) + goto fail5; + + efx_siena_set_interrupt_affinity(efx); + rc = efx_siena_enable_interrupts(efx); + if (rc) + goto fail6; + + return 0; + + fail6: + efx_siena_clear_interrupt_affinity(efx); + efx_siena_fini_interrupt(efx); + fail5: + efx_fini_port(efx); + fail4: + efx->type->fini(efx); + fail3: + efx_siena_fini_napi(efx); + efx_remove_all(efx); + fail1: + return rc; +} + +static int efx_pci_probe_post_io(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + int rc = efx_pci_probe_main(efx); + + if (rc) + return rc; + + if (efx->type->sriov_init) { + rc = efx->type->sriov_init(efx); + if (rc) + pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n", + rc); + } + + /* Determine netdevice features */ + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL); + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) + net_dev->features |= NETIF_F_TSO6; + /* Check whether device supports TSO */ + if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) + net_dev->features &= ~NETIF_F_ALL_TSO; + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + 
NETIF_F_RXCSUM); + + net_dev->hw_features |= net_dev->features & ~efx->fixed_features; + + /* Disable receiving frames with bad FCS, by default. */ + net_dev->features &= ~NETIF_F_RXALL; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). + */ + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->features |= efx->fixed_features; + + rc = efx_register_netdev(efx); + if (!rc) + return 0; + + efx_pci_remove_main(efx); + return rc; +} + +/* NIC initialisation + * + * This is called at module load (or hotplug insertion, + * theoretically). It sets up PCI mappings, resets the NIC, + * sets up and registers the network devices with the kernel and hooks + * the interrupt service routine. It does not prepare the device for + * transmission; this is left to the first time one of the network + * interfaces is brought up (i.e. efx_net_open). + */ +static int efx_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) +{ + struct net_device *net_dev; + struct efx_nic *efx; + int rc; + + /* Allocate and initialise a struct net_device and struct efx_nic */ + net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, + EFX_MAX_RX_QUEUES); + if (!net_dev) + return -ENOMEM; + efx = netdev_priv(net_dev); + efx->type = (const struct efx_nic_type *) entry->driver_data; + efx->fixed_features |= NETIF_F_HIGHDMA; + + pci_set_drvdata(pci_dev, efx); + SET_NETDEV_DEV(net_dev, &pci_dev->dev); + rc = efx_siena_init_struct(efx, pci_dev, net_dev); + if (rc) + goto fail1; + + pci_info(pci_dev, "Solarflare NIC detected\n"); + + if (!efx->type->is_vf) + efx_probe_vpd_strings(efx); + + /* Set up basic I/O (BAR mappings etc) */ + rc = efx_siena_init_io(efx, efx->type->mem_bar(efx), + efx->type->max_dma_mask, + efx->type->mem_map_size(efx)); + if (rc) + goto fail2; + + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On failure, retry once immediately. + * If we aborted probe due to a scheduled reset, dismiss it. + */ + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On another failure, retry once more + * after a 50-305ms delay. + */ + unsigned char r; + + get_random_bytes(&r, 1); + msleep((unsigned int)r + 50); + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + } + } + if (rc) + goto fail3; + + netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); + + /* Try to create MTDs, but allow this to fail */ + rtnl_lock(); + rc = efx_mtd_probe(efx); + rtnl_unlock(); + if (rc && rc != -EPERM) + netif_warn(efx, probe, efx->net_dev, + "failed to create MTDs (%d)\n", rc); + + (void)pci_enable_pcie_error_reporting(pci_dev); + + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + + return 0; + + fail3: + efx_siena_fini_io(efx); + fail2: + efx_siena_fini_struct(efx); + fail1: + WARN_ON(rc > 0); + netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); + free_netdev(net_dev); + return rc; +} + +/* efx_pci_sriov_configure returns the actual number of Virtual Functions + * enabled on success + */ +#ifdef CONFIG_SFC_SIENA_SRIOV +static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + int rc; + struct efx_nic *efx = pci_get_drvdata(dev); + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } else + return -EOPNOTSUPP; +} +#endif + +static int efx_pm_freeze(struct device *dev) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_UNINIT; + + efx_device_detach_sync(efx); + + efx_siena_stop_all(efx); + efx_siena_disable_interrupts(efx); + } + + rtnl_unlock(); + + return 0; +} + +static int efx_pm_thaw(struct device *dev) +{ + int rc; + struct efx_nic *efx = dev_get_drvdata(dev); + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + rc = efx_siena_enable_interrupts(efx); + if (rc) + goto fail; + + mutex_lock(&efx->mac_lock); + efx_siena_mcdi_port_reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + efx_siena_start_all(efx); + + efx_device_attach_if_not_resetting(efx); + + efx->state = STATE_READY; + + efx->type->resume_wol(efx); + } + + rtnl_unlock(); + + /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ + efx_siena_queue_reset_work(efx); + + return 0; + +fail: + rtnl_unlock(); + + return rc; +} + +static int efx_pm_poweroff(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + efx->type->fini(efx); + + efx->reset_pending = 0; + + pci_save_state(pci_dev); + return pci_set_power_state(pci_dev, PCI_D3hot); +} + +/* Used for both resume and restore */ +static int efx_pm_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + int rc; + + rc = pci_set_power_state(pci_dev, PCI_D0); + if (rc) + return rc; + pci_restore_state(pci_dev); + rc = pci_enable_device(pci_dev); + if (rc) + return rc; + pci_set_master(efx->pci_dev); + rc = efx->type->reset(efx, RESET_TYPE_ALL); + if (rc) + return rc; + down_write(&efx->filter_sem); + rc = efx->type->init(efx); + up_write(&efx->filter_sem); + if (rc) + return rc; + rc = efx_pm_thaw(dev); + return rc; +} + +static int efx_pm_suspend(struct device *dev) +{ + int rc; + + efx_pm_freeze(dev); + rc = efx_pm_poweroff(dev); + if (rc) + efx_pm_resume(dev); + return rc; +} + +static const struct dev_pm_ops efx_pm_ops = { + .suspend = efx_pm_suspend, + .resume = efx_pm_resume, + .freeze = efx_pm_freeze, + .thaw = efx_pm_thaw, + .poweroff = efx_pm_poweroff, + .restore = efx_pm_resume, +}; + +static struct pci_driver efx_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = efx_pci_table, + .probe = efx_pci_probe, + .remove = efx_pci_remove, + .driver.pm = &efx_pm_ops, + .err_handler = &efx_siena_err_handlers, +#ifdef CONFIG_SFC_SIENA_SRIOV + .sriov_configure = efx_pci_sriov_configure, +#endif +}; + +/************************************************************************** + * + * Kernel module interface + * + *************************************************************************/ + +static int __init efx_init_module(void) +{ + int rc; + + pr_info("Solarflare Siena driver\n"); + + rc = register_netdevice_notifier(&efx_netdev_notifier); + if (rc) + goto err_notifier; + +#ifdef CONFIG_SFC_SIENA_SRIOV + rc = efx_init_sriov(); + if (rc) + goto err_sriov; +#endif + + rc = 
efx_siena_create_reset_workqueue(); + if (rc) + goto err_reset; + + rc = pci_register_driver(&efx_pci_driver); + if (rc < 0) + goto err_pci; + + return 0; + + err_pci: + efx_siena_destroy_reset_workqueue(); + err_reset: +#ifdef CONFIG_SFC_SIENA_SRIOV + efx_fini_sriov(); + err_sriov: +#endif + unregister_netdevice_notifier(&efx_netdev_notifier); + err_notifier: + return rc; +} + +static void __exit efx_exit_module(void) +{ + pr_info("Solarflare Siena driver unloading\n"); + + pci_unregister_driver(&efx_pci_driver); + efx_siena_destroy_reset_workqueue(); +#ifdef CONFIG_SFC_SIENA_SRIOV + efx_fini_sriov(); +#endif + unregister_netdevice_notifier(&efx_netdev_notifier); + +} + +module_init(efx_init_module); +module_exit(efx_exit_module); + +MODULE_AUTHOR("Solarflare Communications and " + "Michael Brown "); +MODULE_DESCRIPTION("Solarflare Siena network driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, efx_pci_table); diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h new file mode 100644 index 0000000000..27d1d3f19c --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_EFX_H +#define EFX_EFX_H + +#include +#include "net_driver.h" +#include "filter.h" + +/* TX */ +void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); +netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev); +netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, + struct sk_buff *skb); +static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue, + __efx_siena_enqueue_skb, tx_queue, skb); +} +int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); + +/* RX */ +void __efx_siena_rx_packet(struct efx_channel *channel); +void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags); +static inline void efx_rx_flush_packet(struct efx_channel *channel) +{ + if (channel->rx_pkt_n_frags) + __efx_siena_rx_packet(channel); +} + +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. + */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_siena_tx_max_skb_descs(efx)) + +/* All EF10 architecture NICs steal one bit of the DMAQ size for various + * other purposes when counting TxQ entries, so we halve the queue size. + */ +#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? \ + EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) + +static inline bool efx_rss_enabled(struct efx_nic *efx) +{ + return efx->rss_spread > 1; +} + +/* Filters */ + +/** + * efx_filter_insert_filter - add or replace a filter + * @efx: NIC in which to insert the filter + * @spec: Specification for the filter + * @replace_equal: Flag for whether the specified filter may replace an + * existing filter with equal priority + * + * On success, return the filter ID. + * On failure, return a negative error code. 
+ * + * If existing filters have equal match values to the new filter spec, + * then the new filter might replace them or the function might fail, + * as follows. + * + * 1. If the existing filters have lower priority, or @replace_equal + * is set and they have equal priority, replace them. + * + * 2. If the existing filters have higher priority, return -%EPERM. + * + * 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not + * support delivery to multiple recipients, return -%EEXIST. + * + * This implies that filters for multiple multicast recipients must + * all be inserted with the same priority and @replace_equal = %false. + */ +static inline s32 efx_filter_insert_filter(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) +{ + return efx->type->filter_insert(efx, spec, replace_equal); +} + +/** + * efx_filter_remove_id_safe - remove a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + */ +static inline int efx_filter_remove_id_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx->type->filter_remove_safe(efx, priority, filter_id); +} + +/** + * efx_filter_get_filter_safe - retrieve a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * @spec: Buffer in which to store filter specification + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. 
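+ *
+ * A minimal sketch of the lookup pattern (the priority, the filter_id
+ * variable and the handle_spec() helper are illustrative, not part of
+ * this API):
+ *
+ *	struct efx_filter_spec spec;
+ *
+ *	if (efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+ *				       filter_id, &spec) == 0)
+ *		handle_spec(&spec);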
+ */ +static inline int +efx_filter_get_filter_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + return efx->type->filter_get_safe(efx, priority, filter_id, spec); +} + +static inline u32 efx_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + return efx->type->filter_count_rx_used(efx, priority); +} +static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) +{ + return efx->type->filter_get_rx_id_limit(efx); +} +static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + return efx->type->filter_get_rx_ids(efx, priority, buf, size); +} + +/* RSS contexts */ +static inline bool efx_rss_active(struct efx_rss_context *ctx) +{ + return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID; +} + +/* Ethtool support */ +extern const struct ethtool_ops efx_siena_ethtool_ops; + +/* Global */ +unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); +int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx); +void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive); + +/* Update the generic software stats in the passed stats array */ +void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats); + +/* MTD */ +#ifdef CONFIG_SFC_SIENA_MTD +int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part); +static inline int efx_mtd_probe(struct efx_nic *efx) +{ + return efx->type->mtd_probe(efx); +} +void efx_siena_mtd_rename(struct efx_nic *efx); +void efx_siena_mtd_remove(struct efx_nic *efx); +#else +static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } +static inline void efx_siena_mtd_rename(struct efx_nic *efx) {} +static inline void efx_siena_mtd_remove(struct efx_nic *efx) {} +#endif + +#ifdef CONFIG_SFC_SIENA_SRIOV +static inline unsigned int efx_vf_size(struct efx_nic *efx) +{ + return 1 << efx->vi_scale; +} +#endif + +static inline void efx_device_detach_sync(struct efx_nic *efx) +{ + struct net_device *dev = efx->net_dev; + + /* Lock/freeze all TX queues so that we can be sure the + * TX scheduler is stopped when we're done and before + * netif_device_present() becomes false. + */ + netif_tx_lock_bh(dev); + netif_device_detach(dev); + netif_tx_unlock_bh(dev); +} + +static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx) +{ + if ((efx->state != STATE_DISABLED) && !efx->reset_pending) + netif_device_attach(efx->net_dev); +} + +static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) +{ + if (WARN_ON(down_read_trylock(sem))) { + up_read(sem); + return false; + } + return true; +} + +int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, + struct xdp_frame **xdpfs, bool flush); + +#endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c new file mode 100644 index 0000000000..017212a40d --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -0,0 +1,1368 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include +#include +#include "efx_channels.h" +#include "efx.h" +#include "efx_common.h" +#include "tx_common.h" +#include "rx_common.h" +#include "nic.h" +#include "sriov.h" +#include "workarounds.h" + +/* This is the first interrupt mode to try out of: + * 0 => MSI-X + * 1 => MSI + * 2 => legacy + */ +unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX; + +/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), + * i.e. the number of CPUs among which we may distribute simultaneous + * interrupt handling. + * + * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. + * The default (0) means to assign an interrupt to each core. + */ +unsigned int efx_siena_rss_cpus; + +static unsigned int irq_adapt_low_thresh = 8000; +module_param(irq_adapt_low_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_low_thresh, + "Threshold score for reducing IRQ moderation"); + +static unsigned int irq_adapt_high_thresh = 16000; +module_param(irq_adapt_high_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_high_thresh, + "Threshold score for increasing IRQ moderation"); + +static const struct efx_channel_type efx_default_channel_type; + +/************* + * INTERRUPTS + *************/ + +static unsigned int count_online_cores(struct efx_nic *efx, bool local_node) +{ + cpumask_var_t filter_mask; + unsigned int count; + int cpu; + + if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) { + netif_warn(efx, probe, efx->net_dev, + "RSS disabled due to allocation failure\n"); + return 1; + } + + cpumask_copy(filter_mask, cpu_online_mask); + if (local_node) + cpumask_and(filter_mask, filter_mask, + cpumask_of_pcibus(efx->pci_dev->bus)); + + count = 0; + for_each_cpu(cpu, filter_mask) { + ++count; + cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu)); + } + + free_cpumask_var(filter_mask); + + return count; +} + +static unsigned int efx_wanted_parallelism(struct efx_nic *efx) +{ + unsigned int count; + + if (efx_siena_rss_cpus) { + count = efx_siena_rss_cpus; + } else { + count = count_online_cores(efx, true); + + /* If no online CPUs in local node, fallback to any online CPUs */ + if (count == 0) + count = count_online_cores(efx, false); + } + + if (count > EFX_MAX_RX_QUEUES) { + netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus, + warn, + "Reducing number of rx queues from %u to %u.\n", + count, EFX_MAX_RX_QUEUES); + count = EFX_MAX_RX_QUEUES; + } + + /* If RSS is requested for the PF *and* VFs then we can't write RSS + * table entries that are inaccessible to VFs + */ +#ifdef CONFIG_SFC_SIENA_SRIOV + if (efx->type->sriov_wanted) { + if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && + count > efx_vf_size(efx)) { + netif_warn(efx, probe, efx->net_dev, + "Reducing number of RSS channels from %u to %u for " + "VF support. 
Increase vf-msix-limit to use more " + "channels on the PF.\n", + count, efx_vf_size(efx)); + count = efx_vf_size(efx); + } + } +#endif + + return count; +} + +static int efx_allocate_msix_channels(struct efx_nic *efx, + unsigned int max_channels, + unsigned int extra_channels, + unsigned int parallelism) +{ + unsigned int n_channels = parallelism; + int vec_count; + int tx_per_ev; + int n_xdp_tx; + int n_xdp_ev; + + if (efx_siena_separate_tx_channels) + n_channels *= 2; + n_channels += extra_channels; + + /* To allow XDP transmit to happen from arbitrary NAPI contexts + * we allocate a TX queue per CPU. We share event queues across + * multiple tx queues, assuming tx and ev queues are both + * maximum size. + */ + tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); + tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL); + n_xdp_tx = num_possible_cpus(); + n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev); + + vec_count = pci_msix_vec_count(efx->pci_dev); + if (vec_count < 0) + return vec_count; + + max_channels = min_t(unsigned int, vec_count, max_channels); + + /* Check resources. + * We need a channel per event queue, plus a VI per tx queue. + * This may be more pessimistic than it needs to be. + */ + if (n_channels >= max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); + } else if (n_channels + n_xdp_tx > efx->max_vis) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", + n_xdp_tx, n_channels, efx->max_vis); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); + } else if (n_channels + n_xdp_ev > max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + + n_xdp_ev = max_channels - n_channels; + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n", + DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev)); + } else { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; + } + + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { + efx->n_xdp_channels = n_xdp_ev; + efx->xdp_tx_per_channel = tx_per_ev; + efx->xdp_tx_queue_count = n_xdp_tx; + n_channels += n_xdp_ev; + netif_dbg(efx, drv, efx->net_dev, + "Allocating %d TX and %d event queues for XDP\n", + n_xdp_ev * tx_per_ev, n_xdp_ev); + } else { + efx->n_xdp_channels = 0; + efx->xdp_tx_per_channel = 0; + efx->xdp_tx_queue_count = n_xdp_tx; + } + + if (vec_count < n_channels) { + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors available (%d < %u).\n", + vec_count, n_channels); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); + n_channels = vec_count; + } + + n_channels = min(n_channels, max_channels); + + efx->n_channels = n_channels; + + /* Ignore XDP tx channels when creating rx channels. 
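+	 * XDP channels sit at the top of the channel range (from
+	 * xdp_channel_offset onwards), so only the lower part is split
+	 * between RX and TX here.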
*/ + n_channels -= efx->n_xdp_channels; + + if (efx_siena_separate_tx_channels) { + efx->n_tx_channels = + min(max(n_channels / 2, 1U), + efx->max_tx_channels); + efx->tx_channel_offset = + n_channels - efx->n_tx_channels; + efx->n_rx_channels = + max(n_channels - + efx->n_tx_channels, 1U); + } else { + efx->n_tx_channels = min(n_channels, efx->max_tx_channels); + efx->tx_channel_offset = 0; + efx->n_rx_channels = n_channels; + } + + efx->n_rx_channels = min(efx->n_rx_channels, parallelism); + efx->n_tx_channels = min(efx->n_tx_channels, parallelism); + + efx->xdp_channel_offset = n_channels; + + netif_dbg(efx, drv, efx->net_dev, + "Allocating %u RX channels\n", + efx->n_rx_channels); + + return efx->n_channels; +} + +/* Probe the number and type of interrupts we are able to obtain, and + * the resulting numbers of channels and RX queues. + */ +int efx_siena_probe_interrupts(struct efx_nic *efx) +{ + unsigned int extra_channels = 0; + unsigned int rss_spread; + unsigned int i, j; + int rc; + + for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) + if (efx->extra_channel_type[i]) + ++extra_channels; + + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + unsigned int parallelism = efx_wanted_parallelism(efx); + struct msix_entry xentries[EFX_MAX_CHANNELS]; + unsigned int n_channels; + + rc = efx_allocate_msix_channels(efx, efx->max_channels, + extra_channels, parallelism); + if (rc >= 0) { + n_channels = rc; + for (i = 0; i < n_channels; i++) + xentries[i].entry = i; + rc = pci_enable_msix_range(efx->pci_dev, xentries, 1, + n_channels); + } + if (rc < 0) { + /* Fall back to single channel MSI */ + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X\n"); + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI) + efx->interrupt_mode = EFX_INT_MODE_MSI; + else + return rc; + } else if (rc < n_channels) { + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors" + " available (%d < %u).\n", rc, n_channels); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); + n_channels = rc; + } + + if (rc > 0) { + for (i = 0; i < efx->n_channels; i++) + efx_get_channel(efx, i)->irq = + xentries[i].vector; + } + } + + /* Try single interrupt MSI */ + if (efx->interrupt_mode == EFX_INT_MODE_MSI) { + efx->n_channels = 1; + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + efx->tx_channel_offset = 0; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; + rc = pci_enable_msi(efx->pci_dev); + if (rc == 0) { + efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; + } else { + netif_err(efx, drv, efx->net_dev, + "could not enable MSI\n"); + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY) + efx->interrupt_mode = EFX_INT_MODE_LEGACY; + else + return rc; + } + } + + /* Assume legacy interrupts */ + if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { + efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 
1 : 0); + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + efx->tx_channel_offset = 1; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; + efx->legacy_irq = efx->pci_dev->irq; + } + + /* Assign extra channels if possible, before XDP channels */ + efx->n_extra_tx_channels = 0; + j = efx->xdp_channel_offset; + for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { + if (!efx->extra_channel_type[i]) + continue; + if (j <= efx->tx_channel_offset + efx->n_tx_channels) { + efx->extra_channel_type[i]->handle_no_channel(efx); + } else { + --j; + efx_get_channel(efx, j)->type = + efx->extra_channel_type[i]; + if (efx_channel_has_tx_queues(efx_get_channel(efx, j))) + efx->n_extra_tx_channels++; + } + } + + rss_spread = efx->n_rx_channels; + /* RSS might be usable on VFs even if it is disabled on the PF */ +#ifdef CONFIG_SFC_SIENA_SRIOV + if (efx->type->sriov_wanted) { + efx->rss_spread = ((rss_spread > 1 || + !efx->type->sriov_wanted(efx)) ? + rss_spread : efx_vf_size(efx)); + return 0; + } +#endif + efx->rss_spread = rss_spread; + + return 0; +} + +#if defined(CONFIG_SMP) +void efx_siena_set_interrupt_affinity(struct efx_nic *efx) +{ + const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus); + struct efx_channel *channel; + unsigned int cpu; + + /* If no online CPUs in local node, fallback to any online CPU */ + if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids) + numa_mask = cpu_online_mask; + + cpu = -1; + efx_for_each_channel(channel, efx) { + cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first_and(cpu_online_mask, numa_mask); + irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); + } +} + +void efx_siena_clear_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + irq_set_affinity_hint(channel->irq, NULL); +} +#else +void +efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused) +{ +} + +void +efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused) +{ +} +#endif /* CONFIG_SMP */ + +void efx_siena_remove_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + /* Remove MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + channel->irq = 0; + pci_disable_msi(efx->pci_dev); + pci_disable_msix(efx->pci_dev); + + /* Remove legacy interrupt */ + efx->legacy_irq = 0; +} + +/*************** + * EVENT QUEUES + ***************/ + +/* Create event queue + * Event queue memory allocations are done only once. If the channel + * is reset, the memory buffer will be reused; this guards against + * errors during channel reset and also simplifies interrupt handling. + */ +static int efx_probe_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned long entries; + + netif_dbg(efx, probe, efx->net_dev, + "chan %d create event queue\n", channel->channel); + + /* Build an event queue with room for one event per tx and rx buffer, + * plus some extra for link state events and MCDI completions. 
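+ * (With the default 1024-entry RX and TX queues this works out as
+ * roundup_pow_of_two(1024 + 1024 + 128) = 4096 entries.)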
+ */ + entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); + EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); + channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; + + return efx_nic_probe_eventq(channel); +} + +/* Prepare channel's event queue */ +static int efx_init_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + int rc; + + EFX_WARN_ON_PARANOID(channel->eventq_init); + + netif_dbg(efx, drv, efx->net_dev, + "chan %d init event queue\n", channel->channel); + + rc = efx_nic_init_eventq(channel); + if (rc == 0) { + efx->type->push_irq_moderation(channel); + channel->eventq_read_ptr = 0; + channel->eventq_init = true; + } + return rc; +} + +/* Enable event queue processing and NAPI */ +void efx_siena_start_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, ifup, channel->efx->net_dev, + "chan %d start event queue\n", channel->channel); + + /* Make sure the NAPI handler sees the enabled flag set */ + channel->enabled = true; + smp_wmb(); + + napi_enable(&channel->napi_str); + efx_nic_eventq_read_ack(channel); +} + +/* Disable event queue processing and NAPI */ +void efx_siena_stop_eventq(struct efx_channel *channel) +{ + if (!channel->enabled) + return; + + napi_disable(&channel->napi_str); + channel->enabled = false; +} + +static void efx_fini_eventq(struct efx_channel *channel) +{ + if (!channel->eventq_init) + return; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d fini event queue\n", channel->channel); + + efx_nic_fini_eventq(channel); + channel->eventq_init = false; +} + +static void efx_remove_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d remove event queue\n", channel->channel); + + efx_nic_remove_eventq(channel); +} + +/************************************************************************** + * + * Channel handling + * + *************************************************************************/ + +#ifdef CONFIG_RFS_ACCEL +static void efx_filter_rfs_expire(struct work_struct *data) +{ + struct delayed_work *dwork = to_delayed_work(data); + struct efx_channel *channel; + unsigned int time, quota; + + channel = container_of(dwork, struct efx_channel, filter_work); + time = jiffies - channel->rfs_last_expiry; + quota = channel->rfs_filter_count * time / (30 * HZ); + if (quota >= 20 && __efx_siena_filter_rfs_expire(channel, + min(channel->rfs_filter_count, quota))) + channel->rfs_last_expiry += time; + /* Ensure we do more work eventually even if NAPI poll is not happening */ + schedule_delayed_work(dwork, 30 * HZ); +} +#endif + +/* Allocate and initialise a channel structure. 
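+ * TX queues are given their label here, but the hardware queue number
+ * stays at -1 until the final channel layout is known.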
*/ +static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) +{ + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int j; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->efx = efx; + channel->channel = i; + channel->type = &efx_default_channel_type; + + for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) { + tx_queue = &channel->tx_queue[j]; + tx_queue->efx = efx; + tx_queue->queue = -1; + tx_queue->label = j; + tx_queue->channel = channel; + } + +#ifdef CONFIG_RFS_ACCEL + INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); +#endif + + rx_queue = &channel->rx_queue; + rx_queue->efx = efx; + timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0); + + return channel; +} + +int efx_siena_init_channels(struct efx_nic *efx) +{ + unsigned int i; + + for (i = 0; i < EFX_MAX_CHANNELS; i++) { + efx->channel[i] = efx_alloc_channel(efx, i); + if (!efx->channel[i]) + return -ENOMEM; + efx->msi_context[i].efx = efx; + efx->msi_context[i].index = i; + } + + /* Higher numbered interrupt modes are less capable! */ + efx->interrupt_mode = min(efx->type->min_interrupt_mode, + efx_siena_interrupt_mode); + + efx->max_channels = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; + + return 0; +} + +void efx_siena_fini_channels(struct efx_nic *efx) +{ + unsigned int i; + + for (i = 0; i < EFX_MAX_CHANNELS; i++) + if (efx->channel[i]) { + kfree(efx->channel[i]); + efx->channel[i] = NULL; + } +} + +/* Allocate and initialise a channel structure, copying parameters + * (but not resources) from an old channel structure. + */ +static +struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) +{ + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int j; + + channel = kmalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + *channel = *old_channel; + + channel->napi_dev = NULL; + INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); + channel->napi_str.napi_id = 0; + channel->napi_str.state = 0; + memset(&channel->eventq, 0, sizeof(channel->eventq)); + + for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) { + tx_queue = &channel->tx_queue[j]; + if (tx_queue->channel) + tx_queue->channel = channel; + tx_queue->buffer = NULL; + tx_queue->cb_page = NULL; + memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); + } + + rx_queue = &channel->rx_queue; + rx_queue->buffer = NULL; + memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); + timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0); +#ifdef CONFIG_RFS_ACCEL + INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); +#endif + + return channel; +} + +static int efx_probe_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc; + + netif_dbg(channel->efx, probe, channel->efx->net_dev, + "creating channel %d\n", channel->channel); + + rc = channel->type->pre_probe(channel); + if (rc) + goto fail; + + rc = efx_probe_eventq(channel); + if (rc) + goto fail; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + rc = efx_siena_probe_tx_queue(tx_queue); + if (rc) + goto fail; + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_siena_probe_rx_queue(rx_queue); + if (rc) + goto fail; + } + + channel->rx_list = NULL; + + return 0; + +fail: + efx_siena_remove_channel(channel); + return rc; +} + +static void efx_get_channel_name(struct efx_channel *channel, char *buf, + size_t len) +{ + struct 
efx_nic *efx = channel->efx; + const char *type; + int number; + + number = channel->channel; + + if (number >= efx->xdp_channel_offset && + !WARN_ON_ONCE(!efx->n_xdp_channels)) { + type = "-xdp"; + number -= efx->xdp_channel_offset; + } else if (efx->tx_channel_offset == 0) { + type = ""; + } else if (number < efx->tx_channel_offset) { + type = "-rx"; + } else { + type = "-tx"; + number -= efx->tx_channel_offset; + } + snprintf(buf, len, "%s%s-%d", efx->name, type, number); +} + +void efx_siena_set_channel_names(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + channel->type->get_name(channel, + efx->msi_context[channel->channel].name, + sizeof(efx->msi_context[0].name)); +} + +int efx_siena_probe_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + int rc; + + /* Restart special buffer allocation */ + efx->next_buffer_table = 0; + + /* Probe channels in reverse, so that any 'extra' channels + * use the start of the buffer table. This allows the traffic + * channels to be resized without moving them or wasting the + * entries before them. + */ + efx_for_each_channel_rev(channel, efx) { + rc = efx_probe_channel(channel); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create channel %d\n", + channel->channel); + goto fail; + } + } + efx_siena_set_channel_names(efx); + + return 0; + +fail: + efx_siena_remove_channels(efx); + return rc; +} + +void efx_siena_remove_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "destroy chan %d\n", channel->channel); + + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_siena_remove_rx_queue(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_siena_remove_tx_queue(tx_queue); + efx_remove_eventq(channel); + channel->type->post_remove(channel); +} + +void efx_siena_remove_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_siena_remove_channel(channel); + + kfree(efx->xdp_tx_queues); +} + +static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if (xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + +static void efx_set_xdp_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + unsigned int next_queue = 0; + int xdp_queue_number = 0; + int rc; + + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. 
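+	 *
+	 * As a worked example (hypothetical values): with
+	 * tx_channel_offset == 4, channels 0-3 are RX-only and are skipped
+	 * below, while the TX queues on channels 4 and up are renumbered
+	 * from 0.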
+ */ + efx_for_each_channel(channel, efx) { + if (channel->channel < efx->tx_channel_offset) + continue; + + if (efx_channel_is_xdp_tx(channel)) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } else { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is HW %u\n", + channel->channel, tx_queue->label, + tx_queue->queue); + } + + /* If XDP is borrowing queues from net stack, it must + * use the queue with no csum offload, which is the + * first one of the channel + * (note: tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == + EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } + } + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } +} + +static int efx_soft_enable_interrupts(struct efx_nic *efx); +static void efx_soft_disable_interrupts(struct efx_nic *efx); +static void efx_init_napi_channel(struct efx_channel *channel); +static void efx_fini_napi_channel(struct efx_channel *channel); + +int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries, + u32 txq_entries) +{ + struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; + unsigned int i, next_buffer_table = 0; + u32 old_rxq_entries, old_txq_entries; + int rc, rc2; + + rc = efx_check_disabled(efx); + if (rc) + return rc; + + /* Not all channels should be reallocated. We must avoid + * reallocating their buffer table entries. 
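+	 *
+	 * Channels without a ->copy method (e.g. the PTP/extra channels)
+	 * are kept as-is; the scan below records the highest buffer table
+	 * entry they use so that reallocation restarts above it.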
+ */ + efx_for_each_channel(channel, efx) { + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + + if (channel->type->copy) + continue; + next_buffer_table = max(next_buffer_table, + channel->eventq.index + + channel->eventq.entries); + efx_for_each_channel_rx_queue(rx_queue, channel) + next_buffer_table = max(next_buffer_table, + rx_queue->rxd.index + + rx_queue->rxd.entries); + efx_for_each_channel_tx_queue(tx_queue, channel) + next_buffer_table = max(next_buffer_table, + tx_queue->txd.index + + tx_queue->txd.entries); + } + + efx_device_detach_sync(efx); + efx_siena_stop_all(efx); + efx_soft_disable_interrupts(efx); + + /* Clone channels (where possible) */ + memset(other_channel, 0, sizeof(other_channel)); + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + if (channel->type->copy) + channel = channel->type->copy(channel); + if (!channel) { + rc = -ENOMEM; + goto out; + } + other_channel[i] = channel; + } + + /* Swap entry counts and channel pointers */ + old_rxq_entries = efx->rxq_entries; + old_txq_entries = efx->txq_entries; + efx->rxq_entries = rxq_entries; + efx->txq_entries = txq_entries; + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); + + /* Restart buffer table allocation */ + efx->next_buffer_table = next_buffer_table; + + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + if (!channel->type->copy) + continue; + rc = efx_probe_channel(channel); + if (rc) + goto rollback; + efx_init_napi_channel(efx->channel[i]); + } + + efx_set_xdp_channels(efx); +out: + /* Destroy unused channel structures */ + for (i = 0; i < efx->n_channels; i++) { + channel = other_channel[i]; + if (channel && channel->type->copy) { + efx_fini_napi_channel(channel); + efx_siena_remove_channel(channel); + kfree(channel); + } + } + + rc2 = efx_soft_enable_interrupts(efx); + if (rc2) { + rc = rc ? rc : rc2; + netif_err(efx, drv, efx->net_dev, + "unable to restart interrupts on channel reallocation\n"); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); + } else { + efx_siena_start_all(efx); + efx_device_attach_if_not_resetting(efx); + } + return rc; + +rollback: + /* Swap back */ + efx->rxq_entries = old_rxq_entries; + efx->txq_entries = old_txq_entries; + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); + goto out; +} + +int efx_siena_set_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + int rc; + + if (efx->xdp_tx_queue_count) { + EFX_WARN_ON_PARANOID(efx->xdp_tx_queues); + + /* Allocate array for XDP TX queue lookup. 
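+	 * One slot per XDP TX queue: efx_set_xdp_channels() fills it in,
+	 * reusing existing queues for any leftover slots when there are
+	 * more CPUs than dedicated XDP queues (see above).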
+	 */
+		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+					     sizeof(*efx->xdp_tx_queues),
+					     GFP_KERNEL);
+		if (!efx->xdp_tx_queues)
+			return -ENOMEM;
+	}
+
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel < efx->n_rx_channels)
+			channel->rx_queue.core_index = channel->channel;
+		else
+			channel->rx_queue.core_index = -1;
+	}
+
+	efx_set_xdp_channels(efx);
+
+	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+	if (rc)
+		return rc;
+	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+}
+
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+	       channel->efx->n_tx_channels;
+}
+
+/*************
+ * START/STOP
+ *************/
+
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel, *end_channel;
+	int rc;
+
+	BUG_ON(efx->state == STATE_DISABLED);
+
+	efx->irq_soft_enabled = true;
+	smp_wmb();
+
+	efx_for_each_channel(channel, efx) {
+		if (!channel->type->keep_eventq) {
+			rc = efx_init_eventq(channel);
+			if (rc)
+				goto fail;
+		}
+		efx_siena_start_eventq(channel);
+	}
+
+	efx_siena_mcdi_mode_event(efx);
+
+	return 0;
+fail:
+	end_channel = channel;
+	efx_for_each_channel(channel, efx) {
+		if (channel == end_channel)
+			break;
+		efx_siena_stop_eventq(channel);
+		if (!channel->type->keep_eventq)
+			efx_fini_eventq(channel);
+	}
+
+	return rc;
+}
+
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	if (efx->state == STATE_DISABLED)
+		return;
+
+	efx_siena_mcdi_mode_poll(efx);
+
+	efx->irq_soft_enabled = false;
+	smp_wmb();
+
+	if (efx->legacy_irq)
+		synchronize_irq(efx->legacy_irq);
+
+	efx_for_each_channel(channel, efx) {
+		if (channel->irq)
+			synchronize_irq(channel->irq);
+
+		efx_siena_stop_eventq(channel);
+		if (!channel->type->keep_eventq)
+			efx_fini_eventq(channel);
+	}
+
+	/* Flush the asynchronous MCDI request queue */
+	efx_siena_mcdi_flush_async(efx);
+}
+
+int efx_siena_enable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel, *end_channel;
+	int rc;
+
+	/* Enabling interrupts on a disabled NIC is a driver bug, so trap it here. 
*/ + BUG_ON(efx->state == STATE_DISABLED); + + if (efx->eeh_disabled_legacy_irq) { + enable_irq(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = false; + } + + efx->type->irq_enable_master(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + } + + rc = efx_soft_enable_interrupts(efx); + if (rc) + goto fail; + + return 0; + +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); + + return rc; +} + +void efx_siena_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_soft_disable_interrupts(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); +} + +void efx_siena_start_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + + efx_for_each_channel_rev(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_siena_init_tx_queue(tx_queue); + atomic_inc(&efx->active_queues); + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + efx_siena_init_rx_queue(rx_queue); + atomic_inc(&efx->active_queues); + efx_siena_stop_eventq(channel); + efx_siena_fast_push_rx_descriptors(rx_queue, false); + efx_siena_start_eventq(channel); + } + + WARN_ON(channel->rx_pkt_n_frags); + } +} + +void efx_siena_stop_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + int rc = 0; + + /* Stop RX refill */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_queue->refill_enabled = false; + } + + efx_for_each_channel(channel, efx) { + /* RX packet processing is pipelined, so wait for the + * NAPI handler to complete. At least event queue 0 + * might be kept active by non-data events, so don't + * use napi_synchronize() but actually disable NAPI + * temporarily. + */ + if (efx_channel_has_rx_queue(channel)) { + efx_siena_stop_eventq(channel); + efx_siena_start_eventq(channel); + } + } + + if (efx->type->fini_dmaq) + rc = efx->type->fini_dmaq(efx); + + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_siena_fini_rx_queue(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_siena_fini_tx_queue(tx_queue); + } +} + +/************************************************************************** + * + * NAPI interface + * + *************************************************************************/ + +/* Process channel's event queue + * + * This function is responsible for processing the event queue of a + * single channel. The caller must guarantee that this function will + * never be concurrently called more than once on the same channel, + * though different channels may be being processed concurrently. 
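+ * (NAPI provides exactly this serialisation guarantee; see the comment
+ * on efx_poll() below.)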
+ */ +static int efx_process_channel(struct efx_channel *channel, int budget) +{ + struct efx_tx_queue *tx_queue; + struct list_head rx_list; + int spent; + + if (unlikely(!channel->enabled)) + return 0; + + /* Prepare the batch receive list */ + EFX_WARN_ON_PARANOID(channel->rx_list != NULL); + INIT_LIST_HEAD(&rx_list); + channel->rx_list = &rx_list; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->pkts_compl = 0; + tx_queue->bytes_compl = 0; + } + + spent = efx_nic_process_eventq(channel, budget); + if (spent && efx_channel_has_rx_queue(channel)) { + struct efx_rx_queue *rx_queue = + efx_channel_get_rx_queue(channel); + + efx_rx_flush_packet(channel); + efx_siena_fast_push_rx_descriptors(rx_queue, true); + } + + /* Update BQL */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + if (tx_queue->bytes_compl) { + netdev_tx_completed_queue(tx_queue->core_txq, + tx_queue->pkts_compl, + tx_queue->bytes_compl); + } + } + + /* Receive any packets we queued up */ + netif_receive_skb_list(channel->rx_list); + channel->rx_list = NULL; + + return spent; +} + +static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) +{ + int step = efx->irq_mod_step_us; + + if (channel->irq_mod_score < irq_adapt_low_thresh) { + if (channel->irq_moderation_us > step) { + channel->irq_moderation_us -= step; + efx->type->push_irq_moderation(channel); + } + } else if (channel->irq_mod_score > irq_adapt_high_thresh) { + if (channel->irq_moderation_us < + efx->irq_rx_moderation_us) { + channel->irq_moderation_us += step; + efx->type->push_irq_moderation(channel); + } + } + + channel->irq_count = 0; + channel->irq_mod_score = 0; +} + +/* NAPI poll handler + * + * NAPI guarantees serialisation of polls of the same device, which + * provides the guarantee required by efx_process_channel(). + */ +static int efx_poll(struct napi_struct *napi, int budget) +{ + struct efx_channel *channel = + container_of(napi, struct efx_channel, napi_str); + struct efx_nic *efx = channel->efx; +#ifdef CONFIG_RFS_ACCEL + unsigned int time; +#endif + int spent; + + netif_vdbg(efx, intr, efx->net_dev, + "channel %d NAPI poll executing on CPU %d\n", + channel->channel, raw_smp_processor_id()); + + spent = efx_process_channel(channel, budget); + + xdp_do_flush_map(); + + if (spent < budget) { + if (efx_channel_has_rx_queue(channel) && + efx->irq_rx_adaptive && + unlikely(++channel->irq_count == 1000)) { + efx_update_irq_mod(efx, channel); + } + +#ifdef CONFIG_RFS_ACCEL + /* Perhaps expire some ARFS filters */ + time = jiffies - channel->rfs_last_expiry; + /* Would our quota be >= 20? */ + if (channel->rfs_filter_count * time >= 600 * HZ) + mod_delayed_work(system_wq, &channel->filter_work, 0); +#endif + + /* There is no race here; although napi_disable() will + * only wait for napi_complete(), this isn't a problem + * since efx_nic_eventq_read_ack() will have no effect if + * interrupts have already been disabled. 
+ */ + if (napi_complete_done(napi, spent)) + efx_nic_eventq_read_ack(channel); + } + + return spent; +} + +static void efx_init_napi_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + channel->napi_dev = efx->net_dev; + netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64); +} + +void efx_siena_init_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_init_napi_channel(channel); +} + +static void efx_fini_napi_channel(struct efx_channel *channel) +{ + if (channel->napi_dev) + netif_napi_del(&channel->napi_str); + + channel->napi_dev = NULL; +} + +void efx_siena_fini_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_fini_napi_channel(channel); +} + +/*************** + * Housekeeping + ***************/ + +static int efx_channel_dummy_op_int(struct efx_channel *channel) +{ + return 0; +} + +void efx_siena_channel_dummy_op_void(struct efx_channel *channel) +{ +} + +static const struct efx_channel_type efx_default_channel_type = { + .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_siena_channel_dummy_op_void, + .get_name = efx_get_channel_name, + .copy = efx_copy_channel, + .want_txqs = efx_default_channel_want_txqs, + .keep_eventq = false, + .want_pio = true, +}; diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.h b/drivers/net/ethernet/sfc/siena/efx_channels.h new file mode 100644 index 0000000000..c4b95a2d77 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx_channels.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#ifndef EFX_SIENA_CHANNELS_H
+#define EFX_SIENA_CHANNELS_H
+
+extern unsigned int efx_siena_interrupt_mode;
+extern unsigned int efx_siena_rss_cpus;
+
+int efx_siena_probe_interrupts(struct efx_nic *efx);
+void efx_siena_remove_interrupts(struct efx_nic *efx);
+int efx_siena_enable_interrupts(struct efx_nic *efx);
+void efx_siena_disable_interrupts(struct efx_nic *efx);
+
+void efx_siena_set_interrupt_affinity(struct efx_nic *efx);
+void efx_siena_clear_interrupt_affinity(struct efx_nic *efx);
+
+void efx_siena_start_eventq(struct efx_channel *channel);
+void efx_siena_stop_eventq(struct efx_channel *channel);
+
+int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
+			       u32 txq_entries);
+void efx_siena_set_channel_names(struct efx_nic *efx);
+int efx_siena_init_channels(struct efx_nic *efx);
+int efx_siena_probe_channels(struct efx_nic *efx);
+int efx_siena_set_channels(struct efx_nic *efx);
+void efx_siena_remove_channel(struct efx_channel *channel);
+void efx_siena_remove_channels(struct efx_nic *efx);
+void efx_siena_fini_channels(struct efx_nic *efx);
+void efx_siena_start_channels(struct efx_nic *efx);
+void efx_siena_stop_channels(struct efx_nic *efx);
+
+void efx_siena_init_napi(struct efx_nic *efx);
+void efx_siena_fini_napi(struct efx_nic *efx);
+
+void efx_siena_channel_dummy_op_void(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
new file mode 100644
index 0000000000..954daf464a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -0,0 +1,1408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/filter.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/gre.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "mcdi_port_common.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+			     NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+			     NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+			     NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
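+ * With the values below this amounts to a total wait of about
+ * 100 * 100ms = 10 seconds.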
+ */ +#define BIST_WAIT_DELAY_MS 100 +#define BIST_WAIT_DELAY_COUNT 100 + +/* Default stats update time */ +#define STATS_PERIOD_MS_DEFAULT 1000 + +static const unsigned int efx_reset_type_max = RESET_TYPE_MAX; +static const char *const efx_reset_type_names[] = { + [RESET_TYPE_INVISIBLE] = "INVISIBLE", + [RESET_TYPE_ALL] = "ALL", + [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", + [RESET_TYPE_WORLD] = "WORLD", + [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_DATAPATH] = "DATAPATH", + [RESET_TYPE_MC_BIST] = "MC_BIST", + [RESET_TYPE_DISABLE] = "DISABLE", + [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", + [RESET_TYPE_INT_ERROR] = "INT_ERROR", + [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", + [RESET_TYPE_TX_SKIP] = "TX_SKIP", + [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", +}; + +#define RESET_TYPE(type) \ + STRING_TABLE_LOOKUP(type, efx_reset_type) + +/* Loopback mode names (see LOOPBACK_MODE()) */ +const unsigned int efx_siena_loopback_mode_max = LOOPBACK_MAX; +const char *const efx_siena_loopback_mode_names[] = { + [LOOPBACK_NONE] = "NONE", + [LOOPBACK_DATA] = "DATAPATH", + [LOOPBACK_GMAC] = "GMAC", + [LOOPBACK_XGMII] = "XGMII", + [LOOPBACK_XGXS] = "XGXS", + [LOOPBACK_XAUI] = "XAUI", + [LOOPBACK_GMII] = "GMII", + [LOOPBACK_SGMII] = "SGMII", + [LOOPBACK_XGBR] = "XGBR", + [LOOPBACK_XFI] = "XFI", + [LOOPBACK_XAUI_FAR] = "XAUI_FAR", + [LOOPBACK_GMII_FAR] = "GMII_FAR", + [LOOPBACK_SGMII_FAR] = "SGMII_FAR", + [LOOPBACK_XFI_FAR] = "XFI_FAR", + [LOOPBACK_GPHY] = "GPHY", + [LOOPBACK_PHYXS] = "PHYXS", + [LOOPBACK_PCS] = "PCS", + [LOOPBACK_PMAPMD] = "PMA/PMD", + [LOOPBACK_XPORT] = "XPORT", + [LOOPBACK_XGMII_WS] = "XGMII_WS", + [LOOPBACK_XAUI_WS] = "XAUI_WS", + [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", + [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", + [LOOPBACK_GMII_WS] = "GMII_WS", + [LOOPBACK_XFI_WS] = "XFI_WS", + [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", + [LOOPBACK_PHYXS_WS] = "PHYXS_WS", +}; + +/* Reset workqueue. If any NIC has a hardware failure then a reset will be + * queued onto this work queue. This is not a per-nic work queue, because + * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. + */ +static struct workqueue_struct *reset_workqueue; + +int efx_siena_create_reset_workqueue(void) +{ + reset_workqueue = create_singlethread_workqueue("sfc_siena_reset"); + if (!reset_workqueue) { + printk(KERN_ERR "Failed to create reset workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +void efx_siena_queue_reset_work(struct efx_nic *efx) +{ + queue_work(reset_workqueue, &efx->reset_work); +} + +void efx_siena_flush_reset_workqueue(struct efx_nic *efx) +{ + cancel_work_sync(&efx->reset_work); +} + +void efx_siena_destroy_reset_workqueue(void) +{ + if (reset_workqueue) { + destroy_workqueue(reset_workqueue); + reset_workqueue = NULL; + } +} + +/* We assume that efx->type->reconfigure_mac will always try to sync RX + * filters and therefore needs to read-lock the filter table against freeing + */ +void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only) +{ + if (efx->type->reconfigure_mac) { + down_read(&efx->filter_sem); + efx->type->reconfigure_mac(efx, mtu_only); + up_read(&efx->filter_sem); + } +} + +/* Asynchronous work item for changing MAC promiscuity and multicast + * hash. Avoid a drain/rx_ingress enable by reconfiguring the current + * MAC directly. 
+ */
+static void efx_mac_work(struct work_struct *data)
+{
+	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+	mutex_lock(&efx->mac_lock);
+	if (efx->port_enabled)
+		efx_siena_mac_reconfigure(efx, false);
+	mutex_unlock(&efx->mac_lock);
+}
+
+int efx_siena_set_mac_address(struct net_device *net_dev, void *data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct sockaddr *addr = data;
+	u8 *new_addr = addr->sa_data;
+	u8 old_addr[6];
+	int rc;
+
+	if (!is_valid_ether_addr(new_addr)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "invalid ethernet MAC address requested: %pM\n",
+			  new_addr);
+		return -EADDRNOTAVAIL;
+	}
+
+	/* save old address */
+	ether_addr_copy(old_addr, net_dev->dev_addr);
+	eth_hw_addr_set(net_dev, new_addr);
+	if (efx->type->set_mac_address) {
+		rc = efx->type->set_mac_address(efx);
+		if (rc) {
+			eth_hw_addr_set(net_dev, old_addr);
+			return rc;
+		}
+	}
+
+	/* Reconfigure the MAC */
+	mutex_lock(&efx->mac_lock);
+	efx_siena_mac_reconfigure(efx, false);
+	mutex_unlock(&efx->mac_lock);
+
+	return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+void efx_siena_set_rx_mode(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->port_enabled)
+		queue_work(efx->workqueue, &efx->mac_work);
+	/* Otherwise efx_start_port() will do this */
+}
+
+int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	/* If disabling RX n-tuple filtering, clear existing filters */
+	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+		rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+		if (rc)
+			return rc;
+	}
+
+	/* If Rx VLAN filter is changed, update filters via mac_reconfigure.
+	 * If rx-fcs is changed, mac_reconfigure updates that too.
+	 */
+	if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
+					  NETIF_F_RXFCS)) {
+		/* efx_siena_set_rx_mode() will schedule MAC work to update
+		 * the filters once the new features have actually been set
+		 * in net_dev.
+		 */
+		efx_siena_set_rx_mode(net_dev);
+	}
+
+	return 0;
+}
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status; the networking core in
+ * turn keeps the port's TX queue stopped while the carrier is off.
+ */
+void efx_siena_link_status_changed(struct efx_nic *efx)
+{
+	struct efx_link_state *link_state = &efx->link_state;
+
+	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+	 * that no events are triggered between unregister_netdev() and the
+	 * driver unloading. A more general condition is that NETDEV_CHANGE
+	 * can only be generated between NETDEV_UP and NETDEV_DOWN.
+	 */
+	if (!netif_running(efx->net_dev))
+		return;
+
+	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+		efx->n_link_state_changes++;
+
+		if (link_state->up)
+			netif_carrier_on(efx->net_dev);
+		else
+			netif_carrier_off(efx->net_dev);
+	}
+
+	/* Status message for kernel log */
+	if (link_state->up)
+		netif_info(efx, link, efx->net_dev,
+			   "link up at %uMbps %s-duplex (MTU %d)\n",
+			   link_state->speed, link_state->fd ? "full" : "half",
+			   efx->net_dev->mtu);
+	else
+		netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx)
+{
+	/* The maximum MTU that we can fit in a single page, allowing for
+	 * framing, overhead and XDP headroom + tailroom.
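+	 *
+	 * As a rough illustration (the exact figure depends on the NIC's
+	 * RX prefix and padding): with 4 KiB pages the result lands
+	 * several hundred bytes below 4096 once the frame header/FCS,
+	 * alignment and XDP head/tailroom are subtracted.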
+ */ + int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) + + efx->rx_prefix_size + efx->type->rx_buffer_padding + + efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM; + + return PAGE_SIZE - overhead; +} + +/* Context: process, rtnl_lock() held. */ +int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx_check_disabled(efx); + if (rc) + return rc; + + if (rtnl_dereference(efx->xdp_prog) && + new_mtu > efx_siena_xdp_max_mtu(efx)) { + netif_err(efx, drv, efx->net_dev, + "Requested MTU of %d too big for XDP (max: %d)\n", + new_mtu, efx_siena_xdp_max_mtu(efx)); + return -EINVAL; + } + + netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + + efx_device_detach_sync(efx); + efx_siena_stop_all(efx); + + mutex_lock(&efx->mac_lock); + net_dev->mtu = new_mtu; + efx_siena_mac_reconfigure(efx, true); + mutex_unlock(&efx->mac_lock); + + efx_siena_start_all(efx); + efx_device_attach_if_not_resetting(efx); + return 0; +} + +/************************************************************************** + * + * Hardware monitor + * + **************************************************************************/ + +/* Run periodically off the general workqueue */ +static void efx_monitor(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + monitor_work.work); + + netif_vdbg(efx, timer, efx->net_dev, + "hardware monitor executing on CPU %d\n", + raw_smp_processor_id()); + BUG_ON(efx->type->monitor == NULL); + + /* If the mac_lock is already held then it is likely a port + * reconfiguration is already in place, which will likely do + * most of the work of monitor() anyway. + */ + if (mutex_trylock(&efx->mac_lock)) { + if (efx->port_enabled && efx->type->monitor) + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + efx_siena_start_monitor(efx); +} + +void efx_siena_start_monitor(struct efx_nic *efx) +{ + if (efx->type->monitor) + queue_delayed_work(efx->workqueue, &efx->monitor_work, + efx_monitor_interval); +} + +/************************************************************************** + * + * Event queue processing + * + *************************************************************************/ + +/* Channels are shutdown and reinitialised whilst the NIC is running + * to propagate configuration changes (mtu, checksum offload), or + * to clear hardware error conditions + */ +static void efx_start_datapath(struct efx_nic *efx) +{ + netdev_features_t old_features = efx->net_dev->features; + bool old_rx_scatter = efx->rx_scatter; + size_t rx_buf_len; + + /* Calculate the rx buffer allocation parameters required to + * support the current MTU, including padding for header + * alignment and overruns. 
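+	 *
+	 * For example, a 1500-byte MTU fits in a single page, so order-0
+	 * pages are used and scatter is only on if the NIC always
+	 * scatters; a 9000-byte MTU either enables scatter (splitting the
+	 * frame into EFX_RX_USR_BUF_SIZE chunks) or, failing that, falls
+	 * back to a higher-order allocation.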
+ */ + efx->rx_dma_len = (efx->rx_prefix_size + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM + + efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM); + + if (rx_buf_len <= PAGE_SIZE) { + efx->rx_scatter = efx->type->always_rx_scatter; + efx->rx_buffer_order = 0; + } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); + BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + + 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, + EFX_RX_BUF_ALIGNMENT) > + PAGE_SIZE); + efx->rx_scatter = true; + efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; + efx->rx_buffer_order = 0; + } else { + efx->rx_scatter = false; + efx->rx_buffer_order = get_order(rx_buf_len); + } + + efx_siena_rx_config_page_split(efx); + if (efx->rx_buffer_order) + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u; page order=%u batch=%u\n", + efx->rx_dma_len, efx->rx_buffer_order, + efx->rx_pages_per_batch); + else + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u step=%u bpp=%u; page batch=%u\n", + efx->rx_dma_len, efx->rx_page_buf_step, + efx->rx_bufs_per_page, efx->rx_pages_per_batch); + + /* Restore previously fixed features in hw_features and remove + * features which are fixed now + */ + efx->net_dev->hw_features |= efx->net_dev->features; + efx->net_dev->hw_features &= ~efx->fixed_features; + efx->net_dev->features |= efx->fixed_features; + if (efx->net_dev->features != old_features) + netdev_features_change(efx->net_dev); + + /* RX filters may also have scatter-enabled flags */ + if ((efx->rx_scatter != old_rx_scatter) && + efx->type->filter_update_rx_scatter) + efx->type->filter_update_rx_scatter(efx); + + /* We must keep at least one descriptor in a TX ring empty. + * We could avoid this when the queue size does not exactly + * match the hardware ring size, but it's not that important. + * Therefore we stop the queue when one more skb might fill + * the ring completely. We wake it when half way back to + * empty. + */ + efx->txq_stop_thresh = efx->txq_entries - efx_siena_tx_max_skb_descs(efx); + efx->txq_wake_thresh = efx->txq_stop_thresh / 2; + + /* Initialise the channels */ + efx_siena_start_channels(efx); + + efx_siena_ptp_start_datapath(efx); + + if (netif_device_present(efx->net_dev)) + netif_tx_wake_all_queues(efx->net_dev); +} + +static void efx_stop_datapath(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->port_enabled); + + efx_siena_ptp_stop_datapath(efx); + + efx_siena_stop_channels(efx); +} + +/************************************************************************** + * + * Port handling + * + **************************************************************************/ + +/* Equivalent to efx_siena_link_set_advertising with all-zeroes, except does not + * force the Autoneg bit on. 
+ */ +void efx_siena_link_clear_advertising(struct efx_nic *efx) +{ + bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); +} + +void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) +{ + efx->wanted_fc = wanted_fc; + if (efx->link_advertising[0]) { + if (wanted_fc & EFX_FC_RX) + efx->link_advertising[0] |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else + efx->link_advertising[0] &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + if (wanted_fc & EFX_FC_TX) + efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; + } +} + +static void efx_start_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifup, efx->net_dev, "start port\n"); + BUG_ON(efx->port_enabled); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = true; + + /* Ensure MAC ingress/egress is enabled */ + efx_siena_mac_reconfigure(efx, false); + + mutex_unlock(&efx->mac_lock); +} + +/* Cancel work for MAC reconfiguration, periodic hardware monitoring + * and the async self-test, wait for them to finish and prevent them + * being scheduled again. This doesn't cover online resets, which + * should only be cancelled when removing the device. + */ +static void efx_stop_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = false; + mutex_unlock(&efx->mac_lock); + + /* Serialise against efx_set_multicast_list() */ + netif_addr_lock_bh(efx->net_dev); + netif_addr_unlock_bh(efx->net_dev); + + cancel_delayed_work_sync(&efx->monitor_work); + efx_siena_selftest_async_cancel(efx); + cancel_work_sync(&efx->mac_work); +} + +/* If the interface is supposed to be running but is not, start + * the hardware and software data path, regular activity for the port + * (MAC statistics, link polling, etc.) and schedule the port to be + * reconfigured. Interrupts must already be enabled. This function + * is safe to call multiple times, so long as the NIC is not disabled. + * Requires the RTNL lock. + */ +void efx_siena_start_all(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->state == STATE_DISABLED); + + /* Check that it is appropriate to restart the interface. All + * of these flags are safe to read under just the rtnl lock + */ + if (efx->port_enabled || !netif_running(efx->net_dev) || + efx->reset_pending) + return; + + efx_start_port(efx); + efx_start_datapath(efx); + + /* Start the hardware monitor if there is one */ + efx_siena_start_monitor(efx); + + /* Link state detection is normally event-driven; we have + * to poll now because we could have missed a change + */ + mutex_lock(&efx->mac_lock); + if (efx_siena_mcdi_phy_poll(efx)) + efx_siena_link_status_changed(efx); + mutex_unlock(&efx->mac_lock); + + if (efx->type->start_stats) { + efx->type->start_stats(efx); + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); + } +} + +/* Quiesce the hardware and software data path, and regular activity + * for the port without bringing the link down. Safe to call multiple + * times with the NIC in almost any state, but interrupts should be + * enabled. Requires the RTNL lock. 
+ */ +void efx_siena_stop_all(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + /* port_enabled can be read safely under the rtnl lock */ + if (!efx->port_enabled) + return; + + if (efx->type->update_stats) { + /* update stats before we go down so we can accurately count + * rx_nodesc_drops + */ + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); + efx->type->stop_stats(efx); + } + + efx_stop_port(efx); + + /* Stop the kernel transmit interface. This is only valid if + * the device is stopped or detached; otherwise the watchdog + * may fire immediately. + */ + WARN_ON(netif_running(efx->net_dev) && + netif_device_present(efx->net_dev)); + netif_tx_disable(efx->net_dev); + + efx_stop_datapath(efx); +} + +static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + if (efx->type->update_stats_atomic) + return efx->type->update_stats_atomic(efx, full_stats, core_stats); + return efx->type->update_stats(efx, full_stats, core_stats); +} + +/* Context: process, dev_base_lock or RTNL held, non-blocking. */ +void efx_siena_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + spin_lock_bh(&efx->stats_lock); + efx_siena_update_stats_atomic(efx, NULL, stats); + spin_unlock_bh(&efx->stats_lock); +} + +/* Push loopback/power/transmit disable settings to the PHY, and reconfigure + * the MAC appropriately. All other PHY configuration changes are pushed + * through phy_op->set_settings(), and pushed asynchronously to the MAC + * through efx_monitor(). + * + * Callers must hold the mac_lock + */ +int __efx_siena_reconfigure_port(struct efx_nic *efx) +{ + enum efx_phy_mode phy_mode; + int rc = 0; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + /* Disable PHY transmit in mac level loopbacks */ + phy_mode = efx->phy_mode; + if (LOOPBACK_INTERNAL(efx)) + efx->phy_mode |= PHY_MODE_TX_DISABLED; + else + efx->phy_mode &= ~PHY_MODE_TX_DISABLED; + + if (efx->type->reconfigure_port) + rc = efx->type->reconfigure_port(efx); + + if (rc) + efx->phy_mode = phy_mode; + + return rc; +} + +/* Reinitialise the MAC to pick up new PHY settings, even if the port is + * disabled. + */ +int efx_siena_reconfigure_port(struct efx_nic *efx) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + rc = __efx_siena_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/************************************************************************** + * + * Device reset and suspend + * + **************************************************************************/ + +static void efx_wait_for_bist_end(struct efx_nic *efx) +{ + int i; + + for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { + if (efx_siena_mcdi_poll_reboot(efx)) + goto out; + msleep(BIST_WAIT_DELAY_MS); + } + + netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); +out: + /* Either way unset the BIST flag. If we found no reboot we probably + * won't recover, but we should try. + */ + efx->mc_bist_for_other_fn = false; +} + +/* Try recovery mechanisms. + * For now only EEH is supported. + * Returns 0 if the recovery mechanisms are unsuccessful. + * Returns a non-zero value otherwise. + */ +int efx_siena_try_recovery(struct efx_nic *efx) +{ +#ifdef CONFIG_EEH + /* A PCI error can occur and not be seen by EEH because nothing + * happens on the PCI bus. 
In this case the driver may fail and + * schedule a 'recover or reset', leading to this recovery handler. + * Manually call the eeh failure check function. + */ + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + if (eeh_dev_check_failure(eehdev)) { + /* The EEH mechanisms will handle the error and reset the + * device if necessary. + */ + return 1; + } +#endif + return 0; +} + +/* Tears down the entire software state and most of the hardware state + * before reset. + */ +void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->prepare_flr(efx); + + efx_siena_stop_all(efx); + efx_siena_disable_interrupts(efx); + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + mutex_lock(&efx->rss_lock); + efx->type->fini(efx); +} + +/* Context: netif_tx_lock held, BHs disabled. */ +void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_err(efx, tx_err, efx->net_dev, + "TX stuck with port_enabled=%d: resetting channels\n", + efx->port_enabled); + + efx_siena_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); +} + +/* This function will always ensure that the locks acquired in + * efx_siena_reset_down() are released. A failure return code indicates + * that we were unable to reinitialise the hardware, and the + * driver should be disabled. If ok is false, then the rx and tx + * engines are not restarted, pending a RESET_DISABLE. + */ +int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->finish_flr(efx); + + /* Ensure that SRAM is initialised even if we're disabling the device */ + rc = efx->type->init(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); + goto fail; + } + + if (!ok) + goto fail; + + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && + method != RESET_TYPE_DATAPATH) { + rc = efx_siena_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + netif_err(efx, drv, efx->net_dev, + "could not restore PHY settings\n"); + } + + rc = efx_siena_enable_interrupts(efx); + if (rc) + goto fail; + +#ifdef CONFIG_SFC_SIENA_SRIOV + rc = efx->type->vswitching_restore(efx); + if (rc) /* not fatal; the PF will still work fine */ + netif_warn(efx, probe, efx->net_dev, + "failed to restore vswitching rc=%d;" + " VFs may not function\n", rc); +#endif + + if (efx->type->rx_restore_rss_contexts) + efx->type->rx_restore_rss_contexts(efx); + mutex_unlock(&efx->rss_lock); + efx->type->filter_table_restore(efx); + up_write(&efx->filter_sem); + if (efx->type->sriov_reset) + efx->type->sriov_reset(efx); + + mutex_unlock(&efx->mac_lock); + + efx_siena_start_all(efx); + + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + + return 0; + +fail: + efx->port_initialized = false; + + mutex_unlock(&efx->rss_lock); + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/* Reset the NIC using the specified method. Note that the reset may + * fail, in which case the card will be left in an unusable state. + * + * Caller must hold the rtnl_lock. 
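+ *
+ * A reset also clears any pending resets of equal or lesser scope: see
+ * the reset_pending update below, where e.g. method == 3 yields
+ * -(1 << 4) == ~0xf, clearing bits 0-3 in a single step.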
+ */ +int efx_siena_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc, rc2 = 0; + bool disabled; + + netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", + RESET_TYPE(method)); + + efx_device_detach_sync(efx); + /* efx_siena_reset_down() grabs locks that prevent recovery on EF100. + * EF100 reset is handled in the efx_nic_type callback below. + */ + if (efx_nic_rev(efx) != EFX_REV_EF100) + efx_siena_reset_down(efx, method); + + rc = efx->type->reset(efx, method); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); + goto out; + } + + /* Clear flags for the scopes we covered. We assume the NIC and + * driver are now quiescent so that there is no race here. + */ + if (method < RESET_TYPE_MAX_METHOD) + efx->reset_pending &= -(1 << (method + 1)); + else /* it doesn't fit into the well-ordered scope hierarchy */ + __clear_bit(method, &efx->reset_pending); + + /* Reinitialise bus-mastering, which may have been turned off before + * the reset was scheduled. This is still appropriate, even in the + * RESET_TYPE_DISABLE since this driver generally assumes the hardware + * can respond to requests. + */ + pci_set_master(efx->pci_dev); + +out: + /* Leave device stopped if necessary */ + disabled = rc || + method == RESET_TYPE_DISABLE || + method == RESET_TYPE_RECOVER_OR_DISABLE; + if (efx_nic_rev(efx) != EFX_REV_EF100) + rc2 = efx_siena_reset_up(efx, method, !disabled); + if (rc2) { + disabled = true; + if (!rc) + rc = rc2; + } + + if (disabled) { + dev_close(efx->net_dev); + netif_err(efx, drv, efx->net_dev, "has been disabled\n"); + efx->state = STATE_DISABLED; + } else { + netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); + efx_device_attach_if_not_resetting(efx); + } + return rc; +} + +/* The worker thread exists so that code that cannot sleep can + * schedule a reset for later. + */ +static void efx_reset_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); + unsigned long pending; + enum reset_type method; + + pending = READ_ONCE(efx->reset_pending); + method = fls(pending) - 1; + + if (method == RESET_TYPE_MC_BIST) + efx_wait_for_bist_end(efx); + + if ((method == RESET_TYPE_RECOVER_OR_DISABLE || + method == RESET_TYPE_RECOVER_OR_ALL) && + efx_siena_try_recovery(efx)) + return; + + if (!pending) + return; + + rtnl_lock(); + + /* We checked the state in efx_siena_schedule_reset() but it may + * have changed by now. Now that we have the RTNL lock, + * it cannot change again. 
+ */ + if (efx->state == STATE_READY) + (void)efx_siena_reset(efx, method); + + rtnl_unlock(); +} + +void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type) +{ + enum reset_type method; + + if (efx->state == STATE_RECOVERY) { + netif_dbg(efx, drv, efx->net_dev, + "recovering: skip scheduling %s reset\n", + RESET_TYPE(type)); + return; + } + + switch (type) { + case RESET_TYPE_INVISIBLE: + case RESET_TYPE_ALL: + case RESET_TYPE_RECOVER_OR_ALL: + case RESET_TYPE_WORLD: + case RESET_TYPE_DISABLE: + case RESET_TYPE_RECOVER_OR_DISABLE: + case RESET_TYPE_DATAPATH: + case RESET_TYPE_MC_BIST: + case RESET_TYPE_MCDI_TIMEOUT: + method = type; + netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", + RESET_TYPE(method)); + break; + default: + method = efx->type->map_reset_reason(type); + netif_dbg(efx, drv, efx->net_dev, + "scheduling %s reset for %s\n", + RESET_TYPE(method), RESET_TYPE(type)); + break; + } + + set_bit(method, &efx->reset_pending); + smp_mb(); /* ensure we change reset_pending before checking state */ + + /* If we're not READY then just leave the flags set as the cue + * to abort probing or reschedule the reset later. + */ + if (READ_ONCE(efx->state) != STATE_READY) + return; + + /* efx_process_channel() will no longer read events once a + * reset is scheduled. So switch back to poll'd MCDI completions. + */ + efx_siena_mcdi_mode_poll(efx); + + efx_siena_queue_reset_work(efx); +} + +/************************************************************************** + * + * Dummy NIC operations + * + * Can be used for some unimplemented operations + * Needed so all function pointers are valid and do not have to be tested + * before use + * + **************************************************************************/ +int efx_siena_port_dummy_op_int(struct efx_nic *efx) +{ + return 0; +} + +void efx_siena_port_dummy_op_void(struct efx_nic *efx) {} + +/************************************************************************** + * + * Data housekeeping + * + **************************************************************************/ + +/* This zeroes out and then fills in the invariants in a struct + * efx_nic (including all sub-structures). + */ +int efx_siena_init_struct(struct efx_nic *efx, + struct pci_dev *pci_dev, struct net_device *net_dev) +{ + int rc = -ENOMEM; + + /* Initialise common structures */ + INIT_LIST_HEAD(&efx->node); + INIT_LIST_HEAD(&efx->secondary_list); + spin_lock_init(&efx->biu_lock); +#ifdef CONFIG_SFC_SIENA_MTD + INIT_LIST_HEAD(&efx->mtd_list); +#endif + INIT_WORK(&efx->reset_work, efx_reset_work); + INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); + efx_siena_selftest_async_init(efx); + efx->pci_dev = pci_dev; + efx->msg_enable = debug; + efx->state = STATE_UNINIT; + strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); + + efx->net_dev = net_dev; + efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? 
(efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; + efx->rx_packet_hash_offset = + efx->type->rx_hash_offset - efx->type->rx_prefix_size; + efx->rx_packet_ts_offset = + efx->type->rx_ts_offset - efx->type->rx_prefix_size; + INIT_LIST_HEAD(&efx->rss_context.list); + efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + mutex_init(&efx->rss_lock); + efx->vport_id = EVB_PORT_ID_ASSIGNED; + spin_lock_init(&efx->stats_lock); + efx->vi_stride = EFX_DEFAULT_VI_STRIDE; + efx->num_mac_stats = MC_CMD_MAC_NSTATS; + BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END); + mutex_init(&efx->mac_lock); + init_rwsem(&efx->filter_sem); +#ifdef CONFIG_RFS_ACCEL + mutex_init(&efx->rps_mutex); + spin_lock_init(&efx->rps_hash_lock); + /* Failure to allocate is not fatal, but may degrade ARFS performance */ + efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, + sizeof(*efx->rps_hash_table), GFP_KERNEL); +#endif + efx->mdio.dev = net_dev; + INIT_WORK(&efx->mac_work, efx_mac_work); + init_waitqueue_head(&efx->flush_wq); + + efx->tx_queues_per_channel = 1; + efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE; + efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; + + efx->mem_bar = UINT_MAX; + + rc = efx_siena_init_channels(efx); + if (rc) + goto fail; + + /* Would be good to use the net_dev name, but we're too early */ + snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", + pci_name(pci_dev)); + efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); + if (!efx->workqueue) { + rc = -ENOMEM; + goto fail; + } + + return 0; + +fail: + efx_siena_fini_struct(efx); + return rc; +} + +void efx_siena_fini_struct(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_hash_table); +#endif + + efx_siena_fini_channels(efx); + + kfree(efx->vpd_sn); + + if (efx->workqueue) { + destroy_workqueue(efx->workqueue); + efx->workqueue = NULL; + } +} + +/* This configures the PCI device to enable I/O and DMA. */ +int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, + unsigned int mem_map_size) +{ + struct pci_dev *pci_dev = efx->pci_dev; + int rc; + + efx->mem_bar = UINT_MAX; + + netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar); + + rc = pci_enable_device(pci_dev); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to enable PCI device\n"); + goto fail1; + } + + pci_set_master(pci_dev); + + rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "could not find a suitable DMA mask\n"); + goto fail2; + } + netif_dbg(efx, probe, efx->net_dev, + "using DMA mask %llx\n", (unsigned long long)dma_mask); + + efx->membase_phys = pci_resource_start(efx->pci_dev, bar); + if (!efx->membase_phys) { + netif_err(efx, probe, efx->net_dev, + "ERROR: No BAR%d mapping from the BIOS. 
" + "Try pci=realloc on the kernel command line\n", bar); + rc = -ENODEV; + goto fail3; + } + + rc = pci_request_region(pci_dev, bar, "sfc"); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "request for memory BAR[%d] failed\n", bar); + rc = -EIO; + goto fail3; + } + efx->mem_bar = bar; + efx->membase = ioremap(efx->membase_phys, mem_map_size); + if (!efx->membase) { + netif_err(efx, probe, efx->net_dev, + "could not map memory BAR[%d] at %llx+%x\n", bar, + (unsigned long long)efx->membase_phys, mem_map_size); + rc = -ENOMEM; + goto fail4; + } + netif_dbg(efx, probe, efx->net_dev, + "memory BAR[%d] at %llx+%x (virtual %p)\n", bar, + (unsigned long long)efx->membase_phys, mem_map_size, + efx->membase); + + return 0; + +fail4: + pci_release_region(efx->pci_dev, bar); +fail3: + efx->membase_phys = 0; +fail2: + pci_disable_device(efx->pci_dev); +fail1: + return rc; +} + +void efx_siena_fini_io(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); + + if (efx->membase) { + iounmap(efx->membase); + efx->membase = NULL; + } + + if (efx->membase_phys) { + pci_release_region(efx->pci_dev, efx->mem_bar); + efx->membase_phys = 0; + efx->mem_bar = UINT_MAX; + } + + /* Don't disable bus-mastering if VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + pci_disable_device(efx->pci_dev); +} + +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING +static ssize_t mcdi_logging_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); +} + +static ssize_t mcdi_logging_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool enable = count > 0 && *buf != '0'; + + mcdi->logging_enabled = enable; + return count; +} + +static DEVICE_ATTR_RW(mcdi_logging); + +void efx_siena_init_mcdi_logging(struct efx_nic *efx) +{ + int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); + + if (rc) { + netif_warn(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + } +} + +void efx_siena_fini_mcdi_logging(struct efx_nic *efx) +{ + device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); +} +#endif + +/* A PCI error affecting this device was detected. + * At this point MMIO and DMA may be disabled. + * Stop the software path and request a slot reset. + */ +static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + struct efx_nic *efx = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_RECOVERY; + efx->reset_pending = 0; + + efx_device_detach_sync(efx); + + efx_siena_stop_all(efx); + efx_siena_disable_interrupts(efx); + + status = PCI_ERS_RESULT_NEED_RESET; + } else { + /* If the interface is disabled we don't want to do anything + * with it. + */ + status = PCI_ERS_RESULT_RECOVERED; + } + + rtnl_unlock(); + + pci_disable_device(pdev); + + return status; +} + +/* Fake a successful reset, which will be performed later in efx_io_resume. 
+ */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+	struct efx_nic *efx = pci_get_drvdata(pdev);
+	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+
+	if (pci_enable_device(pdev)) {
+		netif_err(efx, hw, efx->net_dev,
+			  "Cannot re-enable PCI device after reset.\n");
+		status = PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+	struct efx_nic *efx = pci_get_drvdata(pdev);
+	int rc;
+
+	rtnl_lock();
+
+	if (efx->state == STATE_DISABLED)
+		goto out;
+
+	rc = efx_siena_reset(efx, RESET_TYPE_ALL);
+	if (rc) {
+		netif_err(efx, hw, efx->net_dev,
+			  "efx_siena_reset failed after PCI error (%d)\n", rc);
+	} else {
+		efx->state = STATE_READY;
+		netif_dbg(efx, hw, efx->net_dev,
+			  "Done resetting and resuming IO after PCI error.\n");
+	}
+
+out:
+	rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a PCI error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callbacks unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+const struct pci_error_handlers efx_siena_err_handlers = {
+	.error_detected = efx_io_error_detected,
+	.slot_reset = efx_io_slot_reset,
+	.resume = efx_io_resume,
+};
+
+/* Determine whether the NIC will be able to handle TX offloads for a given
+ * encapsulated packet.
+ */
+static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct gre_base_hdr *greh;
+	__be16 dst_port;
+	u8 ipproto;
+
+	/* Does the NIC support encap offloads?
+	 * If not, we should never get here, because we shouldn't have
+	 * advertised encap offload feature flags in the first place.
+	 */
+	if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
+		return false;
+
+	/* Determine encapsulation protocol in use */
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		ipproto = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		/* If there are extension headers, this will cause us to
+		 * think we can't offload something that we maybe could have.
+		 */
+		ipproto = ipv6_hdr(skb)->nexthdr;
+		break;
+	default:
+		/* Not IP, so can't offload it */
+		return false;
+	}
+	switch (ipproto) {
+	case IPPROTO_GRE:
+		/* We support NVGRE but not IP over GRE or random gretaps.
+		 * Specifically, the NIC will accept GRE as encapsulated if
+		 * the inner protocol is Ethernet, but only handle it
+		 * correctly if the GRE header is 8 bytes long. Moreover,
+		 * it will not update the Checksum or Sequence Number fields
+		 * if they are present. (The Routing Present flag,
+		 * GRE_ROUTING, cannot be set else the header would be more
+		 * than 8 bytes long; so we don't have to worry about it.)
+		 */
+		if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+			return false;
+		if (ntohs(skb->inner_protocol) != ETH_P_TEB)
+			return false;
+		if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
+			return false;
+		greh = (struct gre_base_hdr *)skb_transport_header(skb);
+		return !(greh->flags & (GRE_CSUM | GRE_SEQ));
+	case IPPROTO_UDP:
+		/* If the port is registered for a UDP tunnel, we assume the
+		 * packet is for that tunnel, and the NIC will handle it as
+		 * such. If not, the NIC won't know what to do with it.
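+		 * For example, assuming VXLAN on its conventional port 4789
+		 * has been registered with the NIC, a VXLAN-encapsulated skb
+		 * to that destination port passes this check, while the same
+		 * skb to an unregistered port has its GSO/checksum features
+		 * masked off by efx_siena_features_check() below.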
+ */ + dst_port = udp_hdr(skb)->dest; + return efx->type->udp_tnl_has_port(efx, dst_port); + default: + return false; + } +} + +netdev_features_t efx_siena_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct efx_nic *efx = netdev_priv(dev); + + if (skb->encapsulation) { + if (features & NETIF_F_GSO_MASK) + /* Hardware can only do TSO with at most 208 bytes + * of headers. + */ + if (skb_inner_transport_offset(skb) > + EFX_TSO2_MAX_HDRLEN) + features &= ~(NETIF_F_GSO_MASK); + if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK)) + if (!efx_can_encap_offloads(efx, skb)) + features &= ~(NETIF_F_GSO_MASK | + NETIF_F_CSUM_MASK); + } + return features; +} + +int efx_siena_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_phys_port_id) + return efx->type->get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} + +int efx_siena_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (snprintf(name, len, "p%u", efx->port_num) >= len) + return -EINVAL; + return 0; +} diff --git a/drivers/net/ethernet/sfc/siena/efx_common.h b/drivers/net/ethernet/sfc/siena/efx_common.h new file mode 100644 index 0000000000..aeb92f4e34 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx_common.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_COMMON_H +#define EFX_COMMON_H + +int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, + unsigned int mem_map_size); +void efx_siena_fini_io(struct efx_nic *efx); +int efx_siena_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, + struct net_device *net_dev); +void efx_siena_fini_struct(struct efx_nic *efx); + +#define EFX_MAX_DMAQ_SIZE 4096UL +#define EFX_DEFAULT_DMAQ_SIZE 1024UL +#define EFX_MIN_DMAQ_SIZE 512UL + +#define EFX_MAX_EVQ_SIZE 16384UL +#define EFX_MIN_EVQ_SIZE 512UL + +void efx_siena_link_clear_advertising(struct efx_nic *efx); +void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc); + +void efx_siena_start_all(struct efx_nic *efx); +void efx_siena_stop_all(struct efx_nic *efx); + +void efx_siena_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats); + +int efx_siena_create_reset_workqueue(void); +void efx_siena_queue_reset_work(struct efx_nic *efx); +void efx_siena_flush_reset_workqueue(struct efx_nic *efx); +void efx_siena_destroy_reset_workqueue(void); + +void efx_siena_start_monitor(struct efx_nic *efx); + +int __efx_siena_reconfigure_port(struct efx_nic *efx); +int efx_siena_reconfigure_port(struct efx_nic *efx); + +#define EFX_ASSERT_RESET_SERIALISED(efx) \ + do { \ + if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ + (efx->state == STATE_DISABLED)) \ + ASSERT_RTNL(); \ + } while (0) + +int efx_siena_try_recovery(struct efx_nic *efx); +void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method); +void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue); +int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +int efx_siena_reset(struct efx_nic *efx, enum reset_type method); +void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type); + +/* Dummy PHY ops for PHY drivers */ +int efx_siena_port_dummy_op_int(struct efx_nic *efx); +void efx_siena_port_dummy_op_void(struct efx_nic *efx); + +static inline int efx_check_disabled(struct efx_nic *efx) +{ + if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { + netif_err(efx, drv, efx->net_dev, + "device is disabled due to earlier errors\n"); + return -EIO; + } + return 0; +} + +static inline void efx_schedule_channel(struct efx_channel *channel) +{ + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d scheduling NAPI poll on CPU%d\n", + channel->channel, raw_smp_processor_id()); + + napi_schedule(&channel->napi_str); +} + +static inline void efx_schedule_channel_irq(struct efx_channel *channel) +{ + channel->event_test_cpu = raw_smp_processor_id(); + efx_schedule_channel(channel); +} + +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING +void efx_siena_init_mcdi_logging(struct efx_nic *efx); +void efx_siena_fini_mcdi_logging(struct efx_nic *efx); +#else +static inline void efx_siena_init_mcdi_logging(struct efx_nic *efx) {} +static inline void efx_siena_fini_mcdi_logging(struct efx_nic *efx) {} +#endif + +void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only); +int efx_siena_set_mac_address(struct net_device *net_dev, void *data); +void efx_siena_set_rx_mode(struct net_device *net_dev); +int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data); +void efx_siena_link_status_changed(struct efx_nic *efx); +unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx); +int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu); + +extern const struct pci_error_handlers efx_siena_err_handlers; + +netdev_features_t 
efx_siena_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); + +int efx_siena_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); + +int efx_siena_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len); +#endif diff --git a/drivers/net/ethernet/sfc/siena/enum.h b/drivers/net/ethernet/sfc/siena/enum.h new file mode 100644 index 0000000000..25b28b3969 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/enum.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2007-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_ENUM_H +#define EFX_ENUM_H + +/** + * enum efx_loopback_mode - loopback modes + * @LOOPBACK_NONE: no loopback + * @LOOPBACK_DATA: data path loopback + * @LOOPBACK_GMAC: loopback within GMAC + * @LOOPBACK_XGMII: loopback after XMAC + * @LOOPBACK_XGXS: loopback within BPX after XGXS + * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes + * @LOOPBACK_GMII: loopback within BPX after GMAC + * @LOOPBACK_SGMII: loopback within BPX within SGMII + * @LOOPBACK_XGBR: loopback within BPX within XGBR + * @LOOPBACK_XFI: loopback within BPX before XFI serdes + * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes + * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII + * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII + * @LOOPBACK_XFI_FAR: loopback after XFI serdes + * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level + * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level + * @LOOPBACK_PCS: loopback within 10G PHY at PCS level + * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level + * @LOOPBACK_XPORT: cross port loopback + * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC + * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes + * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes + * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes + * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC + * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes + * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes + * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level + */ +/* Please keep up-to-date w.r.t the following two #defines */ +enum efx_loopback_mode { + LOOPBACK_NONE = 0, + LOOPBACK_DATA = 1, + LOOPBACK_GMAC = 2, + LOOPBACK_XGMII = 3, + LOOPBACK_XGXS = 4, + LOOPBACK_XAUI = 5, + LOOPBACK_GMII = 6, + LOOPBACK_SGMII = 7, + LOOPBACK_XGBR = 8, + LOOPBACK_XFI = 9, + LOOPBACK_XAUI_FAR = 10, + LOOPBACK_GMII_FAR = 11, + LOOPBACK_SGMII_FAR = 12, + LOOPBACK_XFI_FAR = 13, + LOOPBACK_GPHY = 14, + LOOPBACK_PHYXS = 15, + LOOPBACK_PCS = 16, + LOOPBACK_PMAPMD = 17, + LOOPBACK_XPORT = 18, + LOOPBACK_XGMII_WS = 19, + LOOPBACK_XAUI_WS = 20, + LOOPBACK_XAUI_WS_FAR = 21, + LOOPBACK_XAUI_WS_NEAR = 22, + LOOPBACK_GMII_WS = 23, + LOOPBACK_XFI_WS = 24, + LOOPBACK_XFI_WS_FAR = 25, + LOOPBACK_PHYXS_WS = 26, + LOOPBACK_MAX +}; +#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD + +/* These loopbacks occur within the controller */ +#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \ + (1 << LOOPBACK_GMAC) | \ + (1 << LOOPBACK_XGMII)| \ + (1 << LOOPBACK_XGXS) | \ + (1 << LOOPBACK_XAUI) | \ + (1 << LOOPBACK_GMII) | \ + (1 << LOOPBACK_SGMII) | \ + (1 << LOOPBACK_XGBR) | \ + (1 << LOOPBACK_XFI) | \ + (1 << LOOPBACK_XAUI_FAR) | \ + (1 << LOOPBACK_GMII_FAR) 
| \
+			    (1 << LOOPBACK_SGMII_FAR) | \
+			    (1 << LOOPBACK_XFI_FAR) | \
+			    (1 << LOOPBACK_XGMII_WS) | \
+			    (1 << LOOPBACK_XAUI_WS) | \
+			    (1 << LOOPBACK_XAUI_WS_FAR) | \
+			    (1 << LOOPBACK_XAUI_WS_NEAR) | \
+			    (1 << LOOPBACK_GMII_WS) | \
+			    (1 << LOOPBACK_XFI_WS) | \
+			    (1 << LOOPBACK_XFI_WS_FAR))
+
+#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
+		      (1 << LOOPBACK_XAUI_WS) | \
+		      (1 << LOOPBACK_XAUI_WS_FAR) | \
+		      (1 << LOOPBACK_XAUI_WS_NEAR) | \
+		      (1 << LOOPBACK_GMII_WS) | \
+		      (1 << LOOPBACK_XFI_WS) | \
+		      (1 << LOOPBACK_XFI_WS_FAR) | \
+		      (1 << LOOPBACK_PHYXS_WS))
+
+#define LOOPBACKS_EXTERNAL(_efx) \
+	((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
+	 ~(1 << LOOPBACK_NONE))
+
+#define LOOPBACK_MASK(_efx) \
+	(1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+	(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
+
+#define LOOPBACK_EXTERNAL(_efx) \
+	(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
+
+#define LOOPBACK_CHANGED(_from, _to, _mask) \
+	(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+	((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
+
+/*****************************************************************************/
+
+/**
+ * enum reset_type - reset types
+ *
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
+ * other values specify reasons, which efx_siena_schedule_reset() will choose
+ * a method for.
+ *
+ * Reset methods are numbered in order of increasing scope.
+ *
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
+ * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
+ * @RESET_TYPE_INT_ERROR: reset due to internal error
+ * @RESET_TYPE_DMA_ERROR: DMA error
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
+ */
+enum reset_type {
+	RESET_TYPE_INVISIBLE,
+	RESET_TYPE_RECOVER_OR_ALL,
+	RESET_TYPE_ALL,
+	RESET_TYPE_WORLD,
+	RESET_TYPE_RECOVER_OR_DISABLE,
+	RESET_TYPE_DATAPATH,
+	RESET_TYPE_MC_BIST,
+	RESET_TYPE_DISABLE,
+	RESET_TYPE_MAX_METHOD,
+	RESET_TYPE_TX_WATCHDOG,
+	RESET_TYPE_INT_ERROR,
+	RESET_TYPE_DMA_ERROR,
+	RESET_TYPE_TX_SKIP,
+	RESET_TYPE_MC_FAILURE,
+	/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+	 * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+	 * We encode this by having its enum value be greater than
+	 * RESET_TYPE_MAX_METHOD.
+	 */
+	RESET_TYPE_MCDI_TIMEOUT,
+	RESET_TYPE_MAX,
+};
+
+#endif /* EFX_ENUM_H */
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
new file mode 100644
index 0000000000..e4ec589216
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
+#include "filter.h"
+#include "nic.h"
+
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int efx_ethtool_phys_id(struct net_device *net_dev,
+			       enum ethtool_phys_id_state state)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	enum efx_led_mode mode = EFX_LED_DEFAULT;
+
+	switch (state) {
+	case ETHTOOL_ID_ON:
+		mode = EFX_LED_ON;
+		break;
+	case ETHTOOL_ID_OFF:
+		mode = EFX_LED_OFF;
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		mode = EFX_LED_DEFAULT;
+		break;
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+	}
+
+	return efx_siena_mcdi_set_id_led(efx, mode);
+}
+
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+	return efx_siena_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+				 struct ethtool_regs *regs, void *buf)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	regs->version = efx->type->revision;
+	efx_siena_get_regs(efx, buf);
+}
+
+/*
+ * Each channel has a single IRQ and moderation timer, started by any
+ * completion (or other event). Unless the module parameter
+ * separate_tx_channels is set, IRQs and moderation are therefore
+ * shared between RX and TX completions. In this case, when RX IRQ
+ * moderation is explicitly changed then TX IRQ moderation is
+ * automatically changed too, but otherwise we fail if the two values
+ * are requested to be different.
+ *
+ * The hardware does not support a limit on the number of completions
+ * before an IRQ, so we do not use the max_frames fields. We should
+ * report and require that max_frames == (usecs != 0), but this would
+ * invalidate existing user documentation.
+ *
+ * The hardware does not have distinct settings for interrupt
+ * moderation while the previous IRQ is being handled, so we should
+ * not use the 'irq' fields. However, an earlier developer
+ * misunderstood the meaning of the 'irq' fields and the driver did
+ * not support the standard fields. To avoid invalidating existing
+ * user documentation, we report and accept changes through either the
+ * standard or 'irq' fields. If both are changed at the same time, we
+ * prefer the standard field.
+ *
+ * We implement adaptive IRQ moderation, but use a different algorithm
+ * from that assumed in the definition of struct ethtool_coalesce.
+ * Therefore we do not use any of the adaptive moderation parameters
+ * in it.
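+ *
+ * As a sketch of the resulting precedence (see
+ * efx_ethtool_set_coalesce() below): a standard field that differs
+ * from the current moderation value always wins; only when it is
+ * unchanged is a change made through the legacy 'irq' field honoured.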
+ */ + +static int efx_ethtool_get_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + unsigned int tx_usecs, rx_usecs; + bool rx_adaptive; + + efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); + + coalesce->tx_coalesce_usecs = tx_usecs; + coalesce->tx_coalesce_usecs_irq = tx_usecs; + coalesce->rx_coalesce_usecs = rx_usecs; + coalesce->rx_coalesce_usecs_irq = rx_usecs; + coalesce->use_adaptive_rx_coalesce = rx_adaptive; + + return 0; +} + +static int efx_ethtool_set_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + unsigned int tx_usecs, rx_usecs; + bool adaptive, rx_may_override_tx; + int rc; + + efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); + + if (coalesce->rx_coalesce_usecs != rx_usecs) + rx_usecs = coalesce->rx_coalesce_usecs; + else + rx_usecs = coalesce->rx_coalesce_usecs_irq; + + adaptive = coalesce->use_adaptive_rx_coalesce; + + /* If channels are shared, TX IRQ moderation can be quietly + * overridden unless it is changed from its old value. + */ + rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && + coalesce->tx_coalesce_usecs_irq == tx_usecs); + if (coalesce->tx_coalesce_usecs != tx_usecs) + tx_usecs = coalesce->tx_coalesce_usecs; + else + tx_usecs = coalesce->tx_coalesce_usecs_irq; + + rc = efx_siena_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, + rx_may_override_tx); + if (rc != 0) + return rc; + + efx_for_each_channel(channel, efx) + efx->type->push_irq_moderation(channel); + + return 0; +} + +static void +efx_ethtool_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; + ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx); + ring->rx_pending = efx->rxq_entries; + ring->tx_pending = efx->txq_entries; +} + +static int +efx_ethtool_set_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending || + ring->rx_pending > EFX_MAX_DMAQ_SIZE || + ring->tx_pending > EFX_TXQ_MAX_ENT(efx)) + return -EINVAL; + + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { + netif_err(efx, drv, efx->net_dev, + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); + return -EINVAL; + } + + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_siena_realloc_channels(efx, ring->rx_pending, txq_entries); +} + +static void efx_ethtool_get_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->type->get_wol(efx, wol); +} + + +static int efx_ethtool_set_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->type->set_wol(efx, wol->wolopts); +} + +static void efx_ethtool_get_fec_stats(struct net_device 
*net_dev, + struct ethtool_fec_stats *fec_stats) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_fec_stats) + efx->type->get_fec_stats(efx, fec_stats); +} + +static int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Software capabilities */ + ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE); + ts_info->phc_index = -1; + + efx_siena_ptp_get_ts_info(efx, ts_info); + return 0; +} + +const struct ethtool_ops efx_siena_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_drvinfo = efx_siena_ethtool_get_drvinfo, + .get_regs_len = efx_ethtool_get_regs_len, + .get_regs = efx_ethtool_get_regs, + .get_msglevel = efx_siena_ethtool_get_msglevel, + .set_msglevel = efx_siena_ethtool_set_msglevel, + .get_link = ethtool_op_get_link, + .get_coalesce = efx_ethtool_get_coalesce, + .set_coalesce = efx_ethtool_set_coalesce, + .get_ringparam = efx_ethtool_get_ringparam, + .set_ringparam = efx_ethtool_set_ringparam, + .get_pauseparam = efx_siena_ethtool_get_pauseparam, + .set_pauseparam = efx_siena_ethtool_set_pauseparam, + .get_sset_count = efx_siena_ethtool_get_sset_count, + .self_test = efx_siena_ethtool_self_test, + .get_strings = efx_siena_ethtool_get_strings, + .set_phys_id = efx_ethtool_phys_id, + .get_ethtool_stats = efx_siena_ethtool_get_stats, + .get_wol = efx_ethtool_get_wol, + .set_wol = efx_ethtool_set_wol, + .reset = efx_siena_ethtool_reset, + .get_rxnfc = efx_siena_ethtool_get_rxnfc, + .set_rxnfc = efx_siena_ethtool_set_rxnfc, + .get_rxfh_indir_size = efx_siena_ethtool_get_rxfh_indir_size, + .get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size, + .get_rxfh = efx_siena_ethtool_get_rxfh, + .set_rxfh = efx_siena_ethtool_set_rxfh, + .get_rxfh_context = efx_siena_ethtool_get_rxfh_context, + .set_rxfh_context = efx_siena_ethtool_set_rxfh_context, + .get_ts_info = efx_ethtool_get_ts_info, + .get_module_info = efx_siena_ethtool_get_module_info, + .get_module_eeprom = efx_siena_ethtool_get_module_eeprom, + .get_link_ksettings = efx_siena_ethtool_get_link_ksettings, + .set_link_ksettings = efx_siena_ethtool_set_link_ksettings, + .get_fec_stats = efx_ethtool_get_fec_stats, + .get_fecparam = efx_siena_ethtool_get_fecparam, + .set_fecparam = efx_siena_ethtool_set_fecparam, +}; diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c new file mode 100644 index 0000000000..0207d07f54 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -0,0 +1,1340 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "ethtool_common.h"
+#include "mcdi_port_common.h"
+
+struct efx_sw_stat_desc {
+	const char *name;
+	enum {
+		EFX_ETHTOOL_STAT_SOURCE_nic,
+		EFX_ETHTOOL_STAT_SOURCE_channel,
+		EFX_ETHTOOL_STAT_SOURCE_tx_queue
+	} source;
+	unsigned int offset;
+	u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+			 get_stat_function) { \
+	.name = #stat_name, \
+	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+	.offset = ((((field_type *) 0) == \
+		    &((struct efx_##source_name *)0)->field) ? \
+		   offsetof(struct efx_##source_name, field) : \
+		   offsetof(struct efx_##source_name, field)), \
+	.get_stat = get_stat_function, \
+}
+
+static u64 efx_get_uint_stat(void *field)
+{
+	return *(unsigned int *)field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+	return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+	EFX_ETHTOOL_STAT(field, nic, field, \
+			 atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+	EFX_ETHTOOL_STAT(field, channel, n_##field, \
+			 unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
+	EFX_ETHTOOL_STAT(field, channel, field, \
+			 unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+			 unsigned int, efx_get_uint_stat)
+
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
+	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+	EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
+	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+	EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
+};
+
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+
+void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev,
+				   struct ethtool_drvinfo *info)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	efx_siena_mcdi_print_fwver(efx, info->fw_version,
+				   sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	return efx->msg_enable;
+}
+
+void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	efx->msg_enable = msg_enable;
+}
+
+void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev,
+				      struct ethtool_pauseparam *pause)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev,
+				     struct ethtool_pauseparam *pause)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	u8 wanted_fc, old_fc;
+	u32 old_adv;
+	int rc = 0;
+
+	mutex_lock(&efx->mac_lock);
+
+	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
+		     (pause->tx_pause ? EFX_FC_TX : 0) |
+		     (pause->autoneg ? EFX_FC_AUTO : 0));
+
+	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Flow control unsupported: tx ON rx OFF\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Autonegotiation is disabled\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Hook for Falcon bug 11482 workaround */
+	if (efx->type->prepare_enable_fc_tx &&
+	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+		efx->type->prepare_enable_fc_tx(efx);
+
+	old_adv = efx->link_advertising[0];
+	old_fc = efx->wanted_fc;
+	efx_siena_link_set_wanted_fc(efx, wanted_fc);
+	if (efx->link_advertising[0] != old_adv ||
+	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+		rc = efx_siena_mcdi_port_reconfigure(efx);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "Unable to advertise requested flow "
+				  "control setting\n");
+			goto out;
+		}
+	}
+
+	/* Reconfigure the MAC. The PHY *may* generate a link state change event
+	 * if the user just changed the advertised capabilities, but there's no
+	 * harm doing this twice */
+	efx_siena_mac_reconfigure(efx, false);
+
+out:
+	mutex_unlock(&efx->mac_lock);
+
+	return rc;
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
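+ * For example, efx_fill_test(n, strings, data, &tests->interrupt,
+ * "core", 0, "interrupt", NULL) stores the interrupt test result in
+ * data[n] and formats the string "core   interrupt" (unit and test
+ * name padded via "%-6s %-24s" into a single ETH_GSTRING_LEN entry).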
+ */ +static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, + int *test, const char *unit_format, int unit_id, + const char *test_format, const char *test_id) +{ + char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN]; + + /* Fill data value, if applicable */ + if (data) + data[test_index] = *test; + + /* Fill string, if applicable */ + if (strings) { + if (strchr(unit_format, '%')) + snprintf(unit_str, sizeof(unit_str), + unit_format, unit_id); + else + strcpy(unit_str, unit_format); + snprintf(test_str, sizeof(test_str), test_format, test_id); + snprintf(strings + test_index * ETH_GSTRING_LEN, + ETH_GSTRING_LEN, + "%-6s %-24s", unit_str, test_str); + } +} + +#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel +#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label +#define EFX_LOOPBACK_NAME(_mode, _counter) \ + "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_siena_loopback_mode) + +/** + * efx_fill_loopback_test - fill in a block of loopback self-test entries + * @efx: Efx NIC + * @lb_tests: Efx loopback self-test results structure + * @mode: Loopback test mode + * @test_index: Starting index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Fill in a block of loopback self-test entries. Return new test + * index. + */ +static int efx_fill_loopback_test(struct efx_nic *efx, + struct efx_loopback_self_tests *lb_tests, + enum efx_loopback_mode mode, + unsigned int test_index, + u8 *strings, u64 *data) +{ + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_sent[tx_queue->label], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_sent")); + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_done[tx_queue->label], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_done")); + } + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_good, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_good")); + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_bad, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_bad")); + + return test_index; +} + +/** + * efx_ethtool_fill_self_tests - get self-test details + * @efx: Efx NIC + * @tests: Efx self-test results structure, or %NULL + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Get self-test number of strings, strings, and/or test results. + * Return number of strings (== number of test results). + * + * The reason for merging these three functions is to make sure that + * they can never be inconsistent. 
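+ * Hence efx_siena_ethtool_get_sset_count() calls this with @tests,
+ * @strings and @data all %NULL just to count the entries, while the
+ * get-strings and self-test paths each pass only the buffer they
+ * need filled.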
+ */ +static int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + u8 *strings, u64 *data) +{ + struct efx_channel *channel; + unsigned int n = 0, i; + enum efx_loopback_mode mode; + + efx_fill_test(n++, strings, data, &tests->phy_alive, + "phy", 0, "alive", NULL); + efx_fill_test(n++, strings, data, &tests->nvram, + "core", 0, "nvram", NULL); + efx_fill_test(n++, strings, data, &tests->interrupt, + "core", 0, "interrupt", NULL); + + /* Event queues */ + efx_for_each_channel(channel, efx) { + efx_fill_test(n++, strings, data, + &tests->eventq_dma[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.dma", NULL); + efx_fill_test(n++, strings, data, + &tests->eventq_int[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.int", NULL); + } + + efx_fill_test(n++, strings, data, &tests->memory, + "core", 0, "memory", NULL); + efx_fill_test(n++, strings, data, &tests->registers, + "core", 0, "registers", NULL); + + for (i = 0; true; ++i) { + const char *name; + + EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); + name = efx_siena_mcdi_phy_test_name(efx, i); + if (name == NULL) + break; + + efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL); + } + + /* Loopback tests */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(efx->loopback_modes & (1 << mode))) + continue; + n = efx_fill_loopback_test(efx, + &tests->loopback[mode], mode, n, + strings, data); + } + + return n; +} + +void efx_siena_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_self_tests *efx_tests; + bool already_up; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + + if (efx->state != STATE_READY) { + rc = -EBUSY; + goto out; + } + + netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up) { + rc = dev_open(efx->net_dev, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed opening device.\n"); + goto out; + } + } + + rc = efx_siena_selftest(efx, efx_tests, test->flags); + + if (!already_up) + dev_close(efx->net_dev); + + netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", + rc == 0 ? "passed" : "failed", + (test->flags & ETH_TEST_FL_OFFLINE) ? 
"off" : "on"); + +out: + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + +static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) +{ + size_t n_stats = 0; + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { + n_stats++; + if (strings != NULL) { + snprintf(strings, ETH_GSTRING_LEN, + "tx-%u.tx_packets", + channel->tx_queue[0].queue / + EFX_MAX_TXQ_PER_CHANNEL); + + strings += ETH_GSTRING_LEN; + } + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + n_stats++; + if (strings != NULL) { + snprintf(strings, ETH_GSTRING_LEN, + "rx-%d.rx_packets", channel->channel); + strings += ETH_GSTRING_LEN; + } + } + } + if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { + unsigned short xdp; + + for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { + n_stats++; + if (strings) { + snprintf(strings, ETH_GSTRING_LEN, + "tx-xdp-cpu-%hu.tx_packets", xdp); + strings += ETH_GSTRING_LEN; + } + } + } + + return n_stats; +} + +int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + switch (string_set) { + case ETH_SS_STATS: + return efx->type->describe_stats(efx, NULL) + + EFX_ETHTOOL_SW_STAT_COUNT + + efx_describe_per_queue_stats(efx, NULL) + + efx_siena_ptp_describe_stats(efx, NULL); + case ETH_SS_TEST: + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); + default: + return -EINVAL; + } +} + +void efx_siena_ethtool_get_strings(struct net_device *net_dev, + u32 string_set, u8 *strings) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int i; + + switch (string_set) { + case ETH_SS_STATS: + strings += (efx->type->describe_stats(efx, strings) * + ETH_GSTRING_LEN); + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) + strlcpy(strings + i * ETH_GSTRING_LEN, + efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); + strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; + strings += (efx_describe_per_queue_stats(efx, strings) * + ETH_GSTRING_LEN); + efx_siena_ptp_describe_stats(efx, strings); + break; + case ETH_SS_TEST: + efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); + break; + default: + /* No other string sets */ + break; + } +} + +void efx_siena_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + const struct efx_sw_stat_desc *stat; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int i; + + spin_lock_bh(&efx->stats_lock); + + /* Get NIC statistics */ + data += efx->type->update_stats(efx, data, NULL); + + /* Get software statistics */ + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) { + stat = &efx_sw_stat_desc[i]; + switch (stat->source) { + case EFX_ETHTOOL_STAT_SOURCE_nic: + data[i] = stat->get_stat((void *)efx + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_channel: + data[i] = 0; + efx_for_each_channel(channel, efx) + data[i] += stat->get_stat((void *)channel + + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_tx_queue: + data[i] = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + data[i] += + stat->get_stat((void *)tx_queue + + stat->offset); + } + break; + } + } + data += EFX_ETHTOOL_SW_STAT_COUNT; + + spin_unlock_bh(&efx->stats_lock); + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { 
+ *data = 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + *data += tx_queue->tx_packets; + } + data++; + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + *data = 0; + efx_for_each_channel_rx_queue(rx_queue, channel) { + *data += rx_queue->rx_packets; + } + data++; + } + } + if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { + int xdp; + + for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { + data[0] = efx->xdp_tx_queues[xdp]->tx_packets; + data++; + } + } + + efx_siena_ptp_update_stats(efx, data); +} + +/* This must be called with rtnl_lock held. */ +int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_link_state *link_state = &efx->link_state; + + mutex_lock(&efx->mac_lock); + efx_siena_mcdi_phy_get_link_ksettings(efx, cmd); + mutex_unlock(&efx->mac_lock); + + /* Both MACs support pause frames (bidirectional and respond-only) */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); + + if (LOOPBACK_INTERNAL(efx)) { + cmd->base.speed = link_state->speed; + cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + } + + return 0; +} + +/* This must be called with rtnl_lock held. */ +int +efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + /* GMAC does not support 1000Mbps HD */ + if ((cmd->base.speed == SPEED_1000) && + (cmd->base.duplex != DUPLEX_FULL)) { + netif_dbg(efx, drv, efx->net_dev, + "rejecting unsupported 1000Mbps HD setting\n"); + return -EINVAL; + } + + mutex_lock(&efx->mac_lock); + rc = efx_siena_mcdi_phy_set_link_ksettings(efx, cmd); + mutex_unlock(&efx->mac_lock); + return rc; +} + +int efx_siena_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_siena_mcdi_phy_get_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +int efx_siena_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_siena_mcdi_phy_set_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/* MAC address mask including only I/G bit */ +static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; + +#define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define IP_PROTO_FULL_MASK 0xFF +#define PORT_FULL_MASK ((__force __be16)~0) +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) + +static inline void ip6_fill_mask(__be32 *mask) +{ + mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0; +} + +static int efx_ethtool_get_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule, + u32 *rss_context) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = 
&rule->m_u.usr_ip6_spec; + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + struct efx_filter_spec spec; + int rc; + + rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, + rule->location, &spec); + if (rc) + return rc; + + if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + rule->ring_cookie = RX_CLS_FLOW_DISC; + else + rule->ring_cookie = spec.dmaq_id; + + if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IP) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? + TCP_V4_FLOW : UDP_V4_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + ip_entry->ip4dst = spec.loc_host[0]; + ip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + ip_entry->ip4src = spec.rem_host[0]; + ip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip_entry->pdst = spec.loc_port; + ip_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip_entry->psrc = spec.rem_port; + ip_mask->psrc = PORT_FULL_MASK; + } + } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IPV6) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
+ TCP_V6_FLOW : UDP_V6_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(ip6_entry->ip6dst, spec.loc_host, + sizeof(ip6_entry->ip6dst)); + ip6_fill_mask(ip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(ip6_entry->ip6src, spec.rem_host, + sizeof(ip6_entry->ip6src)); + ip6_fill_mask(ip6_mask->ip6src); + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip6_entry->pdst = spec.loc_port; + ip6_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip6_entry->psrc = spec.rem_port; + ip6_mask->psrc = PORT_FULL_MASK; + } + } else if (!(spec.match_flags & + ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | + EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_OUTER_VID))) { + rule->flow_type = ETHER_FLOW; + if (spec.match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { + ether_addr_copy(mac_entry->h_dest, spec.loc_mac); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) + eth_broadcast_addr(mac_mask->h_dest); + else + ether_addr_copy(mac_mask->h_dest, + mac_addr_ig_mask); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { + ether_addr_copy(mac_entry->h_source, spec.rem_mac); + eth_broadcast_addr(mac_mask->h_source); + } + if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + mac_entry->h_proto = spec.ether_type; + mac_mask->h_proto = ETHER_TYPE_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV4_USER_FLOW; + uip_entry->ip_ver = ETH_RX_NFC_IP4; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip_mask->proto = IP_PROTO_FULL_MASK; + uip_entry->proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + uip_entry->ip4dst = spec.loc_host[0]; + uip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + uip_entry->ip4src = spec.rem_host[0]; + uip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IPV6) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV6_USER_FLOW; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip6_mask->l4_proto = IP_PROTO_FULL_MASK; + uip6_entry->l4_proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(uip6_entry->ip6dst, spec.loc_host, + sizeof(uip6_entry->ip6dst)); + ip6_fill_mask(uip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(uip6_entry->ip6src, spec.rem_host, + sizeof(uip6_entry->ip6src)); + ip6_fill_mask(uip6_mask->ip6src); + } + } else { + /* The above should handle all filters that we insert */ + WARN_ON(1); + return -EINVAL; + } + + if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { + rule->flow_type |= FLOW_EXT; + rule->h_ext.vlan_tci = spec.outer_vid; + rule->m_ext.vlan_tci = htons(0xfff); + } + + if (spec.flags & EFX_FILTER_FLAG_RX_RSS) { + rule->flow_type |= FLOW_RSS; + *rss_context = spec.rss_context; + } + + return rc; +} + +int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct efx_nic *efx = 
netdev_priv(net_dev); + u32 rss_context = 0; + s32 rc = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = efx->n_rx_channels; + return 0; + + case ETHTOOL_GRXFH: { + struct efx_rss_context *ctx = &efx->rss_context; + __u64 data; + + mutex_lock(&efx->rss_lock); + if (info->flow_type & FLOW_RSS && info->rss_context) { + ctx = efx_siena_find_rss_context_entry(efx, + info->rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + data = 0; + if (!efx_rss_active(ctx)) /* No RSS */ + goto out_setdata_unlock; + + switch (info->flow_type & ~FLOW_RSS) { + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (ctx->rx_hash_udp_4tuple) + data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST); + else + data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V4_FLOW: + case TCP_V6_FLOW: + data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST); + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + data = RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } +out_setdata_unlock: + info->data = data; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; + } + + case ETHTOOL_GRXCLSRLCNT: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + info->data |= RX_CLS_LOC_SPECIAL; + info->rule_cnt = + efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); + return 0; + + case ETHTOOL_GRXCLSRULE: + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context); + if (rc < 0) + return rc; + if (info->fs.flow_type & FLOW_RSS) + info->rss_context = rss_context; + return 0; + + case ETHTOOL_GRXCLSRLALL: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, + rule_locs, info->rule_cnt); + if (rc < 0) + return rc; + info->rule_cnt = rc; + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static inline bool ip6_mask_is_full(__be32 mask[4]) +{ + return !~(mask[0] & mask[1] & mask[2] & mask[3]); +} + +static inline bool ip6_mask_is_empty(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + +static int efx_ethtool_set_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule, + u32 rss_context) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; + u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS); + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + enum efx_filter_flags flags = 0; + struct efx_filter_spec spec; + int rc; + + /* Check that user wants us to choose the location */ + if (rule->location != RX_CLS_LOC_ANY) + return -EINVAL; + + /* Range-check ring_cookie */ + if (rule->ring_cookie >= efx->n_rx_channels && + rule->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + /* Check for unsupported extensions */ + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_etype || rule->m_ext.data[0] || + rule->m_ext.data[1])) + 
return -EINVAL; + + if (efx->rx_scatter) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + if (rule->flow_type & FLOW_RSS) + flags |= EFX_FILTER_FLAG_RX_RSS; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags, + (rule->ring_cookie == RX_CLS_FLOW_DISC) ? + EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); + + if (rule->flow_type & FLOW_RSS) + spec.rss_context = rss_context; + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IP); + spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP + : IPPROTO_UDP; + if (ip_mask->ip4dst) { + if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = ip_entry->ip4dst; + } + if (ip_mask->ip4src) { + if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = ip_entry->ip4src; + } + if (ip_mask->pdst) { + if (ip_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip_entry->pdst; + } + if (ip_mask->psrc) { + if (ip_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip_entry->psrc; + } + if (ip_mask->tos) + return -EINVAL; + break; + + case TCP_V6_FLOW: + case UDP_V6_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IPV6); + spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP + : IPPROTO_UDP; + if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { + if (!ip6_mask_is_full(ip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(ip6_mask->ip6src)) { + if (!ip6_mask_is_full(ip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (ip6_mask->pdst) { + if (ip6_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip6_entry->pdst; + } + if (ip6_mask->psrc) { + if (ip6_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip6_entry->psrc; + } + if (ip6_mask->tclass) + return -EINVAL; + break; + + case IPV4_USER_FLOW: + if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || + uip_entry->ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IP); + if (uip_mask->ip4dst) { + if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = uip_entry->ip4dst; + } + if (uip_mask->ip4src) { + if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = uip_entry->ip4src; + } + if (uip_mask->proto) { + if (uip_mask->proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip_entry->proto; + } + break; + + case IPV6_USER_FLOW: + if (uip6_mask->l4_4_bytes || uip6_mask->tclass) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IPV6); + if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { + if (!ip6_mask_is_full(uip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags 
|= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(uip6_mask->ip6src)) { + if (!ip6_mask_is_full(uip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (uip6_mask->l4_proto) { + if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip6_entry->l4_proto; + } + break; + + case ETHER_FLOW: + if (!is_zero_ether_addr(mac_mask->h_dest)) { + if (ether_addr_equal(mac_mask->h_dest, + mac_addr_ig_mask)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + else if (is_broadcast_ether_addr(mac_mask->h_dest)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; + else + return -EINVAL; + ether_addr_copy(spec.loc_mac, mac_entry->h_dest); + } + if (!is_zero_ether_addr(mac_mask->h_source)) { + if (!is_broadcast_ether_addr(mac_mask->h_source)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; + ether_addr_copy(spec.rem_mac, mac_entry->h_source); + } + if (mac_mask->h_proto) { + if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = mac_entry->h_proto; + } + break; + + default: + return -EINVAL; + } + + if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { + if (rule->m_ext.vlan_tci != htons(0xfff)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec.outer_vid = rule->h_ext.vlan_tci; + } + + rc = efx_filter_insert_filter(efx, &spec, true); + if (rc < 0) + return rc; + + rule->location = rc; + return 0; +} + +int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXCLSRLINS: + return efx_ethtool_set_class_rule(efx, &info->fs, + info->rss_context); + + case ETHTOOL_SRXCLSRLDEL: + return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, + info->fs.location); + + default: + return -EOPNOTSUPP; + } +} + +u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->n_rx_channels == 1) + return 0; + return ARRAY_SIZE(efx->rss_context.rx_indir_table); +} + +u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + +int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, efx->rss_context.rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + if (key) + memcpy(key, efx->rss_context.rx_hash_key, + efx->type->rx_hash_key_size); + return 0; +} + +int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if (!indir && !key) + return 0; + + if (!key) + key = efx->rss_context.rx_hash_key; + if (!indir) + indir = efx->rss_context.rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, 
indir, key); +} + +int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + int rc = 0; + + if (!efx->type->rx_pull_rss_context_config) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_siena_find_rss_context_entry(efx, rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + rc = efx->type->rx_pull_rss_context_config(efx, ctx); + if (rc) + goto out_unlock; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table)); + if (key) + memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size); +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + bool allocated = false; + int rc; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + if (delete) { + /* alloc + delete == Nothing to do */ + rc = -EINVAL; + goto out_unlock; + } + ctx = efx_siena_alloc_rss_context_entry(efx); + if (!ctx) { + rc = -ENOMEM; + goto out_unlock; + } + ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + /* Initialise indir table and key to defaults */ + efx_siena_set_default_rx_indir_table(efx, ctx); + netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); + allocated = true; + } else { + ctx = efx_siena_find_rss_context_entry(efx, *rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + if (delete) { + /* delete this context */ + rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); + if (!rc) + efx_siena_free_rss_context_entry(ctx); + goto out_unlock; + } + + if (!key) + key = ctx->rx_hash_key; + if (!indir) + indir = ctx->rx_indir_table; + + rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); + if (rc && allocated) + efx_siena_free_rss_context_entry(ctx); + else + *rss_context = ctx->user_id; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->map_reset_flags(flags); + if (rc < 0) + return rc; + + return efx_siena_reset(efx, rc); +} + +int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + mutex_lock(&efx->mac_lock); + ret = efx_siena_mcdi_phy_get_module_eeprom(efx, ee, data); + mutex_unlock(&efx->mac_lock); + + return ret; +} + +int efx_siena_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + mutex_lock(&efx->mac_lock); + ret = efx_siena_mcdi_phy_get_module_info(efx, modinfo); + mutex_unlock(&efx->mac_lock); + + return ret; +} diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h new file mode 100644 index 0000000000..04b375dc68 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h @@ -0,0 +1,60 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_ETHTOOL_COMMON_H +#define EFX_ETHTOOL_COMMON_H + +void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info); +u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev); +void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); +void efx_siena_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data); +void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set); +void efx_siena_ethtool_get_strings(struct net_device *net_dev, u32 string_set, + u8 *strings); +void efx_siena_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats __always_unused, + u64 *data); +int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *out); +int efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *settings); +int efx_siena_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_siena_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); +int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info); +u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev); +u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev); +int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc); +int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, + const u32 *indir, const u8 *key, const u8 hfunc); +int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context); +int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete); +int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags); +int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data); +int efx_siena_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo); +#endif diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c new file mode 100644 index 0000000000..89ccd65c97 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/farch.c @@ -0,0 +1,2988 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ */ + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/crc32.h> +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "rx_common.h" +#include "tx_common.h" +#include "nic.h" +#include "farch_regs.h" +#include "sriov.h" +#include "siena_sriov.h" +#include "io.h" +#include "workarounds.h" + +/* Falcon-architecture (SFC9000-family) support */ + +/************************************************************************** + * + * Configurable values + * + ************************************************************************** + */ + +/* This is set to 16 for a good reason. In summary, if larger than + * 16, the descriptor cache holds more than a default socket + * buffer's worth of packets (for UDP we can only have at most one + * socket buffer's worth outstanding). This combined with the fact + * that we only get 1 TX event per descriptor cache means the NIC + * goes idle. + */ +#define TX_DC_ENTRIES 16 +#define TX_DC_ENTRIES_ORDER 1 + +#define RX_DC_ENTRIES 64 +#define RX_DC_ENTRIES_ORDER 3 + +/* If EFX_MAX_INT_ERRORS internal errors occur within + * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and + * disable it. + */ +#define EFX_INT_ERROR_EXPIRE 3600 +#define EFX_MAX_INT_ERRORS 5 + +/* Depth of RX flush request fifo */ +#define EFX_RX_FLUSH_COUNT 4 + +/* Driver generated events */ +#define _EFX_CHANNEL_MAGIC_TEST 0x000101 +#define _EFX_CHANNEL_MAGIC_FILL 0x000102 +#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 +#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 + +#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) +#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) + +#define EFX_CHANNEL_MAGIC_TEST(_channel) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) +#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ + efx_rx_queue_index(_rx_queue)) +#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ + efx_rx_queue_index(_rx_queue)) +#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ + (_tx_queue)->queue) + +static void efx_farch_magic_event(struct efx_channel *channel, u32 magic); + +/************************************************************************** + * + * Hardware access + * + **************************************************************************/ + +static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, + unsigned int index) +{ + efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, + value, index); +} + +static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, + const efx_oword_t *mask) +{ + return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || + ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); +} + +int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs) +{ + unsigned address = 0; + int i, j; + efx_oword_t mask, imask, original, reg, buf; + + for (i = 0; i < n_regs; ++i) { + address = regs[i].address; + mask = imask = regs[i].mask; + EFX_INVERT_OWORD(imask); + + efx_reado(efx, &original, address); + + /* bit sweep on and off */ + for (j = 0; j < 128; j++) { + if (!EFX_EXTRACT_OWORD32(mask, j, j)) + continue; + + /* Test this testable bit can be set in isolation */ + EFX_AND_OWORD(reg, original, mask); + EFX_SET_OWORD32(reg, j, j, 1); + + efx_writeo(efx, &reg, address); + efx_reado(efx, &buf, address); + + if (efx_masked_compare_oword(&reg, &buf, &mask)) + goto fail; + + 
/* Test this testable bit can be cleared in isolation */ + EFX_OR_OWORD(reg, original, mask); + EFX_SET_OWORD32(reg, j, j, 0); + + efx_writeo(efx, &reg, address); + efx_reado(efx, &buf, address); + + if (efx_masked_compare_oword(&reg, &buf, &mask)) + goto fail; + } + + efx_writeo(efx, &original, address); + } + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, + "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT + " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), + EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); + return -EIO; +} + +/************************************************************************** + * + * Special buffer handling + * Special buffers are used for event queues and the TX and RX + * descriptor rings. + * + *************************************************************************/ + +/* + * Initialise a special buffer + * + * This will define a buffer (previously allocated via + * efx_alloc_special_buffer()) in the buffer table, allowing + * it to be used for event queues, descriptor rings etc. + */ +static void +efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_qword_t buf_desc; + unsigned int index; + dma_addr_t dma_addr; + int i; + + EFX_WARN_ON_PARANOID(!buffer->buf.addr); + + /* Write buffer descriptors to NIC */ + for (i = 0; i < buffer->entries; i++) { + index = buffer->index + i; + dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); + netif_dbg(efx, probe, efx->net_dev, + "mapping special buffer %d at %llx\n", + index, (unsigned long long)dma_addr); + EFX_POPULATE_QWORD_3(buf_desc, + FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + efx_write_buf_tbl(efx, &buf_desc, index); + } +} + +/* Unmaps a buffer and clears the buffer table entries */ +static void +efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_oword_t buf_tbl_upd; + unsigned int start = buffer->index; + unsigned int end = (buffer->index + buffer->entries - 1); + + if (!buffer->entries) + return; + + netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", + buffer->index, buffer->index + buffer->entries - 1); + + EFX_POPULATE_OWORD_4(buf_tbl_upd, + FRF_AZ_BUF_UPD_CMD, 0, + FRF_AZ_BUF_CLR_CMD, 1, + FRF_AZ_BUF_CLR_END_ID, end, + FRF_AZ_BUF_CLR_START_ID, start); + efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); +} + +/* + * Allocate a new special buffer + * + * This allocates memory for a new buffer, clears it and allocates a + * new buffer ID range. It does not write into the buffer table. + * + * This call will allocate 4KB buffers, since 8KB buffers can't be + * used for event queues and descriptor rings. 
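+ * (e.g. a 512-entry descriptor ring of 8-byte qwords is exactly one + * 4KB buffer, and so consumes a single buffer table entry.)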
+ */ +static int efx_alloc_special_buffer(struct efx_nic *efx, + struct efx_special_buffer *buffer, + unsigned int len) +{ +#ifdef CONFIG_SFC_SIENA_SRIOV + struct siena_nic_data *nic_data = efx->nic_data; +#endif + len = ALIGN(len, EFX_BUF_SIZE); + + if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) + return -ENOMEM; + buffer->entries = len / EFX_BUF_SIZE; + BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); + + /* Select new buffer ID */ + buffer->index = efx->next_buffer_table; + efx->next_buffer_table += buffer->entries; +#ifdef CONFIG_SFC_SIENA_SRIOV + BUG_ON(efx_siena_sriov_enabled(efx) && + nic_data->vf_buftbl_base < efx->next_buffer_table); +#endif + + netif_dbg(efx, probe, efx->net_dev, + "allocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->buf.dma_addr, len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); + + return 0; +} + +static void +efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + if (!buffer->buf.addr) + return; + + netif_dbg(efx, hw, efx->net_dev, + "deallocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->buf.dma_addr, buffer->buf.len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); + + efx_siena_free_buffer(efx, &buffer->buf); + buffer->entries = 0; +} + +/************************************************************************** + * + * TX path + * + **************************************************************************/ + +/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ +static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned write_ptr; + efx_dword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(tx_queue->efx, &reg, + FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); +} + +/* Write pointer and first descriptor for TX descriptor ring */ +static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, + const efx_qword_t *txd) +{ + unsigned write_ptr; + efx_oword_t reg; + + BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); + BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, + FRF_AZ_TX_DESC_WPTR, write_ptr); + reg.qword[0] = *txd; + efx_writeo_page(tx_queue->efx, &reg, + FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); +} + + +/* For each entry inserted into the software descriptor ring, create a + * descriptor in the hardware TX descriptor ring (in host memory), and + * write a doorbell. 
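+ * Where the doorbell can accept an inline descriptor (typically when the + * queue was empty), the first descriptor is pushed together with the + * write pointer; otherwise only the write pointer is notified.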
+ */ +void efx_farch_tx_write(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + efx_qword_t *txd; + unsigned write_ptr; + unsigned old_write_count = tx_queue->write_count; + + tx_queue->xmit_pending = false; + if (unlikely(tx_queue->write_count == tx_queue->insert_count)) + return; + + do { + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + txd = efx_tx_desc(tx_queue, write_ptr); + ++tx_queue->write_count; + + EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION); + + /* Create TX descriptor ring entry */ + BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); + EFX_POPULATE_QWORD_4(*txd, + FSF_AZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, + FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, + FSF_AZ_TX_KER_BUF_REGION, 0, + FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); + } while (tx_queue->write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { + txd = efx_tx_desc(tx_queue, + old_write_count & tx_queue->ptr_mask); + efx_farch_push_tx_desc(tx_queue, txd); + ++tx_queue->pushes; + } else { + efx_farch_notify_tx_desc(tx_queue); + } +} + +unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len) +{ + /* Don't cross 4K boundaries with descriptors. */ + unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1; + + len = min(limit, len); + + return len; +} + + +/* Allocate hardware resources for a TX queue */ +int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned entries; + + tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) | + ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0); + entries = tx_queue->ptr_mask + 1; + return efx_alloc_special_buffer(efx, &tx_queue->txd, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_tx_init(struct efx_tx_queue *tx_queue) +{ + int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM; + struct efx_nic *efx = tx_queue->efx; + efx_oword_t reg; + + /* Pin TX descriptor ring */ + efx_init_special_buffer(efx, &tx_queue->txd); + + /* Push TX descriptor ring to card */ + EFX_POPULATE_OWORD_10(reg, + FRF_AZ_TX_DESCQ_EN, 1, + FRF_AZ_TX_ISCSI_DDIG_EN, 0, + FRF_AZ_TX_ISCSI_HDIG_EN, 0, + FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, + FRF_AZ_TX_DESCQ_EVQ_ID, + tx_queue->channel->channel, + FRF_AZ_TX_DESCQ_OWNER_ID, 0, + FRF_AZ_TX_DESCQ_LABEL, tx_queue->label, + FRF_AZ_TX_DESCQ_SIZE, + __ffs(tx_queue->txd.entries), + FRF_AZ_TX_DESCQ_TYPE, 0, + FRF_BZ_TX_NON_IP_DROP_DIS, 1); + + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum); + + efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base, + tx_queue->queue); + + EFX_POPULATE_OWORD_1(reg, + FRF_BZ_TX_PACE, + (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? 
+ FFE_BZ_TX_PACE_OFF : + FFE_BZ_TX_PACE_RESERVED); + efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue); + + tx_queue->tso_version = 1; +} + +static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_flush_descq; + + WARN_ON(atomic_read(&tx_queue->flush_outstanding)); + atomic_set(&tx_queue->flush_outstanding, 1); + + EFX_POPULATE_OWORD_2(tx_flush_descq, + FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); + efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); +} + +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_desc_ptr; + + /* Remove TX descriptor ring from card */ + EFX_ZERO_OWORD(tx_desc_ptr); + efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, + tx_queue->queue); + + /* Unpin TX descriptor ring */ + efx_fini_special_buffer(efx, &tx_queue->txd); +} + +/* Free buffers backing TX queue */ +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) +{ + efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); +} + +/************************************************************************** + * + * RX path + * + **************************************************************************/ + +/* This creates an entry in the RX descriptor queue */ +static inline void +efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_rx_buffer *rx_buf; + efx_qword_t *rxd; + + rxd = efx_rx_desc(rx_queue, index); + rx_buf = efx_rx_buffer(rx_queue, index); + EFX_POPULATE_QWORD_3(*rxd, + FSF_AZ_RX_KER_BUF_SIZE, + rx_buf->len - + rx_queue->efx->type->rx_buffer_padding, + FSF_AZ_RX_KER_BUF_REGION, 0, + FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); +} + +/* This writes to the RX_DESC_WPTR register for the specified receive + * descriptor ring. + */ +void efx_farch_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_dword_t reg; + unsigned write_ptr; + + while (rx_queue->notified_count != rx_queue->added_count) { + efx_farch_build_rx_desc( + rx_queue, + rx_queue->notified_count & rx_queue->ptr_mask); + ++rx_queue->notified_count; + } + + wmb(); + write_ptr = rx_queue->added_count & rx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, + efx_rx_queue_index(rx_queue)); +} + +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned entries; + + entries = rx_queue->ptr_mask + 1; + return efx_alloc_special_buffer(efx, &rx_queue->rxd, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_rx_init(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + bool jumbo_en; + + /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. 
*/ + jumbo_en = efx->rx_scatter; + + netif_dbg(efx, hw, efx->net_dev, + "RX queue %d ring in special buffers %d-%d\n", + efx_rx_queue_index(rx_queue), rx_queue->rxd.index, + rx_queue->rxd.index + rx_queue->rxd.entries - 1); + + rx_queue->scatter_n = 0; + + /* Pin RX descriptor ring */ + efx_init_special_buffer(efx, &rx_queue->rxd); + + /* Push RX descriptor ring to card */ + EFX_POPULATE_OWORD_10(rx_desc_ptr, + FRF_AZ_RX_ISCSI_DDIG_EN, true, + FRF_AZ_RX_ISCSI_HDIG_EN, true, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, + FRF_AZ_RX_DESCQ_EVQ_ID, + efx_rx_queue_channel(rx_queue)->channel, + FRF_AZ_RX_DESCQ_OWNER_ID, 0, + FRF_AZ_RX_DESCQ_LABEL, + efx_rx_queue_index(rx_queue), + FRF_AZ_RX_DESCQ_SIZE, + __ffs(rx_queue->rxd.entries), + FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , + FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, + FRF_AZ_RX_DESCQ_EN, 1); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); +} + +static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_oword_t rx_flush_descq; + + EFX_POPULATE_OWORD_2(rx_flush_descq, + FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_RX_FLUSH_DESCQ, + efx_rx_queue_index(rx_queue)); + efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); +} + +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + + /* Remove RX descriptor ring from card */ + EFX_ZERO_OWORD(rx_desc_ptr); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); + + /* Unpin RX descriptor ring */ + efx_fini_special_buffer(efx, &rx_queue->rxd); +} + +/* Free buffers backing RX queue */ +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) +{ + efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); +} + +/************************************************************************** + * + * Flush handling + * + **************************************************************************/ + +/* efx_farch_flush_queues() must be woken up when all flushes are completed, + * or more RX flushes can be kicked off. 
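+ * That is, wake when every queue has drained, or when a slot in the + * four-entry RX flush fifo frees up while flushes are still pending.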
+ */ +static bool efx_farch_flush_wake(struct efx_nic *efx) +{ + /* Ensure that all updates are visible to efx_farch_flush_queues() */ + smp_mb(); + + return (atomic_read(&efx->active_queues) == 0 || + (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT + && atomic_read(&efx->rxq_flush_pending) > 0)); +} + +static bool efx_check_tx_flush_complete(struct efx_nic *efx) +{ + bool i = true; + efx_oword_t txd_ptr_tbl; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_reado_table(efx, &txd_ptr_tbl, + FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); + if (EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_FLUSH) || + EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_EN)) { + netif_dbg(efx, hw, efx->net_dev, + "flush did not complete on TXQ %d\n", + tx_queue->queue); + i = false; + } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, + 1, 0)) { + /* The flush is complete, but we didn't + * receive a flush completion event + */ + netif_dbg(efx, hw, efx->net_dev, + "flush complete on TXQ %d, so drain " + "the queue\n", tx_queue->queue); + /* Don't need to increment active_queues as it + * has already been incremented for the queues + * which did not drain + */ + efx_farch_magic_event(channel, + EFX_CHANNEL_MAGIC_TX_DRAIN( + tx_queue)); + } + } + } + + return i; +} + +/* Flush all the transmit queues, and continue flushing receive queues until + * they're all flushed. Wait for the DRAIN events to be received so that there + * are no more RX and TX events left on any channel. */ +static int efx_farch_do_flush(struct efx_nic *efx) +{ + unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int rc = 0; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_farch_flush_tx_queue(tx_queue); + } + efx_for_each_channel_rx_queue(rx_queue, channel) { + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } + } + + while (timeout && atomic_read(&efx->active_queues) > 0) { + /* If SRIOV is enabled, then offload receive queue flushing to + * the firmware (though we will still have to poll for + * completion). If that fails, fall back to the old scheme. 
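+ * (efx_siena_mcdi_flush_rxqs() asks the MC to flush all pending RX + * queues in a single MCDI request.)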
+ */ + if (efx_siena_sriov_enabled(efx)) { + rc = efx_siena_mcdi_flush_rxqs(efx); + if (!rc) + goto wait; + } + + /* The hardware supports four concurrent rx flushes, each of + * which may need to be retried if there is an outstanding + * descriptor fetch + */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + if (atomic_read(&efx->rxq_flush_outstanding) >= + EFX_RX_FLUSH_COUNT) + break; + + if (rx_queue->flush_pending) { + rx_queue->flush_pending = false; + atomic_dec(&efx->rxq_flush_pending); + atomic_inc(&efx->rxq_flush_outstanding); + efx_farch_flush_rx_queue(rx_queue); + } + } + } + + wait: + timeout = wait_event_timeout(efx->flush_wq, + efx_farch_flush_wake(efx), + timeout); + } + + if (atomic_read(&efx->active_queues) && + !efx_check_tx_flush_complete(efx)) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " + "(rx %d+%d)\n", atomic_read(&efx->active_queues), + atomic_read(&efx->rxq_flush_outstanding), + atomic_read(&efx->rxq_flush_pending)); + rc = -ETIMEDOUT; + + atomic_set(&efx->active_queues, 0); + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + } + + return rc; +} + +int efx_farch_fini_dmaq(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc = 0; + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + /* Only perform flush if DMA is enabled */ + if (efx->pci_dev->is_busmaster) { + efx->type->prepare_flush(efx); + rc = efx_farch_do_flush(efx); + efx->type->finish_flush(efx); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_farch_rx_fini(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_farch_tx_fini(tx_queue); + } + } + + return rc; +} + +/* Reset queue and flush accounting after FLR + * + * One possible cause of FLR recovery is that DMA may be failing (eg. if bus + * mastering was disabled), in which case we don't receive (RXQ) flush + * completion events. This means that efx->rxq_flush_outstanding remained at 4 + * after the FLR; also, efx->active_queues was non-zero (as no flush completion + * events were received, and we didn't go through efx_check_tx_flush_complete()) + * If we don't fix this up, on the next call to efx_siena_realloc_channels() we + * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit + * of 4 for batched flush requests; and the efx->active_queues gets messed up + * because we keep incrementing for the newly initialised queues, but it never + * went to zero previously. Then we get a timeout every time we try to restart + * the queues, as it doesn't go back to zero when we should be flushing the + * queues. + */ +void efx_farch_finish_flr(struct efx_nic *efx) +{ + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + atomic_set(&efx->active_queues, 0); +} + + +/************************************************************************** + * + * Event queue processing + * Event queues are processed by per-channel tasklets. + * + **************************************************************************/ + +/* Update a channel's event queue's read pointer (RPTR) register + * + * This writes the EVQ_RPTR_REG register for the specified channel's + * event queue. 
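+ * The pointer is masked with eventq_mask, so the value written always + * names a valid slot in the event ring.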
+ */ +void efx_farch_ev_read_ack(struct efx_channel *channel) +{ + efx_dword_t reg; + struct efx_nic *efx = channel->efx; + + EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, + channel->eventq_read_ptr & channel->eventq_mask); + + /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size + * of 4 bytes, but it is really 16 bytes just like later revisions. + */ + efx_writed(efx, &reg, + efx->type->evq_rptr_tbl_base + + FR_BZ_EVQ_RPTR_STEP * channel->channel); +} + +/* Use HW to insert a SW defined event */ +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event) +{ + efx_oword_t drv_ev_reg; + + BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || + FRF_AZ_DRV_EV_DATA_WIDTH != 64); + drv_ev_reg.u32[0] = event->u32[0]; + drv_ev_reg.u32[1] = event->u32[1]; + drv_ev_reg.u32[2] = 0; + drv_ev_reg.u32[3] = 0; + EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); + efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); +} + +static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) +{ + efx_qword_t event; + + EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, + FSE_AZ_EV_CODE_DRV_GEN_EV, + FSF_AZ_DRV_GEN_EV_MAGIC, magic); + efx_farch_generate_event(channel->efx, channel->channel, &event); +} + +/* Handle a transmit completion event + * + * The NIC batches TX completion events; the message we receive is of + * the form "complete all TX events up to this index". + */ +static void +efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) +{ + unsigned int tx_ev_desc_ptr; + unsigned int tx_ev_q_label; + struct efx_tx_queue *tx_queue; + struct efx_nic *efx = channel->efx; + + if (unlikely(READ_ONCE(efx->reset_pending))) + return; + + if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { + /* Transmit completion */ + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = channel->tx_queue + + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); + efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { + /* Rewrite the FIFO write pointer */ + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = channel->tx_queue + + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); + + netif_tx_lock(efx->net_dev); + efx_farch_notify_tx_desc(tx_queue); + netif_tx_unlock(efx->net_dev); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } else { + netif_err(efx, tx_err, efx->net_dev, + "channel %d unexpected TX event " + EFX_QWORD_FMT"\n", channel->channel, + EFX_QWORD_VAL(*event)); + } +} + +/* Detect errors included in the rx_evt_pkt_ok bit. 
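+ * Returns EFX_RX_PKT_DISCARD if the frame must be dropped (CRC error, + * truncation, to-be-discarded destination or pause frame), else 0.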
*/ +static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, + const efx_qword_t *event) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; + bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; + bool rx_ev_frm_trunc, rx_ev_tobe_disc; + bool rx_ev_other_err, rx_ev_pause_frm; + + rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); + rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); + rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); + rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); + rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); + rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); + rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); + + /* Every error apart from tobe_disc and pause_frm */ + rx_ev_other_err = (rx_ev_tcp_udp_chksum_err | + rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | + rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); + + /* Count errors that are not in MAC stats. Ignore expected + * checksum errors during self-test. */ + if (rx_ev_frm_trunc) + ++channel->n_rx_frm_trunc; + else if (rx_ev_tobe_disc) + ++channel->n_rx_tobe_disc; + else if (!efx->loopback_selftest) { + if (rx_ev_ip_hdr_chksum_err) + ++channel->n_rx_ip_hdr_chksum_err; + else if (rx_ev_tcp_udp_chksum_err) + ++channel->n_rx_tcp_udp_chksum_err; + } + + /* TOBE_DISC is expected on unicast mismatches; don't print out an + * error message. FRM_TRUNC indicates RXDP dropped the packet due + * to a FIFO overflow. + */ +#ifdef DEBUG + if (rx_ev_other_err && net_ratelimit()) { + netif_dbg(efx, rx_err, efx->net_dev, + " RX queue %d unexpected RX event " + EFX_QWORD_FMT "%s%s%s%s%s%s%s\n", + efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), + rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", + rx_ev_ip_hdr_chksum_err ? + " [IP_HDR_CHKSUM_ERR]" : "", + rx_ev_tcp_udp_chksum_err ? + " [TCP_UDP_CHKSUM_ERR]" : "", + rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", + rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", + rx_ev_tobe_disc ? " [TOBE_DISC]" : "", + rx_ev_pause_frm ? " [PAUSE]" : ""); + } +#else + (void) rx_ev_other_err; +#endif + + if (efx->net_dev->features & NETIF_F_RXALL) + /* don't discard frame for CRC error */ + rx_ev_eth_crc_err = false; + + /* The frame must be discarded if any of these are true. */ + return (rx_ev_eth_crc_err | rx_ev_frm_trunc | + rx_ev_tobe_disc | rx_ev_pause_frm) ? + EFX_RX_PKT_DISCARD : 0; +} + +/* Handle receive events that are not in-order. Return true if this + * can be handled as a partial packet discard, false if it's more + * serious. 
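+ * The recoverable case is a completion naming the last descriptor of the + * scatter sequence already in progress, counted as n_rx_nodesc_trunc.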
+ */ +static bool +efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + unsigned expected, dropped; + + if (rx_queue->scatter_n && + index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & + rx_queue->ptr_mask)) { + ++channel->n_rx_nodesc_trunc; + return true; + } + + expected = rx_queue->removed_count & rx_queue->ptr_mask; + dropped = (index - expected) & rx_queue->ptr_mask; + netif_info(efx, rx_err, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, index, expected); + + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); + return false; +} + +/* Handle a packet received event + * + * The NIC gives a "discard" flag if it's a unicast packet with the + * wrong destination address + * Also "is multicast" and "matches multicast filter" flags can be used to + * discard non-matching multicast packets. + */ +static void +efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) +{ + unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; + unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; + unsigned expected_ptr; + bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; + u16 flags; + struct efx_rx_queue *rx_queue; + struct efx_nic *efx = channel->efx; + + if (unlikely(READ_ONCE(efx->reset_pending))) + return; + + rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); + rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); + WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != + channel->channel); + + rx_queue = efx_channel_get_rx_queue(channel); + + rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); + expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & + rx_queue->ptr_mask); + + /* Check for partial drops and other errors */ + if (unlikely(rx_ev_desc_ptr != expected_ptr) || + unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { + if (rx_ev_desc_ptr != expected_ptr && + !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) + return; + + /* Discard all pending fragments */ + if (rx_queue->scatter_n) { + efx_siena_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + } + + /* Return if there is no new fragment */ + if (rx_ev_desc_ptr != expected_ptr) + return; + + /* Discard new fragment if not SOP */ + if (!rx_ev_sop) { + efx_siena_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + 1, 0, EFX_RX_PKT_DISCARD); + ++rx_queue->removed_count; + return; + } + } + + ++rx_queue->scatter_n; + if (rx_ev_cont) + return; + + rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); + rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); + + if (likely(rx_ev_pkt_ok)) { + /* If packet is marked as OK then we can rely on the + * hardware checksum and classification. 
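+ * e.g. an IPV4V6_TCP header type falls through the switch below and + * sets both EFX_RX_PKT_TCP and EFX_RX_PKT_CSUMMED.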
+ */ + flags = 0; + switch (rx_ev_hdr_type) { + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: + flags |= EFX_RX_PKT_TCP; + fallthrough; + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: + flags |= EFX_RX_PKT_CSUMMED; + fallthrough; + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: + case FSE_AZ_RX_EV_HDR_TYPE_OTHER: + break; + } + } else { + flags = efx_farch_handle_rx_not_ok(rx_queue, event); + } + + /* Detect multicast packets that didn't match the filter */ + rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); + if (rx_ev_mcast_pkt) { + unsigned int rx_ev_mcast_hash_match = + EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); + + if (unlikely(!rx_ev_mcast_hash_match)) { + ++channel->n_rx_mcast_mismatch; + flags |= EFX_RX_PKT_DISCARD; + } + } + + channel->irq_mod_score += 2; + + /* Handle received packet */ + efx_siena_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_ev_byte_cnt, flags); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; +} + +/* If this flush done event corresponds to a &struct efx_tx_queue, then + * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue + * of all transmit completions. + */ +static void +efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int qid; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) { + channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL); + tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL); + if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) + efx_farch_magic_event(tx_queue->channel, + EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); + } +} + +/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush + * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add + * the RX queue back to the mask of RX queues in need of flushing. + */ +static void +efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + int qid; + bool failed; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + if (qid >= efx->n_channels) + return; + channel = efx_get_channel(efx, qid); + if (!efx_channel_has_rx_queue(channel)) + return; + rx_queue = efx_channel_get_rx_queue(channel); + + if (failed) { + netif_info(efx, hw, efx->net_dev, + "RXQ %d flush retry\n", qid); + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } else { + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); + } + atomic_dec(&efx->rxq_flush_outstanding); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void +efx_farch_handle_drain_event(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + WARN_ON(atomic_read(&efx->active_queues) == 0); + atomic_dec(&efx->active_queues); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void efx_farch_handle_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue = + efx_channel_has_rx_queue(channel) ? 
+ efx_channel_get_rx_queue(channel) : NULL; + unsigned magic, code; + + magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); + code = _EFX_CHANNEL_MAGIC_CODE(magic); + + if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { + channel->event_test_cpu = raw_smp_processor_id(); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. Refill it here */ + efx_siena_fast_push_rx_descriptors(rx_queue, true); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { + efx_farch_handle_drain_event(channel); + } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { + efx_farch_handle_drain_event(channel); + } else { + netif_dbg(efx, hw, efx->net_dev, "channel %d received " + "generated event "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(*event)); + } +} + +static void +efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + unsigned int ev_sub_code; + unsigned int ev_sub_data; + + ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); + ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + + switch (ev_sub_code) { + case FSE_AZ_TX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_tx_flush_done(efx, event); +#ifdef CONFIG_SFC_SIENA_SRIOV + efx_siena_sriov_tx_flush_done(efx, event); +#endif + break; + case FSE_AZ_RX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_rx_flush_done(efx, event); +#ifdef CONFIG_SFC_SIENA_SRIOV + efx_siena_sriov_rx_flush_done(efx, event); +#endif + break; + case FSE_AZ_EVQ_INIT_DONE_EV: + netif_dbg(efx, hw, efx->net_dev, + "channel %d EVQ %d initialised\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_SRM_UPD_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d SRAM update done\n", channel->channel); + break; + case FSE_AZ_WAKE_UP_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RXQ %d wakeup event\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_TIMER_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RX queue %d timer expired\n", + channel->channel, ev_sub_data); + break; + case FSE_AA_RX_RECOVER_EV: + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen DRIVER RX_RESET event. " + "Resetting.\n", channel->channel); + atomic_inc(&efx->rx_reset); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); + break; + case FSE_BZ_RX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, rx_err, efx->net_dev, + "RX DMA Q %d reports descriptor fetch error." + " RX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } +#ifdef CONFIG_SFC_SIENA_SRIOV + else + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); +#endif + break; + case FSE_BZ_TX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, tx_err, efx->net_dev, + "TX DMA Q %d reports descriptor fetch error." 
+ " TX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } +#ifdef CONFIG_SFC_SIENA_SRIOV + else + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); +#endif + break; + default: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d unknown driver event code %d " + "data %04x\n", channel->channel, ev_sub_code, + ev_sub_data); + break; + } +} + +int efx_farch_ev_process(struct efx_channel *channel, int budget) +{ + struct efx_nic *efx = channel->efx; + unsigned int read_ptr; + efx_qword_t event, *p_event; + int ev_code; + int spent = 0; + + if (budget <= 0) + return spent; + + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr); + event = *p_event; + + if (!efx_event_present(&event)) + /* End of events */ + break; + + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d event is "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(event)); + + /* Clear this event by marking it all ones */ + EFX_SET_QWORD(*p_event); + + ++read_ptr; + + ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); + + switch (ev_code) { + case FSE_AZ_EV_CODE_RX_EV: + efx_farch_handle_rx_event(channel, &event); + if (++spent == budget) + goto out; + break; + case FSE_AZ_EV_CODE_TX_EV: + efx_farch_handle_tx_event(channel, &event); + break; + case FSE_AZ_EV_CODE_DRV_GEN_EV: + efx_farch_handle_generated_event(channel, &event); + break; + case FSE_AZ_EV_CODE_DRIVER_EV: + efx_farch_handle_driver_event(channel, &event); + break; +#ifdef CONFIG_SFC_SIENA_SRIOV + case FSE_CZ_EV_CODE_USER_EV: + efx_siena_sriov_event(channel, &event); + break; +#endif + case FSE_CZ_EV_CODE_MCDI_EV: + efx_siena_mcdi_process_event(channel, &event); + break; + case FSE_AZ_EV_CODE_GLOBAL_EV: + if (efx->type->handle_global_event && + efx->type->handle_global_event(channel, &event)) + break; + fallthrough; + default: + netif_err(channel->efx, hw, channel->efx->net_dev, + "channel %d unknown event type %d (data " + EFX_QWORD_FMT ")\n", channel->channel, + ev_code, EFX_QWORD_VAL(event)); + } + } + +out: + channel->eventq_read_ptr = read_ptr; + return spent; +} + +/* Allocate buffer table entries for event queue */ +int efx_farch_ev_probe(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned entries; + + entries = channel->eventq_mask + 1; + return efx_alloc_special_buffer(efx, &channel->eventq, + entries * sizeof(efx_qword_t)); +} + +int efx_farch_ev_init(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + netif_dbg(efx, hw, efx->net_dev, + "channel %d event queue in special buffers %d-%d\n", + channel->channel, channel->eventq.index, + channel->eventq.index + channel->eventq.entries - 1); + + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); + + /* Pin event queue buffer */ + efx_init_special_buffer(efx, &channel->eventq); + + /* Fill event queue with all ones (i.e. 
empty events) */ + memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); + + /* Push event queue to card */ + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), + FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); + efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, + channel->channel); + + return 0; +} + +void efx_farch_ev_fini(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + /* Remove event queue from card */ + EFX_ZERO_OWORD(reg); + efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, + channel->channel); + efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); + + /* Unpin event queue */ + efx_fini_special_buffer(efx, &channel->eventq); +} + +/* Free buffers backing event queue */ +void efx_farch_ev_remove(struct efx_channel *channel) +{ + efx_free_special_buffer(channel->efx, &channel->eventq); +} + + +void efx_farch_ev_test_generate(struct efx_channel *channel) +{ + efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); +} + +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) +{ + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_FILL(rx_queue)); +} + +/************************************************************************** + * + * Hardware interrupts + * The hardware interrupt handler does very little work; all the event + * queue processing is carried out by per-channel tasklets. + * + **************************************************************************/ + +/* Enable/disable/generate interrupts */ +static inline void efx_farch_interrupts(struct efx_nic *efx, + bool enabled, bool force) +{ + efx_oword_t int_en_reg_ker; + + EFX_POPULATE_OWORD_3(int_en_reg_ker, + FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, + FRF_AZ_KER_INT_KER, force, + FRF_AZ_DRV_INT_EN_KER, enabled); + efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); +} + +void efx_farch_irq_enable_master(struct efx_nic *efx) +{ + EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); + wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ + + efx_farch_interrupts(efx, true, false); +} + +void efx_farch_irq_disable_master(struct efx_nic *efx) +{ + /* Disable interrupts */ + efx_farch_interrupts(efx, false, false); +} + +/* Generate a test interrupt + * Interrupt must already have been enabled, otherwise nasty things + * may happen. + */ +int efx_farch_irq_test_generate(struct efx_nic *efx) +{ + efx_farch_interrupts(efx, true, true); + return 0; +} + +/* Process a fatal interrupt + * Disable bus mastering ASAP and schedule a reset + */ +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) +{ + efx_oword_t *int_ker = efx->irq_status.addr; + efx_oword_t fatal_intr; + int error, mem_perr; + + efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); + error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); + + netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " + EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), + EFX_OWORD_VAL(fatal_intr), + error ? 
"disabling bus mastering" : "no recognised error"); + + /* If this is a memory parity error dump which blocks are offending */ + mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || + EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); + if (mem_perr) { + efx_oword_t reg; + efx_reado(efx, ®, FR_AZ_MEM_STAT); + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(reg)); + } + + /* Disable both devices */ + pci_clear_master(efx->pci_dev); + efx_farch_irq_disable_master(efx); + + /* Count errors and reset or disable the NIC accordingly */ + if (efx->int_error_count == 0 || + time_after(jiffies, efx->int_error_expire)) { + efx->int_error_count = 0; + efx->int_error_expire = + jiffies + EFX_INT_ERROR_EXPIRE * HZ; + } + if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - reset scheduled\n"); + efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR); + } else { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - max number of errors seen." + "NIC will be disabled\n"); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); + } + + return IRQ_HANDLED; +} + +/* Handle a legacy interrupt + * Acknowledges the interrupt and schedule event queue processing. + */ +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) +{ + struct efx_nic *efx = dev_id; + bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); + efx_oword_t *int_ker = efx->irq_status.addr; + irqreturn_t result = IRQ_NONE; + struct efx_channel *channel; + efx_dword_t reg; + u32 queues; + int syserr; + + /* Read the ISR which also ACKs the interrupts */ + efx_readd(efx, ®, FR_BZ_INT_ISR0); + queues = EFX_EXTRACT_DWORD(reg, 0, 31); + + /* Legacy interrupts are disabled too late by the EEH kernel + * code. Disable them earlier. + * If an EEH error occurred, the read will have returned all ones. + */ + if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) && + !efx->eeh_disabled_legacy_irq) { + disable_irq_nosync(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = true; + } + + /* Handle non-event-queue sources */ + if (queues & (1U << efx->irq_level) && soft_enabled) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + if (queues != 0) { + efx->irq_zero_count = 0; + + /* Schedule processing of any interrupting queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + if (queues & 1) + efx_schedule_channel_irq(channel); + queues >>= 1; + } + } + result = IRQ_HANDLED; + + } else { + efx_qword_t *event; + + /* Legacy ISR read can return zero once (SF bug 15783) */ + + /* We can't return IRQ_HANDLED more than once on seeing ISR=0 + * because this might be a shared interrupt. */ + if (efx->irq_zero_count++ == 0) + result = IRQ_HANDLED; + + /* Ensure we schedule or rearm all event queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + event = efx_event(channel, + channel->eventq_read_ptr); + if (efx_event_present(event)) + efx_schedule_channel_irq(channel); + else + efx_farch_ev_read_ack(channel); + } + } + } + + if (result == IRQ_HANDLED) + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); + + return result; +} + +/* Handle an MSI interrupt + * + * Handle an MSI hardware interrupt. This routine schedules event + * queue processing. 
No interrupt acknowledgement cycle is necessary. + * Also, we never need to check that the interrupt is for us, since + * MSI interrupts cannot be shared. + */ +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + efx_oword_t *int_ker = efx->irq_status.addr; + int syserr; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + + if (!likely(READ_ONCE(efx->irq_soft_enabled))) + return IRQ_HANDLED; + + /* Handle non-event-queue sources */ + if (context->index == efx->irq_level) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(efx->channel[context->index]); + + return IRQ_HANDLED; +} + +/* Setup RSS indirection table. + * This maps from the hash value of the packet to RXQ + */ +void efx_farch_rx_push_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, + efx->rss_context.rx_indir_table[i]); + efx_writed(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + } +} + +void efx_farch_rx_pull_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + efx_readd(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); + } +} + +/* Looks at available SRAM resources and works out how many queues we + * can support, and where things like descriptor caches should live. + * + * SRAM is split up as follows: + * 0 buftbl entries for channels + * efx->vf_buftbl_base buftbl entries for SR-IOV + * efx->rx_dc_base RX descriptor caches + * efx->tx_dc_base TX descriptor caches + */ +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) +{ + unsigned vi_count, total_tx_channels; +#ifdef CONFIG_SFC_SIENA_SRIOV + struct siena_nic_data *nic_data; + unsigned buftbl_min; +#endif + + total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; + vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL); + +#ifdef CONFIG_SFC_SIENA_SRIOV + nic_data = efx->nic_data; + /* Account for the buffer table entries backing the datapath channels + * and the descriptor caches for those channels. 
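+ * buftbl_min below covers the RX ring, TX rings and event queue of + * every datapath channel, expressed in 4KB buffer table entries.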
+	 */
+	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+		       total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
+		       efx->n_channels * EFX_MAX_EVQ_SIZE)
+		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+	if (efx->type->sriov_wanted) {
+		if (efx->type->sriov_wanted(efx)) {
+			unsigned vi_dc_entries, buftbl_free;
+			unsigned entries_per_vf, vf_limit;
+
+			nic_data->vf_buftbl_base = buftbl_min;
+
+			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+			vi_count = max(vi_count, EFX_VI_BASE);
+			buftbl_free = (sram_lim_qw - buftbl_min -
+				       vi_count * vi_dc_entries);
+
+			entries_per_vf = ((vi_dc_entries +
+					   EFX_VF_BUFTBL_PER_VI) *
+					  efx_vf_size(efx));
+			vf_limit = min(buftbl_free / entries_per_vf,
+				       (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+			if (efx->vf_count > vf_limit) {
+				netif_err(efx, probe, efx->net_dev,
+					  "Reducing VF count from %d to %d\n",
+					  efx->vf_count, vf_limit);
+				efx->vf_count = vf_limit;
+			}
+			vi_count += efx->vf_count * efx_vf_size(efx);
+		}
+	}
+#endif
+
+	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx)
+{
+	efx_oword_t altera_build;
+
+	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_farch_init_common(struct efx_nic *efx)
+{
+	efx_oword_t temp;
+
+	/* Set positions of descriptor caches in SRAM. */
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
+	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
+	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+	/* Set TX descriptor cache size. */
+	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+	/* Set RX descriptor cache size. Set low watermark to size-8, as
+	 * this allows most efficient prefetching.
+	 */
+	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+	/* Program INT_KER address */
+	EFX_POPULATE_OWORD_2(temp,
+			     FRF_AZ_NORM_INT_VEC_DIS_KER,
+			     EFX_INT_MODE_USE_MSI(efx),
+			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+		/* Use an interrupt level unused by event queues */
+		efx->irq_level = 0x1f;
+	else
+		/* Use a valid MSI-X vector */
+		efx->irq_level = 0;
+
+	/* Enable all the genuinely fatal interrupts. (They are still
+	 * masked by the overall interrupt mask, controlled by
+	 * falcon_interrupts()).
+	 *
+	 * Note: All other fatal interrupts are enabled
+	 */
+	EFX_POPULATE_OWORD_3(temp,
+			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+	EFX_INVERT_OWORD(temp);
+	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
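+	 * (The block below is the usual read-modify-write pattern for these
+	 * registers: efx_reado() fetches the whole 128-bit efx_oword_t,
+	 * EFX_SET_OWORD_FIELD() rewrites individual bitfields in place, and
+	 * a single efx_writeo() pushes the result back.)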
+ */ + efx_reado(efx, &temp, FR_AZ_TX_RESERVED); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); + /* Enable SW_EV to inherit in char driver - assume harmless here */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); + /* Prefetch threshold 2 => fetch when descriptor cache half empty */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); + /* Disable hardware watchdog which can misfire */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); + /* Squash TX of packets of 16 bytes or less */ + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + EFX_POPULATE_OWORD_4(temp, + /* Default values */ + FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, + FRF_BZ_TX_PACE_SB_AF, 0xb, + FRF_BZ_TX_PACE_FB_BASE, 0, + /* Allow large pace values in the fast bin. */ + FRF_BZ_TX_PACE_BIN_TH, + FFE_BZ_TX_PACE_RESERVED); + efx_writeo(efx, &temp, FR_BZ_TX_PACE); +} + +/************************************************************************** + * + * Filter tables + * + ************************************************************************** + */ + +/* "Fudge factors" - difference between programmed value and actual depth. + * Due to pipelined implementation we need to program H/W with a value that + * is larger than the hop limit we want. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 + +/* Hard maximum search limit. Hardware will time-out beyond 200-something. + * We also need to avoid infinite loops in efx_farch_filter_search() when the + * table is full. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 + +/* Don't try very hard to find space for performance hints, as this is + * counter-productive. 
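+ * (These are the EFX_FILTER_PRI_HINT entries inserted by ARFS; they are
+ * speculative and cheap to drop or retry, so probing many slots for one
+ * costs more than it could ever save.)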
*/ +#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 + +enum efx_farch_filter_type { + EFX_FARCH_FILTER_TCP_FULL = 0, + EFX_FARCH_FILTER_TCP_WILD, + EFX_FARCH_FILTER_UDP_FULL, + EFX_FARCH_FILTER_UDP_WILD, + EFX_FARCH_FILTER_MAC_FULL = 4, + EFX_FARCH_FILTER_MAC_WILD, + EFX_FARCH_FILTER_UC_DEF = 8, + EFX_FARCH_FILTER_MC_DEF, + EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ +}; + +enum efx_farch_filter_table_id { + EFX_FARCH_FILTER_TABLE_RX_IP = 0, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, + EFX_FARCH_FILTER_TABLE_TX_MAC, + EFX_FARCH_FILTER_TABLE_COUNT, +}; + +enum efx_farch_filter_index { + EFX_FARCH_FILTER_INDEX_UC_DEF, + EFX_FARCH_FILTER_INDEX_MC_DEF, + EFX_FARCH_FILTER_SIZE_RX_DEF, +}; + +struct efx_farch_filter_spec { + u8 type:4; + u8 priority:4; + u8 flags; + u16 dmaq_id; + u32 data[3]; +}; + +struct efx_farch_filter_table { + enum efx_farch_filter_table_id id; + u32 offset; /* address of table relative to BAR */ + unsigned size; /* number of entries */ + unsigned step; /* step between entries */ + unsigned used; /* number currently used */ + unsigned long *used_bitmap; + struct efx_farch_filter_spec *spec; + unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; +}; + +struct efx_farch_filter_state { + struct rw_semaphore lock; /* Protects table contents */ + struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; +}; + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx); + +/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit + * key derived from the n-tuple. The initial LFSR state is 0xffff. */ +static u16 efx_farch_filter_hash(u32 key) +{ + u16 tmp; + + /* First 16 rounds */ + tmp = 0x1fff ^ key >> 16; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + tmp = tmp ^ tmp >> 9; + /* Last 16 rounds */ + tmp = tmp ^ tmp << 13 ^ key; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + return tmp ^ tmp >> 9; +} + +/* To allow for hash collisions, filter search continues at these + * increments from the first possible entry selected by the hash. */ +static u16 efx_farch_filter_increment(u32 key) +{ + return key * 2 - 1; +} + +static enum efx_farch_filter_table_id +efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) +{ + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != + EFX_FARCH_FILTER_TABLE_RX_MAC + 2); + return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); +} + +static void efx_farch_filter_push_rx_config(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t filter_ctl; + + efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + + /* There is a single bit to enable RX scatter for all + * unmatched packets. Only set it if scatter is + * enabled in both filter specs. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_SCATTER)); + } else { + /* We don't expose 'default' filters because unmatched + * packets always go to the queue number found in the + * RSS table. But we still need to set the RX scatter + * bit here. 
+ */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + efx->rx_scatter); + } + + efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); +} + +static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t tx_cfg; + + efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); + + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); +} + +static int +efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, + const struct efx_filter_spec *gen_spec) +{ + bool is_full = false; + + if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) + return -EINVAL; + + spec->priority = gen_spec->priority; + spec->flags = gen_spec->flags; + spec->dmaq_id = gen_spec->dmaq_id; + + switch (gen_spec->match_flags) { + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): + is_full = true; + fallthrough; + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { + __be32 rhost, host1, host2; + __be16 rport, port1, port2; + + EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); + + if (gen_spec->ether_type != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + if (gen_spec->loc_port == 0 || + (is_full && gen_spec->rem_port == 0)) + return -EADDRNOTAVAIL; + switch (gen_spec->ip_proto) { + case IPPROTO_TCP: + spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : + EFX_FARCH_FILTER_TCP_WILD); + break; + case IPPROTO_UDP: + spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : + EFX_FARCH_FILTER_UDP_WILD); + break; + default: + return -EPROTONOSUPPORT; + } + + /* Filter is constructed in terms of source and destination, + * with the odd wrinkle that the ports are swapped in a UDP + * wildcard filter. We need to convert from local and remote + * (= zero for wildcard) addresses. + */ + rhost = is_full ? gen_spec->rem_host[0] : 0; + rport = is_full ? gen_spec->rem_port : 0; + host1 = rhost; + host2 = gen_spec->loc_host[0]; + if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { + port1 = gen_spec->loc_port; + port2 = rport; + } else { + port1 = rport; + port2 = gen_spec->loc_port; + } + spec->data[0] = ntohl(host1) << 16 | ntohs(port1); + spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; + spec->data[2] = ntohl(host2); + + break; + } + + case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: + is_full = true; + fallthrough; + case EFX_FILTER_MATCH_LOC_MAC: + spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : + EFX_FARCH_FILTER_MAC_WILD); + spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; + spec->data[1] = (gen_spec->loc_mac[2] << 24 | + gen_spec->loc_mac[3] << 16 | + gen_spec->loc_mac[4] << 8 | + gen_spec->loc_mac[5]); + spec->data[2] = (gen_spec->loc_mac[0] << 8 | + gen_spec->loc_mac[1]); + break; + + case EFX_FILTER_MATCH_LOC_MAC_IG: + spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 
+ EFX_FARCH_FILTER_MC_DEF : + EFX_FARCH_FILTER_UC_DEF); + memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ + break; + + default: + return -EPROTONOSUPPORT; + } + + return 0; +} + +static void +efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, + const struct efx_farch_filter_spec *spec) +{ + bool is_full = false; + + /* *gen_spec should be completely initialised, to be consistent + * with efx_filter_init_{rx,tx}() and in case we want to copy + * it back to userland. + */ + memset(gen_spec, 0, sizeof(*gen_spec)); + + gen_spec->priority = spec->priority; + gen_spec->flags = spec->flags; + gen_spec->dmaq_id = spec->dmaq_id; + + switch (spec->type) { + case EFX_FARCH_FILTER_TCP_FULL: + case EFX_FARCH_FILTER_UDP_FULL: + is_full = true; + fallthrough; + case EFX_FARCH_FILTER_TCP_WILD: + case EFX_FARCH_FILTER_UDP_WILD: { + __be32 host1, host2; + __be16 port1, port2; + + gen_spec->match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + if (is_full) + gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_REM_PORT); + gen_spec->ether_type = htons(ETH_P_IP); + gen_spec->ip_proto = + (spec->type == EFX_FARCH_FILTER_TCP_FULL || + spec->type == EFX_FARCH_FILTER_TCP_WILD) ? + IPPROTO_TCP : IPPROTO_UDP; + + host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); + port1 = htons(spec->data[0]); + host2 = htonl(spec->data[2]); + port2 = htons(spec->data[1] >> 16); + if (spec->flags & EFX_FILTER_FLAG_TX) { + gen_spec->loc_host[0] = host1; + gen_spec->rem_host[0] = host2; + } else { + gen_spec->loc_host[0] = host2; + gen_spec->rem_host[0] = host1; + } + if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ + (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { + gen_spec->loc_port = port1; + gen_spec->rem_port = port2; + } else { + gen_spec->loc_port = port2; + gen_spec->rem_port = port1; + } + + break; + } + + case EFX_FARCH_FILTER_MAC_FULL: + is_full = true; + fallthrough; + case EFX_FARCH_FILTER_MAC_WILD: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; + if (is_full) + gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + gen_spec->loc_mac[0] = spec->data[2] >> 8; + gen_spec->loc_mac[1] = spec->data[2]; + gen_spec->loc_mac[2] = spec->data[1] >> 24; + gen_spec->loc_mac[3] = spec->data[1] >> 16; + gen_spec->loc_mac[4] = spec->data[1] >> 8; + gen_spec->loc_mac[5] = spec->data[1]; + gen_spec->outer_vid = htons(spec->data[0]); + break; + + case EFX_FARCH_FILTER_UC_DEF: + case EFX_FARCH_FILTER_MC_DEF: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; + gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; + break; + + default: + WARN_ON(1); + break; + } +} + +static void +efx_farch_filter_init_rx_auto(struct efx_nic *efx, + struct efx_farch_filter_spec *spec) +{ + /* If there's only one channel then disable RSS for non VF + * traffic, thereby allowing VFs to use RSS when the PF can't. + */ + spec->priority = EFX_FILTER_PRI_AUTO; + spec->flags = (EFX_FILTER_FLAG_RX | + (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); + spec->dmaq_id = 0; +} + +/* Build a filter entry and return its n-tuple key. 
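+ * (The returned key is the XOR of the three spec data words with a
+ * per-table discriminator; efx_farch_filter_insert() feeds it to
+ * efx_farch_filter_hash() and efx_farch_filter_increment() to pick the
+ * probe sequence.)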
*/ +static u32 efx_farch_filter_build(efx_oword_t *filter, + struct efx_farch_filter_spec *spec) +{ + u32 data3; + + switch (efx_farch_filter_spec_table_id(spec)) { + case EFX_FARCH_FILTER_TABLE_RX_IP: { + bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || + spec->type == EFX_FARCH_FILTER_UDP_WILD); + EFX_POPULATE_OWORD_7( + *filter, + FRF_BZ_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_BZ_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_BZ_TCP_UDP, is_udp, + FRF_BZ_RXQ_ID, spec->dmaq_id, + EFX_DWORD_2, spec->data[2], + EFX_DWORD_1, spec->data[1], + EFX_DWORD_0, spec->data[0]); + data3 = is_udp; + break; + } + + case EFX_FARCH_FILTER_TABLE_RX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_7( + *filter, + FRF_CZ_RMFT_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_CZ_RMFT_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, + FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], + FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], + FRF_CZ_RMFT_VLAN_ID, spec->data[0]); + data3 = is_wild; + break; + } + + case EFX_FARCH_FILTER_TABLE_TX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_5(*filter, + FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, + FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], + FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], + FRF_CZ_TMFT_VLAN_ID, spec->data[0]); + data3 = is_wild | spec->dmaq_id << 1; + break; + } + + default: + BUG(); + } + + return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; +} + +static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, + const struct efx_farch_filter_spec *right) +{ + if (left->type != right->type || + memcmp(left->data, right->data, sizeof(left->data))) + return false; + + if (left->flags & EFX_FILTER_FLAG_TX && + left->dmaq_id != right->dmaq_id) + return false; + + return true; +} + +/* + * Construct/deconstruct external filter IDs. At least the RX filter + * IDs must be ordered by matching priority, for RX NFC semantics. + * + * Deconstruction needs to be robust against invalid IDs so that + * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can + * accept user-provided IDs. 
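+ *
+ * (With EFX_FARCH_FILTER_INDEX_WIDTH = 13 an ID is simply
+ * (match_pri_range << 13) | index; for example an RX MAC_WILD filter,
+ * match priority 3, at table index 7 encodes to (3 << 13) | 7 = 24583,
+ * and deconstruction just reverses the shift and mask.)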
+ */ + +#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 + +static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { + [EFX_FARCH_FILTER_TCP_FULL] = 0, + [EFX_FARCH_FILTER_UDP_FULL] = 0, + [EFX_FARCH_FILTER_TCP_WILD] = 1, + [EFX_FARCH_FILTER_UDP_WILD] = 1, + [EFX_FARCH_FILTER_MAC_FULL] = 2, + [EFX_FARCH_FILTER_MAC_WILD] = 3, + [EFX_FARCH_FILTER_UC_DEF] = 4, + [EFX_FARCH_FILTER_MC_DEF] = 4, +}; + +static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { + EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ + EFX_FARCH_FILTER_TABLE_RX_IP, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ +}; + +#define EFX_FARCH_FILTER_INDEX_WIDTH 13 +#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) + +static inline u32 +efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, + unsigned int index) +{ + unsigned int range; + + range = efx_farch_filter_type_match_pri[spec->type]; + if (!(spec->flags & EFX_FILTER_FLAG_RX)) + range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; + + return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; +} + +static inline enum efx_farch_filter_table_id +efx_farch_filter_id_table_id(u32 id) +{ + unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; + + if (range < ARRAY_SIZE(efx_farch_filter_range_table)) + return efx_farch_filter_range_table[range]; + else + return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ +} + +static inline unsigned int efx_farch_filter_id_index(u32 id) +{ + return id & EFX_FARCH_FILTER_INDEX_MASK; +} + +u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; + enum efx_farch_filter_table_id table_id; + + do { + table_id = efx_farch_filter_range_table[range]; + if (state->table[table_id].size != 0) + return range << EFX_FARCH_FILTER_INDEX_WIDTH | + state->table[table_id].size; + } while (range--); + + return 0; +} + +s32 efx_farch_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *gen_spec, + bool replace_equal) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec spec; + efx_oword_t filter; + int rep_index, ins_index; + unsigned int depth = 0; + int rc; + + rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); + if (rc) + return rc; + + down_write(&state->lock); + + table = &state->table[efx_farch_filter_spec_table_id(&spec)]; + if (table->size == 0) { + rc = -EINVAL; + goto out_unlock; + } + + netif_vdbg(efx, hw, efx->net_dev, + "%s: type %d search_limit=%d", __func__, spec.type, + table->search_limit[spec.type]); + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + /* One filter spec per type */ + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != + EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); + rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; + ins_index = rep_index; + } else { + /* Search concurrently for + * (1) a filter to be replaced (rep_index): any filter + * with the same match values, up to the current + * search depth for this type, and + * (2) the insertion point (ins_index): (1) or any + * free slot before it or up to the maximum search + * depth for this priority + * We fail if we cannot find (2). 
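+		 * (The probe sequence is plain open addressing: starting
+		 * from hash & (size - 1) we step by the increment derived
+		 * from the key, which is always odd, so with the
+		 * power-of-two table sizes used here every slot is
+		 * eventually visited.)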
+ * + * We can stop once either + * (a) we find (1), in which case we have definitely + * found (2) as well; or + * (b) we have searched exhaustively for (1), and have + * either found (2) or searched exhaustively for it + */ + u32 key = efx_farch_filter_build(&filter, &spec); + unsigned int hash = efx_farch_filter_hash(key); + unsigned int incr = efx_farch_filter_increment(key); + unsigned int max_rep_depth = table->search_limit[spec.type]; + unsigned int max_ins_depth = + spec.priority <= EFX_FILTER_PRI_HINT ? + EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : + EFX_FARCH_FILTER_CTL_SRCH_MAX; + unsigned int i = hash & (table->size - 1); + + ins_index = -1; + depth = 1; + + for (;;) { + if (!test_bit(i, table->used_bitmap)) { + if (ins_index < 0) + ins_index = i; + } else if (efx_farch_filter_equal(&spec, + &table->spec[i])) { + /* Case (a) */ + if (ins_index < 0) + ins_index = i; + rep_index = i; + break; + } + + if (depth >= max_rep_depth && + (ins_index >= 0 || depth >= max_ins_depth)) { + /* Case (b) */ + if (ins_index < 0) { + rc = -EBUSY; + goto out_unlock; + } + rep_index = -1; + break; + } + + i = (i + incr) & (table->size - 1); + ++depth; + } + } + + /* If we found a filter to be replaced, check whether we + * should do so + */ + if (rep_index >= 0) { + struct efx_farch_filter_spec *saved_spec = + &table->spec[rep_index]; + + if (spec.priority == saved_spec->priority && !replace_equal) { + rc = -EEXIST; + goto out_unlock; + } + if (spec.priority < saved_spec->priority) { + rc = -EPERM; + goto out_unlock; + } + if (saved_spec->priority == EFX_FILTER_PRI_AUTO || + saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) + spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; + } + + /* Insert the filter */ + if (ins_index != rep_index) { + __set_bit(ins_index, table->used_bitmap); + ++table->used; + } + table->spec[ins_index] = spec; + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + efx_farch_filter_push_rx_config(efx); + } else { + if (table->search_limit[spec.type] < depth) { + table->search_limit[spec.type] = depth; + if (spec.flags & EFX_FILTER_FLAG_TX) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } + + efx_writeo(efx, &filter, + table->offset + table->step * ins_index); + + /* If we were able to replace a filter by inserting + * at a lower depth, clear the replaced filter + */ + if (ins_index != rep_index && rep_index >= 0) + efx_farch_filter_table_clear_entry(efx, table, + rep_index); + } + + netif_vdbg(efx, hw, efx->net_dev, + "%s: filter type %d index %d rxq %u set", + __func__, spec.type, ins_index, spec.dmaq_id); + rc = efx_farch_filter_make_id(&spec, ins_index); + +out_unlock: + up_write(&state->lock); + return rc; +} + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx) +{ + static efx_oword_t filter; + + EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); + BUG_ON(table->offset == 0); /* can't clear MAC default filters */ + + __clear_bit(filter_idx, table->used_bitmap); + --table->used; + memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); + + efx_writeo(efx, &filter, table->offset + table->step * filter_idx); + + /* If this filter required a greater search depth than + * any other, the search limit for its type can now be + * decreased. However, it is hard to determine that + * unless the table has become completely empty - in + * which case, all its search limits can be set to 0. 
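+	 * (So in normal operation the per-type limits only ever grow;
+	 * this empty-table case is the one point at which they shrink.)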
+ */ + if (unlikely(table->used == 0)) { + memset(table->search_limit, 0, sizeof(table->search_limit)); + if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } +} + +static int efx_farch_filter_remove(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; + + if (!test_bit(filter_idx, table->used_bitmap) || + spec->priority != priority) + return -ENOENT; + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { + efx_farch_filter_init_rx_auto(efx, spec); + efx_farch_filter_push_rx_config(efx); + } else { + efx_farch_filter_table_clear_entry(efx, table, filter_idx); + } + + return 0; +} + +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + int rc; + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + down_write(&state->lock); + + rc = efx_farch_filter_remove(efx, table, filter_idx, priority); + up_write(&state->lock); + + return rc; +} + +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec_buf) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec *spec; + unsigned int filter_idx; + int rc = -ENOENT; + + down_read(&state->lock); + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + goto out_unlock; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + goto out_unlock; + spec = &table->spec[filter_idx]; + + if (test_bit(filter_idx, table->used_bitmap) && + spec->priority == priority) { + efx_farch_filter_to_gen_spec(spec_buf, spec); + rc = 0; + } + +out_unlock: + up_read(&state->lock); + return rc; +} + +static void +efx_farch_filter_table_clear(struct efx_nic *efx, + enum efx_farch_filter_table_id table_id, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table = &state->table[table_id]; + unsigned int filter_idx; + + down_write(&state->lock); + for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { + if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) + efx_farch_filter_remove(efx, table, + filter_idx, priority); + } + up_write(&state->lock); +} + +int efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, + priority); + return 0; +} + +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + 
	struct efx_farch_filter_table *table;
+	unsigned int filter_idx;
+	u32 count = 0;
+
+	down_read(&state->lock);
+
+	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+	     table_id++) {
+		table = &state->table[table_id];
+		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+			if (test_bit(filter_idx, table->used_bitmap) &&
+			    table->spec[filter_idx].priority == priority)
+				++count;
+		}
+	}
+
+	up_read(&state->lock);
+
+	return count;
+}
+
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+				enum efx_filter_priority priority,
+				u32 *buf, u32 size)
+{
+	struct efx_farch_filter_state *state = efx->filter_state;
+	enum efx_farch_filter_table_id table_id;
+	struct efx_farch_filter_table *table;
+	unsigned int filter_idx;
+	s32 count = 0;
+
+	down_read(&state->lock);
+
+	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+	     table_id++) {
+		table = &state->table[table_id];
+		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+			if (test_bit(filter_idx, table->used_bitmap) &&
+			    table->spec[filter_idx].priority == priority) {
+				if (count == size) {
+					count = -EMSGSIZE;
+					goto out;
+				}
+				buf[count++] = efx_farch_filter_make_id(
+					&table->spec[filter_idx], filter_idx);
+			}
+		}
+	}
+out:
+	up_read(&state->lock);
+
+	return count;
+}
+
+/* Restore filter state after reset */
+void efx_farch_filter_table_restore(struct efx_nic *efx)
+{
+	struct efx_farch_filter_state *state = efx->filter_state;
+	enum efx_farch_filter_table_id table_id;
+	struct efx_farch_filter_table *table;
+	efx_oword_t filter;
+	unsigned int filter_idx;
+
+	down_write(&state->lock);
+
+	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+		table = &state->table[table_id];
+
+		/* Check whether this is a regular register table */
+		if (table->step == 0)
+			continue;
+
+		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+			if (!test_bit(filter_idx, table->used_bitmap))
+				continue;
+			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+			efx_writeo(efx, &filter,
+				   table->offset + table->step * filter_idx);
+		}
+	}
+
+	efx_farch_filter_push_rx_config(efx);
+	efx_farch_filter_push_tx_limits(efx);
+
+	up_write(&state->lock);
+}
+
+void efx_farch_filter_table_remove(struct efx_nic *efx)
+{
+	struct efx_farch_filter_state *state = efx->filter_state;
+	enum efx_farch_filter_table_id table_id;
+
+	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+		bitmap_free(state->table[table_id].used_bitmap);
+		vfree(state->table[table_id].spec);
+	}
+	kfree(state);
+}
+
+int efx_farch_filter_table_probe(struct efx_nic *efx)
+{
+	struct efx_farch_filter_state *state;
+	struct efx_farch_filter_table *table;
+	unsigned table_id;
+
+	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+	efx->filter_state = state;
+	init_rwsem(&state->lock);
+
+	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+	table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+	table->offset = FR_BZ_RX_FILTER_TBL0;
+	table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+	table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+
+	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+	table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+	table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+	table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+	table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+	table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+	table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
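+	/* The RX_DEF table is software state only: it has no offset or
+	 * step, so efx_farch_filter_table_restore() skips it and its
+	 * settings reach hardware via efx_farch_filter_push_rx_config().
+	 */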
table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; + table->offset = FR_CZ_TX_MAC_FILTER_TBL0; + table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; + table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; + + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + table = &state->table[table_id]; + if (table->size == 0) + continue; + table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL); + if (!table->used_bitmap) + goto fail; + table->spec = vzalloc(array_size(sizeof(*table->spec), + table->size)); + if (!table->spec) + goto fail; + } + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { + /* RX default filters must always exist */ + struct efx_farch_filter_spec *spec; + unsigned i; + + for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { + spec = &table->spec[i]; + spec->type = EFX_FARCH_FILTER_UC_DEF + i; + efx_farch_filter_init_rx_auto(efx, spec); + __set_bit(i, table->used_bitmap); + } + } + + efx_farch_filter_push_rx_config(efx); + + return 0; + +fail: + efx_farch_filter_table_remove(efx); + return -ENOMEM; +} + +/* Update scatter enable flags for filters pointing to our own RX queues */ +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + down_write(&state->lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap) || + table->spec[filter_idx].dmaq_id >= + efx->n_rx_channels) + continue; + + if (efx->rx_scatter) + table->spec[filter_idx].flags |= + EFX_FILTER_FLAG_RX_SCATTER; + else + table->spec[filter_idx].flags &= + ~EFX_FILTER_FLAG_RX_SCATTER; + + if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) + /* Pushed by efx_farch_filter_push_rx_config() */ + continue; + + efx_farch_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_farch_filter_push_rx_config(efx); + + up_write(&state->lock); +} + +#ifdef CONFIG_RFS_ACCEL + +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + bool ret = false, force = false; + u16 arfs_id; + + down_write(&state->lock); + spin_lock_bh(&efx->rps_hash_lock); + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + if (test_bit(index, table->used_bitmap) && + table->spec[index].priority == EFX_FILTER_PRI_HINT) { + struct efx_arfs_rule *rule = NULL; + struct efx_filter_spec spec; + + efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always returned 0 to + * ARFS, so use the same to query it. 
+			 */
+			arfs_id = 0;
+		} else {
+			rule = efx_siena_rps_hash_find(efx, &spec);
+			if (!rule) {
+				/* ARFS table doesn't know of this filter, remove it */
+				force = true;
+			} else {
+				arfs_id = rule->arfs_id;
+				if (!efx_siena_rps_check_rule(rule, index,
+							      &force))
+					goto out_unlock;
+			}
+		}
+		if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
+						 flow_id, arfs_id)) {
+			if (rule)
+				rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+			efx_siena_rps_hash_del(efx, &spec);
+			efx_farch_filter_table_clear_entry(efx, table, index);
+			ret = true;
+		}
+	}
+out_unlock:
+	spin_unlock_bh(&efx->rps_hash_lock);
+	up_write(&state->lock);
+	return ret;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
+{
+	struct net_device *net_dev = efx->net_dev;
+	struct netdev_hw_addr *ha;
+	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+	u32 crc;
+	int bit;
+
+	if (!efx_dev_registered(efx))
+		return;
+
+	netif_addr_lock_bh(net_dev);
+
+	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
+
+	/* Build multicast hash table */
+	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+		memset(mc_hash, 0xff, sizeof(*mc_hash));
+	} else {
+		memset(mc_hash, 0x00, sizeof(*mc_hash));
+		netdev_for_each_mc_addr(ha, net_dev) {
+			crc = ether_crc_le(ETH_ALEN, ha->addr);
+			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+			__set_bit_le(bit, mc_hash);
+		}
+
+		/* Broadcast packets go through the multicast hash filter.
+		 * ether_crc_le() of the broadcast address is 0xbe2612ff
+		 * so we always add bit 0xff to the mask.
+		 */
+		__set_bit_le(0xff, mc_hash);
+	}
+
+	netif_addr_unlock_bh(net_dev);
+}
diff --git a/drivers/net/ethernet/sfc/siena/farch_regs.h b/drivers/net/ethernet/sfc/siena/farch_regs.h
new file mode 100644
index 0000000000..d138be423e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/farch_regs.h
@@ -0,0 +1,2929 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ *     F<type>_<min-rev><max-rev>_<name>
+ *
+ * The following strings are used for <type>:
+ *
+ *             MMIO register  MC register  Host memory structure
+ * -------------------------------------------------------------
+ * Address     R              MCR
+ * Bitfield    RF             MCRF         SF
+ * Enumerator  FE             MCFE         SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ *     A: Falcon A1 (SFC4000AB)
+ *     B: Falcon B0 (SFC4000BA)
+ *     C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
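+ *
+ * For example, FRF_CZ_SRAM_PERR_INT_P_KER_EN is a register bitfield (RF)
+ * introduced in Siena A0 (C) and still present in the latest revision
+ * (Z), while FR_AA_INT_ACK_KER is a register address (R) that exists
+ * only on Falcon A1 (AA).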
+ */ + +/************************************************************************** + * + * Falcon/Siena registers and descriptors + * + ************************************************************************** + */ + +/* ADR_REGION_REG: Address region register */ +#define FR_AZ_ADR_REGION 0x00000000 +#define FRF_AZ_ADR_REGION3_LBN 96 +#define FRF_AZ_ADR_REGION3_WIDTH 18 +#define FRF_AZ_ADR_REGION2_LBN 64 +#define FRF_AZ_ADR_REGION2_WIDTH 18 +#define FRF_AZ_ADR_REGION1_LBN 32 +#define FRF_AZ_ADR_REGION1_WIDTH 18 +#define FRF_AZ_ADR_REGION0_LBN 0 +#define FRF_AZ_ADR_REGION0_WIDTH 18 + +/* INT_EN_REG_KER: Kernel driver Interrupt enable register */ +#define FR_AZ_INT_EN_KER 0x00000010 +#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 +#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 +#define FRF_AZ_KER_INT_CHAR_LBN 4 +#define FRF_AZ_KER_INT_CHAR_WIDTH 1 +#define FRF_AZ_KER_INT_KER_LBN 3 +#define FRF_AZ_KER_INT_KER_WIDTH 1 +#define FRF_AZ_DRV_INT_EN_KER_LBN 0 +#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 + +/* INT_EN_REG_CHAR: Char Driver interrupt enable register */ +#define FR_BZ_INT_EN_CHAR 0x00000020 +#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 +#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 +#define FRF_BZ_CHAR_INT_CHAR_LBN 4 +#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 +#define FRF_BZ_CHAR_INT_KER_LBN 3 +#define FRF_BZ_CHAR_INT_KER_WIDTH 1 +#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 +#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 + +/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ +#define FR_AZ_INT_ADR_KER 0x00000030 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 +#define FRF_AZ_INT_ADR_KER_LBN 0 +#define FRF_AZ_INT_ADR_KER_WIDTH 64 + +/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ +#define FR_BZ_INT_ADR_CHAR 0x00000040 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 +#define FRF_BZ_INT_ADR_CHAR_LBN 0 +#define FRF_BZ_INT_ADR_CHAR_WIDTH 64 + +/* INT_ACK_KER: Kernel interrupt acknowledge register */ +#define FR_AA_INT_ACK_KER 0x00000050 +#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 +#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 + +/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ +#define FR_BZ_INT_ISR0 0x00000090 +#define FRF_BZ_INT_ISR_REG_LBN 0 +#define FRF_BZ_INT_ISR_REG_WIDTH 64 + +/* HW_INIT_REG: Hardware initialization register */ +#define FR_AZ_HW_INIT 0x000000c0 +#define FRF_BB_BDMRD_CPLF_FULL_LBN 124 +#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 +#define FRF_CZ_TX_MRG_TAGS_LBN 120 +#define FRF_CZ_TX_MRG_TAGS_WIDTH 1 +#define FRF_AB_TRGT_MASK_ALL_LBN 100 +#define FRF_AB_TRGT_MASK_ALL_WIDTH 1 +#define FRF_AZ_DOORBELL_DROP_LBN 92 +#define FRF_AZ_DOORBELL_DROP_WIDTH 8 +#define FRF_AB_TX_RREQ_MASK_EN_LBN 76 +#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 +#define FRF_AB_PE_EIDLE_DIS_LBN 75 +#define FRF_AB_PE_EIDLE_DIS_WIDTH 1 +#define FRF_AA_FC_BLOCKING_EN_LBN 45 +#define FRF_AA_FC_BLOCKING_EN_WIDTH 1 +#define FRF_BZ_B2B_REQ_EN_LBN 45 +#define FRF_BZ_B2B_REQ_EN_WIDTH 1 +#define FRF_AA_B2B_REQ_EN_LBN 44 +#define FRF_AA_B2B_REQ_EN_WIDTH 1 +#define FRF_BB_FC_BLOCKING_EN_LBN 44 +#define FRF_BB_FC_BLOCKING_EN_WIDTH 1 +#define FRF_AZ_POST_WR_MASK_LBN 40 +#define FRF_AZ_POST_WR_MASK_WIDTH 4 +#define FRF_AZ_TLP_TC_LBN 34 +#define FRF_AZ_TLP_TC_WIDTH 3 +#define FRF_AZ_TLP_ATTR_LBN 32 +#define FRF_AZ_TLP_ATTR_WIDTH 2 +#define FRF_AB_INTB_VEC_LBN 24 +#define FRF_AB_INTB_VEC_WIDTH 5 +#define FRF_AB_INTA_VEC_LBN 16 +#define FRF_AB_INTA_VEC_WIDTH 5 
+#define FRF_AZ_WD_TIMER_LBN 8 +#define FRF_AZ_WD_TIMER_WIDTH 8 +#define FRF_AZ_US_DISABLE_LBN 5 +#define FRF_AZ_US_DISABLE_WIDTH 1 +#define FRF_AZ_TLP_EP_LBN 4 +#define FRF_AZ_TLP_EP_WIDTH 1 +#define FRF_AZ_ATTR_SEL_LBN 3 +#define FRF_AZ_ATTR_SEL_WIDTH 1 +#define FRF_AZ_TD_SEL_LBN 1 +#define FRF_AZ_TD_SEL_WIDTH 1 +#define FRF_AZ_TLP_TD_LBN 0 +#define FRF_AZ_TLP_TD_WIDTH 1 + +/* EE_SPI_HCMD_REG: SPI host command register */ +#define FR_AB_EE_SPI_HCMD 0x00000100 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 +#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 +#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 +#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 +#define FRF_AB_EE_SPI_HCMD_READ_LBN 15 +#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 +#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 + +/* USR_EV_CFG: User Level Event Configuration register */ +#define FR_CZ_USR_EV_CFG 0x00000100 +#define FRF_CZ_USREV_DIS_LBN 16 +#define FRF_CZ_USREV_DIS_WIDTH 1 +#define FRF_CZ_DFLT_EVQ_LBN 0 +#define FRF_CZ_DFLT_EVQ_WIDTH 10 + +/* EE_SPI_HADR_REG: SPI host address register */ +#define FR_AB_EE_SPI_HADR 0x00000110 +#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 +#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 +#define FRF_AB_EE_SPI_HADR_ADR_LBN 0 +#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 + +/* EE_SPI_HDATA_REG: SPI host data register */ +#define FR_AB_EE_SPI_HDATA 0x00000120 +#define FRF_AB_EE_SPI_HDATA3_LBN 96 +#define FRF_AB_EE_SPI_HDATA3_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA2_LBN 64 +#define FRF_AB_EE_SPI_HDATA2_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA1_LBN 32 +#define FRF_AB_EE_SPI_HDATA1_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA0_LBN 0 +#define FRF_AB_EE_SPI_HDATA0_WIDTH 32 + +/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ +#define FR_AB_EE_BASE_PAGE 0x00000130 +#define FRF_AB_EE_EXPROM_MASK_LBN 16 +#define FRF_AB_EE_EXPROM_MASK_WIDTH 13 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 + +/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ +#define FR_AB_EE_VPD_CFG0 0x00000140 +#define FRF_AB_EE_SF_FASTRD_EN_LBN 127 +#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 +#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 +#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_VPD_WIP_POLL_LBN 119 +#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 +#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 +#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 +#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 +#define FRF_AB_EE_VPDW_LENGTH_LBN 80 +#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPDW_BASE_LBN 64 +#define FRF_AB_EE_VPDW_BASE_WIDTH 15 +#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 +#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 +#define FRF_AB_EE_VPD_BASE_LBN 32 +#define FRF_AB_EE_VPD_BASE_WIDTH 24 +#define FRF_AB_EE_VPD_LENGTH_LBN 16 +#define FRF_AB_EE_VPD_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPD_AD_SIZE_LBN 8 +#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 +#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 +#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 +#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 +#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 +#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 +#define 
FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 +#define FRF_AB_EE_VPD_EN_LBN 0 +#define FRF_AB_EE_VPD_EN_WIDTH 1 + +/* EE_VPD_SW_CNTL_REG: VPD access SW control register */ +#define FR_AB_EE_VPD_SW_CNTL 0x00000150 +#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 +#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 +#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_ADR_LBN 0 +#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 + +/* EE_VPD_SW_DATA_REG: VPD access SW data register */ +#define FR_AB_EE_VPD_SW_DATA 0x00000160 +#define FRF_AB_EE_VPD_CYC_DAT_LBN 0 +#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 + +/* PBMX_DBG_IADDR_REG: Capture Module address register */ +#define FR_CZ_PBMX_DBG_IADDR 0x000001f0 +#define FRF_CZ_PBMX_DBG_IADDR_LBN 0 +#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 + +/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ +#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 +#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 +#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 + +/* PBMX_DBG_IDATA_REG: Capture Module data register */ +#define FR_CZ_PBMX_DBG_IDATA 0x000001f8 +#define FRF_CZ_PBMX_DBG_IDATA_LBN 0 +#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 + +/* NIC_STAT_REG: NIC status register */ +#define FR_AB_NIC_STAT 0x00000200 +#define FRF_BB_AER_DIS_LBN 34 +#define FRF_BB_AER_DIS_WIDTH 1 +#define FRF_BB_EE_STRAP_EN_LBN 31 +#define FRF_BB_EE_STRAP_EN_WIDTH 1 +#define FRF_BB_EE_STRAP_LBN 24 +#define FRF_BB_EE_STRAP_WIDTH 4 +#define FRF_BB_REVISION_ID_LBN 17 +#define FRF_BB_REVISION_ID_WIDTH 7 +#define FRF_AB_ONCHIP_SRAM_LBN 16 +#define FRF_AB_ONCHIP_SRAM_WIDTH 1 +#define FRF_AB_SF_PRST_LBN 9 +#define FRF_AB_SF_PRST_WIDTH 1 +#define FRF_AB_EE_PRST_LBN 8 +#define FRF_AB_EE_PRST_WIDTH 1 +#define FRF_AB_ATE_MODE_LBN 3 +#define FRF_AB_ATE_MODE_WIDTH 1 +#define FRF_AB_STRAP_PINS_LBN 0 +#define FRF_AB_STRAP_PINS_WIDTH 3 + +/* GPIO_CTL_REG: GPIO control register */ +#define FR_AB_GPIO_CTL 0x00000210 +#define FRF_AB_GPIO_OUT3_LBN 112 +#define FRF_AB_GPIO_OUT3_WIDTH 16 +#define FRF_AB_GPIO_IN3_LBN 104 +#define FRF_AB_GPIO_IN3_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 +#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 +#define FRF_AB_GPIO_OUT2_LBN 80 +#define FRF_AB_GPIO_OUT2_WIDTH 16 +#define FRF_AB_GPIO_IN2_LBN 72 +#define FRF_AB_GPIO_IN2_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 +#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 +#define FRF_AB_GPIO15_OEN_LBN 63 +#define FRF_AB_GPIO15_OEN_WIDTH 1 +#define FRF_AB_GPIO14_OEN_LBN 62 +#define FRF_AB_GPIO14_OEN_WIDTH 1 +#define FRF_AB_GPIO13_OEN_LBN 61 +#define FRF_AB_GPIO13_OEN_WIDTH 1 +#define FRF_AB_GPIO12_OEN_LBN 60 +#define FRF_AB_GPIO12_OEN_WIDTH 1 +#define FRF_AB_GPIO11_OEN_LBN 59 +#define FRF_AB_GPIO11_OEN_WIDTH 1 +#define FRF_AB_GPIO10_OEN_LBN 58 +#define FRF_AB_GPIO10_OEN_WIDTH 1 +#define FRF_AB_GPIO9_OEN_LBN 57 +#define FRF_AB_GPIO9_OEN_WIDTH 1 +#define FRF_AB_GPIO8_OEN_LBN 56 +#define FRF_AB_GPIO8_OEN_WIDTH 1 +#define FRF_AB_GPIO15_OUT_LBN 55 +#define FRF_AB_GPIO15_OUT_WIDTH 1 +#define FRF_AB_GPIO14_OUT_LBN 54 +#define FRF_AB_GPIO14_OUT_WIDTH 1 +#define FRF_AB_GPIO13_OUT_LBN 53 +#define FRF_AB_GPIO13_OUT_WIDTH 1 +#define FRF_AB_GPIO12_OUT_LBN 52 +#define FRF_AB_GPIO12_OUT_WIDTH 1 +#define FRF_AB_GPIO11_OUT_LBN 51 +#define FRF_AB_GPIO11_OUT_WIDTH 1 +#define FRF_AB_GPIO10_OUT_LBN 50 +#define FRF_AB_GPIO10_OUT_WIDTH 1 +#define 
FRF_AB_GPIO9_OUT_LBN 49 +#define FRF_AB_GPIO9_OUT_WIDTH 1 +#define FRF_AB_GPIO8_OUT_LBN 48 +#define FRF_AB_GPIO8_OUT_WIDTH 1 +#define FRF_AB_GPIO15_IN_LBN 47 +#define FRF_AB_GPIO15_IN_WIDTH 1 +#define FRF_AB_GPIO14_IN_LBN 46 +#define FRF_AB_GPIO14_IN_WIDTH 1 +#define FRF_AB_GPIO13_IN_LBN 45 +#define FRF_AB_GPIO13_IN_WIDTH 1 +#define FRF_AB_GPIO12_IN_LBN 44 +#define FRF_AB_GPIO12_IN_WIDTH 1 +#define FRF_AB_GPIO11_IN_LBN 43 +#define FRF_AB_GPIO11_IN_WIDTH 1 +#define FRF_AB_GPIO10_IN_LBN 42 +#define FRF_AB_GPIO10_IN_WIDTH 1 +#define FRF_AB_GPIO9_IN_LBN 41 +#define FRF_AB_GPIO9_IN_WIDTH 1 +#define FRF_AB_GPIO8_IN_LBN 40 +#define FRF_AB_GPIO8_IN_WIDTH 1 +#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 +#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 +#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 +#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 +#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 +#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 +#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 +#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 +#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_CLK156_OUT_EN_LBN 31 +#define FRF_AB_CLK156_OUT_EN_WIDTH 1 +#define FRF_AB_USE_NIC_CLK_LBN 30 +#define FRF_AB_USE_NIC_CLK_WIDTH 1 +#define FRF_AB_GPIO5_OEN_LBN 29 +#define FRF_AB_GPIO5_OEN_WIDTH 1 +#define FRF_AB_GPIO4_OEN_LBN 28 +#define FRF_AB_GPIO4_OEN_WIDTH 1 +#define FRF_AB_GPIO3_OEN_LBN 27 +#define FRF_AB_GPIO3_OEN_WIDTH 1 +#define FRF_AB_GPIO2_OEN_LBN 26 +#define FRF_AB_GPIO2_OEN_WIDTH 1 +#define FRF_AB_GPIO1_OEN_LBN 25 +#define FRF_AB_GPIO1_OEN_WIDTH 1 +#define FRF_AB_GPIO0_OEN_LBN 24 +#define FRF_AB_GPIO0_OEN_WIDTH 1 +#define FRF_AB_GPIO7_OUT_LBN 23 +#define FRF_AB_GPIO7_OUT_WIDTH 1 +#define FRF_AB_GPIO6_OUT_LBN 22 +#define FRF_AB_GPIO6_OUT_WIDTH 1 +#define FRF_AB_GPIO5_OUT_LBN 21 +#define FRF_AB_GPIO5_OUT_WIDTH 1 +#define FRF_AB_GPIO4_OUT_LBN 20 +#define FRF_AB_GPIO4_OUT_WIDTH 1 +#define FRF_AB_GPIO3_OUT_LBN 19 +#define FRF_AB_GPIO3_OUT_WIDTH 1 +#define FRF_AB_GPIO2_OUT_LBN 18 +#define FRF_AB_GPIO2_OUT_WIDTH 1 +#define FRF_AB_GPIO1_OUT_LBN 17 +#define FRF_AB_GPIO1_OUT_WIDTH 1 +#define FRF_AB_GPIO0_OUT_LBN 16 +#define FRF_AB_GPIO0_OUT_WIDTH 1 +#define FRF_AB_GPIO7_IN_LBN 15 +#define FRF_AB_GPIO7_IN_WIDTH 1 +#define FRF_AB_GPIO6_IN_LBN 14 +#define FRF_AB_GPIO6_IN_WIDTH 1 +#define FRF_AB_GPIO5_IN_LBN 13 +#define FRF_AB_GPIO5_IN_WIDTH 1 +#define FRF_AB_GPIO4_IN_LBN 12 +#define FRF_AB_GPIO4_IN_WIDTH 1 +#define FRF_AB_GPIO3_IN_LBN 11 +#define FRF_AB_GPIO3_IN_WIDTH 1 +#define FRF_AB_GPIO2_IN_LBN 10 +#define FRF_AB_GPIO2_IN_WIDTH 1 +#define FRF_AB_GPIO1_IN_LBN 9 +#define FRF_AB_GPIO1_IN_WIDTH 1 +#define FRF_AB_GPIO0_IN_LBN 8 +#define FRF_AB_GPIO0_IN_WIDTH 1 +#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 +#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 +#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 +#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 +#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 +#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 +#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 
+
+/* GLB_CTL_REG: Global control register */
+#define FR_AB_GLB_CTL 0x00000220
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FR_AZ_FATAL_INTR_KER 0x00000230
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define FR_BZ_DP_CTRL 0x00000250
+#define FRF_BZ_FLS_EVQ_ID_LBN 0
+#define FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define FR_AZ_MEM_STAT 0x00000260
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define FR_AZ_CS_DEBUG 0x00000270
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define FR_AZ_DRIVER 0x00000280
+#define FR_AZ_DRIVER_STEP 16
+#define FR_AZ_DRIVER_ROWS 8
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+/* ALTERA_BUILD_REG: Altera build register */
+#define FR_AZ_ALTERA_BUILD 0x00000300
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define FR_AZ_CSR_SPARE 0x00000310
+#define FRF_AB_MEM_PERR_EN_LBN 64
+#define FRF_AB_MEM_PERR_EN_WIDTH 38
+#define FRF_CZ_MEM_PERR_EN_LBN 64
+#define FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define FR_AB_PCIE_SD_CTL0123 0x00000320
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define FR_AB_PCIE_SD_CTL45 0x00000330
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define FR_BB_DEBUG_DATA_OUT 0x00000350
+#define FRF_BB_DEBUG2_PORT_LBN 25
+#define FRF_BB_DEBUG2_PORT_WIDTH 15
+#define FRF_BB_DEBUG1_PORT_LBN 0
+#define FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR_P0 0x00000400
+#define FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define FR_AA_EVQ_RPTR_KER 0x00011b00
+#define FR_AA_EVQ_RPTR_KER_STEP 4
+#define FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR 0x00fa0000
+#define FR_BZ_EVQ_RPTR_STEP 16
+#define FR_BB_EVQ_RPTR_ROWS 4096
+#define FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define FR_BB_EVQ_RPTR_P123 0x01000400
+#define FR_BB_EVQ_RPTR_P123_STEP 8192
+#define FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define FR_AA_TIMER_COMMAND_KER 0x00000420
+#define FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define FR_BB_TIMER_COMMAND_P123 0x01000420
+#define FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+/* DRV_EV_REG: Driver generated event register */
+#define FR_AZ_DRV_EV 0x00000440
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define FR_AZ_EVQ_CTL 0x00000450
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define FR_AZ_EVQ_CNT1 0x00000460
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define FR_AZ_EVQ_CNT2 0x00000470
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define FR_CZ_USR_EV 0x00000540
+#define FR_CZ_USR_EV_STEP 8192
+#define FR_CZ_USR_EV_ROWS 1024
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define FR_AZ_BUF_TBL_CFG 0x00000600
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define FR_AZ_SRM_CFG 0x00000630
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define FR_AZ_BUF_TBL_UPD 0x00000650
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+/* SRM_UPD_EVQ_REG: Buffer table update register */
+#define FR_AZ_SRM_UPD_EVQ 0x00000660
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define FR_AZ_SRAM_PARITY 0x00000670
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define FR_AZ_RX_CFG 0x00000800
+#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control registers */
+#define FR_BZ_RX_FILTER_CTL 0x00000810
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_NUM_KER_LBN 24
+#define FRF_BZ_NUM_KER_WIDTH 2
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define FR_AA_RX_DESC_UPD_KER 0x00000830
+#define FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define FR_BB_RX_DESC_UPD_P123 0x01000830
+#define FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define FR_AZ_RX_DC_CFG 0x00000840
+#define FRF_AB_RX_MAX_PF_LBN 2
+#define FRF_AB_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define FR_AZ_RX_DC_PF_WM 0x00000850
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define FR_BZ_RX_RSS_TKEY 0x00000860
+#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define FR_AZ_RX_NODESC_DROP 0x00000880
+#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define FR_AA_RX_SELF_RST 0x00000890
+#define FRF_AA_RX_ISCSI_DIS_LBN 17
+#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AA_RX_SW_RST_REG_LBN 16
+#define FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define FRF_AA_RX_SELF_RST_EN_LBN 8
+#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define FR_AZ_RX_DEBUG 0x000008a0
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define FR_AZ_RX_PUSH_DROP 0x000008b0
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define FR_AZ_TX_DC_CFG 0x00000a20
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define FR_AA_TX_CHKSM_CFG 0x00000a30
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+/* TX_CFG_REG: Transmit configuration register */
+#define FR_AZ_TX_CFG 0x00000a50
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit push dropped register */
+#define FR_AZ_TX_PUSH_DROP 0x00000a60
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Transmit configuration register */
+#define FR_AZ_TX_RESERVED 0x00000a80
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define FR_BZ_TX_PACE 0x00000a90
+#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
+#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define FR_BB_TX_VLAN 0x00000ae0
+#define FRF_BB_TX_VLAN_EN_LBN 127
+#define FRF_BB_TX_VLAN_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_LBN 112
+#define FRF_BB_TX_VLAN7_WIDTH 12
+#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_LBN 96
+#define FRF_BB_TX_VLAN6_WIDTH 12
+#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_LBN 80
+#define FRF_BB_TX_VLAN5_WIDTH 12
+#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_LBN 64
+#define FRF_BB_TX_VLAN4_WIDTH 12
+#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_LBN 48
+#define FRF_BB_TX_VLAN3_WIDTH 12
+#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_LBN 32
+#define FRF_BB_TX_VLAN2_WIDTH 12
+#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_LBN 16
+#define FRF_BB_TX_VLAN1_WIDTH 12
+#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_LBN 0
+#define FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
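+/* Addressing sketch (illustrative, hypothetical helper): registers that
+ * carry _STEP and _ROWS definitions, such as FR_BZ_EVQ_RPTR_P0 above, are
+ * arrays of identically laid out entries.  Entry i of such a table sits at
+ * base + i * step and is only valid for i < rows:
+ *
+ *	static inline unsigned int fr_row_addr(unsigned int base,
+ *					       unsigned int step,
+ *					       unsigned int index)
+ *	{
+ *		return base + index * step;
+ *	}
+ *
+ *	rptr_reg = fr_row_addr(FR_BZ_EVQ_RPTR_P0, FR_BZ_EVQ_RPTR_P0_STEP, evq);
+ */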
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define FR_BB_TX_IPFIL_TBL 0x00001000
+#define FR_BB_TX_IPFIL_TBL_STEP 16
+#define FR_BB_TX_IPFIL_TBL_ROWS 16
+#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define FR_AB_MD_TXD 0x00000c00
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define FR_AB_MD_RXD 0x00000c10
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define FR_AB_MD_CS 0x00000c20
+#define FRF_AB_MD_RD_EN_CMD_LBN 15
+#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define FRF_AB_MD_WR_EN_CMD_LBN 14
+#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define FR_AB_MD_PHY_ADR 0x00000c30
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define FR_AB_MD_ID 0x00000c40
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define FR_AB_MD_STAT 0x00000c50
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define FR_AB_MAC_STAT_DMA 0x00000c60
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define FR_AB_MAC_CTRL 0x00000c80
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FFE_AB_MAC_SPEED_10G 3
+#define FFE_AB_MAC_SPEED_1G 2
+#define FFE_AB_MAC_SPEED_100M 1
+#define FFE_AB_MAC_SPEED_10M 0
+
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define FR_BB_GEN_MODE 0x00000c90
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define FR_AB_GM_CFG1 0x00000e00
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define FR_AB_GM_CFG2 0x00000e10
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FFE_AB_IF_MODE_BYTE_MODE 2
+#define FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define FR_AB_GM_IPG 0x00000e20
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define FR_AB_GM_HD 0x00000e30
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define FR_AB_GM_MAX_FLEN 0x00000e40
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define FR_AB_GM_TEST 0x00000e70
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define FR_AB_GM_ADR1 0x00000f00
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define FR_AB_GM_ADR2 0x00000f10
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define FR_AB_GMF_CFG0 0x00000f20
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define FR_AB_GMF_CFG1 0x00000f30
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define FR_AB_GMF_CFG2 0x00000f40
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define FR_AB_GMF_CFG3 0x00000f50
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FR_AB_GMF_CFG4 0x00000f60
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FR_AB_GMF_CFG5 0x00000f70
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit IP source address filter table */
+#define FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define FR_AB_XM_ADR_LO 0x00001200
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define FR_AB_XM_ADR_HI 0x00001210
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define FR_AB_XM_GLB_CFG 0x00001220
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define FR_AB_XM_TX_CFG 0x00001230
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define FR_AB_XM_RX_CFG 0x00001240
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */
+#define FR_AB_XM_MGT_INT_MASK 0x00001250
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define FR_AB_XM_FC 0x00001270
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define FR_AB_XM_PAUSE_TIME 0x00001290
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FR_AB_XM_TX_PARAM 0x000012d0
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FR_AB_XM_RX_PARAM 0x000012e0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
+#define FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define FR_AB_XX_PWR_RST 0x00001300
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
+#define FR_AB_XX_SD_CTL 0x00001310
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define FR_AB_XX_TXDRV_CTL 0x00001320
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: documentation to be written for sum_XX_PRBS_CTL_REG */
+#define FR_AB_XX_PRBS_CTL 0x00001330
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: documentation to be written for sum_XX_PRBS_CHK_REG */
+#define FR_AB_XX_PRBS_CHK 0x00001340
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: documentation to be written for sum_XX_PRBS_ERR_REG */
+#define FR_AB_XX_PRBS_ERR 0x00001350
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define FR_AB_XX_CORE_STAT 0x00001360
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AA_RX_RESET_LBN 89
+#define FRF_AA_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define
FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_RX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_RX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_RX_DESCQ_SIZE_4K 3 +#define FFE_AZ_RX_DESCQ_SIZE_2K 2 +#define FFE_AZ_RX_DESCQ_SIZE_1K 1 +#define FFE_AZ_RX_DESCQ_SIZE_512 0 +#define FRF_AZ_RX_DESCQ_TYPE_LBN 2 +#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 +#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 +#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 +#define FRF_AZ_RX_DESCQ_EN_LBN 0 +#define FRF_AZ_RX_DESCQ_EN_WIDTH 1 + +/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */ +#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 +#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 +/* TX_DESC_PTR_TBL: Transmit descriptor pointer */ +#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 +#define FR_BZ_TX_DESC_PTR_TBL_STEP 16 +#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 +#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 +#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 +#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 +#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 +#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 +#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 +#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 +#define FRF_AZ_TX_DESCQ_EN_LBN 88 +#define FRF_AZ_TX_DESCQ_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 +#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 +#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_TX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_TX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_TX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_TX_DESCQ_SIZE_4K 3 +#define FFE_AZ_TX_DESCQ_SIZE_2K 2 +#define FFE_AZ_TX_DESCQ_SIZE_1K 1 +#define FFE_AZ_TX_DESCQ_SIZE_512 0 +#define FRF_AZ_TX_DESCQ_TYPE_LBN 1 +#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 +#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 +#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 + +/* EVQ_PTR_TBL_KER: Event queue pointer table */ +#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 +#define FR_AA_EVQ_PTR_TBL_KER_STEP 16 +#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4 +/* EVQ_PTR_TBL: Event queue pointer table */ +#define FR_BZ_EVQ_PTR_TBL 0x00f60000 +#define FR_BZ_EVQ_PTR_TBL_STEP 16 +#define FR_CZ_EVQ_PTR_TBL_ROWS 1024 +#define FR_BB_EVQ_PTR_TBL_ROWS 4096 +#define FRF_BZ_EVQ_RPTR_IGN_LBN 40 +#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 +#define FRF_AZ_EVQ_NXT_WPTR_LBN 24 
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 +#define FRF_AZ_EVQ_EN_LBN 23 +#define FRF_AZ_EVQ_EN_WIDTH 1 +#define FRF_AZ_EVQ_SIZE_LBN 20 +#define FRF_AZ_EVQ_SIZE_WIDTH 3 +#define FFE_AZ_EVQ_SIZE_32K 6 +#define FFE_AZ_EVQ_SIZE_16K 5 +#define FFE_AZ_EVQ_SIZE_8K 4 +#define FFE_AZ_EVQ_SIZE_4K 3 +#define FFE_AZ_EVQ_SIZE_2K 2 +#define FFE_AZ_EVQ_SIZE_1K 1 +#define FFE_AZ_EVQ_SIZE_512 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 + +/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */ +#define FR_AA_BUF_HALF_TBL_KER 0x00018000 +#define FR_AA_BUF_HALF_TBL_KER_STEP 8 +#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 +/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */ +#define FR_BZ_BUF_HALF_TBL 0x00800000 +#define FR_BZ_BUF_HALF_TBL_STEP 8 +#define FR_CZ_BUF_HALF_TBL_ROWS 147456 +#define FR_BB_BUF_HALF_TBL_ROWS 524288 +#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 +#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 + +/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */ +#define FR_AA_BUF_FULL_TBL_KER 0x00018000 +#define FR_AA_BUF_FULL_TBL_KER_STEP 8 +#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 +/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */ +#define FR_BZ_BUF_FULL_TBL 0x00800000 +#define FR_BZ_BUF_FULL_TBL_STEP 8 +#define FR_CZ_BUF_FULL_TBL_ROWS 147456 +#define FR_BB_BUF_FULL_TBL_ROWS 917504 +#define FRF_AZ_BUF_FULL_UNUSED_LBN 51 +#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 +#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 +#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 +#define FRF_AZ_BUF_ADR_REGION_LBN 48 +#define FRF_AZ_BUF_ADR_REGION_WIDTH 2 +#define FFE_AZ_BUF_ADR_REGN3 3 +#define FFE_AZ_BUF_ADR_REGN2 2 +#define FFE_AZ_BUF_ADR_REGN1 1 +#define FFE_AZ_BUF_ADR_REGN0 0 +#define FRF_AZ_BUF_ADR_FBUF_LBN 14 +#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 +#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 + +/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ +#define FR_BZ_RX_FILTER_TBL0 0x00f00000 +#define FR_BZ_RX_FILTER_TBL0_STEP 32 +#define FR_BZ_RX_FILTER_TBL0_ROWS 8192 +/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ +#define FR_BB_RX_FILTER_TBL1 0x00f00010 +#define FR_BB_RX_FILTER_TBL1_STEP 32 +#define FR_BB_RX_FILTER_TBL1_ROWS 8192 +#define FRF_BZ_RSS_EN_LBN 110 +#define FRF_BZ_RSS_EN_WIDTH 1 +#define FRF_BZ_SCATTER_EN_LBN 109 +#define FRF_BZ_SCATTER_EN_WIDTH 1 +#define FRF_BZ_TCP_UDP_LBN 108 +#define FRF_BZ_TCP_UDP_WIDTH 1 +#define FRF_BZ_RXQ_ID_LBN 96 +#define FRF_BZ_RXQ_ID_WIDTH 12 +#define FRF_BZ_DEST_IP_LBN 64 +#define FRF_BZ_DEST_IP_WIDTH 32 +#define FRF_BZ_DEST_PORT_TCP_LBN 48 +#define FRF_BZ_DEST_PORT_TCP_WIDTH 16 +#define FRF_BZ_SRC_IP_LBN 16 +#define FRF_BZ_SRC_IP_WIDTH 32 +#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 + +/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ +#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 +#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 +#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_RMFT_RSS_EN_LBN 75 +#define FRF_CZ_RMFT_RSS_EN_WIDTH 1 +#define FRF_CZ_RMFT_SCATTER_EN_LBN 74 +#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 +#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 +#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 +#define 
FRF_CZ_RMFT_RXQ_ID_LBN 61 +#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 +#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_RMFT_DEST_MAC_LBN 12 +#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48 +#define FRF_CZ_RMFT_VLAN_ID_LBN 0 +#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 + +/* TIMER_TBL: Timer table */ +#define FR_BZ_TIMER_TBL 0x00f70000 +#define FR_BZ_TIMER_TBL_STEP 16 +#define FR_CZ_TIMER_TBL_ROWS 1024 +#define FR_BB_TIMER_TBL_ROWS 4096 +#define FRF_CZ_TIMER_Q_EN_LBN 33 +#define FRF_CZ_TIMER_Q_EN_WIDTH 1 +#define FRF_CZ_INT_ARMD_LBN 32 +#define FRF_CZ_INT_ARMD_WIDTH 1 +#define FRF_CZ_INT_PEND_LBN 31 +#define FRF_CZ_INT_PEND_WIDTH 1 +#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 +#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 +#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 +#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 +#define FRF_CZ_TIMER_MODE_LBN 14 +#define FRF_CZ_TIMER_MODE_WIDTH 2 +#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 +#define FFE_CZ_TIMER_MODE_TRIG_START 2 +#define FFE_CZ_TIMER_MODE_IMMED_START 1 +#define FFE_CZ_TIMER_MODE_DIS 0 +#define FRF_BB_TIMER_MODE_LBN 12 +#define FRF_BB_TIMER_MODE_WIDTH 2 +#define FFE_BB_TIMER_MODE_INT_HLDOFF 2 +#define FFE_BB_TIMER_MODE_TRIG_START 2 +#define FFE_BB_TIMER_MODE_IMMED_START 1 +#define FFE_BB_TIMER_MODE_DIS 0 +#define FRF_CZ_TIMER_VAL_LBN 0 +#define FRF_CZ_TIMER_VAL_WIDTH 14 +#define FRF_BB_TIMER_VAL_LBN 0 +#define FRF_BB_TIMER_VAL_WIDTH 12 + +/* TX_PACE_TBL: Transmit pacing table */ +#define FR_BZ_TX_PACE_TBL 0x00f80000 +#define FR_BZ_TX_PACE_TBL_STEP 16 +#define FR_CZ_TX_PACE_TBL_ROWS 1024 +#define FR_BB_TX_PACE_TBL_ROWS 4096 +#define FRF_BZ_TX_PACE_LBN 0 +#define FRF_BZ_TX_PACE_WIDTH 5 + +/* RX_INDIRECTION_TBL: RX Indirection Table */ +#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 +#define FR_BZ_RX_INDIRECTION_TBL_STEP 16 +#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 +#define FRF_BZ_IT_QUEUE_LBN 0 +#define FRF_BZ_IT_QUEUE_WIDTH 6 + +/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ +#define FR_CZ_TX_FILTER_TBL0 0x00fc0000 +#define FR_CZ_TX_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_FILTER_TBL0_ROWS 8192 +#define FRF_CZ_TIFT_TCP_UDP_LBN 108 +#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 +#define FRF_CZ_TIFT_TXQ_ID_LBN 96 +#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TIFT_DEST_IP_LBN 64 +#define FRF_CZ_TIFT_DEST_IP_WIDTH 32 +#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 +#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 +#define FRF_CZ_TIFT_SRC_IP_LBN 16 +#define FRF_CZ_TIFT_SRC_IP_WIDTH 32 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 + +/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ +#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 +#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_TMFT_TXQ_ID_LBN 61 +#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_TMFT_SRC_MAC_LBN 12 +#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48 +#define FRF_CZ_TMFT_VLAN_ID_LBN 0 +#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 + +/* MC_TREG_SMEM: MC Shared Memory */ +#define FR_CZ_MC_TREG_SMEM 0x00ff0000 +#define FR_CZ_MC_TREG_SMEM_STEP 4 +#define FR_CZ_MC_TREG_SMEM_ROWS 512 +#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 +#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 + +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 +#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 +#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 +/* 
FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ +#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 +#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 +#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 +#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 +#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 +#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 +#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 + +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_BB_MSIX_PBA_TABLE 0x00ff2000 +#define FR_BZ_MSIX_PBA_TABLE_STEP 4 +#define FR_BB_MSIX_PBA_TABLE_ROWS 2 +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_MSIX_PBA_TABLE 0x00008000 +/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ +#define FR_CZ_MSIX_PBA_TABLE_ROWS 32 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* SRM_DBG_REG: SRAM debug access */ +#define FR_BZ_SRM_DBG 0x03000000 +#define FR_BZ_SRM_DBG_STEP 8 +#define FR_CZ_SRM_DBG_ROWS 262144 +#define FR_BB_SRM_DBG_ROWS 2097152 +#define FRF_BZ_SRM_DBG_LBN 0 +#define FRF_BZ_SRM_DBG_WIDTH 64 + +/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 +#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 +#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* DRIVER_EV */ +#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 +#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 +#define FSE_BZ_TX_DSC_ERROR_EV 15 +#define FSE_BZ_RX_DSC_ERROR_EV 14 +#define FSE_AA_RX_RECOVER_EV 11 +#define FSE_AZ_TIMER_EV 10 +#define FSE_AZ_TX_PKT_NON_TCP_UDP 9 +#define FSE_AZ_WAKE_UP_EV 6 +#define FSE_AZ_SRM_UPD_DONE_EV 5 +#define FSE_AB_EVQ_NOT_EN_EV 3 +#define FSE_AZ_EVQ_INIT_DONE_EV 2 +#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 +#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 + +/* EVENT_ENTRY */ +#define FSF_AZ_EV_CODE_LBN 60 +#define FSF_AZ_EV_CODE_WIDTH 4 +#define FSE_CZ_EV_CODE_MCDI_EV 12 +#define FSE_CZ_EV_CODE_USER_EV 8 +#define FSE_AZ_EV_CODE_DRV_GEN_EV 7 +#define FSE_AZ_EV_CODE_GLOBAL_EV 6 +#define FSE_AZ_EV_CODE_DRIVER_EV 5 +#define FSE_AZ_EV_CODE_TX_EV 2 +#define FSE_AZ_EV_CODE_RX_EV 0 +#define FSF_AZ_EV_DATA_LBN 0 +#define FSF_AZ_EV_DATA_WIDTH 60 + +/* GLOBAL_EV */ +#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 +#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 +#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 +#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 +#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 + +/* LEGACY_INT_VEC */ +#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 +#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 +#define FSF_AZ_NET_IVEC_INT_Q_LBN 40 +#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 +#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 +#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 + +/* MC_XGMAC_FLTR_RULE_DEF */ +#define FSF_CZ_MC_XFRC_MODE_LBN 416 +#define FSF_CZ_MC_XFRC_MODE_WIDTH 1 +#define FSE_CZ_MC_XFRC_MODE_LAYERED 1 +#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 
+#define FSF_CZ_MC_XFRC_HASH_LBN 384 +#define FSF_CZ_MC_XFRC_HASH_WIDTH 32 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 + +/* RX_EV */ +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 +#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 +#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_OK_LBN 56 +#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 +#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 +#define FSF_AA_RX_EV_DRIB_NIB_LBN 49 +#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 +#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 +#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 +#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 +#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 +#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 +#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 +#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 +#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 +#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 +#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 +#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 +#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 +#define FSF_AZ_RX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 +#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 +#define FSF_AZ_RX_EV_PORT_LBN 30 +#define FSF_AZ_RX_EV_PORT_WIDTH 1 +#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 +#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 +#define FSF_AZ_RX_EV_SOP_LBN 15 +#define FSF_AZ_RX_EV_SOP_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 + +/* RX_KER_DESC */ +#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 +#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 +#define FSF_AZ_RX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 + +/* RX_USER_DESC */ +#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 +#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 +#define 
FSF_AZ_RX_USER_BUF_ID_LBN 0 +#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 + +/* TX_EV */ +#define FSF_AZ_TX_EV_PKT_ERR_LBN 38 +#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 +#define FSF_AZ_TX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_TX_EV_PORT_LBN 16 +#define FSF_AZ_TX_EV_PORT_WIDTH 1 +#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 +#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_COMP_LBN 12 +#define FSF_AZ_TX_EV_COMP_WIDTH 1 +#define FSF_AZ_TX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 + +/* TX_KER_DESC */ +#define FSF_AZ_TX_KER_CONT_LBN 62 +#define FSF_AZ_TX_KER_CONT_WIDTH 1 +#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 +#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 +#define FSF_AZ_TX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 + +/* TX_USER_DESC */ +#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 +#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 +#define FSF_AZ_TX_USER_CONT_LBN 46 +#define FSF_AZ_TX_USER_CONT_WIDTH 1 +#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 +#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 +#define FSF_AZ_TX_USER_BUF_ID_LBN 13 +#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 +#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 +#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 + +/* USER_EV */ +#define FSF_CZ_USER_QID_LBN 32 +#define FSF_CZ_USER_QID_WIDTH 10 +#define FSF_CZ_USER_EV_REG_VALUE_LBN 0 +#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 + +/************************************************************************** + * + * Falcon B0 PCIe core indirect registers + * + ************************************************************************** + */ + +#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 + +#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 + +#define FPCR_BB_ACK_RPL_TIMER 0x700 +#define FPCRF_BB_ACK_TL_LBN 0 +#define FPCRF_BB_ACK_TL_WIDTH 16 +#define FPCRF_BB_RPL_TL_LBN 16 +#define FPCRF_BB_RPL_TL_WIDTH 16 + +#define FPCR_BB_ACK_FREQ 0x70C +#define FPCRF_BB_ACK_FREQ_LBN 0 +#define FPCRF_BB_ACK_FREQ_WIDTH 7 + +/************************************************************************** + * + * Pseudo-registers and fields + * + ************************************************************************** + */ + +/* Interrupt acknowledge work-around register (A0/A1 only) */ +#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 + +/* EE_SPI_HCMD_REG: SPI host command register */ +/* Values for the EE_SPI_HCMD_SF_SEL register field */ +#define FFE_AB_SPI_DEVICE_EEPROM 0 +#define FFE_AB_SPI_DEVICE_FLASH 1 + +/* NIC_STAT_REG: NIC status register */ +#define FRF_AB_STRAP_10G_LBN 2 +#define FRF_AB_STRAP_10G_WIDTH 1 +#define FRF_AA_STRAP_PCIE_LBN 0 +#define FRF_AA_STRAP_PCIE_WIDTH 1 + +/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ +#define FRF_AZ_FATAL_INTR_LBN 0 +#define FRF_AZ_FATAL_INTR_WIDTH 12 + +/* SRM_CFG_REG: SRAM configuration register */ +/* We treat the number of SRAM banks and bank size as a single field */ +#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN +#define FRF_AZ_SRM_NB_SZ_WIDTH \ + (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) +#define FFE_AB_SRM_NB1_SZ2M 0 +#define FFE_AB_SRM_NB1_SZ4M 1 +#define FFE_AB_SRM_NB1_SZ8M 2 +#define FFE_AB_SRM_NB_SZ_DEF 3 +#define FFE_AB_SRM_NB2_SZ4M 4 +#define FFE_AB_SRM_NB2_SZ8M 5 +#define FFE_AB_SRM_NB2_SZ16M 6 +#define FFE_AB_SRM_NB_SZ_RES 7 + +/* RX_DESC_UPD_REGP0: 
Receive descriptor update register. */ +/* We write just the last dword of these registers */ +#define FR_AZ_RX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ + FR_BZ_RX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH + +/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ +#define FR_AZ_TX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ + FR_BZ_TX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH + +/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 + +/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 + +/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) + +/* XM_RX_PARAM_REG: XGMAC receive parameter register */ +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) + +/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ +/* Default values */ +#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ +#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ +#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ + +/* XX_CORE_STAT_REG: XAUI XGXS core status register */ +/* XGXS all-lanes status fields */ +#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN +#define FRF_AB_XX_SYNC_STAT_WIDTH 4 +#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN +#define FRF_AB_XX_COMMA_DET_WIDTH 4 +#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN +#define FRF_AB_XX_CHAR_ERR_WIDTH 4 +#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN +#define FRF_AB_XX_DISPERR_WIDTH 4 +#define FFE_AB_XX_STAT_ALL_LANES 0xf +#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN +#define FRF_AB_XX_FORCE_SIG_WIDTH 8 +#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff + +/* RX_MAC_FILTER_TBL0 */ +/* RMFT_DEST_MAC is wider than 32 bits */ +#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN +#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 +#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32) +#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32) + +/* TX_MAC_FILTER_TBL0 */ +/* TMFT_SRC_MAC is wider than 32 bits */ +#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN +#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 +#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32) +#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32) + +/* TX_PACE_TBL */ +/* Values >20 are documented as reserved, but will result in a queue going + * into the fast bin with a pace value of zero. 
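+ * (FFE_BZ_TX_PACE_RESERVED below names the first of those reserved
+ * values, and FFE_BZ_TX_PACE_OFF is the zero, pacing-off, setting.)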
 */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
+
+#endif /* EFX_FARCH_REGS_H */
diff --git a/drivers/net/ethernet/sfc/siena/filter.h b/drivers/net/ethernet/sfc/siena/filter.h
new file mode 100644
index 0000000000..40b2af8bfb
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/filter.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_FILTER_H
+#define EFX_FILTER_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/**
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ *	Used for RX default unicast and multicast/broadcast filters.
+ * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
+ *
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ *   local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ *   or local 2-tuple, or local MAC with or without outer VID, and RX
+ *   default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ *   using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ *   with or without outer and inner VID
+ */
+enum efx_filter_match_flags {
+	EFX_FILTER_MATCH_REM_HOST = 0x0001,
+	EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+	EFX_FILTER_MATCH_REM_MAC = 0x0004,
+	EFX_FILTER_MATCH_REM_PORT = 0x0008,
+	EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+	EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+	EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+	EFX_FILTER_MATCH_INNER_VID = 0x0080,
+	EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+	EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+	EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
+	EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800,
+};
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ *	or hardware requirements. This may only be used by the filter
+ *	implementation for each NIC type.
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
+ *	networking and SR-IOV)
+ */
+enum efx_filter_priority {
+	EFX_FILTER_PRI_HINT = 0,
+	EFX_FILTER_PRI_AUTO,
+	EFX_FILTER_PRI_MANUAL,
+	EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ *	By default, matching packets will be delivered only to the
+ *	specified queue. If this flag is set, they will be delivered
+ *	to a range of queues offset from the specified queue number
+ *	according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ *	queue.
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ *	overriding an automatic filter (priority
+ *	%EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ *	implementation for each type. A removal request will restore
+ *	the automatic filter in its place.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
+ * @EFX_FILTER_FLAG_TX: Filter is for TX
+ */
+enum efx_filter_flags {
+	EFX_FILTER_FLAG_RX_RSS = 0x01,
+	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+	EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
+	EFX_FILTER_FLAG_RX = 0x08,
+	EFX_FILTER_FLAG_TX = 0x10,
+};
+
+/** enum efx_encap_type - types of encapsulation
+ * @EFX_ENCAP_TYPE_NONE: no encapsulation
+ * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
+ * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
+ * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
+ * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
+ *
+ * Contains both enumerated types and flags.
+ * To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
+ */
+enum efx_encap_type {
+	EFX_ENCAP_TYPE_NONE = 0,
+	EFX_ENCAP_TYPE_VXLAN = 1,
+	EFX_ENCAP_TYPE_NVGRE = 2,
+	EFX_ENCAP_TYPE_GENEVE = 3,
+
+	EFX_ENCAP_TYPES_MASK = 7,
+	EFX_ENCAP_FLAG_IPV6 = 8,
+};
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set. This
+ *	is a user_id (with 0 meaning the driver/default RSS context), not an
+ *	MCFW context_id.
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ *	an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ *	%EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ *	is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
+ *	%EFX_FILTER_MATCH_ENCAP_TYPE is set
+ *
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure.
The efx_filter_set_*() functions + * may then be used to set @rss_context, @match_flags and related + * fields. + * + * The @priority field is used by software to determine whether a new + * filter may replace an old one. The hardware priority of a filter + * depends on which fields are matched. + */ +struct efx_filter_spec { + u32 match_flags:12; + u32 priority:2; + u32 flags:6; + u32 dmaq_id:12; + u32 rss_context; + __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + __be16 inner_vid; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + __be16 ether_type; + u8 ip_proto; + __be32 loc_host[4]; + __be32 rem_host[4]; + __be16 loc_port; + __be16 rem_port; + u32 encap_type:4; + /* total 65 bytes */ +}; + +enum { + EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff +}; + +static inline void efx_filter_init_rx(struct efx_filter_spec *spec, + enum efx_filter_priority priority, + enum efx_filter_flags flags, + unsigned rxq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = priority; + spec->flags = EFX_FILTER_FLAG_RX | flags; + spec->rss_context = 0; + spec->dmaq_id = rxq_id; +} + +static inline void efx_filter_init_tx(struct efx_filter_spec *spec, + unsigned txq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = EFX_FILTER_PRI_REQUIRED; + spec->flags = EFX_FILTER_FLAG_TX; + spec->dmaq_id = txq_id; +} + +/** + * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @host: Local host address (network byte order) + * @port: Local port (network byte order) + */ +static inline int +efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, + __be32 host, __be16 port) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = host; + spec->loc_port = port; + return 0; +} + +/** + * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @lhost: Local host address (network byte order) + * @lport: Local port (network byte order) + * @rhost: Remote host address (network byte order) + * @rport: Remote port (network byte order) + */ +static inline int +efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, + __be32 lhost, __be16 lport, + __be32 rhost, __be16 rport) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = lhost; + spec->loc_port = lport; + spec->rem_host[0] = rhost; + spec->rem_port = rport; + return 0; +} + +enum { + EFX_FILTER_VID_UNSPEC = 0xffff, +}; + +/** + * efx_filter_set_eth_local - specify local Ethernet address and/or VID + * @spec: Specification to initialise + * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC + * @addr: Local Ethernet MAC address, or %NULL + */ +static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec, + u16 vid, const u8 *addr) +{ + if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL) + return -EINVAL; + + if (vid != EFX_FILTER_VID_UNSPEC) { + spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec->outer_vid = htons(vid); + } + if (addr != NULL) { + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; + 
ether_addr_copy(spec->loc_mac, addr);
+	}
+	return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+	return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+	spec->loc_mac[0] = 1;
+	return 0;
+}
+
+static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
+					     enum efx_encap_type encap_type)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+	spec->encap_type = encap_type;
+}
+
+static inline enum efx_encap_type efx_filter_get_encap_type(
+		const struct efx_filter_spec *spec)
+{
+	if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
+		return spec->encap_type;
+	return EFX_ENCAP_TYPE_NONE;
+}
+#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/siena/io.h b/drivers/net/ethernet/sfc/siena/io.h
new file mode 100644
index 0000000000..30439cc83a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/io.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy for the Falcon architecture:
+ *
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits. Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written. A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
+ *
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes. We use
+ * efx_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock. This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
+ *
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
+ *
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ *   replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ *   (i.e. the high 32 bits) the underlying register will always be
+ *   written. If the collector and the current write together do not
+ *   provide values for all 128 bits of the register, the low 96 bits
+ *   will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ *   register while the collector already holds values for some other
+ *   register, the write is discarded and the collector maintains its
+ *   current state.
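+ *
+ * (An illustrative consequence, not spelled out in the hardware docs:
+ * a write that only touches the final dword of TX_DESC_UPD -- which is
+ * what efx_writed_page() below is used for -- needs no lock, since the
+ * collector supplies zero for the low 96 bits and zero is harmless
+ * there by the second rule above.)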
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
+ */
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+/* Hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by hardware. It's not strictly necessary to restrict to
+ * x86_64 arch, but done for safety since unusual write combining behaviour
+ * can break PIO.
+ */
+#ifdef CONFIG_X86_64
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+#endif
+
+static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
+{
+	return efx->reg_base + reg;
+}
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+			       unsigned int reg)
+{
+	__raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+			       unsigned int reg)
+{
+	__raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
+			      unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	_efx_writeq(efx, value->u64[0], reg + 0);
+	_efx_writeq(efx, value->u64[1], reg + 8);
+#else
+	_efx_writed(efx, value->u32[0], reg + 0);
+	_efx_writed(efx, value->u32[1], reg + 4);
+	_efx_writed(efx, value->u32[2], reg + 8);
+	_efx_writed(efx, value->u32[3], reg + 12);
+#endif
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+				   const efx_qword_t *value, unsigned int index)
+{
+	unsigned int addr = index * sizeof(*value);
+	unsigned long flags __attribute__ ((unused));
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+		   addr, EFX_QWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	__raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+	__raw_writel((__force u32)value->u32[0], membase + addr);
+	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
+			      unsigned int reg)
+{
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with "EFX_DWORD_FMT"\n",
+		   reg, EFX_DWORD_VAL(*value));
+
+	/* No lock required */
+	_efx_writed(efx, value->u32[0], reg);
+}
+
+/* Read a 128-bit CSR, locking as appropriate.
*/ +static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg) +{ + unsigned long flags __attribute__ ((unused)); + + spin_lock_irqsave(&efx->biu_lock, flags); + value->u32[0] = _efx_readd(efx, reg + 0); + value->u32[1] = _efx_readd(efx, reg + 4); + value->u32[2] = _efx_readd(efx, reg + 8); + value->u32[3] = _efx_readd(efx, reg + 12); + spin_unlock_irqrestore(&efx->biu_lock, flags); + + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); +} + +/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */ +static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, + efx_qword_t *value, unsigned int index) +{ + unsigned int addr = index * sizeof(*value); + unsigned long flags __attribute__ ((unused)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + value->u64[0] = (__force __le64)__raw_readq(membase + addr); +#else + value->u32[0] = (__force __le32)__raw_readl(membase + addr); + value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); +#endif + spin_unlock_irqrestore(&efx->biu_lock, flags); + + netif_vdbg(efx, hw, efx->net_dev, + "read from SRAM address %x, got "EFX_QWORD_FMT"\n", + addr, EFX_QWORD_VAL(*value)); +} + +/* Read a 32-bit CSR or SRAM */ +static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, + unsigned int reg) +{ + value->u32[0] = _efx_readd(efx, reg); + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); +} + +/* Write a 128-bit CSR forming part of a table */ +static inline void +efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* Read a 128-bit CSR forming part of a table */ +static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* default VI stride (step between per-VI registers) is 8K on EF10 and + * 64K on EF100 + */ +#define EFX_DEFAULT_VI_STRIDE 0x2000 +#define EF100_DEFAULT_VI_STRIDE 0x10000 + +/* Calculate offset to page-mapped register */ +static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page, + unsigned int reg) +{ + return page * efx->vi_stride + reg; +} + +/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ +static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int page) +{ + reg = efx_paged_reg(efx, page, reg); + + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); + +#ifdef EFX_USE_QWORD_IO + _efx_writeq(efx, value->u64[0], reg + 0); + _efx_writeq(efx, value->u64[1], reg + 8); +#else + _efx_writed(efx, value->u32[0], reg + 0); + _efx_writed(efx, value->u32[1], reg + 4); + _efx_writed(efx, value->u32[2], reg + 8); + _efx_writed(efx, value->u32[3], reg + 12); +#endif +} +#define efx_writeo_page(efx, value, reg, page) \ + _efx_writeo_page(efx, value, \ + reg + \ + BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ + page) + +/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the + * high bits of RX_DESC_UPD or TX_DESC_UPD) + */ +static inline void +_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg, unsigned int page) +{ + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); +} +#define 
efx_writed_page(efx, value, reg, page) \
+	_efx_writed_page(efx, value,					\
+			 reg +						\
+			 BUILD_BUG_ON_ZERO((reg) != 0x180 &&		\
+					   (reg) != 0x200 &&		\
+					   (reg) != 0x400 &&		\
+					   (reg) != 0x420 &&		\
+					   (reg) != 0x830 &&		\
+					   (reg) != 0x83c &&		\
+					   (reg) != 0xa18 &&		\
+					   (reg) != 0xa1c),		\
+			 page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+					   const efx_dword_t *value,
+					   unsigned int reg,
+					   unsigned int page)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	if (page == 0) {
+		spin_lock_irqsave(&efx->biu_lock, flags);
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+		spin_unlock_irqrestore(&efx->biu_lock, flags);
+	} else {
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+	}
+}
+#define efx_writed_page_locked(efx, value, reg, page)			\
+	_efx_writed_page_locked(efx, value,				\
+				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+				page)
+
+#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
new file mode 100644
index 0000000000..3f7899daa8
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -0,0 +1,2260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/atomic.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "mcdi_pcol.h"
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+#define MCDI_RPC_TIMEOUT	(10 * HZ)
+
+/* A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 250ms for the status word to be set.
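+ * (That is MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT =
+ * 100us * 2500 = 250ms, which is what MCDI_STATUS_SLEEP_MS below
+ * works out to.)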
+ */ +#define MCDI_STATUS_DELAY_US 100 +#define MCDI_STATUS_DELAY_COUNT 2500 +#define MCDI_STATUS_SLEEP_MS \ + (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) + +#define SEQ_MASK \ + EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) + +struct efx_mcdi_async_param { + struct list_head list; + unsigned int cmd; + size_t inlen; + size_t outlen; + bool quiet; + efx_mcdi_async_completer *complete; + unsigned long cookie; + /* followed by request/response buffer */ +}; + +static void efx_mcdi_timeout_async(struct timer_list *t); +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached_out); +static bool efx_mcdi_poll_once(struct efx_nic *efx); +static void efx_mcdi_abandon(struct efx_nic *efx); + +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING +static bool efx_siena_mcdi_logging_default; +module_param_named(mcdi_logging_default, efx_siena_mcdi_logging_default, + bool, 0644); +MODULE_PARM_DESC(mcdi_logging_default, + "Enable MCDI logging on newly-probed functions"); +#endif + +int efx_siena_mcdi_init(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + bool already_attached; + int rc = -ENOMEM; + + efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); + if (!efx->mcdi) + goto fail; + + mcdi = efx_mcdi(efx); + mcdi->efx = efx; +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + /* consuming code assumes buffer is page-sized */ + mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL); + if (!mcdi->logging_buffer) + goto fail1; + mcdi->logging_enabled = efx_siena_mcdi_logging_default; +#endif + init_waitqueue_head(&mcdi->wq); + init_waitqueue_head(&mcdi->proxy_rx_wq); + spin_lock_init(&mcdi->iface_lock); + mcdi->state = MCDI_STATE_QUIESCENT; + mcdi->mode = MCDI_MODE_POLL; + spin_lock_init(&mcdi->async_lock); + INIT_LIST_HEAD(&mcdi->async_list); + timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0); + + (void)efx_siena_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + + /* Recover from a failed assertion before probing */ + rc = efx_siena_mcdi_handle_assertion(efx); + if (rc) + goto fail2; + + /* Let the MC (and BMC, if this is a LOM) know that the driver + * is loaded. We should do this before we reset the NIC. 
+ */ + rc = efx_mcdi_drv_attach(efx, true, &already_attached); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Unable to register driver with MCPU\n"); + goto fail2; + } + if (already_attached) + /* Not a fatal error */ + netif_err(efx, probe, efx->net_dev, + "Host already registered with MCPU\n"); + + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + efx->primary = efx; + + return 0; +fail2: +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + free_page((unsigned long)mcdi->logging_buffer); +fail1: +#endif + kfree(efx->mcdi); + efx->mcdi = NULL; +fail: + return rc; +} + +void efx_siena_mcdi_detach(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; + + BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); + + /* Relinquish the device (back to the BMC, if this is a LOM) */ + efx_mcdi_drv_attach(efx, false, NULL); +} + +void efx_siena_mcdi_fini(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; + +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + free_page((unsigned long)efx->mcdi->iface.logging_buffer); +#endif + + kfree(efx->mcdi); +} + +static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif + efx_dword_t hdr[2]; + size_t hdr_len; + u32 xflags, seqno; + + BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT); + + /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + seqno = mcdi->seqno & SEQ_MASK; + spin_unlock_bh(&mcdi->iface_lock); + + xflags = 0; + if (mcdi->mode == MCDI_MODE_EVENTS) + xflags |= MCDI_HEADER_XFLAGS_EVREQ; + + if (efx->type->mcdi_max_ver == 1) { + /* MCDI v1 */ + EFX_POPULATE_DWORD_7(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, cmd, + MCDI_HEADER_DATALEN, inlen, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); + hdr_len = 4; + } else { + /* MCDI v2 */ + BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); + EFX_POPULATE_DWORD_7(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, MC_CMD_V2_EXTN, + MCDI_HEADER_DATALEN, 0, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); + EFX_POPULATE_DWORD_2(hdr[1], + MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, + MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); + hdr_len = 8; + } + +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + int bytes = 0; + int i; + /* Lengths should always be a whole number of dwords, so scream + * if they're not. + */ + WARN_ON_ONCE(hdr_len % 4); + WARN_ON_ONCE(inlen % 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. 
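+ * As a worked example: a v1 command (hdr_len == 4) carrying an
+ * 8-byte request is rendered as three " %08x" words on one line.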
+ */ + for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++) + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(hdr[i].u32[0])); + + for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++) + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(inbuf[i].u32[0])); + + netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); + } +#endif + + efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); + + mcdi->new_epoch = false; +} + +static int efx_mcdi_errno(unsigned int mcdi_err) +{ + switch (mcdi_err) { + case 0: + return 0; +#define TRANSLATE_ERROR(name) \ + case MC_CMD_ERR_ ## name: \ + return -name; + TRANSLATE_ERROR(EPERM); + TRANSLATE_ERROR(ENOENT); + TRANSLATE_ERROR(EINTR); + TRANSLATE_ERROR(EAGAIN); + TRANSLATE_ERROR(EACCES); + TRANSLATE_ERROR(EBUSY); + TRANSLATE_ERROR(EINVAL); + TRANSLATE_ERROR(EDEADLK); + TRANSLATE_ERROR(ENOSYS); + TRANSLATE_ERROR(ETIME); + TRANSLATE_ERROR(EALREADY); + TRANSLATE_ERROR(ENOSPC); +#undef TRANSLATE_ERROR + case MC_CMD_ERR_ENOTSUP: + return -EOPNOTSUPP; + case MC_CMD_ERR_ALLOC_FAIL: + return -ENOBUFS; + case MC_CMD_ERR_MAC_EXIST: + return -EADDRINUSE; + default: + return -EPROTO; + } +} + +static void efx_mcdi_read_response_header(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + unsigned int respseq, respcmd, error; +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif + efx_dword_t hdr; + + efx->type->mcdi_read_response(efx, &hdr, 0, 4); + respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ); + respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); + error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); + + if (respcmd != MC_CMD_V2_EXTN) { + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); + } else { + efx->type->mcdi_read_response(efx, &hdr, 4, 4); + mcdi->resp_hdr_len = 8; + mcdi->resp_data_len = + EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); + } + +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + size_t hdr_len, data_len; + int bytes = 0; + int i; + + WARN_ON_ONCE(mcdi->resp_hdr_len % 4); + hdr_len = mcdi->resp_hdr_len / 4; + /* MCDI_DECLARE_BUF ensures that underlying buffer is padded + * to dword size, and the MCDI buffer is always dword size + */ + data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. 
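+		 *
+		 * The RESP trace below mirrors the REQ trace in
+		 * efx_mcdi_send_request(): header dword(s) first, then the
+		 * dword-padded payload.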
+ */
+		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+					   " %08x", le32_to_cpu(hdr.u32[0]));
+		}
+
+		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+			efx->type->mcdi_read_response(efx, &hdr,
+					mcdi->resp_hdr_len + (i * 4), 4);
+			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+					   " %08x", le32_to_cpu(hdr.u32[0]));
+		}
+
+		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+	}
+#endif
+
+	mcdi->resprc_raw = 0;
+	if (error && mcdi->resp_data_len == 0) {
+		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+		mcdi->resprc = -EIO;
+	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+			  respseq, mcdi->seqno);
+		mcdi->resprc = -EIO;
+	} else if (error) {
+		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
+		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
+	} else {
+		mcdi->resprc = 0;
+	}
+}
+
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	rmb();
+	if (!efx->type->mcdi_poll_response(efx))
+		return false;
+
+	spin_lock_bh(&mcdi->iface_lock);
+	efx_mcdi_read_response_header(efx);
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	return true;
+}
+
+static int efx_mcdi_poll(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	unsigned long time, finish;
+	unsigned int spins;
+	int rc;
+
+	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
+	rc = efx_siena_mcdi_poll_reboot(efx);
+	if (rc) {
+		spin_lock_bh(&mcdi->iface_lock);
+		mcdi->resprc = rc;
+		mcdi->resp_hdr_len = 0;
+		mcdi->resp_data_len = 0;
+		spin_unlock_bh(&mcdi->iface_lock);
+		return 0;
+	}
+
+	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
+	 * because generally mcdi responses are fast. After that, back off
+	 * and poll once a jiffy (approximately)
+	 */
+	spins = USER_TICK_USEC;
+	finish = jiffies + MCDI_RPC_TIMEOUT;
+
+	while (1) {
+		if (spins != 0) {
+			--spins;
+			udelay(1);
+		} else {
+			schedule_timeout_uninterruptible(1);
+		}
+
+		time = jiffies;
+
+		if (efx_mcdi_poll_once(efx))
+			break;
+
+		if (time_after(time, finish))
+			return -ETIMEDOUT;
+	}
+
+	/* Return rc=0 like wait_event_timeout() */
+	return 0;
+}
+
+/* Test and clear MC-rebooted flag for this port/function; reset
+ * software state as necessary.
+ */
+int efx_siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+	if (!efx->mcdi)
+		return 0;
+
+	return efx->type->mcdi_poll_reboot(efx);
+}
+
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+	return cmpxchg(&mcdi->state,
+		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+		MCDI_STATE_QUIESCENT;
+}
+
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
+{
+	/* Wait until the interface becomes QUIESCENT and we win the race
+	 * to mark it RUNNING_SYNC.
+	 */
+	wait_event(mcdi->wq,
+		   cmpxchg(&mcdi->state,
+			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+		   MCDI_STATE_QUIESCENT);
+}
+
+static int efx_mcdi_await_completion(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+			       MCDI_RPC_TIMEOUT) == 0)
+		return -ETIMEDOUT;
+
+	/* Check if efx_siena_mcdi_mode_poll() switched us back to polled
+	 * completions. In which case, poll for completions directly. 
If efx_mcdi_ev_cpl() + * completed the request first, then we'll just end up completing the + * request again, which is safe. + * + * We need an smp_rmb() to synchronise with efx_siena_mcdi_mode_poll(), which + * wait_event_timeout() implicitly provides. + */ + if (mcdi->mode == MCDI_MODE_POLL) + return efx_mcdi_poll(efx); + + return 0; +} + +/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the + * requester. Return whether this was done. Does not take any locks. + */ +static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi) +{ + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) == + MCDI_STATE_RUNNING_SYNC) { + wake_up(&mcdi->wq); + return true; + } + + return false; +} + +static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) +{ + if (mcdi->mode == MCDI_MODE_EVENTS) { + struct efx_mcdi_async_param *async; + struct efx_nic *efx = mcdi->efx; + + /* Process the asynchronous request queue */ + spin_lock_bh(&mcdi->async_lock); + async = list_first_entry_or_null( + &mcdi->async_list, struct efx_mcdi_async_param, list); + if (async) { + mcdi->state = MCDI_STATE_RUNNING_ASYNC; + efx_mcdi_send_request(efx, async->cmd, + (const efx_dword_t *)(async + 1), + async->inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + spin_unlock_bh(&mcdi->async_lock); + + if (async) + return; + } + + mcdi->state = MCDI_STATE_QUIESCENT; + wake_up(&mcdi->wq); +} + +/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the + * asynchronous completion function, and release the interface. + * Return whether this was done. Must be called in bh-disabled + * context. Will take iface_lock and async_lock. + */ +static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) +{ + struct efx_nic *efx = mcdi->efx; + struct efx_mcdi_async_param *async; + size_t hdr_len, data_len, err_len; + efx_dword_t *outbuf; + MCDI_DECLARE_BUF_ERR(errbuf); + int rc; + + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) != + MCDI_STATE_RUNNING_ASYNC) + return false; + + spin_lock(&mcdi->iface_lock); + if (timeout) { + /* Ensure that if the completion event arrives later, + * the seqno check in efx_mcdi_ev_cpl() will fail + */ + ++mcdi->seqno; + ++mcdi->credits; + rc = -ETIMEDOUT; + hdr_len = 0; + data_len = 0; + } else { + rc = mcdi->resprc; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + } + spin_unlock(&mcdi->iface_lock); + + /* Stop the timer. In case the timer function is running, we + * must wait for it to return so that there is no possibility + * of it aborting the next request. 
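+	 * del_timer_sync() gives exactly that guarantee: it does not return
+	 * while the timer callback is still running on another CPU.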
+ */ + if (!timeout) + del_timer_sync(&mcdi->async_timer); + + spin_lock(&mcdi->async_lock); + async = list_first_entry(&mcdi->async_list, + struct efx_mcdi_async_param, list); + list_del(&async->list); + spin_unlock(&mcdi->async_lock); + + outbuf = (efx_dword_t *)(async + 1); + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(async->outlen, data_len)); + if (!timeout && rc && !async->quiet) { + err_len = min(sizeof(errbuf), data_len); + efx->type->mcdi_read_response(efx, errbuf, hdr_len, + sizeof(errbuf)); + efx_siena_mcdi_display_error(efx, async->cmd, async->inlen, + errbuf, err_len, rc); + } + + if (async->complete) + async->complete(efx, async->cookie, rc, outbuf, + min(async->outlen, data_len)); + kfree(async); + + efx_mcdi_release(mcdi); + + return true; +} + +static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, + unsigned int datalen, unsigned int mcdi_err) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool wake = false; + + spin_lock(&mcdi->iface_lock); + + if ((seqno ^ mcdi->seqno) & SEQ_MASK) { + if (mcdi->credits) + /* The request has been cancelled */ + --mcdi->credits; + else + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx " + "seq 0x%x\n", seqno, mcdi->seqno); + } else { + if (efx->type->mcdi_max_ver >= 2) { + /* MCDI v2 responses don't fit in an event */ + efx_mcdi_read_response_header(efx); + } else { + mcdi->resprc = efx_mcdi_errno(mcdi_err); + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = datalen; + } + + wake = true; + } + + spin_unlock(&mcdi->iface_lock); + + if (wake) { + if (!efx_mcdi_complete_async(mcdi, false)) + (void) efx_mcdi_complete_sync(mcdi); + + /* If the interface isn't RUNNING_ASYNC or + * RUNNING_SYNC then we've received a duplicate + * completion after we've already transitioned back to + * QUIESCENT. [A subsequent invocation would increment + * seqno, so would have failed the seqno check]. 
+ */ + } +} + +static void efx_mcdi_timeout_async(struct timer_list *t) +{ + struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer); + + efx_mcdi_complete_async(mcdi, true); +} + +static int +efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) +{ + if (efx->type->mcdi_max_ver < 0 || + (efx->type->mcdi_max_ver < 2 && + cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) + return -EINVAL; + + if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || + (efx->type->mcdi_max_ver < 2 && + inlen > MCDI_CTL_SDU_LEN_MAX_V1)) + return -EMSGSIZE; + + return 0; +} + +static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx, + size_t hdr_len, size_t data_len, + u32 *proxy_handle) +{ + MCDI_DECLARE_BUF_ERR(testbuf); + const size_t buflen = sizeof(testbuf); + + if (!proxy_handle || data_len < buflen) + return false; + + efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen); + if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) { + *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE); + return true; + } + + return false; +} + +static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet, + u32 *proxy_handle, int *raw_rc) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + MCDI_DECLARE_BUF_ERR(errbuf); + int rc; + + if (mcdi->mode == MCDI_MODE_POLL) + rc = efx_mcdi_poll(efx); + else + rc = efx_mcdi_await_completion(efx); + + if (rc != 0) { + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); + + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { + netif_err(efx, hw, efx->net_dev, + "MCDI request was completed without an event\n"); + rc = 0; + } + + efx_mcdi_abandon(efx); + + /* Close the race with efx_mcdi_ev_cpl() executing just too late + * and completing a request we've just cancelled, by ensuring + * that the seqno check therein fails. + */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + ++mcdi->credits; + spin_unlock_bh(&mcdi->iface_lock); + } + + if (proxy_handle) + *proxy_handle = 0; + + if (rc != 0) { + if (outlen_actual) + *outlen_actual = 0; + } else { + size_t hdr_len, data_len, err_len; + + /* At the very least we need a memory barrier here to ensure + * we pick up changes from efx_mcdi_ev_cpl(). Protect against + * a spurious efx_mcdi_ev_cpl() running concurrently by + * acquiring the iface_lock. 
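+		 * (Taking the lock also provides the required memory barrier
+		 * as a side-effect of its acquire semantics.)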
*/ + spin_lock_bh(&mcdi->iface_lock); + rc = mcdi->resprc; + if (raw_rc) + *raw_rc = mcdi->resprc_raw; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + err_len = min(sizeof(errbuf), data_len); + spin_unlock_bh(&mcdi->iface_lock); + + BUG_ON(rc > 0); + + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(outlen, data_len)); + if (outlen_actual) + *outlen_actual = data_len; + + efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len); + + if (cmd == MC_CMD_REBOOT && rc == -EIO) { + /* Don't reset if MC_CMD_REBOOT returns EIO */ + } else if (rc == -EIO || rc == -EINTR) { + netif_err(efx, hw, efx->net_dev, "MC reboot detected\n"); + netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n", + cmd, -rc); + if (efx->type->mcdi_reboot_detected) + efx->type->mcdi_reboot_detected(efx); + efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } else if (proxy_handle && (rc == -EPROTO) && + efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, + proxy_handle)) { + mcdi->proxy_rx_status = 0; + mcdi->proxy_rx_handle = 0; + mcdi->state = MCDI_STATE_PROXY_WAIT; + } else if (rc && !quiet) { + efx_siena_mcdi_display_error(efx, cmd, inlen, errbuf, + err_len, rc); + } + + if (rc == -EIO || rc == -EINTR) { + msleep(MCDI_STATUS_SLEEP_MS); + efx_siena_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + } + } + + if (!proxy_handle || !*proxy_handle) + efx_mcdi_release(mcdi); + return rc; +} + +static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi) +{ + if (mcdi->state == MCDI_STATE_PROXY_WAIT) { + /* Interrupt the proxy wait. */ + mcdi->proxy_rx_status = -EINTR; + wake_up(&mcdi->proxy_rx_wq); + } +} + +static void efx_mcdi_ev_proxy_response(struct efx_nic *efx, + u32 handle, int status) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT); + + mcdi->proxy_rx_status = efx_mcdi_errno(status); + /* Ensure the status is written before we update the handle, since the + * latter is used to check if we've finished. + */ + wmb(); + mcdi->proxy_rx_handle = handle; + wake_up(&mcdi->proxy_rx_wq); +} + +static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + /* Wait for a proxy event, or timeout. */ + rc = wait_event_timeout(mcdi->proxy_rx_wq, + mcdi->proxy_rx_handle != 0 || + mcdi->proxy_rx_status == -EINTR, + MCDI_RPC_TIMEOUT); + + if (rc <= 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy timeout %d\n", handle); + return -ETIMEDOUT; + } else if (mcdi->proxy_rx_handle != handle) { + netif_warn(efx, hw, efx->net_dev, + "MCDI proxy unexpected handle %d (expected %d)\n", + mcdi->proxy_rx_handle, handle); + return -EINVAL; + } + + return mcdi->proxy_rx_status; +} + +static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet, int *raw_rc) +{ + u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */ + int rc; + + if (inbuf && inlen && (inbuf == outbuf)) { + /* The input buffer can't be aliased with the output. */ + WARN_ON(1); + return -EINVAL; + } + + rc = efx_siena_mcdi_rpc_start(efx, cmd, inbuf, inlen); + if (rc) + return rc; + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, quiet, &proxy_handle, raw_rc); + + if (proxy_handle) { + /* Handle proxy authorisation. 
This allows approval of MCDI + * operations to be delegated to the admin function, allowing + * fine control over (eg) multicast subscriptions. + */ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + netif_dbg(efx, hw, efx->net_dev, + "MCDI waiting for proxy auth %d\n", + proxy_handle); + rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet); + + if (rc == 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy retry %d\n", proxy_handle); + + /* We now retry the original request. */ + mcdi->state = MCDI_STATE_RUNNING_SYNC; + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, + outbuf, outlen, outlen_actual, + quiet, NULL, raw_rc); + } else { + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); + + if (rc == -EINTR || rc == -EIO) + efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + efx_mcdi_release(mcdi); + } + } + + return rc; +} + +static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + int raw_rc = 0; + int rc; + + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, true, &raw_rc); + + if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + efx->type->is_vf) { + /* If the EVB port isn't available within a VF this may + * mean the PF is still bringing the switch up. We should + * retry our request shortly. + */ + unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT; + unsigned int delay_us = 10000; + + netif_dbg(efx, hw, efx->net_dev, + "%s: NO_EVB_PORT; will retry request\n", + __func__); + + do { + usleep_range(delay_us, delay_us + 10000); + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, + true, &raw_rc); + if (delay_us < 100000) + delay_us <<= 1; + } while ((rc == -EPROTO) && + (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + time_before(jiffies, abort_time)); + } + + if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO)) + efx_siena_mcdi_display_error(efx, cmd, inlen, + outbuf, outlen, rc); + + return rc; +} + +/** + * efx_siena_mcdi_rpc - Issue an MCDI command and wait for completion + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes. Must be a multiple + * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1. + * @outbuf: Response buffer. May be %NULL if @outlen is 0. + * @outlen: Length of response buffer, in bytes. If the actual + * response is longer than @outlen & ~3, it will be truncated + * to that length. + * @outlen_actual: Pointer through which to return the actual response + * length. May be %NULL if this is not needed. + * + * This function may sleep and therefore must be called in an appropriate + * context. + * + * Return: A negative error code, or zero if successful. The error + * code may come from the MCDI response or may indicate a failure + * to communicate with the MC. In the former case, the response + * will still be copied to @outbuf and *@outlen_actual will be + * set accordingly. In the latter case, *@outlen_actual will be + * set to zero. 
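+ *
+ * A minimal call looks like this (sketch only; the command and the
+ * buffer sizing follow the same pattern as the helpers later in this
+ * file):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ *				outbuf, sizeof(outbuf), &outlen);
+ *	if (rc)
+ *		return rc;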
+ */ +int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, false); +} + +/* Normally, on receiving an error code in the MCDI response, + * efx_siena_mcdi_rpc will log an error message containing (among other + * things) the raw error code, by means of efx_siena_mcdi_display_error. + * This _quiet version suppresses that; if the caller wishes to log + * the error conditionally on the return code, it should call this + * function and is then responsible for calling efx_siena_mcdi_display_error + * as needed. + */ +int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, true); +} + +int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; + + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + + if (mcdi->mode == MCDI_MODE_FAIL) + return -ENETDOWN; + + efx_mcdi_acquire_sync(mcdi); + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + return 0; +} + +static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + struct efx_mcdi_async_param *async; + int rc; + + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; + + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + + async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), + GFP_ATOMIC); + if (!async) + return -ENOMEM; + + async->cmd = cmd; + async->inlen = inlen; + async->outlen = outlen; + async->quiet = quiet; + async->complete = complete; + async->cookie = cookie; + memcpy(async + 1, inbuf, inlen); + + spin_lock_bh(&mcdi->async_lock); + + if (mcdi->mode == MCDI_MODE_EVENTS) { + list_add_tail(&async->list, &mcdi->async_list); + + /* If this is at the front of the queue, try to start it + * immediately + */ + if (mcdi->async_list.next == &async->list && + efx_mcdi_acquire_async(mcdi)) { + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + } else { + kfree(async); + rc = -ENETDOWN; + } + + spin_unlock_bh(&mcdi->async_lock); + + return rc; +} + +/** + * efx_siena_mcdi_rpc_async - Schedule an MCDI command to run asynchronously + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes + * @outlen: Length to allocate for response buffer, in bytes + * @complete: Function to be called on completion or cancellation. + * @cookie: Arbitrary value to be passed to @complete. + * + * This function does not sleep and therefore may be called in atomic + * context. It will fail if event queues are disabled or if MCDI + * event completions have been disabled due to an error. 
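+ * (Internally the request is copied into a %GFP_ATOMIC allocation and
+ * queued under a BH-disabling spinlock, which is what makes
+ * atomic-context callers safe.)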
+ * + * If it succeeds, the @complete function will be called exactly once + * in atomic context, when one of the following occurs: + * (a) the completion event is received (in NAPI context) + * (b) event queues are disabled (in the process that disables them) + * (c) the request times-out (in timer context) + */ +int +efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, false); +} + +int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, true); +} + +int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, false, NULL, NULL); +} + +int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual) +{ + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, true, NULL, NULL); +} + +void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc) +{ + int code = 0, err_arg = 0; + + if (outlen >= MC_CMD_ERR_CODE_OFST + 4) + code = MCDI_DWORD(outbuf, ERR_CODE); + if (outlen >= MC_CMD_ERR_ARG_OFST + 4) + err_arg = MCDI_DWORD(outbuf, ERR_ARG); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); +} + +/* Switch to polled MCDI completions. This can be called in various + * error conditions with various locks held, so it must be lockless. + * Caller is responsible for flushing asynchronous requests later. + */ +void efx_siena_mcdi_mode_poll(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + /* If already in polling mode, nothing to do. + * If in fail-fast state, don't switch to polled completion. + * FLR recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL) + return; + + /* We can switch from event completion to polled completion, because + * mcdi requests are always completed in shared memory. We do this by + * switching the mode to POLL'd then completing the request. + * efx_mcdi_await_completion() will then call efx_mcdi_poll(). + * + * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), + * which efx_mcdi_complete_sync() provides for us. + */ + mcdi->mode = MCDI_MODE_POLL; + + efx_mcdi_complete_sync(mcdi); +} + +/* Flush any running or queued asynchronous requests, after event processing + * is stopped + */ +void efx_siena_mcdi_flush_async(struct efx_nic *efx) +{ + struct efx_mcdi_async_param *async, *next; + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + + /* We must be in poll or fail mode so no more requests can be queued */ + BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); + + del_timer_sync(&mcdi->async_timer); + + /* If a request is still running, make sure we give the MC + * time to complete it so that the response won't overwrite our + * next request. 
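+	 * A single efx_mcdi_poll() is enough for that: it busy-waits until
+	 * either the response arrives or the MCDI timeout expires.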
+ */
+	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+		efx_mcdi_poll(efx);
+		mcdi->state = MCDI_STATE_QUIESCENT;
+	}
+
+	/* Nothing else will access the async list now, so it is safe
+	 * to walk it without holding async_lock. If we hold it while
+	 * calling a completer then lockdep may warn that we have
+	 * acquired locks in the wrong order.
+	 */
+	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+		if (async->complete)
+			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+		list_del(&async->list);
+		kfree(async);
+	}
+}
+
+void efx_siena_mcdi_mode_event(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi;
+
+	if (!efx->mcdi)
+		return;
+
+	mcdi = efx_mcdi(efx);
+	/* If already in event completion mode, nothing to do.
+	 * If in fail-fast state, don't switch to event completion. FLR
+	 * recovery will do that later.
+	 */
+	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
+		return;
+
+	/* We can't switch from polled to event completion in the middle of a
+	 * request, because the completion method is specified in the request.
+	 * So acquire the interface to serialise the requestors. We don't need
+	 * to acquire the iface_lock to change the mode here, but we do need a
+	 * write memory barrier to ensure that efx_siena_mcdi_rpc() sees it,
+	 * which efx_mcdi_acquire() provides.
+	 */
+	efx_mcdi_acquire_sync(mcdi);
+	mcdi->mode = MCDI_MODE_EVENTS;
+	efx_mcdi_release(mcdi);
+}
+
+static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	/* If there is an outstanding MCDI request, it has been terminated
+	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
+	 * in polled mode, then do nothing because the MC reboot handler will
+	 * set the header correctly. However, if the mcdi interface is waiting
+	 * for a CMDDONE event it won't receive it [and since all MCDI events
+	 * are sent to the same queue, we can't be racing with
+	 * efx_mcdi_ev_cpl()]
+	 *
+	 * If there is an outstanding asynchronous request, we can't
+	 * complete it now (efx_mcdi_complete_async() would deadlock). The
+	 * reset process will take care of this.
+	 *
+	 * There's a race here with efx_mcdi_send_request(), because
+	 * we might receive a REBOOT event *before* the request has
+	 * been copied out. In polled mode (during startup) this is
+	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+	 * event mode, this condition is just an edge-case of
+	 * receiving a REBOOT event after posting the MCDI
+	 * request. Did the MC reboot before or after the copyout? The
+	 * best we can do in either case is just return failure.
+	 *
+	 * If there is an outstanding proxy response expected it is not going
+	 * to arrive. We should thus abort it.
+	 */
+	spin_lock(&mcdi->iface_lock);
+	efx_mcdi_proxy_abort(mcdi);
+
+	if (efx_mcdi_complete_sync(mcdi)) {
+		if (mcdi->mode == MCDI_MODE_EVENTS) {
+			mcdi->resprc = rc;
+			mcdi->resp_hdr_len = 0;
+			mcdi->resp_data_len = 0;
+			++mcdi->credits;
+		}
+	} else {
+		int count;
+
+		/* Consume the status word since efx_siena_mcdi_rpc_finish() won't */
+		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
+			rc = efx_siena_mcdi_poll_reboot(efx);
+			if (rc)
+				break;
+			udelay(MCDI_STATUS_DELAY_US);
+		}
+
+		/* On EF10, a CODE_MC_REBOOT event can be received without the
+		 * reboot detection in efx_siena_mcdi_poll_reboot() being triggered.
+		 * If zero was returned from the final call to
+		 * efx_siena_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
+		 * MC has definitely rebooted so prepare for the reset. 
+ */
+		if (!rc && efx->type->mcdi_reboot_detected)
+			efx->type->mcdi_reboot_detected(efx);
+
+		mcdi->new_epoch = true;
+
+		/* Nobody was waiting for an MCDI request, so trigger a reset */
+		efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+	}
+
+	spin_unlock(&mcdi->iface_lock);
+}
+
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	spin_lock(&mcdi->iface_lock);
+	efx->mc_bist_for_other_fn = true;
+	efx_mcdi_proxy_abort(mcdi);
+
+	if (efx_mcdi_complete_sync(mcdi)) {
+		if (mcdi->mode == MCDI_MODE_EVENTS) {
+			mcdi->resprc = -EIO;
+			mcdi->resp_hdr_len = 0;
+			mcdi->resp_data_len = 0;
+			++mcdi->credits;
+		}
+	}
+	mcdi->new_epoch = true;
+	efx_siena_schedule_reset(efx, RESET_TYPE_MC_BIST);
+	spin_unlock(&mcdi->iface_lock);
+}
+
+/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
+ * to recover.
+ */
+static void efx_mcdi_abandon(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
+		return; /* it had already been done */
+	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
+	efx_siena_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
+static void efx_handle_drain_event(struct efx_nic *efx)
+{
+	if (atomic_dec_and_test(&efx->active_queues))
+		wake_up(&efx->flush_wq);
+
+	WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
+void efx_siena_mcdi_process_event(struct efx_channel *channel,
+				  efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
+
+	switch (code) {
+	case MCDI_EVENT_CODE_BADSSERT:
+		netif_err(efx, hw, efx->net_dev,
+			  "MC watchdog or assertion failure at 0x%x\n", data);
+		efx_mcdi_ev_death(efx, -EINTR);
+		break;
+
+	case MCDI_EVENT_CODE_PMNOTICE:
+		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
+		break;
+
+	case MCDI_EVENT_CODE_CMDDONE:
+		efx_mcdi_ev_cpl(efx,
+				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
+				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
+				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
+		break;
+
+	case MCDI_EVENT_CODE_LINKCHANGE:
+		efx_siena_mcdi_process_link_change(efx, event);
+		break;
+	case MCDI_EVENT_CODE_SENSOREVT:
+		efx_sensor_event(efx, event);
+		break;
+	case MCDI_EVENT_CODE_SCHEDERR:
+		netif_dbg(efx, hw, efx->net_dev,
+			  "MC Scheduler alert (0x%x)\n", data);
+		break;
+	case MCDI_EVENT_CODE_REBOOT:
+	case MCDI_EVENT_CODE_MC_REBOOT:
+		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
+		efx_mcdi_ev_death(efx, -EIO);
+		break;
+	case MCDI_EVENT_CODE_MC_BIST:
+		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+		efx_mcdi_ev_bist(efx);
+		break;
+	case MCDI_EVENT_CODE_MAC_STATS_DMA:
+		/* MAC stats are gathered lazily. We can ignore this. 
*/ + break; + case MCDI_EVENT_CODE_FLR: + if (efx->type->sriov_flr) + efx->type->sriov_flr(efx, + MCDI_EVENT_FIELD(*event, FLR_VF)); + break; + case MCDI_EVENT_CODE_PTP_RX: + case MCDI_EVENT_CODE_PTP_FAULT: + case MCDI_EVENT_CODE_PTP_PPS: + efx_siena_ptp_event(efx, event); + break; + case MCDI_EVENT_CODE_PTP_TIME: + efx_siena_time_sync_event(channel, event); + break; + case MCDI_EVENT_CODE_TX_FLUSH: + case MCDI_EVENT_CODE_RX_FLUSH: + /* Two flush events will be sent: one to the same event + * queue as completions, and one to event queue 0. + * In the latter case the {RX,TX}_FLUSH_TO_DRIVER + * flag will be set, and we should ignore the event + * because we want to wait for all completions. + */ + BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != + MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); + if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) + efx_handle_drain_event(efx); + break; + case MCDI_EVENT_CODE_TX_ERR: + case MCDI_EVENT_CODE_RX_ERR: + netif_err(efx, hw, efx->net_dev, + "%s DMA error (event: "EFX_QWORD_FMT")\n", + code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX", + EFX_QWORD_VAL(*event)); + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + break; + case MCDI_EVENT_CODE_PROXY_RESPONSE: + efx_mcdi_ev_proxy_response(efx, + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE), + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC)); + break; + default: + netif_err(efx, hw, efx->net_dev, + "Unknown MCDI event " EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + } +} + +/************************************************************************** + * + * Specific request functions + * + ************************************************************************** + */ + +void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); + size_t outlength; + const __le16 *ver_words; + size_t offset; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc) + goto fail; + if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { + rc = -EIO; + goto fail; + } + + ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); + offset = scnprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), + le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), + le16_to_cpu(ver_words[3])); + + if (efx->type->print_additional_fwver) + offset += efx->type->print_additional_fwver(efx, buf + offset, + len - offset); + + /* It's theoretically possible for the string to exceed 31 + * characters, though in practice the first three version + * components are short enough that this doesn't happen. + */ + if (WARN_ON(offset >= len)) + buf[0] = 0; + + return; + +fail: + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + buf[0] = 0; +} + +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, + driver_operating ? 
1 : 0); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); + + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID + * specified will fail with EPERM, and we have to tell the MC we don't + * care what firmware we get. + */ + if (rc == -EPERM) { + netif_dbg(efx, probe, efx->net_dev, + "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n"); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, + MC_CMD_FW_DONT_CARE); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, + sizeof(inbuf), outbuf, + sizeof(outbuf), &outlen); + } + if (rc) { + efx_siena_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, + sizeof(inbuf), outbuf, outlen, rc); + goto fail; + } + if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { + rc = -EIO; + goto fail; + } + + if (driver_operating) { + if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) { + efx->mcdi->fn_flags = + MCDI_DWORD(outbuf, + DRV_ATTACH_EXT_OUT_FUNC_FLAGS); + } else { + /* Synthesise flags for Siena */ + efx->mcdi->fn_flags = + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED | + (efx_port_num(efx) == 0) << + MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY; + } + } + + /* We currently assume we have control of the external link + * and are completely trusted by firmware. Abort probing + * if that's not true for this function. + */ + + if (was_attached != NULL) + *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); + return 0; + +fail: + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); + size_t outlen, i; + int port_num = efx_port_num(efx); + int rc; + + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); + /* we need __aligned(2) for ether_addr_copy */ + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1); + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + if (mac_address) + ether_addr_copy(mac_address, + port_num ? 
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0)); + if (fw_subtype_list) { + for (i = 0; + i < MCDI_VAR_ARRAY_LEN(outlen, + GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); + i++) + fw_subtype_list[i] = MCDI_ARRAY_WORD( + outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i); + for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++) + fw_subtype_list[i] = 0; + } + if (capabilities) { + if (port_num) + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); + else + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); + } + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", + __func__, rc, (int)outlen); + + return rc; +} + +int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, + u32 dest_evq) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); + u32 dest = 0; + int rc; + + if (uart) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; + if (evq) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; + + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); + + BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); + return rc; +} + +int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); + *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); + *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & + (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { + case MC_CMD_NVRAM_TEST_PASS: + case MC_CMD_NVRAM_TEST_NOTSUPP: + return 0; + default: + return -EIO; + } +} + +int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx) +{ + u32 nvram_types; + unsigned int type; + int rc; + + rc = efx_siena_mcdi_nvram_types(efx, &nvram_types); + if (rc) + goto fail1; + + type = 0; + while (nvram_types 
!= 0) { + if (nvram_types & 1) { + rc = efx_mcdi_nvram_test(efx, type); + if (rc) + goto fail2; + } + type++; + nvram_types >>= 1; + } + + return 0; + +fail2: + netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", + __func__, type); +fail1: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +/* Returns 1 if an assertion was read, 0 if no assertion had fired, + * negative on error. + */ +static int efx_mcdi_read_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); + unsigned int flags, index; + const char *reason; + size_t outlen; + int retry; + int rc; + + /* Attempt to read any stored assertion state before we reboot + * the mcfw out of the assertion handler. Retry twice, once + * because a boot-time assertion might cause this command to fail + * with EINTR. And once again because GET_ASSERTS can race with + * MC_CMD_REBOOT running on the other port. */ + retry = 2; + do { + MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, + inbuf, MC_CMD_GET_ASSERTS_IN_LEN, + outbuf, sizeof(outbuf), &outlen); + if (rc == -EPERM) + return 0; + } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); + + if (rc) { + efx_siena_mcdi_display_error(efx, MC_CMD_GET_ASSERTS, + MC_CMD_GET_ASSERTS_IN_LEN, outbuf, + outlen, rc); + return rc; + } + if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) + return -EIO; + + /* Print out any recorded assertion state */ + flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); + if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) + return 0; + + reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) + ? "system-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) + ? "thread-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) + ? "watchdog reset" + : "unknown assertion"; + netif_err(efx, hw, efx->net_dev, + "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); + + /* Print out the registers */ + for (index = 0; + index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; + index++) + netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", + 1 + index, + MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, + index)); + + return 1; +} + +static int efx_mcdi_exit_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; + + /* If the MC is running debug firmware, it might now be + * waiting for a debugger to attach, but we just want it to + * reboot. We set a flag that makes the command a no-op if it + * has already done so. + * The MCDI will thus return either 0 or -EIO. 
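+	 * Either way the reboot has happened, so -EIO is squashed to 0
+	 * below.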
+ */ + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, + MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, + MC_CMD_REBOOT_IN_LEN, NULL, 0, NULL); + if (rc == -EIO) + rc = 0; + if (rc) + efx_siena_mcdi_display_error(efx, MC_CMD_REBOOT, + MC_CMD_REBOOT_IN_LEN, NULL, 0, rc); + return rc; +} + +int efx_siena_mcdi_handle_assertion(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_read_assertion(efx); + if (rc <= 0) + return rc; + + return efx_mcdi_exit_assertion(efx); +} + +int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); + + BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); + BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); + BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); + + BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); + + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_mcdi_reset_func(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0); + MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG, + ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_reset_mc(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* White is black, and up is down */ + if (rc == -EIO) + return 0; + if (rc == 0) + rc = -EIO; + return rc; +} + +enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason) +{ + return RESET_TYPE_RECOVER_OR_ALL; +} + +int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc; + + /* If MCDI is down, we can't handle_assertion */ + if (method == RESET_TYPE_MCDI_TIMEOUT) { + rc = pci_reset_function(efx->pci_dev); + if (rc) + return rc; + /* Re-enable polled MCDI completion */ + if (efx->mcdi) { + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + mcdi->mode = MCDI_MODE_POLL; + } + return 0; + } + + /* Recover from a failed assertion pre-reset */ + rc = efx_siena_mcdi_handle_assertion(efx); + if (rc) + return rc; + + if (method == RESET_TYPE_DATAPATH) + return 0; + else if (method == RESET_TYPE_WORLD) + return efx_mcdi_reset_mc(efx); + else + return efx_mcdi_reset_func(efx); +} + +static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, + const u8 *mac, int *id_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); + MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, + MC_CMD_FILTER_MODE_SIMPLE); + ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; + +} + + +int 
efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out) +{ + return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); +} + + +int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + + +int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, + sizeof(inbuf), NULL, 0, NULL); + return rc; +} + +int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); + int rc, count; + + BUILD_BUG_ON(EFX_MAX_CHANNELS > + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); + + count = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + if (rx_queue->flush_pending) { + rx_queue->flush_pending = false; + atomic_dec(&efx->rxq_flush_pending); + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + count, efx_rx_queue_index(rx_queue)); + count++; + } + } + } + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), + NULL, 0, NULL); + WARN_ON(rc < 0); + + return rc; +} + +int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx) +{ + int rc; + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, + NULL, 0, NULL); + return rc; +} + +#ifdef CONFIG_SFC_SIENA_MTD + +#define EFX_MCDI_NVRAM_LEN_MAX 128 + +static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); + MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS, + NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, + 1); + + BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, + sizeof(inbuf), NULL, 0, NULL); + + return rc; +} + +static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, + loff_t offset, u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN); + MCDI_DECLARE_BUF(outbuf, + MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE, + MC_CMD_NVRAM_READ_IN_V2_DEFAULT); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); + return 0; +} + +static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, + 
MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); + memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, + ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, + loff_t offset, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN); + size_t outlen; + int rc, rc2; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); + /* Always set this flag. Old firmware ignores it */ + MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS, + NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, + 1); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) { + rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE); + if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) + netif_err(efx, drv, efx->net_dev, + "NVRAM update failed verification with code 0x%x\n", + rc2); + switch (rc2) { + case MC_CMD_NVRAM_VERIFY_RC_SUCCESS: + break; + case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED: + rc = -EIO; + break; + case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT: + case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST: + rc = -EINVAL; + break; + case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES: + case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS: + case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH: + rc = -EPERM; + break; + default: + netif_err(efx, drv, efx->net_dev, + "Unknown response to NVRAM_UPDATE_FINISH\n"); + rc = -EIO; + } + } + + return rc; +} + +int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, + buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); + loff_t end 
= min_t(loff_t, start + len, mtd->size);
+	size_t chunk = part->common.mtd.erasesize;
+	int rc = 0;
+
+	if (!part->updating) {
+		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+		if (rc)
+			goto out;
+		part->updating = true;
+	}
+
+	/* The MCDI interface can in fact do multiple erase blocks at once;
+	 * but erasing may be slow, so we make multiple calls here to avoid
+	 * tripping the MCDI RPC timeout. */
+	while (offset < end) {
+		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+					  chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+	}
+out:
+	return rc;
+}
+
+int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+			     size_t len, size_t *retlen, const u8 *buffer)
+{
+	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	loff_t offset = start;
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk;
+	int rc = 0;
+
+	if (!part->updating) {
+		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+		if (rc)
+			goto out;
+		part->updating = true;
+	}
+
+	while (offset < end) {
+		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+					  buffer, chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+		buffer += chunk;
+	}
+out:
+	*retlen = offset - start;
+	return rc;
+}
+
+int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	int rc = 0;
+
+	if (part->updating) {
+		part->updating = false;
+		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+	}
+
+	return rc;
+}
+
+void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+	struct efx_mcdi_mtd_partition *mcdi_part =
+		container_of(part, struct efx_mcdi_mtd_partition, common);
+	struct efx_nic *efx = part->mtd.priv;
+
+	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+		 efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_SIENA_MTD */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h
new file mode 100644
index 0000000000..06f38e5e68
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_MCDI_H
+#define EFX_MCDI_H
+
+/**
+ * enum efx_mcdi_state - MCDI request handling state
+ * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
+ *	mcdi @iface_lock then they are able to move to
+ *	%MCDI_STATE_RUNNING_SYNC or %MCDI_STATE_RUNNING_ASYNC
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ * Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
+ * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
+ * indicates we must wait for a proxy try again message.
+ * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
+ * has not yet consumed the result. For all other threads, equivalent to
+ * a %MCDI_STATE_RUNNING_* state. 
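+ *
+ * The usual synchronous life cycle is therefore (sketch):
+ *
+ *	QUIESCENT -> RUNNING_SYNC -> COMPLETED -> QUIESCENT
+ *
+ * with %MCDI_STATE_PROXY_WAIT optionally interposed before a proxied
+ * request is resent.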
+ */ +enum efx_mcdi_state { + MCDI_STATE_QUIESCENT, + MCDI_STATE_RUNNING_SYNC, + MCDI_STATE_RUNNING_ASYNC, + MCDI_STATE_PROXY_WAIT, + MCDI_STATE_COMPLETED, +}; + +/** + * enum efx_mcdi_mode - MCDI transaction mode + * @MCDI_MODE_POLL: poll for MCDI completion, until timeout + * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once + * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls + */ +enum efx_mcdi_mode { + MCDI_MODE_POLL, + MCDI_MODE_EVENTS, + MCDI_MODE_FAIL, +}; + +/** + * struct efx_mcdi_iface - MCDI protocol context + * @efx: The associated NIC. + * @state: Request handling state. Waited for by @wq. + * @mode: Poll for mcdi completion, or wait for an mcdi_event. + * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING + * @new_epoch: Indicates start of day or start of MC reboot recovery + * @iface_lock: Serialises access to @seqno, @credits and response metadata + * @seqno: The next sequence number to use for mcdi requests. + * @credits: Number of spurious MCDI completion events allowed before we + * trigger a fatal error + * @resprc: Response error/success code (Linux numbering) + * @resp_hdr_len: Response header length + * @resp_data_len: Response data (SDU or error) length + * @async_lock: Serialises access to @async_list while event processing is + * enabled + * @async_list: Queue of asynchronous requests + * @async_timer: Timer for asynchronous request timeout + * @logging_buffer: buffer that may be used to build MCDI tracing messages + * @logging_enabled: whether to trace MCDI + * @proxy_rx_handle: Most recently received proxy authorisation handle + * @proxy_rx_status: Status of most recent proxy authorisation + * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle + */ +struct efx_mcdi_iface { + struct efx_nic *efx; + enum efx_mcdi_state state; + enum efx_mcdi_mode mode; + wait_queue_head_t wq; + spinlock_t iface_lock; + bool new_epoch; + unsigned int credits; + unsigned int seqno; + int resprc; + int resprc_raw; + size_t resp_hdr_len; + size_t resp_data_len; + spinlock_t async_lock; + struct list_head async_list; + struct timer_list async_timer; +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING + char *logging_buffer; + bool logging_enabled; +#endif + unsigned int proxy_rx_handle; + int proxy_rx_status; + wait_queue_head_t proxy_rx_wq; +}; + +struct efx_mcdi_mon { + struct efx_buffer dma_buf; + struct mutex update_lock; + unsigned long last_update; + struct device *device; + struct efx_mcdi_mon_attribute *attrs; + struct attribute_group group; + const struct attribute_group *groups[2]; + unsigned int n_attrs; +}; + +struct efx_mcdi_mtd_partition { + struct efx_mtd_partition common; + bool updating; + u16 nvram_type; + u16 fw_subtype; +}; + +#define to_efx_mcdi_mtd_partition(mtd) \ + container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd) + +/** + * struct efx_mcdi_data - extra state for NICs that implement MCDI + * @iface: Interface/protocol state + * @hwmon: Hardware monitor state + * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH. 
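+ *
+ * A minimal access sketch (hypothetical caller; efx_mcdi() below returns
+ * &efx->mcdi->iface, whose iface_lock guards the sequence number):
+ *
+ *	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ *	unsigned int seqno;
+ *
+ *	spin_lock_bh(&mcdi->iface_lock);
+ *	seqno = mcdi->seqno++;
+ *	spin_unlock_bh(&mcdi->iface_lock);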
+ */ +struct efx_mcdi_data { + struct efx_mcdi_iface iface; +#ifdef CONFIG_SFC_SIENA_MCDI_MON + struct efx_mcdi_mon hwmon; +#endif + u32 fn_flags; +}; + +static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) +{ + EFX_WARN_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->iface; +} + +#ifdef CONFIG_SFC_SIENA_MCDI_MON +static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) +{ + EFX_WARN_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->hwmon; +} +#endif + +int efx_siena_mcdi_init(struct efx_nic *efx); +void efx_siena_mcdi_detach(struct efx_nic *efx); +void efx_siena_mcdi_fini(struct efx_nic *efx); + +int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); +int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); + +int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen); +int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); +int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual); + +typedef void efx_mcdi_async_completer(struct efx_nic *efx, + unsigned long cookie, int rc, + efx_dword_t *outbuf, + size_t outlen_actual); +int efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); +int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); + +void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc); + +int efx_siena_mcdi_poll_reboot(struct efx_nic *efx); +void efx_siena_mcdi_mode_poll(struct efx_nic *efx); +void efx_siena_mcdi_mode_event(struct efx_nic *efx); +void efx_siena_mcdi_flush_async(struct efx_nic *efx); + +void efx_siena_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); + +/* We expect that 16- and 32-bit fields in MCDI requests and responses + * are appropriately aligned, but 64-bit fields are only + * 32-bit-aligned. Also, on Siena we must copy to the MC shared + * memory strictly 32 bits at a time, so add any necessary padding. 
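+ *
+ * A minimal usage sketch (hypothetical caller; the macros are defined
+ * below, and the command/field constants come from mcdi_pcol.h):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ *	size_t outlen, size;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+ *	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ *				outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_NVRAM_INFO_OUT_LEN)
+ *		size = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);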
+ */ +#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4) +#define _MCDI_DECLARE_BUF(_name, _len) \ + efx_dword_t _name[DIV_ROUND_UP(_len, 4)] +#define MCDI_DECLARE_BUF(_name, _len) \ + _MCDI_DECLARE_BUF(_name, _len) = {{{0}}} +#define MCDI_DECLARE_BUF_ERR(_name) \ + MCDI_DECLARE_BUF(_name, 8) +#define _MCDI_PTR(_buf, _offset) \ + ((u8 *)(_buf) + (_offset)) +#define MCDI_PTR(_buf, _field) \ + _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST) +#define _MCDI_CHECK_ALIGN(_ofst, _align) \ + ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1))) +#define _MCDI_DWORD(_buf, _field) \ + ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) + +#define MCDI_BYTE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \ + *MCDI_PTR(_buf, _field)) +#define MCDI_WORD(_buf, _field) \ + ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) +#define MCDI_SET_DWORD(_buf, _field, _value) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) +#define MCDI_DWORD(_buf, _field) \ + EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1) +#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \ + _name2, _value2) \ + EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2) +#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3) \ + EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3) +#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4) \ + EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4) +#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5) \ + EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5) +#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6) \ + EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6) +#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7) \ + EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7) +#define MCDI_SET_QWORD(_buf, _field, _value) \ + do { \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ + EFX_DWORD_0, (u32)(_value)); \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_QWORD(_buf, _field) \ + 
(EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ + (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) +#define MCDI_FIELD(_ptr, _type, _field) \ + EFX_EXTRACT_DWORD( \ + *(efx_dword_t *) \ + _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\ + MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \ + (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \ + MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1) + +#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \ + (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\ + + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align)) +#define MCDI_DECLARE_STRUCT_PTR(_name) \ + efx_dword_t *_name +#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \ + ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_VAR_ARRAY_LEN(_len, _field) \ + min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \ + ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN) +#define MCDI_ARRAY_WORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *) \ + _MCDI_ARRAY_PTR(_buf, _field, _index, 2))) +#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \ + EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \ + EFX_DWORD_0, _value) +#define MCDI_ARRAY_DWORD(_buf, _field, _index) \ + EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0) +#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \ + do { \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\ + EFX_DWORD_0, (u32)(_value)); \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ + MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \ + _type ## _TYPEDEF, _field2) + +#define MCDI_EVENT_FIELD(_ev, _field) \ + EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) + +#define MCDI_CAPABILITY(field) \ + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN + +#define MCDI_CAPABILITY_OFST(field) \ + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST + +#define efx_has_cap(efx, field) \ + efx->type->check_caps(efx, \ + MCDI_CAPABILITY(field), \ + MCDI_CAPABILITY_OFST(field)) + +void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); +int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities); +int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, + u32 dest_evq); +int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); +int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out); +int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx); +int efx_siena_mcdi_handle_assertion(struct efx_nic *efx); +int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); +int efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out); +int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); +int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, 
int id);
+int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx);
+void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx);
+enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason);
+int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+
+#ifdef CONFIG_SFC_SIENA_MCDI_MON
+int efx_siena_mcdi_mon_probe(struct efx_nic *efx);
+void efx_siena_mcdi_mon_remove(struct efx_nic *efx);
+#else
+static inline int efx_siena_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_siena_mcdi_mon_remove(struct efx_nic *efx) {}
+#endif
+
+#ifdef CONFIG_SFC_SIENA_MTD
+int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+			    size_t *retlen, u8 *buffer);
+int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+			     size_t *retlen, const u8 *buffer);
+int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
+#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
new file mode 100644
index 0000000000..c7ea703c5d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/stat.h>
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+
+enum efx_hwmon_type {
+	EFX_HWMON_UNKNOWN,
+	EFX_HWMON_TEMP,   /* temperature */
+	EFX_HWMON_COOL,   /* cooling device, probably a heatsink */
+	EFX_HWMON_IN,     /* voltage */
+	EFX_HWMON_CURR,   /* current */
+	EFX_HWMON_POWER,  /* power */
+	EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+	[EFX_HWMON_TEMP]  = " degC",
+	[EFX_HWMON_COOL]  = " rpm", /* though nonsense for a heatsink */
+	[EFX_HWMON_IN]    = " mV",
+	[EFX_HWMON_CURR]  = " mA",
+	[EFX_HWMON_POWER] = " W",
+};
+
+static const struct {
+	const char *label;
+	enum efx_hwmon_type hwmon_type;
+	int port;
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port)				\
+	[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+	SENSOR(CONTROLLER_TEMP,    "Controller board temp.", TEMP, -1),
+	SENSOR(PHY_COMMON_TEMP,    "PHY temp.", TEMP, -1),
+	SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
+	SENSOR(PHY0_TEMP,          "PHY temp.", TEMP, 0),
+	SENSOR(PHY0_COOLING,       "PHY heat sink", COOL, 0),
+	SENSOR(PHY1_TEMP,          "PHY temp.", TEMP, 1),
+	SENSOR(PHY1_COOLING,       "PHY heat sink", COOL, 1),
+	SENSOR(IN_1V0,             "1.0V supply", IN, -1),
+	SENSOR(IN_1V2,             "1.2V supply", IN, -1),
+	SENSOR(IN_1V8,             "1.8V supply", IN, -1),
+	SENSOR(IN_2V5,             "2.5V supply", IN, -1),
+	SENSOR(IN_3V3,             "3.3V supply", IN, -1),
+	SENSOR(IN_12V0,            "12.0V supply", IN, -1),
+	SENSOR(IN_1V2A,            "1.2V analogue supply", IN, -1),
+	SENSOR(IN_VREF,            "Ref. voltage", IN, -1),
voltage", IN, -1), + SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1), + SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1), + SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1), + SENSOR(PSU_TEMP, "Controller regulator temp.", + TEMP, -1), + SENSOR(FAN_0, "Fan 0", COOL, -1), + SENSOR(FAN_1, "Fan 1", COOL, -1), + SENSOR(FAN_2, "Fan 2", COOL, -1), + SENSOR(FAN_3, "Fan 3", COOL, -1), + SENSOR(FAN_4, "Fan 4", COOL, -1), + SENSOR(IN_VAOE, "AOE input supply", IN, -1), + SENSOR(OUT_IAOE, "AOE output current", CURR, -1), + SENSOR(IN_IAOE, "AOE input current", CURR, -1), + SENSOR(NIC_POWER, "Board power use", POWER, -1), + SENSOR(IN_0V9, "0.9V supply", IN, -1), + SENSOR(IN_I0V9, "0.9V supply current", CURR, -1), + SENSOR(IN_I1V2, "1.2V supply current", CURR, -1), + SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1), + SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1), + SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1), + SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1), + SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1), + SENSOR(CONTROLLER_VPTAT, + "Controller PTAT voltage (int. ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP, + "Controller die temp. (int. ADC)", TEMP, -1), + SENSOR(CONTROLLER_VPTAT_EXTADC, + "Controller PTAT voltage (ext. ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC, + "Controller die temp. (ext. ADC)", TEMP, -1), + SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1), + SENSOR(AIRFLOW, "Air flow raw", IN, -1), + SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1), + SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1), + SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1), +#undef SENSOR +}; + +static const char *const sensor_status_names[] = { + [MC_CMD_SENSOR_STATE_OK] = "OK", + [MC_CMD_SENSOR_STATE_WARNING] = "Warning", + [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", + [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", + [MC_CMD_SENSOR_STATE_NO_READING] = "No reading", +}; + +void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ + unsigned int type, state, value; + enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN; + const char *name = NULL, *state_txt, *unit; + + type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); + state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); + value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); + + /* Deal gracefully with the board having more drivers than we + * know about, but do not expect new sensor states. 
*/ + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { + name = efx_mcdi_sensor_type[type].label; + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + } + if (!name) + name = "No sensor name available"; + EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); + state_txt = sensor_status_names[state]; + EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT); + unit = efx_hwmon_unit[hwmon_type]; + if (!unit) + unit = ""; + + netif_err(efx, hw, efx->net_dev, + "Sensor %d (%s) reports condition '%s' for value %d%s\n", + type, name, state_txt, value, unit); +} + +#ifdef CONFIG_SFC_SIENA_MCDI_MON + +struct efx_mcdi_mon_attribute { + struct device_attribute dev_attr; + unsigned int index; + unsigned int type; + enum efx_hwmon_type hwmon_type; + unsigned int limit_value; + char name[12]; +}; + +static int efx_mcdi_mon_update(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN); + int rc; + + MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR, + hwmon->dma_buf.dma_addr); + MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_READ_SENSORS, + inbuf, sizeof(inbuf), NULL, 0, NULL); + if (rc == 0) + hwmon->last_update = jiffies; + return rc; +} + +static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, + efx_dword_t *entry) +{ + struct efx_nic *efx = dev_get_drvdata(dev->parent); + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + int rc; + + BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0); + + mutex_lock(&hwmon->update_lock); + + /* Use cached value if last update was < 1 s ago */ + if (time_before(jiffies, hwmon->last_update + HZ)) + rc = 0; + else + rc = efx_mcdi_mon_update(efx); + + /* Copy out the requested entry */ + *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index]; + + mutex_unlock(&hwmon->update_lock); + + return rc; +} + +static ssize_t efx_mcdi_mon_show_value(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + efx_dword_t entry; + unsigned int value, state; + int rc; + + rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); + if (rc) + return rc; + + state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + if (state == MC_CMD_SENSOR_STATE_NO_READING) + return -EBUSY; + + value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); + + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ + value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + + return sprintf(buf, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_limit(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + unsigned int value; + + value = mon_attr->limit_value; + + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ + value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + + return sprintf(buf, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_alarm(struct device *dev, + struct device_attribute 
*attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + efx_dword_t entry; + int state; + int rc; + + rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); + if (rc) + return rc; + + state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK); +} + +static ssize_t efx_mcdi_mon_show_label(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + return sprintf(buf, "%s\n", + efx_mcdi_sensor_type[mon_attr->type].label); +} + +static void +efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, + ssize_t (*reader)(struct device *, + struct device_attribute *, char *), + unsigned int index, unsigned int type, + unsigned int limit_value) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; + + strlcpy(attr->name, name, sizeof(attr->name)); + attr->index = index; + attr->type = type; + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) + attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + else + attr->hwmon_type = EFX_HWMON_UNKNOWN; + attr->limit_value = limit_value; + sysfs_attr_init(&attr->dev_attr.attr); + attr->dev_attr.attr.name = attr->name; + attr->dev_attr.attr.mode = 0444; + attr->dev_attr.show = reader; + hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; +} + +int efx_siena_mcdi_mon_probe(struct efx_nic *efx) +{ + unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0; + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX); + unsigned int n_pages, n_sensors, n_attrs, page; + size_t outlen; + char name[12]; + u32 mask; + int rc, i, j, type; + + /* Find out how many sensors are present */ + n_sensors = 0; + page = 0; + do { + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) + return -EIO; + + mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK); + n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + ++page; + } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT)); + n_pages = page; + + /* Don't create a device if there are none */ + if (n_sensors == 0) + return 0; + + rc = efx_siena_alloc_buffer(efx, &hwmon->dma_buf, + n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN, + GFP_KERNEL); + if (rc) + return rc; + + mutex_init(&hwmon->update_lock); + efx_mcdi_mon_update(efx); + + /* Allocate space for the maximum possible number of + * attributes for this set of sensors: + * value, min, max, crit, alarm and label for each sensor. 
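+ * For example, 20 sensors need at most 6 * 20 = 120 attribute slots,
+ * hence n_attrs below is 6 * n_sensors.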
+ */ + n_attrs = 6 * n_sensors; + hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); + if (!hwmon->attrs) { + rc = -ENOMEM; + goto fail; + } + hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *), + GFP_KERNEL); + if (!hwmon->group.attrs) { + rc = -ENOMEM; + goto fail; + } + + for (i = 0, j = -1, type = -1; ; i++) { + enum efx_hwmon_type hwmon_type; + const char *hwmon_prefix; + unsigned hwmon_index; + u16 min1, max1, min2, max2; + + /* Find next sensor type or exit if there is none */ + do { + type++; + + if ((type % 32) == 0) { + page = type / 32; + j = -1; + if (page == n_pages) + goto hwmon_register; + + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, + page); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + mask = (MCDI_DWORD(outbuf, + SENSOR_INFO_OUT_MASK) & + ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + + /* Check again for short response */ + if (outlen < + MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) { + rc = -EIO; + goto fail; + } + } + } while (!(mask & (1 << type % 32))); + j++; + + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + + /* Skip sensors specific to a different port */ + if (hwmon_type != EFX_HWMON_UNKNOWN && + efx_mcdi_sensor_type[type].port >= 0 && + efx_mcdi_sensor_type[type].port != + efx_port_num(efx)) + continue; + } else { + hwmon_type = EFX_HWMON_UNKNOWN; + } + + switch (hwmon_type) { + case EFX_HWMON_TEMP: + hwmon_prefix = "temp"; + hwmon_index = ++n_temp; /* 1-based */ + break; + case EFX_HWMON_COOL: + /* This is likely to be a heatsink, but there + * is no convention for representing cooling + * devices other than fans. + */ + hwmon_prefix = "fan"; + hwmon_index = ++n_cool; /* 1-based */ + break; + default: + hwmon_prefix = "in"; + hwmon_index = n_in++; /* 0-based */ + break; + case EFX_HWMON_CURR: + hwmon_prefix = "curr"; + hwmon_index = ++n_curr; /* 1-based */ + break; + case EFX_HWMON_POWER: + hwmon_prefix = "power"; + hwmon_index = ++n_power; /* 1-based */ + break; + } + + min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MIN1); + max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MAX1); + min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MIN2); + max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MAX2); + + if (min1 != max1) { + snprintf(name, sizeof(name), "%s%u_input", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_value, i, type, 0); + + if (hwmon_type != EFX_HWMON_POWER) { + snprintf(name, sizeof(name), "%s%u_min", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, min1); + } + + snprintf(name, sizeof(name), "%s%u_max", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, max1); + + if (min2 != max2) { + /* Assume max2 is critical value. + * But we have no good way to expose min2. 
+ */ + snprintf(name, sizeof(name), "%s%u_crit", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, max2); + } + } + + snprintf(name, sizeof(name), "%s%u_alarm", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_alarm, i, type, 0); + + if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && + efx_mcdi_sensor_type[type].label) { + snprintf(name, sizeof(name), "%s%u_label", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_label, i, type, 0); + } + } + +hwmon_register: + hwmon->groups[0] = &hwmon->group; + hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev, + KBUILD_MODNAME, NULL, + hwmon->groups); + if (IS_ERR(hwmon->device)) { + rc = PTR_ERR(hwmon->device); + goto fail; + } + + return 0; + +fail: + efx_siena_mcdi_mon_remove(efx); + return rc; +} + +void efx_siena_mcdi_mon_remove(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + + if (hwmon->device) + hwmon_device_unregister(hwmon->device); + kfree(hwmon->attrs); + kfree(hwmon->group.attrs); + efx_siena_free_buffer(efx, &hwmon->dma_buf); +} + +#endif /* CONFIG_SFC_SIENA_MCDI_MON */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h new file mode 100644 index 0000000000..a3cc8b7ec7 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h @@ -0,0 +1,17204 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2009-2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + */ + + +#ifndef MCDI_PCOL_H +#define MCDI_PCOL_H + +/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */ +/* Power-on reset state */ +#define MC_FW_STATE_POR (1) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. */ +#define MC_FW_WARM_BOOT_OK (2) +/* The MC main image has started to boot. */ +#define MC_FW_STATE_BOOTING (4) +/* The Scheduler has started. */ +#define MC_FW_STATE_SCHED (8) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. + * Unlike a warm boot, assume DMEM has been reloaded, so that + * the MC persistent data must be reinitialised. */ +#define MC_FW_TEPID_BOOT_OK (16) +/* We have entered the main firmware via recovery mode. This + * means that MC persistent data must be reinitialised, but that + * we shouldn't touch PCIe config. 
*/
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#define MCDI_PCOL_VERSION 2
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ *       0       7  8     16    20     22  23  24    31
+ *      | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ *               |                      |   |
+ *               |                      |   \--- Response
+ *               |                      \------- Error
+ *               \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which case any
+ * associated response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request;
+ * a request should not be sent unless the response for the previous
+ * request has been received (either by polling shared memory, or by
+ * receiving an event).
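+ *
+ * Sketch of how a v1 header might be encoded using the fields defined
+ * below (illustrative only; cmd, inlen, seqno and xflags are hypothetical
+ * locals, and EFX_POPULATE_DWORD_7 comes from the driver's bitfield
+ * helpers):
+ *
+ *	EFX_POPULATE_DWORD_7(hdr,
+ *			     MCDI_HEADER_RESPONSE, 0,
+ *			     MCDI_HEADER_RESYNC, 1,
+ *			     MCDI_HEADER_CODE, cmd,
+ *			     MCDI_HEADER_DATALEN, inlen,
+ *			     MCDI_HEADER_SEQ, seqno,
+ *			     MCDI_HEADER_XFLAGS, xflags,
+ *			     MCDI_HEADER_NOT_EPOCH, 1);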
+ */ + +/** Request/Response structure */ +#define MCDI_HEADER_OFST 0 +#define MCDI_HEADER_CODE_LBN 0 +#define MCDI_HEADER_CODE_WIDTH 7 +#define MCDI_HEADER_RESYNC_LBN 7 +#define MCDI_HEADER_RESYNC_WIDTH 1 +#define MCDI_HEADER_DATALEN_LBN 8 +#define MCDI_HEADER_DATALEN_WIDTH 8 +#define MCDI_HEADER_SEQ_LBN 16 +#define MCDI_HEADER_SEQ_WIDTH 4 +#define MCDI_HEADER_RSVD_LBN 20 +#define MCDI_HEADER_RSVD_WIDTH 1 +#define MCDI_HEADER_NOT_EPOCH_LBN 21 +#define MCDI_HEADER_NOT_EPOCH_WIDTH 1 +#define MCDI_HEADER_ERROR_LBN 22 +#define MCDI_HEADER_ERROR_WIDTH 1 +#define MCDI_HEADER_RESPONSE_LBN 23 +#define MCDI_HEADER_RESPONSE_WIDTH 1 +#define MCDI_HEADER_XFLAGS_LBN 24 +#define MCDI_HEADER_XFLAGS_WIDTH 8 +/* Request response using event */ +#define MCDI_HEADER_XFLAGS_EVREQ 0x01 +/* Request (and signal) early doorbell return */ +#define MCDI_HEADER_XFLAGS_DBRET 0x02 + +/* Maximum number of payload bytes */ +#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V2 0x400 + +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2 + + +/* The MC can generate events for two reasons: + * - To advance a shared memory request if XFLAGS_EVREQ was set + * - As a notification (link state, i2c event), controlled + * via MC_CMD_LOG_CTRL + * + * Both events share a common structure: + * + * 0 32 33 36 44 52 60 + * | Data | Cont | Level | Src | Code | Rsvd | + * | + * \ There is another event pending in this notification + * + * If Code==CMDDONE, then the fields are further interpreted as: + * + * - LEVEL==INFO Command succeeded + * - LEVEL==ERR Command failed + * + * 0 8 16 24 32 + * | Seq | Datalen | Errno | Rsvd | + * + * These fields are taken directly out of the standard MCDI header, i.e., + * LEVEL==ERR, Datalen == 0 => Reboot + * + * Events can be squirted out of the UART (using LOG_CTRL) without a + * MCDI header. An event can be distinguished from a MCDI response by + * examining the first byte which is 0xc0. This corresponds to the + * non-existent MCDI command MC_CMD_DEBUG_LOG. + * + * 0 7 8 + * | command | Resync | = 0xc0 + * + * Since the event is written in big-endian byte order, this works + * providing bits 56-63 of the event are 0xc0. + * + * 56 60 63 + * | Rsvd | Code | = 0xc0 + * + * Which means for convenience the event code is 0xc for all MC + * generated events. + */ +#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc + + +/* Operation not permitted. */ +#define MC_CMD_ERR_EPERM 1 +/* Non-existent command target */ +#define MC_CMD_ERR_ENOENT 2 +/* assert() has killed the MC */ +#define MC_CMD_ERR_EINTR 4 +/* I/O failure */ +#define MC_CMD_ERR_EIO 5 +/* Already exists */ +#define MC_CMD_ERR_EEXIST 6 +/* Try again */ +#define MC_CMD_ERR_EAGAIN 11 +/* Out of memory */ +#define MC_CMD_ERR_ENOMEM 12 +/* Caller does not hold required locks */ +#define MC_CMD_ERR_EACCES 13 +/* Resource is currently unavailable (e.g. 
lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+   command to be passed between MCs, and the
+   transport doesn't support that. Should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807.
+ * May also be returned for other operations such as sub-variant switching. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow
+   for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Points to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to noflash mode entry point.
*/ +#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4) + +/* The command set exported by the boot ROM (MCDI v0) */ +#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ + (1 << MC_CMD_READ32) | \ + (1 << MC_CMD_WRITE32) | \ + (1 << MC_CMD_COPYCODE) | \ + (1 << MC_CMD_GET_VERSION), \ + 0, 0, 0 } + +#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \ + (MC_CMD_SENSOR_ENTRY_OFST + (_x)) + +#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + + +/* Version 2 adds an optional argument to error returns: the errno value + * may be followed by the (0-based) number of the first argument that + * could not be processed. + */ +#define MC_CMD_ERR_ARG_OFST 4 + +/* No space */ +#define MC_CMD_ERR_ENOSPC 28 + +/* MCDI_EVENT structuredef */ +#define MCDI_EVENT_LEN 8 +#define MCDI_EVENT_CONT_LBN 32 +#define MCDI_EVENT_CONT_WIDTH 1 +#define MCDI_EVENT_LEVEL_LBN 33 +#define MCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MCDI_EVENT_LEVEL_FATAL 0x3 +#define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_SEQ_OFST 0 +#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 +#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 +#define MCDI_EVENT_CMDDONE_DATALEN_OFST 0 +#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 +#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 +#define MCDI_EVENT_CMDDONE_ERRNO_OFST 0 +#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 +#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_OFST 0 +#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: Link is down or link speed could not be determined */ +#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 +/* enum: 100Mbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +/* enum: 1Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +/* enum: 10Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +/* enum: 40Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +/* enum: 25Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 +/* enum: 50Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 +/* enum: 100Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 +#define MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0 +#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 +#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_STATE_OFST 0 +#define MCDI_EVENT_SENSOREVT_STATE_LBN 8 +#define 
MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_VALUE_OFST 0 +#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 +#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 +#define MCDI_EVENT_FWALERT_DATA_OFST 0 +#define MCDI_EVENT_FWALERT_DATA_LBN 8 +#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 +#define MCDI_EVENT_FWALERT_REASON_OFST 0 +#define MCDI_EVENT_FWALERT_REASON_LBN 0 +#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 +/* enum: SRAM Access. */ +#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 +#define MCDI_EVENT_FLR_VF_OFST 0 +#define MCDI_EVENT_FLR_VF_LBN 0 +#define MCDI_EVENT_FLR_VF_WIDTH 8 +#define MCDI_EVENT_TX_ERR_TXQ_OFST 0 +#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 +#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 +#define MCDI_EVENT_TX_ERR_TYPE_OFST 0 +#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 +/* enum: Descriptor loader reported failure */ +#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 +/* enum: Descriptor ring empty and no EOP seen for packet */ +#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 +/* enum: Overlength packet */ +#define MCDI_EVENT_TX_ERR_2BIG 0x3 +/* enum: Malformed option descriptor */ +#define MCDI_EVENT_TX_BAD_OPTDESC 0x5 +/* enum: Option descriptor part way through a packet */ +#define MCDI_EVENT_TX_OPT_IN_PKT 0x8 +/* enum: DMA or PIO data access error */ +#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 +#define MCDI_EVENT_TX_ERR_INFO_OFST 0 +#define MCDI_EVENT_TX_ERR_INFO_LBN 16 +#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_TX_FLUSH_TXQ_OFST 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 +#define MCDI_EVENT_PTP_ERR_TYPE_OFST 0 +#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 +#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 +/* enum: PLL lost lock */ +#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 +/* enum: Filter overflow (PDMA) */ +#define MCDI_EVENT_PTP_ERR_FILTER 0x2 +/* enum: FIFO overflow (FPGA) */ +#define MCDI_EVENT_PTP_ERR_FIFO 0x3 +/* enum: Merge queue overflow */ +#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_OFST 0 +#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 +#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 +/* enum: AOE failed to load - no valid image? */ +#define MCDI_EVENT_AOE_NO_LOAD 0x1 +/* enum: AOE FC reported an exception */ +#define MCDI_EVENT_AOE_FC_ASSERT 0x2 +/* enum: AOE FC watchdogged */ +#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3 +/* enum: AOE FC failed to start */ +#define MCDI_EVENT_AOE_FC_NO_START 0x4 +/* enum: Generic AOE fault - likely to have been reported via other means too + * but intended for use by aoex driver. 
+ */ +#define MCDI_EVENT_AOE_FAULT 0x5 +/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6 +/* enum: AOE loaded successfully */ +#define MCDI_EVENT_AOE_LOAD 0x7 +/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_DMA 0x8 +/* enum: AOE byteblaster connected/disconnected (Connection status in + * AOE_ERR_DATA) + */ +#define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +/* enum: DDR ECC status update */ +#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb +/* enum: FPGA header incorrect */ +#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc +/* enum: FPGA Powered Off due to error in powering up FPGA */ +#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd +/* enum: AOE FPGA load failed due to MC to MUM communication failure */ +#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe +/* enum: Notify that invalid flash type detected */ +#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf +/* enum: Notify that the attempt to run FPGA Controller firmware timedout */ +#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 +/* enum: Failure to probe one or more FPGA boot flash chips */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 +/* enum: FPGA boot-flash contains an invalid image header */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12 +/* enum: Failed to program clocks required by the FPGA */ +#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 +/* enum: Notify that FPGA Controller is alive to serve MCDI requests */ +#define MCDI_EVENT_AOE_FC_RUNNING 0x14 +#define MCDI_EVENT_AOE_ERR_DATA_OFST 0 +#define MCDI_EVENT_AOE_ERR_DATA_LBN 8 +#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 +/* enum: FC Assert happened, but the register information is not available */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 +/* enum: The register information for FC Assert is ready for readinng by driver + */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 +/* enum: Reading from NV failed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0 +/* enum: Invalid Magic Number if FPGA header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1 +/* enum: Invalid Silicon type detected in header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2 +/* enum: Unsupported VRatio */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3 +/* enum: Unsupported DDR Type */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4 +/* enum: DDR Voltage out of supported range */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5 +/* enum: Unsupported DDR speed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6 +/* enum: Unsupported DDR size */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7 +/* enum: Unsupported DDR rank */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8 +/* enum: Primary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0 +/* enum: Secondary boot flash */ +#define 
MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_OFST 0 +#define MCDI_EVENT_RX_ERR_RXQ_LBN 0 +#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_OFST 0 +#define MCDI_EVENT_RX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_OFST 0 +#define MCDI_EVENT_RX_ERR_INFO_LBN 16 +#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 0 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_OFST 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define MCDI_EVENT_MC_REBOOT_COUNT_OFST 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_OFST 0 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_OFST 0 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DBRET_SEQ_OFST 0 +#define MCDI_EVENT_DBRET_SEQ_LBN 0 +#define MCDI_EVENT_DBRET_SEQ_WIDTH 8 +#define MCDI_EVENT_SUC_ERR_TYPE_OFST 0 +#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0 +#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8 +/* enum: Corrupted or bad SUC application. */ +#define MCDI_EVENT_SUC_BAD_APP 0x1 +/* enum: SUC application reported an assert. */ +#define MCDI_EVENT_SUC_ASSERT 0x2 +/* enum: SUC application reported an exception. */ +#define MCDI_EVENT_SUC_EXCEPTION 0x3 +/* enum: SUC watchdog timer expired. */ +#define MCDI_EVENT_SUC_WATCHDOG 0x4 +#define MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0 +#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8 +#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24 +#define MCDI_EVENT_SUC_ERR_DATA_OFST 0 +#define MCDI_EVENT_SUC_ERR_DATA_LBN 8 +#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4 +/* Enum values, see field(s): */ +/* MCDI_EVENT/LINKCHANGE_SPEED */ +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0 +#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2 +#define MCDI_EVENT_DATA_LBN 0 +#define MCDI_EVENT_DATA_WIDTH 32 +/* Alias for PTP_DATA. 
*/ +#define MCDI_EVENT_SRC_LBN 36 +#define MCDI_EVENT_SRC_WIDTH 8 +/* Data associated with PTP events which doesn't fit into the main DATA field + */ +#define MCDI_EVENT_PTP_DATA_LBN 36 +#define MCDI_EVENT_PTP_DATA_WIDTH 8 +/* EF100 specific. Defined by QDMA. The phase bit, changes each time round the + * event ring + */ +#define MCDI_EVENT_EV_EVQ_PHASE_LBN 59 +#define MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1 +#define MCDI_EVENT_EV_CODE_LBN 60 +#define MCDI_EVENT_EV_CODE_WIDTH 4 +#define MCDI_EVENT_CODE_LBN 44 +#define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 +/* enum: Bad assert. */ +#define MCDI_EVENT_CODE_BADSSERT 0x1 +/* enum: PM Notice. */ +#define MCDI_EVENT_CODE_PMNOTICE 0x2 +/* enum: Command done. */ +#define MCDI_EVENT_CODE_CMDDONE 0x3 +/* enum: Link change. */ +#define MCDI_EVENT_CODE_LINKCHANGE 0x4 +/* enum: Sensor Event. */ +#define MCDI_EVENT_CODE_SENSOREVT 0x5 +/* enum: Schedule error. */ +#define MCDI_EVENT_CODE_SCHEDERR 0x6 +/* enum: Reboot. */ +#define MCDI_EVENT_CODE_REBOOT 0x7 +/* enum: Mac stats DMA. */ +#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 +/* enum: Firmware alert. */ +#define MCDI_EVENT_CODE_FWALERT 0x9 +/* enum: Function level reset. */ +#define MCDI_EVENT_CODE_FLR 0xa +/* enum: Transmit error */ +#define MCDI_EVENT_CODE_TX_ERR 0xb +/* enum: Tx flush has completed */ +#define MCDI_EVENT_CODE_TX_FLUSH 0xc +/* enum: PTP packet received timestamp */ +#define MCDI_EVENT_CODE_PTP_RX 0xd +/* enum: PTP NIC failure */ +#define MCDI_EVENT_CODE_PTP_FAULT 0xe +/* enum: PTP PPS event */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf +/* enum: Rx flush has completed */ +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +/* enum: Receive error */ +#define MCDI_EVENT_CODE_RX_ERR 0x11 +/* enum: AOE fault */ +#define MCDI_EVENT_CODE_AOE 0x12 +/* enum: Network port calibration failed (VCAL). */ +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +/* enum: HW PPS event */ +#define MCDI_EVENT_CODE_HW_PPS 0x14 +/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and + * a different format) + */ +#define MCDI_EVENT_CODE_MC_REBOOT 0x15 +/* enum: the MC has detected a parity error */ +#define MCDI_EVENT_CODE_PAR_ERR 0x16 +/* enum: the MC has detected a correctable error */ +#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 +/* enum: the MC has detected an uncorrectable error */ +#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: The MC has entered offline BIST mode */ +#define MCDI_EVENT_CODE_MC_BIST 0x19 +/* enum: PTP tick event providing current NIC time */ +#define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d +/* enum: MCDI command accepted. New commands can be issued but this command is + * not done yet. + */ +#define MCDI_EVENT_CODE_DBRET 0x1e +/* enum: The MC has detected a fault on the SUC */ +#define MCDI_EVENT_CODE_SUC 0x1f +/* enum: Link change. This event is sent instead of LINKCHANGE if + * WANT_V2_LINKCHANGES was set on driver attach. + */ +#define MCDI_EVENT_CODE_LINKCHANGE_V2 0x20 +/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach + * when the local device capabilities changes. This will usually correspond to + * a module change. 
+ */ +#define MCDI_EVENT_CODE_MODULECHANGE 0x21 +/* enum: Notification that the sensors have been added and/or removed from the + * sensor table. This event includes the new sensor table generation count, if + * this does not match the driver's local copy it is expected to call + * DYNAMIC_SENSORS_LIST to refresh it. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22 +/* enum: Notification that a sensor has changed state as a result of a reading + * crossing a threshold. This is sent as two events, the first event contains + * the handle and the sensor's state (in the SRC field), and the second + * contains the value. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23 +/* enum: Notification that a descriptor proxy function configuration has been + * pushed to "live" status (visible to host). SRC field contains the handle of + * the affected descriptor proxy function. DATA field contains the generation + * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET / + * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24 +/* enum: Notification that a descriptor proxy function has been reset. SRC + * field contains the handle of the affected descriptor proxy function. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25 +/* enum: Notification that a driver attached to a descriptor proxy function. + * SRC field contains the handle of the affected descriptor proxy function. For + * Virtio proxy functions this message consists of two MCDI events, where the + * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0 + * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy + * functions event length and meaning of DATA field is not yet defined. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26 +/* enum: Artificial event generated by host and posted via MC for test + * purposes. 
+ */ +#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_DATA_LBN 0 +#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LEN 4 +#define MCDI_EVENT_SENSOREVT_DATA_LBN 0 +#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 +#define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LEN 4 +#define MCDI_EVENT_TX_ERR_DATA_LBN 0 +#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of + * timestamp + */ +#define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LEN 4 +#define MCDI_EVENT_PTP_SECONDS_LBN 0 +#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of + * timestamp + */ +#define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_MAJOR_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field + * of timestamp + */ +#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4 +#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 +#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of + * timestamp + */ +#define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LEN 4 +#define MCDI_EVENT_PTP_MINOR_LBN 0 +#define MCDI_EVENT_PTP_MINOR_WIDTH 32 +/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet + */ +#define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LEN 4 +#define MCDI_EVENT_PTP_UUID_LBN 0 +#define MCDI_EVENT_PTP_UUID_WIDTH 32 +#define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LEN 4 +#define MCDI_EVENT_RX_ERR_DATA_LBN 0 +#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LEN 4 +#define MCDI_EVENT_PAR_ERR_DATA_LBN 0 +#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 +/* For CODE_PTP_TIME events, the major value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 +/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19. 
+ */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. + */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 +#define MCDI_EVENT_DBRET_DATA_OFST 0 +#define MCDI_EVENT_DBRET_DATA_LEN 4 +#define MCDI_EVENT_DBRET_DATA_LBN 0 +#define MCDI_EVENT_DBRET_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32 +#define MCDI_EVENT_MODULECHANGE_DATA_OFST 0 +#define MCDI_EVENT_MODULECHANGE_DATA_LEN 4 +#define MCDI_EVENT_MODULECHANGE_DATA_LBN 0 +#define MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32 +/* The new generation count after a sensor has been added or deleted. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32 +/* The handle of a dynamic sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32 +/* The current values of a sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32 +/* The current state of a sensor. 
*/ +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36 +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8 +#define MCDI_EVENT_DESC_PROXY_DATA_OFST 0 +#define MCDI_EVENT_DESC_PROXY_DATA_LEN 4 +#define MCDI_EVENT_DESC_PROXY_DATA_LBN 0 +#define MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32 +/* Generation count of applied configuration set */ +#define MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32 +/* Virtio features negotiated with the host driver. First event (CONT=1) + * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63. + */ +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32 + +/* FCDI_EVENT structuredef */ +#define FCDI_EVENT_LEN 8 +#define FCDI_EVENT_CONT_LBN 32 +#define FCDI_EVENT_CONT_WIDTH 1 +#define FCDI_EVENT_LEVEL_LBN 33 +#define FCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define FCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define FCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define FCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define FCDI_EVENT_LEVEL_FATAL 0x3 +#define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_DATA_LEN 4 +#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0 +#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 +#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 +#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ +#define FCDI_EVENT_LINK_UP 0x1 /* enum */ +#define FCDI_EVENT_DATA_LBN 0 +#define FCDI_EVENT_DATA_WIDTH 32 +#define FCDI_EVENT_SRC_LBN 36 +#define FCDI_EVENT_SRC_WIDTH 8 +#define FCDI_EVENT_EV_CODE_LBN 60 +#define FCDI_EVENT_EV_CODE_WIDTH 4 +#define FCDI_EVENT_CODE_LBN 44 +#define FCDI_EVENT_CODE_WIDTH 8 +/* enum: The FC was rebooted. */ +#define FCDI_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define FCDI_EVENT_CODE_ASSERT 0x2 +/* enum: DDR3 test result. */ +#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3 +/* enum: Link status. */ +#define FCDI_EVENT_CODE_LINK_STATE 0x4 +/* enum: A timed read is ready to be serviced. 
 */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
+#define MUM_EVENT_SENSOR_ID_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_OFST 0
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_OFST 0
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted.
 */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
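+ *
+ * Illustrative usage sketch (an assumption, not part of the generated
+ * definitions): filling in a one-word write with the driver's MCDI
+ * helpers from mcdi.h, where addr, value and rc are hypothetical
+ * locals. The MC_CMD_READ32 request above is built the same way.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_WRITE32_IN_LEN(1));
+ *
+ *	MCDI_SET_DWORD(inbuf, WRITE32_IN_ADDR, addr);
+ *	MCDI_SET_DWORD(inbuf, WRITE32_IN_BUFFER, value);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_WRITE32, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);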
+ */ +#define MC_CMD_WRITE32 0x2 +#undef MC_CMD_0x2_PRIVILEGE_CTG + +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_WRITE32_IN msgrequest */ +#define MC_CMD_WRITE32_IN_LENMIN 8 +#define MC_CMD_WRITE32_IN_LENMAX 252 +#define MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020 +#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) +#define MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4) +#define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_ADDR_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_OFST 4 +#define MC_CMD_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254 + +/* MC_CMD_WRITE32_OUT msgresponse */ +#define MC_CMD_WRITE32_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_COPYCODE + * Copy MC code between two locations and jump. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. + */ +#define MC_CMD_COPYCODE 0x3 +#undef MC_CMD_0x3_PRIVILEGE_CTG + +#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_COPYCODE_IN msgrequest */ +#define MC_CMD_COPYCODE_IN_LEN 16 +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. + */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ +#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) + */ +#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) + */ +#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 +/* Destination address */ +#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 +#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 +/* Address of where to jump after copy. 
 */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* Offset of the boot image in flash, or BOOT_OFFSET_NULL if the MC wasn't
+ * flash booted
+ */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog.
*/ +#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 +/* enum: An illegal address trap stopped the system (huntington and later) */ +#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 + +/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs + * found on Riverhead designs + */ +#define MC_CMD_GET_ASSERTS_OUT_V2_LEN 240 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. */ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26 + +/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted + * firmware version information + */ +#define MC_CMD_GET_ASSERTS_OUT_V3_LEN 360 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. 
*/ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20 +/* MC firmware build date (as Unix timestamp) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264 +/* MC firmware version number */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272 +/* MC firmware security level */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4 +/* MC firmware extra version info (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64 + + +/***********************************/ +/* MC_CMD_LOG_CTRL + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions + */ +#define MC_CMD_LOG_CTRL 0x7 +#undef MC_CMD_0x7_PRIVILEGE_CTG + +#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LOG_CTRL_IN msgrequest */ +#define MC_CMD_LOG_CTRL_IN_LEN 8 +/* Log destination */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 +/* enum: UART. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 +/* enum: Event queue. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4 + +/* MC_CMD_LOG_CTRL_OUT msgresponse */ +#define MC_CMD_LOG_CTRL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VERSION + * Get version information about adapter components. 
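+ *
+ * Illustrative usage sketch (an assumption, not part of the generated
+ * definitions), using the MCDI helpers from mcdi.h; outbuf, outlen,
+ * fw_version and rc are hypothetical locals:
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
+ *	size_t outlen;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_GET_VERSION_OUT_LEN)
+ *		fw_version = MCDI_QWORD(outbuf, GET_VERSION_OUT_VERSION);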
+ */ +#define MC_CMD_GET_VERSION 0x8 +#undef MC_CMD_0x8_PRIVILEGE_CTG + +#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VERSION_IN msgrequest */ +#define MC_CMD_GET_VERSION_IN_LEN 0 + +/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */ +#define MC_CMD_GET_VERSION_EXT_IN_LEN 4 +/* placeholder, set to 0 */ +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4 + +/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ +#define MC_CMD_GET_VERSION_V0_OUT_LEN 4 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 +/* enum: Reserved version number to indicate "any" version. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff +/* enum: Bootrom version value for Siena. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 +/* enum: Bootrom version value for Huntington. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 +/* enum: Bootrom version value for Medford2. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002 + +/* MC_CMD_GET_VERSION_OUT msgresponse */ +#define MC_CMD_GET_VERSION_OUT_LEN 32 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 + +/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ +#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 + +/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version + * information for all adapter components. For Riverhead based designs, base MC + * firmware version fields refer to NMC firmware, while CMC firmware data is in + * dedicated CMC fields. 
Flags indicate which data is present in the response + * (depending on which components exist on a particular adapter) + */ +#define MC_CMD_GET_VERSION_V2_OUT_LEN 304 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. 
+ */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +/* FPGA version as three numbers. On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64 + + +/***********************************/ +/* MC_CMD_PTP + * Perform PTP operation + */ +#define MC_CMD_PTP 0xb +#undef MC_CMD_0xb_PRIVILEGE_CTG + +#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PTP_IN msgrequest */ +#define MC_CMD_PTP_IN_LEN 1 +/* PTP operation code */ +#define MC_CMD_PTP_IN_OP_OFST 0 +#define MC_CMD_PTP_IN_OP_LEN 1 +/* enum: Enable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_ENABLE 0x1 +/* enum: Disable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_DISABLE 0x2 +/* enum: Send a PTP packet. This operation is used on Siena and Huntington. + * From Medford onwards it is not supported: on those platforms PTP transmit + * timestamping is done using the fast path. + */ +#define MC_CMD_PTP_OP_TRANSMIT 0x3 +/* enum: Read the current NIC time. */ +#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 +/* enum: Get the current PTP status. Note that the clock frequency returned (in + * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666). + */ +#define MC_CMD_PTP_OP_STATUS 0x5 +/* enum: Adjust the PTP NIC's time. */ +#define MC_CMD_PTP_OP_ADJUST 0x6 +/* enum: Synchronize host and NIC time. */ +#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 +/* enum: Basic manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 +/* enum: Packet based manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 +/* enum: Reset some of the PTP related statistics */ +#define MC_CMD_PTP_OP_RESET_STATS 0xa +/* enum: Debug operations to MC. */ +#define MC_CMD_PTP_OP_DEBUG 0xb +/* enum: Read an FPGA register. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_FPGAREAD 0xc +/* enum: Write an FPGA register. Siena PTP adapters only. 
 */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change the frequency correction applied to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. Not implemented. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE: extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC. Siena PTP adapters only.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards.
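+ *
+ * Illustrative enable sketch for adapters that use this mode field (an
+ * assumption, not part of the generated definitions; MCDI helpers from
+ * mcdi.h, rc a hypothetical local):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, MC_CMD_PTP_MODE_V2);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);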
*/ +#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 +#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4 +/* enum: PTP, version 1 */ +#define MC_CMD_PTP_MODE_V1 0x0 +/* enum: PTP, version 1, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V1_VLAN 0x1 +/* enum: PTP, version 2 */ +#define MC_CMD_PTP_MODE_V2 0x2 +/* enum: PTP, version 2, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V2_VLAN 0x3 +/* enum: PTP, version 2, with improved UUID filtering */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 +/* enum: FCoE (seconds and microseconds) */ +#define MC_CMD_PTP_MODE_FCOE 0x5 + +/* MC_CMD_PTP_IN_DISABLE msgrequest */ +#define MC_CMD_PTP_IN_DISABLE_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TRANSMIT msgrequest */ +#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Transmit packet length */ +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4 +/* Transmit packet data */ +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_STATUS msgrequest */ +#define MC_CMD_PTP_IN_STATUS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. 
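+ *
+ * Worked example (illustrative, on the reading that FREQ encodes the
+ * frequency error as a fixed point ratio): with the default 40
+ * fractional bits a correction of delta parts per billion is encoded as
+ * FREQ = delta * 2^40 / 10^9, so +1 ppb becomes roughly 0x44c; with
+ * FP44_FREQ_ADJ the same correction uses 44 fractional bits instead.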
+ */ +#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. + */ +/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of time readings to capture */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4 +/* Host address in which to write "synchronization started" indication (64 + * bits) + */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16 + +/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Enable or disable packet testing */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */ +#define MC_CMD_PTP_IN_RESET_STATS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + 
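+/* Illustrative sketch for the MC_CMD_PTP_IN_ADJUST request above (an
+ * assumption, not part of the generated definitions), combining a
+ * frequency and a phase adjustment with the MCDI helpers from mcdi.h;
+ * freq_fixed_point, delta_seconds, delta_nanoseconds and rc are
+ * hypothetical locals:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ *	MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, freq_fixed_point);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, delta_seconds);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, delta_nanoseconds);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */
+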
+/* MC_CMD_PTP_IN_DEBUG msgrequest */ +#define MC_CMD_PTP_IN_DEBUG_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Debug operations */ +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4 + +/* MC_CMD_PTP_IN_FPGAREAD msgrequest */ +#define MC_CMD_PTP_IN_FPGAREAD_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4 + +/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ +#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* 
MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 +/* Enum values, see field(s): */ +/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */ + +/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of VLAN tags, 0 if not VLAN */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4 +/* Set of VLAN tags to filter against */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3 + +/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable UUID filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4 +/* UUID to filter against */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16 + +/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable Domain filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4 +/* Domain number to filter against */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4 + +/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Set the clock source. */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4 +/* enum: Internal. */ +#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 + +/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */ +#define MC_CMD_PTP_IN_RST_CLK_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* Enable or disable */ +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4 +/* enum: Enable */ +#define MC_CMD_PTP_ENABLE_PPS 0x0 +/* enum: Disable */ +#define MC_CMD_PTP_DISABLE_PPS 0x1 +/* Not used. Events are always sent to function relative queue 0. 
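+ *
+ * Illustrative PPS enable sketch (an assumption, not part of the
+ * generated definitions; MCDI helpers from mcdi.h, rc a hypothetical
+ * local):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_PPS_ENABLE_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_PPS_ENABLE);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_PPS_ENABLE_OP, MC_CMD_PTP_ENABLE_PPS);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_PPS_ENABLE_QUEUE_ID, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);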
*/ +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4 + +/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ +#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ +#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ +#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Original field containing queue ID. Now extended to include flags. */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1 + +/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Unsubscribe options */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4 +/* enum: Unsubscribe a single queue */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 +/* enum: Unsubscribe all queues */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 +/* Event queue ID */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4 + +/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable PPS test mode, 0 to disable and return result. */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* NIC - Host System Clock Synchronization status */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4 +/* enum: Host System clock and NIC clock are not in sync */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 +/* enum: Host System clock and NIC clock are synchronized */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1 +/* If synchronized, number of seconds until clocks should be considered to be + * no longer in sync. 
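+ *
+ * A hedged sketch of reporting "in sync, revisit in 10 seconds" (assumes
+ * the sfc MCDI helpers and the MC_CMD_PTP_OP_SET_SYNC_STATUS opcode
+ * defined earlier in this file; MCDI_DECLARE_BUF zero-fills, so the
+ * RESERVED fields need no explicit write):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN);
+ *
+ *   MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_SET_SYNC_STATUS);
+ *   MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ *   MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_STATUS,
+ *                  MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC);
+ *   MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_TIMEOUT, 10);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), NULL, 0, NULL);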
+ */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 + +/* MC_CMD_PTP_OUT msgresponse */ +#define MC_CMD_PTP_OUT_LEN 0 + +/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ +#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4 +/* Upper 32bits of major timestamp value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_OUT_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_STATUS_LEN 64 +/* Frequency of NIC's hardware clock */ +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4 +/* Number of packets transmitted and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4 +/* Number of packets received and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4 +/* Number of packets timestamped by the FPGA */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4 +/* Number of packets filter matched */ +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4 +/* Number of packets not filter matched */ +#define 
MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4 +/* Number of PPS overflows (noise on input?) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4 +/* Number of PPS bad periods */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4 +/* Minimum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4 +/* Maximum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4 +/* Last period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4 +/* Mean period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4 +/* Minimum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4 +/* Maximum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4 +/* Last offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4 +/* Mean offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4 + +/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20) +/* A set of host and NIC times */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51 +/* Host time immediately before NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4 +/* Host time immediately after NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4 +/* Number of nanoseconds waited after reading NIC's hardware clock */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1)
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed. Note this enum is deprecated. Do not add to it - use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
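+ *
+ * A probe-order sketch implied by the note above (hedged; assumes the sfc
+ * MCDI helpers, buffers sized with MCDI_DECLARE_BUF, and the
+ * MC_CMD_PTP_OP_GET_ATTRIBUTES opcode defined earlier in this file):
+ *
+ *   MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ *   MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0)
+ *           fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ *   else
+ *           fmt = MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS;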
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
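+ *
+ * Worked example for one TIMESET entry of an MC_CMD_PTP_OUT_SYNCHRONIZE
+ * response (illustrative arithmetic only):
+ *
+ *   window    = HOSTEND - HOSTSTART    (total host-side window, ns)
+ *   corrected = window - WAITNS        (remove the MC's deliberate wait)
+ *
+ * A sample is usable for host/NIC clock comparison only when corrected is
+ * at least the SYNC_WINDOW_MIN value below.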
+ */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4 +/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4 +/* Uncorrected error on non-PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 +#define 
MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32-bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32-bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP-related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
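+ *
+ * For instance (a hedged sketch, not code from this patch; efx, rc and
+ * outlen are hypothetical locals), querying the current OCSD state sets
+ * ADDR to NULL and INTERVAL to the "report state" command value 1:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_HP_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_HP_OUT_LEN);
+ *
+ *   MCDI_SET_DWORD(inbuf, HP_IN_SUBCMD, MC_CMD_HP_IN_OCSD_SUBCMD);
+ *   MCDI_SET_QWORD(inbuf, HP_IN_OCSD_ADDR, 0);
+ *   MCDI_SET_DWORD(inbuf, HP_IN_OCSD_INTERVAL, 1);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_HP, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);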
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
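+ *
+ * A read sketch mirroring how the driver's MDIO wrappers use this command
+ * (hedged; efx, rc, outlen, prtad, addr and value are hypothetical locals):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
+ *
+ *   MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, MC_CMD_MDIO_BUS_EXTERNAL);
+ *   MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+ *   MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, MC_CMD_MMD_CLAUSE45_PMAPMD);
+ *   MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0 &&
+ *       MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) == MC_CMD_MDIO_STATUS_GOOD)
+ *           value = MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);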
+ */ +#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 +/* enum: Good. */ +#define MC_CMD_MDIO_STATUS_GOOD 0x8 + + +/***********************************/ +/* MC_CMD_MDIO_WRITE + * MDIO register write. + */ +#define MC_CMD_MDIO_WRITE 0x11 +#undef MC_CMD_0x11_PRIVILEGE_CTG + +#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MDIO_WRITE_IN msgrequest */ +#define MC_CMD_MDIO_WRITE_IN_LEN 20 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 +/* enum: Internal. */ +/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ +/* enum: External. */ +/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ +/* Port address */ +#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +/* MC_CMD_MDIO_CLAUSE22 0x20 */ +/* Address */ +#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 +#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 + +/* MC_CMD_MDIO_WRITE_OUT msgresponse */ +#define MC_CMD_MDIO_WRITE_OUT_LEN 4 +/* Status; the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 +/* enum: Good. */ +/* MC_CMD_MDIO_STATUS_GOOD 0x8 */ + + +/***********************************/ +/* MC_CMD_DBI_WRITE + * Write DBI register(s). + */ +#define MC_CMD_DBI_WRITE 0x12 +#undef MC_CMD_0x12_PRIVILEGE_CTG + +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DBI_WRITE_IN msgrequest */ +#define MC_CMD_DBI_WRITE_IN_LENMIN 12 +#define MC_CMD_DBI_WRITE_IN_LENMAX 252 +#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) +#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12) +/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset + * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF. 
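+ *
+ * Packing sketch for a single op using the bitfield helpers from the
+ * driver's bitfield.h (hypothetical addr, vf and value variables; note
+ * VALUE_LBN below is structure-relative, so the generic EFX_DWORD_0 field
+ * is used for the third dword):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_DBI_WRITE_IN_LEN(1));
+ *   efx_dword_t *op = (efx_dword_t *)MCDI_PTR(inbuf, DBI_WRITE_IN_DBIWROP);
+ *
+ *   EFX_POPULATE_DWORD_1(op[0], MC_CMD_DBIWROP_TYPEDEF_ADDRESS, addr);
+ *   EFX_POPULATE_DWORD_3(op[1], MC_CMD_DBIWROP_TYPEDEF_VF_NUM, vf,
+ *                        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE, 1,
+ *                        MC_CMD_DBIWROP_TYPEDEF_CS2, 0);
+ *   EFX_POPULATE_DWORD_1(op[2], EFX_DWORD_0, value);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_DBI_WRITE, inbuf, sizeof(inbuf),
+ *                     NULL, 0, NULL);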
+ */ +#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85 + +/* MC_CMD_DBI_WRITE_OUT msgresponse */ +#define MC_CMD_DBI_WRITE_OUT_LEN 0 + +/* MC_CMD_DBIWROP_TYPEDEF structuredef */ +#define MC_CMD_DBIWROP_TYPEDEF_LEN 12 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PORT_READ32 + * Read a 32-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_READ32 0x14 + +/* MC_CMD_PORT_READ32_IN msgrequest */ +#define MC_CMD_PORT_READ32_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 + +/* MC_CMD_PORT_READ32_OUT msgresponse */ +#define MC_CMD_PORT_READ32_OUT_LEN 8 +/* Value */ +#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 +/* Status */ +#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 +#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE32 + * Write a 32-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE32 0x15 + +/* MC_CMD_PORT_WRITE32_IN msgrequest */ +#define MC_CMD_PORT_WRITE32_IN_LEN 8 +/* Address */ +#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 + +/* MC_CMD_PORT_WRITE32_OUT msgresponse */ +#define MC_CMD_PORT_WRITE32_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_READ128 + * Read a 128-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. 
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2)
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4 +/* Base MAC address for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +/* Base MAC address for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4 +/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port0. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port1. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4 +/* Siena only. This field contains a 16-bit value for each of the types of + * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a + * specific board type, but otherwise have no meaning to the MC; they are used + * by the driver to manage selection of appropriate firmware updates. Unused on + * EF10 and later (use MC_CMD_NVRAM_METADATA). 
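+ *
+ * The entry count depends on the response length; a consumer sketch
+ * (hedged; outlen as returned by efx_mcdi_rpc(), i and subtype are
+ * hypothetical locals):
+ *
+ *   nsubtypes = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(outlen);
+ *   fw_list = MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ *   for (i = 0; i < nsubtypes; i++)
+ *           subtype[i] = le16_to_cpu(((const __le16 *)fw_list)[i]);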
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters
+ * (offset 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16-byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */ +#define MC_CMD_LTSSM_HIST 0x1b + +/* MC_CMD_LTSSM_HIST_IN msgrequest */ +#define MC_CMD_LTSSM_HIST_IN_LEN 0 + +/* MC_CMD_LTSSM_HIST_OUT msgresponse */ +#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4) +/* variable number of LTSSM values, as bytes. The history is read-to-clear. */ +#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_DRV_ATTACH + * Inform MCPU that this port is managed on the host (i.e. driver active). For + * Huntington, also request the preferred datapath firmware to use if possible + * (it may not be possible for this request to be fulfilled; the driver must + * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which + * features are actually available). The FIRMWARE_ID field is ignored by older + * platforms. + */ +#define MC_CMD_DRV_ATTACH 0x1c +#undef MC_CMD_0x1c_PRIVILEGE_CTG + +#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DRV_ATTACH_IN msgrequest */ +#define MC_CMD_DRV_ATTACH_IN_LEN 12 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 +#define MC_CMD_DRV_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_OFST 0 +#define MC_CMD_DRV_PREBOOT_LBN 1 +#define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +#define MC_CMD_FW_FULL_FEATURED 0x0 +/* enum: Prefer to use firmware with fewer features but lower latency */ +#define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler 
TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 +/* enum: Prefer to use firmware with additional "rules engine" filtering + * support + */ +#define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Prefer to use firmware with additional DPDK support */ +#define MC_CMD_FW_DPDK 0x6 +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +#define MC_CMD_FW_L3XUDP 0x7 +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe +/* enum: Only this option is allowed for non-admin functions */ +#define MC_CMD_FW_DONT_CARE 0xffffffff + +/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver + * version + */ +#define MC_CMD_DRV_ATTACH_IN_V2_LEN 32 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4 +/* MC_CMD_DRV_ATTACH_OFST 0 */ +/* MC_CMD_DRV_ATTACH_LBN 0 */ +/* MC_CMD_DRV_ATTACH_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1 +/* MC_CMD_DRV_PREBOOT_OFST 0 */ +/* MC_CMD_DRV_PREBOOT_LBN 1 */ +/* MC_CMD_DRV_PREBOOT_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +/* MC_CMD_FW_FULL_FEATURED 0x0 */ +/* enum: Prefer to use firmware with fewer features but lower latency */ +/* MC_CMD_FW_LOW_LATENCY 0x1 */ +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +/* MC_CMD_FW_PACKED_STREAM 0x2 */ +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +/* MC_CMD_FW_HIGH_TX_RATE 0x3 */ +/* enum: Reserved value */ +/* MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */ +/* enum: Prefer to use firmware with additional "rules engine" filtering + * 
support + */ +/* MC_CMD_FW_RULES_ENGINE 0x5 */ +/* enum: Prefer to use firmware with additional DPDK support */ +/* MC_CMD_FW_DPDK 0x6 */ +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +/* MC_CMD_FW_L3XUDP 0x7 */ +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +/* MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */ +/* enum: Only this option is allowed for non-admin functions */ +/* MC_CMD_FW_DONT_CARE 0xffffffff */ +/* Version of the driver to be reported by management protocols (e.g. NC-SI) + * handled by the NIC. This is a zero-terminated ASCII string. + */ +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12 +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20 + +/* MC_CMD_DRV_ATTACH_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_OUT_LEN 4 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4 + +/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4 +/* Flags associated with this function */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 +/* enum: Labels the lowest-numbered function visible to the OS */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 +/* enum: The function can control the link state of the physical port it is + * bound to. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 +/* enum: The function can perform privileged operations */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 +/* enum: The function does not have an active port associated with it. The port + * refers to the Sorrento external FPGA port. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 +/* enum: If set, indicates that VI spreading is currently enabled. Will always + * indicate the current state, regardless of the value in the WANT_VI_SPREADING + * input. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 +/* enum: Used during development only. Should no longer be used. */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5 +/* enum: If set, indicates that TX only spreading is enabled. Even-numbered + * TXQs will use one engine, and odd-numbered TXQs will use the other. This + * also has the effect that only even-numbered RXQs will receive traffic. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5 + + +/***********************************/ +/* MC_CMD_SHMUART + * Route UART output to circular buffer in shared memory instead. + */ +#define MC_CMD_SHMUART 0x1f + +/* MC_CMD_SHMUART_IN msgrequest */ +#define MC_CMD_SHMUART_IN_LEN 4 +/* ??? */ +#define MC_CMD_SHMUART_IN_FLAG_OFST 0 +#define MC_CMD_SHMUART_IN_FLAG_LEN 4 + +/* MC_CMD_SHMUART_OUT msgresponse */ +#define MC_CMD_SHMUART_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PORT_RESET + * Generic per-port reset. There is no equivalent for per-board reset. Locks + * required: None; Return code: 0, ETIME. NOTE: This command is deprecated - + * use MC_CMD_ENTITY_RESET instead. 
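+ *
+ * A minimal function-reset sketch against the replacement command defined
+ * below (this mirrors the driver's usual usage; efx and rc are
+ * hypothetical locals):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+ *
+ *   MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+ *                         ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+ *                     NULL, 0, NULL);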
+ */ +#define MC_CMD_PORT_RESET 0x20 +#undef MC_CMD_0x20_PRIVILEGE_CTG + +#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PORT_RESET_IN msgrequest */ +#define MC_CMD_PORT_RESET_IN_LEN 0 + +/* MC_CMD_PORT_RESET_OUT msgresponse */ +#define MC_CMD_PORT_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ENTITY_RESET + * Generic per-resource reset. There is no equivalent for per-board reset. + * Locks required: None; Return code: 0, ETIME. NOTE: This command is an + * extended version of the deprecated MC_CMD_PORT_RESET with added fields. + */ +#define MC_CMD_ENTITY_RESET 0x20 +/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */ + +/* MC_CMD_ENTITY_RESET_IN msgrequest */ +#define MC_CMD_ENTITY_RESET_IN_LEN 4 +/* Optional flags field. Omitting this will perform a "legacy" reset action + * (TBD). + */ +#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 + +/* MC_CMD_ENTITY_RESET_OUT msgresponse */ +#define MC_CMD_ENTITY_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PCIE_CREDITS + * Read instantaneous and minimum flow control thresholds. + */ +#define MC_CMD_PCIE_CREDITS 0x21 + +/* MC_CMD_PCIE_CREDITS_IN msgrequest */ +#define MC_CMD_PCIE_CREDITS_IN_LEN 8 +/* poll period. 0 is disabled */ +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 +/* wipe statistics */ +#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 +#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 + +/* MC_CMD_PCIE_CREDITS_OUT msgresponse */ +#define MC_CMD_PCIE_CREDITS_OUT_LEN 16 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2 + + +/***********************************/ +/* MC_CMD_RXD_MONITOR + * Get histogram of RX queue fill level. 
+ */ +#define MC_CMD_RXD_MONITOR 0x22 + +/* MC_CMD_RXD_MONITOR_IN msgrequest */ +#define MC_CMD_RXD_MONITOR_IN_LEN 12 +#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 +#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 + +/* MC_CMD_RXD_MONITOR_OUT msgresponse */ +#define MC_CMD_RXD_MONITOR_OUT_LEN 80 +#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 + + +/***********************************/ +/* MC_CMD_PUTS + * Copy the given ASCII string out onto UART and/or out of the network port. 
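+ *
+ * Sizing sketch (hedged; msg and len are hypothetical) showing how the
+ * LEN(num) macro below sizes a request that carries a NUL-terminated
+ * string to the UART:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_PUTS_IN_LEN(MC_CMD_PUTS_IN_STRING_MAXNUM));
+ *   size_t inlen = MC_CMD_PUTS_IN_LEN(len + 1);
+ *
+ *   MCDI_POPULATE_DWORD_1(inbuf, PUTS_IN_DEST, PUTS_IN_UART, 1);
+ *   memcpy(MCDI_PTR(inbuf, PUTS_IN_STRING), msg, len + 1);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_PUTS, inbuf, inlen, NULL, 0, NULL);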
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1)
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
+#define MC_CMD_PUTS_IN_UART_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_OFST 0
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+#define MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This is guaranteed to succeed even if the PHY is
+ * in a 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ??
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
+#define MC_CMD_PHY_CAP_10HDX_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_OFST 8
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_OFST 8
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_OFST 8
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_OFST 8
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_OFST 8
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_OFST 8
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_OFST 8
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_OFST 8
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_OFST 8
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_OFST 8
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_OFST 8
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_OFST 8
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_OFST 8
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_OFST 8
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_OFST 8
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_OFST 8
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
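+
+/* Usage sketch (editor's illustration, not part of the firmware interface):
+ * SUPPORTED_CAP is a plain bitmask, so individual capabilities are tested
+ * against the _LBN values. Assuming the sfc driver's MCDI helpers:
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+ *	size_t outlen;
+ *	bool an_supported;
+ *	u32 caps;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ *		caps = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+ *		an_supported = caps & (1 << MC_CMD_PHY_CAP_AN_LBN);
+ *	}
+ */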
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST.
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
+/* Status of each channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
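+
+/* Usage sketch (editor's illustration): the intended flow is START_BIST
+ * followed by POLL_BIST until RESULT leaves the RUNNING state; type-specific
+ * responses are longer than the generic one, so real callers should size the
+ * buffer for the variant they expect and check OUTLEN. A minimal loop,
+ * assuming the sfc MCDI helpers, might look like:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+ *	size_t outlen;
+ *	u32 result;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ *	do {
+ *		msleep(10);
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *		result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ *	} while (!rc && result == MC_CMD_POLL_BIST_RUNNING);
+ */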
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than via
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
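+
+/* Usage sketch (editor's illustration): each per-speed field is a 64-bit
+ * mask indexed by the MC_CMD_LOOPBACK_* enums; with the sfc MCDI helpers the
+ * SUGGESTED mask can be read and tested roughly like this (the field sits at
+ * the same offset in the V1 and V2 responses):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN);
+ *	size_t outlen;
+ *	u64 modes;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+ *		modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+ *		if (modes & (1ULL << MC_CMD_LOOPBACK_PMAPMD))
+ *			pma_loopback_available = true;
+ *	}
+ */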
+
+/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */
+#define AN_TYPE_LEN 4
+#define AN_TYPE_TYPE_OFST 0
+#define AN_TYPE_TYPE_LEN 4
+/* enum: None, AN disabled or not supported */
+#define MC_CMD_AN_NONE 0x0
+/* enum: Clause 28 - BASE-T */
+#define MC_CMD_AN_CLAUSE28 0x1
+/* enum: Clause 37 - BASE-X */
+#define MC_CMD_AN_CLAUSE37 0x2
+/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable
+ * assemblies. Includes Clause 72/Clause 92 link-training.
+ */
+#define MC_CMD_AN_CLAUSE73 0x3
+#define AN_TYPE_TYPE_LBN 0
+#define AN_TYPE_TYPE_WIDTH 32
+
+/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3
+ */
+#define FEC_TYPE_LEN 4
+#define FEC_TYPE_TYPE_OFST 0
+#define FEC_TYPE_TYPE_LEN 4
+/* enum: No FEC */
+#define MC_CMD_FEC_NONE 0x0
+/* enum: Clause 74 BASE-R FEC (a.k.a. Firecode) */
+#define MC_CMD_FEC_BASER 0x1
+/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+#define MC_CMD_FEC_RS 0x2
+#define FEC_TYPE_TYPE_LBN 0
+#define FEC_TYPE_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+
+/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */
+#define MC_CMD_GET_LINK_OUT_V2_LEN 44
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
+/* True local device capabilities (taking into account currently used PMD/MDI,
+ * e.g. plugged-in module). In general, subset of
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST
+ * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal
+ * to SUPPORTED_CAP for non-pluggable PMDs. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4
+/* Auto-negotiation type used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Forward error correction used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1
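+
+/* Usage sketch (editor's illustration): a plain link-state query. Callers
+ * that understand the V2 fields can pass a V2-sized buffer and gate on the
+ * returned OUTLEN before touching the extended fields:
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
+ *	size_t outlen;
+ *	u32 flags, speed;
+ *	bool link_up;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_GET_LINK_OUT_LEN) {
+ *		speed = MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED);
+ *		flags = MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS);
+ *		link_up = flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN);
+ *	}
+ */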
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
+
+/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence
+ * number to ensure this SET_LINK command corresponds to the latest
+ * MODULECHANGE event.
+ */
+#define MC_CMD_SET_LINK_IN_V2_LEN 17
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V2_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V2_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
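+
+/* Usage sketch (editor's illustration): advertise a capability mask with no
+ * loopback. MCDI_DECLARE_BUF() zeroes the buffer, so LOOPBACK_SPEED stays 0,
+ * i.e. "choose any available speed". Note the LINK privilege category:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, advertised_caps);
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, 0);
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE,
+ *		       MC_CMD_LOOPBACK_NONE);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */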
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
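+
+/* Usage sketch (editor's illustration): with SET_MAC_ENHANCED firmware, the
+ * extended request can read back the current MTU without changing anything,
+ * by leaving CONTROL at 0 as described above (MCDI_DECLARE_BUF() zeroes the
+ * whole request):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_MAC_V2_OUT_LEN);
+ *	size_t outlen;
+ *	u32 mtu;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_SET_MAC_V2_OUT_LEN)
+ *		mtu = MCDI_DWORD(outbuf, SET_MAC_V2_OUT_MTU);
+ */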
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration below). Each value is
+ * represented by a 32-bit number. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location. Locks required: None. Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
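+
+/* Usage sketch (editor's illustration): with DMA_ADDR left at zero (the
+ * MCDI_DECLARE_BUF() default) the statistics come back in the response as an
+ * array of 32-bit little-endian words indexed by the enums above, read here
+ * with a simple cast for illustration:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_PHY_STATS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_PHY_STATS_OUT_NO_DMA_LEN);
+ *	const __le32 *stats;
+ *	size_t outlen;
+ *	u32 pma_link;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_PHY_STATS, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_PHY_STATS_OUT_NO_DMA_LEN) {
+ *		stats = (const __le32 *)outbuf;
+ *		pma_link = le32_to_cpu(stats[MC_CMD_PMA_PMD_LINK_UP]);
+ *	}
+ */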
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero-initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location. Locks required: None. The PERIODIC_CLEAR option is not used and
+ * now has no effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t).
+ */
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAed data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
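+
+/* Usage sketch (editor's illustration): consistency of a DMAed stats buffer
+ * is checked with the generation words, along the lines of the driver's
+ * stats readers. Here dma_stats is assumed to be the host buffer of __le64
+ * and gen_count the index of its last entry, as determined by the DMA_LEN in
+ * use:
+ *
+ *	__le64 gen_start, gen_end;
+ *
+ *	do {
+ *		gen_end = dma_stats[gen_count];
+ *		rmb();
+ *		... copy out the statistics words ...
+ *		rmb();
+ *		gen_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ *	} while (gen_start != gen_end);
+ */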
+
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
previous one + */ +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits + * or not 32-bit aligned + */ +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +/* enum: Number of CTPIO fallbacks because another VI on the same port was + * sending a CTPIO frame + */ +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled + */ +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +/* enum: Number of CTPIO fallbacks because length in header was less than 29 + * bytes + */ +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +/* enum: Total number of successful CTPIO sends on this port */ +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +/* enum: Total number of CTPIO fallbacks on this port */ +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +/* enum: Total number of CTPIO poisoned frames on this port, whether erased or + * not + */ +#define MC_CMD_MAC_CTPIO_POISON 0x76 +/* enum: Total number of CTPIO erased frames on this port */ +#define MC_CMD_MAC_CTPIO_ERASE 0x77 +/* enum: This includes the space at offset 120 which is the final + * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V3 0x79 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3) +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum: Start of V4 stats buffer space */ +#define MC_CMD_MAC_V4_DMABUF_START 0x79 +/* enum: RXDP counter: Number of packets truncated because scattering was + * disabled. + */ +#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79 +/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting + * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a +/* enum: RXDP counter: Number of times the RXDP timed out while head of line + * blocking. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b +/* enum: This includes the space at offset 124 which is the final + * GENERATION_END in a MAC_STATS_V4 response and otherwise unused. 
+ */
+#define MC_CMD_MAC_NSTATS_V4 0x7d
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by DMA reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * request's record, which is ignored in all but the first structure).
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
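+ *
+ * Illustrative single-record inline copy (a sketch of the record layout
+ * only; dst_rid, dst_addr and len stand for the caller's destination and
+ * size, and the 32-byte record follows MC_CMD_MEMCPY_RECORD_TYPEDEF above):
+ *
+ *   NUM_RECORDS = 1
+ *   TO_RID      = dst_rid
+ *   TO_ADDR     = dst_addr
+ *   FROM_RID    = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE
+ *   FROM_ADDR   = 32    (HI=0, LO=32: payload starts just past the record)
+ *   LENGTH      = len
+ *
+ * with the len payload bytes placed at offset 32 in the request, i.e.
+ * immediately after the single record.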
+ * + * Returns: 0, EINVAL (invalid RID) + */ +#define MC_CMD_MEMCPY 0x31 + +/* MC_CMD_MEMCPY_IN msgrequest */ +#define MC_CMD_MEMCPY_IN_LENMIN 32 +#define MC_CMD_MEMCPY_IN_LENMAX 224 +#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992 +#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) +#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32) +/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ +#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 +#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 +#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31 + +/* MC_CMD_MEMCPY_OUT msgresponse */ +#define MC_CMD_MEMCPY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_SET + * Set a WoL filter. + */ +#define MC_CMD_WOL_FILTER_SET 0x32 +#undef MC_CMD_0x32_PRIVILEGE_CTG + +#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_SET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ +/* A type value of 1 is unused. */ +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 +/* enum: Magic */ +#define MC_CMD_WOL_TYPE_MAGIC 0x0 +/* enum: MS Windows Magic */ +#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 +/* enum: IPv4 Syn */ +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +/* enum: IPv6 Syn */ +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +/* enum: Bitmap */ +#define MC_CMD_WOL_TYPE_BITMAP 0x5 +/* enum: Link */ +#define MC_CMD_WOL_TYPE_LINK 0x6 +/* enum: (Above this for future use) */ +#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 + +/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 
16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1 + +/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 + +/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_REMOVE + * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS + */ +#define MC_CMD_WOL_FILTER_REMOVE 0x33 +#undef MC_CMD_0x33_PRIVILEGE_CTG + +#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ +#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4 + +/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_RESET + * Reset (i.e. remove all) WoL filters. Locks required: None. 
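+ *
+ * Illustrative invocation (a sketch using the driver's MCDI helpers from
+ * mcdi.h; here only the wake filters are cleared):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_RESET_IN_LEN);
+ *
+ *   MCDI_SET_DWORD(inbuf, WOL_FILTER_RESET_IN_MASK,
+ *                  MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, inbuf, sizeof(inbuf),
+ *                     NULL, 0, NULL);
+ *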
Returns: 0, + * ENOSYS + */ +#define MC_CMD_WOL_FILTER_RESET 0x34 +#undef MC_CMD_0x34_PRIVILEGE_CTG + +#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ +#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ + +/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MCAST_HASH + * Set the MCAST hash value without otherwise reconfiguring the MAC + */ +#define MC_CMD_SET_MCAST_HASH 0x35 + +/* MC_CMD_SET_MCAST_HASH_IN msgrequest */ +#define MC_CMD_SET_MCAST_HASH_IN_LEN 32 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16 + +/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */ +#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_TYPES + * Return bitfield indicating available types of virtual NVRAM partitions. + * Locks required: none. Returns: 0 + */ +#define MC_CMD_NVRAM_TYPES 0x36 +#undef MC_CMD_0x36_PRIVILEGE_CTG + +#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_TYPES_IN msgrequest */ +#define MC_CMD_NVRAM_TYPES_IN_LEN 0 + +/* MC_CMD_NVRAM_TYPES_OUT msgresponse */ +#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 +/* Bit mask of supported types. */ +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 +/* enum: Disabled callisto. */ +#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 +/* enum: MC firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 +/* enum: MC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 +/* enum: Static configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 +/* enum: Static configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 +/* enum: Dynamic configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 +/* enum: Dynamic configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 +/* enum: Expansion Rom. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 +/* enum: Expansion Rom Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 +/* enum: Expansion Rom Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 +/* enum: Phy Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa +/* enum: Phy Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb +/* enum: Log. */ +#define MC_CMD_NVRAM_TYPE_LOG 0xc +/* enum: FPGA image. */ +#define MC_CMD_NVRAM_TYPE_FPGA 0xd +/* enum: FPGA backup image */ +#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe +/* enum: FC firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW 0xf +/* enum: FC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10 +/* enum: CPLD image. */ +#define MC_CMD_NVRAM_TYPE_CPLD 0x11 +/* enum: Licensing information. */ +#define MC_CMD_NVRAM_TYPE_LICENSE 0x12 +/* enum: FC Log. */ +#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 +/* enum: Additional flash on FPGA. */ +#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14 + + +/***********************************/ +/* MC_CMD_NVRAM_INFO + * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0, + * EINVAL (bad type). 
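+ *
+ * Illustrative query (a sketch using the driver's MCDI helpers from mcdi.h;
+ * error handling beyond the return code is elided):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ *   size_t outlen;
+ *
+ *   MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (!rc && outlen >= MC_CMD_NVRAM_INFO_OUT_LEN) {
+ *           size = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+ *           erase_size = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
+ *   }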
+ */ +#define MC_CMD_NVRAM_INFO 0x37 +#undef MC_CMD_0x37_PRIVILEGE_CTG + +#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_INFO_IN msgrequest */ +#define MC_CMD_NVRAM_INFO_IN_LEN 4 +#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_INFO_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_OUT_LEN 24 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3 +#define MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4 + +/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +#define 
MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4 +/* Writes must be multiples of this size. Added to support the MUM on Sorrento. + */ +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_START + * Start a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if + * PHY_LOCK required and not held). In an adapter bound to a TSA controller, + * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types + * i.e. static config, dynamic config and expansion ROM config. Attempting to + * perform this operation on a restricted partition will return the error + * EPERM. + */ +#define MC_CMD_NVRAM_UPDATE_START 0x38 +#undef MC_CMD_0x38_PRIVILEGE_CTG + +#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request. + * Use NVRAM_UPDATE_START_V2_IN in new code + */ +#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START + * request with additional flags indicating version of command in use. See + * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use + * paired up with NVRAM_UPDATE_FINISH_V2_IN. + */ +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 + +/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */ +#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_READ + * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. 
Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_READ 0x39 +#undef MC_CMD_0x39_PRIVILEGE_CTG + +#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_READ_IN msgrequest */ +#define MC_CMD_NVRAM_READ_IN_LEN 12 +#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4 + +/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ +#define MC_CMD_NVRAM_READ_IN_V2_LEN 16 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4 +/* Optional control info. If a partition is stored with an A/B versioning + * scheme (i.e. in more than one physical partition in NVRAM) the host can set + * this to control which underlying physical partition is used to read data + * from. This allows it to perform a read-modify-write-verify with the write + * lock continuously held by calling NVRAM_UPDATE_START, reading the old + * contents using MODE=TARGET_CURRENT, overwriting the old partition and then + * verifying by reading with MODE=TARGET_BACKUP. + */ +#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4 +/* enum: Same as omitting MODE: caller sees data in current partition unless it + * holds the write lock in which case it sees data in the partition it is + * updating. + */ +#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0 +/* enum: Read from the current partition of an A/B pair, even if holding the + * write lock. + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1 +/* enum: Read from the non-current (i.e. to be updated) partition of an A/B + * pair + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2 + +/* MC_CMD_NVRAM_READ_OUT msgresponse */ +#define MC_CMD_NVRAM_READ_OUT_LENMIN 1 +#define MC_CMD_NVRAM_READ_OUT_LENMAX 252 +#define MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_NVRAM_WRITE + * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. 
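+ *
+ * Illustrative write of one chunk (a sketch using the driver's MCDI helpers
+ * from mcdi.h; splitting the image into chunks and respecting the
+ * partition's WRITESIZE/erase granularity is the caller's responsibility):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_WRITE_IN_LENMAX);
+ *
+ *   MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ *   MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ *   MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, chunk);
+ *   memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), data, chunk);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ *                     MC_CMD_NVRAM_WRITE_IN_LEN(chunk), NULL, 0, NULL);
+ *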
Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_WRITE 0x3a +#undef MC_CMD_0x3a_PRIVILEGE_CTG + +#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_WRITE_IN msgrequest */ +#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1) +#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_NVRAM_WRITE_OUT msgresponse */ +#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_ERASE + * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_ERASE 0x3b +#undef MC_CMD_0x3b_PRIVILEGE_CTG + +#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_ERASE_IN msgrequest */ +#define MC_CMD_NVRAM_ERASE_IN_LEN 12 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4 + +/* MC_CMD_NVRAM_ERASE_OUT msgresponse */ +#define MC_CMD_NVRAM_ERASE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_FINISH + * Finish a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/ + * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to + * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of + * partition types i.e. static config, dynamic config and expansion ROM config. + * Attempting to perform this operation on a restricted partition will return + * the error EPERM. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c +#undef MC_CMD_0x3c_PRIVILEGE_CTG + +#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH + * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4 + +/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH + * request with additional flags indicating version of NVRAM_UPDATE commands in + * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended + * functionality. 
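+ *
+ * Illustrative update sequence (a sketch; error handling elided):
+ *
+ *   1. NVRAM_UPDATE_START_V2 with FLAG_REPORT_VERIFY_RESULT set
+ *   2. NVRAM_ERASE / NVRAM_WRITE as required for the new image
+ *   3. NVRAM_UPDATE_FINISH_V2 with FLAG_REPORT_VERIFY_RESULT set, then
+ *      check RESULT_CODE in the V2 response for
+ *      MC_CMD_NVRAM_VERIFY_RC_SUCCESS (or MC_CMD_NVRAM_VERIFY_RC_PENDING
+ *      while polling with FLAG_POLL_VERIFY_RESULT).
+ *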
+ * Use paired with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for Medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to take more
+ * than the MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, and the MCDI
+ * command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing. Result codes that indicate an
+ * internal build failure and are therefore not expected to be seen by
+ * customers in the field are marked with a prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The trusted approvers list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * trusted approvers list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe
+/* enum: Internal-error. The bundle header is invalid. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12
+/* enum: Internal-error. The number of components in a bundle does not match
+ * the number of components advertised by the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16
+/* enum: Internal-error. Component hash calculation failed. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
+/* enum: The update operation is in-progress. */
+#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
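+ *
+ * Illustrative invocation (a sketch using the driver's MCDI helpers from
+ * mcdi.h; the MCDI transaction itself is expected to fail because the MC
+ * goes away, so an -EIO-style completion is the normal outcome):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ *
+ *   MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ *                  after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+ *                     NULL, 0, NULL);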
+ *
+ * Production MC firmware builds are generally compiled with
+ * REBOOT_ON_ASSERT=1, which means that they will automatically reboot out of
+ * the assertion handler, so this is in practice an optional operation. It is
+ * still recommended that drivers execute this to support custom firmware
+ * builds with REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE. Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0.
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16-bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32-bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL.
+ * If two ranges are used, a value outside the second range results in
+ * STATE_FATAL while a value outside the first and inside the second range
+ * results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for
+ * use as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None. Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
+
+/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4
+/* Flags controlling information retrieved */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Engineering sensor 1 */
+#define MC_CMD_SENSOR_ENGINEERING_1 0x57
+/* enum: Engineering sensor 2 */
+#define MC_CMD_SENSOR_ENGINEERING_2 0x58
+/* enum: Engineering sensor 3 */
+#define MC_CMD_SENSOR_ENGINEERING_3 0x59
+/* enum: Engineering sensor 4 */
+#define MC_CMD_SENSOR_ENGINEERING_4 0x5a
+/* enum: Engineering sensor 5 */
+#define MC_CMD_SENSOR_ENGINEERING_5 0x5b
+/* enum: Engineering sensor 6 */
+#define MC_CMD_SENSOR_ENGINEERING_6 0x5c
+/* enum: Engineering sensor 7 */
+#define MC_CMD_SENSOR_ENGINEERING_7 0x5d
+/* enum: Engineering sensor 8 */
+#define MC_CMD_SENSOR_ENGINEERING_8 0x5e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then EINVAL is
+ * returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
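+ *
+ * Illustrative decode of one DMAd entry (a sketch; dma_buf is assumed to be
+ * the host buffer viewed as an array of efx_dword_t, one per sensor, laid
+ * out per MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF below):
+ *
+ *   efx_dword_t *entry = &dma_buf[i];
+ *   u16 value = EFX_DWORD_FIELD(*entry,
+ *                               MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+ *   u8 state = EFX_DWORD_FIELD(*entry,
+ *                              MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ *   if (state != MC_CMD_SENSOR_STATE_OK)
+ *           ... report or act on the out-of-range sensor ...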
+ */ +#define MC_CMD_READ_SENSORS 0x42 +#undef MC_CMD_0x42_PRIVILEGE_CTG + +#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_READ_SENSORS_IN msgrequest */ +#define MC_CMD_READ_SENSORS_IN_LEN 8 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 + +/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. */ +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4 + +/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1 + +/* MC_CMD_READ_SENSORS_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_OUT_LEN 0 + +/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0 + +/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 +/* enum: Ok. */ +#define MC_CMD_SENSOR_STATE_OK 0x0 +/* enum: Breached warning threshold. */ +#define MC_CMD_SENSOR_STATE_WARNING 0x1 +/* enum: Breached fatal threshold. */ +#define MC_CMD_SENSOR_STATE_FATAL 0x2 +/* enum: Fault with sensor. */ +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +/* enum: Sensor is working but does not currently have a reading. */ +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 +/* enum: Sensor initialisation failed. 
*/
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 TX queues that map to priorities 0-7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
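+ *
+ * Illustrative ARP offload (a sketch using the driver's MCDI helpers from
+ * mcdi.h; mac and ip are the station address and IPv4 address the NIC
+ * should answer for while the host is in the lights-out state):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN);
+ *   size_t outlen;
+ *
+ *   MCDI_SET_DWORD(inbuf, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ *                  MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
+ *   memcpy(MCDI_PTR(inbuf, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC), mac, ETH_ALEN);
+ *   memcpy(MCDI_PTR(inbuf, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP), &ip, 4);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_ADD_LIGHTSOUT_OFFLOAD, inbuf,
+ *                     sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *   // on success, FILTER_ID in the response identifies the offload so that
+ *   // it can later be removed with MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD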
+ * Returns: 0, ENOSYS + */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 +#undef MC_CMD_0x46_PRIVILEGE_CTG + +#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD + * Remove a protocol offload from NIC for lights-out state. Locks required: + * None. Returns: 0, ENOSYS + */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 +#undef MC_CMD_0x47_PRIVILEGE_CTG + +#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4 + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAC_RESET_RESTORE + * Restore MAC after block reset. Locks required: None. Returns: 0. + */ +#define MC_CMD_MAC_RESET_RESTORE 0x48 + +/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */ +#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 + +/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */ +#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TESTASSERT + * Deliberately trigger an assert-detonation in the firmware for testing + * purposes (i.e. 
to allow testing that the driver copes gracefully). Locks
+ * required: None. Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's
+ * privilege level. If the client has the admin privilege, then all functions
+ * that have filters installed will be FLRed and the FLR_DONE flag will be set.
+ * Otherwise the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes).
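For review context, a minimal sketch of driving MC_CMD_WORKAROUND from the driver side, assuming the sfc MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc) from mcdi.h; the FLAGS read refers to the BUG26807 EXT_OUT response defined just below. Illustrative only, not part of the patch.

/* Enable/disable a workaround; on success, optionally return the extended
 * FLAGS word (e.g. to check FLR_DONE after toggling BUG26807).
 */
static int example_set_workaround(struct efx_nic *efx, u32 type, bool enable,
				  unsigned int *flags)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enable ? 1 : 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;	/* EINVAL: firmware doesn't know this workaround */
	if (flags && outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
	else if (flags)
		*flags = 0;	/* plain MC_CMD_WORKAROUND_OUT: no flags */
	return 0;
}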
*/ +#define MC_CMD_WORKAROUND_BUG61265 0x7 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ +#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 +#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4 + +/* MC_CMD_WORKAROUND_OUT msgresponse */ +#define MC_CMD_WORKAROUND_OUT_LEN 0 + +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_PHY_MEDIA_INFO + * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for + * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG + * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the + * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1 + * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80. + * Anything else: currently undefined. Locks required: None. Return code: 0. + */ +#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b +#undef MC_CMD_0x4b_PRIVILEGE_CTG + +#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4 + +/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_NVRAM_TEST + * Test a particular NVRAM partition for valid contents (where "valid" depends + * on the type of partition). + */ +#define MC_CMD_NVRAM_TEST 0x4c +#undef MC_CMD_0x4c_PRIVILEGE_CTG + +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_TEST_IN msgrequest */ +#define MC_CMD_NVRAM_TEST_IN_LEN 4 +#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_TEST_OUT msgresponse */ +#define MC_CMD_NVRAM_TEST_OUT_LEN 4 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 +/* enum: Passed. */ +#define MC_CMD_NVRAM_TEST_PASS 0x0 +/* enum: Failed. */ +#define MC_CMD_NVRAM_TEST_FAIL 0x1 +/* enum: Not supported. */ +#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 + + +/***********************************/ +/* MC_CMD_MRSFP_TWEAK + * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds. + * I2C I/O expander bits are always read; if equaliser parameters are supplied, + * they are configured first. Locks required: None. Return code: 0, EINVAL. 
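As an illustration of the page-based protocol described above for MC_CMD_GET_PHY_MEDIA_INFO (illustrative only, not part of the patch; assumes the sfc MCDI helpers, including MCDI_PTR):

/* Read one page of SFP+ module data, e.g. to back an ethtool module-EEPROM
 * query. The response carries DATALEN followed by DATALEN bytes of DATA.
 */
static int example_read_media_page(struct efx_nic *efx, u32 page,
				   u8 *buf, size_t space)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
	size_t outlen, datalen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	datalen = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
	if (datalen > space)
		datalen = space;
	memcpy(buf, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA), datalen);
	return 0;
}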
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
+/* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
+/* interpretation is sensor-specific.
*/ +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4 + +/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ +#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_RESOURCE_LIMITS + */ +#define MC_CMD_GET_RESOURCE_LIMITS 0x4f + +/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */ +#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0 + +/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4 + + +/***********************************/ +/* MC_CMD_NVRAM_PARTITIONS + * Reads the list of available virtual NVRAM partition types. Locks required: + * none. Returns: 0, EINVAL (bad type). + */ +#define MC_CMD_NVRAM_PARTITIONS 0x51 +#undef MC_CMD_0x51_PRIVILEGE_CTG + +#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */ +#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0 + +/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4) +/* total number of partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4 +/* type ID code for each of NUM_PARTITIONS partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254 + + +/***********************************/ +/* MC_CMD_NVRAM_METADATA + * Reads soft metadata for a virtual NVRAM partition type. Locks required: + * none. Returns: 0, EINVAL (bad type). 
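A sketch of walking the variable-length MC_CMD_NVRAM_PARTITIONS response defined above (illustrative only, not part of the patch; assumes the sfc MCDI accessor macros, including MCDI_ARRAY_DWORD):

/* NUM_PARTITIONS leads, followed by that many 32-bit TYPE_ID words; the
 * _NUM(len) helper recovers the array size from the actual response length.
 */
static void example_list_partitions(const efx_dword_t *outbuf, size_t outlen)
{
	unsigned int n = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	unsigned int avail = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(outlen);
	unsigned int i;

	/* Trust the response length over the advertised count. */
	if (n > avail)
		n = avail;
	for (i = 0; i < n; i++)
		pr_info("partition type 0x%x\n",
			MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, i));
}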
+ */ +#define MC_CMD_NVRAM_METADATA 0x52 +#undef MC_CMD_0x52_PRIVILEGE_CTG + +#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_METADATA_IN msgrequest */ +#define MC_CMD_NVRAM_METADATA_IN_LEN 4 +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4 + +/* MC_CMD_NVRAM_METADATA_OUT msgresponse */ +#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1) +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 +/* Subtype ID code for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4 +/* 1st component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2 +/* 2nd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2 +/* 3rd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2 +/* 4th component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2 +/* Zero-terminated string describing the content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000 + + +/***********************************/ +/* MC_CMD_GET_MAC_ADDRESSES + * Returns the base MAC, count and stride for the requesting function + */ +#define MC_CMD_GET_MAC_ADDRESSES 0x55 +#undef MC_CMD_0x55_PRIVILEGE_CTG + +#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */ +#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0 + +/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16 +/* Base MAC address */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6 +/* Padding */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 +/* Number of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4 +/* Spacing of allocated MAC addresses */ +#define 
MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation, see SF-110495-PS for details of CLP
+ * processing. This command has been extended to accommodate the requirements
+ * of different manufacturers which are to be found in SF-119187-TC,
+ * SF-119186-TC, SF-120509-TC and SF-117282-PS.
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
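The base/count/stride triple from MC_CMD_GET_MAC_ADDRESSES above expands to concrete addresses as sketched here (illustrative, standalone C, not part of the patch; treating the 6-byte MAC as a 48-bit big-endian integer is an assumption, though it is the natural reading of "spacing"):

#include <stdint.h>

/* Compute the n-th allocated MAC (n < MAC_COUNT) from the base and stride. */
static void example_nth_mac(const uint8_t base[6], uint32_t stride,
			    uint32_t n, uint8_t out[6])
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++)
		mac = (mac << 8) | base[i];	/* pack big-endian MAC */
	mac += (uint64_t)n * stride;		/* step by the stride */
	for (i = 5; i >= 0; i--) {
		out[i] = mac & 0xff;		/* unpack back to bytes */
		mac >>= 8;
	}
}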
+ */ +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1 + +/* MC_CMD_CLP_IN_GET_MAC msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1 + +/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 +/* MAC address assigned to port */ +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* Boot flag */ +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 + +/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 + +/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 +/* Boot flag */ +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 + + +/***********************************/ +/* MC_CMD_MUM + * Perform a MUM operation + */ +#define MC_CMD_MUM 0x57 +#undef MC_CMD_0x57_PRIVILEGE_CTG + +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_MUM_IN msgrequest */ +#define MC_CMD_MUM_IN_LEN 4 +#define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_OP_OFST 0 +#define MC_CMD_MUM_IN_OP_LBN 0 +#define MC_CMD_MUM_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to MUM */ +#define MC_CMD_MUM_OP_NULL 0x1 +/* enum: Get MUM version */ +#define MC_CMD_MUM_OP_GET_VERSION 0x2 +/* enum: Issue raw I2C command to MUM */ +#define MC_CMD_MUM_OP_RAW_CMD 0x3 +/* enum: Read from registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_READ 0x4 +/* enum: Write to registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_WRITE 0x5 +/* enum: Control UART logging. 
*/ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc +/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage + * level) from MUM + */ +#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 +#define MC_CMD_MUM_IN_CMD_LEN 4 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 +/* Number of words to read. 
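A sketch of filling in the MC_CMD_MUM_IN_READ request described above (illustrative only, not part of the patch; assumes the sfc MCDI helpers, with NUMWORDS as defined immediately below):

/* Build a register-read request for a MUM-attached device such as the
 * Hittite clock generator: op header, then device, address and word count.
 */
static void example_build_mum_read(efx_dword_t *inbuf, u32 addr, u32 nwords)
{
	MCDI_SET_DWORD(inbuf, MUM_IN_CMD, MC_CMD_MUM_OP_READ);
	MCDI_SET_DWORD(inbuf, MUM_IN_READ_DEVICE, MC_CMD_MUM_DEV_HITTITE);
	MCDI_SET_DWORD(inbuf, MUM_IN_READ_ADDR, addr);
	MCDI_SET_DWORD(inbuf, MUM_IN_READ_NUMWORDS, nwords);
}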
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 +#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_LEN 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* 
MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ 
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ 
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 + +/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ +#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define 
MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO OUT register. 
*/ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define 
MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define
MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */ +/* enum: Values 5-15 are reserved for future usage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4 +/* enum: No module present */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0 +/* enum: Module present supported and powered on */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1 +/* enum: Module present but bad type */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2 +/* enum: Module present but incompatible voltage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3 +/* enum: Module present but unknown SPD */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4 +/* enum: Module present but slot cannot support it */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5 +/* enum: Modules may or may not be present, but cannot establish contact by I2C + */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12 + +/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This + * should match the equivalent structure in the sensor_query SPHINX service. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24 +/* A value below this will trigger a warning event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32 +/* A value below this will trigger a critical event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32 +/* A value below this will shut down the card. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32 +/* A value above this will trigger a warning event. 
*/
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32
+/* A value above this will trigger a critical event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32
+/* A value above this will shut down the card. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32
+
+/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64
+/* The handle used to identify the sensor in calls to
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32
+/* A human-readable name for the sensor (zero terminated string, max 32 bytes)
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256
+/* The type of the sensor device, and by implication the unit that the values
+ * will be reported in
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4
+/* enum: A voltage sensor. Unit is mV */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0
+/* enum: A current sensor. Unit is mA */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1
+/* enum: A power sensor. Unit is mW */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2
+/* enum: A temperature sensor. Unit is Celsius */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3
+/* enum: A cooling fan sensor. Unit is RPM */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32
+/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192
+
+/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
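A decode sketch for one 64-byte MC_CMD_DYNAMIC_SENSORS_DESCRIPTION record per the offsets above (illustrative, standalone C, not part of the patch; assumes a little-endian host, matching MCDI's wire format):

#include <stdint.h>
#include <string.h>

struct dyn_sensor_desc {
	uint32_t handle;
	char name[33];		/* 32 bytes + forced terminator */
	uint32_t type;		/* e.g. .._DESCRIPTION_TEMPERATURE */
};

static void example_parse_description(const uint8_t rec[64],
				      struct dyn_sensor_desc *d)
{
	memcpy(&d->handle, rec + 0, 4);	/* HANDLE at offset 0 */
	memcpy(d->name, rec + 4, 32);	/* NAME at offset 4 */
	d->name[32] = '\0';		/* defend against a missing NUL */
	memcpy(&d->type, rec + 36, 4);	/* TYPE at offset 36 */
	/* LIMITS (24 bytes) follows at offset 40, laid out per
	 * MC_CMD_DYNAMIC_SENSORS_LIMITS above.
	 */
}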
+ */
+#define MC_CMD_DYNAMIC_SENSORS_READING_LEN 12
+/* The handle used to identify the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32
+/* The current value of the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32
+/* The sensor's condition, e.g. good, broken or removed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4
+/* enum: Sensor working normally within limits */
+#define MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0
+/* enum: Warning threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1
+/* enum: Critical threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2
+/* enum: Fatal threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3
+/* enum: Sensor not working */
+#define MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4
+/* enum: Sensor working but no reading available */
+#define MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5
+/* enum: Sensor initialization failed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_LIST
+ * Return a complete list of handles for sensors currently managed by the MC,
+ * and a generation count for this version of the sensor table. On systems
+ * advertising the DYNAMIC_SENSORS capability bit, this replaces the
+ * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
+ * added by the NMC.
+ *
+ * Sensor handles are persistent for the lifetime of the sensor and are used to
+ * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
+ *
+ * The generation count is maintained by the MC, is persistent across reboots
+ * and will be incremented each time the sensor table is modified. When the
+ * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
+ * containing the new generation count. The driver should compare this against
+ * the current generation count, and if it is different, call
+ * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table.
+ *
+ * The sensor count is provided to allow a future path to supporting more than
+ * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
+ * the maximum number that will fit in a single response. As this is a fairly
+ * large number (253) it is not anticipated that this will be needed in the
+ * near future, so can currently be ignored.
+ *
+ * On Riverhead this command is implemented as a wrapper for `list` in the
+ * sensor_query SPHINX service.
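The resync rule in the comment above, restated as a driver-side sketch (illustrative only, not part of the patch; struct and helper names are hypothetical):

struct example_hwmon {
	u32 sensor_generation;		/* last generation seen in LIST_OUT */
};

static void example_refetch_sensor_list(struct example_hwmon *hw);

static void example_on_sensors_change_event(struct example_hwmon *hw,
					    u32 event_generation)
{
	/* A CODE_DYNAMIC_SENSORS_CHANGE event carries the new generation
	 * count; if it differs from ours, the table changed under us and
	 * we must re-issue MC_CMD_DYNAMIC_SENSORS_LIST.
	 */
	if (event_generation != hw->sensor_generation)
		example_refetch_sensor_list(hw);
}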
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_LIST
+ * Return a complete list of handles for sensors currently managed by the MC,
+ * and a generation count for this version of the sensor table. On systems
+ * advertising the DYNAMIC_SENSORS capability bit, this replaces the
+ * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
+ * added by the NMC.
+ *
+ * Sensor handles are persistent for the lifetime of the sensor and are used
+ * to identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
+ *
+ * The generation count is maintained by the MC, is persistent across reboots
+ * and will be incremented each time the sensor table is modified. When the
+ * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
+ * containing the new generation count. The driver should compare this
+ * against the current generation count, and if it is different, call
+ * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table.
+ *
+ * The sensor count is provided to allow a future path to supporting more
+ * than MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors,
+ * i.e. the maximum number that will fit in a single response. As this is a
+ * fairly large number (253) it is not anticipated that this will be needed
+ * in the near future, so it can currently be ignored.
+ *
+ * On Riverhead this command is implemented as a wrapper for `list` in the
+ * sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
+#undef MC_CMD_0x66_PRIVILEGE_CTG
+
+#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4)
+/* Generation count, which will be updated each time a sensor is added to or
+ * removed from the MC sensor table.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4
+/* Number of sensors managed by the MC. Note that in principle, this can be
+ * larger than the size of the HANDLES array.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253
+
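An editorial sketch of consuming this response: the handle count falls out of the MCDI payload length via the _HANDLES_NUM(len) expression, and the generation count is cached so a later CODE_DYNAMIC_SENSORS_CHANGE event can be compared against it. Names below are hypothetical; this is not the driver's actual implementation.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Parsed view of MC_CMD_DYNAMIC_SENSORS_LIST_OUT (hypothetical type). */
struct sensor_table {
	uint32_t generation;	/* cached for later change events */
	unsigned int num_handles;
	const uint8_t *handles;	/* num_handles 4-byte handles */
};

static int parse_sensor_list(const uint8_t *outbuf, size_t outlen,
			     struct sensor_table *tbl)
{
	if (outlen < 8)		/* ..._OUT_LENMIN */
		return -1;
	memcpy(&tbl->generation, outbuf + 0, 4); /* ..._GENERATION_OFST 0 */
	tbl->num_handles = (outlen - 8) / 4;	 /* ..._HANDLES_NUM(len) */
	tbl->handles = outbuf + 8;		 /* ..._HANDLES_OFST 8 */
	return 0;
}

If a change event later reports a generation different from tbl->generation, the cached table is stale and the command should be reissued.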
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
+ * Get descriptions for a set of sensors, specified as an array of sensor
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
+ *
+ * Any handles which do not correspond to a sensor currently managed by the
+ * MC will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the
+ * MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for
+ * `get_descriptions` in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
+#undef MC_CMD_0x67_PRIVILEGE_CTG
+
+#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64)
+/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15
+
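Editorial sketch of the request side: the message is just a packed array of handles, so its length is _IN_LEN(num) == 4 * num, bounded by _IN_HANDLES_MAXNUM (63 for MCDIv1). The helper and its buffer handling are illustrative assumptions, not driver code.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Fill inbuf with a GET_DESCRIPTIONS request for num handles and
 * return the MCDI payload length, or 0 if the request would not fit.
 */
static size_t build_get_descriptions(uint8_t *inbuf, size_t bufsz,
				     const uint32_t *handles,
				     unsigned int num)
{
	size_t len = 4 * num;	/* ..._IN_LEN(num) == (0 + 4*(num)) */

	if (num > 63 || len > bufsz)	/* ..._IN_HANDLES_MAXNUM 63 */
		return 0;
	memcpy(inbuf, handles, len);	/* array at ..._IN_HANDLES_OFST 0 */
	return len;
}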
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
+ * Read the state and value for a set of sensors, specified as an array of
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
+ *
+ * In the case of a broken sensor, the state of the corresponding VALUES
+ * entry in the response will be set to BROKEN, and any value provided
+ * should be treated as erroneous.
+ *
+ * Any handles which do not correspond to a sensor currently managed by the
+ * MC will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the
+ * MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
+ * in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
+#undef MC_CMD_0x68_PRIVILEGE_CTG
+
+#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12)
+/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_EVENT_CTRL
+ * Configure which categories of unsolicited events the driver expects to
+ * receive (Riverhead).
+ */
+#define MC_CMD_EVENT_CTRL 0x69
+#undef MC_CMD_0x69_PRIVILEGE_CTG
+
+#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVENT_CTRL_IN msgrequest */
+#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
+#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
+#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
+#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
+/* Array of event categories for which the driver wishes to receive events. */
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
+/* enum: Driver wishes to receive LINKCHANGE events. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
+/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE
+ * events.
+ */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
+/* enum: Driver wishes to receive events for receive errors. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
+/* enum: Driver wishes to receive events for transmit errors. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
+/* enum: Driver wishes to receive firmware alerts. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
+/* enum: Driver wishes to receive reboot events. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
+
+/* MC_CMD_EVENT_CTRL_OUT msgresponse */
+#define MC_CMD_EVENT_CTRL_OUT_LEN 0
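By way of illustration (not part of the patch): the request body is a flat array of the MCDI_EVENT_CODE_* values above, so opting in to link-change and reboot events only takes a two-entry array. The enum values are written out numerically here purely for self-containment.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Build an MC_CMD_EVENT_CTRL request subscribing to LINKCHANGE (0x0)
 * and MC_REBOOT (0x5) events only; returns the MCDI payload length,
 * i.e. MC_CMD_EVENT_CTRL_IN_LEN(2) == 8.
 */
static size_t build_event_ctrl(uint8_t *inbuf)
{
	const uint32_t categories[2] = { 0x0, 0x5 };

	memcpy(inbuf, categories, sizeof(categories));
	return sizeof(categories);
}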
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define 
NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: SUC firmware partition (this is intentionally an alias of + * MUM_FIRMWARE) + */ +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. */ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +/* enum: UEFI expansion ROM if separate from PXE */ +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Used by the expansion ROM for logging */ +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 +/* enum: Used for XIP code of shmbooted images */ +#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 +/* enum: Spare partition 2 */ +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +/* enum: Manufacturing partition. Used during manufacture to pass information + * between XJTAG and Manftest. + */ +#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 +/* enum: Spare partition 4 */ +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +/* enum: Spare partition 5 */ +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +/* enum: Partition for reporting MC status. See mc_flash_layout.h + * medford_mc_status_hdr_t for layout on Medford. + */ +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +/* enum: Spare partition 13 */ +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +/* enum: Spare partition 14 */ +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +/* enum: Spare partition 15 */ +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +/* enum: Spare partition 16 */ +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +/* enum: Factory defaults for dynamic configuration */ +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +/* enum: Factory defaults for expansion ROM configuration */ +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +/* enum: Field Replaceable Unit inventory information for use on IPMI + * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a + * subset of the information stored in this partition. + */ +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 +/* enum: Bundle image partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00 +/* enum: Bundle metadata partition that holds additional information related to + * a bundle update in TLV format + */ +#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01 +/* enum: Bundle update non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02 +/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. 
*/ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03 +/* enum: Start of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +/* enum: End of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +/* enum: Recovery partition map (provided if real map is missing or corrupt) */ +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +/* enum: Partition map (real map as stored in flash) */ +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_ID_LBN 0 +#define NVRAM_PARTITION_TYPE_ID_WIDTH 16 + +/* LICENSED_APP_ID structuredef */ +#define LICENSED_APP_ID_LEN 4 +#define LICENSED_APP_ID_ID_OFST 0 +#define LICENSED_APP_ID_ID_LEN 4 +/* enum: OpenOnload */ +#define LICENSED_APP_ID_ONLOAD 0x1 +/* enum: PTP timestamping */ +#define LICENSED_APP_ID_PTP 0x2 +/* enum: SolarCapture Pro */ +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 +/* enum: TCP Direct */ +#define LICENSED_APP_ID_TCP_DIRECT 0x100 +/* enum: Low Latency */ +#define LICENSED_APP_ID_LOW_LATENCY 0x200 +/* enum: SolarCapture Tap */ +#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 +/* enum: Capture SolarSystem 40G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 +/* enum: Capture SolarSystem 1G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +/* enum: ScaleOut Onload */ +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +/* enum: SCS Network Analytics Dashboard */ +#define LICENSED_APP_ID_DSHBRD 0x4000 +/* enum: SolarCapture Trading Analytics */ +#define LICENSED_APP_ID_SCATRD 0x8000 +#define LICENSED_APP_ID_ID_LBN 0 +#define LICENSED_APP_ID_ID_WIDTH 32 + +/* LICENSED_FEATURES structuredef */ +#define LICENSED_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_FEATURES_MASK_OFST 0 +#define LICENSED_FEATURES_MASK_LEN 8 +#define LICENSED_FEATURES_MASK_LO_OFST 0 +#define LICENSED_FEATURES_MASK_HI_OFST 4 +#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_PIO_OFST 0 +#define LICENSED_FEATURES_PIO_LBN 1 +#define LICENSED_FEATURES_PIO_WIDTH 1 +#define LICENSED_FEATURES_EVQ_TIMER_OFST 0 +#define LICENSED_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_FEATURES_CLOCK_OFST 0 +#define LICENSED_FEATURES_CLOCK_LBN 3 +#define LICENSED_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0 +#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0 +#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_RX_SNIFF_OFST 0 +#define LICENSED_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_TX_SNIFF_OFST 0 +#define LICENSED_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define 
LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_MASK_LBN 0 +#define LICENSED_FEATURES_MASK_WIDTH 64 + +/* LICENSED_V3_APPS structuredef */ +#define LICENSED_V3_APPS_LEN 8 +/* Bitmask of licensed applications */ +#define LICENSED_V3_APPS_MASK_OFST 0 +#define LICENSED_V3_APPS_MASK_LEN 8 +#define LICENSED_V3_APPS_MASK_LO_OFST 0 +#define LICENSED_V3_APPS_MASK_HI_OFST 4 +#define LICENSED_V3_APPS_ONLOAD_OFST 0 +#define LICENSED_V3_APPS_ONLOAD_LBN 0 +#define LICENSED_V3_APPS_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_PTP_OFST 0 +#define LICENSED_V3_APPS_PTP_LBN 1 +#define LICENSED_V3_APPS_PTP_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 +#define LICENSED_V3_APPS_SOLARSECURE_OFST 0 +#define LICENSED_V3_APPS_SOLARSECURE_LBN 3 +#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 +#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0 +#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 +#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 +#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0 +#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8 +#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1 +#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0 +#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9 +#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_OFST 0 +#define LICENSED_V3_APPS_DSHBRD_LBN 14 +#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_OFST 0 +#define LICENSED_V3_APPS_SCATRD_LBN 15 +#define LICENSED_V3_APPS_SCATRD_WIDTH 1 +#define LICENSED_V3_APPS_MASK_LBN 0 +#define LICENSED_V3_APPS_MASK_WIDTH 64 + +/* LICENSED_V3_FEATURES structuredef */ +#define LICENSED_V3_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_V3_FEATURES_MASK_OFST 0 +#define LICENSED_V3_FEATURES_MASK_LEN 8 +#define LICENSED_V3_FEATURES_MASK_LO_OFST 0 +#define LICENSED_V3_FEATURES_MASK_HI_OFST 4 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_PIO_OFST 0 +#define LICENSED_V3_FEATURES_PIO_LBN 1 +#define LICENSED_V3_FEATURES_PIO_WIDTH 1 +#define LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0 +#define 
LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_V3_FEATURES_CLOCK_OFST 0 +#define LICENSED_V3_FEATURES_CLOCK_LBN 3 +#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_SNIFF_OFST 0 +#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_SNIFF_OFST 0 +#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_MASK_LBN 0 +#define LICENSED_V3_FEATURES_MASK_WIDTH 64 + +/* TX_TIMESTAMP_EVENT structuredef */ +#define TX_TIMESTAMP_EVENT_LEN 6 +/* lower 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16 +/* Type of TX event, ordinary TX completion, low or high part of TX timestamp + */ +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 +/* enum: This is a TX completion event, not a timestamp */ +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is a TX completion event for a CTPIO transmit. The event format + * is the same as for TX_EV_COMPLETION. + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_LO + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_HI + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 +/* enum: This is the low part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +/* enum: This is the high part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 +/* upper 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16 + +/* RSS_MODE structuredef */ +#define RSS_MODE_LEN 1 +/* The RSS mode for a particular packet type is a value from 0 - 15 which can + * be considered as 4 bits selecting which fields are included in the hash. (A + * value 0 effectively disables RSS spreading for the packet type.) The YAML + * generation tools require this structure to be a whole number of bytes wide, + * but only 4 bits are relevant. 
+ */ +#define RSS_MODE_HASH_SELECTOR_OFST 0 +#define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_OFST 0 +#define RSS_MODE_HASH_SRC_ADDR_LBN 0 +#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_OFST 0 +#define RSS_MODE_HASH_DST_ADDR_LBN 1 +#define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_OFST 0 +#define RSS_MODE_HASH_SRC_PORT_LBN 2 +#define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_OFST 0 +#define RSS_MODE_HASH_DST_PORT_LBN 3 +#define RSS_MODE_HASH_DST_PORT_WIDTH 1 +#define RSS_MODE_HASH_SELECTOR_LBN 0 +#define RSS_MODE_HASH_SELECTOR_WIDTH 8 + +/* CTPIO_STATS_MAP structuredef */ +#define CTPIO_STATS_MAP_LEN 4 +/* The (function relative) VI number */ +#define CTPIO_STATS_MAP_VI_OFST 0 +#define CTPIO_STATS_MAP_VI_LEN 2 +#define CTPIO_STATS_MAP_VI_LBN 0 +#define CTPIO_STATS_MAP_VI_WIDTH 16 +/* The target bucket for the VI */ +#define CTPIO_STATS_MAP_BUCKET_OFST 2 +#define CTPIO_STATS_MAP_BUCKET_LEN 2 +#define CTPIO_STATS_MAP_BUCKET_LBN 16 +#define CTPIO_STATS_MAP_BUCKET_WIDTH 16 + + +/***********************************/ +/* MC_CMD_READ_REGS + * Get a dump of the MCPU registers + */ +#define MC_CMD_READ_REGS 0x50 +#undef MC_CMD_0x50_PRIVILEGE_CTG + +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_REGS_IN msgrequest */ +#define MC_CMD_READ_REGS_IN_LEN 0 + +/* MC_CMD_READ_REGS_OUT msgresponse */ +#define MC_CMD_READ_REGS_OUT_LEN 308 +/* Whether the corresponding register entry contains a valid value */ +#define MC_CMD_READ_REGS_OUT_MASK_OFST 0 +#define MC_CMD_READ_REGS_OUT_MASK_LEN 16 +/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr, + * fir, fp) + */ +#define MC_CMD_READ_REGS_OUT_REGS_OFST 16 +#define MC_CMD_READ_REGS_OUT_REGS_LEN 4 +#define MC_CMD_READ_REGS_OUT_REGS_NUM 73 + + +/***********************************/ +/* MC_CMD_INIT_EVQ + * Set up an event queue according to the supplied parameters. The IN arguments + * end with an address for each 4k of host memory required to back the EVQ. + */ +#define MC_CMD_INIT_EVQ 0x80 +#undef MC_CMD_0x80_PRIVILEGE_CTG + +#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_EVQ_IN msgrequest */ +#define MC_CMD_INIT_EVQ_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
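Editorial sketch: the IN message is 36 fixed bytes followed by one 64-bit DMA address per 4KiB page of event-queue memory, so the request length follows directly from the buffer count; the limits come from the DMA_ADDR_MINNUM/MAXNUM definitions below. The helper name is hypothetical.

#include <stddef.h>

/* MC_CMD_INIT_EVQ_IN_LEN(num) == 36 + 8 * num, with 1..64 DMA addresses. */
static size_t init_evq_req_len(unsigned int n_pages)
{
	if (n_pages < 1 || n_pages > 64)
		return 0;	/* outside DMA_ADDR_MINNUM..MAXNUM */
	return 36 + 8 * n_pages;
}

+/* Event queue packet count threshold.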
*/ +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_OUT_LEN 4 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4 + +/* MC_CMD_INIT_EVQ_V2_IN msgrequest */ +#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. + */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4 +/* tbd */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4 +/* enum: All initialisation flags specified by host. */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the lowest latency achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1 +/* enum: MEDFORD only. 
Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
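As a hedged illustration of why V2 exists: when FLAG_TYPE is LOW_LATENCY, THROUGHPUT or AUTO the firmware may override the requested CUT_THRU/RX_MERGE/TX_MERGE flags, and the V2 response reports what was actually applied, so a driver would read the applied flags back rather than trust its request. The helper name is hypothetical.

#include <stdint.h>
#include <string.h>

/* Read MC_CMD_INIT_EVQ_V2_OUT_FLAGS (offset 4 in the response) and keep
 * the bits the firmware may have overridden: CUT_THRU (bit 0),
 * RX_MERGE (bit 1), TX_MERGE (bit 2).
 */
static uint32_t evq_v2_applied_flags(const uint8_t *outbuf)
{
	uint32_t flags;

	memcpy(&flags, outbuf + 4, 4);
	return flags & 0x7;
}

+/* Event queue packet count threshold.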
*/ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4 +/* Actual configuration applied on the card */ +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 + +/* QUEUE_CRC_MODE structuredef */ +#define QUEUE_CRC_MODE_LEN 1 +#define QUEUE_CRC_MODE_MODE_LBN 0 +#define QUEUE_CRC_MODE_MODE_WIDTH 4 +/* enum: No CRC. */ +#define QUEUE_CRC_MODE_NONE 0x0 +/* enum: CRC Fiber channel over ethernet. */ +#define QUEUE_CRC_MODE_FCOE 0x1 +/* enum: CRC (digest) iSCSI header only. */ +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +/* enum: CRC (digest) iSCSI header and payload. */ +#define QUEUE_CRC_MODE_ISCSI 0x3 +/* enum: CRC Fiber channel over IP over ethernet. */ +#define QUEUE_CRC_MODE_FCOIPOE 0x4 +/* enum: CRC MPA. */ +#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_SPARE_LBN 4 +#define QUEUE_CRC_MODE_SPARE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_INIT_RXQ + * set up a receive queue according to the supplied parameters. The IN + * arguments end with an address for each 4k of host memory required to back + * the RXQ. + */ +#define MC_CMD_INIT_RXQ 0x81 +#undef MC_CMD_0x81_PRIVILEGE_CTG + +#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_RXQ_IN_LENMIN 36 +#define MC_CMD_INIT_RXQ_IN_LENMAX 252 +#define MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020 +#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16 +#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10 +#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 + +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 + +/* MC_CMD_INIT_RXQ_V3_IN msgrequest */ +#define MC_CMD_INIT_RXQ_V3_IN_LEN 560 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+
+/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required
+ * for systems with a QDMA (currently, Riverhead)
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_LEN 564
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560
+#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a
+ * request for a different buffer size will fail if there are already eight
+ * other buffer sizes in use. In future Riverhead this limit will go away and
+ * any size will be accepted.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560
+#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4
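[Editorial note, not part of the patch: the V4 request is the V3 layout plus the QDMA-specific BUFFER_SIZE_BYTES tail. The sketch below shows one way a driver might fill in that tail, assuming the sfc driver's generic MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, efx_mcdi_rpc) declared elsewhere in this driver; the function name and parameters are hypothetical, and DMA address and flag programming are elided.]

static int ef100_init_rxq_v4_sketch(struct efx_nic *efx, unsigned int rxq,
				    unsigned int evq, unsigned int nentries)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);

	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_SIZE, nentries);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_TARGET_EVQ, evq);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_INSTANCE, rxq);
	/* 2KB is one of the two sizes the comment above says Riverhead
	 * always honours; 0 would opt out on non-QDMA platforms.
	 */
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, 2048);
	/* DMA_ADDR entries would be filled in here in real code */
	return efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}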
+
+/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a
+ * different RX packet prefix
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_LEN 568
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a
+ * request for a different buffer size will fail if there are already eight
+ * other buffer sizes in use. In future Riverhead this limit will go away and
+ * any size will be accepted.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4
+/* Prefix id for the RX prefix format to use on packets delivered this queue.
+ * Zero is always a valid prefix id and means the default prefix format
+ * documented for the platform. Other prefix ids can be obtained by calling
+ * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564
+#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V4_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V5_OUT_LEN 0
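[Editorial note, not part of the patch: the only field V5 adds over V4 is RX_PREFIX_ID. A minimal hedged fragment, assuming the generic MCDI_SET_DWORD helper; the function name is hypothetical and the prefix id must have been negotiated earlier via MC_CMD_GET_RX_PREFIX_ID, whose fields are not part of this hunk.]

static void ef100_set_rx_prefix_sketch(efx_dword_t *inbuf, u32 prefix_id)
{
	/* prefix_id 0 keeps the platform's documented default RX prefix;
	 * any other value must come from a prior MC_CMD_GET_RX_PREFIX_ID
	 * exchange.
	 */
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V5_IN_RX_PREFIX_ID, prefix_id);
}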
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
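[Editorial note, not part of the patch: the FINI commands above share a single layout, a 4-byte INSTANCE at offset 0, and the FINI_EVQ comment fixes the teardown order. A hedged sketch of that ordering, assuming the generic MCDI helpers; the function name is hypothetical and error handling is elided.]

static void ef100_channel_fini_sketch(struct efx_nic *efx, u32 rxq, u32 txq,
				      u32 evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);

	/* All three FINI requests carry one INSTANCE dword at offset 0, so
	 * one buffer can be reused. Queues must go before the EVQ they point
	 * at, otherwise MC_CMD_FINI_EVQ fails with EBUSY.
	 */
	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, rxq);
	efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), NULL, 0, NULL);
	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, txq);
	efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), NULL, 0, NULL);
	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, evq);
	efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), NULL, 0, NULL);
}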
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
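[Editorial note, not part of the patch: the three buffer-table commands above form an alloc/program/free lifecycle keyed by the HANDLE returned from the alloc. A hedged sketch of the first step, assuming the generic MCDI helpers; the function name and page size choice are illustrative.]

static int buftbl_alloc_sketch(struct efx_nic *efx, u32 owner_id, u32 *handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_BUFTBL_CHUNK_IN_OWNER, owner_id);
	MCDI_SET_DWORD(inbuf, ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE, 4096);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_BUFTBL_CHUNK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	*handle = MCDI_DWORD(outbuf, ALLOC_BUFTBL_CHUNK_OUT_HANDLE);
	/* The entries would then be written via MC_CMD_PROGRAM_BUFTBL_ENTRIES
	 * (HANDLE, FIRSTID, NUMENTRIES plus an array of 64-bit ENTRY
	 * addresses) and returned with MC_CMD_FREE_BUFTBL_CHUNK on teardown.
	 */
	return 0;
}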
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
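[Editorial note, not part of the patch: a hedged sketch of an OP_INSERT request matching on destination MAC, assuming the generic MCDI helpers and the 64-bit read helper MCDI_QWORD; the function name is hypothetical, port_id would come from the issuing function's v-adaptor, and the handle is returned in the FILTER_OP_OUT response defined further below.]

static int filter_insert_dst_mac_sketch(struct efx_nic *efx, u32 port_id,
					const u8 *addr, u32 rxq, u64 *handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_INSERT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, port_id);
	MCDI_POPULATE_DWORD_1(inbuf, FILTER_OP_IN_MATCH_FIELDS,
			      FILTER_OP_IN_MATCH_DST_MAC, 1);
	memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_MAC), addr, ETH_ALEN);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, rxq);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (!rc)
		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
	return rc;
}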
+
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
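[Editorial note, not part of the patch: VNI_VALUE (24 bits) and VNI_TYPE (8 bits) alias the same dword as VNI_OR_VSID. A hedged fragment using the generic bitfield helper; the function name is hypothetical, and since the field comment says the VNI is matched as bytes in network order, converting a host-order VNI into vni_value is left to the caller.]

static void set_vxlan_vni_sketch(efx_dword_t *inbuf, u32 vni_value)
{
	/* Select the VNI_OR_VSID match bit, then encode value and type */
	MCDI_POPULATE_DWORD_1(inbuf, FILTER_OP_EXT_IN_MATCH_FIELDS,
			      FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID, 1);
	MCDI_POPULATE_DWORD_2(inbuf, FILTER_OP_EXT_IN_VNI_OR_VSID,
			      FILTER_OP_EXT_IN_VNI_VALUE, vni_value,
			      FILTER_OP_EXT_IN_VNI_TYPE,
			      MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN);
}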
This extension is only useful with the sfc_efx driver + * included as part of DPDK, used in conjunction with the dpdk datapath + * firmware variant. + */ +#define MC_CMD_FILTER_OP_V3_IN_LEN 180 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16 +#define 
MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define 
MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. + */ +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72 +#define 
MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16 +/* Set an action for all packets matching this filter. The DPDK driver and dpdk + * f/w variant use their own specific delivery structures, which are documented + * in the DPDK Firmware Driver Interface (SF-119419-TC). 
Requesting anything + * other than MATCH_ACTION_NONE when the NIC is running another f/w variant + * will cause the filter insertion to fail with ENOTSUP. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4 +/* enum: do nothing extra */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0 +/* enum: Set the match flag in the packet prefix for packets matching the + * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "FLAG" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1 +/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching + * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "MARK" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2 +/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the + * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX) + * will cause the filter insertion to fail with EINVAL. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ +#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
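+ *
+ * For example, a driver using the sfc driver's usual MCDI helper macros
+ * might read the returned handle back as follows (illustrative sketch
+ * only; "outbuf" stands for the response buffer of the completed MCDI
+ * call):
+ *
+ *     u64 handle = MCDI_QWORD(outbuf, FILTER_OP_EXT_OUT_HANDLE);
+ *
+ * thereafter treating the value purely as an opaque token to pass back in
+ * later remove/unsubscribe operations.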
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_INFO + * Get information related to the parser-dispatcher subsystem + */ +#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 +#undef MC_CMD_0xe4_PRIVILEGE_CTG + +#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4 +/* enum: read the list of supported RX filter matches */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 +/* enum: read properties relating to security rules (Medford-only; for use by + * SolarSecure apps, not directly by drivers. See SF-114946-SW.) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 +/* enum: read the list of supported RX filter matches for VXLAN/NVGRE + * encapsulated frames, which follow a different match sequence to normal + * frames (Medford only) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 +/* enum: read the list of supported matches for the encapsulation detection + * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5 + +/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4) +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4 +/* array of supported match types (valid MATCH_FIELDS values for + * MC_CMD_FILTER_OP) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253 + +/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* bitfield of filter insertion restrictions */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define 
MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 + +/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is + * returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value + * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the + * supported match types that can be used in the encapsulation detection rules + * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. + */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4) +/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4 +/* array of supported match types (valid MATCH_FLAGS values for + * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253 + + +/***********************************/ +/* MC_CMD_GET_PORT_ASSIGNMENT + * Get port assignment for current PCI function. + */ +#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 +#undef MC_CMD_0xb8_PRIVILEGE_CTG + +#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0 + +/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_PORT_ASSIGNMENT + * Set port assignment for current PCI function. + */ +#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 +#undef MC_CMD_0xb9_PRIVILEGE_CTG + +#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4 + +/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_VIS + * Allocate VIs for current PCI function. 
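+ *
+ * As an illustrative sketch (assuming the sfc driver's usual MCDI helpers
+ * and placeholder variables min_vis/max_vis; error handling omitted), a
+ * request might be built and issued as:
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ *     MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
+ *     size_t outlen;
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ *     MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ *                       outbuf, sizeof(outbuf), &outlen);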
+ */ +#define MC_CMD_ALLOC_VIS 0x8b +#undef MC_CMD_0x8b_PRIVILEGE_CTG + +#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOC_VIS_IN msgrequest */ +#define MC_CMD_ALLOC_VIS_IN_LEN 8 +/* The minimum number of VIs that is acceptable */ +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4 +/* The maximum number of VIs that would be useful */ +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4 + +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ +#define MC_CMD_ALLOC_VIS_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4 + +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4 + + +/***********************************/ +/* MC_CMD_FREE_VIS + * Free VIs for current PCI function. Any linked PIO buffers will be unlinked, + * but not freed. + */ +#define MC_CMD_FREE_VIS 0x8c +#undef MC_CMD_0x8c_PRIVILEGE_CTG + +#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FREE_VIS_IN msgrequest */ +#define MC_CMD_FREE_VIS_IN_LEN 0 + +/* MC_CMD_FREE_VIS_OUT msgresponse */ +#define MC_CMD_FREE_VIS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_SRIOV_CFG + * Get SRIOV config for this PF. + */ +#define MC_CMD_GET_SRIOV_CFG 0xba +#undef MC_CMD_0xba_PRIVILEGE_CTG + +#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0 + +/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 +/* Number of VFs currently enabled. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4 +/* RID offset of each subsequent VF from the previous. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_SRIOV_CFG + * Set SRIOV config for this PF. 
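+ *
+ * A minimal usage sketch (sfc MCDI helpers assumed; vf_count is a
+ * placeholder variable), enabling vf_count VFs while leaving the RID
+ * offset and stride unchanged:
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_SRIOV_CFG_IN_LEN);
+ *
+ *     MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_CURRENT, vf_count);
+ *     MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_MAX, vf_count);
+ *     MCDI_POPULATE_DWORD_1(inbuf, SET_SRIOV_CFG_IN_FLAGS,
+ *                           SET_SRIOV_CFG_IN_VF_ENABLED,
+ *                           vf_count ? 1 : 0);
+ *     MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_OFFSET, 0);
+ *     MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_STRIDE, 0);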
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about number of VIs and base VI number allocated to this
+ * function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function.
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2 +/* Base interrupt vector allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2 +/* Number of interrupt vectors allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2 +/* Raw evq ptr table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16 +/* Raw evq timer table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52 +/* Combined metadata field. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76 +/* Reserved, currently 0. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8 + + +/***********************************/ +/* MC_CMD_ALLOC_PIOBUF + * Allocate a push I/O buffer for later use with a tx queue. + */ +#define MC_CMD_ALLOC_PIOBUF 0x8f +#undef MC_CMD_0x8f_PRIVILEGE_CTG + +#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */ +#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0 + +/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */ +#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 +/* Handle for allocated push I/O buffer. 
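+ *
+ * For example (sketch, sfc MCDI helpers assumed): a driver typically
+ * stores this as
+ *     handle = MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+ * and later releases it by passing the same value back in
+ * MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE.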
*/ +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4 + + +/***********************************/ +/* MC_CMD_FREE_PIOBUF + * Free a push I/O buffer. + */ +#define MC_CMD_FREE_PIOBUF 0x90 +#undef MC_CMD_0x90_PRIVILEGE_CTG + +#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_FREE_PIOBUF_IN msgrequest */ +#define MC_CMD_FREE_PIOBUF_IN_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 + +/* MC_CMD_FREE_PIOBUF_OUT msgresponse */ +#define MC_CMD_FREE_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_CAPABILITIES + * Get device capabilities. + * + * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to + * reference inherent device capabilities as opposed to current NVRAM config. + */ +#define MC_CMD_GET_CAPABILITIES 0xbe +#undef MC_CMD_0xbe_PRIVILEGE_CTG + +#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CAPABILITIES_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define 
MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
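+ *
+ * For example (sketch): this 16-bit field can be read with
+ *     rx_dpcpu_fw_id = MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+ * and compared against the enum values below.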
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD 
firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 +/* First word of flags. 
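+ *
+ * Drivers typically cache this word and test individual capability bits
+ * against the _LBN definitions below, e.g. (sketch; datapath_caps is a
+ * placeholder variable):
+ *
+ *     datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS1);
+ *     if (datapath_caps &
+ *         (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN))
+ *             ... 14-byte RX prefixes are supported ...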
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
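+ *
+ * I.e. a driver should only read this word when the response is long
+ * enough, e.g. (sketch):
+ *
+ *     if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ *             datapath_caps2 = MCDI_DWORD(outbuf,
+ *                                         GET_CAPABILITIES_V2_OUT_FLAGS2);
+ *     else
+ *             datapath_caps2 = 0;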
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
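+ *
+ * (Sketch: guard this read on the response length in the same way as
+ * FLAGS2, e.g.
+ *     if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ *             n_contexts = MCDI_WORD(outbuf,
+ *                     GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
+ * where n_contexts is a placeholder variable.)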
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
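The FLAGS2 word and everything after it exist only on newer firmware, so the "check the length" notes above are load-bearing: a driver must compare the MCDI response length against a field's _OFST plus _LEN before reading it, and the _LBN/_WIDTH pairs then select individual capability bits inside the little-endian flags dword. The following is a minimal sketch of that pattern, assuming outbuf/outlen hold a GET_CAPABILITIES_V2 response obtained from an earlier MCDI call; the helper name is illustrative and not part of this patch.

static bool example_has_tx_tso_v2(const u8 *outbuf, size_t outlen)
{
	__le32 flags2;

	/* FLAGS2 is absent on older firmware: length-check before reading */
	if (outlen < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST +
		     MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN)
		return false;
	/* MCDI fields are little-endian, at the byte offset given by _OFST */
	memcpy(&flags2, outbuf + MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST,
	       sizeof(flags2));
	/* _LBN names the bit within the field; this flag has _WIDTH 1 */
	return le32_to_cpu(flags2) &
	       BIT(MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN);
}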
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
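The RXPD/TXPD_FW_VERSION words above are packed subfields rather than plain values: bits 0-11 carry the revision (REV) and bits 12-15 the variant code (TYPE), which is what the FW_TYPE enums describe. The following sketch decodes such a word using the _LBN/_WIDTH constants; the helper name is illustrative and not part of this patch.

static void example_decode_rxpd_fw_version(u16 ver, u16 *rev, u16 *type)
{
	/* Mask of WIDTH bits, shifted down from bit position LBN */
	*rev = (ver >> MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN) &
	       ((1u << MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH) - 1);
	*type = (ver >> MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN) &
		((1u << MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH) - 1);
}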
+/* Second word of flags. Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
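Because VI_WINDOW_MODE is a single global setting, once it is known the base address of VI n is simply n times the per-VI stride; the enums above give strides of 8k, 16k and 64k, with PIO always at offset 4k into the window and CTPIO (where mapped) at offset 12k. Below is a sketch of turning the enum into a stride; the helper name is illustrative and not part of this patch.

static int example_vi_window_stride(u8 mode, unsigned int *stride)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
		*stride = 8192;		/* CTPIO is not mapped in this mode */
		return 0;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
		*stride = 16384;
		return 0;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
		*stride = 65536;
		return 0;
	default:
		return -EINVAL;		/* unknown window mode */
	}
}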
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
+
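MAC_STATS_NUM_STATS is what lets a driver size its MAC stats DMA buffer without hard-coding a count: the value already includes the trailing GENERATION_END entry, each entry is 64 bits, and firmware silently truncates the array if the buffer is smaller. A sketch of deriving the buffer length from a V4-or-later response follows, assuming outbuf/outlen hold the response from an earlier MCDI call; the helper name is illustrative and not part of this patch.

static int example_mac_stats_dma_len(const u8 *outbuf, size_t outlen,
				     size_t *dma_len)
{
	__le16 num_stats;

	/* Field only exists from V4 onwards: length-check before reading */
	if (outlen < MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST +
		     MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN)
		return -ENODATA;
	memcpy(&num_stats,
	       outbuf + MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST,
	       sizeof(num_stats));
	/* Count includes GENERATION_END; each stat is a 64-bit value */
	*dma_len = (size_t)le16_to_cpu(num_stats) * sizeof(u64);
	return 0;
}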
MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19 
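+/* Usage note (illustrative sketch, not part of the generated MCDI
+ * definitions): each flag above is described by an _OFST/_LBN/_WIDTH triple
+ * giving the byte offset of the containing 32-bit little-endian word, the
+ * bit number within that word, and the field width in bits. A minimal
+ * example of testing a single-bit flag, assuming "outbuf" (a hypothetical
+ * name) points at a GET_CAPABILITIES response of at least
+ * MC_CMD_GET_CAPABILITIES_V6_OUT_LEN bytes:
+ *
+ *	u32 flags1 = le32_to_cpu(*(const __le32 *)
+ *		(outbuf + MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST));
+ *	bool can_insert_vlan = flags1 &
+ *		BIT(MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN);
+ *
+ * Fields wider than one bit are extracted by shifting right by _LBN and
+ * masking with ((1 << _WIDTH) - 1).
+ */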
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 + +/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19 
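+/* Usage note (illustrative sketch, not part of the generated MCDI
+ * definitions): these versioned responses grow by appending fields, so any
+ * field marked "Not present on older firmware (check the length)" is only
+ * valid when the firmware returned enough bytes. A minimal example,
+ * assuming "outlen" (a hypothetical name) is the response length reported
+ * by the MCDI transport:
+ *
+ *	if (outlen >= MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) {
+ *		// V7-only fields at the end of the response may be read
+ *	} else if (outlen >= MC_CMD_GET_CAPABILITIES_V6_OUT_LEN) {
+ *		// fall back to the V6 subset, e.g.
+ *		// GUARANTEED_RX_BUFFER_SIZES
+ *	}
+ */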
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length).
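+ * A hedged sketch of the length check, assuming outbuf/outlen as returned by
+ * efx_mcdi_rpc() at the call site:
+ *	u32 caps2 = 0;
+ *
+ *	if (outlen >= MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST +
+ *		      MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN)
+ *		caps2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V7_OUT_FLAGS2);
+ * Individual flags are then tested against their _LBN bit numbers, e.g.
+ *	tso_v2 = caps2 & BIT(MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN);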
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
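+ * A guarded read of this field might look like (a sketch, not code from this
+ * series; n_tso_contexts is a hypothetical local):
+ *	if (outlen >= MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST +
+ *		      MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN)
+ *		n_tso_contexts = MCDI_WORD(outbuf,
+ *					   GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS);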
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but it cannot be
+ * expressed in this field. It is intended for a possible future situation
+ * where a more complex scheme of PF-to-port mapping is being used. The future
+ * driver should look for a new field supporting the new scheme. The
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
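+ * (Across the three modes below the VI stride follows directly from the enum;
+ * a hedged mapping sketch, where vi_window_sizes is a hypothetical local table
+ * and MCDI_BYTE() is assumed to be the sfc single-byte response accessor:
+ *	static const u32 vi_window_sizes[] = { 8192, 16384, 65536 };
+ *	u8 mode = MCDI_BYTE(outbuf, GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE);
+ *
+ *	if (mode < ARRAY_SIZE(vi_window_sizes))
+ *		efx->vi_stride = vi_window_sizes[mode];
+ * with unknown enum values rejected rather than guessed at.)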
+ */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
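+ * FLAGS3 carries MAE_SUPPORTED, which the EF100 representor support in this
+ * series relies on; a hedged test using only names defined here (mae_supported
+ * is a hypothetical local):
+ *	if (outlen >= MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST +
+ *		      MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN &&
+ *	    MCDI_DWORD(outbuf, GET_CAPABILITIES_V7_OUT_FLAGS3) &
+ *	    BIT(MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN))
+ *		mae_supported = true;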
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 + +/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
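+ * Read like RX_DPCPU_FW_ID above. The RXPD/TXPD_FW_VERSION words that follow
+ * pack a 12-bit REV and a 4-bit TYPE (per their _LBN/_WIDTH pairs); a plain-C
+ * extraction sketch, not an accessor from this header:
+ *	u16 ver = MCDI_WORD(outbuf, GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION);
+ *	u16 rev = ver & 0xfff;
+ *	u8 type = ver >> 12;
+ * where the mask and shift come straight from REV_LBN 0/WIDTH 12 and
+ * TYPE_LBN 12/WIDTH 4.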
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length).
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
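+ * (On the per-PF arrays below: PFS_TO_PORTS_ASSIGNMENT and NUM_VFS_PER_PF are
+ * indexed by PF number; a hedged walk using the sfc MCDI_PTR() raw accessor,
+ * with port_of_pf a hypothetical destination array:
+ *	const u8 *map = MCDI_PTR(outbuf,
+ *				 GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT);
+ *
+ *	for (pf = 0; pf < MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM; pf++)
+ *		if (map[pf] < MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT)
+ *			port_of_pf[pf] = map[pf];
+ * since all four special codes lie in the 0xfc..0xff range.)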
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but it cannot be
+ * expressed in this field. It is intended for a possible future situation
+ * where a more complex scheme of PF-to-port mapping is being used. The future
+ * driver should look for a new field supporting the new scheme. The
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
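+ * (On MAC_STATS_NUM_STATS above: the sizing rule it states reduces to
+ * allocating n * 8 bytes; a hedged sketch using this driver's existing DMA
+ * helper efx_nic_alloc_buffer():
+ *	n = MCDI_WORD(outbuf, GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS);
+ *	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ *				  n * sizeof(u64), GFP_KERNEL);
+ * so everything up to and including GENERATION_END fits without truncation.)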
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156 + +/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length).
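+ *
+ * An illustrative sketch (hypothetical caller code, assuming the
+ * MCDI_DWORD() helper from the driver's mcdi.h): test the response length
+ * before reading this word so that older firmware is handled gracefully:
+ *
+ *   if (outlen >= MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST +
+ *                 MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN)
+ *           flags2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V9_OUT_FLAGS2);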
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
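+ *
+ * Illustrative only (hypothetical caller code, assuming the MCDI_WORD()
+ * helper from the driver's mcdi.h): read the field only when TX_TSO_V2 is
+ * set and the response is long enough to contain it:
+ *
+ *   if (tso_v2 && outlen >=
+ *                 MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST +
+ *                 MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN)
+ *           n_contexts = MCDI_WORD(outbuf,
+ *                           GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS);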
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
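+ *
+ * As an illustrative sketch of the resulting layout (not a protocol
+ * definition), the window for VI number n in this mode starts at byte
+ * offset:
+ *
+ *   vi_base = n * 8192;        // 8k stride per VI
+ *   pio     = vi_base + 4096;  // PIO region at offset 4k; no CTPIO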
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
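+ *
+ * Once present, individual capability bits can be tested against the _LBN
+ * values below; an illustrative sketch (BIT() as in the kernel):
+ *
+ *   mae_supported = flags3 &
+ *           BIT(MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN);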
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156 +/* The minimum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum number of queues that can be used by an RSS context in exclusive + * mode. In exclusive mode the context has a configurable indirection table and + * a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +/* The maximum number of queues that can be used by an RSS context in even- + * spreading mode. In even-spreading mode the context has no indirection table + * but it does have a configurable RSS key. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +/* The total number of RSS contexts supported. Note that the number of + * available contexts using indirection tables is also limited by the + * availability of indirection table space allocated from a common pool. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4 +/* The total amount of indirection table space that can be shared between RSS + * contexts. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_V2_EXTN + * Encapsulation for a v2 extended command + */ +#define MC_CMD_V2_EXTN 0x7f + +/* MC_CMD_V2_EXTN_IN msgrequest */ +#define MC_CMD_V2_EXTN_IN_LEN 4 +/* the extended command number */ +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0 +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1 +/* the actual length of the encapsulated command (which is not in the v1 + * header) + */ +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 +#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. + */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 + + +/***********************************/ +/* MC_CMD_LINK_PIOBUF + * Link a push I/O buffer to a TxQ + */ +#define MC_CMD_LINK_PIOBUF 0x92 +#undef MC_CMD_0x92_PRIVILEGE_CTG + +#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_LINK_PIOBUF_IN msgrequest */ +#define MC_CMD_LINK_PIOBUF_IN_LEN 8 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_LINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_LINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UNLINK_PIOBUF + * Unlink a push I/O buffer from a TxQ + */ +#define MC_CMD_UNLINK_PIOBUF 0x93 +#undef MC_CMD_0x93_PRIVILEGE_CTG + +#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */ +#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_ALLOC + * allocate and initialise a v-switch. + */ +#define MC_CMD_VSWITCH_ALLOC 0x94 +#undef MC_CMD_0x94_PRIVILEGE_CTG + +#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */ +#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 +/* The port to connect to the v-switch's upstream port. 
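+ *
+ * An illustrative request (hypothetical caller code, assuming the
+ * MCDI_DECLARE_BUF()/MCDI_SET_DWORD() helpers from the driver's mcdi.h)
+ * allocating a VEB might be built as:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+ *   MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *   MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE,
+ *                  MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+ *   MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, num_vlan_tags);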
*/
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of a v-switch. For now this command is an empty
+ * placeholder. It may be used to check if a v-switch is connected to a given
+ * EVB port (if not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of the new v-port.
*/
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives traffic which doesn't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port.
*/ +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* Flags controlling v-adaptor creation */ +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +/* The number of VLAN tags to strip on receive */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4 +/* The number of VLAN tags to transparently insert/remove. */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16 +/* The MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 +/* enum: Derive the MAC address from the upstream port */ +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 + +/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_FREE + * de-allocate a v-adaptor. + */ +#define MC_CMD_VADAPTOR_FREE 0x99 +#undef MC_CMD_0x99_PRIVILEGE_CTG + +#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_FREE_IN msgrequest */ +#define MC_CMD_VADAPTOR_FREE_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ +#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_SET_MAC + * assign a new MAC address to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_SET_MAC 0x5d +#undef MC_CMD_0x5d_PRIVILEGE_CTG + +#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The new MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 + +/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_GET_MAC + * read the MAC address assigned to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_GET_MAC 0x5e +#undef MC_CMD_0x5e_PRIVILEGE_CTG + +#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 +/* The port to which the v-adaptor is connected. 
*/
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
+/* Write enable bits 0-3, set to write, clear to read.
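+ *
+ * For example (illustrative), a mask value of 0x3 requests a write of
+ * REGION0 and REGION1 while REGION2 and REGION3 are only read back:
+ *
+ *   write_mask = BIT(0) | BIT(1);  // regions 0 and 1 written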
*/
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4
+/* Size of indirection table to be allocated to this context from the pool.
+ * Must be a power of 2. The minimum and maximum table size can be queried
+ * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in
+ * the common pool to allocate the requested table size, due to allocating
+ * table space to other RSS contexts, then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
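+ *
+ * This allows the INVALID value below to be used as a "no context" sentinel
+ * by callers; an illustrative sketch (hypothetical driver state):
+ *
+ *   u32 ctx = MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID;
+ *   // allocation may later store a real handle in ctx
+ *   if (ctx != MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID)
+ *           rss_context_free(ctx);  // hypothetical cleanup helper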
+ */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_FREE + * Free an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_FREE 0x9f +#undef MC_CMD_0x9f_PRIVILEGE_CTG + +#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_KEY + * Set the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0 +#undef MC_CMD_0xa0_PRIVILEGE_CTG + +#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 + +/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_KEY + * Get the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1 +#undef MC_CMD_0xa1_PRIVILEGE_CTG + +#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_TABLE + * Set the indirection table for an RSS context. This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. + */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 +#undef MC_CMD_0xa2_PRIVILEGE_CTG + +#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_TABLE + * Get the indirection table for an RSS context. 
This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. + */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 +#undef MC_CMD_0xa3_PRIVILEGE_CTG + +#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE + * Write a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. + */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e +#undef MC_CMD_0x13e_PRIVILEGE_CTG + +#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array of index-value pairs to be written to the table. Structure is + * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY. + */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4 +/* The index of the table entry to be written. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16 +/* The value to write into the table entry. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_READ_TABLE + * Read a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. 
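+ *
+ * An illustrative sketch of building the variable-length request from a
+ * hypothetical byte buffer "req" (MCDI payloads are little-endian):
+ *
+ *   __le16 *indices = (__le16 *)(req +
+ *           MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST);
+ *   for (i = 0; i < n_entries; i++)
+ *           indices[i] = cpu_to_le16(first_index + i);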
+ */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f +#undef MC_CMD_0x13f_PRIVILEGE_CTG + +#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array containing the indices of the entries to be read. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508 + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2) +/* A buffer containing the requested entries read from the table. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_FLAGS + * Set various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 +#undef MC_CMD_0xe1_PRIVILEGE_CTG + +#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 +/* Hash control flags. The _EN bits are always supported, but new modes are + * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: + * in this case, the MODE fields may be set to non-zero values, and will take + * effect regardless of the settings of the _EN flags. See the RSS_MODE + * structure for the meaning of the mode bits. Drivers must check the + * capability before trying to set any _MODE fields, as older firmware will + * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In + * the case where all the _MODE flags are zero, the _EN flags take effect, + * providing backward compatibility for existing drivers. (Setting all _MODE + * *and* all _EN flags to zero is valid, to disable RSS spreading for that + * particular packet type.) 
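+ *
+ * An illustrative sketch (hypothetical caller code): on firmware that
+ * reports ADDITIONAL_RSS_MODES, a 4-bit RSS_MODE value for UDP/IPv4 traffic
+ * would be placed in the word using the _LBN value defined below:
+ *
+ *   if (additional_rss_modes)
+ *           flags |= udp4_mode <<
+ *                   MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN;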
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
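As a sketch of the backward-compatible path described above (assuming the sfc MCDI helpers, including MCDI_POPULATE_DWORD_4): enable the four classic Toeplitz _EN bits and leave every _MODE field zero, so the request stays <= 0xff and old firmware accepts it.

	/* Hypothetical helper enabling 4-tuple Toeplitz hashing only. */
	static int rss_enable_default_flags(struct efx_nic *efx, u32 context)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);

		MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
			       context);
		/* All _MODE fields left at zero: the _EN flags take effect */
		MCDI_POPULATE_DWORD_4(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
				      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, 1,
				      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, 1,
				      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, 1,
				      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, 1);
		return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf,
				    sizeof(inbuf), NULL, 0, NULL);
	}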
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
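As a usage sketch (assuming the sfc MCDI helpers and <linux/etherdevice.h>; vport_add_mac() is a hypothetical name): the 10-byte request is just the v-port handle followed by the MAC address.

	/* Hypothetical helper adding a MAC address to a v-port. */
	static int vport_add_mac(struct efx_nic *efx, u32 vport_id,
				 const u8 *mac)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

		MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID,
			       vport_id);
		ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR),
				mac);
		return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
				    sizeof(inbuf), NULL, 0, NULL);
	}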
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses of a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6)
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * Read some config of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information. PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
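As a usage sketch (assuming the sfc MCDI helpers; get_function_info() is a hypothetical name): a zero-length request that returns the caller's PF and VF numbers.

	/* Hypothetical helper reading the caller's PF/VF numbers. */
	static int get_function_info(struct efx_nic *efx, u32 *pf, u32 *vf)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
		size_t outlen;
		int rc;

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
			return -EIO;
		*pf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
		*vf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
		return 0;
	}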
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) Fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
+/* type of license (eg 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
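As a usage sketch (assuming the sfc MCDI helpers; get_app_state() is a hypothetical name): set APP_ID and compare the returned STATE against the LICENSED enum.

	/* Hypothetical helper testing a LICENSED_APP_ID_xxx value. */
	static int get_app_state(struct efx_nic *efx, u32 app_id,
				 bool *licensed)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
		size_t outlen;
		int rc;

		MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, app_id);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE, inbuf,
				  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)
			return -EIO;
		*licensed = MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE) ==
			    MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED;
		return 0;
	}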
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
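As a sketch of the 64-bit LO/HI dword handling above (assuming the sfc MCDI helpers, including the MCDI_SET_QWORD/MCDI_QWORD accessors; get_feature_states() is a hypothetical name):

	/* Hypothetical helper reading a 64-bit feature-state mask. */
	static int get_feature_states(struct efx_nic *efx, u64 features,
				      u64 *states)
	{
		MCDI_DECLARE_BUF(inbuf,
				 MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN);
		MCDI_DECLARE_BUF(outbuf,
				 MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN);
		size_t outlen;
		int rc;

		MCDI_SET_QWORD(inbuf, GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES,
			       features);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_V3_FEATURE_STATES,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN)
			return -EIO;
		/* A set bit means the corresponding feature is licensed */
		*states = MCDI_QWORD(outbuf,
				     GET_LICENSED_V3_FEATURE_STATES_OUT_STATES);
		return 0;
	}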
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless which function is calling, effectively this
+ * is PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of v-adaptor associated with the client. If no such v-adaptor
+ * exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
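As a usage sketch (assuming the sfc MCDI helpers including MCDI_SET_QWORD; mask_features() is a hypothetical name, and this command needs the ADMIN privilege category per the definitions above):

	/* Hypothetical helper turning the masked features off or back on. */
	static int mask_features(struct efx_nic *efx, u64 mask, bool on)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN);

		MCDI_SET_QWORD(inbuf, LICENSED_V3_MASK_FEATURES_IN_MASK, mask);
		MCDI_SET_DWORD(inbuf, LICENSED_V3_MASK_FEATURES_IN_FLAG,
			       on ? MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON :
				    MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF);
		return efx_mcdi_rpc(efx, MC_CMD_LICENSED_V3_MASK_FEATURES,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}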
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive a MC reboot, but will be
+ * erased when the adapter is power cycled
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4)
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
+
+/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
+/* Bitmask of engineering port modes available on the board (indexed by
+ * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
+ * contains all modes implemented in firmware for a particular board. Modes
+ * listed in MODES are considered production modes and should be exposed in
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES,
+ * should be considered hidden (not to be exposed in userland tools) and for
+ * engineering use only. There are no other semantic differences and any mode
+ * listed in either MODES or ENGINEERING_MODES can be set on the board.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
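As a usage sketch (assuming the sfc MCDI helpers; get_port_modes() is a hypothetical name): a zero-length request, with the response length checked so the shorter pre-V2 layout is still accepted.

	/* Hypothetical helper reading the production-mode bitmask and the
	 * current board mode.
	 */
	static int get_port_modes(struct efx_nic *efx, u32 *modes,
				  u32 *current_mode)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_MODES_OUT_LEN);
		size_t outlen;
		int rc;

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_MODES, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_PORT_MODES_OUT_LEN)
			return -EIO;
		*modes = MCDI_DWORD(outbuf, GET_PORT_MODES_OUT_MODES);
		*current_mode = MCDI_DWORD(outbuf,
					   GET_PORT_MODES_OUT_CURRENT_MODE);
		return 0;
	}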
+
+
+/***********************************/
+/* MC_CMD_OVERRIDE_PORT_MODE
+ * Override flash config port mode for subsequent MC reboot(s). Override data
+ * is stored in the persistent data section of DMEM and activated on next MC
+ * warm reboot. A cold reboot resets the override. It is assumed that a
+ * sufficient number of PFs are available and that port mapping is valid for
+ * the new port mode, as the override does not affect PF configuration.
+ */
+#define MC_CMD_OVERRIDE_PORT_MODE 0x137
+#undef MC_CMD_0x137_PRIVILEGE_CTG
+
+#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
+/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
+
+/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
+#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
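As a usage sketch (assuming the sfc MCDI helpers; bug35388_active() is a hypothetical name): a workaround is in effect only if its bit is set in both the IMPLEMENTED and ENABLED words.

	/* Hypothetical helper testing whether BUG35388 is implemented and
	 * enabled.
	 */
	static int bug35388_active(struct efx_nic *efx, bool *active)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
		size_t outlen;
		u32 impl, enabled;
		int rc;

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
			return -EIO;
		impl = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
		enabled = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
		*active = (impl & enabled &
			   MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) != 0;
		return 0;
	}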
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows setting the TX packets' source MAC address to any arbitrary
+ * MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
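As a usage sketch (assuming the sfc MCDI helpers, including MCDI_POPULATE_DWORD_2; get_vf_link_state() is a hypothetical name): passing DO_NOT_CHANGE as the new mode reads the current setting of the given VF without modifying it.

	/* Hypothetical helper reading a VF's link state mode. */
	static int get_vf_link_state(struct efx_nic *efx, u16 pf, u16 vf,
				     u32 *mode)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
		size_t outlen;
		int rc;

		MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
				      LINK_STATE_MODE_IN_FUNCTION_PF, pf,
				      LINK_STATE_MODE_IN_FUNCTION_VF, vf);
		MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
			       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf,
				  sizeof(inbuf), outbuf, sizeof(outbuf),
				  &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
			return -EIO;
		*mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
		return 0;
	}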
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_ADD
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this
+ * only affects checksum validation in VNIC RX - on TX the send descriptor
+ * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
+ * apply to the current driver. If a rule matches, then the packet is
+ * considered to have the corresponding encapsulation type, and the inner
+ * packet is parsed. It is up to the driver to ensure that overlapping rules
+ * are not inserted. (If a packet would match multiple rules, a random one of
+ * them will be used.) A rule with the exact same match criteria may not be
+ * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
+ * supported; use MC_CMD_GET_PARSER_DISP_INFO with OP
+ * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
+ * combinations. Each driver may only have a limited set of active rules -
+ * returns ENOSPC if the caller's table is full.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
+#undef MC_CMD_0x16d_PRIVILEGE_CTG
+
+#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
+/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
+/* Any non-zero bits other than the ones named below or an unsupported
+ * combination will cause the NIC to return EOPNOTSUPP. In the future more
+ * flags may be added.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
+/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
+ * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
+/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
+ * (Deprecated)
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
+/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
+/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
+ * case of IPv4, the IP should be in the first 4 bytes and all other bytes
+ * should be zero.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
+/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
+/* Actions that should be applied to packets that match the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
+/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
+/* Handle to inserted rule. Used for removing the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
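+
+/* Example (illustrative): to have UDP/4789 over IPv4 parsed as VXLAN, a
+ * driver could send MPORT_SELECTOR=MAE_MPORT_SELECTOR_ASSIGNED, MATCH_FLAGS
+ * with MATCH_ETHER_TYPE, MATCH_IP_PROTO and MATCH_DST_PORT set,
+ * ETHER_TYPE=0x0800 and DST_PORT=4789 (both in network order), IP_PROTO=17
+ * and the MAE_MCDI_ENCAP_TYPE value for VXLAN, then keep the returned
+ * HANDLE for a later MC_CMD_VNIC_ENCAP_RULE_REMOVE.
+ */
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
+ * Remove a VNIC encapsulation rule.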
Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule. + */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e +#undef MC_CMD_0x16e_PRIVILEGE_CTG + +#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4 +/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0 +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4 + +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0 + +/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are + * defined in SF-120734-TC with more information in SF-122717-TC. + */ +#define FUNCTION_PERSONALITY_LEN 4 +#define FUNCTION_PERSONALITY_ID_OFST 0 +#define FUNCTION_PERSONALITY_ID_LEN 4 +/* enum: Function has no assigned personality */ +#define FUNCTION_PERSONALITY_NULL 0x0 +/* enum: Function has an EF100-style function control window and VI windows + * with both EF100 and vDPA doorbells. + */ +#define FUNCTION_PERSONALITY_EF100 0x1 +/* enum: Function has virtio net device configuration registers and doorbells + * for virtio queue pairs. + */ +#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2 +/* enum: Function has virtio block device configuration registers and a + * doorbell for a single virtqueue. + */ +#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3 +/* enum: Function is a Xilinx acceleration device - management function */ +#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4 +/* enum: Function is a Xilinx acceleration device - user function */ +#define FUNCTION_PERSONALITY_ACCEL_USR 0x5 +#define FUNCTION_PERSONALITY_ID_LBN 0 +#define FUNCTION_PERSONALITY_ID_WIDTH 32 + +/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID + * (interface/PF/VF tuple) + */ +#define PCIE_FUNCTION_LEN 8 +/* PCIe PF function number */ +#define PCIE_FUNCTION_PF_OFST 0 +#define PCIE_FUNCTION_PF_LEN 2 +/* enum: Wildcard value representing any available function (e.g in resource + * allocation requests) + */ +#define PCIE_FUNCTION_PF_ANY 0xfffe +/* enum: Value representing invalid (null) function */ +#define PCIE_FUNCTION_PF_NULL 0xffff +#define PCIE_FUNCTION_PF_LBN 0 +#define PCIE_FUNCTION_PF_WIDTH 16 +/* PCIe VF Function number (PF relative) */ +#define PCIE_FUNCTION_VF_OFST 2 +#define PCIE_FUNCTION_VF_LEN 2 +/* enum: Wildcard value representing any available function (e.g in resource + * allocation requests) + */ +#define PCIE_FUNCTION_VF_ANY 0xfffe +/* enum: Function is a PF (when PF != PF_NULL) or invalid function (when PF == + * PF_NULL) + */ +#define PCIE_FUNCTION_VF_NULL 0xffff +#define PCIE_FUNCTION_VF_LBN 16 +#define PCIE_FUNCTION_VF_WIDTH 16 +/* PCIe interface of the function */ +#define PCIE_FUNCTION_INTF_OFST 4 +#define PCIE_FUNCTION_INTF_LEN 4 +/* enum: Host PCIe interface */ +#define PCIE_FUNCTION_INTF_HOST 0x0 +/* enum: Application Processor interface */ +#define PCIE_FUNCTION_INTF_AP 0x1 +#define PCIE_FUNCTION_INTF_LBN 32 +#define PCIE_FUNCTION_INTF_WIDTH 32 + +#endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.c b/drivers/net/ethernet/sfc/siena/mcdi_port.c new file mode 100644 index 0000000000..93b8b2338f --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_port.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + 
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
+ */
+
+/*
+ * Driver for PHY related operations via MCDI.
+ */
+
+#include <linux/slab.h>
+#include "efx.h"
+#include "mcdi_port.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "selftest.h"
+#include "mcdi_port_common.h"
+
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+			      int prtad, int devad, u16 addr)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+
+	rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+	    MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
+	return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
+}
+
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+			       int prtad, int devad, u16 addr, u16 value)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
+
+	rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+	    MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
+	return 0;
+}
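+
+/* Example (illustrative): once these hooks are installed in efx->mdio, a
+ * Clause 45 register such as the PMA/PMD status register can be read via
+ * the read hook, e.g.
+ *
+ *	int val = efx_mcdi_mdio_read(efx->net_dev, efx->mdio.prtad,
+ *				     MDIO_MMD_PMAPMD, MDIO_STAT1);
+ *
+ * where MDIO_MMD_PMAPMD and MDIO_STAT1 come from <linux/mdio.h>.
+ */
+
+bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+	size_t outlength;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+				outbuf, sizeof(outbuf), &outlength);
+	if (rc)
+		return true;
+
+	return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+int efx_siena_mcdi_port_probe(struct efx_nic *efx)
+{
+	int rc;
+
+	/* Set up MDIO structure for PHY */
+	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	efx->mdio.mdio_read = efx_mcdi_mdio_read;
+	efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+	/* Fill out MDIO structure, loopback modes, and initial link state */
+	rc = efx_siena_mcdi_phy_probe(efx);
+	if (rc != 0)
+		return rc;
+
+	return efx_siena_mcdi_mac_init_stats(efx);
+}
+
+void efx_siena_mcdi_port_remove(struct efx_nic *efx)
+{
+	efx_siena_mcdi_phy_remove(efx);
+	efx_siena_mcdi_mac_fini_stats(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.h b/drivers/net/ethernet/sfc/siena/mcdi_port.h
new file mode 100644
index 0000000000..7b4ae250b5
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.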
+ */ + +#ifndef EFX_MCDI_PORT_H +#define EFX_MCDI_PORT_H + +#include "net_driver.h" + +bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx); +int efx_siena_mcdi_port_probe(struct efx_nic *efx); +void efx_siena_mcdi_port_remove(struct efx_nic *efx); + +#endif /* EFX_MCDI_PORT_H */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c new file mode 100644 index 0000000000..067fe0f439 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c @@ -0,0 +1,1282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "mcdi_port_common.h" +#include "efx_common.h" +#include "nic.h" + +static int efx_mcdi_get_phy_cfg(struct efx_nic *efx, + struct efx_mcdi_phy_data *cfg) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { + rc = -EIO; + goto fail; + } + + cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); + cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); + cfg->supported_cap = + MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); + cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); + cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); + cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); + memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), + sizeof(cfg->name)); + cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); + cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); + memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), + sizeof(cfg->revision)); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +void efx_siena_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising) +{ + memcpy(efx->link_advertising, advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + efx->link_advertising[0] |= ADVERTISED_Autoneg; + if (advertising[0] & ADVERTISED_Pause) + efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); + else + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); + if (advertising[0] & ADVERTISED_Asym_Pause) + efx->wanted_fc ^= EFX_FC_TX; +} + +static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, u32 loopback_speed) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); + + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); + size_t outlen; + 
int rc; + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) { + rc = -EIO; + goto fail; + } + + *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset) +{ + #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + + bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS); + switch (media) { + case MC_CMD_MEDIA_KX4: + SET_BIT(Backplane); + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + SET_BIT(1000baseKX_Full); + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + SET_BIT(10000baseKX4_Full); + if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + SET_BIT(40000baseKR4_Full); + break; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + SET_BIT(FIBRE); + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) { + SET_BIT(1000baseT_Full); + SET_BIT(1000baseX_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) { + SET_BIT(10000baseCR_Full); + SET_BIT(10000baseLR_Full); + SET_BIT(10000baseSR_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { + SET_BIT(40000baseCR4_Full); + SET_BIT(40000baseSR4_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) { + SET_BIT(100000baseCR4_Full); + SET_BIT(100000baseSR4_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) { + SET_BIT(25000baseCR_Full); + SET_BIT(25000baseSR_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + SET_BIT(50000baseCR2_Full); + break; + + case MC_CMD_MEDIA_BASE_T: + SET_BIT(TP); + if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) + SET_BIT(10baseT_Half); + if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) + SET_BIT(10baseT_Full); + if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) + SET_BIT(100baseT_Half); + if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) + SET_BIT(100baseT_Full); + if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) + SET_BIT(1000baseT_Half); + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + SET_BIT(1000baseT_Full); + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + SET_BIT(10000baseT_Full); + break; + } + + if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + SET_BIT(Pause); + if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + SET_BIT(Asym_Pause); + if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + SET_BIT(Autoneg); + + #undef SET_BIT +} + +static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) +{ + u32 result = 0; + + #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + + if (TEST_BIT(10baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); + if (TEST_BIT(10baseT_Full)) + result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); + if (TEST_BIT(100baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); + if (TEST_BIT(100baseT_Full)) + result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); + if (TEST_BIT(1000baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); + if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) || + TEST_BIT(1000baseX_Full)) + result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); + if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) || + TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) || + TEST_BIT(10000baseSR_Full)) + result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); + if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) || + 
TEST_BIT(40000baseSR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); + if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN); + if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full)) + result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN); + if (TEST_BIT(50000baseCR2_Full)) + result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN); + if (TEST_BIT(Pause)) + result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); + if (TEST_BIT(Asym_Pause)) + result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); + if (TEST_BIT(Autoneg)) + result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + + #undef TEST_BIT + + return result; +} + +static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + enum efx_phy_mode mode, supported; + u32 flags; + + /* TODO: Advertise the capabilities supported by this PHY */ + supported = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN)) + supported |= PHY_MODE_TX_DISABLED; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN)) + supported |= PHY_MODE_LOW_POWER; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN)) + supported |= PHY_MODE_OFF; + + mode = efx->phy_mode & supported; + + flags = 0; + if (mode & PHY_MODE_TX_DISABLED) + flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN); + if (mode & PHY_MODE_LOW_POWER) + flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN); + if (mode & PHY_MODE_OFF) + flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); + + return flags; +} + +static u8 mcdi_to_ethtool_media(u32 media) +{ + switch (media) { + case MC_CMD_MEDIA_XAUI: + case MC_CMD_MEDIA_CX4: + case MC_CMD_MEDIA_KX4: + return PORT_OTHER; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + return PORT_FIBRE; + + case MC_CMD_MEDIA_BASE_T: + return PORT_TP; + + default: + return PORT_OTHER; + } +} + +static void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl) +{ + switch (fcntl) { + case MC_CMD_FCNTL_AUTO: + WARN_ON(1); /* This is not a link mode */ + link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_BIDIR: + link_state->fc = EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_RESPOND: + link_state->fc = EFX_FC_RX; + break; + default: + WARN_ON(1); + fallthrough; + case MC_CMD_FCNTL_OFF: + link_state->fc = 0; + break; + } + + link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); + link_state->speed = speed; +} + +/* The semantics of the ethtool FEC mode bitmask are not well defined, + * particularly the meaning of combinations of bits. Which means we get to + * define our own semantics, as follows: + * OFF overrides any other bits, and means "disable all FEC" (with the + * exception of 25G KR4/CR4, where it is not possible to reject it if AN + * partner requests it). + * AUTO on its own means use cable requirements and link partner autoneg with + * fw-default preferences for the cable type. + * AUTO and either RS or BASER means use the specified FEC type if cable and + * link partner support it, otherwise autoneg/fw-default. + * RS or BASER alone means use the specified FEC type if cable and link partner + * support it and either requests it, otherwise no FEC. + * Both RS and BASER (whether AUTO or not) means use FEC if cable and link + * partner support it, preferring RS to BASER. 
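+ *
+ * For example, AUTO|RS requests RS when the PHY supports it and falls back
+ * to the autoneg/firmware default otherwise, while RS alone only sets the
+ * RS_FEC and RS_FEC_REQUESTED capability bits when supported_cap advertises
+ * RS, so an unsupportable request degrades to no FEC rather than failing.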
+ */ +static u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap) +{ + u32 ret = 0; + + if (ethtool_cap & ETHTOOL_FEC_OFF) + return 0; + + if (ethtool_cap & ETHTOOL_FEC_AUTO) + ret |= ((1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) & supported_cap; + if (ethtool_cap & ETHTOOL_FEC_RS && + supported_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) + ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN); + if (ethtool_cap & ETHTOOL_FEC_BASER) { + if (supported_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN)) + ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN); + if (supported_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)) + ret |= (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN); + } + return ret; +} + +/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function + * can never produce, (baser xor rs) and neither req; the implementation below + * maps both of those to AUTO. This should never matter, and it's not clear + * what a better mapping would be anyway. + */ +static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g) +{ + bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN), + rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN), + baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) + : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN), + baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN) + : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN); + + if (!baser && !rs) + return ETHTOOL_FEC_OFF; + return (rs_req ? ETHTOOL_FEC_RS : 0) | + (baser_req ? ETHTOOL_FEC_BASER : 0) | + (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO); +} + +/* Verify that the forced flow control settings (!EFX_FC_AUTO) are + * supported by the link partner. 
Warn the user if this isn't the case + */ +static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 rmtadv; + + /* The link partner capabilities are only relevant if the + * link supports flow control autonegotiation + */ + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + return; + + /* If flow control autoneg is supported and enabled, then fine */ + if (efx->wanted_fc & EFX_FC_AUTO) + return; + + rmtadv = 0; + if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + rmtadv |= ADVERTISED_Pause; + if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + rmtadv |= ADVERTISED_Asym_Pause; + + if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) + netif_err(efx, link, efx->net_dev, + "warning: link partner doesn't support pause frames"); +} + +bool efx_siena_mcdi_phy_poll(struct efx_nic *efx) +{ + struct efx_link_state old_state = efx->link_state; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + efx->link_state.up = false; + else + efx_mcdi_phy_decode_link( + efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + + return !efx_link_state_equal(&efx->link_state, &old_state); +} + +int efx_siena_mcdi_phy_probe(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + u32 caps; + int rc; + + /* Initialise and populate phy_data */ + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (phy_data == NULL) + return -ENOMEM; + + rc = efx_mcdi_get_phy_cfg(efx, phy_data); + if (rc != 0) + goto fail; + + /* Read initial link advertisement */ + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + goto fail; + + /* Fill out nic state */ + efx->phy_data = phy_data; + efx->phy_type = phy_data->type; + + efx->mdio_bus = phy_data->channel; + efx->mdio.prtad = phy_data->port; + efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); + efx->mdio.mode_support = 0; + if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C22; + if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); + if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) + mcdi_to_ethtool_linkset(phy_data->media, caps, + efx->link_advertising); + else + phy_data->forced_cap = caps; + + /* Assert that we can map efx -> mcdi loopback modes */ + BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); + BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); + BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); + BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); + BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); + BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); + BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); + BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); + BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); + BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); + BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); + BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); + BUILD_BUG_ON(LOOPBACK_SGMII_FAR != 
MC_CMD_LOOPBACK_SGMII_FAR); + BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); + BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); + BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); + BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); + BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); + BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); + BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); + BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); + + rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); + if (rc != 0) + goto fail; + /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, + * but by convention we don't + */ + efx->loopback_modes &= ~(1 << LOOPBACK_NONE); + + /* Set the initial link mode */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + + /* Record the initial FEC configuration (or nearest approximation + * representable in the ethtool configuration space) + */ + efx->fec_config = mcdi_fec_caps_to_ethtool(caps, + efx->link_state.speed == 25000 || + efx->link_state.speed == 50000); + + /* Default to Autonegotiated flow control if the PHY supports it */ + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->wanted_fc |= EFX_FC_AUTO; + efx_siena_link_set_wanted_fc(efx, efx->wanted_fc); + + return 0; + +fail: + kfree(phy_data); + return rc; +} + +void efx_siena_mcdi_phy_remove(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + efx->phy_data = NULL; + kfree(phy_data); +} + +void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx, + struct ethtool_link_ksettings *cmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + cmd->base.speed = efx->link_state.speed; + cmd->base.duplex = efx->link_state.fd; + cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media); + cmd->base.phy_address = phy_cfg->port; + cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg); + cmd->base.mdio_support = (efx->mdio.mode_support & + (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); + + mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap, + cmd->link_modes.supported); + memcpy(cmd->link_modes.advertising, efx->link_advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return; + mcdi_to_ethtool_linkset(phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP), + cmd->link_modes.lp_advertising); +} + +int +efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx, + const struct ethtool_link_ksettings *cmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + if (cmd->base.autoneg) { + caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); + } else if (cmd->base.duplex) { + switch (cmd->base.speed) { + case 10: caps = 1 << 
MC_CMD_PHY_CAP_10FDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; + case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; + case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; + case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break; + case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break; + case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break; + default: return -EINVAL; + } + } else { + switch (cmd->base.speed) { + case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; + default: return -EINVAL; + } + } + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config); + + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); + if (rc) + return rc; + + if (cmd->base.autoneg) { + efx_siena_link_set_advertising(efx, cmd->link_modes.advertising); + phy_cfg->forced_cap = 0; + } else { + efx_siena_link_clear_advertising(efx); + phy_cfg->forced_cap = caps; + } + return 0; +} + +int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx, + struct ethtool_fecparam *fec) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN); + u32 caps, active, speed; /* MCDI format */ + bool is_25g = false; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN) + return -EOPNOTSUPP; + + /* behaviour for 25G/50G links depends on 25G BASER bit */ + speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED); + is_25g = speed == 25000 || speed == 50000; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP); + fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g); + /* BASER is never supported on 100G */ + if (speed == 100000) + fec->fec &= ~ETHTOOL_FEC_BASER; + + active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE); + switch (active) { + case MC_CMD_FEC_NONE: + fec->active_fec = ETHTOOL_FEC_OFF; + break; + case MC_CMD_FEC_BASER: + fec->active_fec = ETHTOOL_FEC_BASER; + break; + case MC_CMD_FEC_RS: + fec->active_fec = ETHTOOL_FEC_RS; + break; + default: + netif_warn(efx, hw, efx->net_dev, + "Firmware reports unrecognised FEC_TYPE %u\n", + active); + /* We don't know what firmware has picked. AUTO is as good a + * "can't happen" value as any other. + */ + fec->active_fec = ETHTOOL_FEC_AUTO; + break; + } + + return 0; +} + +/* Basic validation to ensure that the caps we are going to attempt to set are + * in fact supported by the adapter. Note that 'no FEC' is always supported. 
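+ * For example, requesting ETHTOOL_FEC_RS on a PHY whose supported_cap has no
+ * RS capability bits maps to an empty MCDI mask and is rejected with
+ * -EINVAL, whereas ETHTOOL_FEC_OFF is accepted unconditionally.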
+ */ +static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap) +{ + if (ethtool_cap & ETHTOOL_FEC_OFF) + return 0; + + if (ethtool_cap && + !ethtool_fec_caps_to_mcdi(supported_cap, ethtool_cap)) + return -EINVAL; + return 0; +} + +int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx, + const struct ethtool_fecparam *fec) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + rc = ethtool_fec_supported(phy_cfg->supported_cap, fec->fec); + if (rc) + return rc; + + /* Work out what efx_siena_mcdi_phy_set_link_ksettings() would produce from + * saved advertising bits + */ + if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising)) + caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); + else + caps = phy_cfg->forced_cap; + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, fec->fec); + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); + if (rc) + return rc; + + /* Record the new FEC setting for subsequent set_link calls */ + efx->fec_config = fec->fec; + return 0; +} + +int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) + return -EIO; + if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK) + return -EINVAL; + + return 0; +} + +int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps = (efx->link_advertising[0] ? + ethtool_linkset_to_mcdi_cap(efx->link_advertising) : + phy_cfg->forced_cap); + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config); + + return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); +} + +static const char *const mcdi_sft9001_cable_diag_names[] = { + "cable.pairA.length", + "cable.pairB.length", + "cable.pairC.length", + "cable.pairD.length", + "cable.pairA.status", + "cable.pairB.status", + "cable.pairC.status", + "cable.pairD.status", +}; + +static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, + int *results) +{ + unsigned int retry, i, count = 0; + size_t outlen; + u32 status; + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN); + u8 *ptr; + int rc; + + BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, + MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); + if (rc) + goto out; + + /* Wait up to 10s for BIST to finish */ + for (retry = 0; retry < 100; ++retry) { + BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto out; + + status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + if (status != MC_CMD_POLL_BIST_RUNNING) + goto finished; + + msleep(100); + } + + rc = -ETIMEDOUT; + goto out; + +finished: + results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 
1 : -1; + + /* SFT9001 specific cable diagnostics output */ + if (efx->phy_type == PHY_TYPE_SFT9001B && + (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || + bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { + ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); + if (status == MC_CMD_POLL_BIST_PASSED && + outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { + for (i = 0; i < 8; i++) { + results[count + i] = + EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i], + EFX_DWORD_0); + } + } + count += 8; + } + rc = count; + +out: + return rc; +} + +int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results, + unsigned int flags) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 mode; + int rc; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); + if (rc < 0) + return rc; + + results += rc; + } + + /* If we support both LONG and SHORT, then run each in response to + * break or not. Otherwise, run the one we support + */ + mode = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) { + if ((flags & ETH_TEST_FL_OFFLINE) && + (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + else + mode = MC_CMD_PHY_BIST_CABLE_SHORT; + } else if (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + + if (mode != 0) { + rc = efx_mcdi_bist(efx, mode, results); + if (rc < 0) + return rc; + results += rc; + } + + return 0; +} + +const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + if (index == 0) + return "bist"; + --index; + } + + if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) | + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) { + if (index == 0) + return "cable"; + --index; + + if (efx->phy_type == PHY_TYPE_SFT9001B) { + if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) + return mcdi_sft9001_cable_diag_names[index]; + index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); + } + } + + return NULL; +} + +#define SFP_PAGE_SIZE 128 +#define SFF_DIAG_TYPE_OFFSET 92 +#define SFF_DIAG_ADDR_CHANGE BIT(2) +#define SFF_8079_NUM_PAGES 2 +#define SFF_8472_NUM_PAGES 4 +#define SFF_8436_NUM_PAGES 5 +#define SFF_DMT_LEVEL_OFFSET 94 + +/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom + * @efx: NIC context + * @page: EEPROM page number + * @data: Destination data pointer + * @offset: Offset in page to copy from in to data + * @space: Space available in data + * + * Return: + * >=0 - amount of data copied + * <0 - error + */ +static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx, + unsigned int page, + u8 *data, ssize_t offset, + ssize_t space) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); + unsigned int payload_len; + unsigned int to_copy; + size_t outlen; + int rc; + + if (offset > SFP_PAGE_SIZE) + return -EINVAL; + + to_copy = min(space, SFP_PAGE_SIZE - offset); + + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + return -EIO; + + payload_len = MCDI_DWORD(outbuf, 
GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; + + memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, + to_copy); + + return to_copy; +} + +static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx, + unsigned int page, + u8 byte) +{ + u8 data; + int rc; + + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1); + if (rc == 1) + return data; + + return rc; +} + +static int efx_mcdi_phy_diag_type(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the diagnostic type at byte 92. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DIAG_TYPE_OFFSET); +} + +static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the DMT level at byte 94. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DMT_LEVEL_OFFSET); +} + +static u32 efx_mcdi_phy_module_type(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS) + return phy_data->media; + + /* A QSFP+ NIC may actually have an SFP+ module attached. + * The ID is page 0, byte 0. + */ + switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) { + case 0x3: + return MC_CMD_MEDIA_SFP_PLUS; + case 0xc: + case 0xd: + return MC_CMD_MEDIA_QSFP_PLUS; + default: + return 0; + } +} + +int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + int rc; + ssize_t space_remaining = ee->len; + unsigned int page_off; + bool ignore_missing; + int num_pages; + int page; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ? + SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES; + page = 0; + ignore_missing = false; + break; + case MC_CMD_MEDIA_QSFP_PLUS: + num_pages = SFF_8436_NUM_PAGES; + page = -1; /* We obtain the lower page by asking for -1. */ + ignore_missing = true; /* Ignore missing pages after page 0. */ + break; + default: + return -EOPNOTSUPP; + } + + page_off = ee->offset % SFP_PAGE_SIZE; + page += ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < num_pages)) { + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, + data, page_off, + space_remaining); + + if (rc > 0) { + space_remaining -= rc; + data += rc; + page_off = 0; + page++; + } else if (rc == 0) { + space_remaining = 0; + } else if (ignore_missing && (page > 0)) { + int intended_size = SFP_PAGE_SIZE - page_off; + + space_remaining -= intended_size; + if (space_remaining < 0) { + space_remaining = 0; + } else { + memset(data, 0, intended_size); + data += intended_size; + page_off = 0; + page++; + rc = 0; + } + } else { + return rc; + } + } + + return 0; +} + +int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo) +{ + int sff_8472_level; + int diag_type; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + sff_8472_level = efx_mcdi_phy_sff_8472_level(efx); + + /* If we can't read the diagnostics level we have none. */ + if (sff_8472_level < 0) + return -EOPNOTSUPP; + + /* Check if this module requires the (unsupported) address + * change operation. 
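+ * (SFF-8472 diagnostics live at a second I2C address; a module that needs
+ * an address-change sequence to reach them is treated as plain SFF-8079
+ * here.)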
+ */ + diag_type = efx_mcdi_phy_diag_type(efx); + + if (sff_8472_level == 0 || + (diag_type & SFF_DIAG_ADDR_CHANGE)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + + case MC_CMD_MEDIA_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static unsigned int efx_calc_mac_mtu(struct efx_nic *efx) +{ + return EFX_MAX_FRAME_LEN(efx->net_dev->mtu); +} + +int efx_siena_mcdi_set_mac(struct efx_nic *efx) +{ + u32 fcntl; + MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + /* This has no effect on EF10 */ + ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), + efx->net_dev->dev_addr); + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx)); + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); + + /* Set simple MAC filter for Siena */ + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT, + SET_MAC_IN_REJECT_UNCST, efx->unicast_filter); + + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS, + SET_MAC_IN_FLAG_INCLUDE_FCS, + !!(efx->net_dev->features & NETIF_F_RXFCS)); + + switch (efx->wanted_fc) { + case EFX_FC_RX | EFX_FC_TX: + fcntl = MC_CMD_FCNTL_BIDIR; + break; + case EFX_FC_RX: + fcntl = MC_CMD_FCNTL_RESPOND; + break; + default: + fcntl = MC_CMD_FCNTL_OFF; + break; + } + if (efx->wanted_fc & EFX_FC_AUTO) + fcntl = MC_CMD_FCNTL_AUTO; + if (efx->fc_disable) + fcntl = MC_CMD_FCNTL_OFF; + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); + + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, + sizeof(cmdbytes), NULL, 0, NULL); +} + +enum efx_stats_action { + EFX_STATS_ENABLE, + EFX_STATS_DISABLE, + EFX_STATS_PULL, +}; + +static int efx_mcdi_mac_stats(struct efx_nic *efx, + enum efx_stats_action action, int clear) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + int rc; + int change = action == EFX_STATS_PULL ? 0 : 1; + int enable = action == EFX_STATS_ENABLE ? 1 : 0; + int period = action == EFX_STATS_ENABLE ? 1000 : 0; + dma_addr_t dma_addr = efx->stats_buffer.dma_addr; + u32 dma_len = action != EFX_STATS_DISABLE ? 
+ efx->num_mac_stats * sizeof(u64) : 0; + + BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); + MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, !!enable, + MAC_STATS_IN_CLEAR, clear, + MAC_STATS_IN_PERIODIC_CHANGE, change, + MAC_STATS_IN_PERIODIC_ENABLE, enable, + MAC_STATS_IN_PERIODIC_CLEAR, 0, + MAC_STATS_IN_PERIODIC_NOEVENT, 1, + MAC_STATS_IN_PERIOD_MS, period); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id); + + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, + sizeof(inbuf), NULL, 0, NULL); + /* Expect ENOENT if DMA queues have not been set up */ + if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) + efx_siena_mcdi_display_error(efx, MC_CMD_MAC_STATS, + sizeof(inbuf), NULL, 0, rc); + return rc; +} + +void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + + efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); +} + +void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx) +{ + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); +} + +#define EFX_MAC_STATS_WAIT_US 100 +#define EFX_MAC_STATS_WAIT_ATTEMPTS 10 + +void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); + + while (dma_stats[efx->num_mac_stats - 1] == + EFX_MC_STATS_GENERATION_INVALID && + attempts-- != 0) + udelay(EFX_MAC_STATS_WAIT_US); +} + +int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx) +{ + int rc; + + if (!efx->num_mac_stats) + return 0; + + /* Allocate buffer for stats */ + rc = efx_siena_alloc_buffer(efx, &efx->stats_buffer, + efx->num_mac_stats * sizeof(u64), + GFP_KERNEL); + if (rc) { + netif_warn(efx, probe, efx->net_dev, + "failed to allocate DMA buffer: %d\n", rc); + return rc; + } + + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64) efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64) virt_to_phys(efx->stats_buffer.addr)); + + return 0; +} + +void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx) +{ + efx_siena_free_buffer(efx, &efx->stats_buffer); +} + +static unsigned int efx_mcdi_event_link_speed[] = { + [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, + [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, + [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, + [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000, + [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000, + [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000, + [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000, +}; + +void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) +{ + u32 flags, fcntl, speed, lpa; + + speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); + EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); + speed = efx_mcdi_event_link_speed[speed]; + + flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); + fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); + lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); + + /* efx->link_state is only modified by efx_mcdi_phy_get_link(), + * which is only run after flushing the event queues. Therefore, it + * is safe to modify the link state outside of the mac_lock here. 
+ */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); + + efx_mcdi_phy_check_fcntl(efx, lpa); + + efx_siena_link_status_changed(efx); +} diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.h b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h new file mode 100644 index 0000000000..7a6de13d9c --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#ifndef EFX_MCDI_PORT_COMMON_H +#define EFX_MCDI_PORT_COMMON_H + +#include "net_driver.h" +#include "mcdi.h" +#include "mcdi_pcol.h" + +struct efx_mcdi_phy_data { + u32 flags; + u32 type; + u32 supported_cap; + u32 channel; + u32 port; + u32 stats_mask; + u8 name[20]; + u32 media; + u32 mmd_mask; + u8 revision[20]; + u32 forced_cap; +}; + +void efx_siena_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising); +bool efx_siena_mcdi_phy_poll(struct efx_nic *efx); +int efx_siena_mcdi_phy_probe(struct efx_nic *efx); +void efx_siena_mcdi_phy_remove(struct efx_nic *efx); +void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx, + struct ethtool_link_ksettings *cmd); +int efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx, + const struct ethtool_link_ksettings *cmd); +int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx, + struct ethtool_fecparam *fec); +int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx, + const struct ethtool_fecparam *fec); +int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx); +int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx); +int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results, + unsigned int flags); +const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index); +int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data); +int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo); +int efx_siena_mcdi_set_mac(struct efx_nic *efx); +int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx); +void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx); + +#endif diff --git a/drivers/net/ethernet/sfc/siena/mtd.c b/drivers/net/ethernet/sfc/siena/mtd.c new file mode 100644 index 0000000000..12a624247f --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mtd.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ */
+
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "efx.h"
+
+#define to_efx_mtd_partition(mtd)				\
+	container_of(mtd, struct efx_mtd_partition, mtd)
+
+/* MTD interface */
+
+static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+	struct efx_nic *efx = mtd->priv;
+
+	return efx->type->mtd_erase(mtd, erase->addr, erase->len);
+}
+
+static void efx_mtd_sync(struct mtd_info *mtd)
+{
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	int rc;
+
+	rc = efx->type->mtd_sync(mtd);
+	if (rc)
+		pr_err("%s: %s sync failed (%d)\n",
+		       part->name, part->dev_type_name, rc);
+}
+
+static void efx_siena_mtd_remove_partition(struct efx_mtd_partition *part)
+{
+	int rc;
+
+	for (;;) {
+		rc = mtd_device_unregister(&part->mtd);
+		if (rc != -EBUSY)
+			break;
+		ssleep(1);
+	}
+	WARN_ON(rc);
+	list_del(&part->node);
+}
+
+int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+		      size_t n_parts, size_t sizeof_part)
+{
+	struct efx_mtd_partition *part;
+	size_t i;
+
+	for (i = 0; i < n_parts; i++) {
+		part = (struct efx_mtd_partition *)((char *)parts +
+						    i * sizeof_part);
+
+		part->mtd.writesize = 1;
+
+		if (!(part->mtd.flags & MTD_NO_ERASE))
+			part->mtd.flags |= MTD_WRITEABLE;
+
+		part->mtd.owner = THIS_MODULE;
+		part->mtd.priv = efx;
+		part->mtd.name = part->name;
+		part->mtd._erase = efx_mtd_erase;
+		part->mtd._read = efx->type->mtd_read;
+		part->mtd._write = efx->type->mtd_write;
+		part->mtd._sync = efx_mtd_sync;
+
+		efx->type->mtd_rename(part);
+
+		if (mtd_device_register(&part->mtd, NULL, 0))
+			goto fail;
+
+		/* Add to list in order - efx_siena_mtd_remove() depends on this */
+		list_add_tail(&part->node, &efx->mtd_list);
+	}
+
+	return 0;
+
+fail:
+	while (i--) {
+		part = (struct efx_mtd_partition *)((char *)parts +
+						    i * sizeof_part);
+		efx_siena_mtd_remove_partition(part);
+	}
+	/* Failure is unlikely here, but probably means we're out of memory */
+	return -ENOMEM;
+}
+
+void efx_siena_mtd_remove(struct efx_nic *efx)
+{
+	struct efx_mtd_partition *parts, *part, *next;
+
+	WARN_ON(efx_dev_registered(efx));
+
+	if (list_empty(&efx->mtd_list))
+		return;
+
+	parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+				 node);
+
+	list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+		efx_siena_mtd_remove_partition(part);
+
+	kfree(parts);
+}
+
+void efx_siena_mtd_rename(struct efx_nic *efx)
+{
+	struct efx_mtd_partition *part;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(part, &efx->mtd_list, node)
+		efx->type->mtd_rename(part);
+}
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
new file mode 100644
index 0000000000..ff7bbc3259
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -0,0 +1,1715 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
new file mode 100644
index 0000000000..ff7bbc3259
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -0,0 +1,1715 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EFX_NET_DRIVER_H
+#define EFX_NET_DRIVER_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
+#include <net/xdp.h>
+
+#include "enum.h"
+#include "bitfield.h"
+#include "filter.h"
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+
+#ifdef DEBUG
+#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EFX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+#define EFX_MAX_CHANNELS 32U
+#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+#define EFX_EXTRA_CHANNEL_IOV	0
+#define EFX_EXTRA_CHANNEL_PTP	1
+#define EFX_MAX_EXTRA_CHANNELS	2U
+
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OUTER_CSUM	1	/* Outer checksum offload */
+#define EFX_TXQ_TYPE_INNER_CSUM	2	/* Inner checksum offload */
+#define EFX_TXQ_TYPE_HIGHPRI	4	/* High-priority (for TC) */
+#define EFX_TXQ_TYPES		8
+/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */
+#define EFX_MAX_TXQ_PER_CHANNEL	4
+#define EFX_MAX_TX_QUEUES	(EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS)
+
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Minimum MTU, from RFC791 (IP) */
+#define EFX_MIN_MTU 68
+
+/* Maximum total header length for TSOv2 */
+#define EFX_TSO2_MAX_HDRLEN	208
+
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE	(2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer. Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT	L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT	4
+#endif
+
+/* Non-standard XDP_PACKET_HEADROOM and tailroom to satisfy XDP_REDIRECT and
+ * still fit two standard MTU size packets into a single 4K page.
+ */
+#define EFX_XDP_HEADROOM	128
+#define EFX_XDP_TAILROOM	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* Forward declare Precision Time Protocol (PTP) support structure. */
+struct efx_ptp_data;
+struct hwtstamp_config;
+
+struct efx_self_tests;
+
+/**
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ *
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
+ */
+struct efx_buffer {
+	void *addr;
+	dma_addr_t dma_addr;
+	unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+	struct efx_buffer buf;
+	unsigned int index;
+	unsigned int entries;
+};
+
+/**
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *	freed when descriptor completes
+ * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
+ *	member is the associated buffer to drop a page reference on.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
+ *	descriptor.
+ * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
+ * @len: Length of this fragment.
+ *	This field is zero when the queue slot is empty.
+ * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ *	Only valid if @unmap_len != 0.
+ */
+struct efx_tx_buffer {
+	union {
+		const struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
+	union {
+		efx_qword_t option;	/* EF10 */
+		dma_addr_t dma_addr;
+	};
+	unsigned short flags;
+	unsigned short len;
+	unsigned short unmap_len;
+	unsigned short dma_offset;
+};
+#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
+#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
+#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION	0x10	/* empty buffer for option descriptor */
+#define EFX_TX_BUF_XDP		0x20	/* buffer was sent with XDP */
+#define EFX_TX_BUF_TSO_V3	0x40	/* empty buffer for a TSO_V3 descriptor */
+
+/**
+ * struct efx_tx_queue - An Efx TX queue
+ *
+ * This is a ring buffer of TX fragments.
+ * Since the TX completion path always executes on the same
+ * CPU and the xmit path can operate on different CPUs,
+ * performance is increased by ensuring that the completion
+ * path and the xmit path operate on different cache lines.
+ * This is particularly important if the xmit path is always
+ * executing on one CPU which is different from the completion
+ * path. There is also a cache line for members which are
+ * read but not written on the fast path.
+ *
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @label: Label for TX completion events.
+ *	Is our index within @channel->tx_queue array.
+ * @type: configuration type of this TX queue. A bitmask of %EFX_TXQ_TYPE_* flags.
+ * @tso_version: Version of TSO in use for this queue.
+ * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series.
+ * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
+ * @buffer: The software buffer ring
+ * @cb_page: Array of pages of copy buffers. Carved up according to
+ *	%EFX_TX_CB_ORDER into %EFX_TX_CB_SIZE-sized chunks.
+ * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
+ * @initialised: Has hardware queue been initialised?
+ * @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
+ * @read_count: Current read pointer.
+ *	This is the number of buffers that have been removed from both rings.
+ * @old_write_count: The value of @write_count when last checked.
+ *	This is here for performance reasons. The xmit path will
+ *	only get the up-to-date value of @write_count if this
+ *	variable indicates that the queue is empty. This is to
+ *	avoid cache-line ping-pong between the xmit path and the
+ *	completion path.
+ * @merge_events: Number of TX merged completion events
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
+ * @insert_count: Current insert pointer
+ *	This is the number of buffers that have been added to the
+ *	software ring.
+ * @write_count: Current write pointer
+ *	This is the number of buffers that have been added to the
+ *	hardware ring.
+ * @packet_write_count: Completable write pointer
+ *	This is the write pointer of the last packet written.
+ *	Normally this will equal @write_count, but as option descriptors
+ *	don't produce completion events, they won't update this.
+ *	Filled in iff @efx->type->option_descriptors; only used for PIO.
+ *	Thus, this is written and used on EF10, and neither on farch.
+ * @old_read_count: The value of read_count when last checked.
+ *	This is here for performance reasons. The xmit path will
+ *	only get the up-to-date value of read_count if this
+ *	variable indicates that the queue is full. This is to
+ *	avoid cache-line ping-pong between the xmit path and the
+ *	completion path.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ *	blocks
+ * @tso_packets: Number of packets via the TSO xmit path
+ * @tso_fallbacks: Number of times TSO fallback used
+ * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
+ * @xmit_pending: Are any packets waiting to be pushed to the NIC
+ * @cb_packets: Number of times the TX copybreak feature has been used
+ * @notify_count: Count of notified descriptors to the NIC
+ * @empty_read_count: If the completion path has seen the queue as empty
+ *	and the transmission path has not yet checked this, the value of
+ *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
+ */
+struct efx_tx_queue {
+	/* Members which don't change on the fast path */
+	struct efx_nic *efx ____cacheline_aligned_in_smp;
+	unsigned int queue;
+	unsigned int label;
+	unsigned int type;
+	unsigned int tso_version;
+	bool tso_encap;
+	struct efx_channel *channel;
+	struct netdev_queue *core_txq;
+	struct efx_tx_buffer *buffer;
+	struct efx_buffer *cb_page;
+	struct efx_special_buffer txd;
+	unsigned int ptr_mask;
+	void __iomem *piobuf;
+	unsigned int piobuf_offset;
+	bool initialised;
+	bool timestamping;
+	bool xdp_tx;
+
+	/* Members used mainly on the completion path */
+	unsigned int read_count ____cacheline_aligned_in_smp;
+	unsigned int old_write_count;
+	unsigned int merge_events;
+	unsigned int bytes_compl;
+	unsigned int pkts_compl;
+	u32 completed_timestamp_major;
+	u32 completed_timestamp_minor;
+
+	/* Members used only on the xmit path */
+	unsigned int insert_count ____cacheline_aligned_in_smp;
+	unsigned int write_count;
+	unsigned int packet_write_count;
+	unsigned int old_read_count;
+	unsigned int tso_bursts;
+	unsigned int tso_long_headers;
+	unsigned int tso_packets;
+	unsigned int tso_fallbacks;
+	unsigned int pushes;
+	unsigned int pio_packets;
+	bool xmit_pending;
+	unsigned int cb_packets;
+	unsigned int notify_count;
+	/* Statistics to supplement MAC stats */
+	unsigned long tx_packets;
+
+	/* Members shared between paths and sometimes updated */
+	unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
+	atomic_t flush_outstanding;
+};
+
+#define EFX_TX_CB_ORDER	7
+#define EFX_TX_CB_SIZE	(1 << EFX_TX_CB_ORDER) - NET_IP_ALIGN
+
+/**
+ * struct efx_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @page: The associated page buffer.
+ *	Will be %NULL if the buffer slot is currently free.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ *	If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ *	If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ *	first buffer of a scattered packet.
+ */
+struct efx_rx_buffer {
+	dma_addr_t dma_addr;
+	struct page *page;
+	u16 page_offset;
+	u16 len;
+	u16 flags;
+};
+#define EFX_RX_BUF_LAST_IN_PAGE	0x0001
+#define EFX_RX_PKT_CSUMMED	0x0002
+#define EFX_RX_PKT_DISCARD	0x0004
+#define EFX_RX_PKT_TCP		0x0040
+#define EFX_RX_PKT_PREFIX_LEN	0x0080	/* length is in prefix only */
+#define EFX_RX_PKT_CSUM_LEVEL	0x0200
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+	dma_addr_t dma_addr;
+
+	unsigned int __pad[] ____cacheline_aligned;
+};
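The page-state header above is what allows a receive page to be handed up to the networking stack and later recycled without being re-mapped: the DMA address of the whole page is stored at the start of the page itself when it is first mapped. A sketch of the address arithmetic this layout implies (illustrative only; the real logic lives in the driver's RX buffer code, and example_rx_buf_dma_addr() is not a driver function):

	/* Recover the DMA address of an RX buffer from the page it lives in,
	 * assuming the page was mapped once with dma_map_page() and that
	 * struct efx_rx_page_state occupies the start of the page.
	 */
	static dma_addr_t example_rx_buf_dma_addr(struct page *page,
						  unsigned int page_offset)
	{
		struct efx_rx_page_state *state = page_address(page);

		/* Buffers are laid out after the state header, so
		 * page_offset is always >= sizeof(*state).
		 */
		return state->dma_addr + page_offset;
	}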
+
+/**
+ * struct efx_rx_queue - An Efx RX queue
+ * @efx: The associated Efx NIC
+ * @core_index: Index of network core RX queue. Will be >= 0 iff this
+ *	is associated with a real RX queue.
+ * @buffer: The software buffer ring
+ * @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @refill_enabled: Enable refill whenever fill level is low
+ * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
+ *	@rxq_flush_pending.
+ * @added_count: Number of buffers added to the receive queue.
+ * @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Used by NIC specific receive code.
+ * @scatter_len: Used by NIC specific receive code.
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ *	the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ *	recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
+ * @max_fill: RX descriptor maximum fill level (<= ring size)
+ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
+ *	(<= @max_fill)
+ * @min_fill: RX descriptor minimum non-zero fill level.
+ *	This records the minimum fill level observed when a ring
+ *	refill was triggered.
+ * @recycle_count: RX buffer recycle counter.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @xdp_rxq_info: XDP specific RX queue information.
+ * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?
+ */
+struct efx_rx_queue {
+	struct efx_nic *efx;
+	int core_index;
+	struct efx_rx_buffer *buffer;
+	struct efx_special_buffer rxd;
+	unsigned int ptr_mask;
+	bool refill_enabled;
+	bool flush_pending;
+
+	unsigned int added_count;
+	unsigned int notified_count;
+	unsigned int removed_count;
+	unsigned int scatter_n;
+	unsigned int scatter_len;
+	struct page **page_ring;
+	unsigned int page_add;
+	unsigned int page_remove;
+	unsigned int page_recycle_count;
+	unsigned int page_recycle_failed;
+	unsigned int page_recycle_full;
+	unsigned int page_ptr_mask;
+	unsigned int max_fill;
+	unsigned int fast_fill_trigger;
+	unsigned int min_fill;
+	unsigned int min_overfill;
+	unsigned int recycle_count;
+	struct timer_list slow_fill;
+	unsigned int slow_fill_count;
+	/* Statistics to supplement MAC stats */
+	unsigned long rx_packets;
+	struct xdp_rxq_info xdp_rxq_info;
+	bool xdp_rxq_info_valid;
+};
+
+enum efx_sync_events_state {
+	SYNC_EVENTS_DISABLED = 0,
+	SYNC_EVENTS_QUIESCENT,
+	SYNC_EVENTS_REQUESTED,
+	SYNC_EVENTS_VALID,
+};
+
+/**
+ * struct efx_channel - An Efx channel
+ *
+ * A channel comprises an event queue, at least one TX queue, at least
+ * one RX queue, and an associated tasklet for processing the event
+ * queue.
+ *
+ * @efx: Associated Efx NIC
+ * @channel: Channel instance number
+ * @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
+ * @enabled: Channel enabled indicator
+ * @irq: IRQ number (MSI and MSI-X only)
+ * @irq_moderation_us: IRQ moderation value (in microseconds)
+ * @napi_dev: Net device used with NAPI
+ * @napi_str: NAPI control structure
+ * @state: state for NAPI vs busy polling
+ * @state_lock: lock protecting @state
+ * @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
+ * @eventq_read_ptr: Event queue read pointer
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
+ * @irq_count: Number of IRQs since last adaptive moderation decision
+ * @irq_mod_score: IRQ moderation score
+ * @rfs_filter_count: number of accelerated RFS filters currently in place;
+ *	equals the count of @rps_flow_id slots filled
+ * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters
+ *	were checked for expiry
+ * @rfs_expire_index: next accelerated RFS filter ID to check for expiry
+ * @n_rfs_succeeded: number of successful accelerated RFS filter insertions
+ * @n_rfs_failed: number of failed accelerated RFS filter insertions
+ * @filter_work: Work item for efx_filter_rfs_expire()
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *	indexed by filter ID
+ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
+ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
+ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_mcast_mismatch: Count of unmatched multicast frames
+ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
+ * @n_rx_overlength: Count of RX_OVERLENGTH errors
+ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ *	lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP
+ * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
+ * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
+ * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ *	__efx_siena_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ *	by __efx_siena_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
+ * @rx_queue: RX queue for this channel
+ * @tx_queue: TX queues for this channel
+ * @tx_queue_by_type: pointers into @tx_queue, or %NULL, indexed by txq type
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
+ */
+struct efx_channel {
+	struct efx_nic *efx;
+	int channel;
+	const struct efx_channel_type *type;
+	bool eventq_init;
+	bool enabled;
+	int irq;
+	unsigned int irq_moderation_us;
+	struct net_device *napi_dev;
+	struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned long busy_poll_state;
+#endif
+	struct efx_special_buffer eventq;
+	unsigned int eventq_mask;
+	unsigned int eventq_read_ptr;
+	int event_test_cpu;
+
+	unsigned int irq_count;
+	unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int rfs_filter_count;
+	unsigned int rfs_last_expiry;
+	unsigned int rfs_expire_index;
+	unsigned int n_rfs_succeeded;
+	unsigned int n_rfs_failed;
+	struct delayed_work filter_work;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+	u32 *rps_flow_id;
+#endif
+
+	unsigned int n_rx_tobe_disc;
+	unsigned int n_rx_ip_hdr_chksum_err;
+	unsigned int n_rx_tcp_udp_chksum_err;
+	unsigned int n_rx_outer_ip_hdr_chksum_err;
+	unsigned int n_rx_outer_tcp_udp_chksum_err;
+	unsigned int n_rx_inner_ip_hdr_chksum_err;
+	unsigned int n_rx_inner_tcp_udp_chksum_err;
+	unsigned int n_rx_eth_crc_err;
+	unsigned int n_rx_mcast_mismatch;
+	unsigned int n_rx_frm_trunc;
+	unsigned int n_rx_overlength;
+	unsigned int n_skbuff_leaks;
+	unsigned int n_rx_nodesc_trunc;
+	unsigned int n_rx_merge_events;
+	unsigned int n_rx_merge_packets;
+	unsigned int n_rx_xdp_drops;
+	unsigned int n_rx_xdp_bad_drops;
+	unsigned int n_rx_xdp_tx;
+	unsigned int n_rx_xdp_redirect;
+
+	unsigned int rx_pkt_n_frags;
+	unsigned int rx_pkt_index;
+
+	struct list_head *rx_list;
+
+	struct efx_rx_queue rx_queue;
+	struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];
+	struct efx_tx_queue *tx_queue_by_type[EFX_TXQ_TYPES];
+
+	enum efx_sync_events_state sync_events_state;
+	u32 sync_timestamp_major;
+	u32 sync_timestamp_minor;
+};
+
+/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+	struct efx_nic *efx;
+	unsigned int index;
+	char name[IFNAMSIZ + 6];
+};
+
+/**
+ * struct efx_channel_type - distinguishes traffic and extra channels
+ * @handle_no_channel: Handle failure to allocate an extra channel
+ * @pre_probe: Set up extra state prior to initialisation
+ * @post_remove: Tear down extra state after finalisation, if allocated.
+ *	May be called on channels that have not been probed.
+ * @get_name: Generate the channel's name (used for its IRQ handler)
+ * @copy: Copy the channel state prior to reallocation. May be %NULL if
+ *	reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @want_txqs: Determine whether this channel should have TX queues
+ *	created. If %NULL, TX queues are not created.
+ * @keep_eventq: Flag for whether event queue should be kept initialised
+ *	while the device is stopped
+ * @want_pio: Flag for whether PIO buffers should be linked to this
+ *	channel's TX queues.
+ */
+struct efx_channel_type {
+	void (*handle_no_channel)(struct efx_nic *);
+	int (*pre_probe)(struct efx_channel *);
+	void (*post_remove)(struct efx_channel *);
+	void (*get_name)(struct efx_channel *, char *buf, size_t len);
+	struct efx_channel *(*copy)(const struct efx_channel *);
+	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+	bool (*want_txqs)(struct efx_channel *);
+	bool keep_eventq;
+	bool want_pio;
+};
+
+enum efx_led_mode {
+	EFX_LED_OFF	= 0,
+	EFX_LED_ON	= 1,
+	EFX_LED_DEFAULT	= 2
+};
+
+#define STRING_TABLE_LOOKUP(val, member) \
+	((val) < member ## _max) ? member ## _names[val] : "(invalid)"
+
+extern const char *const efx_siena_loopback_mode_names[];
+extern const unsigned int efx_siena_loopback_mode_max;
+#define LOOPBACK_MODE(efx) \
+	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_siena_loopback_mode)
+
+enum efx_int_mode {
+	/* Be careful if altering to correct macro below */
+	EFX_INT_MODE_MSIX = 0,
+	EFX_INT_MODE_MSI = 1,
+	EFX_INT_MODE_LEGACY = 2,
+	EFX_INT_MODE_MAX	/* Insert any new items before this */
+};
+#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
+
+enum nic_state {
+	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
+	STATE_READY = 1,	/* hardware ready and netdev registered */
+	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
+	STATE_RECOVERY = 3,	/* device recovering from PCI error */
+};
+
+/* Forward declaration */
+struct efx_nic;
+
+/* Pseudo bit-mask flow control field */
+#define EFX_FC_RX	FLOW_CTRL_RX
+#define EFX_FC_TX	FLOW_CTRL_TX
+#define EFX_FC_AUTO	4
+
+/**
+ * struct efx_link_state - Current state of the link
+ * @up: Link is up
+ * @fd: Link is full-duplex
+ * @fc: Actual flow control flags
+ * @speed: Link speed (Mbps)
+ */
+struct efx_link_state {
+	bool up;
+	bool fd;
+	u8 fc;
+	unsigned int speed;
+};
+
+static inline bool efx_link_state_equal(const struct efx_link_state *left,
+					const struct efx_link_state *right)
+{
+	return left->up == right->up && left->fd == right->fd &&
+		left->fc == right->fc && left->speed == right->speed;
+}
+
+/**
+ * enum efx_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_LOW_POWER: set to low power through MDIO
+ * @PHY_MODE_OFF: switched off through external control
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum efx_phy_mode {
+	PHY_MODE_NORMAL		= 0,
+	PHY_MODE_TX_DISABLED	= 1,
+	PHY_MODE_LOW_POWER	= 2,
+	PHY_MODE_OFF		= 4,
+	PHY_MODE_SPECIAL	= 8,
+};
+
+static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
+{
+	return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
+
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ *	it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
+ */
+struct efx_hw_stat_desc {
+	const char *name;
+	u16 dma_width;
+	u16 offset;
+};
+
+/* Number of bits used in a multicast filter hash address */
+#define EFX_MCAST_HASH_BITS 8
+
+/* Number of (single-bit) entries in a multicast filter hash */
+#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
+
+/* An Efx multicast filter hash */
+union efx_multicast_hash {
+	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
+	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
+};
+
+struct vfdi_status;
+
+/* The reserved RSS context value */
+#define EFX_MCDI_RSS_CONTEXT_INVALID	0xffffffff
+/**
+ * struct efx_rss_context - A user-defined RSS context for filtering
+ * @list: node of linked list on which this struct is stored
+ * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
+ *	%EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+ *	For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
+ * @user_id: the rss_context ID exposed to userspace over ethtool.
+ * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
+ * @rx_hash_key: Toeplitz hash key for this RSS context
+ * @indir_table: Indirection table for this RSS context
+ */
+struct efx_rss_context {
+	struct list_head list;
+	u32 context_id;
+	u32 user_id;
+	bool rx_hash_udp_4tuple;
+	u8 rx_hash_key[40];
+	u32 rx_indir_table[128];
+};
+
+#ifdef CONFIG_RFS_ACCEL
+/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
+ * is used to test if filter does or will exist.
+ */
+#define EFX_ARFS_FILTER_ID_PENDING	-1
+#define EFX_ARFS_FILTER_ID_ERROR	-2
+#define EFX_ARFS_FILTER_ID_REMOVING	-3
+/**
+ * struct efx_arfs_rule - record of an ARFS filter and its IDs
+ * @node: linkage into hash table
+ * @spec: details of the filter (used as key for hash table). Use efx->type to
+ *	determine which member to use.
+ * @rxq_index: channel to which the filter will steer traffic.
+ * @arfs_id: filter ID which was returned to ARFS
+ * @filter_id: index in software filter table. May be
+ *	%EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
+ *	%EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
+ *	%EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
+ */
+struct efx_arfs_rule {
+	struct hlist_node node;
+	struct efx_filter_spec spec;
+	u16 rxq_index;
+	u16 arfs_id;
+	s32 filter_id;
+};
+
+/* Size chosen so that the table is one page (4kB) */
+#define EFX_ARFS_HASH_TABLE_SIZE	512
+
+/**
+ * struct efx_async_filter_insertion - Request to asynchronously insert a filter
+ * @net_dev: Reference to the netdevice
+ * @spec: The filter to insert
+ * @work: Workitem for this request
+ * @rxq_index: Identifies the channel for which this request was made
+ * @flow_id: Identifies the kernel-side flow for which this request was made
+ */
+struct efx_async_filter_insertion {
+	struct net_device *net_dev;
+	struct efx_filter_spec spec;
+	struct work_struct work;
+	u16 rxq_index;
+	u32 flow_id;
+};
+
+/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
+#define EFX_RPS_MAX_IN_FLIGHT	8
+#endif /* CONFIG_RFS_ACCEL */
+
+enum efx_xdp_tx_queues_mode {
+	EFX_XDP_TX_QUEUES_DEDICATED,	/* one queue per core, locking not needed */
+	EFX_XDP_TX_QUEUES_SHARED,	/* each queue used by more than 1 core */
+	EFX_XDP_TX_QUEUES_BORROWED	/* queues borrowed from net stack */
+};
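The three sharing strategies trade queue count against locking cost: a dedicated queue per core needs no lock on the XDP transmit path, shared queues need one, and when no XDP channels could be allocated at all the driver falls back to the network stack's own TX queues. A hedged sketch of how the mode follows from the available resources (example_pick_xdp_mode() is illustrative; the real decision is made during channel allocation):

	static enum efx_xdp_tx_queues_mode
	example_pick_xdp_mode(unsigned int n_xdp_queues, unsigned int n_cpus)
	{
		if (!n_xdp_queues)		/* no XDP channels: borrow from the stack */
			return EFX_XDP_TX_QUEUES_BORROWED;
		if (n_xdp_queues >= n_cpus)	/* one queue per CPU: lock-free TX */
			return EFX_XDP_TX_QUEUES_DEDICATED;
		return EFX_XDP_TX_QUEUES_SHARED;	/* fewer queues than CPUs: must lock */
	}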
+ * @workqueue_name: Name of workqueue + * @reset_work: Scheduled reset workitem + * @membase_phys: Memory BAR value as physical address + * @membase: Memory BAR value + * @vi_stride: step between per-VI registers / memory regions + * @interrupt_mode: Interrupt mode + * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds + * @timer_max_ns: Interrupt timer maximum value, in nanoseconds + * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues + * @irqs_hooked: Channel interrupts are hooked + * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues + * @irq_rx_moderation_us: IRQ moderation time for RX event queues + * @msg_enable: Log message enable flags + * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. + * @reset_pending: Bitmask for pending resets + * @tx_queue: TX DMA queues + * @rx_queue: RX DMA queues + * @channel: Channels + * @msi_context: Context for each MSI + * @extra_channel_types: Types of extra (non-traffic) channels that + * should be allocated for this NIC + * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues. + * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit. + * @xdp_txq_queues_mode: XDP TX queues sharing strategy. + * @rxq_entries: Size of receive queues requested by user. + * @txq_entries: Size of transmit queues requested by user. + * @txq_stop_thresh: TX queue fill level at or above which we stop it. + * @txq_wake_thresh: TX queue fill level at or below which we wake it. + * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches + * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches + * @sram_lim_qw: Qword address limit of SRAM + * @next_buffer_table: First available buffer table id + * @n_channels: Number of channels in use + * @n_rx_channels: Number of channels used for RX (= number of RX queues) + * @n_tx_channels: Number of channels used for TX + * @n_extra_tx_channels: Number of extra channels with TX queues + * @tx_queues_per_channel: number of TX queues probed on each channel + * @n_xdp_channels: Number of channels used for XDP TX + * @xdp_channel_offset: Offset of zeroth channel used for XPD TX. + * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel. + * @rx_ip_align: RX DMA address offset to have IP header aligned in + * accordance with NET_IP_ALIGN + * @rx_dma_len: Current maximum RX DMA length + * @rx_buffer_order: Order (log2) of number of pages for each RX buffer + * @rx_buffer_truesize: Amortised allocation size of an RX buffer, + * for use in sk_buff::truesize + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data + * (valid only if @rx_prefix_size != 0; always negative) + * @rx_packet_len_offset: Offset of RX packet length from start of packet data + * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative) + * @rx_packet_ts_offset: Offset of timestamp from start of packet data + * (valid only if channel->sync_timestamps_enabled; always negative) + * @rx_scatter: Scatter mode enabled for receives + * @rss_context: Main RSS context. 
Its @list member is the head of the list of + * RSS contexts created by user requests + * @rss_lock: Protects custom RSS context software state in @rss_context.list + * @vport_id: The function's vport ID, only relevant for PFs + * @int_error_count: Number of internal errors seen recently + * @int_error_expire: Time at which error count will be expired + * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot + * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will + * acknowledge but do nothing else. + * @irq_status: Interrupt status buffer + * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 + * @irq_level: IRQ level/index for IRQs not triggered by an event queue + * @selftest_work: Work item for asynchronous self-test + * @mtd_list: List of MTDs attached to the NIC + * @nic_data: Hardware dependent state + * @mcdi: Management-Controller-to-Driver Interface state + * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, + * efx_monitor() and efx_siena_reconfigure_port() + * @port_enabled: Port enabled indicator. + * Serialises efx_siena_stop_all(), efx_siena_start_all(), + * efx_monitor() and efx_mac_work() with kernel interfaces. + * Safe to read under any one of the rtnl_lock, mac_lock, or netif_tx_lock, + * but all three must be held to modify it. + * @port_initialized: Port initialized? + * @net_dev: Operating system network device. Consider holding the rtnl lock + * @fixed_features: Features which cannot be turned off + * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS + * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS) + * @stats_buffer: DMA buffer for statistics + * @phy_type: PHY type + * @phy_data: PHY private data (including PHY-specific stats) + * @mdio: PHY MDIO interface + * @mdio_bus: PHY MDIO bus ID (only used by Siena) + * @phy_mode: PHY operating mode. Serialised by @mac_lock. + * @link_advertising: Autonegotiation advertising flags + * @fec_config: Forward Error Correction configuration flags. For bit positions + * see &enum ethtool_fec_config_bits. + * @link_state: Current state of the link + * @n_link_state_changes: Number of times the link has changed state + * @unicast_filter: Flag for Falcon-arch simple unicast filter. + * Protected by @mac_lock. + * @multicast_hash: Multicast hash table for Falcon-arch. + * Protected by @mac_lock. + * @wanted_fc: Wanted flow control flags + * @fc_disable: When non-zero flow control is disabled. Typically used to + * ensure that network back pressure doesn't delay dma queue flushes. + * Serialised by the rtnl lock. + * @mac_work: Work item for changing MAC promiscuity and multicast hash + * @loopback_mode: Loopback status + * @loopback_modes: Supported loopback mode bitmask + * @loopback_selftest: Offline self-test private state + * @xdp_prog: Current XDP programme for this interface + * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state + * @filter_state: Architecture-dependent filter table state + * @rps_mutex: Protects RPS state of all channels + * @rps_slot_map: bitmap of in-flight entries in @rps_slot + * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() + * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and + * @rps_next_id). + * @rps_hash_table: Mapping between ARFS filters and their various IDs + * @rps_next_id: next arfs_id for an ARFS filter + * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 
+ * @rxq_flush_pending: Count of number of receive queues that need to be flushed. + * Decremented when the efx_flush_rx_queue() is called. + * @rxq_flush_outstanding: Count of number of RX flushes started but not yet + * completed (either success or failure). Not used when MCDI is used to + * flush receive queues. + * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions. + * @vf_count: Number of VFs intended to be enabled. + * @vf_init_count: Number of VFs that have been fully initialised. + * @vi_scale: log2 number of vnics per VF. + * @ptp_data: PTP state data + * @ptp_warned: has this NIC seen and warned about unexpected PTP events? + * @vpd_sn: Serial number read from VPD + * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their + * xdp_rxq_info structures? + * @netdev_notifier: Netdevice notifier. + * @mem_bar: The BAR that is mapped into membase. + * @reg_base: Offset from the start of the bar to the function control window. + * @monitor_work: Hardware monitor workitem + * @biu_lock: BIU (bus interface unit) lock + * @last_irq_cpu: Last CPU to handle a possible test interrupt. This + * field is used by efx_test_interrupts() to verify that an + * interrupt has occurred. + * @stats_lock: Statistics update lock. Must be held when calling + * efx_nic_type::{update,start,stop}_stats. + * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb + * + * This is stored in the private area of the &struct net_device. + */ +struct efx_nic { + /* The following fields should be written very rarely */ + + char name[IFNAMSIZ]; + struct list_head node; + struct efx_nic *primary; + struct list_head secondary_list; + struct pci_dev *pci_dev; + unsigned int port_num; + const struct efx_nic_type *type; + int legacy_irq; + bool eeh_disabled_legacy_irq; + struct workqueue_struct *workqueue; + char workqueue_name[16]; + struct work_struct reset_work; + resource_size_t membase_phys; + void __iomem *membase; + + unsigned int vi_stride; + + enum efx_int_mode interrupt_mode; + unsigned int timer_quantum_ns; + unsigned int timer_max_ns; + bool irq_rx_adaptive; + bool irqs_hooked; + unsigned int irq_mod_step_us; + unsigned int irq_rx_moderation_us; + u32 msg_enable; + + enum nic_state state; + unsigned long reset_pending; + + struct efx_channel *channel[EFX_MAX_CHANNELS]; + struct efx_msi_context msi_context[EFX_MAX_CHANNELS]; + const struct efx_channel_type * + extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; + + unsigned int xdp_tx_queue_count; + struct efx_tx_queue **xdp_tx_queues; + enum efx_xdp_tx_queues_mode xdp_txq_queues_mode; + + unsigned rxq_entries; + unsigned txq_entries; + unsigned int txq_stop_thresh; + unsigned int txq_wake_thresh; + + unsigned tx_dc_base; + unsigned rx_dc_base; + unsigned sram_lim_qw; + unsigned next_buffer_table; + + unsigned int max_channels; + unsigned int max_vis; + unsigned int max_tx_channels; + unsigned n_channels; + unsigned n_rx_channels; + unsigned rss_spread; + unsigned tx_channel_offset; + unsigned n_tx_channels; + unsigned n_extra_tx_channels; + unsigned int tx_queues_per_channel; + unsigned int n_xdp_channels; + unsigned int xdp_channel_offset; + unsigned int xdp_tx_per_channel; + unsigned int rx_ip_align; + unsigned int rx_dma_len; + unsigned int rx_buffer_order; + unsigned int rx_buffer_truesize; + unsigned int rx_page_buf_step; + unsigned int rx_bufs_per_page; + unsigned int rx_pages_per_batch; + unsigned int rx_prefix_size; + int rx_packet_hash_offset; + int rx_packet_len_offset; + int 
rx_packet_ts_offset; + bool rx_scatter; + struct efx_rss_context rss_context; + struct mutex rss_lock; + u32 vport_id; + + unsigned int_error_count; + unsigned long int_error_expire; + + bool must_realloc_vis; + bool irq_soft_enabled; + struct efx_buffer irq_status; + unsigned irq_zero_count; + unsigned irq_level; + struct delayed_work selftest_work; + +#ifdef CONFIG_SFC_SIENA_MTD + struct list_head mtd_list; +#endif + + void *nic_data; + struct efx_mcdi_data *mcdi; + + struct mutex mac_lock; + struct work_struct mac_work; + bool port_enabled; + + bool mc_bist_for_other_fn; + bool port_initialized; + struct net_device *net_dev; + + netdev_features_t fixed_features; + + u16 num_mac_stats; + struct efx_buffer stats_buffer; + u64 rx_nodesc_drops_total; + u64 rx_nodesc_drops_while_down; + bool rx_nodesc_drops_prev_state; + + unsigned int phy_type; + void *phy_data; + struct mdio_if_info mdio; + unsigned int mdio_bus; + enum efx_phy_mode phy_mode; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); + u32 fec_config; + struct efx_link_state link_state; + unsigned int n_link_state_changes; + + bool unicast_filter; + union efx_multicast_hash multicast_hash; + u8 wanted_fc; + unsigned fc_disable; + + atomic_t rx_reset; + enum efx_loopback_mode loopback_mode; + u64 loopback_modes; + + void *loopback_selftest; + /* We access loopback_selftest immediately before running XDP, + * so we want them next to each other. + */ + struct bpf_prog __rcu *xdp_prog; + + struct rw_semaphore filter_sem; + void *filter_state; +#ifdef CONFIG_RFS_ACCEL + struct mutex rps_mutex; + unsigned long rps_slot_map; + struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; + spinlock_t rps_hash_lock; + struct hlist_head *rps_hash_table; + u32 rps_next_id; +#endif + + atomic_t active_queues; + atomic_t rxq_flush_pending; + atomic_t rxq_flush_outstanding; + wait_queue_head_t flush_wq; + +#ifdef CONFIG_SFC_SIENA_SRIOV + unsigned vf_count; + unsigned vf_init_count; + unsigned vi_scale; +#endif + + struct efx_ptp_data *ptp_data; + bool ptp_warned; + + char *vpd_sn; + bool xdp_rxq_info_failed; + + struct notifier_block netdev_notifier; + + unsigned int mem_bar; + u32 reg_base; + + /* The following fields may be written more often */ + + struct delayed_work monitor_work ____cacheline_aligned_in_smp; + spinlock_t biu_lock; + int last_irq_cpu; + spinlock_t stats_lock; + atomic_t n_rx_noskb_drops; +}; + +static inline int efx_dev_registered(struct efx_nic *efx) +{ + return efx->net_dev->reg_state == NETREG_REGISTERED; +} + +static inline unsigned int efx_port_num(struct efx_nic *efx) +{ + return efx->port_num; +} + +struct efx_mtd_partition { + struct list_head node; + struct mtd_info mtd; + const char *dev_type_name; + const char *type_name; + char name[IFNAMSIZ + 20]; +}; + +struct efx_udp_tunnel { +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID 0xffff + u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */ + __be16 port; +}; + +/** + * struct efx_nic_type - Efx device type definition + * @mem_bar: Get the memory BAR + * @mem_map_size: Get memory BAR mapped size + * @probe: Probe the controller + * @remove: Free resources allocated by probe() + * @init: Initialise the controller + * @dimension_resources: Dimension controller resources (buffer table, + * and VIs once the available interrupt resources are clear) + * @fini: Shut down the controller + * @monitor: Periodic function for polling link state and hardware monitor + * @map_reset_reason: Map ethtool reset reason to a reset method + * @map_reset_flags: Map 
ethtool reset flags to a reset method, if possible + * @reset: Reset the controller hardware and possibly the PHY. This will + * be called while the controller is uninitialised. + * @probe_port: Probe the MAC and PHY + * @remove_port: Free resources allocated by probe_port() + * @handle_global_event: Handle a "global" event (may be %NULL) + * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) + * @prepare_flush: Prepare the hardware for flushing the DMA queues + * (for Falcon architecture) + * @finish_flush: Clean up after flushing the DMA queues (for Falcon + * architecture) + * @prepare_flr: Prepare for an FLR + * @finish_flr: Clean up after an FLR + * @describe_stats: Describe statistics for ethtool + * @update_stats: Update statistics not provided by event handling. + * Either argument may be %NULL. + * @update_stats_atomic: Update statistics while in atomic context, if that + * is more limiting than @update_stats. Otherwise, leave %NULL and + * driver core will call @update_stats. + * @start_stats: Start the regular fetching of statistics + * @pull_stats: Pull stats from the NIC and wait until they arrive. + * @stop_stats: Stop the regular fetching of statistics + * @push_irq_moderation: Apply interrupt moderation value + * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY + * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL) + * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings + * to the hardware. Serialised by the mac_lock. + * @check_mac_fault: Check MAC fault state. True if fault present. + * @get_wol: Get WoL configuration from driver state + * @set_wol: Push WoL configuration to the NIC + * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) + * @get_fec_stats: Get standard FEC statistics. + * @test_chip: Test registers. May use efx_farch_test_registers(), and is + * expected to reset the NIC. + * @test_nvram: Test validity of NVRAM contents + * @mcdi_request: Send an MCDI request with the given header and SDU. + * The SDU length may be any value from 0 up to the protocol- + * defined maximum, but its buffer will be padded to a multiple + * of 4 bytes. + * @mcdi_poll_response: Test whether an MCDI response is available. + * @mcdi_read_response: Read the MCDI response PDU. The offset will + * be a multiple of 4. The length may not be, but the buffer + * will be padded so it is safe to round up. + * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, + * return an appropriate error code for aborting any current + * request; otherwise return 0. + * @irq_enable_master: Enable IRQs on the NIC. Each event queue must + * be separately enabled after this. + * @irq_test_generate: Generate a test IRQ + * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event + * queue must be separately disabled before this. + * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is + * a pointer to the &struct efx_msi_context for the channel. + * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument + * is a pointer to the &struct efx_nic. 
+ * @tx_probe: Allocate resources for TX queue (and select TXQ type) + * @tx_init: Initialise TX queue on the NIC + * @tx_remove: Free resources for TX queue + * @tx_write: Write TX descriptors and doorbell + * @tx_enqueue: Add an SKB to TX queue + * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC + * @rx_push_rss_context_config: Write RSS hash key and indirection table for + * user RSS context to the NIC + * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user + * RSS context back from the NIC + * @rx_probe: Allocate resources for RX queue + * @rx_init: Initialise RX queue on the NIC + * @rx_remove: Free resources for RX queue + * @rx_write: Write RX descriptors and doorbell + * @rx_defer_refill: Generate a refill reminder event + * @rx_packet: Receive the queued RX buffer on a channel + * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash + * @ev_probe: Allocate resources for event queue + * @ev_init: Initialise event queue on the NIC + * @ev_fini: Deinitialise event queue on the NIC + * @ev_remove: Free resources for event queue + * @ev_process: Process events for a queue, up to the given NAPI quota + * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ + * @ev_test_generate: Generate a test event + * @filter_table_probe: Probe filter capabilities and set up filter software state + * @filter_table_restore: Restore filters removed from hardware + * @filter_table_remove: Remove filters from hardware and tear down software state + * @filter_update_rx_scatter: Update filters after change to rx scatter setting + * @filter_insert: add or replace a filter + * @filter_remove_safe: remove a filter by ID, carefully + * @filter_get_safe: retrieve a filter by ID, carefully + * @filter_clear_rx: Remove all RX filters whose priority is less than or + * equal to the given priority and is not %EFX_FILTER_PRI_AUTO + * @filter_count_rx_used: Get the number of filters in use at a given priority + * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 + * @filter_get_rx_ids: Get list of RX filters at a given priority + * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. + * This must check whether the specified table entry is used by RFS + * and that rps_may_expire_flow() returns true for it. + * @mtd_probe: Probe and add MTD partitions associated with this net device, + * using efx_siena_mtd_add() + * @mtd_rename: Set an MTD partition name using the net device name + * @mtd_read: Read from an MTD partition + * @mtd_erase: Erase part of an MTD partition + * @mtd_write: Write to an MTD partition + * @mtd_sync: Wait for write-back to complete on MTD partition. This + * also notifies the driver that a writer has finished using this + * partition. + * @ptp_write_host_time: Send host time to MC as part of sync protocol + * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX + * timestamping, possibly only temporarily for the purposes of a reset. + * @ptp_set_ts_config: Set hardware timestamp configuration. The flags + * and tx_type will already have been validated but this operation + * must validate and update rx_filter. + * @get_phys_port_id: Get the underlying physical port id. + * @set_mac_address: Set the MAC address of the device + * @tso_versions: Returns mask of firmware-assisted TSO versions supported. + * If %NULL, then device does not support any TSO version. 
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required. + * @udp_tnl_has_port: Check if a port has been added as UDP tunnel + * @print_additional_fwver: Dump NIC-specific additional FW version info + * @sensor_event: Handle a sensor event from MCDI + * @rx_recycle_ring_size: Size of the RX recycle ring + * @revision: Hardware architecture revision + * @txd_ptr_tbl_base: TX descriptor ring base address + * @rxd_ptr_tbl_base: RX descriptor ring base address + * @buf_tbl_base: Buffer table base address + * @evq_ptr_tbl_base: Event queue pointer table base address + * @evq_rptr_tbl_base: Event queue read-pointer table base address + * @max_dma_mask: Maximum possible DMA mask + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_hash_offset: Offset of RX flow hash within prefix + * @rx_ts_offset: Offset of timestamp within prefix + * @rx_buffer_padding: Size of padding at end of RX packet + * @can_rx_scatter: NIC is able to scatter packets to multiple buffers + * @always_rx_scatter: NIC will always scatter packets to multiple buffers + * @option_descriptors: NIC supports TX option descriptors + * @min_interrupt_mode: Lowest capability interrupt mode supported + * from &enum efx_int_mode. + * @timer_period_max: Maximum period of interrupt timer (in ticks) + * @offload_features: net_device feature flags for protocol offload + * features implemented in hardware + * @mcdi_max_ver: Maximum MCDI version supported + * @hwtstamp_filters: Mask of hardware timestamp filter types supported + */ +struct efx_nic_type { + bool is_vf; + unsigned int (*mem_bar)(struct efx_nic *efx); + unsigned int (*mem_map_size)(struct efx_nic *efx); + int (*probe)(struct efx_nic *efx); + void (*remove)(struct efx_nic *efx); + int (*init)(struct efx_nic *efx); + int (*dimension_resources)(struct efx_nic *efx); + void (*fini)(struct efx_nic *efx); + void (*monitor)(struct efx_nic *efx); + enum reset_type (*map_reset_reason)(enum reset_type reason); + int (*map_reset_flags)(u32 *flags); + int (*reset)(struct efx_nic *efx, enum reset_type method); + int (*probe_port)(struct efx_nic *efx); + void (*remove_port)(struct efx_nic *efx); + bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); + int (*fini_dmaq)(struct efx_nic *efx); + void (*prepare_flush)(struct efx_nic *efx); + void (*finish_flush)(struct efx_nic *efx); + void (*prepare_flr)(struct efx_nic *efx); + void (*finish_flr)(struct efx_nic *efx); + size_t (*describe_stats)(struct efx_nic *efx, u8 *names); + size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats); + size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats); + void (*start_stats)(struct efx_nic *efx); + void (*pull_stats)(struct efx_nic *efx); + void (*stop_stats)(struct efx_nic *efx); + void (*push_irq_moderation)(struct efx_channel *channel); + int (*reconfigure_port)(struct efx_nic *efx); + void (*prepare_enable_fc_tx)(struct efx_nic *efx); + int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only); + bool (*check_mac_fault)(struct efx_nic *efx); + void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); + int (*set_wol)(struct efx_nic *efx, u32 type); + void (*resume_wol)(struct efx_nic *efx); + void (*get_fec_stats)(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats); + unsigned int (*check_caps)(const struct efx_nic *efx, + u8 flag, + u32 offset); + int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); + int 
(*test_nvram)(struct efx_nic *efx); + void (*mcdi_request)(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len); + bool (*mcdi_poll_response)(struct efx_nic *efx); + void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, + size_t pdu_offset, size_t pdu_len); + int (*mcdi_poll_reboot)(struct efx_nic *efx); + void (*mcdi_reboot_detected)(struct efx_nic *efx); + void (*irq_enable_master)(struct efx_nic *efx); + int (*irq_test_generate)(struct efx_nic *efx); + void (*irq_disable_non_ev)(struct efx_nic *efx); + irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); + irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); + int (*tx_probe)(struct efx_tx_queue *tx_queue); + void (*tx_init)(struct efx_tx_queue *tx_queue); + void (*tx_remove)(struct efx_tx_queue *tx_queue); + void (*tx_write)(struct efx_tx_queue *tx_queue); + netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len); + int (*rx_push_rss_config)(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, const u8 *key); + int (*rx_pull_rss_config)(struct efx_nic *efx); + int (*rx_push_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx, + const u32 *rx_indir_table, + const u8 *key); + int (*rx_pull_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx); + void (*rx_restore_rss_contexts)(struct efx_nic *efx); + int (*rx_probe)(struct efx_rx_queue *rx_queue); + void (*rx_init)(struct efx_rx_queue *rx_queue); + void (*rx_remove)(struct efx_rx_queue *rx_queue); + void (*rx_write)(struct efx_rx_queue *rx_queue); + void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); + void (*rx_packet)(struct efx_channel *channel); + bool (*rx_buf_hash_valid)(const u8 *prefix); + int (*ev_probe)(struct efx_channel *channel); + int (*ev_init)(struct efx_channel *channel); + void (*ev_fini)(struct efx_channel *channel); + void (*ev_remove)(struct efx_channel *channel); + int (*ev_process)(struct efx_channel *channel, int quota); + void (*ev_read_ack)(struct efx_channel *channel); + void (*ev_test_generate)(struct efx_channel *channel); + int (*filter_table_probe)(struct efx_nic *efx); + void (*filter_table_restore)(struct efx_nic *efx); + void (*filter_table_remove)(struct efx_nic *efx); + void (*filter_update_rx_scatter)(struct efx_nic *efx); + s32 (*filter_insert)(struct efx_nic *efx, + struct efx_filter_spec *spec, bool replace); + int (*filter_remove_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); + int (*filter_get_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *); + int (*filter_clear_rx)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_count_rx_used)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_get_rx_id_limit)(struct efx_nic *efx); + s32 (*filter_get_rx_ids)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); +#ifdef CONFIG_RFS_ACCEL + bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif +#ifdef CONFIG_SFC_SIENA_MTD + int (*mtd_probe)(struct efx_nic *efx); + void (*mtd_rename)(struct efx_mtd_partition *part); + int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); + int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); + int (*mtd_write)(struct mtd_info *mtd, 
loff_t start, size_t len, + size_t *retlen, const u8 *buffer); + int (*mtd_sync)(struct mtd_info *mtd); +#endif + void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time); + int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); + int (*ptp_set_ts_config)(struct efx_nic *efx, + struct hwtstamp_config *init); + int (*sriov_configure)(struct efx_nic *efx, int num_vfs); + int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); + int (*sriov_init)(struct efx_nic *efx); + void (*sriov_fini)(struct efx_nic *efx); + bool (*sriov_wanted)(struct efx_nic *efx); + void (*sriov_reset)(struct efx_nic *efx); + void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i); + int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac); + int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, + u8 qos); + int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i, + bool spoofchk); + int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivi); + int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, + int link_state); + int (*vswitching_probe)(struct efx_nic *efx); + int (*vswitching_restore)(struct efx_nic *efx); + void (*vswitching_remove)(struct efx_nic *efx); + int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr); + int (*set_mac_address)(struct efx_nic *efx); + u32 (*tso_versions)(struct efx_nic *efx); + int (*udp_tnl_push_ports)(struct efx_nic *efx); + bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); + size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf, + size_t len); + void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev); + unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx); + + int revision; + unsigned int txd_ptr_tbl_base; + unsigned int rxd_ptr_tbl_base; + unsigned int buf_tbl_base; + unsigned int evq_ptr_tbl_base; + unsigned int evq_rptr_tbl_base; + u64 max_dma_mask; + unsigned int rx_prefix_size; + unsigned int rx_hash_offset; + unsigned int rx_ts_offset; + unsigned int rx_buffer_padding; + bool can_rx_scatter; + bool always_rx_scatter; + bool option_descriptors; + unsigned int min_interrupt_mode; + unsigned int timer_period_max; + netdev_features_t offload_features; + int mcdi_max_ver; + unsigned int max_rx_ip_filters; + u32 hwtstamp_filters; + unsigned int rx_hash_key_size; +}; + +/************************************************************************** + * + * Prototypes and inline functions + * + *************************************************************************/ + +static inline struct efx_channel * +efx_get_channel(struct efx_nic *efx, unsigned index) +{ + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_channels); + return efx->channel[index]; +} + +/* Iterate over all used channels */ +#define efx_for_each_channel(_channel, _efx) \ + for (_channel = (_efx)->channel[0]; \ + _channel; \ + _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ + (_efx)->channel[_channel->channel + 1] : NULL) + +/* Iterate over all used channels in reverse */ +#define efx_for_each_channel_rev(_channel, _efx) \ + for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \ + _channel; \ + _channel = _channel->channel ? 
\ + (_efx)->channel[_channel->channel - 1] : NULL) + +static inline struct efx_channel * +efx_get_tx_channel(struct efx_nic *efx, unsigned int index) +{ + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels); + return efx->channel[efx->tx_channel_offset + index]; +} + +static inline struct efx_channel * +efx_get_xdp_channel(struct efx_nic *efx, unsigned int index) +{ + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels); + return efx->channel[efx->xdp_channel_offset + index]; +} + +static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel) +{ + return channel->channel - channel->efx->xdp_channel_offset < + channel->efx->n_xdp_channels; +} + +static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) +{ + return channel && channel->channel >= channel->efx->tx_channel_offset; +} + +static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel) +{ + if (efx_channel_is_xdp_tx(channel)) + return channel->efx->xdp_tx_per_channel; + return channel->efx->tx_queues_per_channel; +} + +static inline struct efx_tx_queue * +efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int type) +{ + EFX_WARN_ON_ONCE_PARANOID(type >= EFX_TXQ_TYPES); + return channel->tx_queue_by_type[type]; +} + +static inline struct efx_tx_queue * +efx_get_tx_queue(struct efx_nic *efx, unsigned int index, unsigned int type) +{ + struct efx_channel *channel = efx_get_tx_channel(efx, index); + + return efx_channel_get_tx_queue(channel, type); +} + +/* Iterate over all TX queues belonging to a channel */ +#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ + if (!efx_channel_has_tx_queues(_channel)) \ + ; \ + else \ + for (_tx_queue = (_channel)->tx_queue; \ + _tx_queue < (_channel)->tx_queue + \ + efx_channel_num_tx_queues(_channel); \ + _tx_queue++) + +static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) +{ + return channel->rx_queue.core_index >= 0; +} + +static inline struct efx_rx_queue * +efx_channel_get_rx_queue(struct efx_channel *channel) +{ + EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_rx_queue(channel)); + return &channel->rx_queue; +} + +/* Iterate over all RX queues belonging to a channel */ +#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ + if (!efx_channel_has_rx_queue(_channel)) \ + ; \ + else \ + for (_rx_queue = &(_channel)->rx_queue; \ + _rx_queue; \ + _rx_queue = NULL) + +static inline struct efx_channel * +efx_rx_queue_channel(struct efx_rx_queue *rx_queue) +{ + return container_of(rx_queue, struct efx_channel, rx_queue); +} + +static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) +{ + return efx_rx_queue_channel(rx_queue)->channel; +} + +/* Returns a pointer to the specified receive buffer in the RX + * descriptor queue. + */ +static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, + unsigned int index) +{ + return &rx_queue->buffer[index]; +} + +static inline struct efx_rx_buffer * +efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) +{ + if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask))) + return efx_rx_buffer(rx_queue, 0); + else + return rx_buf + 1; +} + +/** + * EFX_MAX_FRAME_LEN - calculate maximum frame length + * + * This calculates the maximum frame length that will be used for a + * given MTU. The frame length will be equal to the MTU plus a + * constant amount of header space and padding. This is the quantity + * that the net driver will program into the MAC as the maximum frame + * length. 
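+ *
+ * For example (illustrative arithmetic using the constants below):
+ * an MTU of 1500 gives 1500 + ETH_HLEN (14) + VLAN_HLEN (4) +
+ * ETH_FCS_LEN (4) + EFX_FRAME_PAD (16) = 1538 bytes, which ALIGN()
+ * rounds up to 1544.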
+ *
+ * The 10G MAC requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ *
+ * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
+ * XGMII cycle). If the frame length reaches the maximum value in the
+ * same cycle, the XMAC can miss the IPG altogether. We work around
+ * this by adding a further 16 bytes.
+ */
+#define EFX_FRAME_PAD 16
+#define EFX_MAX_FRAME_LEN(mtu) \
+	(ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8))
+
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+/* Get the max fill level of the TX queues on this channel */
+static inline unsigned int
+efx_channel_tx_fill_level(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	unsigned int fill_level = 0;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		fill_level = max(fill_level,
+				 tx_queue->insert_count - tx_queue->read_count);
+
+	return fill_level;
+}
+
+/* Conservative approximation of efx_channel_tx_fill_level using cached value */
+static inline unsigned int
+efx_channel_tx_old_fill_level(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	unsigned int fill_level = 0;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		fill_level = max(fill_level,
+				 tx_queue->insert_count - tx_queue->old_read_count);
+
+	return fill_level;
+}
+
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it is not present in hw_features, but
+ * is always present in features.
+ */
+static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
+{
+	const struct net_device *net_dev = efx->net_dev;
+
+	return net_dev->features | net_dev->hw_features;
+}
+
+/* Get the current TX queue insert index. */
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+	return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+/* Get a TX buffer. */
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+/* Get a TX buffer, checking it's not currently in use. */
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer =
+		__efx_tx_queue_get_insert_buffer(tx_queue);
+
+	EFX_WARN_ON_ONCE_PARANOID(buffer->len);
+	EFX_WARN_ON_ONCE_PARANOID(buffer->flags);
+	EFX_WARN_ON_ONCE_PARANOID(buffer->unmap_len);
+
+	return buffer;
+}
+
+#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
new file mode 100644
index 0000000000..abf9a4adf1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/cpu_rmap.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+#include "mcdi_pcol.h"
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status, MAC stats, etc.
+ *
+ **************************************************************************/
+
+int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			   unsigned int len, gfp_t gfp_flags)
+{
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, gfp_flags);
+	if (!buffer->addr)
+		return -ENOMEM;
+	buffer->len = len;
+	return 0;
+}
+
+void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+	if (buffer->addr) {
+		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+				  buffer->addr, buffer->dma_addr);
+		buffer->addr = NULL;
+	}
+}
+
+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_siena_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
+
+void efx_siena_event_test_start(struct efx_channel *channel)
+{
+	channel->event_test_cpu = -1;
+	smp_wmb();
+	channel->efx->type->ev_test_generate(channel);
+}
+
+int efx_siena_irq_test_start(struct efx_nic *efx)
+{
+	efx->last_irq_cpu = -1;
+	smp_wmb();
+	return efx->type->irq_test_generate(efx);
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int efx_siena_init_interrupt(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	unsigned int n_irqs;
+	int rc;
+
+	if (!EFX_INT_MODE_USE_MSI(efx)) {
+		rc = request_irq(efx->legacy_irq,
+				 efx->type->irq_handle_legacy, IRQF_SHARED,
+				 efx->name, efx);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to hook legacy IRQ %d\n",
+				  efx->pci_dev->irq);
+			goto fail1;
+		}
+		efx->irqs_hooked = true;
+		return 0;
+	}
+
+#ifdef CONFIG_RFS_ACCEL
+	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+		efx->net_dev->rx_cpu_rmap =
+			alloc_irq_cpu_rmap(efx->n_rx_channels);
+		if (!efx->net_dev->rx_cpu_rmap) {
+			rc = -ENOMEM;
+			goto fail1;
+		}
+	}
+#endif
+
+	/* Hook MSI or MSI-X interrupt */
+	n_irqs = 0;
+	efx_for_each_channel(channel, efx) {
+		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
+				 IRQF_PROBE_SHARED, /* Not shared */
+				 efx->msi_context[channel->channel].name,
+				 &efx->msi_context[channel->channel]);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to hook IRQ %d\n", channel->irq);
+			goto fail2;
+		}
+		++n_irqs;
+
+#ifdef CONFIG_RFS_ACCEL
+		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
+		    channel->channel < efx->n_rx_channels) {
+			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+					      channel->irq);
+			if (rc)
+				goto fail2;
+		}
+#endif
+	}
+
+	efx->irqs_hooked = true;
+	return 0;
+
+ fail2:
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+	efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+	efx_for_each_channel(channel, efx) {
+		if (n_irqs-- == 0)
+			break;
+		free_irq(channel->irq, &efx->msi_context[channel->channel]);
+	}
+ fail1:
+	return rc;
+}
+
+void efx_siena_fini_interrupt(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+	efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+
+	if (!efx->irqs_hooked)
+		return;
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/*
Disable MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + free_irq(channel->irq, + &efx->msi_context[channel->channel]); + } else { + /* Disable legacy interrupt */ + free_irq(efx->legacy_irq, efx); + } + efx->irqs_hooked = false; +} + +/* Register dump */ + +#define REGISTER_REVISION_FA 1 +#define REGISTER_REVISION_FB 2 +#define REGISTER_REVISION_FC 3 +#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */ +#define REGISTER_REVISION_ED 4 +#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */ + +struct efx_nic_reg { + u32 offset:24; + u32 min_revision:3, max_revision:3; +}; + +#define REGISTER(name, arch, min_rev, max_rev) { \ + arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev \ +} +#define REGISTER_AA(name) REGISTER(name, F, A, A) +#define REGISTER_AB(name) REGISTER(name, F, A, B) +#define REGISTER_AZ(name) REGISTER(name, F, A, Z) +#define REGISTER_BB(name) REGISTER(name, F, B, B) +#define REGISTER_BZ(name) REGISTER(name, F, B, Z) +#define REGISTER_CZ(name) REGISTER(name, F, C, Z) + +static const struct efx_nic_reg efx_nic_regs[] = { + REGISTER_AZ(ADR_REGION), + REGISTER_AZ(INT_EN_KER), + REGISTER_BZ(INT_EN_CHAR), + REGISTER_AZ(INT_ADR_KER), + REGISTER_BZ(INT_ADR_CHAR), + /* INT_ACK_KER is WO */ + /* INT_ISR0 is RC */ + REGISTER_AZ(HW_INIT), + REGISTER_CZ(USR_EV_CFG), + REGISTER_AB(EE_SPI_HCMD), + REGISTER_AB(EE_SPI_HADR), + REGISTER_AB(EE_SPI_HDATA), + REGISTER_AB(EE_BASE_PAGE), + REGISTER_AB(EE_VPD_CFG0), + /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ + /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ + /* PCIE_CORE_INDIRECT is indirect */ + REGISTER_AB(NIC_STAT), + REGISTER_AB(GPIO_CTL), + REGISTER_AB(GLB_CTL), + /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ + REGISTER_BZ(DP_CTRL), + REGISTER_AZ(MEM_STAT), + REGISTER_AZ(CS_DEBUG), + REGISTER_AZ(ALTERA_BUILD), + REGISTER_AZ(CSR_SPARE), + REGISTER_AB(PCIE_SD_CTL0123), + REGISTER_AB(PCIE_SD_CTL45), + REGISTER_AB(PCIE_PCS_CTL_STAT), + /* DEBUG_DATA_OUT is not used */ + /* DRV_EV is WO */ + REGISTER_AZ(EVQ_CTL), + REGISTER_AZ(EVQ_CNT1), + REGISTER_AZ(EVQ_CNT2), + REGISTER_AZ(BUF_TBL_CFG), + REGISTER_AZ(SRM_RX_DC_CFG), + REGISTER_AZ(SRM_TX_DC_CFG), + REGISTER_AZ(SRM_CFG), + /* BUF_TBL_UPD is WO */ + REGISTER_AZ(SRM_UPD_EVQ), + REGISTER_AZ(SRAM_PARITY), + REGISTER_AZ(RX_CFG), + REGISTER_BZ(RX_FILTER_CTL), + /* RX_FLUSH_DESCQ is WO */ + REGISTER_AZ(RX_DC_CFG), + REGISTER_AZ(RX_DC_PF_WM), + REGISTER_BZ(RX_RSS_TKEY), + /* RX_NODESC_DROP is RC */ + REGISTER_AA(RX_SELF_RST), + /* RX_DEBUG, RX_PUSH_DROP are not used */ + REGISTER_CZ(RX_RSS_IPV6_REG1), + REGISTER_CZ(RX_RSS_IPV6_REG2), + REGISTER_CZ(RX_RSS_IPV6_REG3), + /* TX_FLUSH_DESCQ is WO */ + REGISTER_AZ(TX_DC_CFG), + REGISTER_AA(TX_CHKSM_CFG), + REGISTER_AZ(TX_CFG), + /* TX_PUSH_DROP is not used */ + REGISTER_AZ(TX_RESERVED), + REGISTER_BZ(TX_PACE), + /* TX_PACE_DROP_QID is RC */ + REGISTER_BB(TX_VLAN), + REGISTER_BZ(TX_IPFIL_PORTEN), + REGISTER_AB(MD_TXD), + REGISTER_AB(MD_RXD), + REGISTER_AB(MD_CS), + REGISTER_AB(MD_PHY_ADR), + REGISTER_AB(MD_ID), + /* MD_STAT is RC */ + REGISTER_AB(MAC_STAT_DMA), + REGISTER_AB(MAC_CTRL), + REGISTER_BB(GEN_MODE), + REGISTER_AB(MAC_MC_HASH_REG0), + REGISTER_AB(MAC_MC_HASH_REG1), + REGISTER_AB(GM_CFG1), + REGISTER_AB(GM_CFG2), + /* GM_IPG and GM_HD are not used */ + REGISTER_AB(GM_MAX_FLEN), + /* GM_TEST is not used */ + REGISTER_AB(GM_ADR1), + REGISTER_AB(GM_ADR2), + REGISTER_AB(GMF_CFG0), + REGISTER_AB(GMF_CFG1), + 
REGISTER_AB(GMF_CFG2), + REGISTER_AB(GMF_CFG3), + REGISTER_AB(GMF_CFG4), + REGISTER_AB(GMF_CFG5), + REGISTER_BB(TX_SRC_MAC_CTL), + REGISTER_AB(XM_ADR_LO), + REGISTER_AB(XM_ADR_HI), + REGISTER_AB(XM_GLB_CFG), + REGISTER_AB(XM_TX_CFG), + REGISTER_AB(XM_RX_CFG), + REGISTER_AB(XM_MGT_INT_MASK), + REGISTER_AB(XM_FC), + REGISTER_AB(XM_PAUSE_TIME), + REGISTER_AB(XM_TX_PARAM), + REGISTER_AB(XM_RX_PARAM), + /* XM_MGT_INT_MSK (note no 'A') is RC */ + REGISTER_AB(XX_PWR_RST), + REGISTER_AB(XX_SD_CTL), + REGISTER_AB(XX_TXDRV_CTL), + /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ + /* XX_CORE_STAT is partly RC */ +}; + +struct efx_nic_reg_table { + u32 offset:24; + u32 min_revision:3, max_revision:3; + u32 step:6, rows:21; +}; + +#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \ + offset, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev, \ + step, rows \ +} +#define REGISTER_TABLE(name, arch, min_rev, max_rev) \ + REGISTER_TABLE_DIMENSIONS( \ + name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + arch, min_rev, max_rev, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) +#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A) +#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z) +#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B) +#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z) +#define REGISTER_TABLE_BB_CZ(name) \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \ + FR_BZ_ ## name ## _STEP, \ + FR_BB_ ## name ## _ROWS), \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \ + FR_BZ_ ## name ## _STEP, \ + FR_CZ_ ## name ## _ROWS) +#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z) + +static const struct efx_nic_reg_table efx_nic_reg_tables[] = { + /* DRIVER is not used */ + /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ + REGISTER_TABLE_BB(TX_IPFIL_TBL), + REGISTER_TABLE_BB(TX_SRC_MAC_TBL), + REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), + REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), + REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), + /* We can't reasonably read all of the buffer table (up to 8MB!). + * However this driver will only use a few entries. Reading + * 1K entries allows for some expansion of queue count and + * size before we need to change the version. 
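+	 * As an illustrative size check: with the dimensions used below
+	 * (step 8, 1024 rows), a matching entry contributes
+	 * 1024 * min(8, 16) = 8192 bytes to the dump length computed
+	 * by efx_siena_get_regs_len().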
*/ + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, + F, A, A, 8, 1024), + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, + F, B, Z, 8, 1024), + REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), + REGISTER_TABLE_BB_CZ(TIMER_TBL), + REGISTER_TABLE_BB_CZ(TX_PACE_TBL), + REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), + /* TX_FILTER_TBL0 is huge and not used by this driver */ + REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), + REGISTER_TABLE_CZ(MC_TREG_SMEM), + /* MSIX_PBA_TABLE is not mapped */ + /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ + REGISTER_TABLE_BZ(RX_FILTER_TBL0), +}; + +size_t efx_siena_get_regs_len(struct efx_nic *efx) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + size_t len = 0; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) + len += sizeof(efx_oword_t); + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) + if (efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision) + len += table->rows * min_t(size_t, table->step, 16); + + return len; +} + +void efx_siena_get_regs(struct efx_nic *efx, void *buf) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) { + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) { + efx_reado(efx, (efx_oword_t *)buf, reg->offset); + buf += sizeof(efx_oword_t); + } + } + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) { + size_t size, i; + + if (!(efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision)) + continue; + + size = min_t(size_t, table->step, 16); + + for (i = 0; i < table->rows; i++) { + switch (table->step) { + case 4: /* 32-bit SRAM */ + efx_readd(efx, buf, table->offset + 4 * i); + break; + case 8: /* 64-bit SRAM */ + efx_sram_readq(efx, + efx->membase + table->offset, + buf, i); + break; + case 16: /* 128-bit-readable register */ + efx_reado_table(efx, buf, table->offset, i); + break; + case 32: /* 128-bit register, interleaved */ + efx_reado_table(efx, buf, table->offset, 2 * i); + break; + default: + WARN_ON(1); + return; + } + buf += size; + } + } +} + +/** + * efx_siena_describe_stats - Describe supported statistics for ethtool + * @desc: Array of &struct efx_hw_stat_desc describing the statistics + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @names: Buffer to copy names to, or %NULL. The names are copied + * starting at intervals of %ETH_GSTRING_LEN bytes. + * + * Returns the number of visible statistics, i.e. the number of set + * bits in the first @count bits of @mask for which a name is defined. + */ +size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names) +{ + size_t visible = 0; + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].name) { + if (names) { + strlcpy(names, desc[index].name, + ETH_GSTRING_LEN); + names += ETH_GSTRING_LEN; + } + ++visible; + } + } + + return visible; +} + +/** + * efx_siena_update_stats - Convert statistics DMA buffer to array of u64 + * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer + * layout. 
DMA widths of 0, 16, 32 and 64 are supported; where + * the width is specified as 0 the corresponding element of + * @stats is not updated. + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @stats: Buffer to update with the converted statistics. The length + * of this array must be at least @count. + * @dma_buf: DMA buffer containing hardware statistics + * @accumulate: If set, the converted values will be added rather than + * directly stored to the corresponding elements of @stats + */ +void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, + u64 *stats, const void *dma_buf, bool accumulate) +{ + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].dma_width) { + const void *addr = dma_buf + desc[index].offset; + u64 val; + + switch (desc[index].dma_width) { + case 16: + val = le16_to_cpup((__le16 *)addr); + break; + case 32: + val = le32_to_cpup((__le32 *)addr); + break; + case 64: + val = le64_to_cpup((__le64 *)addr); + break; + default: + WARN_ON(1); + val = 0; + break; + } + + if (accumulate) + stats[index] += val; + else + stats[index] = val; + } + } +} + +void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) +{ + /* if down, or this is the first update after coming up */ + if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state) + efx->rx_nodesc_drops_while_down += + *rx_nodesc_drops - efx->rx_nodesc_drops_total; + efx->rx_nodesc_drops_total = *rx_nodesc_drops; + efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); + *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; +} diff --git a/drivers/net/ethernet/sfc/siena/nic.h b/drivers/net/ethernet/sfc/siena/nic.h new file mode 100644 index 0000000000..6def31070e --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/nic.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ */ + +#ifndef EFX_NIC_H +#define EFX_NIC_H + +#include "nic_common.h" +#include "efx.h" + +u32 efx_farch_fpga_ver(struct efx_nic *efx); + +enum { + PHY_TYPE_NONE = 0, + PHY_TYPE_TXC43128 = 1, + PHY_TYPE_88E1111 = 2, + PHY_TYPE_SFX7101 = 3, + PHY_TYPE_QT2022C2 = 4, + PHY_TYPE_PM8358 = 6, + PHY_TYPE_SFT9001A = 8, + PHY_TYPE_QT2025C = 9, + PHY_TYPE_SFT9001B = 10, +}; + +enum { + SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT, + SIENA_STAT_tx_good_bytes, + SIENA_STAT_tx_bad_bytes, + SIENA_STAT_tx_packets, + SIENA_STAT_tx_bad, + SIENA_STAT_tx_pause, + SIENA_STAT_tx_control, + SIENA_STAT_tx_unicast, + SIENA_STAT_tx_multicast, + SIENA_STAT_tx_broadcast, + SIENA_STAT_tx_lt64, + SIENA_STAT_tx_64, + SIENA_STAT_tx_65_to_127, + SIENA_STAT_tx_128_to_255, + SIENA_STAT_tx_256_to_511, + SIENA_STAT_tx_512_to_1023, + SIENA_STAT_tx_1024_to_15xx, + SIENA_STAT_tx_15xx_to_jumbo, + SIENA_STAT_tx_gtjumbo, + SIENA_STAT_tx_collision, + SIENA_STAT_tx_single_collision, + SIENA_STAT_tx_multiple_collision, + SIENA_STAT_tx_excessive_collision, + SIENA_STAT_tx_deferred, + SIENA_STAT_tx_late_collision, + SIENA_STAT_tx_excessive_deferred, + SIENA_STAT_tx_non_tcpudp, + SIENA_STAT_tx_mac_src_error, + SIENA_STAT_tx_ip_src_error, + SIENA_STAT_rx_bytes, + SIENA_STAT_rx_good_bytes, + SIENA_STAT_rx_bad_bytes, + SIENA_STAT_rx_packets, + SIENA_STAT_rx_good, + SIENA_STAT_rx_bad, + SIENA_STAT_rx_pause, + SIENA_STAT_rx_control, + SIENA_STAT_rx_unicast, + SIENA_STAT_rx_multicast, + SIENA_STAT_rx_broadcast, + SIENA_STAT_rx_lt64, + SIENA_STAT_rx_64, + SIENA_STAT_rx_65_to_127, + SIENA_STAT_rx_128_to_255, + SIENA_STAT_rx_256_to_511, + SIENA_STAT_rx_512_to_1023, + SIENA_STAT_rx_1024_to_15xx, + SIENA_STAT_rx_15xx_to_jumbo, + SIENA_STAT_rx_gtjumbo, + SIENA_STAT_rx_bad_gtjumbo, + SIENA_STAT_rx_overflow, + SIENA_STAT_rx_false_carrier, + SIENA_STAT_rx_symbol_error, + SIENA_STAT_rx_align_error, + SIENA_STAT_rx_length_error, + SIENA_STAT_rx_internal_error, + SIENA_STAT_rx_nodesc_drop_cnt, + SIENA_STAT_COUNT +}; + +/** + * struct siena_nic_data - Siena NIC state + * @efx: Pointer back to main interface structure + * @wol_filter_id: Wake-on-LAN packet filter id + * @stats: Hardware statistics + * @vf: Array of &struct siena_vf objects + * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. + * @vfdi_status: Common VFDI status page to be dmad to VF address space. + * @local_addr_list: List of local addresses. Protected by %local_lock. + * @local_page_list: List of DMA addressable pages used to broadcast + * %local_addr_list. Protected by %local_lock. + * @local_lock: Mutex protecting %local_addr_list and %local_page_list. + * @peer_work: Work item to broadcast peer addresses to VMs. 
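+ *
+ * The fields from @vf onward exist only when the driver is built
+ * with CONFIG_SFC_SIENA_SRIOV, per the #ifdef in the definition below.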
+ */ +struct siena_nic_data { + struct efx_nic *efx; + int wol_filter_id; + u64 stats[SIENA_STAT_COUNT]; +#ifdef CONFIG_SFC_SIENA_SRIOV + struct siena_vf *vf; + struct efx_channel *vfdi_channel; + unsigned vf_buftbl_base; + struct efx_buffer vfdi_status; + struct list_head local_addr_list; + struct list_head local_page_list; + struct mutex local_lock; + struct work_struct peer_work; +#endif +}; + +extern const struct efx_nic_type siena_a0_nic_type; + +int falcon_probe_board(struct efx_nic *efx, u16 revision_info); + +/* Falcon/Siena queue operations */ +int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); +void efx_farch_tx_init(struct efx_tx_queue *tx_queue); +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); +void efx_farch_tx_write(struct efx_tx_queue *tx_queue); +unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len); +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); +void efx_farch_rx_init(struct efx_rx_queue *rx_queue); +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); +void efx_farch_rx_write(struct efx_rx_queue *rx_queue); +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); +int efx_farch_ev_probe(struct efx_channel *channel); +int efx_farch_ev_init(struct efx_channel *channel); +void efx_farch_ev_fini(struct efx_channel *channel); +void efx_farch_ev_remove(struct efx_channel *channel); +int efx_farch_ev_process(struct efx_channel *channel, int quota); +void efx_farch_ev_read_ack(struct efx_channel *channel); +void efx_farch_ev_test_generate(struct efx_channel *channel); + +/* Falcon/Siena filter operations */ +int efx_farch_filter_table_probe(struct efx_nic *efx); +void efx_farch_filter_table_restore(struct efx_nic *efx); +void efx_farch_filter_table_remove(struct efx_nic *efx); +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); +s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, + bool replace); +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, u32 filter_id, + struct efx_filter_spec *); +int efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); +s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, u32 *buf, + u32 size); +#ifdef CONFIG_RFS_ACCEL +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif +void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); + +/* Falcon/Siena interrupts */ +void efx_farch_irq_enable_master(struct efx_nic *efx); +int efx_farch_irq_test_generate(struct efx_nic *efx); +void efx_farch_irq_disable_master(struct efx_nic *efx); +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); + +/* Global Resources */ +void efx_siena_prepare_flush(struct efx_nic *efx); +int efx_farch_fini_dmaq(struct efx_nic *efx); +void efx_farch_finish_flr(struct efx_nic *efx); +void siena_finish_flush(struct efx_nic *efx); +void falcon_start_nic_stats(struct efx_nic *efx); +void 
falcon_stop_nic_stats(struct efx_nic *efx); +int falcon_reset_xaui(struct efx_nic *efx); +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); +void efx_farch_init_common(struct efx_nic *efx); +void efx_farch_rx_push_indir_table(struct efx_nic *efx); +void efx_farch_rx_pull_indir_table(struct efx_nic *efx); + +/* Tests */ +struct efx_farch_register_test { + unsigned address; + efx_oword_t mask; +}; + +int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs); + +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event); + +#endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h new file mode 100644 index 0000000000..3af0405eea --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/nic_common.h @@ -0,0 +1,251 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + */ + +#ifndef EFX_NIC_COMMON_H +#define EFX_NIC_COMMON_H + +#include "net_driver.h" +#include "efx_common.h" +#include "mcdi.h" +#include "ptp.h" + +enum { + /* Revisions 0-2 were Falcon A0, A1 and B0 respectively. + * They are not supported by this driver but these revision numbers + * form part of the ethtool API for register dumping. + */ + EFX_REV_SIENA_A0 = 3, + EFX_REV_HUNT_A0 = 4, + EFX_REV_EF100 = 5, +}; + +static inline int efx_nic_rev(struct efx_nic *efx) +{ + return efx->type->revision; +} + +/* Read the current event from the event queue */ +static inline efx_qword_t *efx_event(struct efx_channel *channel, + unsigned int index) +{ + return ((efx_qword_t *) (channel->eventq.buf.addr)) + + (index & channel->eventq_mask); +} + +/* See if an event is present + * + * We check both the high and low dword of the event for all ones. We + * wrote all ones when we cleared the event, and no valid event can + * have all ones in either its high or low dwords. This approach is + * robust against reordering. + * + * Note that using a single 64-bit comparison is incorrect; even + * though the CPU read will be atomic, the DMA write may not be. + */ +static inline int efx_event_present(efx_qword_t *event) +{ + return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | + EFX_DWORD_IS_ALL_ONES(event->dword[1])); +} + +/* Returns a pointer to the specified transmit descriptor in the TX + * descriptor queue belonging to the specified channel. + */ +static inline efx_qword_t * +efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) +{ + return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; +} + +/* Report whether this TX queue would be empty for the given write_count. + * May return false negative. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count) +{ + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); + + if (empty_read_count == 0) + return false; + + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; +} + +/* Decide whether to push a TX descriptor to the NIC vs merely writing + * the doorbell. This can reduce latency when we are adding a single + * descriptor to an empty queue, but is otherwise pointless. 
Further, + * Falcon and Siena have hardware bugs (SF bug 33851) that may be + * triggered if we don't check this. + * We use the write_count used for the last doorbell push, to get the + * NIC's view of the tx queue. + */ +static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count); + + tx_queue->empty_read_count = 0; + return was_empty && tx_queue->write_count - write_count == 1; +} + +/* Returns a pointer to the specified descriptor in the RX descriptor queue */ +static inline efx_qword_t * +efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; +} + +/* Alignment of PCIe DMA boundaries (4KB) */ +#define EFX_PAGE_SIZE 4096 +/* Size and alignment of buffer table entries (same) */ +#define EFX_BUF_SIZE EFX_PAGE_SIZE + +/* NIC-generic software stats */ +enum { + GENERIC_STAT_rx_noskb_drops, + GENERIC_STAT_rx_nodesc_trunc, + GENERIC_STAT_COUNT +}; + +#define EFX_GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +/* TX data path */ +static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) +{ + return tx_queue->efx->type->tx_probe(tx_queue); +} +static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_init(tx_queue); +} +static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) +{ + if (tx_queue->efx->type->tx_remove) + tx_queue->efx->type->tx_remove(tx_queue); +} +static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_write(tx_queue); +} + +/* RX data path */ +static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_probe(rx_queue); +} +static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_init(rx_queue); +} +static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_remove(rx_queue); +} +static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_write(rx_queue); +} +static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_defer_refill(rx_queue); +} + +/* Event data path */ +static inline int efx_nic_probe_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_probe(channel); +} +static inline int efx_nic_init_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_init(channel); +} +static inline void efx_nic_fini_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_fini(channel); +} +static inline void efx_nic_remove_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_remove(channel); +} +static inline int +efx_nic_process_eventq(struct efx_channel *channel, int quota) +{ + return channel->efx->type->ev_process(channel, quota); +} +static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) +{ + channel->efx->type->ev_read_ack(channel); +} + +void efx_siena_event_test_start(struct efx_channel *channel); + +bool efx_siena_event_present(struct efx_channel *channel); + +static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ + if (efx->type->sensor_event) + efx->type->sensor_event(efx, ev); +} + +static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx) +{ + return efx->type->rx_recycle_ring_size(efx); +} + +/* Some statistics are computed as A - B where A and B each 
increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+	if ((s64)(diff - *stat) > 0)
+		*stat = diff;
+}
+
+/* Interrupts */
+int efx_siena_init_interrupt(struct efx_nic *efx);
+int efx_siena_irq_test_start(struct efx_nic *efx);
+void efx_siena_fini_interrupt(struct efx_nic *efx);
+
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+	return READ_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+	return READ_ONCE(efx->last_irq_cpu);
+}
+
+/* Global Resources */
+int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			   unsigned int len, gfp_t gfp_flags);
+void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+size_t efx_siena_get_regs_len(struct efx_nic *efx);
+void efx_siena_get_regs(struct efx_nic *efx, void *buf);
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+				const unsigned long *mask, u8 *names);
+void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			    const unsigned long *mask, u64 *stats,
+			    const void *dma_buf, bool accumulate);
+void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+
+#define EFX_MAX_FLUSH_TIME 5000
+
+#endif /* EFX_NIC_COMMON_H */
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
new file mode 100644
index 0000000000..7c46752e6e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -0,0 +1,2201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities.  Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent processing;
+ * this is because the MC operations are relatively long and would block
+ * NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ *	The event contains the packet's UUID and sequence number, together
+ *	with the hardware timestamp.  The PTP receive packet queue is searched
+ *	for this UUID/sequence number and, if found, put on a pending queue.
+ *	Packets not matching are delivered without timestamps (MCDI events will
+ *	always arrive after the actual packet).
+ *	It is important for the operation of the PTP protocol that the ordering
+ *	of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ *	If work waiting, synchronise host/hardware time
+ *
+ *	Transmit: send packet through MC, which returns the transmission time
+ *	that is converted to an appropriate timestamp.
+ *
+ *	Receive: the packet's reception time is converted to an appropriate
+ *	timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "tx.h"
+#include "nic.h" /* indirectly includes ptp.h */
+
+/* Maximum number of events expected to make up a PTP event */
+#define MAX_EVENT_FRAGS 3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define MAX_SYNCHRONISE_WAIT_MS 2
+
+/* How long, at most, to spend synchronising */
+#define SYNCHRONISE_PERIOD_NS 250000
+
+/* How often to update the shared memory time */
+#define SYNCHRONISATION_GRANULARITY_NS 200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define MAX_SYNCHRONISATION_NS 1000
+
+/* How many (MC) receive events can be queued */
+#define MAX_RECEIVE_EVENTS 8
+
+/* Length of (modified) moving average. */
+#define AVERAGE_LENGTH 16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS 10
+
+/* Offsets into PTP packet for identification.  These offsets are from the
+ * start of the IP header, not the MAC header.  Note that neither PTP V1 nor
+ * PTP V2 permit the use of IPV4 options.
+ */
+#define PTP_DPORT_OFFSET 22
+
+#define PTP_V1_VERSION_LENGTH 2
+#define PTP_V1_VERSION_OFFSET 28
+
+#define PTP_V1_UUID_LENGTH 6
+#define PTP_V1_UUID_OFFSET 50
+
+#define PTP_V1_SEQUENCE_LENGTH 2
+#define PTP_V1_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V1_MIN_LENGTH 64
+
+#define PTP_V2_VERSION_LENGTH 1
+#define PTP_V2_VERSION_OFFSET 29
+
+#define PTP_V2_UUID_LENGTH 8
+#define PTP_V2_UUID_OFFSET 48
+
+/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and PortNumber (2),
+ * the MC only captures the last six bytes of the clock identity. These values
+ * reflect those, not the ones used in the standard.  The standard permits
+ * mapping of V1 UUIDs to V2 UUIDs with these same values.
+ */
+#define PTP_V2_MC_UUID_LENGTH 6
+#define PTP_V2_MC_UUID_OFFSET 50
+
+#define PTP_V2_SEQUENCE_LENGTH 2
+#define PTP_V2_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V2_MIN_LENGTH 63
+
+#define PTP_MIN_LENGTH 63
+
+#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
+#define PTP_EVENT_PORT 319
+#define PTP_GENERAL_PORT 320
+
+/* Annoyingly the format of the version numbers is different between
+ * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
+ */
+#define PTP_VERSION_V1 1
+
+#define PTP_VERSION_V2 2
+#define PTP_VERSION_V2_MASK 0x0f
+
+enum ptp_packet_state {
+	PTP_PACKET_STATE_UNMATCHED = 0,
+	PTP_PACKET_STATE_MATCHED,
+	PTP_PACKET_STATE_TIMED_OUT,
+	PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* NIC synchronised with single word of time only comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
+ */
+#define MC_NANOSECOND_BITS 30
+#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1)
+#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB 1000000
+
+/* Precalculate scale word to avoid long long division at runtime */
+/* This is equivalent to 2^66 / 10^9.
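+ * (Illustrative check: 10^9 = 2^9 * 1953125, so 2^66 / 10^9
+ * reduces to 2^57 / 1953125, which is the expression below.)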
*/ +#define PPB_SCALE_WORD ((1LL << (57)) / 1953125LL) + +/* How much to shift down after scaling to convert to FP40 */ +#define PPB_SHIFT_FP40 26 +/* ... and FP44. */ +#define PPB_SHIFT_FP44 22 + +#define PTP_SYNC_ATTEMPTS 4 + +/** + * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area. + * @words: UUID and (partial) sequence number + * @expiry: Time after which the packet should be delivered irrespective of + * event arrival. + * @state: The state of the packet - whether it is ready for processing or + * whether that is of no interest. + */ +struct efx_ptp_match { + u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)]; + unsigned long expiry; + enum ptp_packet_state state; +}; + +/** + * struct efx_ptp_event_rx - A PTP receive event (from MC) + * @link: list of events + * @seq0: First part of (PTP) UUID + * @seq1: Second part of (PTP) UUID and sequence number + * @hwtimestamp: Event timestamp + * @expiry: Time which the packet arrived + */ +struct efx_ptp_event_rx { + struct list_head link; + u32 seq0; + u32 seq1; + ktime_t hwtimestamp; + unsigned long expiry; +}; + +/** + * struct efx_ptp_timeset - Synchronisation between host and MC + * @host_start: Host time immediately before hardware timestamp taken + * @major: Hardware timestamp, major + * @minor: Hardware timestamp, minor + * @host_end: Host time immediately after hardware timestamp taken + * @wait: Number of NIC clock ticks between hardware timestamp being read and + * host end time being seen + * @window: Difference of host_end and host_start + * @valid: Whether this timeset is valid + */ +struct efx_ptp_timeset { + u32 host_start; + u32 major; + u32 minor; + u32 host_end; + u32 wait; + u32 window; /* Derived: end - start, allowing for wrap */ +}; + +/** + * struct efx_ptp_data - Precision Time Protocol (PTP) state + * @efx: The NIC context + * @channel: The PTP channel (Siena only) + * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are + * separate events) + * @rxq: Receive SKB queue (awaiting timestamps) + * @txq: Transmit SKB queue + * @evt_list: List of MC receive events awaiting packets + * @evt_free_list: List of free events + * @evt_lock: Lock for manipulating evt_list and evt_free_list + * @rx_evts: Instantiated events (on evt_list and evt_free_list) + * @workwq: Work queue for processing pending PTP operations + * @work: Work task + * @reset_required: A serious error has occurred and the PTP task needs to be + * reset (disable, enable). + * @rxfilter_event: Receive filter when operating + * @rxfilter_general: Receive filter when operating + * @rxfilter_installed: Receive filter installed + * @config: Current timestamp configuration + * @enabled: PTP operation enabled + * @mode: Mode in which PTP operating (PTP version) + * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time + * @nic_to_kernel_time: Function to convert from NIC to kernel time + * @nic_time: contains time details + * @nic_time.minor_max: Wrap point for NIC minor times + * @nic_time.sync_event_diff_min: Minimum acceptable difference between time + * in packet prefix and last MCDI time sync event i.e. how much earlier than + * the last sync event time a packet timestamp can be. + * @nic_time.sync_event_diff_max: Maximum acceptable difference between time + * in packet prefix and last MCDI time sync event i.e. how much later than + * the last sync event time a packet timestamp can be. + * @nic_time.sync_event_minor_shift: Shift required to make minor time from + * field in MCDI time sync event. 
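+ * (For example, the 27-bit minor time format is paired with a shift
+ * of 19 by the format switch in efx_ptp_get_attributes().)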
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window + * @capabilities: Capabilities flags from the NIC + * @ts_corrections: contains corrections details + * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit + * timestamps + * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive + * timestamps + * @ts_corrections.pps_out: PPS output error (information only) + * @ts_corrections.pps_in: Required driver correction of PPS input timestamps + * @ts_corrections.general_tx: Required driver correction of general packet + * transmit timestamps + * @ts_corrections.general_rx: Required driver correction of general packet + * receive timestamps + * @evt_frags: Partly assembled PTP events + * @evt_frag_idx: Current fragment number + * @evt_code: Last event code + * @start: Address at which MC indicates ready for synchronisation + * @host_time_pps: Host time at last PPS + * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion + * frequency adjustment into a fixed point fractional nanosecond format. + * @current_adjfreq: Current ppb adjustment. + * @phc_clock: Pointer to registered phc device (if primary function) + * @phc_clock_info: Registration structure for phc device + * @pps_work: pps work task for handling pps events + * @pps_workwq: pps work queue + * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled + * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids + * allocations in main data path). + * @good_syncs: Number of successful synchronisations. + * @fast_syncs: Number of synchronisations requiring short delay + * @bad_syncs: Number of failed synchronisations. + * @sync_timeouts: Number of synchronisation timeouts + * @no_time_syncs: Number of synchronisations with no good times. + * @invalid_sync_windows: Number of sync windows with bad durations. + * @undersize_sync_windows: Number of corrected sync windows that are too small + * @oversize_sync_windows: Number of corrected sync windows that are too large + * @rx_no_timestamp: Number of packets received without a timestamp. + * @timeset: Last set of synchronisation statistics. + * @xmit_skb: Transmit SKB function. 
+ */ +struct efx_ptp_data { + struct efx_nic *efx; + struct efx_channel *channel; + bool rx_ts_inline; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct list_head evt_list; + struct list_head evt_free_list; + spinlock_t evt_lock; + struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; + struct workqueue_struct *workwq; + struct work_struct work; + bool reset_required; + u32 rxfilter_event; + u32 rxfilter_general; + bool rxfilter_installed; + struct hwtstamp_config config; + bool enabled; + unsigned int mode; + void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor); + ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor, + s32 correction); + struct { + u32 minor_max; + u32 sync_event_diff_min; + u32 sync_event_diff_max; + unsigned int sync_event_minor_shift; + } nic_time; + unsigned int min_synchronisation_ns; + unsigned int capabilities; + struct { + s32 ptp_tx; + s32 ptp_rx; + s32 pps_out; + s32 pps_in; + s32 general_tx; + s32 general_rx; + } ts_corrections; + efx_qword_t evt_frags[MAX_EVENT_FRAGS]; + int evt_frag_idx; + int evt_code; + struct efx_buffer start; + struct pps_event_time host_time_pps; + unsigned int adjfreq_ppb_shift; + s64 current_adjfreq; + struct ptp_clock *phc_clock; + struct ptp_clock_info phc_clock_info; + struct work_struct pps_work; + struct workqueue_struct *pps_workwq; + bool nic_ts_enabled; + efx_dword_t txbuf[MCDI_TX_BUF_LEN(MC_CMD_PTP_IN_TRANSMIT_LENMAX)]; + + unsigned int good_syncs; + unsigned int fast_syncs; + unsigned int bad_syncs; + unsigned int sync_timeouts; + unsigned int no_time_syncs; + unsigned int invalid_sync_windows; + unsigned int undersize_sync_windows; + unsigned int oversize_sync_windows; + unsigned int rx_no_timestamp; + struct efx_ptp_timeset + timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; + void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb); +}; + +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts); +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); + +bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx) +{ + return efx_has_cap(efx, TX_MAC_TIMESTAMPING); +} + +/* PTP 'extra' channel is still a traffic channel, but we only create TX queues + * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit. 
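+ *
+ * (Siena itself does not advertise the TX_MAC_TIMESTAMPING capability;
+ * MAC-derived timestamps arrived with the 8000 series, as noted further
+ * down, so on Siena PTP transmit goes through the MC.)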
+ */ +static bool efx_ptp_want_txqs(struct efx_channel *channel) +{ + return efx_siena_ptp_use_mac_tx_timestamps(channel->efx); +} + +#define PTP_SW_STAT(ext_name, field_name) \ + { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) } +#define PTP_MC_STAT(ext_name, mcdi_name) \ + { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST } +static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = { + PTP_SW_STAT(ptp_good_syncs, good_syncs), + PTP_SW_STAT(ptp_fast_syncs, fast_syncs), + PTP_SW_STAT(ptp_bad_syncs, bad_syncs), + PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts), + PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs), + PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows), + PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows), + PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows), + PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp), + PTP_MC_STAT(ptp_tx_timestamp_packets, TX), + PTP_MC_STAT(ptp_rx_timestamp_packets, RX), + PTP_MC_STAT(ptp_timestamp_packets, TS), + PTP_MC_STAT(ptp_filter_matches, FM), + PTP_MC_STAT(ptp_non_filter_matches, NFM), +}; +#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc) +static const unsigned long efx_ptp_stat_mask[] = { + [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, +}; + +size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +{ + if (!efx->ptp_data) + return 0; + + return efx_siena_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, strings); +} + +size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN); + size_t i; + int rc; + + if (!efx->ptp_data) + return 0; + + /* Copy software statistics */ + for (i = 0; i < PTP_STAT_COUNT; i++) { + if (efx_ptp_stat_desc[i].dma_width) + continue; + stats[i] = *(unsigned int *)((char *)efx->ptp_data + + efx_ptp_stat_desc[i].offset); + } + + /* Fetch MC statistics. We *must* fill in all statistics or + * risk leaking kernel memory to userland, so if the MCDI + * request fails we pretend we got zeroes. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + memset(outbuf, 0, sizeof(outbuf)); + efx_siena_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, + stats, _MCDI_PTR(outbuf, 0), false); + + return PTP_STAT_COUNT; +} + +/* For Siena platforms NIC time is s and ns */ +static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec64 ts = ns_to_timespec64(ns); + *nic_major = (u32)ts.tv_sec; + *nic_minor = ts.tv_nsec; +} + +static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt = ktime_set(nic_major, nic_minor); + if (correction >= 0) + kt = ktime_add_ns(kt, (u64)correction); + else + kt = ktime_sub_ns(kt, (u64)-correction); + return kt; +} + +/* To convert from s27 format to ns we multiply then divide by a power of 2. + * For the conversion from ns to s27, the operation is also converted to a + * multiply and shift. 
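+ *
+ * For example (illustrative): 500000000ns converts to 2^26 units
+ * of 2^-27s, i.e. exactly half of S27_MINOR_MAX.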
+ */ +#define S27_TO_NS_SHIFT (27) +#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC) +#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT) +#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT) + +/* For Huntington platforms NIC time is in seconds and fractions of a second + * where the minor register only uses 27 bits in units of 2^-27s. + */ +static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec64 ts = ns_to_timespec64(ns); + u32 maj = (u32)ts.tv_sec; + u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + + (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); + + /* The conversion can result in the minor value exceeding the maximum. + * In this case, round up to the next second. + */ + if (min >= S27_MINOR_MAX) { + min -= S27_MINOR_MAX; + maj++; + } + + *nic_major = maj; + *nic_minor = min; +} + +static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor) +{ + u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC + + (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT); + return ktime_set(nic_major, ns); +} + +static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + /* Apply the correction and deal with carry */ + nic_minor += correction; + if ((s32)nic_minor < 0) { + nic_minor += S27_MINOR_MAX; + nic_major--; + } else if (nic_minor >= S27_MINOR_MAX) { + nic_minor -= S27_MINOR_MAX; + nic_major++; + } + + return efx_ptp_s27_to_ktime(nic_major, nic_minor); +} + +/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */ +static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec64 ts = ns_to_timespec64(ns); + + *nic_major = (u32)ts.tv_sec; + *nic_minor = ts.tv_nsec * 4; +} + +static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt; + + nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4); + correction = DIV_ROUND_CLOSEST(correction, 4); + + kt = ktime_set(nic_major, nic_minor); + + if (correction >= 0) + kt = ktime_add_ns(kt, (u64)correction); + else + kt = ktime_sub_ns(kt, (u64)-correction); + return kt; +} + +struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx) +{ + return efx->ptp_data ? efx->ptp_data->channel : NULL; +} + +static u32 last_sync_timestamp_major(struct efx_nic *efx) +{ + struct efx_channel *channel = efx_siena_ptp_channel(efx); + u32 major = 0; + + if (channel) + major = channel->sync_timestamp_major; + return major; +} + +/* The 8000 series and later can provide the time from the MAC, which is only + * 48 bits long and provides meta-information in the top 2 bits. + */ +static ktime_t +efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx, + struct efx_ptp_data *ptp, + u32 nic_major, u32 nic_minor, + s32 correction) +{ + u32 sync_timestamp; + ktime_t kt = { 0 }; + s16 delta; + + if (!(nic_major & 0x80000000)) { + WARN_ON_ONCE(nic_major >> 16); + + /* Medford provides 48 bits of timestamp, so we must get the top + * 16 bits from the timesync event state. + * + * We only have the lower 16 bits of the time now, but we do + * have a full resolution timestamp at some point in past. As + * long as the difference between the (real) now and the sync + * is less than 2^15, then we can reconstruct the difference + * between those two numbers using only the lower 16 bits of + * each. + * + * Put another way + * + * a - b = ((a mod k) - b) mod k + * + * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know + * (a mod k) and b, so can calculate the delta, a - b. 
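+ *
+ * Illustrative example: with k = 2^16, b = 0xfff0 and
+ * (a mod k) = 0x0010 give an s16 delta of +0x20, correctly
+ * recovering a = b + 0x20 across the 16-bit wrap.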
+ * + */ + sync_timestamp = last_sync_timestamp_major(efx); + + /* Because delta is s16 this does an implicit mask down to + * 16 bits which is what we need, assuming + * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that + * we can deal with the (unlikely) case of sync timestamps + * arriving from the future. + */ + delta = nic_major - sync_timestamp; + + /* Recover the fully specified time now, by applying the offset + * to the (fully specified) sync time. + */ + nic_major = sync_timestamp + delta; + + kt = ptp->nic_to_kernel_time(nic_major, nic_minor, + correction); + } + return kt; +} + +ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + ktime_t kt; + + if (efx_siena_ptp_use_mac_tx_timestamps(efx)) + kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp, + tx_queue->completed_timestamp_major, + tx_queue->completed_timestamp_minor, + ptp->ts_corrections.general_tx); + else + kt = ptp->nic_to_kernel_time( + tx_queue->completed_timestamp_major, + tx_queue->completed_timestamp_minor, + ptp->ts_corrections.general_tx); + return kt; +} + +/* Get PTP attributes and set up time conversions */ +static int efx_ptp_get_attributes(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN); + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + u32 fmt; + size_t out_len; + + /* Get the PTP attributes. If the NIC doesn't support the operation we + * use the default format for compatibility with older NICs i.e. + * seconds and nanoseconds. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) { + fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT); + } else if (rc == -EINVAL) { + fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS; + } else if (rc == -EPERM) { + pci_info(efx->pci_dev, "no PTP support\n"); + return rc; + } else { + efx_siena_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), + outbuf, sizeof(outbuf), rc); + return rc; + } + + switch (fmt) { + case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION: + ptp->ns_to_nic_time = efx_ptp_ns_to_s27; + ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction; + ptp->nic_time.minor_max = 1 << 27; + ptp->nic_time.sync_event_minor_shift = 19; + break; + case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS: + ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns; + ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction; + ptp->nic_time.minor_max = 1000000000; + ptp->nic_time.sync_event_minor_shift = 22; + break; + case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS: + ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns; + ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction; + ptp->nic_time.minor_max = 4000000000UL; + ptp->nic_time.sync_event_minor_shift = 24; + break; + default: + return -ERANGE; + } + + /* Precalculate acceptable difference between the minor time in the + * packet prefix and the last MCDI time sync event. 
We expect the
+	 * packet prefix timestamp to be after the sync event by up to one
+	 * sync event interval (0.25s) but we allow it to exceed this by a
+	 * fuzz factor of (0.1s).
+	 */
+	ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+		- (ptp->nic_time.minor_max / 10);
+	ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+		+ (ptp->nic_time.minor_max / 10);
+
+	/* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+	 * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function now may return
+	 * a value to use for the minimum acceptable corrected synchronization
+	 * window and may return further capabilities.
+	 * If we have the extra information store it. For older firmware that
+	 * does not implement the extended command use the default value.
+	 */
+	if (rc == 0 &&
+	    out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
+		ptp->min_synchronisation_ns =
+			MCDI_DWORD(outbuf,
+				   PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+	else
+		ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+	if (rc == 0 &&
+	    out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+		ptp->capabilities = MCDI_DWORD(outbuf,
+					       PTP_OUT_GET_ATTRIBUTES_CAPABILITIES);
+	else
+		ptp->capabilities = 0;
+
+	/* Set up the shift for conversion between frequency
+	 * adjustments in parts-per-billion and the fixed-point
+	 * fractional ns format that the adapter uses.
+	 */
+	if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
+		ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44;
+	else
+		ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40;
+
+	return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN);
+	int rc;
+	size_t out_len;
+
+	/* Get the timestamp corrections from the NIC. If this operation is
+	 * not supported (older NICs) then no correction is required.
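(Editor's aside, not part of the patch: the sync-window bounds set up in efx_ptp_get_attributes() above can be exercised in isolation. A minimal userspace sketch for the seconds-plus-nanoseconds format, where minor_max is 1e9; the sample timestamps are invented, and the classification mirrors __efx_siena_rx_skb_attach_timestamp() later in this patch.)

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t minor_max = 1000000000;	/* one second in ns */
    	uint32_t diff_max = minor_max / 4 + minor_max / 10; /* 0.25s + fuzz */
    	uint32_t diff_min = minor_max - minor_max / 10;     /* -0.1s fuzz  */
    	uint32_t sync_minor = 900000000;
    	uint32_t pkt_minor = 100000000;	/* 0.2s later, across the 1s wrap */
    	uint32_t diff = pkt_minor - sync_minor;

    	if (pkt_minor < sync_minor)
    		diff += minor_max;	/* difference modulo one second */

    	if (diff <= diff_max)
    		printf("packet ahead of sync by %u ns: accept\n", diff);
    	else if (diff >= diff_min)
    		printf("packet just behind sync: accept\n");
    	else
    		printf("out of tolerance: reject\n");
    	return 0;
    }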
+ */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, + MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) { + efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); + efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE); + efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT); + efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN); + + if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) { + efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX); + efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX); + } else { + efx->ptp_data->ts_corrections.general_tx = + efx->ptp_data->ts_corrections.ptp_tx; + efx->ptp_data->ts_corrections.general_rx = + efx->ptp_data->ts_corrections.ptp_rx; + } + } else if (rc == -EINVAL) { + efx->ptp_data->ts_corrections.ptp_tx = 0; + efx->ptp_data->ts_corrections.ptp_rx = 0; + efx->ptp_data->ts_corrections.pps_out = 0; + efx->ptp_data->ts_corrections.pps_in = 0; + efx->ptp_data->ts_corrections.general_tx = 0; + efx->ptp_data->ts_corrections.general_rx = 0; + } else { + efx_siena_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), + outbuf, sizeof(outbuf), rc); + return rc; + } + + return 0; +} + +/* Enable MCDI PTP support. */ +static int efx_ptp_enable(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, + efx->ptp_data->channel ? + efx->ptp_data->channel->channel : 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); + + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 0 : rc; + if (rc) + efx_siena_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_ENABLE_LEN, + outbuf, sizeof(outbuf), rc); + return rc; +} + +/* Disable MCDI PTP support. + * + * Note that this function should never rely on the presence of ptp_data - + * may be called before that exists. + */ +static int efx_ptp_disable(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 0 : rc; + /* If we get ENOSYS, the NIC doesn't support PTP, and thus this function + * should only have been called during probe. 
+ */
+	if (rc == -ENOSYS || rc == -EPERM)
+		pci_info(efx->pci_dev, "no PTP support\n");
+	else if (rc)
+		efx_siena_mcdi_display_error(efx, MC_CMD_PTP,
+					     MC_CMD_PTP_IN_DISABLE_LEN,
+					     outbuf, sizeof(outbuf), rc);
+	return rc;
+}
+
+static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(q))) {
+		local_bh_disable();
+		netif_receive_skb(skb);
+		local_bh_enable();
+	}
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+	netif_err(efx, drv, efx->net_dev,
+		  "ERROR: PTP requires MSI-X and 1 additional interrupt "
+		  "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+static void efx_ptp_send_times(struct efx_nic *efx,
+			       struct pps_event_time *last_time)
+{
+	struct pps_event_time now;
+	struct timespec64 limit;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int *mc_running = ptp->start.addr;
+
+	pps_get_ts(&now);
+	limit = now.ts_real;
+	timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+
+	/* Write host time for specified period or until MC is done */
+	while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
+	       READ_ONCE(*mc_running)) {
+		struct timespec64 update_time;
+		unsigned int host_time;
+
+		/* Don't update continuously to avoid saturating the PCIe bus */
+		update_time = now.ts_real;
+		timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+		do {
+			pps_get_ts(&now);
+		} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
+			 READ_ONCE(*mc_running));
+
+		/* Synchronise NIC with single word of time only */
+		host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
+			     now.ts_real.tv_nsec);
+		/* Update host time in NIC memory */
+		efx->type->ptp_write_host_time(efx, host_time);
+	}
+	*last_time = now;
+}
+
+/* Read a timeset from the MC's results and partially process it. */
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+				 struct efx_ptp_timeset *timeset)
+{
+	unsigned int start_ns, end_ns;
+
+	timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+	timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+	timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
+	timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
+	timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+	/* Ignore seconds */
+	start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+	end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+	/* Allow for rollover */
+	if (end_ns < start_ns)
+		end_ns += NSEC_PER_SEC;
+	/* Determine duration of operation */
+	timeset->window = end_ns - start_ns;
+}
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen. The minimum value represents the "best" possible time and events
+ * too much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
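(Editor's aside, not part of the patch: efx_ptp_send_times() above packs seconds and nanoseconds into a single 32-bit word. MC_NANOSECOND_BITS is defined earlier in the file and is assumed to be 30 here, leaving the low 30 bits for the nanosecond count and the top 2 bits for seconds. A minimal userspace sketch:)

    #include <stdint.h>
    #include <stdio.h>

    #define MC_NANOSECOND_BITS 30	/* assumed; defined earlier in ptp.c */
    #define MC_NANOSECOND_MASK ((1U << MC_NANOSECOND_BITS) - 1)

    int main(void)
    {
    	uint32_t sec = 3, ns = 123456789;	/* ns < 2^30 always holds */
    	uint32_t host_time = (sec << MC_NANOSECOND_BITS) | ns;

    	/* Unpack the same way efx_ptp_read_timeset() splits host_start */
    	printf("packed=%#x sec=%u ns=%u\n", host_time,
    	       host_time >> MC_NANOSECOND_BITS,
    	       host_time & MC_NANOSECOND_MASK);
    	return 0;
    }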
+ */
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+		      size_t response_length,
+		      const struct pps_event_time *last_time)
+{
+	unsigned int number_readings =
+		MCDI_VAR_ARRAY_LEN(response_length,
+				   PTP_OUT_SYNCHRONIZE_TIMESET);
+	unsigned int i;
+	unsigned int ngood = 0;
+	unsigned int last_good = 0;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	u32 last_sec;
+	u32 start_sec;
+	struct timespec64 delta;
+	ktime_t mc_time;
+
+	if (number_readings == 0)
+		return -EAGAIN;
+
+	/* Read the set of results and find the last good host-MC
+	 * synchronization result. The MC times when it finishes reading the
+	 * host time so the corrected window time should be fairly constant
+	 * for a given platform. Increment stats for any results that appear
+	 * to be erroneous.
+	 */
+	for (i = 0; i < number_readings; i++) {
+		s32 window, corrected;
+		struct timespec64 wait;
+
+		efx_ptp_read_timeset(
+			MCDI_ARRAY_STRUCT_PTR(synch_buf,
+					      PTP_OUT_SYNCHRONIZE_TIMESET, i),
+			&ptp->timeset[i]);
+
+		wait = ktime_to_timespec64(
+			ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+		window = ptp->timeset[i].window;
+		corrected = window - wait.tv_nsec;
+
+		/* We expect the uncorrected synchronization window to be at
+		 * least as large as the interval between host start and end
+		 * times. If it is smaller than this then it is most likely
+		 * to be a consequence of the host's time being adjusted.
+		 * Check that the corrected sync window is in a reasonable
+		 * range. If it is out of range it is likely to be because an
+		 * interrupt or other delay occurred between reading the system
+		 * time and writing it to MC memory.
+		 */
+		if (window < SYNCHRONISATION_GRANULARITY_NS) {
+			++ptp->invalid_sync_windows;
+		} else if (corrected >= MAX_SYNCHRONISATION_NS) {
+			++ptp->oversize_sync_windows;
+		} else if (corrected < ptp->min_synchronisation_ns) {
+			++ptp->undersize_sync_windows;
+		} else {
+			ngood++;
+			last_good = i;
+		}
+	}
+
+	if (ngood == 0) {
+		netif_warn(efx, drv, efx->net_dev,
+			   "PTP no suitable synchronisations\n");
+		return -EAGAIN;
+	}
+
+	/* Calculate delay from last good sync (host time) to last_time.
+	 * It is possible that the seconds rolled over between taking
+	 * the start reading and the last value written by the host. The
+	 * timescales are such that a gap of more than one second is never
+	 * expected. delta is *not* normalised.
+	 */
+	start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
+	last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
+	if (start_sec != last_sec &&
+	    ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+		netif_warn(efx, hw, efx->net_dev,
+			   "PTP bad synchronisation seconds\n");
+		return -EAGAIN;
+	}
+	delta.tv_sec = (last_sec - start_sec) & 1;
+	delta.tv_nsec =
+		last_time->ts_real.tv_nsec -
+		(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+	/* Convert the NIC time at last good sync into kernel time.
+	 * No correction is required - this time is the output of a
+	 * firmware process.
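(Editor's aside, not part of the patch: the seconds-rollover check in efx_ptp_process_times() above only sees seconds modulo a small power of two, because host_start carries just the bits left over after the nanosecond field. MC_SECOND_MASK is assumed to be 3 here, i.e. 32 - 30 = 2 seconds bits. A minimal userspace sketch:)

    #include <stdint.h>
    #include <stdio.h>

    #define MC_SECOND_MASK 3U	/* assumed: (1 << (32 - 30)) - 1 */

    int main(void)
    {
    	uint32_t start_sec = 3;			/* host_start >> 30 */
    	uint32_t last_sec = 4 & MC_SECOND_MASK;	/* wall clock rolled to 4s */

    	/* Accept only a 0 or 1 second gap, as the driver does */
    	if (start_sec != last_sec &&
    	    ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
    		printf("bad synchronisation seconds\n");
    		return 1;
    	}
    	printf("delta.tv_sec = %u\n", (last_sec - start_sec) & 1);
    	return 0;
    }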
+ */ + mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major, + ptp->timeset[last_good].minor, 0); + + /* Calculate delay from NIC top of second to last_time */ + delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec; + + /* Set PPS timestamp to match NIC top of second */ + ptp->host_time_pps = *last_time; + pps_sub_ts(&ptp->host_time_pps, delta); + + return 0; +} + +/* Synchronize times between the host and the MC */ +static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX); + size_t response_length; + int rc; + unsigned long timeout; + struct pps_event_time last_time = {}; + unsigned int loops = 0; + int *start = ptp->start.addr; + + MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); + MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, + num_readings); + MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR, + ptp->start.dma_addr); + + /* Clear flag that signals MC ready */ + WRITE_ONCE(*start, 0); + rc = efx_siena_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + EFX_WARN_ON_ONCE_PARANOID(rc); + + /* Wait for start from MCDI (or timeout) */ + timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); + while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) { + udelay(20); /* Usually start MCDI execution quickly */ + loops++; + } + + if (loops <= 1) + ++ptp->fast_syncs; + if (!time_before(jiffies, timeout)) + ++ptp->sync_timeouts; + + if (READ_ONCE(*start)) + efx_ptp_send_times(efx, &last_time); + + /* Collect results */ + rc = efx_siena_mcdi_rpc_finish(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN, + synch_buf, sizeof(synch_buf), + &response_length); + if (rc == 0) { + rc = efx_ptp_process_times(efx, synch_buf, response_length, + &last_time); + if (rc == 0) + ++ptp->good_syncs; + else + ++ptp->no_time_syncs; + } + + /* Increment the bad syncs counter if the synchronize fails, whatever + * the reason. + */ + if (rc != 0) + ++ptp->bad_syncs; + + return rc; +} + +/* Transmit a PTP packet via the dedicated hardware timestamped queue. */ +static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp_data = efx->ptp_data; + u8 type = efx_tx_csum_type_skb(skb); + struct efx_tx_queue *tx_queue; + + tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type); + if (tx_queue && tx_queue->timestamping) { + efx_enqueue_skb(tx_queue, skb); + } else { + WARN_ONCE(1, "PTP channel has no timestamped tx queue\n"); + dev_kfree_skb_any(skb); + } +} + +/* Transmit a PTP packet, via the MCDI interface, to the wire. 
*/
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
+	struct skb_shared_hwtstamps timestamps;
+	int rc = -EIO;
+	MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+	size_t len;
+
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+	if (skb_shinfo(skb)->nr_frags != 0) {
+		rc = skb_linearize(skb);
+		if (rc != 0)
+			goto fail;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		rc = skb_checksum_help(skb);
+		if (rc != 0)
+			goto fail;
+	}
+	skb_copy_from_linear_data(skb,
+				  MCDI_PTR(ptp_data->txbuf,
+					   PTP_IN_TRANSMIT_PACKET),
+				  skb->len);
+	rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, ptp_data->txbuf,
+				MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), txtime,
+				sizeof(txtime), &len);
+	if (rc != 0)
+		goto fail;
+
+	memset(&timestamps, 0, sizeof(timestamps));
+	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+		ptp_data->ts_corrections.ptp_tx);
+
+	skb_tstamp_tx(skb, &timestamps);
+
+	rc = 0;
+
+fail:
+	dev_kfree_skb_any(skb);
+
+	return;
+}
+
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct list_head *cursor;
+	struct list_head *next;
+
+	if (ptp->rx_ts_inline)
+		return;
+
+	/* Drop time-expired events */
+	spin_lock_bh(&ptp->evt_lock);
+	list_for_each_safe(cursor, next, &ptp->evt_list) {
+		struct efx_ptp_event_rx *evt;
+
+		evt = list_entry(cursor, struct efx_ptp_event_rx,
+				 link);
+		if (time_after(jiffies, evt->expiry)) {
+			list_move(&evt->link, &ptp->evt_free_list);
+			netif_warn(efx, hw, efx->net_dev,
+				   "PTP rx event dropped\n");
+		}
+	}
+	spin_unlock_bh(&ptp->evt_lock);
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+					      struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	bool evts_waiting;
+	struct list_head *cursor;
+	struct list_head *next;
+	struct efx_ptp_match *match;
+	enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+	WARN_ON_ONCE(ptp->rx_ts_inline);
+
+	spin_lock_bh(&ptp->evt_lock);
+	evts_waiting = !list_empty(&ptp->evt_list);
+	spin_unlock_bh(&ptp->evt_lock);
+
+	if (!evts_waiting)
+		return PTP_PACKET_STATE_UNMATCHED;
+
+	match = (struct efx_ptp_match *)skb->cb;
+	/* Look for a matching timestamp in the event queue */
+	spin_lock_bh(&ptp->evt_lock);
+	list_for_each_safe(cursor, next, &ptp->evt_list) {
+		struct efx_ptp_event_rx *evt;
+
+		evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+		if ((evt->seq0 == match->words[0]) &&
+		    (evt->seq1 == match->words[1])) {
+			struct skb_shared_hwtstamps *timestamps;
+
+			/* Match - add in hardware timestamp */
+			timestamps = skb_hwtstamps(skb);
+			timestamps->hwtstamp = evt->hwtimestamp;
+
+			match->state = PTP_PACKET_STATE_MATCHED;
+			rc = PTP_PACKET_STATE_MATCHED;
+			list_move(&evt->link, &ptp->evt_free_list);
+			break;
+		}
+	}
+	spin_unlock_bh(&ptp->evt_lock);
+
+	return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
+ */ +static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&ptp->rxq))) { + struct efx_ptp_match *match; + + match = (struct efx_ptp_match *)skb->cb; + if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) { + __skb_queue_tail(q, skb); + } else if (efx_ptp_match_rx(efx, skb) == + PTP_PACKET_STATE_MATCHED) { + __skb_queue_tail(q, skb); + } else if (time_after(jiffies, match->expiry)) { + match->state = PTP_PACKET_STATE_TIMED_OUT; + ++ptp->rx_no_timestamp; + __skb_queue_tail(q, skb); + } else { + /* Replace unprocessed entry and stop */ + skb_queue_head(&ptp->rxq, skb); + break; + } + } +} + +/* Complete processing of a received packet */ +static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb) +{ + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); +} + +static void efx_ptp_remove_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + if (ptp->rxfilter_installed) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_general); + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + ptp->rxfilter_installed = false; + } +} + +static int efx_ptp_insert_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_filter_spec rxfilter; + int rc; + + if (!ptp->channel || ptp->rxfilter_installed) + return 0; + + /* Must filter on both event and general ports to ensure + * that there is no packet re-ordering. + */ + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_EVENT_PORT)); + if (rc != 0) + return rc; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + return rc; + ptp->rxfilter_event = rc; + + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_GENERAL_PORT)); + if (rc != 0) + goto fail; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + goto fail; + ptp->rxfilter_general = rc; + + ptp->rxfilter_installed = true; + return 0; + +fail: + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + return rc; +} + +static int efx_ptp_start(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + + ptp->reset_required = false; + + rc = efx_ptp_insert_multicast_filters(efx); + if (rc) + return rc; + + rc = efx_ptp_enable(efx); + if (rc != 0) + goto fail; + + ptp->evt_frag_idx = 0; + ptp->current_adjfreq = 0; + + return 0; + +fail: + efx_ptp_remove_multicast_filters(efx); + return rc; +} + +static int efx_ptp_stop(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct list_head *cursor; + struct list_head *next; + int rc; + + if (ptp == NULL) + return 0; + + rc = efx_ptp_disable(efx); + + efx_ptp_remove_multicast_filters(efx); + + /* Make sure RX packets are really delivered */ + efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); + + /* Drop any pending receive events */ + spin_lock_bh(&efx->ptp_data->evt_lock); + list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { + list_move(cursor, &efx->ptp_data->evt_free_list); + } + 
spin_unlock_bh(&efx->ptp_data->evt_lock); + + return rc; +} + +static int efx_ptp_restart(struct efx_nic *efx) +{ + if (efx->ptp_data && efx->ptp_data->enabled) + return efx_ptp_start(efx); + return 0; +} + +static void efx_ptp_pps_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp = + container_of(work, struct efx_ptp_data, pps_work); + struct efx_nic *efx = ptp->efx; + struct ptp_clock_event ptp_evt; + + if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) + return; + + ptp_evt.type = PTP_CLOCK_PPSUSR; + ptp_evt.pps_times = ptp->host_time_pps; + ptp_clock_event(ptp->phc_clock, &ptp_evt); +} + +static void efx_ptp_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp_data = + container_of(work, struct efx_ptp_data, work); + struct efx_nic *efx = ptp_data->efx; + struct sk_buff *skb; + struct sk_buff_head tempq; + + if (ptp_data->reset_required) { + efx_ptp_stop(efx); + efx_ptp_start(efx); + return; + } + + efx_ptp_drop_time_expired_events(efx); + + __skb_queue_head_init(&tempq); + efx_ptp_process_events(efx, &tempq); + + while ((skb = skb_dequeue(&ptp_data->txq))) + ptp_data->xmit_skb(efx, skb); + + while ((skb = __skb_dequeue(&tempq))) + efx_ptp_process_rx(efx, skb); +} + +static const struct ptp_clock_info efx_phc_clock_info = { + .owner = THIS_MODULE, + .name = "sfc_siena", + .max_adj = MAX_PPB, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfreq = efx_phc_adjfreq, + .adjtime = efx_phc_adjtime, + .gettime64 = efx_phc_gettime, + .settime64 = efx_phc_settime, + .enable = efx_phc_enable, +}; + +/* Initialise PTP state. */ +static int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) +{ + struct efx_ptp_data *ptp; + int rc = 0; + unsigned int pos; + + ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL); + efx->ptp_data = ptp; + if (!efx->ptp_data) + return -ENOMEM; + + ptp->efx = efx; + ptp->channel = channel; + ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; + + rc = efx_siena_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); + if (rc != 0) + goto fail1; + + skb_queue_head_init(&ptp->rxq); + skb_queue_head_init(&ptp->txq); + ptp->workwq = create_singlethread_workqueue("sfc_siena_ptp"); + if (!ptp->workwq) { + rc = -ENOMEM; + goto fail2; + } + + if (efx_siena_ptp_use_mac_tx_timestamps(efx)) { + ptp->xmit_skb = efx_ptp_xmit_skb_queue; + /* Request sync events on this channel. 
*/ + channel->sync_events_state = SYNC_EVENTS_QUIESCENT; + } else { + ptp->xmit_skb = efx_ptp_xmit_skb_mc; + } + + INIT_WORK(&ptp->work, efx_ptp_worker); + ptp->config.flags = 0; + ptp->config.tx_type = HWTSTAMP_TX_OFF; + ptp->config.rx_filter = HWTSTAMP_FILTER_NONE; + INIT_LIST_HEAD(&ptp->evt_list); + INIT_LIST_HEAD(&ptp->evt_free_list); + spin_lock_init(&ptp->evt_lock); + for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) + list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); + + /* Get the NIC PTP attributes and set up time conversions */ + rc = efx_ptp_get_attributes(efx); + if (rc < 0) + goto fail3; + + /* Get the timestamp corrections */ + rc = efx_ptp_get_timestamp_corrections(efx); + if (rc < 0) + goto fail3; + + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) { + ptp->phc_clock_info = efx_phc_clock_info; + ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, + &efx->pci_dev->dev); + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); + goto fail3; + } else if (ptp->phc_clock) { + INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); + ptp->pps_workwq = + create_singlethread_workqueue("sfc_siena_pps"); + if (!ptp->pps_workwq) { + rc = -ENOMEM; + goto fail4; + } + } + } + ptp->nic_ts_enabled = false; + + return 0; +fail4: + ptp_clock_unregister(efx->ptp_data->phc_clock); + +fail3: + destroy_workqueue(efx->ptp_data->workwq); + +fail2: + efx_siena_free_buffer(efx, &ptp->start); + +fail1: + kfree(efx->ptp_data); + efx->ptp_data = NULL; + + return rc; +} + +/* Initialise PTP channel. + * + * Setting core_index to zero causes the queue to be initialised and doesn't + * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue. + */ +static int efx_ptp_probe_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + int rc; + + channel->irq_moderation_us = 0; + channel->rx_queue.core_index = 0; + + rc = efx_ptp_probe(efx, channel); + /* Failure to probe PTP is not fatal; this channel will just not be + * used for anything. + * In the case of EPERM, efx_ptp_probe will print its own message (in + * efx_ptp_get_attributes()), so we don't need to. + */ + if (rc && rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "Failed to probe PTP, rc=%d\n", rc); + return 0; +} + +static void efx_ptp_remove(struct efx_nic *efx) +{ + if (!efx->ptp_data) + return; + + (void)efx_ptp_disable(efx); + + cancel_work_sync(&efx->ptp_data->work); + if (efx->ptp_data->pps_workwq) + cancel_work_sync(&efx->ptp_data->pps_work); + + skb_queue_purge(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); + + if (efx->ptp_data->phc_clock) { + destroy_workqueue(efx->ptp_data->pps_workwq); + ptp_clock_unregister(efx->ptp_data->phc_clock); + } + + destroy_workqueue(efx->ptp_data->workwq); + + efx_siena_free_buffer(efx, &efx->ptp_data->start); + kfree(efx->ptp_data); + efx->ptp_data = NULL; +} + +static void efx_ptp_remove_channel(struct efx_channel *channel) +{ + efx_ptp_remove(channel->efx); +} + +static void efx_ptp_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-ptp", channel->efx->name); +} + +/* Determine whether this packet should be processed by the PTP module + * or transmitted conventionally. 
+ */ +bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + return efx->ptp_data && + efx->ptp_data->enabled && + skb->len >= PTP_MIN_LENGTH && + skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && + likely(skb->protocol == htons(ETH_P_IP)) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) >= sizeof(struct iphdr) && + ip_hdr(skb)->protocol == IPPROTO_UDP && + skb_headlen(skb) >= + skb_transport_offset(skb) + sizeof(struct udphdr) && + udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); +} + +/* Receive a PTP packet. Packets are queued until the arrival of + * the receive timestamp from the MC - this will probably occur after the + * packet arrival because of the processing in the MC. + */ +static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; + u8 *match_data_012, *match_data_345; + unsigned int version; + u8 *data; + + match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + + /* Correct version? */ + if (ptp->mode == MC_CMD_PTP_MODE_V1) { + if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { + return false; + } + data = skb->data; + version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]); + if (version != PTP_VERSION_V1) { + return false; + } + + /* PTP V1 uses all six bytes of the UUID to match the packet + * to the timestamp + */ + match_data_012 = data + PTP_V1_UUID_OFFSET; + match_data_345 = data + PTP_V1_UUID_OFFSET + 3; + } else { + if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { + return false; + } + data = skb->data; + version = data[PTP_V2_VERSION_OFFSET]; + if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { + return false; + } + + /* The original V2 implementation uses bytes 2-7 of + * the UUID to match the packet to the timestamp. This + * discards two of the bytes of the MAC address used + * to create the UUID (SF bug 33070). The PTP V2 + * enhanced mode fixes this issue and uses bytes 0-2 + * and byte 5-7 of the UUID. + */ + match_data_345 = data + PTP_V2_UUID_OFFSET + 5; + if (ptp->mode == MC_CMD_PTP_MODE_V2) { + match_data_012 = data + PTP_V2_UUID_OFFSET + 2; + } else { + match_data_012 = data + PTP_V2_UUID_OFFSET + 0; + BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); + } + } + + /* Does this packet require timestamping? */ + if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { + match->state = PTP_PACKET_STATE_UNMATCHED; + + /* We expect the sequence number to be in the same position in + * the packet for PTP V1 and V2 + */ + BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); + BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); + + /* Extract UUID/Sequence information */ + match->words[0] = (match_data_012[0] | + (match_data_012[1] << 8) | + (match_data_012[2] << 16) | + (match_data_345[0] << 24)); + match->words[1] = (match_data_345[1] | + (match_data_345[2] << 8) | + (data[PTP_V1_SEQUENCE_OFFSET + + PTP_V1_SEQUENCE_LENGTH - 1] << + 16)); + } else { + match->state = PTP_PACKET_STATE_MATCH_UNWANTED; + } + + skb_queue_tail(&ptp->rxq, skb); + queue_work(ptp->workwq, &ptp->work); + + return true; +} + +/* Transmit a PTP packet. This has to be transmitted by the MC + * itself, through an MCDI call. MCDI calls aren't permitted + * in the transmit path so defer the actual transmission to a suitable worker. 
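(Editor's aside, not part of the patch: the two 32-bit match words built in efx_ptp_rx() above, which efx_ptp_match_rx() later compares against the MC event's seq0/seq1, can be reproduced in isolation. The sketch below uses the PTP v1 layout, where all six UUID bytes plus the low sequence byte form the key; the sample UUID and sequence values are invented.)

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint8_t uuid[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
    	uint8_t seq_lsb = 0x42;	/* last byte of the sequence number */
    	uint32_t words[2];

    	/* Same packing as efx_ptp_rx(): v1 uses uuid+0 and uuid+3 */
    	words[0] = uuid[0] | (uuid[1] << 8) | (uuid[2] << 16) |
    		   ((uint32_t)uuid[3] << 24);
    	words[1] = uuid[4] | (uuid[5] << 8) | ((uint32_t)seq_lsb << 16);
    	printf("match words = %08x %08x\n", words[0], words[1]);
    	return 0;
    }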
+ */
+int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	skb_queue_tail(&ptp->txq, skb);
+
+	if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
+	    (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
+		efx_xmit_hwtstamp_pending(skb);
+	queue_work(ptp->workwq, &ptp->work);
+
+	return NETDEV_TX_OK;
+}
+
+int efx_siena_ptp_get_mode(struct efx_nic *efx)
+{
+	return efx->ptp_data->mode;
+}
+
+int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+			      unsigned int new_mode)
+{
+	if ((enable_wanted != efx->ptp_data->enabled) ||
+	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+		int rc = 0;
+
+		if (enable_wanted) {
+			/* Change of mode requires disable */
+			if (efx->ptp_data->enabled &&
+			    (efx->ptp_data->mode != new_mode)) {
+				efx->ptp_data->enabled = false;
+				rc = efx_ptp_stop(efx);
+				if (rc != 0)
+					return rc;
+			}
+
+			/* Set new operating mode and establish
+			 * baseline synchronisation, which must
+			 * succeed.
+			 */
+			efx->ptp_data->mode = new_mode;
+			if (netif_running(efx->net_dev))
+				rc = efx_ptp_start(efx);
+			if (rc == 0) {
+				rc = efx_ptp_synchronize(efx,
+							 PTP_SYNC_ATTEMPTS * 2);
+				if (rc != 0)
+					efx_ptp_stop(efx);
+			}
+		} else {
+			rc = efx_ptp_stop(efx);
+		}
+
+		if (rc != 0)
+			return rc;
+
+		efx->ptp_data->enabled = enable_wanted;
+	}
+
+	return 0;
+}
+
+static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+{
+	int rc;
+
+	if ((init->tx_type != HWTSTAMP_TX_OFF) &&
+	    (init->tx_type != HWTSTAMP_TX_ON))
+		return -ERANGE;
+
+	rc = efx->type->ptp_set_ts_config(efx, init);
+	if (rc)
+		return rc;
+
+	efx->ptp_data->config = *init;
+	return 0;
+}
+
+void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
+			       struct ethtool_ts_info *ts_info)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct efx_nic *primary = efx->primary;
+
+	ASSERT_RTNL();
+
+	if (!ptp)
+		return;
+
+	ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+				     SOF_TIMESTAMPING_RX_HARDWARE |
+				     SOF_TIMESTAMPING_RAW_HARDWARE);
+	if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+		ts_info->phc_index =
+			ptp_clock_index(primary->ptp_data->phc_clock);
+	ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
+	ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
+}
+
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	int rc;
+
+	/* Not a PTP enabled port */
+	if (!efx->ptp_data)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	rc = efx_ptp_ts_init(efx, &config);
+	if (rc != 0)
+		return rc;
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config))
+		? -EFAULT : 0;
+}
+
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+	if (!efx->ptp_data)
+		return -EOPNOTSUPP;
+
+	return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+			    sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	netif_err(efx, hw, efx->net_dev,
+		  "PTP unexpected event length: got %d expected %d\n",
+		  ptp->evt_frag_idx, expected_frag_len);
+	ptp->reset_required = true;
+	queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Process a completed receive event. Put it on the event queue and
+ * start the worker thread. This is required because events and their
+ * corresponding packets may come in either order.
+ */ +static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + struct efx_ptp_event_rx *evt = NULL; + + if (WARN_ON_ONCE(ptp->rx_ts_inline)) + return; + + if (ptp->evt_frag_idx != 3) { + ptp_event_failure(efx, 3); + return; + } + + spin_lock_bh(&ptp->evt_lock); + if (!list_empty(&ptp->evt_free_list)) { + evt = list_first_entry(&ptp->evt_free_list, + struct efx_ptp_event_rx, link); + list_del(&evt->link); + + evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA); + evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2], + MCDI_EVENT_SRC) | + (EFX_QWORD_FIELD(ptp->evt_frags[1], + MCDI_EVENT_SRC) << 8) | + (EFX_QWORD_FIELD(ptp->evt_frags[0], + MCDI_EVENT_SRC) << 16)); + evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time( + EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), + EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA), + ptp->ts_corrections.ptp_rx); + evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + list_add_tail(&evt->link, &ptp->evt_list); + + queue_work(ptp->workwq, &ptp->work); + } else if (net_ratelimit()) { + /* Log a rate-limited warning message. */ + netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); + } + spin_unlock_bh(&ptp->evt_lock); +} + +static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA); + if (ptp->evt_frag_idx != 1) { + ptp_event_failure(efx, 1); + return; + } + + netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code); +} + +static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + if (ptp->nic_ts_enabled) + queue_work(ptp->pps_workwq, &ptp->pps_work); +} + +void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); + + if (!ptp) { + if (!efx->ptp_warned) { + netif_warn(efx, drv, efx->net_dev, + "Received PTP event but PTP not set up\n"); + efx->ptp_warned = true; + } + return; + } + + if (!ptp->enabled) + return; + + if (ptp->evt_frag_idx == 0) { + ptp->evt_code = code; + } else if (ptp->evt_code != code) { + netif_err(efx, hw, efx->net_dev, + "PTP out of sequence event %d\n", code); + ptp->evt_frag_idx = 0; + } + + ptp->evt_frags[ptp->evt_frag_idx++] = *ev; + if (!MCDI_EVENT_FIELD(*ev, CONT)) { + /* Process resulting event */ + switch (code) { + case MCDI_EVENT_CODE_PTP_RX: + ptp_event_rx(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_FAULT: + ptp_event_fault(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_PPS: + ptp_event_pps(efx, ptp); + break; + default: + netif_err(efx, hw, efx->net_dev, + "PTP unknown event %d\n", code); + break; + } + ptp->evt_frag_idx = 0; + } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) { + netif_err(efx, hw, efx->net_dev, + "PTP too many event fragments\n"); + ptp->evt_frag_idx = 0; + } +} + +void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + + /* When extracting the sync timestamp minor value, we should discard + * the least significant two bits. These are not required in order + * to reconstruct full-range timestamps and they are optionally used + * to report status depending on the options supplied when subscribing + * for sync events. 
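(Editor's aside, not part of the patch: the reconstruction of the sync-event minor value performed just below takes the most significant 8 bits of the minor time, masks off the two status bits, and shifts the remaining 6 bits back into place. A minimal userspace sketch, using the shift of 19 that efx_ptp_get_attributes() configures for the s27 format; the sample byte is invented.)

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint8_t minor_ms_8bits = 0xA7;	/* top 8 bits; 2 LSBs are status */
    	unsigned int shift = 19;	/* sync_event_minor_shift for s27 */
    	uint32_t sync_minor = (uint32_t)(minor_ms_8bits & 0xFC) << shift;

    	/* Bits 21-26 of the 27-bit minor time are now back in position */
    	printf("sync_timestamp_minor = %#x\n", sync_minor);
    	return 0;
    }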
+ */ + channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR); + channel->sync_timestamp_minor = + (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC) + << ptp->nic_time.sync_event_minor_shift; + + /* if sync events have been disabled then we want to silently ignore + * this event, so throw away result. + */ + (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID); +} + +static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset)); +#else + const u8 *data = eh + efx->rx_packet_ts_offset; + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + u32 pkt_timestamp_major, pkt_timestamp_minor; + u32 diff, carry; + struct skb_shared_hwtstamps *timestamps; + + if (channel->sync_events_state != SYNC_EVENTS_VALID) + return; + + pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb)); + + /* get the difference between the packet and sync timestamps, + * modulo one second + */ + diff = pkt_timestamp_minor - channel->sync_timestamp_minor; + if (pkt_timestamp_minor < channel->sync_timestamp_minor) + diff += ptp->nic_time.minor_max; + + /* do we roll over a second boundary and need to carry the one? */ + carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ? + 1 : 0; + + if (diff <= ptp->nic_time.sync_event_diff_max) { + /* packet is ahead of the sync event by a quarter of a second or + * less (allowing for fuzz) + */ + pkt_timestamp_major = channel->sync_timestamp_major + carry; + } else if (diff >= ptp->nic_time.sync_event_diff_min) { + /* packet is behind the sync event but within the fuzz factor. + * This means the RX packet and sync event crossed as they were + * placed on the event queue, which can sometimes happen. + */ + pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry; + } else { + /* it's outside tolerance in both directions. this might be + * indicative of us missing sync events for some reason, so + * we'll call it an error rather than risk giving a bogus + * timestamp. + */ + netif_vdbg(efx, drv, efx->net_dev, + "packet timestamp %x too far from sync event %x:%x\n", + pkt_timestamp_minor, channel->sync_timestamp_major, + channel->sync_timestamp_minor); + return; + } + + /* attach the timestamps to the skb */ + timestamps = skb_hwtstamps(skb); + timestamps->hwtstamp = + ptp->nic_to_kernel_time(pkt_timestamp_major, + pkt_timestamp_minor, + ptp->ts_corrections.general_rx); +} + +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); + s64 adjustment_ns; + int rc; + + if (delta > MAX_PPB) + delta = MAX_PPB; + else if (delta < -MAX_PPB) + delta = -MAX_PPB; + + /* Convert ppb to fixed point ns taking care to round correctly. 
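(Editor's aside, not part of the patch: the ppb-to-fixed-point conversion performed just below can be sketched in userspace. PPB_SCALE_WORD and the FP40/FP44 shifts are defined earlier in the file; the values used here, 2^66/10^9 for the scale and shifts of 26 and 22, are assumptions consistent with that scheme: scaling by 2^66/10^9 and shifting right by 26 or 22 leaves an adjustment with 40 or 44 fractional bits respectively.)

    #include <stdint.h>
    #include <stdio.h>

    #define PPB_SCALE_WORD ((1LL << 57) / 1953125LL)	/* assumed: 2^66/10^9 */
    #define PPB_SHIFT_FP40 26	/* assumed */
    #define PPB_SHIFT_FP44 22	/* assumed */

    static int64_t ppb_to_fixed(int32_t delta_ppb, unsigned int shift)
    {
    	/* Round to nearest by adding half of the discarded low bits */
    	return ((int64_t)delta_ppb * PPB_SCALE_WORD +
    		(1LL << (shift - 1))) >> shift;
    }

    int main(void)
    {
    	printf("+1000 ppb -> FP40 %lld, FP44 %lld\n",
    	       (long long)ppb_to_fixed(1000, PPB_SHIFT_FP40),
    	       (long long)ppb_to_fixed(1000, PPB_SHIFT_FP44));
    	return 0;
    }

The rounding term matters: 1000 ppb is 1000e-9 * 2^40, roughly 1099512 in FP40, and truncation instead of rounding would bias every adjustment low.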
*/ + adjustment_ns = ((s64)delta * PPB_SCALE_WORD + + (1 << (ptp_data->adjfreq_ppb_shift - 1))) >> + ptp_data->adjfreq_ppb_shift; + + MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), + NULL, 0, NULL); + if (rc != 0) + return rc; + + ptp_data->current_adjfreq = adjustment_ns; + return 0; +} + +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + u32 nic_major, nic_minor; + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); + + efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor); + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor); + return efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); + int rc; + ktime_t kt; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc != 0) + return rc; + + kt = ptp_data->nic_to_kernel_time( + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR), + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0); + *ts = ktime_to_timespec64(kt); + return 0; +} + +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts) +{ + /* Get the current NIC time, efx_phc_gettime. + * Subtract from the desired time to get the offset + * call efx_phc_adjtime with the offset + */ + int rc; + struct timespec64 time_now; + struct timespec64 delta; + + rc = efx_phc_gettime(ptp, &time_now); + if (rc != 0) + return rc; + + delta = timespec64_sub(*e_ts, time_now); + + rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta)); + if (rc != 0) + return rc; + + return 0; +} + +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, + int enable) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + if (request->type != PTP_CLK_REQ_PPS) + return -EOPNOTSUPP; + + ptp_data->nic_ts_enabled = !!enable; + return 0; +} + +static const struct efx_channel_type efx_ptp_channel_type = { + .handle_no_channel = efx_ptp_handle_no_channel, + .pre_probe = efx_ptp_probe_channel, + .post_remove = efx_ptp_remove_channel, + .get_name = efx_ptp_get_channel_name, + /* no copy operation; there is no need to reallocate this channel */ + .receive_skb = efx_ptp_rx, + .want_txqs = efx_ptp_want_txqs, + .keep_eventq = false, +}; + +void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx) +{ + /* Check whether PTP is implemented on this NIC. The DISABLE + * operation will succeed if and only if it is implemented. 
+ */
+	if (efx_ptp_disable(efx) == 0)
+		efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+			&efx_ptp_channel_type;
+}
+
+void efx_siena_ptp_start_datapath(struct efx_nic *efx)
+{
+	if (efx_ptp_restart(efx))
+		netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+	/* re-enable timestamping if it was previously enabled */
+	if (efx->type->ptp_set_ts_sync_events)
+		efx->type->ptp_set_ts_sync_events(efx, true, true);
+}
+
+void efx_siena_ptp_stop_datapath(struct efx_nic *efx)
+{
+	/* temporarily disable timestamping */
+	if (efx->type->ptp_set_ts_sync_events)
+		efx->type->ptp_set_ts_sync_events(efx, false, true);
+	efx_ptp_stop(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
new file mode 100644
index 0000000000..4172f90e9f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_PTP_H
+#define EFX_PTP_H
+
+#include <linux/net_tstamp.h>
+#include "net_driver.h"
+
+struct ethtool_ts_info;
+void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
+			       struct ethtool_ts_info *ts_info);
+bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_siena_ptp_get_mode(struct efx_nic *efx);
+int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+			      unsigned int new_mode);
+int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
+					 struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+					       struct sk_buff *skb)
+{
+	if (channel->sync_events_state == SYNC_EVENTS_VALID)
+		__efx_siena_rx_skb_attach_timestamp(channel, skb);
+}
+
+void efx_siena_ptp_start_datapath(struct efx_nic *efx);
+void efx_siena_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
+
+#endif /* EFX_PTP_H */
diff --git a/drivers/net/ethernet/sfc/siena/rx.c b/drivers/net/ethernet/sfc/siena/rx.c
new file mode 100644
index 0000000000..98d3c0743c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/prefetch.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "rx_common.h"
+#include "filter.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Maximum rx prefix used by any architecture. */
+#define EFX_MAX_RX_PREFIX_SIZE 16
+
+/* Size of buffer allocated for skb header area. */
+#define EFX_SKB_HEADERS 128u
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+				      EFX_RX_USR_BUF_SIZE)
+
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+				     struct efx_rx_buffer *rx_buf,
+				     int len)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;
+
+	if (likely(len <= max_len))
+		return;
+
+	/* The packet must be discarded, but this is only a fatal error
+	 * if the caller indicated it was
+	 */
+	rx_buf->flags |= EFX_RX_PKT_DISCARD;
+
+	if (net_ratelimit())
+		netif_err(efx, rx_err, efx->net_dev,
+			  "RX queue %d overlength RX event (%#x > %#x)\n",
+			  efx_rx_queue_index(rx_queue), len, max_len);
+
+	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
+}
+
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+				     struct efx_rx_buffer *rx_buf,
+				     unsigned int n_frags,
+				     u8 *eh, int hdr_len)
+{
+	struct efx_nic *efx = channel->efx;
+	struct sk_buff *skb;
+
+	/* Allocate an SKB to store the headers */
+	skb = netdev_alloc_skb(efx->net_dev,
+			       efx->rx_ip_align + efx->rx_prefix_size +
+			       hdr_len);
+	if (unlikely(skb == NULL)) {
+		atomic_inc(&efx->n_rx_noskb_drops);
+		return NULL;
+	}
+
+	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
+
+	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+	       efx->rx_prefix_size + hdr_len);
+	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+	__skb_put(skb, hdr_len);
+
+	/* Append the remaining page(s) onto the frag list */
+	if (rx_buf->len > hdr_len) {
+		rx_buf->page_offset += hdr_len;
+		rx_buf->len -= hdr_len;
+
+		for (;;) {
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					rx_buf->page, rx_buf->page_offset,
+					rx_buf->len, efx->rx_buffer_truesize);
+			rx_buf->page = NULL;
+
+			if (skb_shinfo(skb)->nr_frags == n_frags)
+				break;
+
+			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+		}
+	} else {
+		__free_pages(rx_buf->page, efx->rx_buffer_order);
+		rx_buf->page = NULL;
+		n_frags = 0;
+	}
+
+	/* Move past the ethernet header */
+	skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+	skb_mark_napi_id(skb, &channel->napi_str);
+
+	return skb;
+}
+
+void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+			 unsigned int n_frags, unsigned int len, u16 flags)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+	struct efx_rx_buffer *rx_buf;
+
+	rx_queue->rx_packets++;
+
+	rx_buf = efx_rx_buffer(rx_queue, index);
+	rx_buf->flags |= flags;
+
+	/* Validate the number of fragments and completed length */
+	if (n_frags == 1) {
+		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+			efx_rx_packet__check_len(rx_queue, rx_buf, len);
+	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+		   unlikely(len > n_frags * efx->rx_dma_len) ||
+		   unlikely(!efx->rx_scatter)) {
+		/* If
this isn't an explicit discard request, either + * the hardware or the driver is broken. + */ + WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD)); + rx_buf->flags |= EFX_RX_PKT_DISCARD; + } + + netif_vdbg(efx, rx_status, efx->net_dev, + "RX queue %d received ids %x-%x len %d %s%s\n", + efx_rx_queue_index(rx_queue), index, + (index + n_frags - 1) & rx_queue->ptr_mask, len, + (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", + (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); + + /* Discard packet, if instructed to do so. Process the + * previous receive first. + */ + if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { + efx_rx_flush_packet(channel); + efx_siena_discard_rx_packet(channel, rx_buf, n_frags); + return; + } + + if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN)) + rx_buf->len = len; + + /* Release and/or sync the DMA mapping - assumes all RX buffers + * consumed in-order per RX queue. + */ + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + + /* Prefetch nice and early so data will (hopefully) be in cache by + * the time we look at it. + */ + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->rx_prefix_size; + rx_buf->len -= efx->rx_prefix_size; + + if (n_frags > 1) { + /* Release/sync DMA mapping for additional fragments. + * Fix length for last fragment. + */ + unsigned int tail_frags = n_frags - 1; + + for (;;) { + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + if (--tail_frags == 0) + break; + efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); + } + rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + } + + /* All fragments have been DMA-synced, so recycle pages. */ + rx_buf = efx_rx_buffer(rx_queue, index); + efx_siena_recycle_rx_pages(channel, rx_buf, n_frags); + + /* Pipeline receives so that we give time for packet headers to be + * prefetched into cache. + */ + efx_rx_flush_packet(channel); + channel->rx_pkt_n_frags = n_frags; + channel->rx_pkt_index = index; +} + +static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct sk_buff *skb; + u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS); + + skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); + if (unlikely(skb == NULL)) { + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); + return; + } + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + /* Set the SKB flags */ + skb_checksum_none_assert(skb); + if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); + } + + efx_rx_skb_attach_timestamp(channel, skb); + + if (channel->type->receive_skb) + if (channel->type->receive_skb(channel, skb)) + return; + + /* Pass the packet up */ + if (channel->rx_list != NULL) + /* Add to list, will pass up later */ + list_add_tail(&skb->list, channel->rx_list); + else + /* No list, so pass it up now */ + netif_receive_skb(skb); +} + +/** efx_do_xdp: perform XDP processing on a received packet + * + * Returns true if packet should still be delivered. 
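(Editor's aside, not part of the patch: when an XDP program returns XDP_PASS after moving the packet start, efx_do_xdp() below applies the same signed offset to the driver's view of the buffer. A minimal userspace sketch of that bookkeeping; the buffer layout and the 14-byte head adjustment are invented.)

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned int page_offset = 256, len = 1500;
    	uint8_t buf[4096];
    	uint8_t *eh = buf + page_offset;	/* frame start before XDP */
    	uint8_t *xdp_data = eh + 14;	/* program popped an L2 header */
    	int offset = (int)(xdp_data - eh);

    	/* Mirror the XDP_PASS fixup: shift eh, page_offset and len */
    	eh += offset;
    	page_offset += offset;
    	len -= offset;
    	printf("offset=%d new page_offset=%u new len=%u\n",
    	       offset, page_offset, len);
    	return 0;
    }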
+ */ +static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, u8 **ehp) +{ + u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE]; + struct efx_rx_queue *rx_queue; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + struct xdp_buff xdp; + u32 xdp_act; + s16 offset; + int err; + + xdp_prog = rcu_dereference_bh(efx->xdp_prog); + if (!xdp_prog) + return true; + + rx_queue = efx_channel_get_rx_queue(channel); + + if (unlikely(channel->rx_pkt_n_frags > 1)) { + /* We can't do XDP on fragmented packets - drop. */ + efx_siena_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP is not possible with multiple receive fragments (%d)\n", + channel->rx_pkt_n_frags); + channel->n_rx_xdp_bad_drops++; + return false; + } + + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, + rx_buf->len, DMA_FROM_DEVICE); + + /* Save the rx prefix. */ + EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE); + memcpy(rx_prefix, *ehp - efx->rx_prefix_size, + efx->rx_prefix_size); + + xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info); + /* No support yet for XDP metadata */ + xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM, + rx_buf->len, false); + + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); + + offset = (u8 *)xdp.data - *ehp; + + switch (xdp_act) { + case XDP_PASS: + /* Fix up rx prefix. */ + if (offset) { + *ehp += offset; + rx_buf->page_offset += offset; + rx_buf->len -= offset; + memcpy(*ehp - efx->rx_prefix_size, rx_prefix, + efx->rx_prefix_size); + } + break; + + case XDP_TX: + /* Buffer ownership passes to tx on success. */ + xdpf = xdp_convert_buff_to_frame(&xdp); + err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true); + if (unlikely(err != 1)) { + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP TX failed (%d)\n", err); + channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + } else { + channel->n_rx_xdp_tx++; + } + break; + + case XDP_REDIRECT: + err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog); + if (unlikely(err)) { + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP redirect failed (%d)\n", err); + channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + } else { + channel->n_rx_xdp_redirect++; + } + break; + + default: + bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act); + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); + channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + break; + + case XDP_ABORTED: + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_DROP: + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); + channel->n_rx_xdp_drops++; + break; + } + + return xdp_act == XDP_PASS; +} + +/* Handle a received packet. Second half: Touches packet payload. */ +void __efx_siena_rx_packet(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_buffer *rx_buf = + efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); + u8 *eh = efx_rx_buf_va(rx_buf); + + /* Read length from the prefix if necessary. This already + * excludes the length of the prefix itself. 
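(Editor's aside, not part of the patch: the inline length-prefix read performed just below pulls a little-endian u16 out of the RX prefix. A minimal userspace sketch; the offset value is hypothetical, standing in for rx_packet_ts_offset's sibling field rx_packet_len_offset.)

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16_read(const uint8_t *p)
    {
    	/* Equivalent of le16_to_cpup() on an unaligned buffer */
    	return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
    	uint8_t prefix[16] = { 0 };
    	int len_offset = 8;	/* hypothetical rx_packet_len_offset */

    	prefix[len_offset] = 0x2e;	/* 0x012e = 302-byte frame */
    	prefix[len_offset + 1] = 0x01;
    	printf("frame length = %u\n", le16_read(prefix + len_offset));
    	return 0;
    }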
+	 */
+	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+		rx_buf->len = le16_to_cpup((__le16 *)
+					   (eh + efx->rx_packet_len_offset));
+
+	/* If we're in loopback test, then pass the packet directly to the
+	 * loopback layer, and free the rx_buf here
+	 */
+	if (unlikely(efx->loopback_selftest)) {
+		struct efx_rx_queue *rx_queue;
+
+		efx_siena_loopback_rx_packet(efx, eh, rx_buf->len);
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_siena_free_rx_buffers(rx_queue, rx_buf,
+					  channel->rx_pkt_n_frags);
+		goto out;
+	}
+
+	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
+		goto out;
+
+	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
+		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
+
+	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
+		efx_siena_rx_packet_gro(channel, rx_buf,
+					channel->rx_pkt_n_frags, eh, 0);
+	else
+		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+	channel->rx_pkt_n_frags = 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
new file mode 100644
index 0000000000..4579f43484
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -0,0 +1,1094 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+		 "RX descriptor ring refill threshold (%)");
+
+/* RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM	(1 + EFX_RX_MAX_FRAGS)
+
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+				struct efx_rx_buffer *rx_buf);
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_rx_page_state *state;
+	unsigned int index;
+	struct page *page;
+
+	if (unlikely(!rx_queue->page_ring))
+		return NULL;
+	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+	page = rx_queue->page_ring[index];
+	if (page == NULL)
+		return NULL;
+
+	rx_queue->page_ring[index] = NULL;
+	/* page_remove cannot exceed page_add. */
+	if (rx_queue->page_remove != rx_queue->page_add)
+		++rx_queue->page_remove;
+
+	/* If page_count is 1 then we hold the only reference to this page. */
+	if (page_count(page) == 1) {
+		++rx_queue->page_recycle_count;
+		return page;
+	} else {
+		state = page_address(page);
+		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+			       PAGE_SIZE << efx->rx_buffer_order,
+			       DMA_FROM_DEVICE);
+		put_page(page);
+		++rx_queue->page_recycle_failed;
+	}
+
+	return NULL;
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
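+ *
+ * For example, assuming two buffers are carved from each page, a recycle
+ * target of EFX_RECYCLE_RING_SIZE_10G (256) buffers gives a ring of
+ * roundup_pow_of_two(256 / 2) = 128 pages (see efx_init_rx_recycle_ring()).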
+ */ +static void efx_recycle_rx_page(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_nic *efx = rx_queue->efx; + struct page *page = rx_buf->page; + unsigned int index; + + /* Only recycle the page after processing the final buffer. */ + if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) + return; + + index = rx_queue->page_add & rx_queue->page_ptr_mask; + if (rx_queue->page_ring[index] == NULL) { + unsigned int read_index = rx_queue->page_remove & + rx_queue->page_ptr_mask; + + /* The next slot in the recycle ring is available, but + * increment page_remove if the read pointer currently + * points here. + */ + if (read_index == index) + ++rx_queue->page_remove; + rx_queue->page_ring[index] = page; + ++rx_queue->page_add; + return; + } + ++rx_queue->page_recycle_full; + efx_unmap_rx_buffer(efx, rx_buf); + put_page(rx_buf->page); +} + +/* Recycle the pages that are used by buffers that have just been received. */ +void efx_siena_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + if (unlikely(!rx_queue->page_ring)) + return; + + do { + efx_recycle_rx_page(channel, rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); +} + +void efx_siena_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + efx_siena_recycle_rx_pages(channel, rx_buf, n_frags); + + efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); +} + +static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) +{ + unsigned int bufs_in_recycle_ring, page_ring_size; + struct efx_nic *efx = rx_queue->efx; + + bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); + page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / + efx->rx_bufs_per_page); + rx_queue->page_ring = kcalloc(page_ring_size, + sizeof(*rx_queue->page_ring), GFP_KERNEL); + if (!rx_queue->page_ring) + rx_queue->page_ptr_mask = 0; + else + rx_queue->page_ptr_mask = page_ring_size - 1; +} + +static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + int i; + + if (unlikely(!rx_queue->page_ring)) + return; + + /* Unmap and release the pages in the recycle ring. Remove the ring. */ + for (i = 0; i <= rx_queue->page_ptr_mask; i++) { + struct page *page = rx_queue->page_ring[i]; + struct efx_rx_page_state *state; + + if (page == NULL) + continue; + + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + } + kfree(rx_queue->page_ring); + rx_queue->page_ring = NULL; +} + +static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ + /* Release the page reference we hold for the buffer. */ + if (rx_buf->page) + put_page(rx_buf->page); + + /* If this is the last buffer in a page, unmap and free it. 
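+	 * (The page was DMA-mapped once but referenced once per buffer
+	 * carved from it, so the mapping is only torn down when the final
+	 * buffer is finished with.)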
*/ + if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { + efx_unmap_rx_buffer(rx_queue->efx, rx_buf); + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); + } + rx_buf->page = NULL; +} + +int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int entries; + int rc; + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); + EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); + rx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating RX queue %d size %#x mask %#x\n", + efx_rx_queue_index(rx_queue), efx->rxq_entries, + rx_queue->ptr_mask); + + /* Allocate RX buffers */ + rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), + GFP_KERNEL); + if (!rx_queue->buffer) + return -ENOMEM; + + rc = efx_nic_probe_rx(rx_queue); + if (rc) { + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; + } + + return rc; +} + +void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue) +{ + unsigned int max_fill, trigger, max_trigger; + struct efx_nic *efx = rx_queue->efx; + int rc = 0; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); + + /* Initialise ptr fields */ + rx_queue->added_count = 0; + rx_queue->notified_count = 0; + rx_queue->removed_count = 0; + rx_queue->min_fill = -1U; + efx_init_rx_recycle_ring(rx_queue); + + rx_queue->page_remove = 0; + rx_queue->page_add = rx_queue->page_ptr_mask + 1; + rx_queue->page_recycle_count = 0; + rx_queue->page_recycle_failed = 0; + rx_queue->page_recycle_full = 0; + + /* Initialise limit fields */ + max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; + max_trigger = + max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; + if (rx_refill_threshold != 0) { + trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; + if (trigger > max_trigger) + trigger = max_trigger; + } else { + trigger = max_trigger; + } + + rx_queue->max_fill = max_fill; + rx_queue->fast_fill_trigger = trigger; + rx_queue->refill_enabled = true; + + /* Initialise XDP queue information */ + rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, + rx_queue->core_index, 0); + + if (rc) { + netif_err(efx, rx_err, efx->net_dev, + "Failure to initialise XDP queue information rc=%d\n", + rc); + efx->xdp_rxq_info_failed = true; + } else { + rx_queue->xdp_rxq_info_valid = true; + } + + /* Set up RX descriptor ring */ + efx_nic_init_rx(rx_queue); +} + +void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_rx_buffer *rx_buf; + int i; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); + + del_timer_sync(&rx_queue->slow_fill); + + /* Release RX buffers from the current read ptr to the write ptr */ + if (rx_queue->buffer) { + for (i = rx_queue->removed_count; i < rx_queue->added_count; + i++) { + unsigned int index = i & rx_queue->ptr_mask; + + rx_buf = efx_rx_buffer(rx_queue, index); + efx_fini_rx_buffer(rx_queue, rx_buf); + } + } + + efx_fini_rx_recycle_ring(rx_queue); + + if (rx_queue->xdp_rxq_info_valid) + xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); + + rx_queue->xdp_rxq_info_valid = false; +} + +void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue) +{ + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); + + efx_nic_remove_rx(rx_queue); + + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; +} + +/* Unmap a 
DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+				struct efx_rx_buffer *rx_buf)
+{
+	struct page *page = rx_buf->page;
+
+	if (page) {
+		struct efx_rx_page_state *state = page_address(page);
+
+		dma_unmap_page(&efx->pci_dev->dev,
+			       state->dma_addr,
+			       PAGE_SIZE << efx->rx_buffer_order,
+			       DMA_FROM_DEVICE);
+	}
+}
+
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+			       struct efx_rx_buffer *rx_buf,
+			       unsigned int num_bufs)
+{
+	do {
+		if (rx_buf->page) {
+			put_page(rx_buf->page);
+			rx_buf->page = NULL;
+		}
+		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+	} while (--num_bufs);
+}
+
+void efx_siena_rx_slow_fill(struct timer_list *t)
+{
+	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+
+	/* Post an event to cause NAPI to run and refill the queue */
+	efx_nic_generate_fill_event(rx_queue);
+	++rx_queue->slow_fill_count;
+}
+
+static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
+}
+
+/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
+ *
+ * @rx_queue:		Efx RX queue
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * a struct efx_rx_buffer for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+	unsigned int page_offset, index, count;
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_rx_page_state *state;
+	struct efx_rx_buffer *rx_buf;
+	dma_addr_t dma_addr;
+	struct page *page;
+
+	count = 0;
+	do {
+		page = efx_reuse_page(rx_queue);
+		if (page == NULL) {
+			page = alloc_pages(__GFP_COMP |
+					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
+					   efx->rx_buffer_order);
+			if (unlikely(page == NULL))
+				return -ENOMEM;
+			dma_addr =
+				dma_map_page(&efx->pci_dev->dev, page, 0,
+					     PAGE_SIZE << efx->rx_buffer_order,
+					     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+						       dma_addr))) {
+				__free_pages(page, efx->rx_buffer_order);
+				return -EIO;
+			}
+			state = page_address(page);
+			state->dma_addr = dma_addr;
+		} else {
+			state = page_address(page);
+			dma_addr = state->dma_addr;
+		}
+
+		dma_addr += sizeof(struct efx_rx_page_state);
+		page_offset = sizeof(struct efx_rx_page_state);
+
+		do {
+			index = rx_queue->added_count & rx_queue->ptr_mask;
+			rx_buf = efx_rx_buffer(rx_queue, index);
+			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+					   EFX_XDP_HEADROOM;
+			rx_buf->page = page;
+			rx_buf->page_offset = page_offset + efx->rx_ip_align +
+					      EFX_XDP_HEADROOM;
+			rx_buf->len = efx->rx_dma_len;
+			rx_buf->flags = 0;
+			++rx_queue->added_count;
+			get_page(page);
+			dma_addr += efx->rx_page_buf_step;
+			page_offset += efx->rx_page_buf_step;
+		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+	} while (++count < efx->rx_pages_per_batch);
+
+	return 0;
+}
+
+void efx_siena_rx_config_page_split(struct efx_nic *efx)
+{
+	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
+				      EFX_RX_BUF_ALIGNMENT);
+	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+		 efx->rx_page_buf_step);
+	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+				  efx->rx_bufs_per_page;
+	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+					       efx->rx_bufs_per_page);
+}
+
+/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue:	RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+					bool atomic)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned int fill_level, batch_size;
+	int space, rc = 0;
+
+	if (!rx_queue->refill_enabled)
+		return;
+
+	/* Calculate current fill level, and exit if we don't need to fill */
+	fill_level = (rx_queue->added_count - rx_queue->removed_count);
+	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+	if (fill_level >= rx_queue->fast_fill_trigger)
+		goto out;
+
+	/* Record minimum fill level */
+	if (unlikely(fill_level < rx_queue->min_fill)) {
+		if (fill_level)
+			rx_queue->min_fill = fill_level;
+	}
+
+	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+	space = rx_queue->max_fill - fill_level;
+	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
+
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filling descriptor ring from"
+		   " level %d to level %d\n",
+		   efx_rx_queue_index(rx_queue), fill_level,
+		   rx_queue->max_fill);
+
+	do {
+		rc = efx_init_rx_buffers(rx_queue, atomic);
+		if (unlikely(rc)) {
+			/* Ensure that we don't leave the rx queue empty */
+			efx_schedule_slow_fill(rx_queue);
+			goto out;
+		}
+	} while ((space -= batch_size) >= batch_size);
+
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filled descriptor ring "
+		   "to level %d\n", efx_rx_queue_index(rx_queue),
+		   rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+	if (rx_queue->notified_count != rx_queue->added_count)
+		efx_nic_notify_rx_desc(rx_queue);
+}
+
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+void
+efx_siena_rx_packet_gro(struct efx_channel *channel,
+			struct efx_rx_buffer *rx_buf,
+			unsigned int n_frags, u8 *eh, __wsum csum)
+{
+	struct napi_struct *napi = &channel->napi_str;
+	struct efx_nic *efx = channel->efx;
+	struct sk_buff *skb;
+
+	skb = napi_get_frags(napi);
+	if (unlikely(!skb)) {
+		struct efx_rx_queue *rx_queue;
+
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
+		return;
+	}
+
+	if (efx->net_dev->features & NETIF_F_RXHASH)
+		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+			     PKT_HASH_TYPE_L3);
+	if (csum) {
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	} else {
+		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + } + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); + + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + + skb->data_len = skb->len; + skb->truesize += n_frags * efx->rx_buffer_truesize; + + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + napi_gro_frags(napi); +} + +/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because + * (a) this is an infrequent control-plane operation and (b) n is small (max 64) + */ +struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx, *new; + u32 id = 1; /* Don't use zero, that refers to the master RSS context */ + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + /* Search for first gap in the numbering */ + list_for_each_entry(ctx, head, list) { + if (ctx->user_id != id) + break; + id++; + /* Check for wrap. If this happens, we have nearly 2^32 + * allocated RSS contexts, which seems unlikely. + */ + if (WARN_ON_ONCE(!id)) + return NULL; + } + + /* Create the new entry */ + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + new->rx_hash_udp_4tuple = false; + + /* Insert the new entry into the gap */ + new->user_id = id; + list_add_tail(&new->list, &ctx->list); + return new; +} + +struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx, + u32 id) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + list_for_each_entry(ctx, head, list) + if (ctx->user_id == id) + return ctx; + return NULL; +} + +void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx) +{ + list_del(&ctx->list); + kfree(ctx); +} + +void efx_siena_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) + ctx->rx_indir_table[i] = + ethtool_rxfh_indir_default(i, efx->rss_spread); +} + +/** + * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient + * @spec: Specification to test + * + * Return: %true if the specification is a non-drop RX filter that + * matches a local MAC address I/G bit value of 1 or matches a local + * IPv4 or IPv6 address value in the respective multicast address + * range. Otherwise %false. 
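+ *
+ * For IPv4 this means a destination in 224.0.0.0/4; for IPv6, any
+ * destination address whose first byte is 0xff.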
+ */ +bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec) +{ + if (!(spec->flags & EFX_FILTER_FLAG_RX) || + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + return false; + + if (spec->match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) && + is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + ipv4_is_multicast(spec->loc_host[0])) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] == 0xff) + return true; + } + + return false; +} + +bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule, + unsigned int filter_idx, bool *force) +{ + if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { + /* ARFS is currently updating this entry, leave it */ + return false; + } + if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { + /* ARFS tried and failed to update this, so it's probably out + * of date. Remove the filter and the ARFS rule entry. + */ + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + *force = true; + return true; + } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ + /* ARFS has moved on, so old filter is not needed. Since we did + * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will + * not be removed by efx_siena_rps_hash_del() subsequently. + */ + *force = true; + return true; + } + /* Remove it iff ARFS wants to. 
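+	 * (The rule is current and owned by ARFS, so normal expiry may
+	 * proceed; *force is left untouched.)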
*/ + return true; +} + +static +struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + u32 hash = efx_siena_filter_spec_hash(spec); + + lockdep_assert_held(&efx->rps_hash_lock); + if (!efx->rps_hash_table) + return NULL; + return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; +} + +struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_siena_filter_spec_equal(spec, &rule->spec)) + return rule; + } + return NULL; +} + +static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_siena_filter_spec_equal(spec, &rule->spec)) { + *new = false; + return rule; + } + } + rule = kmalloc(sizeof(*rule), GFP_ATOMIC); + *new = true; + if (rule) { + memcpy(&rule->spec, spec, sizeof(rule->spec)); + hlist_add_head(&rule->node, head); + } + return rule; +} + +void efx_siena_rps_hash_del(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (WARN_ON(!head)) + return; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_siena_filter_spec_equal(spec, &rule->spec)) { + /* Someone already reused the entry. We know that if + * this check doesn't fire (i.e. filter_id == REMOVING) + * then the REMOVING mark was put there by our caller, + * because caller is holding a lock on filter table and + * only holders of that lock set REMOVING. + */ + if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) + return; + hlist_del(node); + kfree(rule); + return; + } + } + /* We didn't find it. 
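+	 * The caller marked the filter REMOVING while holding the filter
+	 * table lock, so the entry should still have been present; warn so
+	 * the inconsistency is visible.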
*/ + WARN_ON(1); +} +#endif + +int efx_siena_probe_filters(struct efx_nic *efx) +{ + int rc; + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + rc = efx->type->filter_table_probe(efx); + if (rc) + goto out_unlock; + +#ifdef CONFIG_RFS_ACCEL + if (efx->type->offload_features & NETIF_F_NTUPLE) { + struct efx_channel *channel; + int i, success = 1; + + efx_for_each_channel(channel, efx) { + channel->rps_flow_id = + kcalloc(efx->type->max_rx_ip_filters, + sizeof(*channel->rps_flow_id), + GFP_KERNEL); + if (!channel->rps_flow_id) + success = 0; + else + for (i = 0; + i < efx->type->max_rx_ip_filters; + ++i) + channel->rps_flow_id[i] = + RPS_FLOW_ID_INVALID; + channel->rfs_expire_index = 0; + channel->rfs_filter_count = 0; + } + + if (!success) { + efx_for_each_channel(channel, efx) + kfree(channel->rps_flow_id); + efx->type->filter_table_remove(efx); + rc = -ENOMEM; + goto out_unlock; + } + } +#endif +out_unlock: + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + return rc; +} + +void efx_siena_remove_filters(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + cancel_delayed_work_sync(&channel->filter_work); + kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; + } +#endif + down_write(&efx->filter_sem); + efx->type->filter_table_remove(efx); + up_write(&efx->filter_sem); +} + +#ifdef CONFIG_RFS_ACCEL + +static void efx_filter_rfs_work(struct work_struct *data) +{ + struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, + work); + struct efx_nic *efx = netdev_priv(req->net_dev); + struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); + int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; + int rc; + + rc = efx->type->filter_insert(efx, &req->spec, true); + if (rc >= 0) + /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ + rc %= efx->type->max_rx_ip_filters; + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_siena_rps_hash_find(efx, &req->spec); + /* The rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. + */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } + if (rc >= 0) { + /* Remember this so we can check whether to expire the filter + * later. + */ + mutex_lock(&efx->rps_mutex); + if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) + channel->rfs_filter_count++; + channel->rps_flow_id[rc] = req->flow_id; + mutex_unlock(&efx->rps_mutex); + + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_info(efx, rx_status, efx->net_dev, + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? 
"TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_succeeded++; + } else { + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_failed++; + /* We're overloading the NIC's filter tables, so let's do a + * chunk of extra expiry work. + */ + __efx_siena_filter_rfs_expire(channel, + min(channel->rfs_filter_count, + 100u)); + } + + /* Release references */ + clear_bit(slot_idx, &efx->rps_slot_map); + dev_put(req->net_dev); +} + +int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; + struct flow_keys fk; + int slot_idx; + bool new; + int rc; + + /* find a free slot */ + for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) + if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) + break; + if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) + return -EBUSY; + + if (flow_id == RPS_FLOW_ID_INVALID) { + rc = -EINVAL; + goto out_clear; + } + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + + req = efx->rps_slot + slot_idx; + efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, + efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, + rxq_index); + req->spec.match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + req->spec.ether_type = fk.basic.n_proto; + req->spec.ip_proto = fk.basic.ip_proto; + + if (fk.basic.n_proto == htons(ETH_P_IP)) { + req->spec.rem_host[0] = fk.addrs.v4addrs.src; + req->spec.loc_host[0] = fk.addrs.v4addrs.dst; + } else { + memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, + sizeof(struct in6_addr)); + } + + req->spec.rem_port = fk.ports.src; + req->spec.loc_port = fk.ports.dst; + + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right thing */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. + */ + rc = 0; + } + + /* Queue the request */ + dev_hold(req->net_dev = net_dev); + INIT_WORK(&req->work, efx_filter_rfs_work); + req->rxq_index = rxq_index; + req->flow_id = flow_id; + schedule_work(&req->work); + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); +out_clear: + clear_bit(slot_idx, &efx->rps_slot_map); + return rc; +} + +bool __efx_siena_filter_rfs_expire(struct efx_channel *channel, + unsigned int quota) +{ + bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); + struct efx_nic *efx = channel->efx; + unsigned int index, size, start; + u32 flow_id; + + if (!mutex_trylock(&efx->rps_mutex)) + return false; + expire_one = efx->type->filter_rfs_expire_one; + index = channel->rfs_expire_index; + start = index; + size = efx->type->max_rx_ip_filters; + while (quota) { + flow_id = channel->rps_flow_id[index]; + + if (flow_id != RPS_FLOW_ID_INVALID) { + quota--; + if (expire_one(efx, flow_id, index)) { + netif_info(efx, rx_status, efx->net_dev, + "expired filter %d [channel %u flow %u]\n", + index, channel->channel, flow_id); + channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + channel->rfs_filter_count--; + } + } + if (++index == size) + index = 0; + /* If we were called with a quota that exceeds the total number + * of filters in the table (which shouldn't happen, but could + * if two callers race), ensure that we don't loop forever - + * stop when we've examined every row of the table. 
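+		 * (E.g. with size 512 and start 100 the scan visits rows
+		 * 100..511 and then 0..99 before giving up.)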
+ */ + if (index == start) + break; + } + + channel->rfs_expire_index = index; + mutex_unlock(&efx->rps_mutex); + return true; +} + +#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h new file mode 100644 index 0000000000..6b37f83ecb --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/rx_common.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_RX_COMMON_H +#define EFX_RX_COMMON_H + +/* Preferred number of descriptors to fill at once */ +#define EFX_RX_PREFERRED_BATCH 8U + +/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */ +#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ + EFX_RX_USR_BUF_SIZE) + +/* Number of RX buffers to recycle pages for. When creating the RX page recycle + * ring, this number is divided by the number of buffers per page to calculate + * the number of pages to store in the RX page recycle ring. + */ +#define EFX_RECYCLE_RING_SIZE_10G 256 + +static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf) +{ + return page_address(buf->page) + buf->page_offset; +} + +static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset)); +#else + const u8 *data = eh + efx->rx_packet_hash_offset; + + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void efx_siena_rx_slow_fill(struct timer_list *t); + +void efx_siena_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); +void efx_siena_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); + +int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue); +void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue); +void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue); +void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue); + +static inline void efx_sync_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf, + unsigned int len) +{ + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, + DMA_FROM_DEVICE); +} + +void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs); + +void efx_siena_rx_config_page_split(struct efx_nic *efx); +void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, + bool atomic); + +void +efx_siena_rx_packet_gro(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh, __wsum csum); + +struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx); +struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx, + u32 id); +void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx); +void efx_siena_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx); + +bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool 
efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+				 const struct efx_filter_spec *right);
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+			      unsigned int filter_idx, bool *force);
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
+					const struct efx_filter_spec *spec);
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+			    const struct efx_filter_spec *spec);
+
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+			 u16 rxq_index, u32 flow_id);
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+				   unsigned int quota);
+#endif
+
+int efx_siena_probe_filters(struct efx_nic *efx);
+void efx_siena_remove_filters(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c
new file mode 100644
index 0000000000..07715a3d6b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/selftest.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "nic.h"
+#include "mcdi_port_common.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ *   slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ *   tasklet or normal task to be given higher priority than our IRQ
+ *   threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT HZ
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+	struct ethhdr header;
+	struct iphdr ip;
+	struct udphdr udp;
+	__be16 iteration;
+	char msg[64];
+} __packed;
+
+/* Loopback test source MAC address */
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
+	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char payload_msg[] =
+	"Hello world!
This is an Efx loopback test in progress!"; + +/* Interrupt mode names */ +static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX; +static const char *const efx_siena_interrupt_mode_names[] = { + [EFX_INT_MODE_MSIX] = "MSI-X", + [EFX_INT_MODE_MSI] = "MSI", + [EFX_INT_MODE_LEGACY] = "legacy", +}; +#define INT_MODE(efx) \ + STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode) + +/** + * struct efx_loopback_state - persistent state during a loopback selftest + * @flush: Drop all packets in efx_siena_loopback_rx_packet + * @packet_count: Number of packets being used in this test + * @skbs: An array of skbs transmitted + * @offload_csum: Checksums are being offloaded + * @rx_good: RX good packet count + * @rx_bad: RX bad packet count + * @payload: Payload used in tests + */ +struct efx_loopback_state { + bool flush; + int packet_count; + struct sk_buff **skbs; + bool offload_csum; + atomic_t rx_good; + atomic_t rx_bad; + struct efx_loopback_payload payload; +}; + +/* How long to wait for all the packets to arrive (in ms) */ +#define LOOPBACK_TIMEOUT_MS 1000 + +/************************************************************************** + * + * MII, NVRAM and register tests + * + **************************************************************************/ + +static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + rc = efx_siena_mcdi_phy_test_alive(efx); + tests->phy_alive = rc ? -1 : 1; + + return rc; +} + +static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + if (efx->type->test_nvram) { + rc = efx->type->test_nvram(efx); + if (rc == -EPERM) + rc = 0; + else + tests->nvram = rc ? -1 : 1; + } + + return rc; +} + +/************************************************************************** + * + * Interrupt and event queue testing + * + **************************************************************************/ + +/* Test generation and receipt of interrupts */ +static int efx_test_interrupts(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + unsigned long timeout, wait; + int cpu; + int rc; + + netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); + tests->interrupt = -1; + + rc = efx_siena_irq_test_start(efx); + if (rc == -ENOTSUPP) { + netif_dbg(efx, drv, efx->net_dev, + "direct interrupt testing not supported\n"); + tests->interrupt = 0; + return 0; + } + + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of test interrupt. 
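+	 * The sleep doubles on each pass (1, 2, 4, ... jiffies), with the
+	 * whole wait bounded by IRQ_TIMEOUT.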
*/ + netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); + do { + schedule_timeout_uninterruptible(wait); + cpu = efx_nic_irq_test_irq_cpu(efx); + if (cpu >= 0) + goto success; + wait *= 2; + } while (time_before(jiffies, timeout)); + + netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); + return -ETIMEDOUT; + + success: + netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", + INT_MODE(efx), cpu); + tests->interrupt = 1; + return 0; +} + +/* Test generation and receipt of interrupting events */ +static int efx_test_eventq_irq(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + struct efx_channel *channel; + unsigned int read_ptr[EFX_MAX_CHANNELS]; + unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0; + unsigned long timeout, wait; + + BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG); + + efx_for_each_channel(channel, efx) { + read_ptr[channel->channel] = channel->eventq_read_ptr; + set_bit(channel->channel, &dma_pend); + set_bit(channel->channel, &int_pend); + efx_siena_event_test_start(channel); + } + + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of interrupts. NAPI processing may or may + * not complete in time, but we can cope in any case. + */ + do { + schedule_timeout_uninterruptible(wait); + + efx_for_each_channel(channel, efx) { + efx_siena_stop_eventq(channel); + if (channel->eventq_read_ptr != + read_ptr[channel->channel]) { + set_bit(channel->channel, &napi_ran); + clear_bit(channel->channel, &dma_pend); + clear_bit(channel->channel, &int_pend); + } else { + if (efx_siena_event_present(channel)) + clear_bit(channel->channel, &dma_pend); + if (efx_nic_event_test_irq_cpu(channel) >= 0) + clear_bit(channel->channel, &int_pend); + } + efx_siena_start_eventq(channel); + } + + wait *= 2; + } while ((dma_pend || int_pend) && time_before(jiffies, timeout)); + + efx_for_each_channel(channel, efx) { + bool dma_seen = !test_bit(channel->channel, &dma_pend); + bool int_seen = !test_bit(channel->channel, &int_pend); + + tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; + tests->eventq_int[channel->channel] = int_seen ? 1 : -1; + + if (dma_seen && int_seen) { + netif_dbg(efx, drv, efx->net_dev, + "channel %d event queue passed (with%s NAPI)\n", + channel->channel, + test_bit(channel->channel, &napi_ran) ? + "" : "out"); + } else { + /* Report failure and whether either interrupt or DMA + * worked + */ + netif_err(efx, drv, efx->net_dev, + "channel %d timed out waiting for event queue\n", + channel->channel); + if (int_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d saw interrupt " + "during event queue test\n", + channel->channel); + if (dma_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d event was generated, but " + "failed to trigger an interrupt\n", + channel->channel); + } + } + + return (dma_pend || int_pend) ? -ETIMEDOUT : 0; +} + +static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags) +{ + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_siena_mcdi_phy_run_tests(efx, tests->phy_ext, flags); + mutex_unlock(&efx->mac_lock); + if (rc == -EPERM) + rc = 0; + else + netif_info(efx, drv, efx->net_dev, + "%s phy selftest\n", rc ? "Failed" : "Passed"); + + return rc; +} + +/************************************************************************** + * + * Loopback testing + * NB Only one loopback test can be executing concurrently. 
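+ * (Exclusion is provided by efx->loopback_selftest, which is set for the
+ * duration of a test and checked with BUG_ON in efx_test_loopbacks().)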
+ * + **************************************************************************/ + +/* Loopback test RX callback + * This is called for each received packet during loopback testing. + */ +void efx_siena_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *received; + struct efx_loopback_payload *payload; + + BUG_ON(!buf_ptr); + + /* If we are just flushing, then drop the packet */ + if ((state == NULL) || state->flush) + return; + + payload = &state->payload; + + received = (struct efx_loopback_payload *) buf_ptr; + received->ip.saddr = payload->ip.saddr; + if (state->offload_csum) + received->ip.check = payload->ip.check; + + /* Check that header exists */ + if (pkt_len < sizeof(received->header)) { + netif_err(efx, drv, efx->net_dev, + "saw runt RX packet (length %d) in %s loopback " + "test\n", pkt_len, LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that the ethernet header exists */ + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw non-loopback RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check packet length */ + if (pkt_len != sizeof(*payload)) { + netif_err(efx, drv, efx->net_dev, + "saw incorrect RX packet length %d (wanted %d) in " + "%s loopback test\n", pkt_len, (int)sizeof(*payload), + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that IP header matches */ + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted IP header in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that msg and padding matches */ + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that iteration matches */ + if (received->iteration != payload->iteration) { + netif_err(efx, drv, efx->net_dev, + "saw RX packet from iteration %d (wanted %d) in " + "%s loopback test\n", ntohs(received->iteration), + ntohs(payload->iteration), LOOPBACK_MODE(efx)); + goto err; + } + + /* Increase correct RX count */ + netif_vdbg(efx, drv, efx->net_dev, + "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); + + atomic_inc(&state->rx_good); + return; + + err: +#ifdef DEBUG + if (atomic_read(&state->rx_bad) == 0) { + netif_err(efx, drv, efx->net_dev, "received packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + buf_ptr, pkt_len, 0); + netif_err(efx, drv, efx->net_dev, "expected packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + &state->payload, sizeof(state->payload), 0); + } +#endif + atomic_inc(&state->rx_bad); +} + +/* Initialise an efx_siena_selftest_state for a new iteration */ +static void efx_iterate_state(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct net_device *net_dev = efx->net_dev; + struct efx_loopback_payload *payload = &state->payload; + + /* Initialise the layerII header */ + ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr); + ether_addr_copy((u8 *)&payload->header.h_source, payload_source); + payload->header.h_proto = htons(ETH_P_IP); + + /* saddr set later and used as incrementing count */ + payload->ip.daddr = htonl(INADDR_LOOPBACK); + payload->ip.ihl = 5; + payload->ip.check = (__force __sum16) htons(0xdead); + 
payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); + payload->ip.version = IPVERSION; + payload->ip.protocol = IPPROTO_UDP; + + /* Initialise udp header */ + payload->udp.source = 0; + payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - + sizeof(struct iphdr)); + payload->udp.check = 0; /* checksum ignored */ + + /* Fill out payload */ + payload->iteration = htons(ntohs(payload->iteration) + 1); + memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); + + /* Fill out remaining state members */ + atomic_set(&state->rx_good, 0); + atomic_set(&state->rx_bad, 0); + smp_wmb(); +} + +static int efx_begin_loopback(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *payload; + struct sk_buff *skb; + int i; + netdev_tx_t rc; + + /* Transmit N copies of buffer */ + for (i = 0; i < state->packet_count; i++) { + /* Allocate an skb, holding an extra reference for + * transmit completion counting */ + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); + if (!skb) + return -ENOMEM; + state->skbs[i] = skb; + skb_get(skb); + + /* Copy the payload in, incrementing the source address to + * exercise the rss vectors */ + payload = skb_put(skb, sizeof(state->payload)); + memcpy(payload, &state->payload, sizeof(state->payload)); + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); + + /* Ensure everything we've written is visible to the + * interrupt handler. */ + smp_wmb(); + + netif_tx_lock_bh(efx->net_dev); + rc = efx_enqueue_skb(tx_queue, skb); + netif_tx_unlock_bh(efx->net_dev); + + if (rc != NETDEV_TX_OK) { + netif_err(efx, drv, efx->net_dev, + "TX queue %d could not transmit packet %d of " + "%d in %s loopback test\n", tx_queue->label, + i + 1, state->packet_count, + LOOPBACK_MODE(efx)); + + /* Defer cleaning up the other skbs for the caller */ + kfree_skb(skb); + return -EPIPE; + } + } + + return 0; +} + +static int efx_poll_loopback(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + + return atomic_read(&state->rx_good) == state->packet_count; +} + +static int efx_end_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct sk_buff *skb; + int tx_done = 0, rx_good, rx_bad; + int i, rc = 0; + + netif_tx_lock_bh(efx->net_dev); + + /* Count the number of tx completions, and decrement the refcnt. Any + * skbs not already completed will be free'd when the queue is flushed */ + for (i = 0; i < state->packet_count; i++) { + skb = state->skbs[i]; + if (skb && !skb_shared(skb)) + ++tx_done; + dev_kfree_skb(skb); + } + + netif_tx_unlock_bh(efx->net_dev); + + /* Check TX completion and received packet counts */ + rx_good = atomic_read(&state->rx_good); + rx_bad = atomic_read(&state->rx_bad); + if (tx_done != state->packet_count) { + /* Don't free the skbs; they will be picked up on TX + * overflow or channel teardown. 
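+		 * (Each skb was queued holding an extra reference, so one
+		 * that is still shared here never had its TX completion.)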
+ */ + netif_err(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "TX completion events in %s loopback test\n", + tx_queue->label, tx_done, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Allow to fall through so we see the RX errors as well */ + } + + /* We may always be up to a flush away from our desired packet total */ + if (rx_good != state->packet_count) { + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "received packets in %s loopback test\n", + tx_queue->label, rx_good, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Fall through */ + } + + /* Update loopback test structure */ + lb_tests->tx_sent[tx_queue->label] += state->packet_count; + lb_tests->tx_done[tx_queue->label] += tx_done; + lb_tests->rx_good += rx_good; + lb_tests->rx_bad += rx_bad; + + return rc; +} + +static int +efx_test_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + int i, begin_rc, end_rc; + + for (i = 0; i < 3; i++) { + /* Determine how many packets to send */ + state->packet_count = efx->txq_entries / 3; + state->packet_count = min(1 << (i << 2), state->packet_count); + state->skbs = kcalloc(state->packet_count, + sizeof(state->skbs[0]), GFP_KERNEL); + if (!state->skbs) + return -ENOMEM; + state->flush = false; + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d (hw %d) testing %s loopback with %d packets\n", + tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + efx_iterate_state(efx); + begin_rc = efx_begin_loopback(tx_queue); + + /* This will normally complete very quickly, but be + * prepared to wait much longer. */ + msleep(1); + if (!efx_poll_loopback(efx)) { + msleep(LOOPBACK_TIMEOUT_MS); + efx_poll_loopback(efx); + } + + end_rc = efx_end_loopback(tx_queue, lb_tests); + kfree(state->skbs); + + if (begin_rc || end_rc) { + /* Wait a while to ensure there are no packets + * floating around after a failure. */ + schedule_timeout_uninterruptible(HZ / 10); + return begin_rc ? begin_rc : end_rc; + } + } + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d passed %s loopback test with a burst length " + "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx), + state->packet_count); + + return 0; +} + +/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but + * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it + * to delay and retry. Therefore, it's safer to just poll directly. Wait + * for link up and any faults to dissipate. 
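+ * The link must be seen up on two consecutive polls, 100 ms apart, before
+ * it is trusted; the overall wait is bounded at roughly four seconds.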
*/ +static int efx_wait_for_link(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + int count, link_up_count = 0; + bool link_up; + + for (count = 0; count < 40; count++) { + schedule_timeout_uninterruptible(HZ / 10); + + if (efx->type->monitor != NULL) { + mutex_lock(&efx->mac_lock); + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + mutex_lock(&efx->mac_lock); + link_up = link_state->up; + if (link_up) + link_up = !efx->type->check_mac_fault(efx); + mutex_unlock(&efx->mac_lock); + + if (link_up) { + if (++link_up_count == 2) + return 0; + } else { + link_up_count = 0; + } + } + + return -ETIMEDOUT; +} + +static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int loopback_modes) +{ + enum efx_loopback_mode mode; + struct efx_loopback_state *state; + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + int rc = 0; + + /* Set the port loopback_selftest member. From this point on + * all received packets will be dropped. Mark the state as + * "flushing" so all inflight packets are dropped */ + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) + return -ENOMEM; + BUG_ON(efx->loopback_selftest); + state->flush = true; + efx->loopback_selftest = state; + + /* Test all supported loopback modes */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(loopback_modes & (1 << mode))) + continue; + + /* Move the port into the specified loopback mode. */ + state->flush = true; + mutex_lock(&efx->mac_lock); + efx->loopback_mode = mode; + rc = __efx_siena_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "unable to move into %s loopback\n", + LOOPBACK_MODE(efx)); + goto out; + } + + rc = efx_wait_for_link(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "loopback %s never came up\n", + LOOPBACK_MODE(efx)); + goto out; + } + + /* Test all enabled types of TX queue */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + state->offload_csum = (tx_queue->type & + EFX_TXQ_TYPE_OUTER_CSUM); + rc = efx_test_loopback(tx_queue, + &tests->loopback[mode]); + if (rc) + goto out; + } + } + + out: + /* Remove the flush. The caller will remove the loopback setting */ + state->flush = true; + efx->loopback_selftest = NULL; + wmb(); + kfree(state); + + if (rc == -EPERM) + rc = 0; + + return rc; +} + +/************************************************************************** + * + * Entry point + * + *************************************************************************/ + +int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int flags) +{ + enum efx_loopback_mode loopback_mode = efx->loopback_mode; + int phy_mode = efx->phy_mode; + int rc_test = 0, rc_reset, rc; + + efx_siena_selftest_async_cancel(efx); + + /* Online (i.e. non-disruptive) testing + * This checks interrupt generation, event delivery and PHY presence. */ + + rc = efx_test_phy_alive(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_nvram(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_interrupts(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_eventq_irq(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + if (rc_test) + return rc_test; + + if (!(flags & ETH_TEST_FL_OFFLINE)) + return efx_test_phy(efx, tests, flags); + + /* Offline (i.e. disruptive) testing + * This checks MAC and PHY loopback on the specified port. 
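+	 * (Only reached when ETH_TEST_FL_OFFLINE is set; the online-only
+	 * path has already returned above.)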
*/ + + /* Detach the device so the kernel doesn't transmit during the + * loopback test and the watchdog timeout doesn't fire. + */ + efx_device_detach_sync(efx); + + if (efx->type->test_chip) { + rc_reset = efx->type->test_chip(efx, tests); + if (rc_reset) { + netif_err(efx, hw, efx->net_dev, + "Unable to recover from chip test\n"); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); + return rc_reset; + } + + if ((tests->memory < 0 || tests->registers < 0) && !rc_test) + rc_test = -EIO; + } + + /* Ensure that the phy is powered and out of loopback + * for the bist and loopback tests */ + mutex_lock(&efx->mac_lock); + efx->phy_mode &= ~PHY_MODE_LOW_POWER; + efx->loopback_mode = LOOPBACK_NONE; + __efx_siena_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + rc = efx_test_phy(efx, tests, flags); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_loopbacks(efx, tests, efx->loopback_modes); + if (rc && !rc_test) + rc_test = rc; + + /* restore the PHY to the previous state */ + mutex_lock(&efx->mac_lock); + efx->phy_mode = phy_mode; + efx->loopback_mode = loopback_mode; + __efx_siena_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + efx_device_attach_if_not_resetting(efx); + + return rc_test; +} + +void efx_siena_selftest_async_start(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_siena_event_test_start(channel); + schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT); +} + +void efx_siena_selftest_async_cancel(struct efx_nic *efx) +{ + cancel_delayed_work_sync(&efx->selftest_work); +} + +static void efx_siena_selftest_async_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + selftest_work.work); + struct efx_channel *channel; + int cpu; + + efx_for_each_channel(channel, efx) { + cpu = efx_nic_event_test_irq_cpu(channel); + if (cpu < 0) + netif_err(efx, ifup, efx->net_dev, + "channel %d failed to trigger an interrupt\n", + channel->channel); + else + netif_dbg(efx, ifup, efx->net_dev, + "channel %d triggered interrupt on CPU %d\n", + channel->channel, cpu); + } +} + +void efx_siena_selftest_async_init(struct efx_nic *efx) +{ + INIT_DELAYED_WORK(&efx->selftest_work, efx_siena_selftest_async_work); +} diff --git a/drivers/net/ethernet/sfc/siena/selftest.h b/drivers/net/ethernet/sfc/siena/selftest.h new file mode 100644 index 0000000000..6af6e7fbfc --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/selftest.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2012 Solarflare Communications Inc. + */ + +#ifndef EFX_SELFTEST_H +#define EFX_SELFTEST_H + +#include "net_driver.h" + +/* + * Self tests + */ + +struct efx_loopback_self_tests { + int tx_sent[EFX_MAX_TXQ_PER_CHANNEL]; + int tx_done[EFX_MAX_TXQ_PER_CHANNEL]; + int rx_good; + int rx_bad; +}; + +#define EFX_MAX_PHY_TESTS 20 + +/* Efx self test results + * For fields which are not counters, 1 indicates success and -1 + * indicates failure; 0 indicates test could not be run. 
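+ * For example, after an online-only run the offline-only fields such as
+ * memory and registers remain 0, since those tests never ran.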
+ */
+struct efx_self_tests {
+	/* online tests */
+	int phy_alive;
+	int nvram;
+	int interrupt;
+	int eventq_dma[EFX_MAX_CHANNELS];
+	int eventq_int[EFX_MAX_CHANNELS];
+	/* offline tests */
+	int memory;
+	int registers;
+	int phy_ext[EFX_MAX_PHY_TESTS];
+	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
+};
+
+void efx_siena_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+				  int pkt_len);
+int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+		       unsigned int flags);
+void efx_siena_selftest_async_init(struct efx_nic *efx);
+void efx_siena_selftest_async_start(struct efx_nic *efx);
+void efx_siena_selftest_async_cancel(struct efx_nic *efx);
+
+#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c
new file mode 100644
index 0000000000..a44c8fa257
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/siena.c
@@ -0,0 +1,1113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/random.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "mcdi_port.h"
+#include "mcdi_port_common.h"
+#include "selftest.h"
+#include "siena_sriov.h"
+#include "rx_common.h"
+
+/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
+
+static void siena_init_wol(struct efx_nic *efx);
+
+
+static void siena_push_irq_moderation(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	efx_dword_t timer_cmd;
+
+	if (channel->irq_moderation_us) {
+		unsigned int ticks;
+
+		ticks = efx_siena_usecs_to_ticks(efx, channel->irq_moderation_us);
+		EFX_POPULATE_DWORD_2(timer_cmd,
+				     FRF_CZ_TC_TIMER_MODE,
+				     FFE_CZ_TIMER_MODE_INT_HLDOFF,
+				     FRF_CZ_TC_TIMER_VAL,
+				     ticks - 1);
+	} else {
+		EFX_POPULATE_DWORD_2(timer_cmd,
+				     FRF_CZ_TC_TIMER_MODE,
+				     FFE_CZ_TIMER_MODE_DIS,
+				     FRF_CZ_TC_TIMER_VAL, 0);
+	}
+	efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+			       channel->channel);
+}
+
+void efx_siena_prepare_flush(struct efx_nic *efx)
+{
+	if (efx->fc_disable++ == 0)
+		efx_siena_mcdi_set_mac(efx);
+}
+
+void siena_finish_flush(struct efx_nic *efx)
+{
+	if (--efx->fc_disable == 0)
+		efx_siena_mcdi_set_mac(efx);
+}
+
+static const struct efx_farch_register_test siena_register_tests[] = {
+	{ FR_AZ_ADR_REGION,
+	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+	{ FR_CZ_USR_EV_CFG,
+	  EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_AZ_RX_CFG,
+	  EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
+	{ FR_AZ_TX_CFG,
+	  EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
+	{ FR_AZ_TX_RESERVED,
+	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+	{ FR_AZ_SRM_TX_DC_CFG,
+	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_AZ_RX_DC_CFG,
+	  EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_AZ_RX_DC_PF_WM,
+	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_BZ_DP_CTRL,
+	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_BZ_RX_RSS_TKEY,
+	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG1, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG2, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG3, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, +}; + +static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) +{ + enum reset_type reset_method = RESET_TYPE_ALL; + int rc, rc2; + + efx_siena_reset_down(efx, reset_method); + + /* Reset the chip immediately so that it is completely + * quiescent regardless of what any VF driver does. + */ + rc = efx_siena_mcdi_reset(efx, reset_method); + if (rc) + goto out; + + tests->registers = + efx_farch_test_registers(efx, siena_register_tests, + ARRAY_SIZE(siena_register_tests)) + ? -1 : 1; + + rc = efx_siena_mcdi_reset(efx, reset_method); +out: + rc2 = efx_siena_reset_up(efx, reset_method, rc == 0); + return rc ? rc : rc2; +} + +/************************************************************************** + * + * PTP + * + ************************************************************************** + */ + +static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), + FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); +} + +static int siena_ptp_set_ts_config(struct efx_nic *efx, + struct hwtstamp_config *init) +{ + int rc; + + switch (init->rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* if TX timestamping is still requested then leave PTP on */ + return efx_siena_ptp_change_mode(efx, + init->tx_type != HWTSTAMP_TX_OFF, + efx_siena_ptp_get_mode(efx)); + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + return efx_siena_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1); + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + rc = efx_siena_ptp_change_mode(efx, true, + MC_CMD_PTP_MODE_V2_ENHANCED); + /* bug 33070 - old versions of the firmware do not support the + * improved UUID filtering option. Similarly old versions of the + * application do not expect it to be enabled. If the firmware + * does not accept the enhanced mode, fall back to the standard + * PTP v2 UUID filtering. */ + if (rc != 0) + rc = efx_siena_ptp_change_mode(efx, true, + MC_CMD_PTP_MODE_V2); + return rc; + default: + return -ERANGE; + } +} + +/************************************************************************** + * + * Device reset + * + ************************************************************************** + */ + +static int siena_map_reset_flags(u32 *flags) +{ + enum { + SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER | + ETH_RESET_OFFLOAD | ETH_RESET_MAC | + ETH_RESET_PHY), + SIENA_RESET_MC = (SIENA_RESET_PORT | + ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT), + }; + + if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) { + *flags &= ~SIENA_RESET_MC; + return RESET_TYPE_WORLD; + } + + if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) { + *flags &= ~SIENA_RESET_PORT; + return RESET_TYPE_ALL; + } + + /* no invisible reset implemented */ + + return -EINVAL; +} + +#ifdef CONFIG_EEH +/* When a PCI device is isolated from the bus, a subsequent MMIO read is + * required for the kernel EEH mechanisms to notice. 
As the Solarflare driver + * was written to minimise MMIO read (for latency) then a periodic call to check + * the EEH status of the device is required so that device recovery can happen + * in a timely fashion. + */ +static void siena_monitor(struct efx_nic *efx) +{ + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + + eeh_dev_check_failure(eehdev); +} +#endif + +static int siena_probe_nvconfig(struct efx_nic *efx) +{ + u32 caps = 0; + int rc; + + rc = efx_siena_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, + &caps); + + efx->timer_quantum_ns = + (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? + 3072 : 6144; /* 768 cycles */ + efx->timer_max_ns = efx->type->timer_period_max * + efx->timer_quantum_ns; + + return rc; +} + +static int siena_dimension_resources(struct efx_nic *efx) +{ + /* Each port has a small block of internal SRAM dedicated to + * the buffer table and descriptor caches. In theory we can + * map both blocks to one port, but we don't. + */ + efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); + return 0; +} + +/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3) + * for memory. + */ +static unsigned int siena_mem_bar(struct efx_nic *efx) +{ + return 2; +} + +static unsigned int siena_mem_map_size(struct efx_nic *efx) +{ + return FR_CZ_MC_TREG_SMEM + + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS; +} + +static int siena_probe_nic(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data; + efx_oword_t reg; + int rc; + + /* Allocate storage for hardware specific data */ + nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + nic_data->efx = efx; + efx->nic_data = nic_data; + + if (efx_farch_fpga_ver(efx) != 0) { + netif_err(efx, probe, efx->net_dev, + "Siena FPGA not supported\n"); + rc = -ENODEV; + goto fail1; + } + + efx->max_channels = EFX_MAX_CHANNELS; + efx->max_vis = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; + efx->tx_queues_per_channel = 4; + + efx_reado(efx, ®, FR_AZ_CS_DEBUG); + efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; + + rc = efx_siena_mcdi_init(efx); + if (rc) + goto fail1; + + /* Now we can reset the NIC */ + rc = efx_siena_mcdi_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); + goto fail3; + } + + siena_init_wol(efx); + + /* Allocate memory for INT_KER */ + rc = efx_siena_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), + GFP_KERNEL); + if (rc) + goto fail4; + BUG_ON(efx->irq_status.dma_addr & 0x0f); + + netif_dbg(efx, probe, efx->net_dev, + "INT_KER at %llx (virt %p phys %llx)\n", + (unsigned long long)efx->irq_status.dma_addr, + efx->irq_status.addr, + (unsigned long long)virt_to_phys(efx->irq_status.addr)); + + /* Read in the non-volatile configuration */ + rc = siena_probe_nvconfig(efx); + if (rc == -EINVAL) { + netif_err(efx, probe, efx->net_dev, + "NVRAM is invalid therefore using defaults\n"); + efx->phy_type = PHY_TYPE_NONE; + efx->mdio.prtad = MDIO_PRTAD_NONE; + } else if (rc) { + goto fail5; + } + + rc = efx_siena_mcdi_mon_probe(efx); + if (rc) + goto fail5; + +#ifdef CONFIG_SFC_SIENA_SRIOV + efx_siena_sriov_probe(efx); +#endif + efx_siena_ptp_defer_probe_with_channel(efx); + + return 0; + +fail5: + efx_siena_free_buffer(efx, &efx->irq_status); +fail4: +fail3: + efx_siena_mcdi_detach(efx); + efx_siena_mcdi_fini(efx); +fail1: + kfree(efx->nic_data); + return rc; +} + +static int siena_rx_pull_rss_config(struct efx_nic *efx) +{ + 
efx_oword_t temp; + + /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the + * first 128 bits of the same key, assuming it's been set by + * siena_rx_push_rss_config, below) + */ + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(efx->rss_context.rx_hash_key, &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + memcpy(efx->rss_context.rx_hash_key + sizeof(temp), &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + memcpy(efx->rss_context.rx_hash_key + 2 * sizeof(temp), &temp, + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_farch_rx_pull_indir_table(efx); + return 0; +} + +static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, const u8 *key) +{ + efx_oword_t temp; + + /* Set hash key for IPv4 */ + if (key) + memcpy(efx->rss_context.rx_hash_key, key, sizeof(temp)); + memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + /* Enable IPv6 RSS */ + BUILD_BUG_ON(sizeof(efx->rss_context.rx_hash_key) < + 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || + FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); + memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(&temp, efx->rss_context.rx_hash_key + sizeof(temp), sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, + FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); + memcpy(&temp, efx->rss_context.rx_hash_key + 2 * sizeof(temp), + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + + memcpy(efx->rss_context.rx_indir_table, rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + efx_farch_rx_push_indir_table(efx); + + return 0; +} + +/* This call performs hardware-specific global initialisation, such as + * defining the descriptor cache sizes and number of RSS channels. + * It does not set up any buffers, descriptor rings or event queues. + */ +static int siena_init_nic(struct efx_nic *efx) +{ + efx_oword_t temp; + int rc; + + /* Recover from a failed assertion post-reset */ + rc = efx_siena_mcdi_handle_assertion(efx); + if (rc) + return rc; + + /* Squash TX of packets of 16 bytes or less */ + efx_reado(efx, &temp, FR_AZ_TX_RESERVED); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 + * descriptors (which is bad). + */ + efx_reado(efx, &temp, FR_AZ_TX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); + EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); + efx_writeo(efx, &temp, FR_AZ_TX_CFG); + + efx_reado(efx, &temp, FR_AZ_RX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); + /* Enable hash insertion. This is broken for the 'Falcon' hash + * if IPv6 hashing is also enabled, so also select Toeplitz + * TCP/IPv4 and IPv4 hashes. 
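	 * (A FRF_BZ_RX_HASH_ALG value of 1, set below, selects the Toeplitz
	 * hash; 0 would select the legacy Falcon hash.)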
*/ + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE, + EFX_RX_USR_BUF_SIZE >> 5); + efx_writeo(efx, &temp, FR_AZ_RX_CFG); + + siena_rx_push_rss_config(efx, false, efx->rss_context.rx_indir_table, NULL); + efx->rss_context.context_id = 0; /* indicates RSS is active */ + + /* Enable event logging */ + rc = efx_siena_mcdi_log_ctrl(efx, true, false, 0); + if (rc) + return rc; + + /* Set destination of both TX and RX Flush events */ + EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); + efx_writeo(efx, &temp, FR_BZ_DP_CTRL); + + EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); + efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); + + efx_farch_init_common(efx); + return 0; +} + +static void siena_remove_nic(struct efx_nic *efx) +{ + efx_siena_mcdi_mon_remove(efx); + + efx_siena_free_buffer(efx, &efx->irq_status); + + efx_siena_mcdi_reset(efx, RESET_TYPE_ALL); + + efx_siena_mcdi_detach(efx); + efx_siena_mcdi_fini(efx); + + /* Tear down the private nic state */ + kfree(efx->nic_data); + efx->nic_data = NULL; +} + +#define SIENA_DMA_STAT(ext_name, mcdi_name) \ + [SIENA_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define SIENA_OTHER_STAT(ext_name) \ + [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 } +#define GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = { + SIENA_DMA_STAT(tx_bytes, TX_BYTES), + SIENA_OTHER_STAT(tx_good_bytes), + SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES), + SIENA_DMA_STAT(tx_packets, TX_PKTS), + SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS), + SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS), + SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS), + SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), + SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), + SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), + SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS), + SIENA_DMA_STAT(tx_64, TX_64_PKTS), + SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), + SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), + SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), + SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), + SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS), + SIENA_OTHER_STAT(tx_collision), + SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS), + SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS), + SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS), + SIENA_DMA_STAT(rx_bytes, RX_BYTES), + SIENA_OTHER_STAT(rx_good_bytes), + SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES), + SIENA_DMA_STAT(rx_packets, RX_PKTS), + SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS), + SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), + SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS), + SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS), + SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), + SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), + SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), + SIENA_DMA_STAT(rx_lt64, 
		       RX_UNDERSIZE_PKTS),
+	SIENA_DMA_STAT(rx_64, RX_64_PKTS),
+	SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+	SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+	SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+	SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+	SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+	SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+	SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+	SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+	SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+	SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
+	SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
+	SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+	SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+	SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
+	SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+	GENERIC_SW_STAT(rx_nodesc_trunc),
+	GENERIC_SW_STAT(rx_noskb_drops),
+};
+static const unsigned long siena_stat_mask[] = {
+	[0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
+};
+
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+	return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+					siena_stat_mask, names);
+}
+
+static int siena_try_update_nic_stats(struct efx_nic *efx)
+{
+	struct siena_nic_data *nic_data = efx->nic_data;
+	u64 *stats = nic_data->stats;
+	__le64 *dma_stats;
+	__le64 generation_start, generation_end;
+
+	dma_stats = efx->stats_buffer.addr;
+
+	generation_end = dma_stats[efx->num_mac_stats - 1];
+	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+		return 0;
+	rmb();
+	efx_siena_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+			       stats, efx->stats_buffer.addr, false);
+	rmb();
+	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+	if (generation_end != generation_start)
+		return -EAGAIN;
+
+	/* Update derived statistics */
+	efx_siena_fix_nodesc_drop_stat(efx,
+				       &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
+	efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
+			     stats[SIENA_STAT_tx_bytes] -
+			     stats[SIENA_STAT_tx_bad_bytes]);
+	stats[SIENA_STAT_tx_collision] =
+		stats[SIENA_STAT_tx_single_collision] +
+		stats[SIENA_STAT_tx_multiple_collision] +
+		stats[SIENA_STAT_tx_excessive_collision] +
+		stats[SIENA_STAT_tx_late_collision];
+	efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
+			     stats[SIENA_STAT_rx_bytes] -
+			     stats[SIENA_STAT_rx_bad_bytes]);
+	efx_siena_update_sw_stats(efx, stats);
+	return 0;
+}
+
+static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+				     struct rtnl_link_stats64 *core_stats)
+{
+	struct siena_nic_data *nic_data = efx->nic_data;
+	u64 *stats = nic_data->stats;
+	int retry;
+
+	/* If we're unlucky enough to read statistics during the DMA, wait
+	 * up to 10ms for it to finish (typically takes <500us) */
+	for (retry = 0; retry < 100; ++retry) {
+		if (siena_try_update_nic_stats(efx) == 0)
+			break;
+		udelay(100);
+	}
+
+	if (full_stats)
+		memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
+
+	if (core_stats) {
+		core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
+		core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
+		core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
+		core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
+		core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt] +
+					 stats[GENERIC_STAT_rx_nodesc_trunc] +
+					 stats[GENERIC_STAT_rx_noskb_drops];
+		core_stats->multicast = stats[SIENA_STAT_rx_multicast];
+		core_stats->collisions = stats[SIENA_STAT_tx_collision];
+		
core_stats->rx_length_errors = + stats[SIENA_STAT_rx_gtjumbo] + + stats[SIENA_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow]; + core_stats->tx_window_errors = + stats[SIENA_STAT_tx_late_collision]; + + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors + + stats[SIENA_STAT_rx_symbol_error]); + core_stats->tx_errors = (core_stats->tx_window_errors + + stats[SIENA_STAT_tx_bad]); + } + + return SIENA_STAT_COUNT; +} + +static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unused) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN != + MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + + sizeof(efx->multicast_hash)); + + efx_farch_filter_sync_rx_mode(efx); + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + rc = efx_siena_mcdi_set_mac(efx); + if (rc != 0) + return rc; + + memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), + efx->multicast_hash.byte, sizeof(efx->multicast_hash)); + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +/************************************************************************** + * + * Wake on LAN + * + ************************************************************************** + */ + +static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) +{ + struct siena_nic_data *nic_data = efx->nic_data; + + wol->supported = WAKE_MAGIC; + if (nic_data->wol_filter_id != -1) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + + +static int siena_set_wol(struct efx_nic *efx, u32 type) +{ + struct siena_nic_data *nic_data = efx->nic_data; + int rc; + + if (type & ~WAKE_MAGIC) + return -EINVAL; + + if (type & WAKE_MAGIC) { + if (nic_data->wol_filter_id != -1) + efx_siena_mcdi_wol_filter_remove(efx, + nic_data->wol_filter_id); + rc = efx_siena_mcdi_wol_filter_set_magic(efx, + efx->net_dev->dev_addr, + &nic_data->wol_filter_id); + if (rc) + goto fail; + + pci_wake_from_d3(efx->pci_dev, true); + } else { + rc = efx_siena_mcdi_wol_filter_reset(efx); + nic_data->wol_filter_id = -1; + pci_wake_from_d3(efx->pci_dev, false); + if (rc) + goto fail; + } + + return 0; + fail: + netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", + __func__, type, rc); + return rc; +} + + +static void siena_init_wol(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_siena_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); + + if (rc != 0) { + /* If it failed, attempt to get into a synchronised + * state with MC by resetting any set WoL filters */ + efx_siena_mcdi_wol_filter_reset(efx); + nic_data->wol_filter_id = -1; + } else if (nic_data->wol_filter_id != -1) { + pci_wake_from_d3(efx->pci_dev, true); + } +} + +/************************************************************************** + * + * MCDI + * + ************************************************************************** + */ + +#define MCDI_PDU(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) +#define MCDI_DOORBELL(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) +#define MCDI_STATUS(efx) \ + (efx_port_num(efx) ? 
	 MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
+
+static void siena_mcdi_request(struct efx_nic *efx,
+			       const efx_dword_t *hdr, size_t hdr_len,
+			       const efx_dword_t *sdu, size_t sdu_len)
+{
+	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+	unsigned int i;
+	unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
+
+	EFX_WARN_ON_PARANOID(hdr_len != 4);
+
+	efx_writed(efx, hdr, pdu);
+
+	for (i = 0; i < inlen_dw; i++)
+		efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
+
+	/* Ensure the request is written out before the doorbell */
+	wmb();
+
+	/* ring the doorbell with a distinctive value */
+	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static bool siena_mcdi_poll_response(struct efx_nic *efx)
+{
+	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+	efx_dword_t hdr;
+
+	efx_readd(efx, &hdr, pdu);
+
+	/* All 1's indicates that shared memory is in reset (and is
+	 * not a valid hdr). Wait for it to come out of reset before
+	 * completing the command
+	 */
+	return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
+	       EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+				     size_t offset, size_t outlen)
+{
+	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+	unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
+	int i;
+
+	for (i = 0; i < outlen_dw; i++)
+		efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
+}
+
+static int siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+	struct siena_nic_data *nic_data = efx->nic_data;
+	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
+	efx_dword_t reg;
+	u32 value;
+
+	efx_readd(efx, &reg, addr);
+	value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+	if (value == 0)
+		return 0;
+
+	EFX_ZERO_DWORD(reg);
+	efx_writed(efx, &reg, addr);
+
+	/* MAC statistics have been cleared on the NIC; clear the local
+	 * copies that we update with efx_update_diff_stat().
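+	 * (Currently only tx_good_bytes and rx_good_bytes are maintained
+	 * that way; both are zeroed just below.)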
+ */ + nic_data->stats[SIENA_STAT_tx_good_bytes] = 0; + nic_data->stats[SIENA_STAT_rx_good_bytes] = 0; + + if (value == MC_STATUS_DWORD_ASSERT) + return -EINTR; + else + return -EIO; +} + +/************************************************************************** + * + * MTD + * + ************************************************************************** + */ + +#ifdef CONFIG_SFC_SIENA_MTD + +struct siena_nvram_type_info { + int port; + const char *name; +}; + +static const struct siena_nvram_type_info siena_nvram_types[] = { + [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, + [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, + [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" }, +}; + +static int siena_mtd_probe_partition(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *part, + unsigned int type) +{ + const struct siena_nvram_type_info *info; + size_t size, erase_size; + bool protected; + int rc; + + if (type >= ARRAY_SIZE(siena_nvram_types) || + siena_nvram_types[type].name == NULL) + return -ENODEV; + + info = &siena_nvram_types[type]; + + if (info->port != efx_port_num(efx)) + return -ENODEV; + + rc = efx_siena_mcdi_nvram_info(efx, type, &size, &erase_size, + &protected); + if (rc) + return rc; + if (protected) + return -ENODEV; /* hide it */ + + part->nvram_type = type; + part->common.dev_type_name = "Siena NVRAM manager"; + part->common.type_name = info->name; + + part->common.mtd.type = MTD_NORFLASH; + part->common.mtd.flags = MTD_CAP_NORFLASH; + part->common.mtd.size = size; + part->common.mtd.erasesize = erase_size; + + return 0; +} + +static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *parts, + size_t n_parts) +{ + uint16_t fw_subtype_list[ + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; + size_t i; + int rc; + + rc = efx_siena_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); + if (rc) + return rc; + + for (i = 0; i < n_parts; i++) + parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type]; + + return 0; +} + +static int siena_mtd_probe(struct efx_nic *efx) +{ + struct efx_mcdi_mtd_partition *parts; + u32 nvram_types; + unsigned int type; + size_t n_parts; + int rc; + + ASSERT_RTNL(); + + rc = efx_siena_mcdi_nvram_types(efx, &nvram_types); + if (rc) + return rc; + + parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + type = 0; + n_parts = 0; + + while (nvram_types != 0) { + if (nvram_types & 1) { + rc = siena_mtd_probe_partition(efx, &parts[n_parts], + type); + if (rc == 0) + n_parts++; + else if (rc != -ENODEV) + goto fail; + } + type++; + nvram_types >>= 1; + } + + rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts); + if (rc) + goto fail; + + rc = efx_siena_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); +fail: + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_SIENA_MTD */ + +static unsigned int siena_check_caps(const 
struct efx_nic *efx, + u8 flag, u32 offset) +{ + /* Siena did not support MC_CMD_GET_CAPABILITIES */ + return 0; +} + +static unsigned int efx_siena_recycle_ring_size(const struct efx_nic *efx) +{ + /* Maximum link speed is 10G */ + return EFX_RECYCLE_RING_SIZE_10G; +} + +/************************************************************************** + * + * Revision-dependent attributes used by efx.c and nic.c + * + ************************************************************************** + */ + +const struct efx_nic_type siena_a0_nic_type = { + .is_vf = false, + .mem_bar = siena_mem_bar, + .mem_map_size = siena_mem_map_size, + .probe = siena_probe_nic, + .remove = siena_remove_nic, + .init = siena_init_nic, + .dimension_resources = siena_dimension_resources, + .fini = efx_siena_port_dummy_op_void, +#ifdef CONFIG_EEH + .monitor = siena_monitor, +#else + .monitor = NULL, +#endif + .map_reset_reason = efx_siena_mcdi_map_reset_reason, + .map_reset_flags = siena_map_reset_flags, + .reset = efx_siena_mcdi_reset, + .probe_port = efx_siena_mcdi_port_probe, + .remove_port = efx_siena_mcdi_port_remove, + .fini_dmaq = efx_farch_fini_dmaq, + .prepare_flush = efx_siena_prepare_flush, + .finish_flush = siena_finish_flush, + .prepare_flr = efx_siena_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, + .describe_stats = siena_describe_nic_stats, + .update_stats = siena_update_nic_stats, + .start_stats = efx_siena_mcdi_mac_start_stats, + .pull_stats = efx_siena_mcdi_mac_pull_stats, + .stop_stats = efx_siena_mcdi_mac_stop_stats, + .push_irq_moderation = siena_push_irq_moderation, + .reconfigure_mac = siena_mac_reconfigure, + .check_mac_fault = efx_siena_mcdi_mac_check_fault, + .reconfigure_port = efx_siena_mcdi_port_reconfigure, + .get_wol = siena_get_wol, + .set_wol = siena_set_wol, + .resume_wol = siena_init_wol, + .test_chip = siena_test_chip, + .test_nvram = efx_siena_mcdi_nvram_test_all, + .mcdi_request = siena_mcdi_request, + .mcdi_poll_response = siena_mcdi_poll_response, + .mcdi_read_response = siena_mcdi_read_response, + .mcdi_poll_reboot = siena_mcdi_poll_reboot, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = efx_farch_legacy_interrupt, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .tx_limit_len = efx_farch_tx_limit_len, + .tx_enqueue = __efx_siena_enqueue_skb, + .rx_push_rss_config = siena_rx_push_rss_config, + .rx_pull_rss_config = siena_rx_pull_rss_config, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = efx_farch_rx_defer_refill, + .rx_packet = __efx_siena_rx_packet, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, + .filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx 
= efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_SIENA_MTD + .mtd_probe = siena_mtd_probe, + .mtd_rename = efx_siena_mcdi_mtd_rename, + .mtd_read = efx_siena_mcdi_mtd_read, + .mtd_erase = efx_siena_mcdi_mtd_erase, + .mtd_write = efx_siena_mcdi_mtd_write, + .mtd_sync = efx_siena_mcdi_mtd_sync, +#endif + .ptp_write_host_time = siena_ptp_write_host_time, + .ptp_set_ts_config = siena_ptp_set_ts_config, +#ifdef CONFIG_SFC_SIENA_SRIOV + .sriov_configure = efx_siena_sriov_configure, + .sriov_init = efx_siena_sriov_init, + .sriov_fini = efx_siena_sriov_fini, + .sriov_wanted = efx_siena_sriov_wanted, + .sriov_reset = efx_siena_sriov_reset, + .sriov_flr = efx_siena_sriov_flr, + .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac, + .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan, + .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, + .sriov_get_vf_config = efx_siena_sriov_get_vf_config, + .vswitching_probe = efx_siena_port_dummy_op_int, + .vswitching_restore = efx_siena_port_dummy_op_int, + .vswitching_remove = efx_siena_port_dummy_op_void, + .set_mac_address = efx_siena_sriov_mac_address_changed, +#endif + + .revision = EFX_REV_SIENA_A0, + .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, + .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, + .buf_tbl_base = FR_BZ_BUF_FULL_TBL, + .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, + .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, + .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE, + .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, + .rx_buffer_padding = 0, + .can_rx_scatter = true, + .option_descriptors = false, + .min_interrupt_mode = EFX_INT_MODE_LEGACY, + .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, + .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXHASH | NETIF_F_NTUPLE), + .mcdi_max_ver = 1, + .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, + .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), + .rx_hash_key_size = 16, + .check_caps = siena_check_caps, + .sensor_event = efx_siena_mcdi_sensor_event, + .rx_recycle_ring_size = efx_siena_recycle_ring_size, +}; diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.c b/drivers/net/ethernet/sfc/siena/siena_sriov.c new file mode 100644 index 0000000000..8353c15dc2 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.c @@ -0,0 +1,1687 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2010-2012 Solarflare Communications Inc. 
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "efx_channels.h"
+#include "nic.h"
+#include "io.h"
+#include "mcdi.h"
+#include "filter.h"
+#include "mcdi_pcol.h"
+#include "farch_regs.h"
+#include "siena_sriov.h"
+#include "vfdi.h"
+
+/* Number of longs required to track all the VIs in a VF */
+#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
+
+/* Maximum number of RX queues supported */
+#define VF_MAX_RX_QUEUES 63
+
+/**
+ * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
+ * @VF_TX_FILTER_OFF: Disabled
+ * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
+ *	2 TX queues allowed per VF.
+ * @VF_TX_FILTER_ON: Enabled
+ */
+enum efx_vf_tx_filter_mode {
+	VF_TX_FILTER_OFF,
+	VF_TX_FILTER_AUTO,
+	VF_TX_FILTER_ON,
+};
+
+/**
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
+ * @efx: The Efx NIC owning this VF
+ * @pci_rid: The PCI requester ID for this VF
+ * @pci_name: The PCI name (formatted address) of this VF
+ * @index: Index of VF within its port and PF.
+ * @req: VFDI incoming request work item. Incoming USR_EV events are received
+ *	by the NAPI handler, but must be handled by executing MCDI requests
+ *	inside a work item.
+ * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
+ * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
+ * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
+ * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
+ *	@status_lock
+ * @busy: VFDI request queued to be processed or being processed. Receiving
+ *	a VFDI request when @busy is set is an error condition.
+ * @buf: Incoming VFDI requests are DMAed from the VF into this buffer.
+ * @buftbl_base: Buffer table entries for this VF start at this index.
+ * @rx_filtering: Receive filtering has been requested by the VF driver.
+ * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
+ * @rx_filter_qid: VF relative qid for RX filter requested by VF.
+ * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
+ * @tx_filter_mode: Transmit MAC filtering mode.
+ * @tx_filter_id: Transmit MAC filter ID.
+ * @addr: The MAC address and outer vlan tag of the VF.
+ * @status_addr: VF DMA address of page for &struct vfdi_status updates.
+ * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
+ *	@peer_page_addrs and @peer_page_count from simultaneous
+ *	updates by the VM and consumption by
+ *	efx_siena_sriov_update_vf_addr()
+ * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
+ * @peer_page_count: Number of entries in @peer_page_addrs.
+ * @evq0_addrs: Array of guest pages backing evq0.
+ * @evq0_count: Number of entries in @evq0_addrs.
+ * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
+ *	to wait for flush completions.
+ * @txq_lock: Mutex for TX queue allocation.
+ * @txq_mask: Mask of initialized transmit queues.
+ * @txq_count: Number of initialized transmit queues.
+ * @rxq_mask: Mask of initialized receive queues.
+ * @rxq_count: Number of initialized receive queues.
+ * @rxq_retry_mask: Mask of receive queues that need to be flushed again
+ *	due to flush failure.
+ * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
+ * @reset_work: Work item to schedule a VF reset.
+ */ +struct siena_vf { + struct efx_nic *efx; + unsigned int pci_rid; + char pci_name[13]; /* dddd:bb:dd.f */ + unsigned int index; + struct work_struct req; + u64 req_addr; + int req_type; + unsigned req_seqno; + unsigned msg_seqno; + bool busy; + struct efx_buffer buf; + unsigned buftbl_base; + bool rx_filtering; + enum efx_filter_flags rx_filter_flags; + unsigned rx_filter_qid; + int rx_filter_id; + enum efx_vf_tx_filter_mode tx_filter_mode; + int tx_filter_id; + struct vfdi_endpoint addr; + u64 status_addr; + struct mutex status_lock; + u64 *peer_page_addrs; + unsigned peer_page_count; + u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) / + EFX_BUF_SIZE]; + unsigned evq0_count; + wait_queue_head_t flush_waitq; + struct mutex txq_lock; + unsigned long txq_mask[VI_MASK_LENGTH]; + unsigned txq_count; + unsigned long rxq_mask[VI_MASK_LENGTH]; + unsigned rxq_count; + unsigned long rxq_retry_mask[VI_MASK_LENGTH]; + atomic_t rxq_retry_count; + struct work_struct reset_work; +}; + +struct efx_memcpy_req { + unsigned int from_rid; + void *from_buf; + u64 from_addr; + unsigned int to_rid; + u64 to_addr; + unsigned length; +}; + +/** + * struct efx_local_addr - A MAC address on the vswitch without a VF. + * + * Siena does not have a switch, so VFs can't transmit data to each + * other. Instead the VFs must be made aware of the local addresses + * on the vswitch, so that they can arrange for an alternative + * software datapath to be used. + * + * @link: List head for insertion into efx->local_addr_list. + * @addr: Ethernet address + */ +struct efx_local_addr { + struct list_head link; + u8 addr[ETH_ALEN]; +}; + +/** + * struct efx_endpoint_page - Page of vfdi_endpoint structures + * + * @link: List head for insertion into efx->local_page_list. + * @ptr: Pointer to page. + * @addr: DMA address of page. + */ +struct efx_endpoint_page { + struct list_head link; + void *ptr; + dma_addr_t addr; +}; + +/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */ +#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \ + ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid)) +#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \ + (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ + (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) +#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \ + (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ + (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) + +#define EFX_FIELD_MASK(_field) \ + ((1 << _field ## _WIDTH) - 1) + +/* VFs can only use this many transmit channels */ +static unsigned int vf_max_tx_channels = 2; +module_param(vf_max_tx_channels, uint, 0444); +MODULE_PARM_DESC(vf_max_tx_channels, + "Limit the number of TX channels VFs can use"); + +static int max_vfs = -1; +module_param(max_vfs, int, 0444); +MODULE_PARM_DESC(max_vfs, + "Reduce the number of VFs initialized by the driver"); + +/* Workqueue used by VFDI communication. We can't use the global + * workqueue because it may be running the VF driver's probe() + * routine, which will be blocked there waiting for a VFDI response. + */ +static struct workqueue_struct *vfdi_workqueue; + +static unsigned abs_index(struct siena_vf *vf, unsigned index) +{ + return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; +} + +static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, + unsigned *vi_scale_out, unsigned *vf_total_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); + unsigned vi_scale, vf_total; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 
1 : 0); + MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE); + MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count); + + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, + MC_CMD_SRIOV_IN_LEN, outbuf, + MC_CMD_SRIOV_OUT_LEN, &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_SRIOV_OUT_LEN) + return -EIO; + + vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL); + vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE); + if (vi_scale > EFX_VI_SCALE_MAX) + return -EOPNOTSUPP; + + if (vi_scale_out) + *vi_scale_out = vi_scale; + if (vf_total_out) + *vf_total_out = vf_total; + + return 0; +} + +static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled) +{ + struct siena_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + + EFX_POPULATE_OWORD_2(reg, + FRF_CZ_USREV_DIS, enabled ? 0 : 1, + FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel); + efx_writeo(efx, ®, FR_CZ_USR_EV_CFG); +} + +static int efx_siena_sriov_memcpy(struct efx_nic *efx, + struct efx_memcpy_req *req, + unsigned int count) +{ + MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); + MCDI_DECLARE_STRUCT_PTR(record); + unsigned int index, used; + u64 from_addr; + u32 from_rid; + int rc; + + mb(); /* Finish writing source/reading dest before DMA starts */ + + if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM)) + return -ENOBUFS; + used = MC_CMD_MEMCPY_IN_LEN(count); + + for (index = 0; index < count; index++) { + record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS, + count); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, + req->to_rid); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR, + req->to_addr); + if (req->from_buf == NULL) { + from_rid = req->from_rid; + from_addr = req->from_addr; + } else { + if (WARN_ON(used + req->length > + MCDI_CTL_SDU_LEN_MAX_V1)) { + rc = -ENOBUFS; + goto out; + } + + from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; + from_addr = used; + memcpy(_MCDI_PTR(inbuf, used), req->from_buf, + req->length); + used += req->length; + } + + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR, + from_addr); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, + req->length); + + ++req; + } + + rc = efx_siena_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); +out: + mb(); /* Don't write source/read dest before DMA is complete */ + + return rc; +} + +/* The TX filter is entirely controlled by this driver, and is modified + * underneath the feet of the VF + */ +static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct efx_filter_spec filter; + u16 vlan; + int rc; + + if (vf->tx_filter_id != -1) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + vf->tx_filter_id); + netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n", + vf->pci_name, vf->tx_filter_id); + vf->tx_filter_id = -1; + } + + if (is_zero_ether_addr(vf->addr.mac_addr)) + return; + + /* Turn on TX filtering automatically if not explicitly + * enabled or disabled. + */ + if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) + vf->tx_filter_mode = VF_TX_FILTER_ON; + + vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; + efx_filter_init_tx(&filter, abs_index(vf, 0)); + rc = efx_filter_set_eth_local(&filter, + vlan ? 
vlan : EFX_FILTER_VID_UNSPEC, + vf->addr.mac_addr); + BUG_ON(rc); + + rc = efx_filter_insert_filter(efx, &filter, true); + if (rc < 0) { + netif_warn(efx, hw, efx->net_dev, + "Unable to migrate tx filter for vf %s\n", + vf->pci_name); + } else { + netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n", + vf->pci_name, rc); + vf->tx_filter_id = rc; + } +} + +/* The RX filter is managed here on behalf of the VF driver */ +static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct efx_filter_spec filter; + u16 vlan; + int rc; + + if (vf->rx_filter_id != -1) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + vf->rx_filter_id); + netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n", + vf->pci_name, vf->rx_filter_id); + vf->rx_filter_id = -1; + } + + if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr)) + return; + + vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; + efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED, + vf->rx_filter_flags, + abs_index(vf, vf->rx_filter_qid)); + rc = efx_filter_set_eth_local(&filter, + vlan ? vlan : EFX_FILTER_VID_UNSPEC, + vf->addr.mac_addr); + BUG_ON(rc); + + rc = efx_filter_insert_filter(efx, &filter, true); + if (rc < 0) { + netif_warn(efx, hw, efx->net_dev, + "Unable to insert rx filter for vf %s\n", + vf->pci_name); + } else { + netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n", + vf->pci_name, rc); + vf->rx_filter_id = rc; + } +} + +static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + + efx_siena_sriov_reset_tx_filter(vf); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); +} + +/* Push the peer list to this VF. The caller must hold status_lock to interlock + * with VFDI requests, and they must be serialised against manipulation of + * local_page_list, either by acquiring local_lock or by running from + * efx_siena_sriov_peer_work() + */ +static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *status = nic_data->vfdi_status.addr; + struct efx_memcpy_req copy[4]; + struct efx_endpoint_page *epp; + unsigned int pos, count; + unsigned data_offset; + efx_qword_t event; + + WARN_ON(!mutex_is_locked(&vf->status_lock)); + WARN_ON(!vf->status_addr); + + status->local = vf->addr; + status->generation_end = ++status->generation_start; + + memset(copy, '\0', sizeof(copy)); + /* Write generation_start */ + copy[0].from_buf = &status->generation_start; + copy[0].to_rid = vf->pci_rid; + copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status, + generation_start); + copy[0].length = sizeof(status->generation_start); + /* DMA the rest of the structure (excluding the generations). This + * assumes that the non-generation portion of vfdi_status is in + * one chunk starting at the version member. 
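	 * The generation fields are copied separately, generation_start
	 * first and generation_end last, so the VF can detect a torn
	 * snapshot by comparing the two counts.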
+ */ + data_offset = offsetof(struct vfdi_status, version); + copy[1].from_rid = efx->pci_dev->devfn; + copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset; + copy[1].to_rid = vf->pci_rid; + copy[1].to_addr = vf->status_addr + data_offset; + copy[1].length = status->length - data_offset; + + /* Copy the peer pages */ + pos = 2; + count = 0; + list_for_each_entry(epp, &nic_data->local_page_list, link) { + if (count == vf->peer_page_count) { + /* The VF driver will know they need to provide more + * pages because peer_addr_count is too large. + */ + break; + } + copy[pos].from_buf = NULL; + copy[pos].from_rid = efx->pci_dev->devfn; + copy[pos].from_addr = epp->addr; + copy[pos].to_rid = vf->pci_rid; + copy[pos].to_addr = vf->peer_page_addrs[count]; + copy[pos].length = EFX_PAGE_SIZE; + + if (++pos == ARRAY_SIZE(copy)) { + efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); + pos = 0; + } + ++count; + } + + /* Write generation_end */ + copy[pos].from_buf = &status->generation_end; + copy[pos].to_rid = vf->pci_rid; + copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, + generation_end); + copy[pos].length = sizeof(status->generation_end); + efx_siena_sriov_memcpy(efx, copy, pos + 1); + + /* Notify the guest */ + EFX_POPULATE_QWORD_3(event, + FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, + VFDI_EV_SEQ, (vf->msg_seqno & 0xff), + VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); + ++vf->msg_seqno; + efx_farch_generate_event(efx, + EFX_VI_BASE + vf->index * efx_vf_size(efx), + &event); +} + +static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset, + u64 *addr, unsigned count) +{ + efx_qword_t buf; + unsigned pos; + + for (pos = 0; pos < count; ++pos) { + EFX_POPULATE_QWORD_3(buf, + FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF, + addr ? addr[pos] >> 12 : 0, + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL, + &buf, offset + pos); + } +} + +static bool bad_vf_index(struct efx_nic *efx, unsigned index) +{ + return index >= efx_vf_size(efx); +} + +static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count) +{ + unsigned max_buf_count = max_entry_count * + sizeof(efx_qword_t) / EFX_BUF_SIZE; + + return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count); +} + +/* Check that VI specified by per-port index belongs to a VF. + * Optionally set VF index and VI index within the VF. 
+ */ +static bool map_vi_index(struct efx_nic *efx, unsigned abs_index, + struct siena_vf **vf_out, unsigned *rel_index_out) +{ + struct siena_nic_data *nic_data = efx->nic_data; + unsigned vf_i; + + if (abs_index < EFX_VI_BASE) + return true; + vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx); + if (vf_i >= efx->vf_init_count) + return true; + + if (vf_out) + *vf_out = nic_data->vf + vf_i; + if (rel_index_out) + *rel_index_out = abs_index % efx_vf_size(efx); + return false; +} + +static int efx_vfdi_init_evq(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_evq = req->u.init_evq.index; + unsigned buf_count = req->u.init_evq.buf_count; + unsigned abs_evq = abs_index(vf, vf_evq); + unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq); + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || + bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n", + vf->pci_name, vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + + efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); + + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, abs_evq); + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(buf_count), + FRF_AZ_EVQ_BUF_BASE_ID, buftbl); + efx_writeo_table(efx, ®, FR_BZ_EVQ_PTR_TBL, abs_evq); + + if (vf_evq == 0) { + memcpy(vf->evq0_addrs, req->u.init_evq.addr, + buf_count * sizeof(u64)); + vf->evq0_count = buf_count; + } + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_init_rxq(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_rxq = req->u.init_rxq.index; + unsigned vf_evq = req->u.init_rxq.evq; + unsigned buf_count = req->u.init_rxq.buf_count; + unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq); + unsigned label; + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || + vf_rxq >= VF_MAX_RX_QUEUES || + bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d " + "buf_count %d\n", vf->pci_name, vf_rxq, + vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) + ++vf->rxq_count; + efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); + + label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); + EFX_POPULATE_OWORD_6(reg, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl, + FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), + FRF_AZ_RX_DESCQ_LABEL, label, + FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count), + FRF_AZ_RX_DESCQ_JUMBO, + !!(req->u.init_rxq.flags & + VFDI_RXQ_FLAG_SCATTER_EN), + FRF_AZ_RX_DESCQ_EN, 1); + efx_writeo_table(efx, ®, FR_BZ_RX_DESC_PTR_TBL, + abs_index(vf, vf_rxq)); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_init_txq(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_txq = req->u.init_txq.index; + unsigned vf_evq = req->u.init_txq.evq; + unsigned buf_count = req->u.init_txq.buf_count; + unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq); + unsigned label, eth_filt_en; + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) || + vf_txq >= vf_max_tx_channels || + bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { + if (net_ratelimit()) 
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
+{
+	struct efx_nic *efx = vf->efx;
+	struct vfdi_req *req = vf->buf.addr;
+	unsigned vf_rxq = req->u.init_rxq.index;
+	unsigned vf_evq = req->u.init_rxq.evq;
+	unsigned buf_count = req->u.init_rxq.buf_count;
+	unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
+	unsigned label;
+	efx_oword_t reg;
+
+	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
+	    vf_rxq >= VF_MAX_RX_QUEUES ||
+	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
+		if (net_ratelimit())
+			netif_err(efx, hw, efx->net_dev,
+				  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
+				  "buf_count %d\n", vf->pci_name, vf_rxq,
+				  vf_evq, buf_count);
+		return VFDI_RC_EINVAL;
+	}
+	if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
+		++vf->rxq_count;
+	efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
+
+	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
+	EFX_POPULATE_OWORD_6(reg,
+			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
+			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
+			     FRF_AZ_RX_DESCQ_LABEL, label,
+			     FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
+			     FRF_AZ_RX_DESCQ_JUMBO,
+			     !!(req->u.init_rxq.flags &
+				VFDI_RXQ_FLAG_SCATTER_EN),
+			     FRF_AZ_RX_DESCQ_EN, 1);
+	efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
+			 abs_index(vf, vf_rxq));
+
+	return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_init_txq(struct siena_vf *vf)
+{
+	struct efx_nic *efx = vf->efx;
+	struct vfdi_req *req = vf->buf.addr;
+	unsigned vf_txq = req->u.init_txq.index;
+	unsigned vf_evq = req->u.init_txq.evq;
+	unsigned buf_count = req->u.init_txq.buf_count;
+	unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
+	unsigned label, eth_filt_en;
+	efx_oword_t reg;
+
+	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
+	    vf_txq >= vf_max_tx_channels ||
+	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
+		if (net_ratelimit())
+			netif_err(efx, hw, efx->net_dev,
+				  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
+				  "buf_count %d\n", vf->pci_name, vf_txq,
+				  vf_evq, buf_count);
+		return VFDI_RC_EINVAL;
+	}
+
+	mutex_lock(&vf->txq_lock);
+	if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
+		++vf->txq_count;
+	mutex_unlock(&vf->txq_lock);
+	efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
+
+	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
+
+	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
+	EFX_POPULATE_OWORD_8(reg,
+			     FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
+			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
+			     FRF_AZ_TX_DESCQ_EN, 1,
+			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
+			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
+			     FRF_AZ_TX_DESCQ_LABEL, label,
+			     FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
+			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+	efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
+			 abs_index(vf, vf_txq));
+
+	return VFDI_RC_SUCCESS;
+}
+
+/* Returns true when efx_vfdi_fini_all_queues should wake */
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
+{
+	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
+	smp_mb();
+
+	return (!vf->txq_count && !vf->rxq_count) ||
+		atomic_read(&vf->rxq_retry_count);
+}
+
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
+{
+	memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
+	vf->txq_count = 0;
+	memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
+	vf->rxq_count = 0;
+	memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
+	atomic_set(&vf->rxq_retry_count, 0);
+}
+
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
+{
+	struct efx_nic *efx = vf->efx;
+	efx_oword_t reg;
+	unsigned count = efx_vf_size(efx);
+	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
+	unsigned timeout = HZ;
+	unsigned index, rxqs_count;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
+	int rc;
+
+	BUILD_BUG_ON(VF_MAX_RX_QUEUES >
+		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
+	rtnl_lock();
+	efx_siena_prepare_flush(efx);
+	rtnl_unlock();
+
+	/* Flush all the initialized queues */
+	rxqs_count = 0;
+	for (index = 0; index < count; ++index) {
+		if (test_bit(index, vf->txq_mask)) {
+			EFX_POPULATE_OWORD_2(reg,
+					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+					     FRF_AZ_TX_FLUSH_DESCQ,
+					     vf_offset + index);
+			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
+		}
+		if (test_bit(index, vf->rxq_mask)) {
+			MCDI_SET_ARRAY_DWORD(
+				inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+				rxqs_count, vf_offset + index);
+			rxqs_count++;
+		}
+	}
+
+	atomic_set(&vf->rxq_retry_count, 0);
+	while (timeout && (vf->rxq_count || vf->txq_count)) {
+		rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+					MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
+					NULL, 0, NULL);
+		WARN_ON(rc < 0);
+
+		timeout = wait_event_timeout(vf->flush_waitq,
+					     efx_vfdi_flush_wake(vf),
+					     timeout);
+		rxqs_count = 0;
+		for (index = 0; index < count; ++index) {
+			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
+				atomic_dec(&vf->rxq_retry_count);
+				MCDI_SET_ARRAY_DWORD(
+					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+					rxqs_count, vf_offset + index);
+				rxqs_count++;
+			}
+		}
+	}
+
+	rtnl_lock();
+	siena_finish_flush(efx);
+	rtnl_unlock();
+
+	/* Irrespective of success/failure, fini the queues */
+	EFX_ZERO_OWORD(reg);
+	for (index = 0; index < count; ++index) {
+		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
+				 vf_offset + index);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
+				 vf_offset + index);
+		efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
+				 vf_offset + index);
+		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
+				 vf_offset + index);
+	}
+	efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL,
+			     EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
+	efx_vfdi_flush_clear(vf);
+
+	vf->evq0_count = 0;
+
+	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
+}
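+/* Illustrative arithmetic, not from the original sources: the buffer-table
+ * span released by efx_vfdi_fini_all_queues() above follows from
+ * EFX_VF_BUFTBL_PER_VI in siena_sriov.h.  With EFX_MAX_VF_EVQ_SIZE == 8192
+ * and assuming the usual values EFX_MAX_DMAQ_SIZE == 4096 and
+ * EFX_BUF_SIZE == 4096, each VI reserves
+ *
+ *	(8192 + 2 * 4096) * sizeof(efx_qword_t) / 4096
+ *	    == 16384 * 8 / 4096 == 32 entries,
+ *
+ * so the efx_siena_sriov_bufs() call above clears 32 * efx_vf_size(efx)
+ * entries starting at vf->buftbl_base, e.g. 2048 entries for a VF with
+ * 64 VIs.
+ */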
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
+{
+	struct efx_nic *efx = vf->efx;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct vfdi_req *req = vf->buf.addr;
+	unsigned vf_rxq = req->u.mac_filter.rxq;
+	unsigned flags;
+
+	if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
+		if (net_ratelimit())
+			netif_err(efx, hw, efx->net_dev,
+				  "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
+				  "flags 0x%x\n", vf->pci_name, vf_rxq,
+				  req->u.mac_filter.flags);
+		return VFDI_RC_EINVAL;
+	}
+
+	flags = 0;
+	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
+		flags |= EFX_FILTER_FLAG_RX_RSS;
+	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
+		flags |= EFX_FILTER_FLAG_RX_SCATTER;
+	vf->rx_filter_flags = flags;
+	vf->rx_filter_qid = vf_rxq;
+	vf->rx_filtering = true;
+
+	efx_siena_sriov_reset_rx_filter(vf);
+	queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+	return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
+{
+	struct efx_nic *efx = vf->efx;
+	struct siena_nic_data *nic_data = efx->nic_data;
+
+	vf->rx_filtering = false;
+	efx_siena_sriov_reset_rx_filter(vf);
+	queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+	return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
+{
+	struct efx_nic *efx = vf->efx;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct vfdi_req *req = vf->buf.addr;
+	u64 page_count = req->u.set_status_page.peer_page_count;
+	u64 max_page_count =
+		(EFX_PAGE_SIZE -
+		 offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
+		/ sizeof(req->u.set_status_page.peer_page_addr[0]);
+
+	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
+		if (net_ratelimit())
+			netif_err(efx, hw, efx->net_dev,
+				  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
+				  vf->pci_name);
+		return VFDI_RC_EINVAL;
+	}
+
+	mutex_lock(&nic_data->local_lock);
+	mutex_lock(&vf->status_lock);
+	vf->status_addr = req->u.set_status_page.dma_addr;
+
+	kfree(vf->peer_page_addrs);
+	vf->peer_page_addrs = NULL;
+	vf->peer_page_count = 0;
+
+	if (page_count) {
+		vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
+					      GFP_KERNEL);
+		if (vf->peer_page_addrs) {
+			memcpy(vf->peer_page_addrs,
+			       req->u.set_status_page.peer_page_addr,
+			       page_count * sizeof(u64));
+			vf->peer_page_count = page_count;
+		}
+	}
+
+	__efx_siena_sriov_push_vf_status(vf);
+	mutex_unlock(&vf->status_lock);
+	mutex_unlock(&nic_data->local_lock);
+
+	return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
+{
+	mutex_lock(&vf->status_lock);
+	vf->status_addr = 0;
+	mutex_unlock(&vf->status_lock);
+
+	return VFDI_RC_SUCCESS;
+}
+
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
+
+static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
+	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
+	[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
+	[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
+	[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
+	[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
+	[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
+	[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
+	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
+};
+
+static void efx_siena_sriov_vfdi(struct work_struct *work)
+{
+	struct siena_vf *vf =
container_of(work, struct siena_vf, req); + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + struct efx_memcpy_req copy[2]; + int rc; + + /* Copy this page into the local address space */ + memset(copy, '\0', sizeof(copy)); + copy[0].from_rid = vf->pci_rid; + copy[0].from_addr = vf->req_addr; + copy[0].to_rid = efx->pci_dev->devfn; + copy[0].to_addr = vf->buf.dma_addr; + copy[0].length = EFX_PAGE_SIZE; + rc = efx_siena_sriov_memcpy(efx, copy, 1); + if (rc) { + /* If we can't get the request, we can't reply to the caller */ + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Unable to fetch VFDI request from %s rc %d\n", + vf->pci_name, -rc); + vf->busy = false; + return; + } + + if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) { + rc = vfdi_ops[req->op](vf); + if (rc == 0) { + netif_dbg(efx, hw, efx->net_dev, + "vfdi request %d from %s ok\n", + req->op, vf->pci_name); + } + } else { + netif_dbg(efx, hw, efx->net_dev, + "ERROR: Unrecognised request %d from VF %s addr " + "%llx\n", req->op, vf->pci_name, + (unsigned long long)vf->req_addr); + rc = VFDI_RC_EOPNOTSUPP; + } + + /* Allow subsequent VF requests */ + vf->busy = false; + smp_wmb(); + + /* Respond to the request */ + req->rc = rc; + req->op = VFDI_OP_RESPONSE; + + memset(copy, '\0', sizeof(copy)); + copy[0].from_buf = &req->rc; + copy[0].to_rid = vf->pci_rid; + copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc); + copy[0].length = sizeof(req->rc); + copy[1].from_buf = &req->op; + copy[1].to_rid = vf->pci_rid; + copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); + copy[1].length = sizeof(req->op); + + (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); +} + + + +/* After a reset the event queues inside the guests no longer exist. Fill the + * event ring in guest memory with VFDI reset events, then (re-initialise) the + * event queue to raise an interrupt. The guest driver will then recover. 
+ */
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
+				     struct efx_buffer *buffer)
+{
+	struct efx_nic *efx = vf->efx;
+	struct efx_memcpy_req copy_req[4];
+	efx_qword_t event;
+	unsigned int pos, count, k, buftbl, abs_evq;
+	efx_oword_t reg;
+	efx_dword_t ptr;
+	int rc;
+
+	BUG_ON(buffer->len != EFX_PAGE_SIZE);
+
+	if (!vf->evq0_count)
+		return;
+	BUG_ON(vf->evq0_count & (vf->evq0_count - 1));
+
+	mutex_lock(&vf->status_lock);
+	EFX_POPULATE_QWORD_3(event,
+			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
+			     VFDI_EV_SEQ, vf->msg_seqno,
+			     VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
+	vf->msg_seqno++;
+	for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
+		memcpy(buffer->addr + pos, &event, sizeof(event));
+
+	for (pos = 0; pos < vf->evq0_count; pos += count) {
+		count = min_t(unsigned, vf->evq0_count - pos,
+			      ARRAY_SIZE(copy_req));
+		for (k = 0; k < count; k++) {
+			copy_req[k].from_buf = NULL;
+			copy_req[k].from_rid = efx->pci_dev->devfn;
+			copy_req[k].from_addr = buffer->dma_addr;
+			copy_req[k].to_rid = vf->pci_rid;
+			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
+			copy_req[k].length = EFX_PAGE_SIZE;
+		}
+		rc = efx_siena_sriov_memcpy(efx, copy_req, count);
+		if (rc) {
+			if (net_ratelimit())
+				netif_err(efx, hw, efx->net_dev,
+					  "ERROR: Unable to notify %s of reset"
+					  ": %d\n", vf->pci_name, -rc);
+			break;
+		}
+	}
+
+	/* Reinitialise, arm and trigger evq0 */
+	abs_evq = abs_index(vf, 0);
+	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
+	efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
+
+	EFX_POPULATE_OWORD_3(reg,
+			     FRF_CZ_TIMER_Q_EN, 1,
+			     FRF_CZ_HOST_NOTIFY_MODE, 0,
+			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
+	EFX_POPULATE_OWORD_3(reg,
+			     FRF_AZ_EVQ_EN, 1,
+			     FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
+			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
+	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
+	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
+	efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
+
+	mutex_unlock(&vf->status_lock);
+}
+
+static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
+{
+	struct siena_vf *vf = container_of(work, struct siena_vf, reset_work);
+	struct efx_nic *efx = vf->efx;
+	struct efx_buffer buf;
+
+	if (!efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
+		efx_siena_sriov_reset_vf(vf, &buf);
+		efx_siena_free_buffer(efx, &buf);
+	}
+}
+
+static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx)
+{
+	netif_err(efx, drv, efx->net_dev,
+		  "ERROR: IOV requires MSI-X and 1 additional interrupt "
+		  "vector. IOV disabled\n");
+	efx->vf_count = 0;
+}
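+/* Illustrative sketch, not from the original sources: the user events
+ * carried by the extra channel set up below pack their payload as
+ * described in vfdi.h -- bits 0-15 are DATA, bits 16-23 are TYPE and
+ * bits 24-31 are SEQ.  A minimal decoder for the 32-bit
+ * USER_EV_REG_VALUE would therefore look like:
+ *
+ *	data = value & 0xffff;
+ *	type = (value >> 16) & 0xff;
+ *	seq  = (value >> 24) & 0xff;
+ *
+ * which matches the VFDI_EV_SEQ/VFDI_EV_TYPE/VFDI_EV_DATA field
+ * extraction in efx_siena_sriov_event() later in this file.
+ */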
IOV disabled\n"); + efx->vf_count = 0; +} + +static int efx_siena_sriov_probe_channel(struct efx_channel *channel) +{ + struct siena_nic_data *nic_data = channel->efx->nic_data; + nic_data->vfdi_channel = channel; + + return 0; +} + +static void +efx_siena_sriov_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-iov", channel->efx->name); +} + +static const struct efx_channel_type efx_siena_sriov_channel_type = { + .handle_no_channel = efx_siena_sriov_handle_no_channel, + .pre_probe = efx_siena_sriov_probe_channel, + .post_remove = efx_siena_channel_dummy_op_void, + .get_name = efx_siena_sriov_get_channel_name, + /* no copy operation; channel must not be reallocated */ + .keep_eventq = true, +}; + +void efx_siena_sriov_probe(struct efx_nic *efx) +{ + unsigned count; + + if (!max_vfs) + return; + + if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) { + pci_info(efx->pci_dev, "no SR-IOV VFs probed\n"); + return; + } + if (count > 0 && count > max_vfs) + count = max_vfs; + + /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ + efx->vf_count = count; + + efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type; +} + +/* Copy the list of individual addresses into the vfdi_status.peers + * array and auxiliary pages, protected by %local_lock. Drop that lock + * and then broadcast the address list to every VF. + */ +static void efx_siena_sriov_peer_work(struct work_struct *data) +{ + struct siena_nic_data *nic_data = container_of(data, + struct siena_nic_data, + peer_work); + struct efx_nic *efx = nic_data->efx; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; + struct siena_vf *vf; + struct efx_local_addr *local_addr; + struct vfdi_endpoint *peer; + struct efx_endpoint_page *epp; + struct list_head pages; + unsigned int peer_space; + unsigned int peer_count; + unsigned int pos; + + mutex_lock(&nic_data->local_lock); + + /* Move the existing peer pages off %local_page_list */ + INIT_LIST_HEAD(&pages); + list_splice_tail_init(&nic_data->local_page_list, &pages); + + /* Populate the VF addresses starting from entry 1 (entry 0 is + * the PF address) + */ + peer = vfdi_status->peers + 1; + peer_space = ARRAY_SIZE(vfdi_status->peers) - 1; + peer_count = 1; + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + + mutex_lock(&vf->status_lock); + if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) { + *peer++ = vf->addr; + ++peer_count; + --peer_space; + BUG_ON(peer_space == 0); + } + mutex_unlock(&vf->status_lock); + } + + /* Fill the remaining addresses */ + list_for_each_entry(local_addr, &nic_data->local_addr_list, link) { + ether_addr_copy(peer->mac_addr, local_addr->addr); + peer->tci = 0; + ++peer; + ++peer_count; + if (--peer_space == 0) { + if (list_empty(&pages)) { + epp = kmalloc(sizeof(*epp), GFP_KERNEL); + if (!epp) + break; + epp->ptr = dma_alloc_coherent( + &efx->pci_dev->dev, EFX_PAGE_SIZE, + &epp->addr, GFP_KERNEL); + if (!epp->ptr) { + kfree(epp); + break; + } + } else { + epp = list_first_entry( + &pages, struct efx_endpoint_page, link); + list_del(&epp->link); + } + + list_add_tail(&epp->link, &nic_data->local_page_list); + peer = (struct vfdi_endpoint *)epp->ptr; + peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); + } + } + vfdi_status->peer_count = peer_count; + mutex_unlock(&nic_data->local_lock); + + /* Free any now unused endpoint pages */ + while (!list_empty(&pages)) { + epp = list_first_entry( + &pages, struct 
efx_endpoint_page, link); + list_del(&epp->link); + dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, + epp->ptr, epp->addr); + kfree(epp); + } + + /* Finally, push the pages */ + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + + mutex_lock(&vf->status_lock); + if (vf->status_addr) + __efx_siena_sriov_push_vf_status(vf); + mutex_unlock(&vf->status_lock); + } +} + +static void efx_siena_sriov_free_local(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct efx_local_addr *local_addr; + struct efx_endpoint_page *epp; + + while (!list_empty(&nic_data->local_addr_list)) { + local_addr = list_first_entry(&nic_data->local_addr_list, + struct efx_local_addr, link); + list_del(&local_addr->link); + kfree(local_addr); + } + + while (!list_empty(&nic_data->local_page_list)) { + epp = list_first_entry(&nic_data->local_page_list, + struct efx_endpoint_page, link); + list_del(&epp->link); + dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, + epp->ptr, epp->addr); + kfree(epp); + } +} + +static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) +{ + unsigned index; + struct siena_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + + nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf), + GFP_KERNEL); + if (!nic_data->vf) + return -ENOMEM; + + for (index = 0; index < efx->vf_count; ++index) { + vf = nic_data->vf + index; + + vf->efx = efx; + vf->index = index; + vf->rx_filter_id = -1; + vf->tx_filter_mode = VF_TX_FILTER_AUTO; + vf->tx_filter_id = -1; + INIT_WORK(&vf->req, efx_siena_sriov_vfdi); + INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); + init_waitqueue_head(&vf->flush_waitq); + mutex_init(&vf->status_lock); + mutex_init(&vf->txq_lock); + } + + return 0; +} + +static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + unsigned int pos; + + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + + efx_siena_free_buffer(efx, &vf->buf); + kfree(vf->peer_page_addrs); + vf->peer_page_addrs = NULL; + vf->peer_page_count = 0; + + vf->evq0_count = 0; + } +} + +static int efx_siena_sriov_vfs_init(struct efx_nic *efx) +{ + struct pci_dev *pci_dev = efx->pci_dev; + struct siena_nic_data *nic_data = efx->nic_data; + unsigned index, devfn, sriov, buftbl_base; + u16 offset, stride; + struct siena_vf *vf; + int rc; + + sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV); + if (!sriov) + return -ENOENT; + + pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); + + buftbl_base = nic_data->vf_buftbl_base; + devfn = pci_dev->devfn + offset; + for (index = 0; index < efx->vf_count; ++index) { + vf = nic_data->vf + index; + + /* Reserve buffer entries */ + vf->buftbl_base = buftbl_base; + buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx); + + vf->pci_rid = devfn; + snprintf(vf->pci_name, sizeof(vf->pci_name), + "%04x:%02x:%02x.%d", + pci_domain_nr(pci_dev->bus), pci_dev->bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn)); + + rc = efx_siena_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, + GFP_KERNEL); + if (rc) + goto fail; + + devfn += stride; + } + + return 0; + +fail: + efx_siena_sriov_vfs_fini(efx); + return rc; +} + +int efx_siena_sriov_init(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status; + int rc; + + /* Ensure there's room for 
vf_channel */ + BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE); + /* Ensure that VI_BASE is aligned on VI_SCALE */ + BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1)); + + if (efx->vf_count == 0) + return 0; + + rc = efx_siena_sriov_cmd(efx, true, NULL, NULL); + if (rc) + goto fail_cmd; + + rc = efx_siena_alloc_buffer(efx, &nic_data->vfdi_status, + sizeof(*vfdi_status), GFP_KERNEL); + if (rc) + goto fail_status; + vfdi_status = nic_data->vfdi_status.addr; + memset(vfdi_status, 0, sizeof(*vfdi_status)); + vfdi_status->version = 1; + vfdi_status->length = sizeof(*vfdi_status); + vfdi_status->max_tx_channels = vf_max_tx_channels; + vfdi_status->vi_scale = efx->vi_scale; + vfdi_status->rss_rxq_count = efx->rss_spread; + vfdi_status->peer_count = 1 + efx->vf_count; + vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; + + rc = efx_siena_sriov_vf_alloc(efx); + if (rc) + goto fail_alloc; + + mutex_init(&nic_data->local_lock); + INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work); + INIT_LIST_HEAD(&nic_data->local_addr_list); + INIT_LIST_HEAD(&nic_data->local_page_list); + + rc = efx_siena_sriov_vfs_init(efx); + if (rc) + goto fail_vfs; + + rtnl_lock(); + ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr); + efx->vf_init_count = efx->vf_count; + rtnl_unlock(); + + efx_siena_sriov_usrev(efx, true); + + /* At this point we must be ready to accept VFDI requests */ + + rc = pci_enable_sriov(efx->pci_dev, efx->vf_count); + if (rc) + goto fail_pci; + + netif_info(efx, probe, net_dev, + "enabled SR-IOV for %d VFs, %d VI per VF\n", + efx->vf_count, efx_vf_size(efx)); + return 0; + +fail_pci: + efx_siena_sriov_usrev(efx, false); + rtnl_lock(); + efx->vf_init_count = 0; + rtnl_unlock(); + efx_siena_sriov_vfs_fini(efx); +fail_vfs: + cancel_work_sync(&nic_data->peer_work); + efx_siena_sriov_free_local(efx); + kfree(nic_data->vf); +fail_alloc: + efx_siena_free_buffer(efx, &nic_data->vfdi_status); +fail_status: + efx_siena_sriov_cmd(efx, false, NULL, NULL); +fail_cmd: + return rc; +} + +void efx_siena_sriov_fini(struct efx_nic *efx) +{ + struct siena_vf *vf; + unsigned int pos; + struct siena_nic_data *nic_data = efx->nic_data; + + if (efx->vf_init_count == 0) + return; + + /* Disable all interfaces to reconfiguration */ + BUG_ON(nic_data->vfdi_channel->enabled); + efx_siena_sriov_usrev(efx, false); + rtnl_lock(); + efx->vf_init_count = 0; + rtnl_unlock(); + + /* Flush all reconfiguration work */ + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + cancel_work_sync(&vf->req); + cancel_work_sync(&vf->reset_work); + } + cancel_work_sync(&nic_data->peer_work); + + pci_disable_sriov(efx->pci_dev); + + /* Tear down back-end state */ + efx_siena_sriov_vfs_fini(efx); + efx_siena_sriov_free_local(efx); + kfree(nic_data->vf); + efx_siena_free_buffer(efx, &nic_data->vfdi_status); + efx_siena_sriov_cmd(efx, false, NULL, NULL); +} + +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct siena_vf *vf; + unsigned qid, seq, type, data; + + qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); + + /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */ + BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0); + seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ); + type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE); + data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA); + + netif_vdbg(efx, hw, efx->net_dev, + "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n", + qid, seq, type, data); + + if (map_vi_index(efx, 
qid, &vf, NULL)) + return; + if (vf->busy) + goto error; + + if (type == VFDI_EV_TYPE_REQ_WORD0) { + /* Resynchronise */ + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->req_seqno = seq + 1; + vf->req_addr = 0; + } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type) + goto error; + + switch (vf->req_type) { + case VFDI_EV_TYPE_REQ_WORD0: + case VFDI_EV_TYPE_REQ_WORD1: + case VFDI_EV_TYPE_REQ_WORD2: + vf->req_addr |= (u64)data << (vf->req_type << 4); + ++vf->req_type; + return; + + case VFDI_EV_TYPE_REQ_WORD3: + vf->req_addr |= (u64)data << 48; + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->busy = true; + queue_work(vfdi_workqueue, &vf->req); + return; + } + +error: + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Screaming VFDI request from %s\n", + vf->pci_name); + /* Reset the request and sequence number */ + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->req_seqno = seq + 1; +} + +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + + if (vf_i > efx->vf_init_count) + return; + vf = nic_data->vf + vf_i; + netif_info(efx, hw, efx->net_dev, + "FLR on VF %s\n", vf->pci_name); + + vf->status_addr = 0; + efx_vfdi_remove_all_filters(vf); + efx_vfdi_flush_clear(vf); + + vf->evq0_count = 0; +} + +int efx_siena_sriov_mac_address_changed(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; + + if (!efx->vf_init_count) + return 0; + ether_addr_copy(vfdi_status->peers[0].mac_addr, + efx->net_dev->dev_addr); + queue_work(vfdi_workqueue, &nic_data->peer_work); + + return 0; +} + +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct siena_vf *vf; + unsigned queue, qid; + + queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + if (map_vi_index(efx, queue, &vf, &qid)) + return; + /* Ignore flush completions triggered by an FLR */ + if (!test_bit(qid, vf->txq_mask)) + return; + + __clear_bit(qid, vf->txq_mask); + --vf->txq_count; + + if (efx_vfdi_flush_wake(vf)) + wake_up(&vf->flush_waitq); +} + +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct siena_vf *vf; + unsigned ev_failed, queue, qid; + + queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + ev_failed = EFX_QWORD_FIELD(*event, + FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + if (map_vi_index(efx, queue, &vf, &qid)) + return; + if (!test_bit(qid, vf->rxq_mask)) + return; + + if (ev_failed) { + set_bit(qid, vf->rxq_retry_mask); + atomic_inc(&vf->rxq_retry_count); + } else { + __clear_bit(qid, vf->rxq_mask); + --vf->rxq_count; + } + if (efx_vfdi_flush_wake(vf)) + wake_up(&vf->flush_waitq); +} + +/* Called from napi. 
Schedule the reset work item */ +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) +{ + struct siena_vf *vf; + unsigned int rel; + + if (map_vi_index(efx, dmaq, &vf, &rel)) + return; + + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "VF %d DMA Q %d reports descriptor fetch error.\n", + vf->index, rel); + queue_work(vfdi_workqueue, &vf->reset_work); +} + +/* Reset all VFs */ +void efx_siena_sriov_reset(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + unsigned int vf_i; + struct efx_buffer buf; + struct siena_vf *vf; + + ASSERT_RTNL(); + + if (efx->vf_init_count == 0) + return; + + efx_siena_sriov_usrev(efx, true); + (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); + + if (efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) + return; + + for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { + vf = nic_data->vf + vf_i; + efx_siena_sriov_reset_vf(vf, &buf); + } + + efx_siena_free_buffer(efx, &buf); +} + +int efx_init_sriov(void) +{ + /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and + * efx_siena_sriov_peer_work() spend almost all their time sleeping for + * MCDI to complete anyway + */ + vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); + if (!vfdi_workqueue) + return -ENOMEM; + return 0; +} + +void efx_fini_sriov(void) +{ + destroy_workqueue(vfdi_workqueue); +} + +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + mutex_lock(&vf->status_lock); + ether_addr_copy(vf->addr.mac_addr, mac); + __efx_siena_sriov_update_vf_addr(vf); + mutex_unlock(&vf->status_lock); + + return 0; +} + +int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, + u16 vlan, u8 qos) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + u16 tci; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + mutex_lock(&vf->status_lock); + tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); + vf->addr.tci = htons(tci); + __efx_siena_sriov_update_vf_addr(vf); + mutex_unlock(&vf->status_lock); + + return 0; +} + +int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, + bool spoofchk) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + int rc; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + mutex_lock(&vf->txq_lock); + if (vf->txq_count == 0) { + vf->tx_filter_mode = + spoofchk ? 
VF_TX_FILTER_ON : VF_TX_FILTER_OFF; + rc = 0; + } else { + /* This cannot be changed while TX queues are running */ + rc = -EBUSY; + } + mutex_unlock(&vf->txq_lock); + return rc; +} + +int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivi) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + u16 tci; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + ivi->vf = vf_i; + ether_addr_copy(ivi->mac, vf->addr.mac_addr); + ivi->max_tx_rate = 0; + ivi->min_tx_rate = 0; + tci = ntohs(vf->addr.tci); + ivi->vlan = tci & VLAN_VID_MASK; + ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; + ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON; + + return 0; +} + +bool efx_siena_sriov_wanted(struct efx_nic *efx) +{ + return efx->vf_count != 0; +} + +int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + return 0; +} diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.h b/drivers/net/ethernet/sfc/siena/siena_sriov.h new file mode 100644 index 0000000000..50f6e92449 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2015 Solarflare Communications Inc. + */ + +#ifndef SIENA_SRIOV_H +#define SIENA_SRIOV_H + +#include "net_driver.h" + +/* On the SFC9000 family each port is associated with 1 PCI physical + * function (PF) handled by sfc and a configurable number of virtual + * functions (VFs) that may be handled by some other driver, often in + * a VM guest. The queue pointer registers are mapped in both PF and + * VF BARs such that an 8K region provides access to a single RX, TX + * and event queue (collectively a Virtual Interface, VI or VNIC). + * + * The PF has access to all 1024 VIs while VFs are mapped to VIs + * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered + * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE). + * The number of VIs and the VI_SCALE value are configurable but must + * be established at boot time by firmware. + */ + +/* Maximum VI_SCALE parameter supported by Siena */ +#define EFX_VI_SCALE_MAX 6 +/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX), + * so this is the smallest allowed value. 
+ */ +#define EFX_VI_BASE 128U +/* Maximum number of VFs allowed */ +#define EFX_VF_COUNT_MAX 127 +/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */ +#define EFX_MAX_VF_EVQ_SIZE 8192UL +/* The number of buffer table entries reserved for each VI on a VF */ +#define EFX_VF_BUFTBL_PER_VI \ + ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \ + sizeof(efx_qword_t) / EFX_BUF_SIZE) + +int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_siena_sriov_init(struct efx_nic *efx); +void efx_siena_sriov_fini(struct efx_nic *efx); +int efx_siena_sriov_mac_address_changed(struct efx_nic *efx); +bool efx_siena_sriov_wanted(struct efx_nic *efx); +void efx_siena_sriov_reset(struct efx_nic *efx); +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); + +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); +int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf, + u16 vlan, u8 qos); +int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, + bool spoofchk); +int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf, + struct ifla_vf_info *ivf); + +#ifdef CONFIG_SFC_SIENA_SRIOV + +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) +{ + return efx->vf_init_count != 0; +} + +int efx_init_sriov(void); +void efx_fini_sriov(void); +#else /* !CONFIG_SFC_SIENA_SRIOV */ +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) +{ + return false; +} +#endif /* CONFIG_SFC_SIENA_SRIOV */ + +void efx_siena_sriov_probe(struct efx_nic *efx); +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); + +#endif /* SIENA_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/siena/sriov.h b/drivers/net/ethernet/sfc/siena/sriov.h new file mode 100644 index 0000000000..a6981bad76 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/sriov.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2015 Solarflare Communications Inc. 
+ */ + +#ifndef EFX_SRIOV_H +#define EFX_SRIOV_H + +#include "net_driver.h" + +#ifdef CONFIG_SFC_SIENA_SRIOV + +static inline +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_mac) + return efx->type->sriov_set_vf_mac(efx, vf_i, mac); + else + return -EOPNOTSUPP; +} + +static inline +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_vlan) { + if ((vlan & ~VLAN_VID_MASK) || + (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos); + } else { + return -EOPNOTSUPP; + } +} + +static inline +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_spoofchk) + return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk); + else + return -EOPNOTSUPP; +} + +static inline +int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_get_vf_config) + return efx->type->sriov_get_vf_config(efx, vf_i, ivi); + else + return -EOPNOTSUPP; +} + +static inline +int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, + int link_state) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_link_state) + return efx->type->sriov_set_vf_link_state(efx, vf_i, + link_state); + else + return -EOPNOTSUPP; +} +#endif /* CONFIG_SFC_SIENA_SRIOV */ + +#endif /* EFX_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c new file mode 100644 index 0000000000..e166dcb9b9 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. 
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/cache.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "io.h"
+#include "nic.h"
+#include "tx.h"
+#include "tx_common.h"
+#include "workarounds.h"
+
+static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
+					 struct efx_tx_buffer *buffer)
+{
+	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
+	struct efx_buffer *page_buf =
+		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
+	unsigned int offset =
+		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
+
+	if (unlikely(!page_buf->addr) &&
+	    efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+				   GFP_ATOMIC))
+		return NULL;
+	buffer->dma_addr = page_buf->dma_addr + offset;
+	buffer->unmap_len = 0;
+	return (u8 *)page_buf->addr + offset;
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+	/* We need to consider all queues that the net core sees as one */
+	struct efx_nic *efx = txq1->efx;
+	struct efx_tx_queue *txq2;
+	unsigned int fill_level;
+
+	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
+	if (likely(fill_level < efx->txq_stop_thresh))
+		return;
+
+	/* We used the stale old_read_count above, which gives us a
+	 * pessimistic estimate of the fill level (which may even
+	 * validly be >= efx->txq_entries). Now try again using
+	 * read_count (more likely to be a cache miss).
+	 *
+	 * If we read read_count and then conditionally stop the
+	 * queue, it is possible for the completion path to race with
+	 * us and complete all outstanding descriptors in the middle,
+	 * after which there will be no more completions to wake it.
+	 * Therefore we stop the queue first, then read read_count
+	 * (with a memory barrier to ensure the ordering), then
+	 * restart the queue if the fill level turns out to be low
+	 * enough.
+	 */
+	netif_tx_stop_queue(txq1->core_txq);
+	smp_mb();
+	efx_for_each_channel_tx_queue(txq2, txq1->channel)
+		txq2->old_read_count = READ_ONCE(txq2->read_count);
+
+	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
+	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
+	if (likely(fill_level < efx->txq_stop_thresh)) {
+		smp_mb();
+		if (likely(!efx->loopback_selftest))
+			netif_tx_start_queue(txq1->core_txq);
+	}
+}
+
+static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
+				struct sk_buff *skb)
+{
+	unsigned int copy_len = skb->len;
+	struct efx_tx_buffer *buffer;
+	u8 *copy_buffer;
+	int rc;
+
+	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
+
+	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
+	if (unlikely(!copy_buffer))
+		return -ENOMEM;
+
+	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
+	EFX_WARN_ON_PARANOID(rc);
+	buffer->len = copy_len;
+
+	buffer->skb = skb;
+	buffer->flags = EFX_TX_BUF_SKB;
+
+	++tx_queue->insert_count;
+	return rc;
+}
+
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+	struct efx_tx_queue *q;
+
+	efx_for_each_channel_tx_queue(q, channel) {
+		if (q->xmit_pending)
+			efx_nic_push_buffers(q);
+	}
+}
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue. The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ * + * If any DMA mapping fails, any mapped fragments will be unmapped, + * the queue's insert pointer will be restored to its original value. + * + * This function is split out from efx_siena_hard_start_xmit to allow the + * loopback test to direct packets via specific TX queues. + * + * Returns NETDEV_TX_OK. + * You must hold netif_tx_lock() to call this function. + */ +netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + unsigned int old_insert_count = tx_queue->insert_count; + bool xmit_more = netdev_xmit_more(); + bool data_mapped = false; + unsigned int segments; + unsigned int skb_len; + int rc; + + skb_len = skb->len; + segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; + if (segments == 1) + segments = 0; /* Don't use TSO for a single segment. */ + + /* Handle TSO first - it's *possible* (although unlikely) that we might + * be passed a packet to segment that's smaller than the copybreak/PIO + * size limit. + */ + if (segments) { + rc = efx_siena_tx_tso_fallback(tx_queue, skb); + tx_queue->tso_fallbacks++; + if (rc == 0) + return 0; + goto err; + } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) { + /* Pad short packets or coalesce short fragmented packets. */ + if (efx_enqueue_skb_copy(tx_queue, skb)) + goto err; + tx_queue->cb_packets++; + data_mapped = true; + } + + /* Map for DMA and create descriptors if we haven't done so already. */ + if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments))) + goto err; + + efx_tx_maybe_stop_queue(tx_queue); + + tx_queue->xmit_pending = true; + + /* Pass off to hardware */ + if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) + efx_tx_send_pending(tx_queue->channel); + + tx_queue->tx_packets++; + return NETDEV_TX_OK; + + +err: + efx_siena_enqueue_unwind(tx_queue, old_insert_count); + dev_kfree_skb_any(skb); + + /* If we're not expecting another transmit and we had something to push + * on this queue or a partner queue then we need to push here to get the + * previous packets out. + */ + if (!xmit_more) + efx_tx_send_pending(tx_queue->channel); + + return NETDEV_TX_OK; +} + +/* Transmit a packet from an XDP buffer + * + * Returns number of packets sent on success, error code otherwise. + * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC + * (for XDP redirect). + */ +int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, + bool flush) +{ + struct efx_tx_buffer *tx_buffer; + struct efx_tx_queue *tx_queue; + struct xdp_frame *xdpf; + dma_addr_t dma_addr; + unsigned int len; + int space; + int cpu; + int i = 0; + + if (unlikely(n && !xdpfs)) + return -EINVAL; + if (unlikely(!n)) + return 0; + + cpu = raw_smp_processor_id(); + if (unlikely(cpu >= efx->xdp_tx_queue_count)) + return -EINVAL; + + tx_queue = efx->xdp_tx_queues[cpu]; + if (unlikely(!tx_queue)) + return -EINVAL; + + if (!tx_queue->initialised) + return -EINVAL; + + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); + + /* If we're borrowing net stack queues we have to handle stop-restart + * or we might block the queue and it will be considered as frozen + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + if (netif_tx_queue_stopped(tx_queue->core_txq)) + goto unlock; + efx_tx_maybe_stop_queue(tx_queue); + } + + /* Check for available space. We should never need multiple + * descriptors per frame. 
+ */ + space = efx->txq_entries + + tx_queue->read_count - tx_queue->insert_count; + + for (i = 0; i < n; i++) { + xdpf = xdpfs[i]; + + if (i >= space) + break; + + /* We'll want a descriptor for this tx. */ + prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); + + len = xdpf->len; + + /* Map for DMA. */ + dma_addr = dma_map_single(&efx->pci_dev->dev, + xdpf->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&efx->pci_dev->dev, dma_addr)) + break; + + /* Create descriptor and set up for unmapping DMA. */ + tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len); + tx_buffer->xdpf = xdpf; + tx_buffer->flags = EFX_TX_BUF_XDP | + EFX_TX_BUF_MAP_SINGLE; + tx_buffer->dma_offset = 0; + tx_buffer->unmap_len = len; + tx_queue->tx_packets++; + } + + /* Pass mapped frames to hardware. */ + if (flush && i > 0) + efx_nic_push_buffers(tx_queue); + +unlock: + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); + + return i == 0 ? -EIO : i; +} + +/* Initiate a packet transmission. We use one channel per CPU + * (sharing when we have more CPUs than channels). + * + * Context: non-blocking. + * Should always return NETDEV_TX_OK and consume the skb. + */ +netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + unsigned index, type; + + EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); + + index = skb_get_queue_mapping(skb); + type = efx_tx_csum_type_skb(skb); + if (index >= efx->n_tx_channels) { + index -= efx->n_tx_channels; + type |= EFX_TXQ_TYPE_HIGHPRI; + } + + /* PTP "event" packet */ + if (unlikely(efx_xmit_with_hwtstamp(skb)) && + ((efx_siena_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || + unlikely(efx_siena_ptp_is_ptp_tx(efx, skb)))) { + /* There may be existing transmits on the channel that are + * waiting for this packet to trigger the doorbell write. + * We need to send the packets at this point. + */ + efx_tx_send_pending(efx_get_tx_channel(efx, index)); + return efx_siena_ptp_tx(efx, skb); + } + + tx_queue = efx_get_tx_queue(efx, index, type); + if (WARN_ON_ONCE(!tx_queue)) { + /* We don't have a TXQ of the right type. + * This should never happen, as we don't advertise offload + * features unless we can support them. + */ + dev_kfree_skb_any(skb); + /* If we're not expecting another transmit and we had something to push + * on this queue or a partner queue then we need to push here to get the + * previous packets out. + */ + if (!netdev_xmit_more()) + efx_tx_send_pending(tx_queue->channel); + return NETDEV_TX_OK; + } + + return __efx_siena_enqueue_skb(tx_queue, skb); +} + +void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + + /* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */ + tx_queue->core_txq = + netdev_get_tx_queue(efx->net_dev, + tx_queue->channel->channel + + ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? 
+					     efx->n_tx_channels : 0));
+}
+
+int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+		       void *type_data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct tc_mqprio_qopt *mqprio = type_data;
+	unsigned tc, num_tc;
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EOPNOTSUPP;
+
+	/* Only Siena supported highpri queues */
+	if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
+		return -EOPNOTSUPP;
+
+	num_tc = mqprio->num_tc;
+
+	if (num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	net_dev->num_tc = num_tc;
+
+	return netif_set_real_num_tx_queues(net_dev,
+					    max_t(int, num_tc, 1) *
+					    efx->n_tx_channels);
+}
diff --git a/drivers/net/ethernet/sfc/siena/tx.h b/drivers/net/ethernet/sfc/siena/tx.h
new file mode 100644
index 0000000000..ee801950c9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2015 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_TX_H
+#define EFX_TX_H
+
+#include <linux/types.h>
+
+/* Driver internal tx-path related declarations. */
+/* What TXQ type will satisfy the checksum offloads required for this skb? */
+static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
+{
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0; /* no checksum offload */
+
+	if (skb->encapsulation &&
+	    skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
+		/* we only advertise features for IPv4 and IPv6 checksums on
+		 * encapsulated packets, so if the checksum is for the inner
+		 * packet, it must be one of them; no further checking required.
+		 */
+
+		/* Do we also need to offload the outer header checksum? */
+		if (skb_shinfo(skb)->gso_segs > 1 &&
+		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
+		return EFX_TXQ_TYPE_INNER_CSUM;
+	}
+
+	/* similarly, we only advertise features for IPv4 and IPv6 checksums,
+	 * so it must be one of them. No need for further checks.
+	 */
+	return EFX_TXQ_TYPE_OUTER_CSUM;
+}
+#endif /* EFX_TX_H */
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c
new file mode 100644
index 0000000000..93a32d6194
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx_common.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */ + +#include "net_driver.h" +#include "efx.h" +#include "nic_common.h" +#include "tx_common.h" + +static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) +{ + return DIV_ROUND_UP(tx_queue->ptr_mask + 1, + PAGE_SIZE >> EFX_TX_CB_ORDER); +} + +int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int entries; + int rc; + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); + EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); + tx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating TX queue %d size %#x mask %#x\n", + tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); + + /* Allocate software ring */ + tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), + GFP_KERNEL); + if (!tx_queue->buffer) + return -ENOMEM; + + tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue), + sizeof(tx_queue->cb_page[0]), GFP_KERNEL); + if (!tx_queue->cb_page) { + rc = -ENOMEM; + goto fail1; + } + + /* Allocate hardware ring, determine TXQ type */ + rc = efx_nic_probe_tx(tx_queue); + if (rc) + goto fail2; + + tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue; + return 0; + +fail2: + kfree(tx_queue->cb_page); + tx_queue->cb_page = NULL; +fail1: + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; + return rc; +} + +void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + + netif_dbg(efx, drv, efx->net_dev, + "initialising TX queue %d\n", tx_queue->queue); + + tx_queue->insert_count = 0; + tx_queue->notify_count = 0; + tx_queue->write_count = 0; + tx_queue->packet_write_count = 0; + tx_queue->old_write_count = 0; + tx_queue->read_count = 0; + tx_queue->old_read_count = 0; + tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; + tx_queue->xmit_pending = false; + tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) && + tx_queue->channel == efx_siena_ptp_channel(efx)); + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; + + tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel); + tx_queue->tso_version = 0; + + /* Set up TX descriptor ring */ + efx_nic_init_tx(tx_queue); + + tx_queue->initialised = true; +} + +void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue) +{ + int i; + + if (!tx_queue->buffer) + return; + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "destroying TX queue %d\n", tx_queue->queue); + efx_nic_remove_tx(tx_queue); + + if (tx_queue->cb_page) { + for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++) + efx_siena_free_buffer(tx_queue->efx, + &tx_queue->cb_page[i]); + kfree(tx_queue->cb_page); + tx_queue->cb_page = NULL; + } + + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; + tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL; +} + +static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + if (buffer->unmap_len) { + struct device *dma_dev = &tx_queue->efx->pci_dev->dev; + dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset; + + if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) + dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + buffer->unmap_len = 0; + } + + if (buffer->flags & EFX_TX_BUF_SKB) { + struct sk_buff *skb = (struct sk_buff *)buffer->skb; 
+ + EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); + (*pkts_compl)++; + (*bytes_compl) += skb->len; + if (tx_queue->timestamping && + (tx_queue->completed_timestamp_major || + tx_queue->completed_timestamp_minor)) { + struct skb_shared_hwtstamps hwtstamp; + + hwtstamp.hwtstamp = + efx_siena_ptp_nic_to_kernel_time(tx_queue); + skb_tstamp_tx(skb, &hwtstamp); + + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; + } + dev_consume_skb_any((struct sk_buff *)buffer->skb); + netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, + "TX queue %d transmission id %x complete\n", + tx_queue->queue, tx_queue->read_count); + } else if (buffer->flags & EFX_TX_BUF_XDP) { + xdp_return_frame_rx_napi(buffer->xdpf); + } + + buffer->len = 0; + buffer->flags = 0; +} + +void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "shutting down TX queue %d\n", tx_queue->queue); + + if (!tx_queue->buffer) + return; + + /* Free any buffers left in the ring */ + while (tx_queue->read_count != tx_queue->write_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; + + buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + } + tx_queue->xmit_pending = false; + netdev_tx_reset_queue(tx_queue->core_txq); +} + +/* Remove packets from the TX queue + * + * This removes packets from the TX queue, up to and including the + * specified index. + */ +static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, + unsigned int index, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int stop_index, read_ptr; + + stop_index = (index + 1) & tx_queue->ptr_mask; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (read_ptr != stop_index) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!efx_tx_buffer_in_use(buffer)) { + netif_err(efx, tx_err, efx->net_dev, + "TX queue %d spurious TX completion id %d\n", + tx_queue->queue, read_ptr); + efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } +} + +void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue) +{ + if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); + if (tx_queue->read_count == tx_queue->old_write_count) { + /* Ensure that read_count is flushed. */ + smp_mb(); + tx_queue->empty_read_count = + tx_queue->read_count | EFX_EMPTY_COUNT_VALID; + } + } +} + +void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) +{ + unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; + struct efx_nic *efx = tx_queue->efx; + + EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask); + + efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + if (pkts_compl > 1) + ++tx_queue->merge_events; + + /* See if we need to restart the netif queue. This memory + * barrier ensures that we write read_count (inside + * efx_dequeue_buffers()) before reading the queue status. 
+ */ + smp_mb(); + if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && + likely(efx->port_enabled) && + likely(netif_device_present(efx->net_dev))) { + fill_level = efx_channel_tx_fill_level(tx_queue->channel); + if (fill_level <= efx->txq_wake_thresh) + netif_tx_wake_queue(tx_queue->core_txq); + } + + efx_siena_xmit_done_check_empty(tx_queue); +} + +/* Remove buffers put into a tx_queue for the current packet. + * None of the buffers must have an skb attached. + */ +void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count) +{ + struct efx_tx_buffer *buffer; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + + /* Work backwards until we hit the original insert pointer value */ + while (tx_queue->insert_count != insert_count) { + --tx_queue->insert_count; + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + } +} + +struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len) +{ + const struct efx_nic_type *nic_type = tx_queue->efx->type; + struct efx_tx_buffer *buffer; + unsigned int dma_len; + + /* Map the fragment taking account of NIC-dependent DMA limits. */ + do { + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + + if (nic_type->tx_limit_len) + dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len); + else + dma_len = len; + + buffer->len = dma_len; + buffer->dma_addr = dma_addr; + buffer->flags = EFX_TX_BUF_CONT; + len -= dma_len; + dma_addr += dma_len; + ++tx_queue->insert_count; + } while (len); + + return buffer; +} + +static int efx_tx_tso_header_length(struct sk_buff *skb) +{ + size_t header_len; + + if (skb->encapsulation) + header_len = skb_inner_transport_header(skb) - + skb->data + + (inner_tcp_hdr(skb)->doff << 2u); + else + header_len = skb_transport_header(skb) - skb->data + + (tcp_hdr(skb)->doff << 2u); + return header_len; +} + +/* Map all data from an SKB for DMA and create descriptors on the queue. */ +int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count) +{ + struct efx_nic *efx = tx_queue->efx; + struct device *dma_dev = &efx->pci_dev->dev; + unsigned int frag_index, nr_frags; + dma_addr_t dma_addr, unmap_addr; + unsigned short dma_flags; + size_t len, unmap_len; + + nr_frags = skb_shinfo(skb)->nr_frags; + frag_index = 0; + + /* Map header data. */ + len = skb_headlen(skb); + dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE); + dma_flags = EFX_TX_BUF_MAP_SINGLE; + unmap_len = len; + unmap_addr = dma_addr; + + if (unlikely(dma_mapping_error(dma_dev, dma_addr))) + return -EIO; + + if (segment_count) { + /* For TSO we need to put the header in to a separate + * descriptor. Map this separately if necessary. + */ + size_t header_len = efx_tx_tso_header_length(skb); + + if (header_len != len) { + tx_queue->tso_long_headers++; + efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len); + len -= header_len; + dma_addr += header_len; + } + } + + /* Add descriptors for each fragment. */ + do { + struct efx_tx_buffer *buffer; + skb_frag_t *fragment; + + buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len); + + /* The final descriptor for a fragment is responsible for + * unmapping the whole fragment. 
+ */ + buffer->flags = EFX_TX_BUF_CONT | dma_flags; + buffer->unmap_len = unmap_len; + buffer->dma_offset = buffer->dma_addr - unmap_addr; + + if (frag_index >= nr_frags) { + /* Store SKB details with the final buffer for + * the completion. + */ + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB | dma_flags; + return 0; + } + + /* Move on to the next fragment. */ + fragment = &skb_shinfo(skb)->frags[frag_index++]; + len = skb_frag_size(fragment); + dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, + DMA_TO_DEVICE); + dma_flags = 0; + unmap_len = len; + unmap_addr = dma_addr; + + if (unlikely(dma_mapping_error(dma_dev, dma_addr))) + return -EIO; + } while (1); +} + +unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for option descriptors */ + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + +/* + * Fallback to software TSO. + * + * This is used if we are unable to send a GSO packet through hardware TSO. + * This should only ever happen due to per-queue restrictions - unsupported + * packets should first be filtered by the feature flags. + * + * Returns 0 on success, error code otherwise. + */ +int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + struct sk_buff *segments, *next; + + segments = skb_gso_segment(skb, 0); + if (IS_ERR(segments)) + return PTR_ERR(segments); + + dev_consume_skb_any(skb); + + skb_list_walk_safe(segments, skb, next) { + skb_mark_not_on_list(skb); + efx_enqueue_skb(tx_queue, skb); + } + + return 0; +} diff --git a/drivers/net/ethernet/sfc/siena/tx_common.h b/drivers/net/ethernet/sfc/siena/tx_common.h new file mode 100644 index 0000000000..31ca52a250 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/tx_common.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_TX_COMMON_H +#define EFX_TX_COMMON_H + +int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue); +void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue); +void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue); +void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue); + +static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) +{ + return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION); +} + +void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue); +void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); + +void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count); + +struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len); +int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count); + +unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx); +int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + +extern bool efx_siena_separate_tx_channels; +#endif diff --git a/drivers/net/ethernet/sfc/siena/vfdi.h b/drivers/net/ethernet/sfc/siena/vfdi.h new file mode 100644 index 0000000000..480b872eb4 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/vfdi.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2010-2012 Solarflare Communications Inc. + */ +#ifndef _VFDI_H +#define _VFDI_H + +/** + * DOC: Virtual Function Driver Interface + * + * This file contains software structures used to form a two way + * communication channel between the VF driver and the PF driver, + * named Virtual Function Driver Interface (VFDI). + * + * For the purposes of VFDI, a page is a memory region with size and + * alignment of 4K. All addresses are DMA addresses to be used within + * the domain of the relevant VF. + * + * The only hardware-defined channels for a VF driver to communicate + * with the PF driver are the event mailboxes (%FR_CZ_USR_EV + * registers). Writing to these registers generates an event with + * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox + * and USER_EV_REG_VALUE set to the value written. The PF driver may + * direct or disable delivery of these events by setting + * %FR_CZ_USR_EV_CFG. + * + * The PF driver can send arbitrary events to arbitrary event queues. + * However, for consistency, VFDI events from the PF are defined to + * follow the same form and be sent to the first event queue assigned + * to the VF while that queue is enabled by the VF driver. + * + * The general form of the variable bits of VFDI events is: + * + * 0 16 24 31 + * | DATA | TYPE | SEQ | + * + * SEQ is a sequence number which should be incremented by 1 (modulo + * 256) for each event. The sequence numbers used in each direction + * are independent. + * + * The VF submits requests of type &struct vfdi_req by sending the + * address of the request (ADDR) in a series of 4 events: + * + * 0 16 24 31 + * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ | + * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 | + * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 | + * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 | + * + * The address must be page-aligned. After receiving such a valid + * series of events, the PF driver will attempt to read the request + * and write a response to the same address. 
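+ * The VF driver can thus detect completion by polling the request
+ * memory: @op is rewritten to %VFDI_OP_RESPONSE, at which point @rc
+ * holds the result.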
In case of an invalid + * sequence of events or a DMA error, there will be no response. + * + * The VF driver may request that the PF driver writes status + * information into its domain asynchronously. After writing the + * status, the PF driver will send an event of the form: + * + * 0 16 24 31 + * | reserved | VFDI_EV_TYPE_STATUS | SEQ | + * + * In case the VF must be reset for any reason, the PF driver will + * send an event of the form: + * + * 0 16 24 31 + * | reserved | VFDI_EV_TYPE_RESET | SEQ | + * + * It is then the responsibility of the VF driver to request + * reinitialisation of its queues. + */ +#define VFDI_EV_SEQ_LBN 24 +#define VFDI_EV_SEQ_WIDTH 8 +#define VFDI_EV_TYPE_LBN 16 +#define VFDI_EV_TYPE_WIDTH 8 +#define VFDI_EV_TYPE_REQ_WORD0 0 +#define VFDI_EV_TYPE_REQ_WORD1 1 +#define VFDI_EV_TYPE_REQ_WORD2 2 +#define VFDI_EV_TYPE_REQ_WORD3 3 +#define VFDI_EV_TYPE_STATUS 4 +#define VFDI_EV_TYPE_RESET 5 +#define VFDI_EV_DATA_LBN 0 +#define VFDI_EV_DATA_WIDTH 16 + +struct vfdi_endpoint { + u8 mac_addr[ETH_ALEN]; + __be16 tci; +}; + +/** + * enum vfdi_op - VFDI operation enumeration + * @VFDI_OP_RESPONSE: Indicates a response to the request. + * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ. + * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ. + * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ. + * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then + * finalize the SRAM entries. + * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ. + * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters. + * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates + * from PF and write the initial status. + * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status + * updates from PF. + */ +enum vfdi_op { + VFDI_OP_RESPONSE = 0, + VFDI_OP_INIT_EVQ = 1, + VFDI_OP_INIT_RXQ = 2, + VFDI_OP_INIT_TXQ = 3, + VFDI_OP_FINI_ALL_QUEUES = 4, + VFDI_OP_INSERT_FILTER = 5, + VFDI_OP_REMOVE_ALL_FILTERS = 6, + VFDI_OP_SET_STATUS_PAGE = 7, + VFDI_OP_CLEAR_STATUS_PAGE = 8, + VFDI_OP_LIMIT, +}; + +/* Response codes for VFDI operations. Other values may be used in future. */ +#define VFDI_RC_SUCCESS 0 +#define VFDI_RC_ENOMEM (-12) +#define VFDI_RC_EINVAL (-22) +#define VFDI_RC_EOPNOTSUPP (-95) +#define VFDI_RC_ETIMEDOUT (-110) + +/** + * struct vfdi_req - Request from VF driver to PF driver + * @op: Operation code or response indicator, taken from &enum vfdi_op. + * @rc: Response code. Set to 0 on success or a negative error code on failure. + * @u.init_evq.index: Index of event queue to create. + * @u.init_evq.buf_count: Number of 4k buffers backing event queue. + * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA + * address of each page backing the event queue. + * @u.init_rxq.index: Index of receive queue to create. + * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue. + * @u.init_rxq.evq: Instance of event queue to target receive events at. + * @u.init_rxq.label: Label used in receive events. + * @u.init_rxq.flags: Unused. + * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA + * address of each page backing the receive queue. + * @u.init_txq.index: Index of transmit queue to create. + * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue. + * @u.init_txq.evq: Instance of event queue to target transmit completion + * events at. + * @u.init_txq.label: Label used in transmit completion events. 
+ * @u.init_txq.flags: Checksum offload flags. + * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA + * address of each page backing the transmit queue. + * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting + * all traffic at this receive queue. + * @u.mac_filter.flags: MAC filter flags. + * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status. + * This address must be page-aligned and the PF may write up to a + * whole page (allowing for extension of the structure). + * @u.set_status_page.peer_page_count: Number of additional pages the VF + * has provided into which peer addresses may be DMAd. + * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages. + * If the number of peers exceeds 256, then the VF must provide + * additional pages in this array. The PF will then DMA up to + * 512 vfdi_endpoint structures into each page. These addresses + * must be page-aligned. + */ +struct vfdi_req { + u32 op; + u32 reserved1; + s32 rc; + u32 reserved2; + union { + struct { + u32 index; + u32 buf_count; + u64 addr[]; + } init_evq; + struct { + u32 index; + u32 buf_count; + u32 evq; + u32 label; + u32 flags; +#define VFDI_RXQ_FLAG_SCATTER_EN 1 + u32 reserved; + u64 addr[]; + } init_rxq; + struct { + u32 index; + u32 buf_count; + u32 evq; + u32 label; + u32 flags; +#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1 +#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2 + u32 reserved; + u64 addr[]; + } init_txq; + struct { + u32 rxq; + u32 flags; +#define VFDI_MAC_FILTER_FLAG_RSS 1 +#define VFDI_MAC_FILTER_FLAG_SCATTER 2 + } mac_filter; + struct { + u64 dma_addr; + u64 peer_page_count; + u64 peer_page_addr[]; + } set_status_page; + } u; +}; + +/** + * struct vfdi_status - Status provided by PF driver to VF driver + * @generation_start: A generation count DMA'd to VF *before* the + * rest of the structure. + * @generation_end: A generation count DMA'd to VF *after* the + * rest of the structure. + * @version: Version of this structure; currently set to 1. Later + * versions must either be layout-compatible or only be sent to VFs + * that specifically request them. + * @length: Total length of this structure including embedded tables + * @vi_scale: log2 the number of VIs available on this VF. This quantity + * is used by the hardware for register decoding. + * @max_tx_channels: The maximum number of transmit queues the VF can use. + * @rss_rxq_count: The number of receive queues present in the shared RSS + * indirection table. + * @peer_count: Total number of peers in the complete peer list. If larger + * than ARRAY_SIZE(%peers), then the VF must provide sufficient + * additional pages each of which is filled with vfdi_endpoint structures. + * @local: The MAC address and outer VLAN tag of *this* VF + * @peers: Table of peer addresses. The @tci fields in these structures + * are currently unused and must be ignored. Additional peers are + * written into any additional pages provided by the VF. + * @timer_quantum_ns: Timer quantum (nominal period between timer ticks) + * for interrupt moderation timers, in nanoseconds. This member is only + * present if @length is sufficiently large. 
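+ * Check @length before reading this member.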
+ */ +struct vfdi_status { + u32 generation_start; + u32 generation_end; + u32 version; + u32 length; + u8 vi_scale; + u8 max_tx_channels; + u8 rss_rxq_count; + u8 reserved1; + u16 peer_count; + u16 reserved2; + struct vfdi_endpoint local; + struct vfdi_endpoint peers[256]; + + /* Members below here extend version 1 of this structure */ + u32 timer_quantum_ns; +}; + +#endif diff --git a/drivers/net/ethernet/sfc/siena/workarounds.h b/drivers/net/ethernet/sfc/siena/workarounds.h new file mode 100644 index 0000000000..42fb143a94 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/workarounds.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_WORKAROUNDS_H +#define EFX_WORKAROUNDS_H + +/* + * Hardware workarounds. + * Bug numbers are from Solarflare's Bugzilla. + */ + +#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) +#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) +#define EFX_WORKAROUND_10G(efx) 1 + +/* Bit-bashed I2C reads cause performance drop */ +#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G +/* Legacy interrupt storm when interrupt fifo fills */ +#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA + +/* Moderation timer access must go through MCDI */ +#define EFX_EF10_WORKAROUND_61265(efx) \ + (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265) + +#endif /* EFX_WORKAROUNDS_H */ diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c new file mode 100644 index 0000000000..0c0aeb91f5 --- /dev/null +++ b/drivers/net/ethernet/sfc/tc.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "tc.h" +#include "mae.h" +#include "ef100_rep.h" +#include "efx.h" + +static void efx_tc_free_action_set(struct efx_nic *efx, + struct efx_tc_action_set *act, bool in_hw) +{ + /* Failure paths calling this on the 'running action' set in_hw=false, + * because if the alloc had succeeded we'd've put it in acts.list and + * not still have it in act. + */ + if (in_hw) { + efx_mae_free_action_set(efx, act->fw_id); + /* in_hw is true iff we are on an acts.list; make sure to + * remove ourselves from that list before we are freed. + */ + list_del(&act->list); + } + kfree(act); +} + +static void efx_tc_free_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts, + bool in_hw) +{ + struct efx_tc_action_set *act, *next; + + /* Failure paths set in_hw=false, because usually the acts didn't get + * to efx_mae_alloc_action_set_list(); if they did, the failure tree + * has a separate efx_mae_free_action_set_list() before calling us. 
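+ * Either way, every action set still on the list is freed below.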
+ */ + if (in_hw) + efx_mae_free_action_set_list(efx, acts); + /* Any act that's on the list will be in_hw even if the list isn't */ + list_for_each_entry_safe(act, next, &acts->list, list) + efx_tc_free_action_set(efx, act, true); + /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */ +} + +static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule) +{ + efx_mae_delete_rule(efx, rule->fw_id); + + /* Release entries in subsidiary tables */ + efx_tc_free_action_set_list(efx, &rule->acts, true); + rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; +} + +static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port, + u32 eg_port, struct efx_tc_flow_rule *rule) +{ + struct efx_tc_action_set_list *acts = &rule->acts; + struct efx_tc_match *match = &rule->match; + struct efx_tc_action_set *act; + int rc; + + match->value.ingress_port = ing_port; + match->mask.ingress_port = ~0; + act = kzalloc(sizeof(*act), GFP_KERNEL); + if (!act) + return -ENOMEM; + act->deliver = 1; + act->dest_mport = eg_port; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) + goto fail1; + EFX_WARN_ON_PARANOID(!list_empty(&acts->list)); + list_add_tail(&act->list, &acts->list); + rc = efx_mae_alloc_action_set_list(efx, acts); + if (rc) + goto fail2; + rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT, + acts->fw_id, &rule->fw_id); + if (rc) + goto fail3; + return 0; +fail3: + efx_mae_free_action_set_list(efx, acts); +fail2: + list_del(&act->list); + efx_mae_free_action_set(efx, act->fw_id); +fail1: + kfree(act); + return rc; +} + +static int efx_tc_configure_default_rule_pf(struct efx_nic *efx) +{ + struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf; + u32 ing_port, eg_port; + + efx_mae_mport_uplink(efx, &ing_port); + efx_mae_mport_wire(efx, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +static int efx_tc_configure_default_rule_wire(struct efx_nic *efx) +{ + struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire; + u32 ing_port, eg_port; + + efx_mae_mport_wire(efx, &ing_port); + efx_mae_mport_uplink(efx, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +int efx_tc_configure_default_rule_rep(struct efx_rep *efv) +{ + struct efx_tc_flow_rule *rule = &efv->dflt; + struct efx_nic *efx = efv->parent; + u32 ing_port, eg_port; + + efx_mae_mport_mport(efx, efv->mport, &ing_port); + efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +void efx_tc_deconfigure_default_rule(struct efx_nic *efx, + struct efx_tc_flow_rule *rule) +{ + if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) + efx_tc_delete_rule(efx, rule); + rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; +} + +static int efx_tc_configure_rep_mport(struct efx_nic *efx) +{ + u32 rep_mport_label; + int rc; + + rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label); + if (rc) + return rc; + pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n", + efx->tc->reps_mport_id, rep_mport_label); + /* Use mport *selector* as vport ID */ + efx_mae_mport_mport(efx, efx->tc->reps_mport_id, + &efx->tc->reps_mport_vport_id); + return 0; +} + +static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx) +{ + efx_mae_free_mport(efx, efx->tc->reps_mport_id); + efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL; +} + +int efx_tc_insert_rep_filters(struct efx_nic *efx) +{ + struct efx_filter_spec 
promisc, allmulti; + int rc; + + if (efx->type->is_vf) + return 0; + if (!efx->tc) + return 0; + efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0); + efx_filter_set_uc_def(&promisc); + efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id); + rc = efx_filter_insert_filter(efx, &promisc, false); + if (rc < 0) + return rc; + efx->tc->reps_filter_uc = rc; + efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0); + efx_filter_set_mc_def(&allmulti); + efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id); + rc = efx_filter_insert_filter(efx, &allmulti, false); + if (rc < 0) + return rc; + efx->tc->reps_filter_mc = rc; + return 0; +} + +void efx_tc_remove_rep_filters(struct efx_nic *efx) +{ + if (efx->type->is_vf) + return; + if (!efx->tc) + return; + if (efx->tc->reps_filter_mc >= 0) + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc); + efx->tc->reps_filter_mc = -1; + if (efx->tc->reps_filter_uc >= 0) + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc); + efx->tc->reps_filter_uc = -1; +} + +int efx_init_tc(struct efx_nic *efx) +{ + int rc; + + rc = efx_tc_configure_default_rule_pf(efx); + if (rc) + return rc; + rc = efx_tc_configure_default_rule_wire(efx); + if (rc) + return rc; + return efx_tc_configure_rep_mport(efx); +} + +void efx_fini_tc(struct efx_nic *efx) +{ + /* We can get called even if efx_init_struct_tc() failed */ + if (!efx->tc) + return; + efx_tc_deconfigure_rep_mport(efx); + efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf); + efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire); +} + +int efx_init_struct_tc(struct efx_nic *efx) +{ + if (efx->type->is_vf) + return 0; + + efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL); + if (!efx->tc) + return -ENOMEM; + + efx->tc->reps_filter_uc = -1; + efx->tc->reps_filter_mc = -1; + INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list); + efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list); + efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + return 0; +} + +void efx_fini_struct_tc(struct efx_nic *efx) +{ + if (!efx->tc) + return; + + EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id != + MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); + EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id != + MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); + kfree(efx->tc); + efx->tc = NULL; +} diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h new file mode 100644 index 0000000000..309123c6b3 --- /dev/null +++ b/drivers/net/ethernet/sfc/tc.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_TC_H +#define EFX_TC_H +#include "net_driver.h" + +struct efx_tc_action_set { + u16 deliver:1; + u32 dest_mport; + u32 fw_id; /* index of this entry in firmware actions table */ + struct list_head list; +}; + +struct efx_tc_match_fields { + /* L1 */ + u32 ingress_port; +}; + +struct efx_tc_match { + struct efx_tc_match_fields value; + struct efx_tc_match_fields mask; +}; + +struct efx_tc_action_set_list { + struct list_head list; + u32 fw_id; +}; + +struct efx_tc_flow_rule { + struct efx_tc_match match; + struct efx_tc_action_set_list acts; + u32 fw_id; +}; + +enum efx_tc_rule_prios { + EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */ + EFX_TC_PRIO__NUM +}; + +/** + * struct efx_tc_state - control plane data for TC offload + * + * @reps_mport_id: MAE port allocated for representor RX + * @reps_filter_uc: VNIC filter for representor unicast RX (promisc) + * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti) + * @reps_mport_vport_id: vport_id for representor RX filters + * @dflt: Match-action rules for default switching; at priority + * %EFX_TC_PRIO_DFLT. Named by *ingress* port + * @dflt.pf: rule for traffic ingressing from PF (egresses to wire) + * @dflt.wire: rule for traffic ingressing from wire (egresses to PF) + */ +struct efx_tc_state { + u32 reps_mport_id, reps_mport_vport_id; + s32 reps_filter_uc, reps_filter_mc; + struct { + struct efx_tc_flow_rule pf; + struct efx_tc_flow_rule wire; + } dflt; +}; + +struct efx_rep; + +int efx_tc_configure_default_rule_rep(struct efx_rep *efv); +void efx_tc_deconfigure_default_rule(struct efx_nic *efx, + struct efx_tc_flow_rule *rule); + +int efx_tc_insert_rep_filters(struct efx_nic *efx); +void efx_tc_remove_rep_filters(struct efx_nic *efx); + +int efx_init_tc(struct efx_nic *efx); +void efx_fini_tc(struct efx_nic *efx); + +int efx_init_struct_tc(struct efx_nic *efx); +void efx_fini_struct_tc(struct efx_nic *efx); + +#endif /* EFX_TC_H */ diff --git a/drivers/net/ethernet/sunplus/Kconfig b/drivers/net/ethernet/sunplus/Kconfig new file mode 100644 index 0000000000..be50a6b723 --- /dev/null +++ b/drivers/net/ethernet/sunplus/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Sunplus network device configuration +# + +config NET_VENDOR_SUNPLUS + bool "Sunplus devices" + default y + depends on ARCH_SUNPLUS || COMPILE_TEST + help + If you have a network (Ethernet) card belonging to this + class, say Y here. + + Note that the answer to this question doesn't directly + affect the kernel: saying N will just cause the configurator + to skip all the questions about Sunplus cards. If you say Y, + you will be asked for your specific card in the following + questions. + +if NET_VENDOR_SUNPLUS + +config SP7021_EMAC + tristate "Sunplus Dual 10M/100M Ethernet devices" + depends on SOC_SP7021 || COMPILE_TEST + select PHYLIB + help + If you have Sunplus dual 10M/100M Ethernet devices, say Y. + The network device creates two net-device interfaces. + To compile this driver as a module, choose M here. The + module will be called sp7021_emac. + +endif # NET_VENDOR_SUNPLUS diff --git a/drivers/net/ethernet/sunplus/Makefile b/drivers/net/ethernet/sunplus/Makefile new file mode 100644 index 0000000000..ef7d7d0a7e --- /dev/null +++ b/drivers/net/ethernet/sunplus/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Sunplus network device drivers. 
+#
+obj-$(CONFIG_SP7021_EMAC) += sp7021_emac.o
+sp7021_emac-objs := spl2sw_driver.o spl2sw_int.o spl2sw_desc.o spl2sw_mac.o spl2sw_mdio.o spl2sw_phy.o
diff --git a/drivers/net/ethernet/sunplus/spl2sw_define.h b/drivers/net/ethernet/sunplus/spl2sw_define.h
new file mode 100644
index 0000000000..acc6c1228e
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_define.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_DEFINE_H__
+#define __SPL2SW_DEFINE_H__
+
+#define MAX_NETDEV_NUM 2 /* Maximum # of net-device */
+
+/* Interrupt status */
+#define MAC_INT_DAISY_MODE_CHG BIT(31) /* Daisy Mode Change */
+#define MAC_INT_IP_CHKSUM_ERR BIT(23) /* IP Checksum Append Error */
+#define MAC_INT_WDOG_TIMER1_EXP BIT(22) /* Watchdog Timer1 Expired */
+#define MAC_INT_WDOG_TIMER0_EXP BIT(21) /* Watchdog Timer0 Expired */
+#define MAC_INT_INTRUDER_ALERT BIT(20) /* Intruder Alert */
+#define MAC_INT_PORT_ST_CHG BIT(19) /* Port Status Change */
+#define MAC_INT_BC_STORM BIT(18) /* Broadcast Storm */
+#define MAC_INT_MUST_DROP_LAN BIT(17) /* Global Queue Exhausted */
+#define MAC_INT_GLOBAL_QUE_FULL BIT(16) /* Global Queue Full */
+#define MAC_INT_TX_SOC_PAUSE_ON BIT(15) /* SoC Port TX Pause On */
+#define MAC_INT_RX_SOC_QUE_FULL BIT(14) /* SoC Port Out Queue Full */
+#define MAC_INT_TX_LAN1_QUE_FULL BIT(9) /* Port 1 Out Queue Full */
+#define MAC_INT_TX_LAN0_QUE_FULL BIT(8) /* Port 0 Out Queue Full */
+#define MAC_INT_RX_L_DESCF BIT(7) /* Low Priority Descriptor Full */
+#define MAC_INT_RX_H_DESCF BIT(6) /* High Priority Descriptor Full */
+#define MAC_INT_RX_DONE_L BIT(5) /* RX Low Priority Done */
+#define MAC_INT_RX_DONE_H BIT(4) /* RX High Priority Done */
+#define MAC_INT_TX_DONE_L BIT(3) /* TX Low Priority Done */
+#define MAC_INT_TX_DONE_H BIT(2) /* TX High Priority Done */
+#define MAC_INT_TX_DES_ERR BIT(1) /* TX Descriptor Error */
+#define MAC_INT_RX_DES_ERR BIT(0) /* RX Descriptor Error */
+
+#define MAC_INT_RX (MAC_INT_RX_DONE_H | MAC_INT_RX_DONE_L | \
+ MAC_INT_RX_DES_ERR)
+#define MAC_INT_TX (MAC_INT_TX_DONE_L | MAC_INT_TX_DONE_H | \
+ MAC_INT_TX_DES_ERR)
+#define MAC_INT_MASK_DEF (MAC_INT_DAISY_MODE_CHG | MAC_INT_IP_CHKSUM_ERR | \
+ MAC_INT_WDOG_TIMER1_EXP | MAC_INT_WDOG_TIMER0_EXP | \
+ MAC_INT_INTRUDER_ALERT | MAC_INT_PORT_ST_CHG | \
+ MAC_INT_BC_STORM | MAC_INT_MUST_DROP_LAN | \
+ MAC_INT_GLOBAL_QUE_FULL | MAC_INT_TX_SOC_PAUSE_ON | \
+ MAC_INT_RX_SOC_QUE_FULL | MAC_INT_TX_LAN1_QUE_FULL | \
+ MAC_INT_TX_LAN0_QUE_FULL | MAC_INT_RX_L_DESCF | \
+ MAC_INT_RX_H_DESCF)
+
+/* Address table search */
+#define MAC_ADDR_LOOKUP_IDLE BIT(2)
+#define MAC_SEARCH_NEXT_ADDR BIT(1)
+#define MAC_BEGIN_SEARCH_ADDR BIT(0)
+
+/* Address table status */
+#define MAC_HASH_LOOKUP_ADDR GENMASK(31, 22)
+#define MAC_R_PORT_MAP GENMASK(13, 12)
+#define MAC_R_CPU_PORT GENMASK(11, 10)
+#define MAC_R_VID GENMASK(9, 7)
+#define MAC_R_AGE GENMASK(6, 4)
+#define MAC_R_PROXY BIT(3)
+#define MAC_R_MC_INGRESS BIT(2)
+#define MAC_AT_TABLE_END BIT(1)
+#define MAC_AT_DATA_READY BIT(0)
+
+/* Wt mac ad0 */
+#define MAC_W_PORT_MAP GENMASK(13, 12)
+#define MAC_W_LAN_PORT_1 BIT(13)
+#define MAC_W_LAN_PORT_0 BIT(12)
+#define MAC_W_CPU_PORT GENMASK(11, 10)
+#define MAC_W_CPU_PORT_1 BIT(11)
+#define MAC_W_CPU_PORT_0 BIT(10)
+#define MAC_W_VID GENMASK(9, 7)
+#define MAC_W_AGE GENMASK(6, 4)
+#define MAC_W_PROXY BIT(3)
+#define MAC_W_MC_INGRESS BIT(2)
+#define MAC_W_MAC_DONE BIT(1)
+#define MAC_W_MAC_CMD BIT(0)
+
+/* W mac 15_0 bus */
+#define MAC_W_MAC_15_0 GENMASK(15, 0) + +/* W mac 47_16 bus */ +#define MAC_W_MAC_47_16 GENMASK(31, 0) + +/* PVID config 0 */ +#define MAC_P1_PVID GENMASK(6, 4) +#define MAC_P0_PVID GENMASK(2, 0) + +/* VLAN member config 0 */ +#define MAC_VLAN_MEMSET_3 GENMASK(27, 24) +#define MAC_VLAN_MEMSET_2 GENMASK(19, 16) +#define MAC_VLAN_MEMSET_1 GENMASK(11, 8) +#define MAC_VLAN_MEMSET_0 GENMASK(3, 0) + +/* VLAN member config 1 */ +#define MAC_VLAN_MEMSET_5 GENMASK(11, 8) +#define MAC_VLAN_MEMSET_4 GENMASK(3, 0) + +/* Port ability */ +#define MAC_PORT_ABILITY_LINK_ST GENMASK(25, 24) + +/* CPU control */ +#define MAC_EN_SOC1_AGING BIT(15) +#define MAC_EN_SOC0_AGING BIT(14) +#define MAC_DIS_LRN_SOC1 BIT(13) +#define MAC_DIS_LRN_SOC0 BIT(12) +#define MAC_EN_CRC_SOC1 BIT(9) +#define MAC_EN_CRC_SOC0 BIT(8) +#define MAC_DIS_SOC1_CPU BIT(7) +#define MAC_DIS_SOC0_CPU BIT(6) +#define MAC_DIS_BC2CPU_P1 BIT(5) +#define MAC_DIS_BC2CPU_P0 BIT(4) +#define MAC_DIS_MC2CPU GENMASK(3, 2) +#define MAC_DIS_MC2CPU_P1 BIT(3) +#define MAC_DIS_MC2CPU_P0 BIT(2) +#define MAC_DIS_UN2CPU GENMASK(1, 0) + +/* Port control 0 */ +#define MAC_DIS_PORT GENMASK(25, 24) +#define MAC_DIS_PORT1 BIT(25) +#define MAC_DIS_PORT0 BIT(24) +#define MAC_DIS_RMC2CPU_P1 BIT(17) +#define MAC_DIS_RMC2CPU_P0 BIT(16) +#define MAC_EN_FLOW_CTL_P1 BIT(9) +#define MAC_EN_FLOW_CTL_P0 BIT(8) +#define MAC_EN_BACK_PRESS_P1 BIT(1) +#define MAC_EN_BACK_PRESS_P0 BIT(0) + +/* Port control 1 */ +#define MAC_DIS_SA_LRN_P1 BIT(9) +#define MAC_DIS_SA_LRN_P0 BIT(8) + +/* Port control 2 */ +#define MAC_EN_AGING_P1 BIT(9) +#define MAC_EN_AGING_P0 BIT(8) + +/* Switch Global control */ +#define MAC_RMC_TB_FAULT_RULE GENMASK(26, 25) +#define MAC_LED_FLASH_TIME GENMASK(24, 23) +#define MAC_BC_STORM_PREV GENMASK(5, 4) + +/* LED port 0 */ +#define MAC_LED_ACT_HI BIT(28) + +/* PHY control register 0 */ +#define MAC_CPU_PHY_WT_DATA GENMASK(31, 16) +#define MAC_CPU_PHY_CMD GENMASK(14, 13) +#define MAC_CPU_PHY_REG_ADDR GENMASK(12, 8) +#define MAC_CPU_PHY_ADDR GENMASK(4, 0) + +/* PHY control register 1 */ +#define MAC_CPU_PHY_RD_DATA GENMASK(31, 16) +#define MAC_PHY_RD_RDY BIT(1) +#define MAC_PHY_WT_DONE BIT(0) + +/* MAC force mode */ +#define MAC_EXT_PHY1_ADDR GENMASK(28, 24) +#define MAC_EXT_PHY0_ADDR GENMASK(20, 16) +#define MAC_FORCE_RMII_LINK GENMASK(9, 8) +#define MAC_FORCE_RMII_EN_1 BIT(7) +#define MAC_FORCE_RMII_EN_0 BIT(6) +#define MAC_FORCE_RMII_FC GENMASK(5, 4) +#define MAC_FORCE_RMII_DPX GENMASK(3, 2) +#define MAC_FORCE_RMII_SPD GENMASK(1, 0) + +/* CPU transmit trigger */ +#define MAC_TRIG_L_SOC0 BIT(1) +#define MAC_TRIG_H_SOC0 BIT(0) + +/* Config descriptor queue */ +#define TX_DESC_NUM 16 /* # of descriptors in TX queue */ +#define MAC_GUARD_DESC_NUM 2 /* # of descriptors of gap 0 */ +#define RX_QUEUE0_DESC_NUM 16 /* # of descriptors in RX queue 0 */ +#define RX_QUEUE1_DESC_NUM 16 /* # of descriptors in RX queue 1 */ +#define TX_DESC_QUEUE_NUM 1 /* # of TX queue */ +#define RX_DESC_QUEUE_NUM 2 /* # of RX queue */ + +#define MAC_RX_LEN_MAX 2047 /* Size of RX buffer */ + +/* Tx descriptor */ +/* cmd1 */ +#define TXD_OWN BIT(31) +#define TXD_ERR_CODE GENMASK(29, 26) +#define TXD_SOP BIT(25) /* start of a packet */ +#define TXD_EOP BIT(24) /* end of a packet */ +#define TXD_VLAN GENMASK(17, 12) +#define TXD_PKT_LEN GENMASK(10, 0) /* packet length */ +/* cmd2 */ +#define TXD_EOR BIT(31) /* end of ring */ +#define TXD_BUF_LEN2 GENMASK(22, 12) +#define TXD_BUF_LEN1 GENMASK(10, 0) + +/* Rx descriptor */ +/* cmd1 */ +#define RXD_OWN BIT(31) +#define RXD_ERR_CODE GENMASK(29, 
26)
+#define RXD_TCP_UDP_CHKSUM BIT(23)
+#define RXD_PROXY BIT(22)
+#define RXD_PROTOCOL GENMASK(21, 20)
+#define RXD_VLAN_TAG BIT(19)
+#define RXD_IP_CHKSUM BIT(18)
+#define RXD_ROUTE_TYPE GENMASK(17, 16)
+#define RXD_PKT_SP GENMASK(14, 12) /* packet source port */
+#define RXD_PKT_LEN GENMASK(10, 0) /* packet length */
+/* cmd2 */
+#define RXD_EOR BIT(31) /* end of ring */
+#define RXD_BUF_LEN2 GENMASK(22, 12)
+#define RXD_BUF_LEN1 GENMASK(10, 0)
+
+/* structure of descriptor */
+struct spl2sw_mac_desc {
+ u32 cmd1;
+ u32 cmd2;
+ u32 addr1;
+ u32 addr2;
+};
+
+struct spl2sw_skb_info {
+ struct sk_buff *skb;
+ u32 mapping;
+ u32 len;
+};
+
+struct spl2sw_common {
+ void __iomem *l2sw_reg_base;
+
+ struct platform_device *pdev;
+ struct reset_control *rstc;
+ struct clk *clk;
+
+ void *desc_base;
+ dma_addr_t desc_dma;
+ s32 desc_size;
+ struct spl2sw_mac_desc *rx_desc[RX_DESC_QUEUE_NUM];
+ struct spl2sw_skb_info *rx_skb_info[RX_DESC_QUEUE_NUM];
+ u32 rx_pos[RX_DESC_QUEUE_NUM];
+ u32 rx_desc_num[RX_DESC_QUEUE_NUM];
+ u32 rx_desc_buff_size;
+
+ struct spl2sw_mac_desc *tx_desc;
+ struct spl2sw_skb_info tx_temp_skb_info[TX_DESC_NUM];
+ u32 tx_done_pos;
+ u32 tx_pos;
+ u32 tx_desc_full;
+
+ struct net_device *ndev[MAX_NETDEV_NUM];
+ struct mii_bus *mii_bus;
+
+ struct napi_struct rx_napi;
+ struct napi_struct tx_napi;
+
+ spinlock_t tx_lock; /* spinlock for accessing tx buffer */
+ spinlock_t mdio_lock; /* spinlock for mdio commands */
+ spinlock_t int_mask_lock; /* spinlock for accessing int mask reg. */
+
+ u8 enable;
+};
+
+struct spl2sw_mac {
+ struct net_device *ndev;
+ struct spl2sw_common *comm;
+
+ u8 mac_addr[ETH_ALEN];
+ phy_interface_t phy_mode;
+ struct device_node *phy_node;
+
+ u8 lan_port;
+ u8 to_vlan;
+ u8 vlan_id;
+};
+
+#endif
diff --git a/drivers/net/ethernet/sunplus/spl2sw_desc.c b/drivers/net/ethernet/sunplus/spl2sw_desc.c
new file mode 100644
index 0000000000..3f0d9f78b3
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_desc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+
+#include "spl2sw_define.h"
+#include "spl2sw_desc.h"
+
+void spl2sw_rx_descs_flush(struct spl2sw_common *comm)
+{
+ struct spl2sw_skb_info *rx_skbinfo;
+ struct spl2sw_mac_desc *rx_desc;
+ u32 i, j;
+
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
+ rx_desc = comm->rx_desc[i];
+ rx_skbinfo = comm->rx_skb_info[i];
+ for (j = 0; j < comm->rx_desc_num[i]; j++) {
+ rx_desc[j].addr1 = rx_skbinfo[j].mapping;
+ rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
+ RXD_EOR | comm->rx_desc_buff_size :
+ comm->rx_desc_buff_size;
+ wmb(); /* Set RXD_OWN after other fields are ready. */
+ rx_desc[j].cmd1 = RXD_OWN;
+ }
+ }
+}
+
+void spl2sw_tx_descs_clean(struct spl2sw_common *comm)
+{
+ u32 i;
+
+ if (!comm->tx_desc)
+ return;
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ comm->tx_desc[i].cmd1 = 0;
+ wmb(); /* Clear TXD_OWN and then set other fields.
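+ * Dropping ownership first ensures the MAC cannot use a
+ * descriptor whose buffer is about to be unmapped.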
*/ + comm->tx_desc[i].cmd2 = 0; + comm->tx_desc[i].addr1 = 0; + comm->tx_desc[i].addr2 = 0; + + if (comm->tx_temp_skb_info[i].mapping) { + dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping, + comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE); + comm->tx_temp_skb_info[i].mapping = 0; + } + + if (comm->tx_temp_skb_info[i].skb) { + dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb); + comm->tx_temp_skb_info[i].skb = NULL; + } + } +} + +void spl2sw_rx_descs_clean(struct spl2sw_common *comm) +{ + struct spl2sw_skb_info *rx_skbinfo; + struct spl2sw_mac_desc *rx_desc; + u32 i, j; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) { + if (!comm->rx_skb_info[i]) + continue; + + rx_desc = comm->rx_desc[i]; + rx_skbinfo = comm->rx_skb_info[i]; + for (j = 0; j < comm->rx_desc_num[i]; j++) { + rx_desc[j].cmd1 = 0; + wmb(); /* Clear RXD_OWN and then set other fields. */ + rx_desc[j].cmd2 = 0; + rx_desc[j].addr1 = 0; + + if (rx_skbinfo[j].skb) { + dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping, + comm->rx_desc_buff_size, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_skbinfo[j].skb); + rx_skbinfo[j].skb = NULL; + rx_skbinfo[j].mapping = 0; + } + } + + kfree(rx_skbinfo); + comm->rx_skb_info[i] = NULL; + } +} + +void spl2sw_descs_clean(struct spl2sw_common *comm) +{ + spl2sw_rx_descs_clean(comm); + spl2sw_tx_descs_clean(comm); +} + +void spl2sw_descs_free(struct spl2sw_common *comm) +{ + u32 i; + + spl2sw_descs_clean(comm); + comm->tx_desc = NULL; + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_desc[i] = NULL; + + /* Free descriptor area */ + if (comm->desc_base) { + dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base, + comm->desc_dma); + comm->desc_base = NULL; + comm->desc_dma = 0; + comm->desc_size = 0; + } +} + +void spl2sw_tx_descs_init(struct spl2sw_common *comm) +{ + memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) * + (TX_DESC_NUM + MAC_GUARD_DESC_NUM)); +} + +int spl2sw_rx_descs_init(struct spl2sw_common *comm) +{ + struct spl2sw_skb_info *rx_skbinfo; + struct spl2sw_mac_desc *rx_desc; + struct sk_buff *skb; + u32 mapping; + u32 i, j; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) { + comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo), + GFP_KERNEL | GFP_DMA); + if (!comm->rx_skb_info[i]) + goto mem_alloc_fail; + + rx_skbinfo = comm->rx_skb_info[i]; + rx_desc = comm->rx_desc[i]; + for (j = 0; j < comm->rx_desc_num[i]; j++) { + skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size); + if (!skb) + goto mem_alloc_fail; + + rx_skbinfo[j].skb = skb; + mapping = dma_map_single(&comm->pdev->dev, skb->data, + comm->rx_desc_buff_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&comm->pdev->dev, mapping)) + goto mem_alloc_fail; + + rx_skbinfo[j].mapping = mapping; + rx_desc[j].addr1 = mapping; + rx_desc[j].addr2 = 0; + rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ? + RXD_EOR | comm->rx_desc_buff_size : + comm->rx_desc_buff_size; + wmb(); /* Set RXD_OWN after other fields are effective. 
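+ * This way the MAC only ever takes ownership of a fully
+ * initialised descriptor.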
*/ + rx_desc[j].cmd1 = RXD_OWN; + } + } + + return 0; + +mem_alloc_fail: + spl2sw_rx_descs_clean(comm); + return -ENOMEM; +} + +int spl2sw_descs_alloc(struct spl2sw_common *comm) +{ + s32 desc_size; + u32 i; + + /* Alloc descriptor area */ + desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc); + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc); + + comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma, + GFP_KERNEL); + if (!comm->desc_base) + return -ENOMEM; + + comm->desc_size = desc_size; + + /* Setup Tx descriptor */ + comm->tx_desc = comm->desc_base; + + /* Setup Rx descriptor */ + comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM]; + for (i = 1; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1]; + + return 0; +} + +int spl2sw_descs_init(struct spl2sw_common *comm) +{ + u32 i, ret; + + /* Initialize rx descriptor's data */ + comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM; + comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) { + comm->rx_desc[i] = NULL; + comm->rx_skb_info[i] = NULL; + comm->rx_pos[i] = 0; + } + comm->rx_desc_buff_size = MAC_RX_LEN_MAX; + + /* Initialize tx descriptor's data */ + comm->tx_done_pos = 0; + comm->tx_desc = NULL; + comm->tx_pos = 0; + comm->tx_desc_full = 0; + for (i = 0; i < TX_DESC_NUM; i++) + comm->tx_temp_skb_info[i].skb = NULL; + + /* Allocate tx & rx descriptors. */ + ret = spl2sw_descs_alloc(comm); + if (ret) + return ret; + + spl2sw_tx_descs_init(comm); + + return spl2sw_rx_descs_init(comm); +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_desc.h b/drivers/net/ethernet/sunplus/spl2sw_desc.h new file mode 100644 index 0000000000..f04e2d85c3 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_desc.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_DESC_H__ +#define __SPL2SW_DESC_H__ + +void spl2sw_rx_descs_flush(struct spl2sw_common *comm); +void spl2sw_tx_descs_clean(struct spl2sw_common *comm); +void spl2sw_rx_descs_clean(struct spl2sw_common *comm); +void spl2sw_descs_clean(struct spl2sw_common *comm); +void spl2sw_descs_free(struct spl2sw_common *comm); +void spl2sw_tx_descs_init(struct spl2sw_common *comm); +int spl2sw_rx_descs_init(struct spl2sw_common *comm); +int spl2sw_descs_alloc(struct spl2sw_common *comm); +int spl2sw_descs_init(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c new file mode 100644 index 0000000000..5462066404 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -0,0 +1,565 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "spl2sw_register.h"
+#include "spl2sw_define.h"
+#include "spl2sw_desc.h"
+#include "spl2sw_mdio.h"
+#include "spl2sw_phy.h"
+#include "spl2sw_int.h"
+#include "spl2sw_mac.h"
+
+/* net device operations */
+static int spl2sw_ethernet_open(struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct spl2sw_common *comm = mac->comm;
+ u32 mask;
+
+ netdev_dbg(ndev, "Open port = %x\n", mac->lan_port);
+
+ comm->enable |= mac->lan_port;
+
+ spl2sw_mac_hw_start(comm);
+
+ /* Enable TX and RX interrupts */
+ mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ mask &= ~(MAC_INT_TX | MAC_INT_RX);
+ writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+
+ phy_start(ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int spl2sw_ethernet_stop(struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct spl2sw_common *comm = mac->comm;
+
+ netif_stop_queue(ndev);
+
+ comm->enable &= ~mac->lan_port;
+
+ phy_stop(ndev->phydev);
+
+ spl2sw_mac_hw_stop(comm);
+
+ return 0;
+}
+
+static int spl2sw_ethernet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct spl2sw_common *comm = mac->comm;
+ struct spl2sw_skb_info *skbinfo;
+ struct spl2sw_mac_desc *txdesc;
+ unsigned long flags;
+ u32 mapping;
+ u32 tx_pos;
+ u32 cmd1;
+ u32 cmd2;
+
+ if (unlikely(comm->tx_desc_full == 1)) {
+ /* No TX descriptors left. Wait for tx interrupt. */
+ netdev_dbg(ndev, "TX descriptor queue full when xmit!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* If skb size is shorter than ETH_ZLEN (60), pad it with 0. */
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
+ mapping = dma_map_single(&comm->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&comm->pdev->dev, mapping)) {
+ ndev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&comm->tx_lock, flags);
+
+ tx_pos = comm->tx_pos;
+ txdesc = &comm->tx_desc[tx_pos];
+ skbinfo = &comm->tx_temp_skb_info[tx_pos];
+ skbinfo->mapping = mapping;
+ skbinfo->len = skb->len;
+ skbinfo->skb = skb;
+
+ /* Set up a TX descriptor */
+ cmd1 = TXD_OWN | TXD_SOP | TXD_EOP | (mac->to_vlan << 12) |
+ (skb->len & TXD_PKT_LEN);
+ cmd2 = skb->len & TXD_BUF_LEN1;
+
+ if (tx_pos == (TX_DESC_NUM - 1))
+ cmd2 |= TXD_EOR;
+
+ txdesc->addr1 = skbinfo->mapping;
+ txdesc->cmd2 = cmd2;
+ wmb(); /* Set TXD_OWN after other fields are effective. */
+ txdesc->cmd1 = cmd1;
+
+ /* Move tx_pos to next position */
+ tx_pos = ((tx_pos + 1) == TX_DESC_NUM) ? 0 : tx_pos + 1;
+
+ if (unlikely(tx_pos == comm->tx_done_pos)) {
+ netif_stop_queue(ndev);
+ comm->tx_desc_full = 1;
+ }
+ comm->tx_pos = tx_pos;
+ wmb(); /* make sure settings are effective.
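+ * They must reach memory before the TX trigger register is
+ * written below.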
*/ + + /* Trigger mac to transmit */ + writel(MAC_TRIG_L_SOC0, comm->l2sw_reg_base + L2SW_CPU_TX_TRIG); + + spin_unlock_irqrestore(&comm->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void spl2sw_ethernet_set_rx_mode(struct net_device *ndev) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + + spl2sw_mac_rx_mode_set(mac); +} + +static int spl2sw_ethernet_set_mac_address(struct net_device *ndev, void *addr) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + int err; + + err = eth_mac_addr(ndev, addr); + if (err) + return err; + + /* Delete the old MAC address */ + netdev_dbg(ndev, "Old Ethernet (MAC) address = %pM\n", mac->mac_addr); + if (is_valid_ether_addr(mac->mac_addr)) { + err = spl2sw_mac_addr_del(mac); + if (err) + return err; + } + + /* Set the MAC address */ + ether_addr_copy(mac->mac_addr, ndev->dev_addr); + return spl2sw_mac_addr_add(mac); +} + +static void spl2sw_ethernet_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + struct spl2sw_common *comm = mac->comm; + unsigned long flags; + int i; + + netdev_err(ndev, "TX timed out!\n"); + ndev->stats.tx_errors++; + + spin_lock_irqsave(&comm->tx_lock, flags); + + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + netif_stop_queue(comm->ndev[i]); + + spl2sw_mac_soft_reset(comm); + + /* Accept TX packets again. */ + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + netif_trans_update(comm->ndev[i]); + netif_wake_queue(comm->ndev[i]); + } + + spin_unlock_irqrestore(&comm->tx_lock, flags); +} + +static const struct net_device_ops netdev_ops = { + .ndo_open = spl2sw_ethernet_open, + .ndo_stop = spl2sw_ethernet_stop, + .ndo_start_xmit = spl2sw_ethernet_start_xmit, + .ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode, + .ndo_set_mac_address = spl2sw_ethernet_set_mac_address, + .ndo_do_ioctl = phy_do_ioctl, + .ndo_tx_timeout = spl2sw_ethernet_tx_timeout, +}; + +static void spl2sw_check_mac_vendor_id_and_convert(u8 *mac_addr) +{ + /* Byte order of MAC address of some samples are reversed. + * Check vendor id and convert byte order if it is wrong. + * OUI of Sunplus: fc:4b:bc + */ + if (mac_addr[5] == 0xfc && mac_addr[4] == 0x4b && mac_addr[3] == 0xbc && + (mac_addr[0] != 0xfc || mac_addr[1] != 0x4b || mac_addr[2] != 0xbc)) { + + swap(mac_addr[0], mac_addr[5]); + swap(mac_addr[1], mac_addr[4]); + swap(mac_addr[2], mac_addr[3]); + } +} + +static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *np, + void *addrbuf) +{ + struct nvmem_cell *cell; + ssize_t len; + u8 *mac; + + /* Get nvmem cell of mac-address from dts. */ + cell = of_nvmem_cell_get(np, "mac-address"); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + /* Read mac address from nvmem cell. */ + mac = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + if (IS_ERR(mac)) + return PTR_ERR(mac); + + if (len != ETH_ALEN) { + kfree(mac); + dev_info(dev, "Invalid length of mac address in nvmem!\n"); + return -EINVAL; + } + + /* Byte order of some samples are reversed. + * Convert byte order here. 
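+ * e.g. a stored 12:34:56:bc:4b:fc is converted back to
+ * fc:4b:bc:56:34:12.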
+ */ + spl2sw_check_mac_vendor_id_and_convert(mac); + + /* Check if mac address is valid */ + if (!is_valid_ether_addr(mac)) { + kfree(mac); + dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac); + return -EINVAL; + } + + ether_addr_copy(addrbuf, mac); + kfree(mac); + return 0; +} + +static u32 spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr, + struct net_device **r_ndev) +{ + struct net_device *ndev; + struct spl2sw_mac *mac; + int ret; + + /* Allocate the devices, and also allocate spl2sw_mac, + * we can get it by netdev_priv(). + */ + ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*mac)); + if (!ndev) { + *r_ndev = NULL; + return -ENOMEM; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + ndev->netdev_ops = &netdev_ops; + mac = netdev_priv(ndev); + mac->ndev = ndev; + ether_addr_copy(mac->mac_addr, mac_addr); + + eth_hw_addr_set(ndev, mac_addr); + dev_info(&pdev->dev, "Ethernet (MAC) address = %pM\n", mac_addr); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n", + ndev->name); + free_netdev(ndev); + *r_ndev = NULL; + return ret; + } + netdev_dbg(ndev, "Registered net device \"%s\" successfully.\n", ndev->name); + + *r_ndev = ndev; + return 0; +} + +static struct device_node *spl2sw_get_eth_child_node(struct device_node *ether_np, int id) +{ + struct device_node *port_np; + int port_id; + + for_each_child_of_node(ether_np, port_np) { + /* It is not a 'port' node, continue. */ + if (strcmp(port_np->name, "port")) + continue; + + if (of_property_read_u32(port_np, "reg", &port_id) < 0) + continue; + + if (port_id == id) + return port_np; + } + + /* Not found! */ + return NULL; +} + +static int spl2sw_probe(struct platform_device *pdev) +{ + struct device_node *eth_ports_np; + struct device_node *port_np; + struct spl2sw_common *comm; + struct device_node *phy_np; + phy_interface_t phy_mode; + struct net_device *ndev; + struct spl2sw_mac *mac; + u8 mac_addr[ETH_ALEN]; + int irq, i, ret; + + if (platform_get_drvdata(pdev)) + return -ENODEV; + + /* Allocate memory for 'spl2sw_common' area. */ + comm = devm_kzalloc(&pdev->dev, sizeof(*comm), GFP_KERNEL); + if (!comm) + return -ENOMEM; + + comm->pdev = pdev; + platform_set_drvdata(pdev, comm); + + spin_lock_init(&comm->tx_lock); + spin_lock_init(&comm->mdio_lock); + spin_lock_init(&comm->int_mask_lock); + + /* Get memory resource 0 from dts. */ + comm->l2sw_reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(comm->l2sw_reg_base)) + return PTR_ERR(comm->l2sw_reg_base); + + /* Get irq resource from dts. */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + irq = ret; + + /* Get clock controller. */ + comm->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(comm->clk)) { + dev_err_probe(&pdev->dev, PTR_ERR(comm->clk), + "Failed to retrieve clock controller!\n"); + return PTR_ERR(comm->clk); + } + + /* Get reset controller. */ + comm->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(comm->rstc)) { + dev_err_probe(&pdev->dev, PTR_ERR(comm->rstc), + "Failed to retrieve reset controller!\n"); + return PTR_ERR(comm->rstc); + } + + /* Enable clock. */ + ret = clk_prepare_enable(comm->clk); + if (ret) + return ret; + udelay(1); + + /* Reset MAC */ + reset_control_assert(comm->rstc); + udelay(1); + reset_control_deassert(comm->rstc); + usleep_range(1000, 2000); + + /* Request irq. 
*/ + ret = devm_request_irq(&pdev->dev, irq, spl2sw_ethernet_interrupt, 0, + dev_name(&pdev->dev), comm); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq #%d!\n", irq); + goto out_clk_disable; + } + + /* Initialize TX and RX descriptors. */ + ret = spl2sw_descs_init(comm); + if (ret) { + dev_err(&pdev->dev, "Fail to initialize mac descriptors!\n"); + spl2sw_descs_free(comm); + goto out_clk_disable; + } + + /* Initialize MAC. */ + spl2sw_mac_init(comm); + + /* Initialize mdio bus */ + ret = spl2sw_mdio_init(comm); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize mdio bus!\n"); + goto out_clk_disable; + } + + /* Get child node ethernet-ports. */ + eth_ports_np = of_get_child_by_name(pdev->dev.of_node, "ethernet-ports"); + if (!eth_ports_np) { + dev_err(&pdev->dev, "No ethernet-ports child node found!\n"); + ret = -ENODEV; + goto out_free_mdio; + } + + for (i = 0; i < MAX_NETDEV_NUM; i++) { + /* Get port@i of node ethernet-ports. */ + port_np = spl2sw_get_eth_child_node(eth_ports_np, i); + if (!port_np) + continue; + + /* Get phy-mode. */ + if (of_get_phy_mode(port_np, &phy_mode)) { + dev_err(&pdev->dev, "Failed to get phy-mode property of port@%d!\n", + i); + continue; + } + + /* Get phy-handle. */ + phy_np = of_parse_phandle(port_np, "phy-handle", 0); + if (!phy_np) { + dev_err(&pdev->dev, "Failed to get phy-handle property of port@%d!\n", + i); + continue; + } + + /* Get mac-address from nvmem. */ + ret = spl2sw_nvmem_get_mac_address(&pdev->dev, port_np, mac_addr); + if (ret == -EPROBE_DEFER) { + goto out_unregister_dev; + } else if (ret) { + dev_info(&pdev->dev, "Generate a random mac address!\n"); + eth_random_addr(mac_addr); + } + + /* Initialize the net device. */ + ret = spl2sw_init_netdev(pdev, mac_addr, &ndev); + if (ret) + goto out_unregister_dev; + + ndev->irq = irq; + comm->ndev[i] = ndev; + mac = netdev_priv(ndev); + mac->phy_node = phy_np; + mac->phy_mode = phy_mode; + mac->comm = comm; + + mac->lan_port = 0x1 << i; /* forward to port i */ + mac->to_vlan = 0x1 << i; /* vlan group: i */ + mac->vlan_id = i; /* vlan group: i */ + + /* Set MAC address */ + ret = spl2sw_mac_addr_add(mac); + if (ret) + goto out_unregister_dev; + + spl2sw_mac_rx_mode_set(mac); + } + + /* Find first valid net device. */ + for (i = 0; i < MAX_NETDEV_NUM; i++) { + if (comm->ndev[i]) + break; + } + if (i >= MAX_NETDEV_NUM) { + dev_err(&pdev->dev, "No valid ethernet port!\n"); + ret = -ENODEV; + goto out_free_mdio; + } + + /* Save first valid net device */ + ndev = comm->ndev[i]; + + ret = spl2sw_phy_connect(comm); + if (ret) { + netdev_err(ndev, "Failed to connect phy!\n"); + goto out_unregister_dev; + } + + /* Add and enable napi. */ + netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT); + napi_enable(&comm->rx_napi); + netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll); + napi_enable(&comm->tx_napi); + return 0; + +out_unregister_dev: + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + unregister_netdev(comm->ndev[i]); + +out_free_mdio: + spl2sw_mdio_remove(comm); + +out_clk_disable: + clk_disable_unprepare(comm->clk); + return ret; +} + +static int spl2sw_remove(struct platform_device *pdev) +{ + struct spl2sw_common *comm; + int i; + + comm = platform_get_drvdata(pdev); + + spl2sw_phy_remove(comm); + + /* Unregister and free net device. */ + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + unregister_netdev(comm->ndev[i]); + + comm->enable = 0; + spl2sw_mac_hw_stop(comm); + spl2sw_descs_free(comm); + + /* Disable and delete napi. 
*/
+ napi_disable(&comm->rx_napi);
+ netif_napi_del(&comm->rx_napi);
+ napi_disable(&comm->tx_napi);
+ netif_napi_del(&comm->tx_napi);
+
+ spl2sw_mdio_remove(comm);
+
+ clk_disable_unprepare(comm->clk);
+
+ return 0;
+}
+
+static const struct of_device_id spl2sw_of_match[] = {
+ {.compatible = "sunplus,sp7021-emac"},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, spl2sw_of_match);
+
+static struct platform_driver spl2sw_driver = {
+ .probe = spl2sw_probe,
+ .remove = spl2sw_remove,
+ .driver = {
+ .name = "sp7021_emac",
+ .of_match_table = spl2sw_of_match,
+ },
+};
+
+module_platform_driver(spl2sw_driver);
+
+MODULE_AUTHOR("Wells Lu ");
+MODULE_DESCRIPTION("Sunplus Dual 10M/100M Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.c b/drivers/net/ethernet/sunplus/spl2sw_int.c
new file mode 100644
index 0000000000..a37c9a4c28
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_int.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "spl2sw_register.h"
+#include "spl2sw_define.h"
+#include "spl2sw_int.h"
+
+int spl2sw_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
+ struct spl2sw_mac_desc *desc, *h_desc;
+ struct net_device_stats *stats;
+ struct sk_buff *skb, *new_skb;
+ struct spl2sw_skb_info *sinfo;
+ int budget_left = budget;
+ unsigned long flags;
+ u32 rx_pos, pkg_len;
+ u32 num, rx_count;
+ s32 queue;
+ u32 mask;
+ int port;
+ u32 cmd;
+ u32 len;
+
+ /* Process high-priority queue and then low-priority queue. */
+ for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
+ rx_pos = comm->rx_pos[queue];
+ rx_count = comm->rx_desc_num[queue];
+
+ for (num = 0; num < rx_count && budget_left; num++) {
+ sinfo = comm->rx_skb_info[queue] + rx_pos;
+ desc = comm->rx_desc[queue] + rx_pos;
+ cmd = desc->cmd1;
+
+ if (cmd & RXD_OWN)
+ break;
+
+ port = FIELD_GET(RXD_PKT_SP, cmd);
+ if (port < MAX_NETDEV_NUM && comm->ndev[port])
+ stats = &comm->ndev[port]->stats;
+ else
+ goto spl2sw_rx_poll_rec_err;
+
+ pkg_len = FIELD_GET(RXD_PKT_LEN, cmd);
+ if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) {
+ stats->rx_length_errors++;
+ stats->rx_dropped++;
+ goto spl2sw_rx_poll_rec_err;
+ }
+
+ dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
+ comm->rx_desc_buff_size, DMA_FROM_DEVICE);
+
+ skb = sinfo->skb;
+ skb_put(skb, pkg_len - 4); /* Minus FCS */
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, comm->ndev[port]);
+ len = skb->len;
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+
+ /* Allocate a new skb for receiving. */
+ new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
+ if (unlikely(!new_skb)) {
+ desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
+ RXD_EOR : 0;
+ sinfo->skb = NULL;
+ sinfo->mapping = 0;
+ desc->addr1 = 0;
+ goto spl2sw_rx_poll_alloc_err;
+ }
+
+ sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
+ comm->rx_desc_buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
+ dev_kfree_skb_irq(new_skb);
+ desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
+ RXD_EOR : 0; + sinfo->skb = NULL; + sinfo->mapping = 0; + desc->addr1 = 0; + goto spl2sw_rx_poll_alloc_err; + } + + sinfo->skb = new_skb; + desc->addr1 = sinfo->mapping; + +spl2sw_rx_poll_rec_err: + desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? + RXD_EOR | comm->rx_desc_buff_size : + comm->rx_desc_buff_size; + + wmb(); /* Set RXD_OWN after other fields are effective. */ + desc->cmd1 = RXD_OWN; + +spl2sw_rx_poll_alloc_err: + /* Move rx_pos to next position */ + rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1; + + budget_left--; + + /* If there are packets in high-priority queue, + * stop processing low-priority queue. + */ + if (queue == 1 && !(h_desc->cmd1 & RXD_OWN)) + break; + } + + comm->rx_pos[queue] = rx_pos; + + /* Save pointer to last rx descriptor of high-priority queue. */ + if (queue == 0) + h_desc = comm->rx_desc[queue] + rx_pos; + } + + spin_lock_irqsave(&comm->int_mask_lock, flags); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask &= ~MAC_INT_RX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock_irqrestore(&comm->int_mask_lock, flags); + + napi_complete(napi); + return budget - budget_left; +} + +int spl2sw_tx_poll(struct napi_struct *napi, int budget) +{ + struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi); + struct spl2sw_skb_info *skbinfo; + struct net_device_stats *stats; + int budget_left = budget; + unsigned long flags; + u32 tx_done_pos; + u32 mask; + u32 cmd; + int i; + + spin_lock(&comm->tx_lock); + + tx_done_pos = comm->tx_done_pos; + while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) { + cmd = comm->tx_desc[tx_done_pos].cmd1; + if (cmd & TXD_OWN) + break; + + skbinfo = &comm->tx_temp_skb_info[tx_done_pos]; + if (unlikely(!skbinfo->skb)) + goto spl2sw_tx_poll_next; + + i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1; + if (i < MAX_NETDEV_NUM && comm->ndev[i]) + stats = &comm->ndev[i]->stats; + else + goto spl2sw_tx_poll_unmap; + + if (unlikely(cmd & (TXD_ERR_CODE))) { + stats->tx_errors++; + } else { + stats->tx_packets++; + stats->tx_bytes += skbinfo->len; + } + +spl2sw_tx_poll_unmap: + dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len, + DMA_TO_DEVICE); + skbinfo->mapping = 0; + dev_kfree_skb_irq(skbinfo->skb); + skbinfo->skb = NULL; + +spl2sw_tx_poll_next: + /* Move tx_done_pos to next position */ + tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 
0 : tx_done_pos + 1; + + if (comm->tx_desc_full == 1) + comm->tx_desc_full = 0; + + budget_left--; + } + + comm->tx_done_pos = tx_done_pos; + if (!comm->tx_desc_full) + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + if (netif_queue_stopped(comm->ndev[i])) + netif_wake_queue(comm->ndev[i]); + + spin_unlock(&comm->tx_lock); + + spin_lock_irqsave(&comm->int_mask_lock, flags); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask &= ~MAC_INT_TX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock_irqrestore(&comm->int_mask_lock, flags); + + napi_complete(napi); + return budget - budget_left; +} + +irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id) +{ + struct spl2sw_common *comm = (struct spl2sw_common *)dev_id; + u32 status; + u32 mask; + int i; + + status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); + if (unlikely(!status)) { + dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n"); + goto spl2sw_ethernet_int_out; + } + writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); + + if (status & MAC_INT_RX) { + /* Disable RX interrupts. */ + spin_lock(&comm->int_mask_lock); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask |= MAC_INT_RX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock(&comm->int_mask_lock); + + if (unlikely(status & MAC_INT_RX_DES_ERR)) { + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + comm->ndev[i]->stats.rx_fifo_errors++; + break; + } + dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n"); + } + + napi_schedule(&comm->rx_napi); + } + + if (status & MAC_INT_TX) { + /* Disable TX interrupts. */ + spin_lock(&comm->int_mask_lock); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask |= MAC_INT_TX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock(&comm->int_mask_lock); + + if (unlikely(status & MAC_INT_TX_DES_ERR)) { + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + comm->ndev[i]->stats.tx_fifo_errors++; + break; + } + dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n"); + + spin_lock(&comm->int_mask_lock); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask &= ~MAC_INT_TX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock(&comm->int_mask_lock); + } else { + napi_schedule(&comm->tx_napi); + } + } + +spl2sw_ethernet_int_out: + return IRQ_HANDLED; +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.h b/drivers/net/ethernet/sunplus/spl2sw_int.h new file mode 100644 index 0000000000..64f6f2572f --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_int.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_INT_H__ +#define __SPL2SW_INT_H__ + +int spl2sw_rx_poll(struct napi_struct *napi, int budget); +int spl2sw_tx_poll(struct napi_struct *napi, int budget); +irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_mac.c b/drivers/net/ethernet/sunplus/spl2sw_mac.c new file mode 100644 index 0000000000..57e431dfc4 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mac.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_desc.h" +#include "spl2sw_mac.h" + +void spl2sw_mac_hw_stop(struct spl2sw_common *comm) +{ + u32 reg; + + if (comm->enable == 0) { + /* Mask and clear all interrupts. */ + writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); + + /* Disable cpu 0 and cpu 1. */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + } + + /* Disable LAN ports. */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0); + reg |= FIELD_PREP(MAC_DIS_PORT, ~comm->enable); + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0); +} + +void spl2sw_mac_hw_start(struct spl2sw_common *comm) +{ + u32 reg; + + /* Enable cpu port 0 (6) & CRC padding (8) */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg &= ~MAC_DIS_SOC0_CPU; + reg |= MAC_EN_CRC_SOC0; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + + /* Enable port 0 & port 1 */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0); + reg &= FIELD_PREP(MAC_DIS_PORT, ~comm->enable) | ~MAC_DIS_PORT; + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0); +} + +int spl2sw_mac_addr_add(struct spl2sw_mac *mac) +{ + struct spl2sw_common *comm = mac->comm; + u32 reg; + int ret; + + /* Write 6-octet MAC address. */ + writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8), + comm->l2sw_reg_base + L2SW_W_MAC_15_0); + writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) + + (mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24), + comm->l2sw_reg_base + L2SW_W_MAC_47_16); + + /* Set learn port = cpu_port, aging = 1 */ + reg = MAC_W_CPU_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) | + FIELD_PREP(MAC_W_AGE, 1) | MAC_W_MAC_CMD; + writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + + /* Wait for completing. */ + ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true, + comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + if (ret) { + netdev_err(mac->ndev, "Failed to add address to table!\n"); + return ret; + } + + netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n", + readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0), + (u32)FIELD_GET(MAC_W_MAC_47_16, + readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)), + (u32)FIELD_GET(MAC_W_MAC_15_0, + readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0))); + return 0; +} + +int spl2sw_mac_addr_del(struct spl2sw_mac *mac) +{ + struct spl2sw_common *comm = mac->comm; + u32 reg; + int ret; + + /* Write 6-octet MAC address. */ + writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8), + comm->l2sw_reg_base + L2SW_W_MAC_15_0); + writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) + + (mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24), + comm->l2sw_reg_base + L2SW_W_MAC_47_16); + + /* Set learn port = lan_port0 and aging = 0 + * to wipe (age) out the entry. + */ + reg = MAC_W_LAN_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) | MAC_W_MAC_CMD; + writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + + /* Wait for completing. 
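+	 * Hardware sets MAC_W_MAC_DONE in L2SW_WT_MAC_AD0 once the
+	 * address-table write has taken effect; poll for that bit
+	 * (1 us interval, 200 us timeout).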
*/ + ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true, + comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + if (ret) { + netdev_err(mac->ndev, "Failed to delete address from table!\n"); + return ret; + } + + netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n", + readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0), + (u32)FIELD_GET(MAC_W_MAC_47_16, + readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)), + (u32)FIELD_GET(MAC_W_MAC_15_0, + readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0))); + return 0; +} + +void spl2sw_mac_hw_init(struct spl2sw_common *comm) +{ + u32 reg; + + /* Disable cpu0 and cpu 1 port. */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + + /* Set base addresses of TX and RX queues. */ + writel(comm->desc_dma, comm->l2sw_reg_base + L2SW_TX_LBASE_ADDR_0); + writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * TX_DESC_NUM, + comm->l2sw_reg_base + L2SW_TX_HBASE_ADDR_0); + writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM + + MAC_GUARD_DESC_NUM), comm->l2sw_reg_base + L2SW_RX_HBASE_ADDR_0); + writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM + + MAC_GUARD_DESC_NUM + RX_QUEUE0_DESC_NUM), + comm->l2sw_reg_base + L2SW_RX_LBASE_ADDR_0); + + /* Fc_rls_th=0x4a, Fc_set_th=0x3a, Drop_rls_th=0x2d, Drop_set_th=0x1d */ + writel(0x4a3a2d1d, comm->l2sw_reg_base + L2SW_FL_CNTL_TH); + + /* Cpu_rls_th=0x4a, Cpu_set_th=0x3a, Cpu_th=0x12, Port_th=0x12 */ + writel(0x4a3a1212, comm->l2sw_reg_base + L2SW_CPU_FL_CNTL_TH); + + /* mtcc_lmt=0xf, Pri_th_l=6, Pri_th_h=6, weigh_8x_en=1 */ + writel(0xf6680000, comm->l2sw_reg_base + L2SW_PRI_FL_CNTL); + + /* High-active LED */ + reg = readl(comm->l2sw_reg_base + L2SW_LED_PORT0); + reg |= MAC_LED_ACT_HI; + writel(reg, comm->l2sw_reg_base + L2SW_LED_PORT0); + + /* Disable aging of cpu port 0 & 1. + * Disable SA learning of cpu port 0 & 1. + * Enable UC and MC packets + */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg &= ~(MAC_EN_SOC1_AGING | MAC_EN_SOC0_AGING | + MAC_DIS_BC2CPU_P1 | MAC_DIS_BC2CPU_P0 | + MAC_DIS_MC2CPU_P1 | MAC_DIS_MC2CPU_P0); + reg |= MAC_DIS_LRN_SOC1 | MAC_DIS_LRN_SOC0; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + + /* Enable RMC2CPU for port 0 & 1 + * Enable Flow control for port 0 & 1 + * Enable Back pressure for port 0 & 1 + */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0); + reg &= ~(MAC_DIS_RMC2CPU_P1 | MAC_DIS_RMC2CPU_P0); + reg |= MAC_EN_FLOW_CTL_P1 | MAC_EN_FLOW_CTL_P0 | + MAC_EN_BACK_PRESS_P1 | MAC_EN_BACK_PRESS_P0; + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0); + + /* Disable LAN port SA learning. */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL1); + reg |= MAC_DIS_SA_LRN_P1 | MAC_DIS_SA_LRN_P0; + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL1); + + /* Enable rmii force mode and + * set both external phy-address to 31. 
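+	 * (Parking both external PHY addresses at 31 keeps the hardware
+	 * auto-MDIO function from sending commands to a real PHY;
+	 * spl2sw_mdio_access() temporarily rewrites ext_phy0_addr around
+	 * each software MDIO command.)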
+ */ + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + reg &= ~(MAC_EXT_PHY1_ADDR | MAC_EXT_PHY0_ADDR); + reg |= FIELD_PREP(MAC_EXT_PHY1_ADDR, 31) | FIELD_PREP(MAC_EXT_PHY0_ADDR, 31); + reg |= MAC_FORCE_RMII_EN_1 | MAC_FORCE_RMII_EN_0; + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + /* Port 0: VLAN group 0 + * Port 1: VLAN group 1 + */ + reg = FIELD_PREP(MAC_P1_PVID, 1) | FIELD_PREP(MAC_P0_PVID, 0); + writel(reg, comm->l2sw_reg_base + L2SW_PVID_CONFIG0); + + /* VLAN group 0: cpu0 (bit3) + port0 (bit0) = 1001 = 0x9 + * VLAN group 1: cpu0 (bit3) + port1 (bit1) = 1010 = 0xa + */ + reg = FIELD_PREP(MAC_VLAN_MEMSET_1, 0xa) | FIELD_PREP(MAC_VLAN_MEMSET_0, 9); + writel(reg, comm->l2sw_reg_base + L2SW_VLAN_MEMSET_CONFIG0); + + /* RMC forward: to_cpu (1) + * LED: 60mS (1) + * BC storm prev: 31 BC (1) + */ + reg = readl(comm->l2sw_reg_base + L2SW_SW_GLB_CNTL); + reg &= ~(MAC_RMC_TB_FAULT_RULE | MAC_LED_FLASH_TIME | MAC_BC_STORM_PREV); + reg |= FIELD_PREP(MAC_RMC_TB_FAULT_RULE, 1) | + FIELD_PREP(MAC_LED_FLASH_TIME, 1) | + FIELD_PREP(MAC_BC_STORM_PREV, 1); + writel(reg, comm->l2sw_reg_base + L2SW_SW_GLB_CNTL); + + writel(MAC_INT_MASK_DEF, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); +} + +void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac) +{ + struct spl2sw_common *comm = mac->comm; + struct net_device *ndev = mac->ndev; + u32 mask, reg, rx_mode; + + netdev_dbg(ndev, "ndev->flags = %08x\n", ndev->flags); + mask = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) | + FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port); + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + + if (ndev->flags & IFF_PROMISC) { + /* Allow MC and unknown UC packets */ + rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) | + FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port); + } else if ((!netdev_mc_empty(ndev) && (ndev->flags & IFF_MULTICAST)) || + (ndev->flags & IFF_ALLMULTI)) { + /* Allow MC packets */ + rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port); + } else { + /* Disable MC and unknown UC packets */ + rx_mode = 0; + } + + writel((reg & (~mask)) | ((~rx_mode) & mask), comm->l2sw_reg_base + L2SW_CPU_CNTL); + netdev_dbg(ndev, "cpu_cntl = %08x\n", readl(comm->l2sw_reg_base + L2SW_CPU_CNTL)); +} + +void spl2sw_mac_init(struct spl2sw_common *comm) +{ + u32 i; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_pos[i] = 0; + mb(); /* make sure settings are effective. */ + + spl2sw_mac_hw_init(comm); +} + +void spl2sw_mac_soft_reset(struct spl2sw_common *comm) +{ + u32 i; + + spl2sw_mac_hw_stop(comm); + + spl2sw_rx_descs_flush(comm); + comm->tx_pos = 0; + comm->tx_done_pos = 0; + comm->tx_desc_full = 0; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_pos[i] = 0; + mb(); /* make sure settings are effective. */ + + spl2sw_mac_hw_init(comm); + spl2sw_mac_hw_start(comm); +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_mac.h b/drivers/net/ethernet/sunplus/spl2sw_mac.h new file mode 100644 index 0000000000..41a929b3a3 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mac.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __SPL2SW_MAC_H__ +#define __SPL2SW_MAC_H__ + +void spl2sw_mac_hw_stop(struct spl2sw_common *comm); +void spl2sw_mac_hw_start(struct spl2sw_common *comm); +int spl2sw_mac_addr_add(struct spl2sw_mac *mac); +int spl2sw_mac_addr_del(struct spl2sw_mac *mac); +void spl2sw_mac_hw_init(struct spl2sw_common *comm); +void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac); +void spl2sw_mac_init(struct spl2sw_common *comm); +void spl2sw_mac_soft_reset(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.c b/drivers/net/ethernet/sunplus/spl2sw_mdio.c new file mode 100644 index 0000000000..733ae17042 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_mdio.h" + +#define SPL2SW_MDIO_READ_CMD 0x02 +#define SPL2SW_MDIO_WRITE_CMD 0x01 + +static int spl2sw_mdio_access(struct spl2sw_common *comm, u8 cmd, u8 addr, u8 regnum, u16 wdata) +{ + u32 reg, reg2; + u32 val; + int ret; + + /* Note that addr (of phy) should match either ext_phy0_addr + * or ext_phy1_addr, or mdio commands won't be sent out. + */ + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + reg &= ~MAC_EXT_PHY0_ADDR; + reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, addr); + + reg2 = FIELD_PREP(MAC_CPU_PHY_WT_DATA, wdata) | FIELD_PREP(MAC_CPU_PHY_CMD, cmd) | + FIELD_PREP(MAC_CPU_PHY_REG_ADDR, regnum) | FIELD_PREP(MAC_CPU_PHY_ADDR, addr); + + /* Set ext_phy0_addr and then issue mdio command. + * No interrupt is allowed in between. + */ + spin_lock_irq(&comm->mdio_lock); + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + writel(reg2, comm->l2sw_reg_base + L2SW_PHY_CNTL_REG0); + spin_unlock_irq(&comm->mdio_lock); + + ret = read_poll_timeout(readl, val, val & cmd, 1, 1000, true, + comm->l2sw_reg_base + L2SW_PHY_CNTL_REG1); + + /* Set ext_phy0_addr back to 31 to prevent + * from sending mdio command to phy by + * hardware auto-mdio function. + */ + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + reg &= ~MAC_EXT_PHY0_ADDR; + reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, 31); + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + if (ret == 0) + return val >> 16; + else + return ret; +} + +static int spl2sw_mii_read(struct mii_bus *bus, int addr, int regnum) +{ + struct spl2sw_common *comm = bus->priv; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + return spl2sw_mdio_access(comm, SPL2SW_MDIO_READ_CMD, addr, regnum, 0); +} + +static int spl2sw_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct spl2sw_common *comm = bus->priv; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ret = spl2sw_mdio_access(comm, SPL2SW_MDIO_WRITE_CMD, addr, regnum, val); + if (ret < 0) + return ret; + + return 0; +} + +u32 spl2sw_mdio_init(struct spl2sw_common *comm) +{ + struct device_node *mdio_np; + struct mii_bus *mii_bus; + int ret; + + /* Get mdio child node. */ + mdio_np = of_get_child_by_name(comm->pdev->dev.of_node, "mdio"); + if (!mdio_np) { + dev_err(&comm->pdev->dev, "No mdio child node found!\n"); + return -ENODEV; + } + + /* Allocate and register mdio bus. 
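+	 * The mii_bus structure is device-managed (devm_mdiobus_alloc()),
+	 * so spl2sw_mdio_remove() only needs to unregister it, and
+	 * of_mdiobus_register() below attaches the PHYs listed in the
+	 * "mdio" device-tree node.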
*/ + mii_bus = devm_mdiobus_alloc(&comm->pdev->dev); + if (!mii_bus) { + ret = -ENOMEM; + goto out; + } + + mii_bus->name = "sunplus_mii_bus"; + mii_bus->parent = &comm->pdev->dev; + mii_bus->priv = comm; + mii_bus->read = spl2sw_mii_read; + mii_bus->write = spl2sw_mii_write; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&comm->pdev->dev)); + + ret = of_mdiobus_register(mii_bus, mdio_np); + if (ret) { + dev_err(&comm->pdev->dev, "Failed to register mdiobus!\n"); + goto out; + } + + comm->mii_bus = mii_bus; + +out: + of_node_put(mdio_np); + return ret; +} + +void spl2sw_mdio_remove(struct spl2sw_common *comm) +{ + if (comm->mii_bus) { + mdiobus_unregister(comm->mii_bus); + comm->mii_bus = NULL; + } +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.h b/drivers/net/ethernet/sunplus/spl2sw_mdio.h new file mode 100644 index 0000000000..8a24c9caeb --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_MDIO_H__ +#define __SPL2SW_MDIO_H__ + +u32 spl2sw_mdio_init(struct spl2sw_common *comm); +void spl2sw_mdio_remove(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_phy.c b/drivers/net/ethernet/sunplus/spl2sw_phy.c new file mode 100644 index 0000000000..404f508a54 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_phy.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_phy.h" + +static void spl2sw_mii_link_change(struct net_device *ndev) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + struct spl2sw_common *comm = mac->comm; + u32 reg; + + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + if (phydev->link) { + reg |= FIELD_PREP(MAC_FORCE_RMII_LINK, mac->lan_port); + + if (phydev->speed == 100) { + reg |= FIELD_PREP(MAC_FORCE_RMII_SPD, mac->lan_port); + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_SPD, ~mac->lan_port) | + ~MAC_FORCE_RMII_SPD; + } + + if (phydev->duplex) { + reg |= FIELD_PREP(MAC_FORCE_RMII_DPX, mac->lan_port); + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_DPX, ~mac->lan_port) | + ~MAC_FORCE_RMII_DPX; + } + + if (phydev->pause) { + reg |= FIELD_PREP(MAC_FORCE_RMII_FC, mac->lan_port); + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_FC, ~mac->lan_port) | + ~MAC_FORCE_RMII_FC; + } + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_LINK, ~mac->lan_port) | + ~MAC_FORCE_RMII_LINK; + } + + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + phy_print_status(phydev); +} + +int spl2sw_phy_connect(struct spl2sw_common *comm) +{ + struct phy_device *phydev; + struct net_device *ndev; + struct spl2sw_mac *mac; + int i; + + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + ndev = comm->ndev[i]; + mac = netdev_priv(ndev); + phydev = of_phy_connect(ndev, mac->phy_node, spl2sw_mii_link_change, + 0, mac->phy_mode); + if (!phydev) + return -ENODEV; + + phy_support_asym_pause(phydev); + phy_attached_info(phydev); + } + + return 0; +} + +void spl2sw_phy_remove(struct spl2sw_common *comm) +{ + struct net_device *ndev; + int i; + + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + ndev = comm->ndev[i]; + if (ndev) { + phy_disconnect(ndev->phydev); + ndev->phydev = NULL; + } + } +} diff --git 
a/drivers/net/ethernet/sunplus/spl2sw_phy.h b/drivers/net/ethernet/sunplus/spl2sw_phy.h new file mode 100644 index 0000000000..3d051a2548 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_phy.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_PHY_H__ +#define __SPL2SW_PHY_H__ + +int spl2sw_phy_connect(struct spl2sw_common *comm); +void spl2sw_phy_remove(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_register.h b/drivers/net/ethernet/sunplus/spl2sw_register.h new file mode 100644 index 0000000000..9718e2ee2b --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_register.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_REGISTER_H__ +#define __SPL2SW_REGISTER_H__ + +/* Register L2SW */ +#define L2SW_SW_INT_STATUS_0 0x0 +#define L2SW_SW_INT_MASK_0 0x4 +#define L2SW_FL_CNTL_TH 0x8 +#define L2SW_CPU_FL_CNTL_TH 0xc +#define L2SW_PRI_FL_CNTL 0x10 +#define L2SW_VLAN_PRI_TH 0x14 +#define L2SW_EN_TOS_BUS 0x18 +#define L2SW_TOS_MAP0 0x1c +#define L2SW_TOS_MAP1 0x20 +#define L2SW_TOS_MAP2 0x24 +#define L2SW_TOS_MAP3 0x28 +#define L2SW_TOS_MAP4 0x2c +#define L2SW_TOS_MAP5 0x30 +#define L2SW_TOS_MAP6 0x34 +#define L2SW_TOS_MAP7 0x38 +#define L2SW_GLOBAL_QUE_STATUS 0x3c +#define L2SW_ADDR_TBL_SRCH 0x40 +#define L2SW_ADDR_TBL_ST 0x44 +#define L2SW_MAC_AD_SER0 0x48 +#define L2SW_MAC_AD_SER1 0x4c +#define L2SW_WT_MAC_AD0 0x50 +#define L2SW_W_MAC_15_0 0x54 +#define L2SW_W_MAC_47_16 0x58 +#define L2SW_PVID_CONFIG0 0x5c +#define L2SW_PVID_CONFIG1 0x60 +#define L2SW_VLAN_MEMSET_CONFIG0 0x64 +#define L2SW_VLAN_MEMSET_CONFIG1 0x68 +#define L2SW_PORT_ABILITY 0x6c +#define L2SW_PORT_ST 0x70 +#define L2SW_CPU_CNTL 0x74 +#define L2SW_PORT_CNTL0 0x78 +#define L2SW_PORT_CNTL1 0x7c +#define L2SW_PORT_CNTL2 0x80 +#define L2SW_SW_GLB_CNTL 0x84 +#define L2SW_L2SW_SW_RESET 0x88 +#define L2SW_LED_PORT0 0x8c +#define L2SW_LED_PORT1 0x90 +#define L2SW_LED_PORT2 0x94 +#define L2SW_LED_PORT3 0x98 +#define L2SW_LED_PORT4 0x9c +#define L2SW_WATCH_DOG_TRIG_RST 0xa0 +#define L2SW_WATCH_DOG_STOP_CPU 0xa4 +#define L2SW_PHY_CNTL_REG0 0xa8 +#define L2SW_PHY_CNTL_REG1 0xac +#define L2SW_MAC_FORCE_MODE 0xb0 +#define L2SW_VLAN_GROUP_CONFIG0 0xb4 +#define L2SW_VLAN_GROUP_CONFIG1 0xb8 +#define L2SW_FLOW_CTRL_TH3 0xbc +#define L2SW_QUEUE_STATUS_0 0xc0 +#define L2SW_DEBUG_CNTL 0xc4 +#define L2SW_RESERVED_1 0xc8 +#define L2SW_MEM_TEST_INFO 0xcc +#define L2SW_SW_INT_STATUS_1 0xd0 +#define L2SW_SW_INT_MASK_1 0xd4 +#define L2SW_SW_GLOBAL_SIGNAL 0xd8 + +#define L2SW_CPU_TX_TRIG 0x208 +#define L2SW_TX_HBASE_ADDR_0 0x20c +#define L2SW_TX_LBASE_ADDR_0 0x210 +#define L2SW_RX_HBASE_ADDR_0 0x214 +#define L2SW_RX_LBASE_ADDR_0 0x218 +#define L2SW_TX_HW_ADDR_0 0x21c +#define L2SW_TX_LW_ADDR_0 0x220 +#define L2SW_RX_HW_ADDR_0 0x224 +#define L2SW_RX_LW_ADDR_0 0x228 +#define L2SW_CPU_PORT_CNTL_REG_0 0x22c +#define L2SW_TX_HBASE_ADDR_1 0x230 +#define L2SW_TX_LBASE_ADDR_1 0x234 +#define L2SW_RX_HBASE_ADDR_1 0x238 +#define L2SW_RX_LBASE_ADDR_1 0x23c +#define L2SW_TX_HW_ADDR_1 0x240 +#define L2SW_TX_LW_ADDR_1 0x244 +#define L2SW_RX_HW_ADDR_1 0x248 +#define L2SW_RX_LW_ADDR_1 0x24c +#define L2SW_CPU_PORT_CNTL_REG_1 0x250 + +#endif diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig new file mode 100644 index 0000000000..b4a4fa0a58 --- /dev/null +++ 
b/drivers/net/ethernet/wangxun/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Wangxun network device configuration +# + +config NET_VENDOR_WANGXUN + bool "Wangxun devices" + default y + help + If you have a network (Ethernet) card from Wangxun(R), say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Wangxun(R) cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_WANGXUN + +config TXGBE + tristate "Wangxun(R) 10GbE PCI Express adapters support" + depends on PCI + help + This driver supports Wangxun(R) 10GbE PCI Express family of + adapters. + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called txgbe. + +endif # NET_VENDOR_WANGXUN diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile new file mode 100644 index 0000000000..c34db1bead --- /dev/null +++ b/drivers/net/ethernet/wangxun/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Wangxun network device drivers. +# + +obj-$(CONFIG_TXGBE) += txgbe/ diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile new file mode 100644 index 0000000000..431303ca75 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. +# +# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_TXGBE) += txgbe.o + +txgbe-objs := txgbe_main.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h new file mode 100644 index 0000000000..38ddbde0ed --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_H_ +#define _TXGBE_H_ + +#include "txgbe_type.h" + +#define TXGBE_MAX_FDIR_INDICES 63 + +#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) + +/* board specific private data structure */ +struct txgbe_adapter { + u8 __iomem *io_addr; /* Mainly for iounmap use */ + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; +}; + +extern char txgbe_driver_name[]; + +#endif /* _TXGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c new file mode 100644 index 0000000000..d3b9f73ecb --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "txgbe.h"
+
+char txgbe_driver_name[] = "txgbe";
+
+/* txgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbe_pci_tbl[] = {
+	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
+	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+	/* required last entry */
+	{ .device = 0 }
+};
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	netif_device_detach(netdev);
+
+	pci_disable_device(pdev);
+}
+
+static void txgbe_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+
+	txgbe_dev_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+/**
+ * txgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * txgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbe_probe(struct pci_dev *pdev,
+		       const struct pci_device_id __always_unused *ent)
+{
+	struct txgbe_adapter *adapter = NULL;
+	struct net_device *netdev;
+	int err;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev,
+			"No usable DMA configuration, aborting\n");
+		goto err_pci_disable_dev;
+	}
+
+	err = pci_request_selected_regions(pdev,
+					   pci_select_bars(pdev, IORESOURCE_MEM),
+					   txgbe_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", err);
+		goto err_pci_disable_dev;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+
+	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+					 sizeof(struct txgbe_adapter),
+					 TXGBE_MAX_TX_QUEUES,
+					 TXGBE_MAX_RX_QUEUES);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_pci_release_regions;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+
+	adapter->io_addr = devm_ioremap(&pdev->dev,
+					pci_resource_start(pdev, 0),
+					pci_resource_len(pdev, 0));
+	if (!adapter->io_addr) {
+		err = -EIO;
+		goto err_pci_release_regions;
+	}
+
+	netdev->features |= NETIF_F_HIGHDMA;
+
+	pci_set_drvdata(pdev, adapter);
+
+	return 0;
+
+err_pci_release_regions:
+	pci_disable_pcie_error_reporting(pdev);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
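+ *
+ * Only the PCI regions, AER reporting and the enabled-device state set
+ * up directly in probe are undone here; the netdev and MMIO mapping
+ * are device-managed and released automatically.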
+ **/ +static void txgbe_remove(struct pci_dev *pdev) +{ + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +static struct pci_driver txgbe_driver = { + .name = txgbe_driver_name, + .id_table = txgbe_pci_tbl, + .probe = txgbe_probe, + .remove = txgbe_remove, + .shutdown = txgbe_shutdown, +}; + +module_pci_driver(txgbe_driver); + +MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); +MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); +MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h new file mode 100644 index 0000000000..b2e329f50b --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_TYPE_H_ +#define _TXGBE_TYPE_H_ + +#include +#include + +/************ txgbe_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_WANGXUN +#define PCI_VENDOR_ID_WANGXUN 0x8088 +#endif + +/* Device IDs */ +#define TXGBE_DEV_ID_SP1000 0x1001 +#define TXGBE_DEV_ID_WX1820 0x2001 + +/* Subsystem IDs */ +/* SFP */ +#define TXGBE_ID_SP1000_SFP 0x0000 +#define TXGBE_ID_WX1820_SFP 0x2000 +#define TXGBE_ID_SFP 0x00 + +/* copper */ +#define TXGBE_ID_SP1000_XAUI 0x1010 +#define TXGBE_ID_WX1820_XAUI 0x2010 +#define TXGBE_ID_XAUI 0x10 +#define TXGBE_ID_SP1000_SGMII 0x1020 +#define TXGBE_ID_WX1820_SGMII 0x2020 +#define TXGBE_ID_SGMII 0x20 +/* backplane */ +#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030 +#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030 +#define TXGBE_ID_KR_KX_KX4 0x30 +/* MAC Interface */ +#define TXGBE_ID_SP1000_MAC_XAUI 0x1040 +#define TXGBE_ID_WX1820_MAC_XAUI 0x2040 +#define TXGBE_ID_MAC_XAUI 0x40 +#define TXGBE_ID_SP1000_MAC_SGMII 0x1060 +#define TXGBE_ID_WX1820_MAC_SGMII 0x2060 +#define TXGBE_ID_MAC_SGMII 0x60 + +#define TXGBE_NCSI_SUP 0x8000 +#define TXGBE_NCSI_MASK 0x8000 +#define TXGBE_WOL_SUP 0x4000 +#define TXGBE_WOL_MASK 0x4000 +#define TXGBE_DEV_MASK 0xf0 + +/* Combined interface*/ +#define TXGBE_ID_SFI_XAUI 0x50 + +/* Revision ID */ +#define TXGBE_SP_MPW 1 + +#endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c new file mode 100644 index 0000000000..1c1895aea8 --- /dev/null +++ b/drivers/net/ipa/data/ipa_data-v3.1.c @@ -0,0 +1,537 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2019-2021 Linaro Ltd. 
+ */ + +#include + +#include "../gsi.h" +#include "../ipa_data.h" +#include "../ipa_endpoint.h" +#include "../ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_HDR_SECTORS, + IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v3.1 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL = 0, + IPA_RSRC_GROUP_SRC_DL, + IPA_RSRC_GROUP_SRC_DIAG, + IPA_RSRC_GROUP_SRC_DMA, + IPA_RSRC_GROUP_SRC_UNUSED, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL = 0, + IPA_RSRC_GROUP_DST_DL, + IPA_RSRC_GROUP_DST_DIAG_DPL, + IPA_RSRC_GROUP_DST_DMA, + IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL, + IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v3.1 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 8, + }, + [IPA_QSB_MASTER_PCIE] = { + .max_writes = 2, + .max_reads = 8, + }, +}; + +/* Endpoint data for an SoC having IPA v3.1 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 6, + .endpoint_id = 22, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 18, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 15, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 8, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL, + .aggregation = true, + .status_enable = true, + .rx = { + .buffer_size = 8192, + .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 5, + .endpoint_id = 3, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL, + .checksum = true, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 8, + .endpoint_id = 16, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 8, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_DL, + .checksum = true, + .qmap = true, + .aggregation = true, + .rx = { + .buffer_size = 8192, + .aggr_time_limit = 500, + .aggr_close_eof = true, + }, + }, + }, + }, + 
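/* The remaining entries describe modem-side endpoints; the AP does
+	 * not use them directly, so no AP channel or endpoint
+	 * configuration is supplied beyond filter support.
+	 */
+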
[IPA_ENDPOINT_MODEM_LAN_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 4, + .endpoint_id = 9, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 5, + .endpoint_id = 18, + .toward_ipa = false, + }, +}; + +/* Source resource configuration data for an SoC having IPA v3.1 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 3, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 3, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 1, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 1, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 2, .max = 255, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HDR_SECTORS] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 255, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 255, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 14, .max = 14, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 16, .max = 16, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 5, .max = 5, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 5, .max = 5, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 19, .max = 19, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 26, .max = 26, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 5, .max = 5, /* 3 downstream */ + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 5, .max = 5, /* 7 downstream */ + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 255, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 255, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL] = { + .min = 19, .max = 19, + }, + .limits[IPA_RSRC_GROUP_SRC_DL] = { + .min = 26, .max = 26, + }, + .limits[IPA_RSRC_GROUP_SRC_DIAG] = { + .min = 
5, .max = 5, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 5, .max = 5, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v3.1 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL] = { + .min = 3, .max = 3, /* 2 downstream */ + }, + .limits[IPA_RSRC_GROUP_DST_DL] = { + .min = 3, .max = 3, + }, + .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = { + .min = 1, .max = 1, /* 0 downstream */ + }, + /* IPA_RSRC_GROUP_DST_DMA uses 2 downstream */ + .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = { + .min = 3, .max = 3, + }, + .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_DST_UL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_DST_DL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = { + .min = 0, .max = 255, + }, + .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = { + .min = 0, .max = 255, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_DL] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = { + .min = 1, .max = 1, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v3.1 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v3.1 */ +static const struct ipa_mem ipa_mem_local_data[] = { + { + .id = IPA_MEM_UC_SHARED, + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + { + .id = IPA_MEM_UC_INFO, + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_V4_FILTER_HASHED, + .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_FILTER, + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER_HASHED, + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER, + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE_HASHED, + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE, + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE_HASHED, + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE, + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_HEADER, + .offset = 0x0688, + .size = 0x0140, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_PROC_CTX, + .offset = 0x07d0, + .size = 0x0200, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_PROC_CTX, + .offset = 0x09d0, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM, + .offset = 0x0bd8, + .size = 0x1424, + .canary_count = 0, + }, + { + .id = 
IPA_MEM_END_MARKER, + .offset = 0x2000, + .size = 0, + .canary_count = 1, + }, +}; + +/* Memory configuration data for an SoC having IPA v3.1 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146bd000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00002000, +}; + +/* Interconnect bandwidths are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 640000, /* 640 MBps */ + .average_bandwidth = 80000, /* 80 MBps */ + }, + { + .name = "imem", + .peak_bandwidth = 640000, /* 640 MBps */ + .average_bandwidth = 80000, /* 80 MBps */ + }, + /* Average bandwidth is unused for the next interconnect */ + { + .name = "config", + .peak_bandwidth = 80000, /* 80 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + +/* Clock and interconnect configuration data for an SoC having IPA v3.1 */ +static const struct ipa_power_data ipa_power_data = { + .core_clock_rate = 16 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v3.1 */ +const struct ipa_data ipa_data_v3_1 = { + .version = IPA_VERSION_3_1, + .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .power_data = &ipa_power_data, +}; diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c new file mode 100644 index 0000000000..58b708d2fc --- /dev/null +++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2019-2021 Linaro Ltd. 
+ */ + +#include + +#include "../gsi.h" +#include "../ipa_data.h" +#include "../ipa_endpoint.h" +#include "../ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v3.5.1 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_LWA_DL = 0, + IPA_RSRC_GROUP_SRC_UL_DL, + IPA_RSRC_GROUP_SRC_MHI_DMA, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_LWA_DL = 0, + IPA_RSRC_GROUP_DST_UL_DL_DPL, + IPA_RSRC_GROUP_DST_UNUSED_2, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 8, + }, + [IPA_QSB_MASTER_PCIE] = { + .max_writes = 4, + .max_reads = 12, + }, +}; + +/* Endpoint datdata for an SoC having IPA v3.5.1 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 4, + .endpoint_id = 5, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 5, + .endpoint_id = 9, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 8, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .buffer_size = 8192, + .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 3, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .seq_rep_type = IPA_SEQ_REP_DMA_PARSER, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 6, + .endpoint_id = 10, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 8, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, + .qmap = true, + .aggregation = true, + .rx = { + .buffer_size = 8192, + .aggr_time_limit = 500, + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_LAN_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 3, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = 
GSI_EE_MODEM, + .channel_id = 4, + .endpoint_id = 6, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 12, + .toward_ipa = false, + }, +}; + +/* Source resource configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 1, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 1, .max = 63, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 10, .max = 10, + }, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 10, .max = 10, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 12, .max = 12, + }, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 14, .max = 14, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_MHI_DMA] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 63, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 14, .max = 14, + }, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 20, .max = 20, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 14, .max = 14, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_LWA_DL] = { + .min = 4, .max = 4, + }, + .limits[1] = { + .min = 4, .max = 4, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 3, .max = 3, + } + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_LWA_DL] = { + .min = 2, .max = 63, + }, + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 1, .max = 63, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 1, .max = 2, + } + }, +}; + +/* Resource configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v3.5.1 */ +static const struct ipa_mem ipa_mem_local_data[] = { + { + .id = IPA_MEM_UC_SHARED, + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + { + .id = IPA_MEM_UC_INFO, + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_V4_FILTER_HASHED, + .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_FILTER, + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER_HASHED, + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER, + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE_HASHED, + .offset = 0x0488, + .size 
= 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE, + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE_HASHED, + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE, + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_HEADER, + .offset = 0x0688, + .size = 0x0140, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_PROC_CTX, + .offset = 0x07d0, + .size = 0x0200, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_PROC_CTX, + .offset = 0x09d0, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM, + .offset = 0x0bd8, + .size = 0x1024, + .canary_count = 0, + }, + { + .id = IPA_MEM_UC_EVENT_RING, + .offset = 0x1c00, + .size = 0x0400, + .canary_count = 1, + }, +}; + +/* Memory configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146bd000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00002000, +}; + +/* Interconnect bandwidths are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 80000, /* 80 MBps */ + }, + /* Average bandwidth is unused for the next two interconnects */ + { + .name = "imem", + .peak_bandwidth = 350000, /* 350 MBps */ + .average_bandwidth = 0, /* unused */ + }, + { + .name = "config", + .peak_bandwidth = 40000, /* 40 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + +/* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_power_data ipa_power_data = { + .core_clock_rate = 75 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v3.5.1 */ +const struct ipa_data ipa_data_v3_5_1 = { + .version = IPA_VERSION_3_5_1, + .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK | + BCR_TX_NOT_USING_BRESP_FMASK | + BCR_SUSPEND_L2_IRQ_FMASK | + BCR_HOLB_DROP_L2_IRQ_FMASK | + BCR_DUAL_TX_FMASK, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .power_data = &ipa_power_data, +}; diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c new file mode 100644 index 0000000000..a204e439c2 --- /dev/null +++ b/drivers/net/ipa/data/ipa_data-v4.11.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Linaro Ltd. 
*/ + +#include + +#include "../gsi.h" +#include "../ipa_data.h" +#include "../ipa_endpoint.h" +#include "../ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.11 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL_DL = 0, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_UNUSED_2, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, + IPA_RSRC_GROUP_DST_UNUSED_1, + IPA_RSRC_GROUP_DST_DRB_IP, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.11 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 12, + .max_reads = 13, + .max_reads_beats = 120, + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.11 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 5, + .endpoint_id = 7, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 14, + .endpoint_id = 9, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .buffer_size = 8192, + .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 2, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 16, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, + .qmap = true, + .aggregation = true, + .rx = { + .buffer_size = 32768, + .aggr_time_limit = 500, + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 7, + .endpoint_id = 14, + .toward_ipa = false, + }, + 
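/* Modem downlink NLO endpoint; like the other modem-side entries it
+	 * carries no AP channel or endpoint configuration, only filter
+	 * support.
+	 */
+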
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.11 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 6, .max = 6, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 18, .max = 18, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 2, .max = 2, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 15, .max = 15, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.11 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 3, .max = 3, + }, + .limits[IPA_RSRC_GROUP_DST_DRB_IP] = { + .min = 25, .max = 25, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 2, .max = 2, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.11 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.11 */ +static const struct ipa_mem ipa_mem_local_data[] = { + { + .id = IPA_MEM_UC_SHARED, + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + { + .id = IPA_MEM_UC_INFO, + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_V4_FILTER_HASHED, + .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_FILTER, + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER_HASHED, + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER, + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE_HASHED, + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE, + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE_HASHED, + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE, + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_HEADER, + .offset = 0x0688, + .size = 0x0240, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_HEADER, + .offset = 0x08c8, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM_PROC_CTX, + .offset = 0x0ad0, + .size = 0x0200, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_PROC_CTX, + .offset = 0x0cd0, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_NAT_TABLE, + .offset = 0x0ee0, + .size = 0x0d00, + .canary_count = 4, + }, + { + .id = IPA_MEM_PDN_CONFIG, + .offset = 0x1be8, + .size = 0x0050, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_QUOTA_MODEM, + .offset = 0x1c40, + .size = 0x0030, + .canary_count = 4, + }, + { + .id = 
IPA_MEM_STATS_QUOTA_AP, + .offset = 0x1c70, + .size = 0x0048, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_TETHERING, + .offset = 0x1cb8, + .size = 0x0238, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_DROP, + .offset = 0x1ef0, + .size = 0x0020, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM, + .offset = 0x1f18, + .size = 0x100c, + .canary_count = 2, + }, + { + .id = IPA_MEM_END_MARKER, + .offset = 0x3000, + .size = 0x0000, + .canary_count = 1, + }, +}; + +/* Memory configuration data for an SoC having IPA v4.11 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146a8000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00009000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + /* Average rate is unused for the next interconnect */ + { + .name = "config", + .peak_bandwidth = 74000, /* 74 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.11 */ +static const struct ipa_power_data ipa_power_data = { + .core_clock_rate = 60 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v4.11 */ +const struct ipa_data ipa_data_v4_11 = { + .version = IPA_VERSION_4_11, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .power_data = &ipa_power_data, +}; diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c new file mode 100644 index 0000000000..04f574fe00 --- /dev/null +++ b/drivers/net/ipa/data/ipa_data-v4.2.c @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2019-2021 Linaro Ltd. 
*/ + +#include + +#include "../gsi.h" +#include "../ipa_data.h" +#include "../ipa_endpoint.h" +#include "../ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.2 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL_DL = 0, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.2 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 12, + /* no outstanding read byte (beat) limit */ + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.2 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 1, + .endpoint_id = 6, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 6, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .buffer_size = 8192, + .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 0, + .endpoint_id = 1, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 8, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_1_PASS_SKIP_LAST_UC, + .seq_rep_type = IPA_SEQ_REP_DMA_PARSER, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 3, + .endpoint_id = 9, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 6, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, + .qmap = true, + .aggregation = true, + .rx = { + .buffer_size = 8192, + .aggr_time_limit = 500, + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_COMMAND_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 1, + .endpoint_id = 5, + .toward_ipa = true, + }, + [IPA_ENDPOINT_MODEM_LAN_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 3, + .endpoint_id = 11, + .toward_ipa = false, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 4, + .toward_ipa = true, + .endpoint = { + 
.filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 10, + .toward_ipa = false, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.2 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 3, .max = 63, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 10, .max = 10, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 1, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 5, .max = 5, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.2 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 1, .max = 63, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.2 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.2 */ +static const struct ipa_mem ipa_mem_local_data[] = { + { + .id = IPA_MEM_UC_SHARED, + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + { + .id = IPA_MEM_UC_INFO, + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_V4_FILTER_HASHED, + .offset = 0x0288, + .size = 0, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_FILTER, + .offset = 0x0290, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER_HASHED, + .offset = 0x0310, + .size = 0, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER, + .offset = 0x0318, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE_HASHED, + .offset = 0x0398, + .size = 0, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE, + .offset = 0x03a0, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE_HASHED, + .offset = 0x0420, + .size = 0, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE, + .offset = 0x0428, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_HEADER, + .offset = 0x04a8, + .size = 0x0140, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_PROC_CTX, + .offset = 0x05f0, + .size = 0x0200, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_PROC_CTX, + .offset = 0x07f0, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_PDN_CONFIG, + .offset = 0x09f8, + .size = 0x0050, + .canary_count = 2, + }, + { + .id = IPA_MEM_STATS_QUOTA_MODEM, + .offset = 0x0a50, + .size = 0x0060, + .canary_count = 2, + }, + { + .id = IPA_MEM_STATS_TETHERING, + .offset = 0x0ab0, + .size = 0x0140, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM, + .offset = 0x0bf0, + .size = 0x140c, + .canary_count = 0, + }, + { + .id = IPA_MEM_END_MARKER, + .offset = 0x2000, + .size = 0, + .canary_count = 1, + }, +}; + +/* Memory configuration data for 
an SoC having IPA v4.2 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146a8000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00002000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 465000, /* 465 MBps */ + .average_bandwidth = 80000, /* 80 MBps */ + }, + /* Average bandwidth is unused for the next two interconnects */ + { + .name = "imem", + .peak_bandwidth = 68570, /* 68.570 MBps */ + .average_bandwidth = 0, /* unused */ + }, + { + .name = "config", + .peak_bandwidth = 30000, /* 30 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.2 */ +static const struct ipa_power_data ipa_power_data = { + .core_clock_rate = 100 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v4.2 */ +const struct ipa_data ipa_data_v4_2 = { + .version = IPA_VERSION_4_2, + /* backward_compat value is 0 */ + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .power_data = &ipa_power_data, +}; diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c new file mode 100644 index 0000000000..684239e71f --- /dev/null +++ b/drivers/net/ipa/data/ipa_data-v4.5.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Linaro Ltd. 
*/ + +#include + +#include "../gsi.h" +#include "../ipa_data.h" +#include "../ipa_endpoint.h" +#include "../ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.5 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UNUSED_0 = 0, + IPA_RSRC_GROUP_SRC_UL_DL, + IPA_RSRC_GROUP_SRC_UNUSED_2, + IPA_RSRC_GROUP_SRC_UNUSED_3, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UNUSED_0 = 0, + IPA_RSRC_GROUP_DST_UL_DL_DPL, + IPA_RSRC_GROUP_DST_UNUSED_2, + IPA_RSRC_GROUP_DST_UNUSED_3, + IPA_RSRC_GROUP_DST_UC, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.5 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 0, /* no limit (hardware max) */ + .max_reads_beats = 120, + }, + [IPA_QSB_MASTER_PCIE] = { + .max_writes = 8, + .max_reads = 12, + /* no outstanding read byte (beat) limit */ + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.5 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 9, + .endpoint_id = 7, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 10, + .endpoint_id = 16, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .buffer_size = 8192, + .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 1, + .endpoint_id = 14, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, + .qmap = true, + .aggregation = true, + .rx = { + .buffer_size = 8192, + .aggr_time_limit = 500, + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + 
.channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 7, + .endpoint_id = 21, + .toward_ipa = false, + }, + [IPA_ENDPOINT_MODEM_DL_NLO_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.5 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 11, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 1, .max = 63, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 14, .max = 14, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 18, .max = 18, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UNUSED_0] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UNUSED_2] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UNUSED_3] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 63, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 24, .max = 24, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.5 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 16, .max = 16, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 2, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = { + .min = 2, .max = 2, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 2, .max = 63, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 1, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = { + .min = 1, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 0, .max = 2, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.5 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.5 */ +static const struct ipa_mem ipa_mem_local_data[] = { + { + .id = IPA_MEM_UC_SHARED, + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + { + .id = IPA_MEM_UC_INFO, + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_V4_FILTER_HASHED, + .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_FILTER, + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER_HASHED, + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER, + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = 
IPA_MEM_V4_ROUTE_HASHED, + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE, + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE_HASHED, + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE, + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_HEADER, + .offset = 0x0688, + .size = 0x0240, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_HEADER, + .offset = 0x08c8, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM_PROC_CTX, + .offset = 0x0ad0, + .size = 0x0b20, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_PROC_CTX, + .offset = 0x15f0, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_NAT_TABLE, + .offset = 0x1800, + .size = 0x0d00, + .canary_count = 4, + }, + { + .id = IPA_MEM_STATS_QUOTA_MODEM, + .offset = 0x2510, + .size = 0x0030, + .canary_count = 4, + }, + { + .id = IPA_MEM_STATS_QUOTA_AP, + .offset = 0x2540, + .size = 0x0048, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_TETHERING, + .offset = 0x2588, + .size = 0x0238, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_FILTER_ROUTE, + .offset = 0x27c0, + .size = 0x0800, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_DROP, + .offset = 0x2fc0, + .size = 0x0020, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM, + .offset = 0x2fe8, + .size = 0x0800, + .canary_count = 2, + }, + { + .id = IPA_MEM_UC_EVENT_RING, + .offset = 0x3800, + .size = 0x1000, + .canary_count = 1, + }, + { + .id = IPA_MEM_PDN_CONFIG, + .offset = 0x4800, + .size = 0x0050, + .canary_count = 0, + }, +}; + +/* Memory configuration data for an SoC having IPA v4.5 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x14688000, + .imem_size = 0x00003000, + .smem_id = 497, + .smem_size = 0x00009000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + /* Average rate is unused for the next two interconnects */ + { + .name = "imem", + .peak_bandwidth = 450000, /* 450 MBps */ + .average_bandwidth = 75000, /* 75 MBps (unused?) */ + }, + { + .name = "config", + .peak_bandwidth = 171400, /* 171.4 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.5 */ +static const struct ipa_power_data ipa_power_data = { + .core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v4.5 */ +const struct ipa_data ipa_data_v4_5 = { + .version = IPA_VERSION_4_5, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .power_data = &ipa_power_data, +}; diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c new file mode 100644 index 0000000000..2333e15f95 --- /dev/null +++ b/drivers/net/ipa/data/ipa_data-v4.9.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Linaro Ltd. 
*/ + +#include + +#include "../gsi.h" +#include "../ipa_data.h" +#include "../ipa_endpoint.h" +#include "../ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.9 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL_DL = 0, + IPA_RSRC_GROUP_SRC_DMA, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, + IPA_RSRC_GROUP_DST_DMA, + IPA_RSRC_GROUP_DST_UC, + IPA_RSRC_GROUP_DST_DRB_IP, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.9 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 0, /* no limit (hardware max) */ + .max_reads_beats = 120, + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.9 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 6, + .endpoint_id = 7, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 11, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .buffer_size = 8192, + .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 2, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 12, + .endpoint_id = 20, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, + .qmap = true, + .aggregation = true, + .rx = { + .buffer_size = 8192, + .aggr_time_limit = 500, + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 7, + .endpoint_id = 16, + .toward_ipa = 
false, + }, + [IPA_ENDPOINT_MODEM_DL_NLO_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 12, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 1, .max = 12, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 20, .max = 20, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 2, .max = 2, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 38, .max = 38, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 4, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 0, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 0, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 4, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 30, .max = 30, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 8, .max = 8, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 9, .max = 9, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_DRB_IP] = { + .min = 39, .max = 39, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 2, .max = 3, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 1, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 0, .max = 2, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.9 */ +static const struct ipa_mem ipa_mem_local_data[] = { + { + .id = IPA_MEM_UC_SHARED, + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + { + .id = IPA_MEM_UC_INFO, + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_V4_FILTER_HASHED, + .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_FILTER, + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER_HASHED, + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_FILTER, + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE_HASHED, + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V4_ROUTE, + .offset = 0x0508, + .size 
= 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE_HASHED, + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_V6_ROUTE, + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + { + .id = IPA_MEM_MODEM_HEADER, + .offset = 0x0688, + .size = 0x0240, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_HEADER, + .offset = 0x08c8, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM_PROC_CTX, + .offset = 0x0ad0, + .size = 0x0b20, + .canary_count = 2, + }, + { + .id = IPA_MEM_AP_PROC_CTX, + .offset = 0x15f0, + .size = 0x0200, + .canary_count = 0, + }, + { + .id = IPA_MEM_NAT_TABLE, + .offset = 0x1800, + .size = 0x0d00, + .canary_count = 4, + }, + { + .id = IPA_MEM_STATS_QUOTA_MODEM, + .offset = 0x2510, + .size = 0x0030, + .canary_count = 4, + }, + { + .id = IPA_MEM_STATS_QUOTA_AP, + .offset = 0x2540, + .size = 0x0048, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_TETHERING, + .offset = 0x2588, + .size = 0x0238, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_FILTER_ROUTE, + .offset = 0x27c0, + .size = 0x0800, + .canary_count = 0, + }, + { + .id = IPA_MEM_STATS_DROP, + .offset = 0x2fc0, + .size = 0x0020, + .canary_count = 0, + }, + { + .id = IPA_MEM_MODEM, + .offset = 0x2fe8, + .size = 0x0800, + .canary_count = 2, + }, + { + .id = IPA_MEM_UC_EVENT_RING, + .offset = 0x3800, + .size = 0x1000, + .canary_count = 1, + }, + { + .id = IPA_MEM_PDN_CONFIG, + .offset = 0x4800, + .size = 0x0050, + .canary_count = 0, + }, +}; + +/* Memory configuration data for an SoC having IPA v4.9 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146bd000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00009000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + /* Average rate is unused for the next interconnect */ + { + .name = "config", + .peak_bandwidth = 74000, /* 74 MBps */ + .average_bandwidth = 0, /* unused */ + }, + +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.9 */ +static const struct ipa_power_data ipa_power_data = { + .core_clock_rate = 60 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v4.9. 
*/
+const struct ipa_data ipa_data_v4_9 = {
+	.version = IPA_VERSION_4_9,
+	.qsb_count = ARRAY_SIZE(ipa_qsb_data),
+	.qsb_data = ipa_qsb_data,
+	.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+	.endpoint_data = ipa_gsi_endpoint_data,
+	.resource_data = &ipa_resource_data,
+	.mem_data = &ipa_mem_data,
+	.power_data = &ipa_power_data,
+};
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
new file mode 100644
index 0000000000..c1424119e8
--- /dev/null
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mdio.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/phylink.h>
+#include <linux/pm_runtime.h>
+#include <dt-bindings/net/pcs-rzn1-miic.h>
+
+#define MIIC_PRCMD 0x0
+#define MIIC_ESID_CODE 0x4
+
+#define MIIC_MODCTRL 0x20
+#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
+
+#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
+
+#define MIIC_CONVCTRL_CONV_SPEED GENMASK(1, 0)
+#define CONV_MODE_10MBPS 0
+#define CONV_MODE_100MBPS 1
+#define CONV_MODE_1000MBPS 2
+
+#define MIIC_CONVCTRL_CONV_MODE GENMASK(3, 2)
+#define CONV_MODE_MII 0
+#define CONV_MODE_RMII 1
+#define CONV_MODE_RGMII 2
+
+#define MIIC_CONVCTRL_FULLD BIT(8)
+#define MIIC_CONVCTRL_RGMII_LINK BIT(12)
+#define MIIC_CONVCTRL_RGMII_DUPLEX BIT(13)
+#define MIIC_CONVCTRL_RGMII_SPEED GENMASK(15, 14)
+
+#define MIIC_CONVRST 0x114
+#define MIIC_CONVRST_PHYIF_RST(port) BIT(port)
+#define MIIC_CONVRST_PHYIF_RST_MASK GENMASK(4, 0)
+
+#define MIIC_SWCTRL 0x304
+#define MIIC_SWDUPC 0x308
+
+#define MIIC_MAX_NR_PORTS 5
+
+#define MIIC_MODCTRL_CONF_CONV_NUM 6
+#define MIIC_MODCTRL_CONF_NONE -1
+
+/**
+ * struct modctrl_match - Matching table entry for convctrl configuration
+ *			  See section 8.2.1 of the manual.
+ * @mode_cfg: Configuration value for convctrl
+ * @conv: Configuration of ethernet port muxes. First index is SWITCH_PORTIN,
+ *	  then index 1 - 5 are CONV1 - CONV5.
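+ *	  For example, mode_cfg 0x1 feeds CONV4/CONV5 from the EtherCAT
+ *	  ports while CONV2/CONV3 remain on the switch. An entry of
+ *	  MIIC_MODCTRL_CONF_NONE in the array built from the device tree
+ *	  acts as a "don't care" when matching against this table.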
+ */
+struct modctrl_match {
+	u32 mode_cfg;
+	u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
+};
+
+static struct modctrl_match modctrl_match_table[] = {
+	{0x0, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+	{0x1, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+	{0x2, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+	{0x3, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+	{0x8, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+	{0x9, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+	{0xA, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+	{0xB, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+	       MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+	{0x10, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+		MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+	{0x11, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+		MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+	{0x12, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+		MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+	{0x13, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+		MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}}
+};
+
+static const char * const conf_to_string[] = {
+	[MIIC_GMAC1_PORT] = "GMAC1_PORT",
+	[MIIC_GMAC2_PORT] = "GMAC2_PORT",
+	[MIIC_RTOS_PORT] = "RTOS_PORT",
+	[MIIC_SERCOS_PORTA] = "SERCOS_PORTA",
+	[MIIC_SERCOS_PORTB] = "SERCOS_PORTB",
+	[MIIC_ETHERCAT_PORTA] = "ETHERCAT_PORTA",
+	[MIIC_ETHERCAT_PORTB] = "ETHERCAT_PORTB",
+	[MIIC_ETHERCAT_PORTC] = "ETHERCAT_PORTC",
+	[MIIC_SWITCH_PORTA] = "SWITCH_PORTA",
+	[MIIC_SWITCH_PORTB] = "SWITCH_PORTB",
+	[MIIC_SWITCH_PORTC] = "SWITCH_PORTC",
+	[MIIC_SWITCH_PORTD] = "SWITCH_PORTD",
+	[MIIC_HSR_PORTA] = "HSR_PORTA",
+	[MIIC_HSR_PORTB] = "HSR_PORTB",
+};
+
+static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
+	"SWITCH_PORTIN",
+	"CONV1",
+	"CONV2",
+	"CONV3",
+	"CONV4",
+	"CONV5",
+};
+
+/**
+ * struct miic - MII converter structure
+ * @base: base address of the MII converter
+ * @dev: Device associated to the MII converter
+ * @clks: Clocks used for this device
+ * @nclk: Number of clocks
+ * @lock: Lock used for read-modify-write access
+ */
+struct miic {
+	void __iomem *base;
+	struct device *dev;
+	struct clk_bulk_data *clks;
+	int nclk;
+	spinlock_t lock;
+};
+
+/**
+ * struct miic_port - Per port MII converter struct
+ * @miic: backlink to MII converter structure
+ * @pcs: PCS structure associated to the port
+ * @port: port number
+ * @interface: interface mode of the port
+ */
+struct miic_port {
+	struct miic *miic;
+	struct phylink_pcs pcs;
+	int port;
+	phy_interface_t interface;
+};
+
+static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
+{
+	return container_of(pcs, struct miic_port, pcs);
+}
+
+static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+{
+	writel(value, miic->base + offset);
+}
+
+static u32 miic_reg_readl(struct miic *miic, int offset)
+{
+	return readl(miic->base + offset);
+}
+
+static void miic_reg_rmw(struct miic *miic, int offset, u32 mask,
u32 val) +{ + u32 reg; + + spin_lock(&miic->lock); + + reg = miic_reg_readl(miic, offset); + reg &= ~mask; + reg |= val; + miic_reg_writel(miic, offset, reg); + + spin_unlock(&miic->lock); +} + +static void miic_converter_enable(struct miic *miic, int port, int enable) +{ + u32 val = 0; + + if (enable) + val = MIIC_CONVRST_PHYIF_RST(port); + + miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val); +} + +static int miic_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, bool permit) +{ + struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs); + struct miic *miic = miic_port->miic; + u32 speed, conv_mode, val, mask; + int port = miic_port->port; + + switch (interface) { + case PHY_INTERFACE_MODE_RMII: + conv_mode = CONV_MODE_RMII; + speed = CONV_MODE_100MBPS; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + conv_mode = CONV_MODE_RGMII; + speed = CONV_MODE_1000MBPS; + break; + case PHY_INTERFACE_MODE_MII: + conv_mode = CONV_MODE_MII; + /* When in MII mode, speed should be set to 0 (which is actually + * CONV_MODE_10MBPS) + */ + speed = CONV_MODE_10MBPS; + break; + default: + return -EOPNOTSUPP; + } + + val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, conv_mode); + mask = MIIC_CONVCTRL_CONV_MODE; + + /* Update speed only if we are going to change the interface because + * the link might already be up and it would break it if the speed is + * changed. + */ + if (interface != miic_port->interface) { + val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, speed); + mask |= MIIC_CONVCTRL_CONV_SPEED; + miic_port->interface = interface; + } + + miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val); + miic_converter_enable(miic_port->miic, miic_port->port, 1); + + return 0; +} + +static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex) +{ + struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs); + struct miic *miic = miic_port->miic; + u32 conv_speed = 0, val = 0; + int port = miic_port->port; + + if (duplex == DUPLEX_FULL) + val |= MIIC_CONVCTRL_FULLD; + + /* No speed in MII through-mode */ + if (interface != PHY_INTERFACE_MODE_MII) { + switch (speed) { + case SPEED_1000: + conv_speed = CONV_MODE_1000MBPS; + break; + case SPEED_100: + conv_speed = CONV_MODE_100MBPS; + break; + case SPEED_10: + conv_speed = CONV_MODE_10MBPS; + break; + default: + return; + } + } + + val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, conv_speed); + + miic_reg_rmw(miic, MIIC_CONVCTRL(port), + (MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val); +} + +static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported, + const struct phylink_link_state *state) +{ + if (phy_interface_mode_is_rgmii(state->interface) || + state->interface == PHY_INTERFACE_MODE_RMII || + state->interface == PHY_INTERFACE_MODE_MII) + return 1; + + return -EINVAL; +} + +static const struct phylink_pcs_ops miic_phylink_ops = { + .pcs_validate = miic_validate, + .pcs_config = miic_config, + .pcs_link_up = miic_link_up, +}; + +struct phylink_pcs *miic_create(struct device *dev, struct device_node *np) +{ + struct platform_device *pdev; + struct miic_port *miic_port; + struct device_node *pcs_np; + struct miic *miic; + u32 port; + + if (!of_device_is_available(np)) + return ERR_PTR(-ENODEV); + + if (of_property_read_u32(np, "reg", &port)) + return ERR_PTR(-EINVAL); + + if (port > MIIC_MAX_NR_PORTS || port < 
1) + return ERR_PTR(-EINVAL); + + /* The PCS pdev is attached to the parent node */ + pcs_np = of_get_parent(np); + if (!pcs_np) + return ERR_PTR(-ENODEV); + + if (!of_device_is_available(pcs_np)) { + of_node_put(pcs_np); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(pcs_np); + of_node_put(pcs_np); + if (!pdev || !platform_get_drvdata(pdev)) + return ERR_PTR(-EPROBE_DEFER); + + miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL); + if (!miic_port) + return ERR_PTR(-ENOMEM); + + miic = platform_get_drvdata(pdev); + device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER); + + miic_port->miic = miic; + miic_port->port = port - 1; + miic_port->pcs.ops = &miic_phylink_ops; + + return &miic_port->pcs; +} +EXPORT_SYMBOL(miic_create); + +void miic_destroy(struct phylink_pcs *pcs) +{ + struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs); + + miic_converter_enable(miic_port->miic, miic_port->port, 0); + kfree(miic_port); +} +EXPORT_SYMBOL(miic_destroy); + +static int miic_init_hw(struct miic *miic, u32 cfg_mode) +{ + int port; + + /* Unlock write access to accessory registers (cf datasheet). If this + * is going to be used in conjunction with the Cortex-M3, this sequence + * will have to be moved in register write + */ + miic_reg_writel(miic, MIIC_PRCMD, 0x00A5); + miic_reg_writel(miic, MIIC_PRCMD, 0x0001); + miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE); + miic_reg_writel(miic, MIIC_PRCMD, 0x0001); + + miic_reg_writel(miic, MIIC_MODCTRL, + FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode)); + + for (port = 0; port < MIIC_MAX_NR_PORTS; port++) { + miic_converter_enable(miic, port, 0); + /* Disable speed/duplex control from these registers, datasheet + * says switch registers should be used to setup switch port + * speed and duplex. + */ + miic_reg_writel(miic, MIIC_SWCTRL, 0x0); + miic_reg_writel(miic, MIIC_SWDUPC, 0x0); + } + + return 0; +} + +static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM], + s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM]) +{ + int i; + + for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) { + if (dt_val[i] == MIIC_MODCTRL_CONF_NONE) + continue; + + if (dt_val[i] != table_val[i]) + return false; + } + + return true; +} + +static void miic_dump_conf(struct device *dev, + s8 conf[MIIC_MODCTRL_CONF_CONV_NUM]) +{ + const char *conf_name; + int i; + + for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) { + if (conf[i] != MIIC_MODCTRL_CONF_NONE) + conf_name = conf_to_string[conf[i]]; + else + conf_name = "NONE"; + + dev_err(dev, "%s: %s\n", index_to_string[i], conf_name); + } +} + +static int miic_match_dt_conf(struct device *dev, + s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM], + u32 *mode_cfg) +{ + struct modctrl_match *table_entry; + int i; + + for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) { + table_entry = &modctrl_match_table[i]; + + if (miic_modctrl_match(table_entry->conv, dt_val)) { + *mode_cfg = table_entry->mode_cfg; + return 0; + } + } + + dev_err(dev, "Failed to apply requested configuration\n"); + miic_dump_conf(dev, dt_val); + + return -EINVAL; +} + +static int miic_parse_dt(struct device *dev, u32 *mode_cfg) +{ + s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM]; + struct device_node *np = dev->of_node; + struct device_node *conv; + u32 conf; + int port; + + memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val)); + + if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0) + dt_val[0] = conf; + + for_each_child_of_node(np, conv) { + if (of_property_read_u32(conv, "reg", &port)) + continue; + + if (!of_device_is_available(conv)) + continue; 
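+
+		/* Each available converter child selects, via
+		 * "renesas,miic-input", the input port it is fed from; the
+		 * values collected here are then matched against
+		 * modctrl_match_table to derive the MODCTRL SW_MODE value.
+		 */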
+		if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
+			dt_val[port] = conf;
+	}
+
+	return miic_match_dt_conf(dev, dt_val, mode_cfg);
+}
+
+static int miic_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct miic *miic;
+	u32 mode_cfg;
+	int ret;
+
+	ret = miic_parse_dt(dev, &mode_cfg);
+	if (ret < 0)
+		return ret;
+
+	miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
+	if (!miic)
+		return -ENOMEM;
+
+	spin_lock_init(&miic->lock);
+	miic->dev = dev;
+	miic->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(miic->base))
+		return PTR_ERR(miic->base);
+
+	ret = devm_pm_runtime_enable(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = miic_init_hw(miic, mode_cfg);
+	if (ret)
+		goto disable_runtime_pm;
+
+	/* miic_create() relies on the fact that data is attached to the
+	 * platform device to determine if the driver is ready, so this needs
+	 * to be the last thing done after everything else is initialized
+	 * properly.
+	 */
+	platform_set_drvdata(pdev, miic);
+
+	return 0;
+
+disable_runtime_pm:
+	pm_runtime_put(dev);
+
+	return ret;
+}
+
+static int miic_remove(struct platform_device *pdev)
+{
+	pm_runtime_put(&pdev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id miic_of_mtable[] = {
+	{ .compatible = "renesas,rzn1-miic" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, miic_of_mtable);
+
+static struct platform_driver miic_driver = {
+	.driver = {
+		.name = "rzn1_miic",
+		.suppress_bind_attrs = true,
+		.of_match_table = miic_of_mtable,
+	},
+	.probe = miic_probe,
+	.remove = miic_remove,
+};
+module_platform_driver(miic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas MII converter PCS driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
new file mode 100644
index 0000000000..b6d1395011
--- /dev/null
+++ b/drivers/net/phy/adin1100.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Driver for Analog Devices Industrial Ethernet T1L PHYs
+ *
+ * Copyright 2020 Analog Devices Inc.
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PHY_ID_ADIN1100 0x0283bc81 + +#define ADIN_FORCED_MODE 0x8000 +#define ADIN_FORCED_MODE_EN BIT(0) + +#define ADIN_CRSM_SFT_RST 0x8810 +#define ADIN_CRSM_SFT_RST_EN BIT(0) + +#define ADIN_CRSM_SFT_PD_CNTRL 0x8812 +#define ADIN_CRSM_SFT_PD_CNTRL_EN BIT(0) + +#define ADIN_AN_PHY_INST_STATUS 0x8030 +#define ADIN_IS_CFG_SLV BIT(2) +#define ADIN_IS_CFG_MST BIT(3) + +#define ADIN_CRSM_STAT 0x8818 +#define ADIN_CRSM_SFT_PD_RDY BIT(1) +#define ADIN_CRSM_SYS_RDY BIT(0) + +#define ADIN_MSE_VAL 0x830B + +#define ADIN_SQI_MAX 7 + +struct adin_mse_sqi_range { + u16 start; + u16 end; +}; + +static const struct adin_mse_sqi_range adin_mse_sqi_map[] = { + { 0x0A74, 0xFFFF }, + { 0x084E, 0x0A74 }, + { 0x0698, 0x084E }, + { 0x053D, 0x0698 }, + { 0x0429, 0x053D }, + { 0x034E, 0x0429 }, + { 0x02A0, 0x034E }, + { 0x0000, 0x02A0 }, +}; + +/** + * struct adin_priv - ADIN PHY driver private data + * @tx_level_2v4_able: set if the PHY supports 2.4V TX levels (10BASE-T1L) + * @tx_level_2v4: set if the PHY requests 2.4V TX levels (10BASE-T1L) + * @tx_level_prop_present: set if the TX level is specified in DT + */ +struct adin_priv { + unsigned int tx_level_2v4_able:1; + unsigned int tx_level_2v4:1; + unsigned int tx_level_prop_present:1; +}; + +static int adin_read_status(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_read_status(phydev); + if (ret) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_AN, ADIN_AN_PHY_INST_STATUS); + if (ret < 0) + return ret; + + if (ret & ADIN_IS_CFG_SLV) + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + + if (ret & ADIN_IS_CFG_MST) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + + return 0; +} + +static int adin_config_aneg(struct phy_device *phydev) +{ + struct adin_priv *priv = phydev->priv; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) { + ret = genphy_c45_pma_setup_forced(phydev); + if (ret < 0) + return ret; + + if (priv->tx_level_prop_present && priv->tx_level_2v4) + ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL, + MDIO_PMA_10T1L_CTRL_2V4_EN); + else + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL, + MDIO_PMA_10T1L_CTRL_2V4_EN); + if (ret < 0) + return ret; + + /* Force PHY to use above configurations */ + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN); + } + + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN); + if (ret < 0) + return ret; + + /* Request increased transmit level from LP. */ + if (priv->tx_level_prop_present && priv->tx_level_2v4) { + ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, + MDIO_AN_T1_ADV_H_10L_TX_HI | + MDIO_AN_T1_ADV_H_10L_TX_HI_REQ); + if (ret < 0) + return ret; + } + + /* Disable 2.4 Vpp transmit level. */ + if ((priv->tx_level_prop_present && !priv->tx_level_2v4) || !priv->tx_level_2v4_able) { + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, + MDIO_AN_T1_ADV_H_10L_TX_HI | + MDIO_AN_T1_ADV_H_10L_TX_HI_REQ); + if (ret < 0) + return ret; + } + + return genphy_c45_config_aneg(phydev); +} + +static int adin_set_powerdown_mode(struct phy_device *phydev, bool en) +{ + int ret; + int val; + + val = en ? 
ADIN_CRSM_SFT_PD_CNTRL_EN : 0;
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    ADIN_CRSM_SFT_PD_CNTRL, val);
+	if (ret < 0)
+		return ret;
+
+	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
+					 !!(ret & ADIN_CRSM_SFT_PD_RDY) == en,
+					 1000, 30000, true);
+}
+
+static int adin_suspend(struct phy_device *phydev)
+{
+	return adin_set_powerdown_mode(phydev, true);
+}
+
+static int adin_resume(struct phy_device *phydev)
+{
+	return adin_set_powerdown_mode(phydev, false);
+}
+
+static int adin_set_loopback(struct phy_device *phydev, bool enable)
+{
+	if (enable)
+		return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
+					BMCR_LOOPBACK);
+
+	/* PCS loopback (according to 10BASE-T1L spec) */
+	return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
+				  BMCR_LOOPBACK);
+}
+
+static int adin_soft_reset(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN_CRSM_SFT_RST, ADIN_CRSM_SFT_RST_EN);
+	if (ret < 0)
+		return ret;
+
+	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
+					 (ret & ADIN_CRSM_SYS_RDY),
+					 10000, 30000, true);
+}
+
+static int adin_get_features(struct phy_device *phydev)
+{
+	struct adin_priv *priv = phydev->priv;
+	struct device *dev = &phydev->mdio.dev;
+	int ret;
+	u8 val;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT);
+	if (ret < 0)
+		return ret;
+
+	/* This depends on the voltage level from the power source */
+	priv->tx_level_2v4_able = !!(ret & MDIO_PMA_10T1L_STAT_2V4_ABLE);
+
+	phydev_dbg(phydev, "PHY supports 2.4V TX level: %s\n",
+		   priv->tx_level_2v4_able ? "yes" : "no");
+
+	priv->tx_level_prop_present = device_property_present(dev, "phy-10base-t1l-2.4vpp");
+	if (priv->tx_level_prop_present) {
+		ret = device_property_read_u8(dev, "phy-10base-t1l-2.4vpp", &val);
+		if (ret < 0)
+			return ret;
+
+		priv->tx_level_2v4 = val;
+		if (!priv->tx_level_2v4 && priv->tx_level_2v4_able)
+			phydev_info(phydev,
+				    "PHY supports 2.4V TX level, but disabled via config\n");
+	}
+
+	linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array),
+			       phydev->supported);
+
+	return genphy_c45_pma_read_abilities(phydev);
+}
+
+static int adin_get_sqi(struct phy_device *phydev)
+{
+	u16 mse_val;
+	int sqi;
+	int ret;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1);
+	if (ret < 0)
+		return ret;
+	else if (!(ret & MDIO_STAT1_LSTATUS))
+		return 0;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, ADIN_MSE_VAL);
+	if (ret < 0)
+		return ret;
+
+	mse_val = 0xFFFF & ret;
+	for (sqi = 0; sqi < ARRAY_SIZE(adin_mse_sqi_map); sqi++) {
+		if (mse_val >= adin_mse_sqi_map[sqi].start && mse_val <= adin_mse_sqi_map[sqi].end)
+			return sqi;
+	}
+
+	return -EINVAL;
+}
+
+static int adin_get_sqi_max(struct phy_device *phydev)
+{
+	return ADIN_SQI_MAX;
+}
+
+static int adin_probe(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	struct adin_priv *priv;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	phydev->priv = priv;
+
+	return 0;
+}
+
+static struct phy_driver adin_driver[] = {
+	{
+		PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100),
+		.name = "ADIN1100",
+		.get_features = adin_get_features,
+		.soft_reset = adin_soft_reset,
+		.probe = adin_probe,
+		.config_aneg = adin_config_aneg,
+		.read_status = adin_read_status,
+		.set_loopback = adin_set_loopback,
+		.suspend = adin_suspend,
+		.resume = adin_resume,
+		.get_sqi = adin_get_sqi,
+		.get_sqi_max = adin_get_sqi_max,
+	},
+};
+
+module_phy_driver(adin_driver);
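adin_get_sqi() above converts the PHY's mean-square-error reading into a 0..7 signal-quality index by walking adin_mse_sqi_map; a lower MSE means a better SQI. A minimal standalone sketch of the same table walk (illustration only, not part of the patch; the sample MSE value is invented):

#include <stdio.h>

/* Same ranges as adin_mse_sqi_map; index 0 is the worst SQI. */
static const struct { unsigned short start, end; } mse_sqi_map[] = {
	{ 0x0A74, 0xFFFF }, { 0x084E, 0x0A74 }, { 0x0698, 0x084E },
	{ 0x053D, 0x0698 }, { 0x0429, 0x053D }, { 0x034E, 0x0429 },
	{ 0x02A0, 0x034E }, { 0x0000, 0x02A0 },
};

int main(void)
{
	unsigned short mse = 0x0400;	/* hypothetical ADIN_MSE_VAL reading */
	int sqi;

	/* First matching range wins, as in adin_get_sqi() */
	for (sqi = 0; sqi < 8; sqi++)
		if (mse >= mse_sqi_map[sqi].start && mse <= mse_sqi_map[sqi].end)
			break;

	printf("MSE 0x%04x -> SQI %d of 7\n", mse, sqi);	/* prints SQI 5 */
	return 0;
}

Adjacent entries share their boundary value, so a reading that lands exactly on a boundary resolves to the lower (worse) index, matching the inclusive comparisons in adin_get_sqi().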
+ +static struct mdio_device_id __maybe_unused adin_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, adin_tbl); +MODULE_DESCRIPTION("Analog Devices Industrial Ethernet T1L PHY driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c new file mode 100644 index 0000000000..08a1824d4a --- /dev/null +++ b/drivers/net/phy/bcm-phy-ptp.c @@ -0,0 +1,947 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Meta Platforms Inc. + * Copyright (C) 2022 Jonathan Lemon + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcm-phy-lib.h" + +/* IEEE 1588 Expansion registers */ +#define SLICE_CTRL 0x0810 +#define SLICE_TX_EN BIT(0) +#define SLICE_RX_EN BIT(8) +#define TX_EVENT_MODE 0x0811 +#define MODE_TX_UPDATE_CF BIT(0) +#define MODE_TX_REPLACE_TS_CF BIT(1) +#define MODE_TX_REPLACE_TS GENMASK(1, 0) +#define RX_EVENT_MODE 0x0819 +#define MODE_RX_UPDATE_CF BIT(0) +#define MODE_RX_INSERT_TS_48 BIT(1) +#define MODE_RX_INSERT_TS_64 GENMASK(1, 0) + +#define MODE_EVT_SHIFT_SYNC 0 +#define MODE_EVT_SHIFT_DELAY_REQ 2 +#define MODE_EVT_SHIFT_PDELAY_REQ 4 +#define MODE_EVT_SHIFT_PDELAY_RESP 6 + +#define MODE_SEL_SHIFT_PORT 0 +#define MODE_SEL_SHIFT_CPU 8 + +#define RX_MODE_SEL(sel, evt, act) \ + (((MODE_RX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel)) + +#define TX_MODE_SEL(sel, evt, act) \ + (((MODE_TX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel)) + +/* needs global TS capture first */ +#define TX_TS_CAPTURE 0x0821 +#define TX_TS_CAP_EN BIT(0) +#define RX_TS_CAPTURE 0x0822 +#define RX_TS_CAP_EN BIT(0) + +#define TIME_CODE_0 0x0854 +#define TIME_CODE_1 0x0855 +#define TIME_CODE_2 0x0856 +#define TIME_CODE_3 0x0857 +#define TIME_CODE_4 0x0858 + +#define DPLL_SELECT 0x085b +#define DPLL_HB_MODE2 BIT(6) + +#define SHADOW_CTRL 0x085c +#define SHADOW_LOAD 0x085d +#define TIME_CODE_LOAD BIT(10) +#define SYNC_OUT_LOAD BIT(9) +#define NCO_TIME_LOAD BIT(7) +#define FREQ_LOAD BIT(6) +#define INTR_MASK 0x085e +#define INTR_STATUS 0x085f +#define INTC_FSYNC BIT(0) +#define INTC_SOP BIT(1) + +#define NCO_FREQ_LSB 0x0873 +#define NCO_FREQ_MSB 0x0874 + +#define NCO_TIME_0 0x0875 +#define NCO_TIME_1 0x0876 +#define NCO_TIME_2_CTRL 0x0877 +#define FREQ_MDIO_SEL BIT(14) + +#define SYNC_OUT_0 0x0878 +#define SYNC_OUT_1 0x0879 +#define SYNC_OUT_2 0x087a + +#define SYNC_IN_DIVIDER 0x087b + +#define SYNOUT_TS_0 0x087c +#define SYNOUT_TS_1 0x087d +#define SYNOUT_TS_2 0x087e + +#define NSE_CTRL 0x087f +#define NSE_GMODE_EN GENMASK(15, 14) +#define NSE_CAPTURE_EN BIT(13) +#define NSE_INIT BIT(12) +#define NSE_CPU_FRAMESYNC BIT(5) +#define NSE_SYNC1_FRAMESYNC BIT(3) +#define NSE_FRAMESYNC_MASK GENMASK(5, 2) +#define NSE_PEROUT_EN BIT(1) +#define NSE_ONESHOT_EN BIT(0) +#define NSE_SYNC_OUT_MASK GENMASK(1, 0) + +#define TS_READ_CTRL 0x0885 +#define TS_READ_START BIT(0) +#define TS_READ_END BIT(1) + +#define HB_REG_0 0x0886 +#define HB_REG_1 0x0887 +#define HB_REG_2 0x0888 +#define HB_REG_3 0x08ec +#define HB_REG_4 0x08ed +#define HB_STAT_CTRL 0x088e +#define HB_READ_START BIT(10) +#define HB_READ_END BIT(11) +#define HB_READ_MASK GENMASK(11, 10) + +#define TS_REG_0 0x0889 +#define TS_REG_1 0x088a +#define TS_REG_2 0x088b +#define TS_REG_3 0x08c4 + +#define TS_INFO_0 0x088c +#define TS_INFO_1 0x088d + +#define TIMECODE_CTRL 0x08c3 +#define TX_TIMECODE_SEL GENMASK(7, 0) +#define RX_TIMECODE_SEL GENMASK(15, 8) + +#define TIME_SYNC 0x0ff5 +#define 
TIME_SYNC_EN BIT(0) + +struct bcm_ptp_private { + struct phy_device *phydev; + struct mii_timestamper mii_ts; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_info; + struct ptp_pin_desc pin; + struct mutex mutex; + struct sk_buff_head tx_queue; + int tx_type; + bool hwts_rx; + u16 nse_ctrl; + bool pin_active; + struct delayed_work pin_work; +}; + +struct bcm_ptp_skb_cb { + unsigned long timeout; + u16 seq_id; + u8 msgtype; + bool discard; +}; + +struct bcm_ptp_capture { + ktime_t hwtstamp; + u16 seq_id; + u8 msgtype; + bool tx_dir; +}; + +#define BCM_SKB_CB(skb) ((struct bcm_ptp_skb_cb *)(skb)->cb) +#define SKB_TS_TIMEOUT 10 /* jiffies */ + +#define BCM_MAX_PULSE_8NS ((1U << 9) - 1) +#define BCM_MAX_PERIOD_8NS ((1U << 30) - 1) + +#define BRCM_PHY_MODEL(phydev) \ + ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask) + +static struct bcm_ptp_private *mii2priv(struct mii_timestamper *mii_ts) +{ + return container_of(mii_ts, struct bcm_ptp_private, mii_ts); +} + +static struct bcm_ptp_private *ptp2priv(struct ptp_clock_info *info) +{ + return container_of(info, struct bcm_ptp_private, ptp_info); +} + +static void bcm_ptp_get_framesync_ts(struct phy_device *phydev, + struct timespec64 *ts) +{ + u16 hb[4]; + + bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_START); + + hb[0] = bcm_phy_read_exp(phydev, HB_REG_0); + hb[1] = bcm_phy_read_exp(phydev, HB_REG_1); + hb[2] = bcm_phy_read_exp(phydev, HB_REG_2); + hb[3] = bcm_phy_read_exp(phydev, HB_REG_3); + + bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_END); + bcm_phy_write_exp(phydev, HB_STAT_CTRL, 0); + + ts->tv_sec = (hb[3] << 16) | hb[2]; + ts->tv_nsec = (hb[1] << 16) | hb[0]; +} + +static u16 bcm_ptp_framesync_disable(struct phy_device *phydev, u16 orig_ctrl) +{ + u16 ctrl = orig_ctrl & ~(NSE_FRAMESYNC_MASK | NSE_CAPTURE_EN); + + bcm_phy_write_exp(phydev, NSE_CTRL, ctrl); + + return ctrl; +} + +static void bcm_ptp_framesync_restore(struct phy_device *phydev, u16 orig_ctrl) +{ + if (orig_ctrl & NSE_FRAMESYNC_MASK) + bcm_phy_write_exp(phydev, NSE_CTRL, orig_ctrl); +} + +static void bcm_ptp_framesync(struct phy_device *phydev, u16 ctrl) +{ + /* trigger framesync - must have 0->1 transition. */ + bcm_phy_write_exp(phydev, NSE_CTRL, ctrl | NSE_CPU_FRAMESYNC); +} + +static int bcm_ptp_framesync_ts(struct phy_device *phydev, + struct ptp_system_timestamp *sts, + struct timespec64 *ts, + u16 orig_ctrl) +{ + u16 ctrl, reg; + int i; + + ctrl = bcm_ptp_framesync_disable(phydev, orig_ctrl); + + ptp_read_system_prets(sts); + + /* trigger framesync + capture */ + bcm_ptp_framesync(phydev, ctrl | NSE_CAPTURE_EN); + + ptp_read_system_postts(sts); + + /* poll for FSYNC interrupt from TS capture */ + for (i = 0; i < 10; i++) { + reg = bcm_phy_read_exp(phydev, INTR_STATUS); + if (reg & INTC_FSYNC) { + bcm_ptp_get_framesync_ts(phydev, ts); + break; + } + } + + bcm_ptp_framesync_restore(phydev, orig_ctrl); + + return reg & INTC_FSYNC ? 
0 : -ETIMEDOUT; +} + +static int bcm_ptp_gettimex(struct ptp_clock_info *info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct bcm_ptp_private *priv = ptp2priv(info); + int err; + + mutex_lock(&priv->mutex); + err = bcm_ptp_framesync_ts(priv->phydev, sts, ts, priv->nse_ctrl); + mutex_unlock(&priv->mutex); + + return err; +} + +static int bcm_ptp_settime_locked(struct bcm_ptp_private *priv, + const struct timespec64 *ts) +{ + struct phy_device *phydev = priv->phydev; + u16 ctrl; + u64 ns; + + ctrl = bcm_ptp_framesync_disable(phydev, priv->nse_ctrl); + + /* set up time code */ + bcm_phy_write_exp(phydev, TIME_CODE_0, ts->tv_nsec); + bcm_phy_write_exp(phydev, TIME_CODE_1, ts->tv_nsec >> 16); + bcm_phy_write_exp(phydev, TIME_CODE_2, ts->tv_sec); + bcm_phy_write_exp(phydev, TIME_CODE_3, ts->tv_sec >> 16); + bcm_phy_write_exp(phydev, TIME_CODE_4, ts->tv_sec >> 32); + + /* set NCO counter to match */ + ns = timespec64_to_ns(ts); + bcm_phy_write_exp(phydev, NCO_TIME_0, ns >> 4); + bcm_phy_write_exp(phydev, NCO_TIME_1, ns >> 20); + bcm_phy_write_exp(phydev, NCO_TIME_2_CTRL, (ns >> 36) & 0xfff); + + /* set up load on next frame sync (auto-clears due to NSE_INIT) */ + bcm_phy_write_exp(phydev, SHADOW_LOAD, TIME_CODE_LOAD | NCO_TIME_LOAD); + + /* must have NSE_INIT in order to write time code */ + bcm_ptp_framesync(phydev, ctrl | NSE_INIT); + + bcm_ptp_framesync_restore(phydev, priv->nse_ctrl); + + return 0; +} + +static int bcm_ptp_settime(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct bcm_ptp_private *priv = ptp2priv(info); + int err; + + mutex_lock(&priv->mutex); + err = bcm_ptp_settime_locked(priv, ts); + mutex_unlock(&priv->mutex); + + return err; +} + +static int bcm_ptp_adjtime_locked(struct bcm_ptp_private *priv, + s64 delta_ns) +{ + struct timespec64 ts; + int err; + s64 ns; + + err = bcm_ptp_framesync_ts(priv->phydev, NULL, &ts, priv->nse_ctrl); + if (!err) { + ns = timespec64_to_ns(&ts) + delta_ns; + ts = ns_to_timespec64(ns); + err = bcm_ptp_settime_locked(priv, &ts); + } + return err; +} + +static int bcm_ptp_adjtime(struct ptp_clock_info *info, s64 delta_ns) +{ + struct bcm_ptp_private *priv = ptp2priv(info); + int err; + + mutex_lock(&priv->mutex); + err = bcm_ptp_adjtime_locked(priv, delta_ns); + mutex_unlock(&priv->mutex); + + return err; +} + +/* A 125Mhz clock should adjust 8ns per pulse. + * The frequency adjustment base is 0x8000 0000, or 8*2^28. + * + * Frequency adjustment is + * adj = scaled_ppm * 8*2^28 / (10^6 * 2^16) + * which simplifies to: + * adj = scaled_ppm * 2^9 / 5^6 + */ +static int bcm_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) +{ + struct bcm_ptp_private *priv = ptp2priv(info); + int neg_adj = 0; + u32 diff, freq; + u16 ctrl; + u64 adj; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + + adj = scaled_ppm << 9; + diff = div_u64(adj, 15625); + freq = (8 << 28) + (neg_adj ? 
-diff : diff); + + mutex_lock(&priv->mutex); + + ctrl = bcm_ptp_framesync_disable(priv->phydev, priv->nse_ctrl); + + bcm_phy_write_exp(priv->phydev, NCO_FREQ_LSB, freq); + bcm_phy_write_exp(priv->phydev, NCO_FREQ_MSB, freq >> 16); + + bcm_phy_write_exp(priv->phydev, NCO_TIME_2_CTRL, FREQ_MDIO_SEL); + + /* load on next framesync */ + bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, FREQ_LOAD); + + bcm_ptp_framesync(priv->phydev, ctrl); + + /* clear load */ + bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, 0); + + bcm_ptp_framesync_restore(priv->phydev, priv->nse_ctrl); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static bool bcm_ptp_rxtstamp(struct mii_timestamper *mii_ts, + struct sk_buff *skb, int type) +{ + struct bcm_ptp_private *priv = mii2priv(mii_ts); + struct skb_shared_hwtstamps *hwts; + struct ptp_header *header; + u32 sec, nsec; + u8 *data; + int off; + + if (!priv->hwts_rx) + return false; + + header = ptp_parse_header(skb, type); + if (!header) + return false; + + data = (u8 *)(header + 1); + sec = get_unaligned_be32(data); + nsec = get_unaligned_be32(data + 4); + + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = ktime_set(sec, nsec); + + off = data - skb->data + 8; + if (off < skb->len) { + memmove(data, data + 8, skb->len - off); + __pskb_trim(skb, skb->len - 8); + } + + return false; +} + +static bool bcm_ptp_get_tstamp(struct bcm_ptp_private *priv, + struct bcm_ptp_capture *capts) +{ + struct phy_device *phydev = priv->phydev; + u16 ts[4], reg; + u32 sec, nsec; + + mutex_lock(&priv->mutex); + + reg = bcm_phy_read_exp(phydev, INTR_STATUS); + if ((reg & INTC_SOP) == 0) { + mutex_unlock(&priv->mutex); + return false; + } + + bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_START); + + ts[0] = bcm_phy_read_exp(phydev, TS_REG_0); + ts[1] = bcm_phy_read_exp(phydev, TS_REG_1); + ts[2] = bcm_phy_read_exp(phydev, TS_REG_2); + ts[3] = bcm_phy_read_exp(phydev, TS_REG_3); + + /* not in be32 format for some reason */ + capts->seq_id = bcm_phy_read_exp(priv->phydev, TS_INFO_0); + + reg = bcm_phy_read_exp(phydev, TS_INFO_1); + capts->msgtype = reg >> 12; + capts->tx_dir = !!(reg & BIT(11)); + + bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_END); + bcm_phy_write_exp(phydev, TS_READ_CTRL, 0); + + mutex_unlock(&priv->mutex); + + sec = (ts[3] << 16) | ts[2]; + nsec = (ts[1] << 16) | ts[0]; + capts->hwtstamp = ktime_set(sec, nsec); + + return true; +} + +static void bcm_ptp_match_tstamp(struct bcm_ptp_private *priv, + struct bcm_ptp_capture *capts) +{ + struct skb_shared_hwtstamps hwts; + struct sk_buff *skb, *ts_skb; + unsigned long flags; + bool first = false; + + ts_skb = NULL; + spin_lock_irqsave(&priv->tx_queue.lock, flags); + skb_queue_walk(&priv->tx_queue, skb) { + if (BCM_SKB_CB(skb)->seq_id == capts->seq_id && + BCM_SKB_CB(skb)->msgtype == capts->msgtype) { + first = skb_queue_is_first(&priv->tx_queue, skb); + __skb_unlink(skb, &priv->tx_queue); + ts_skb = skb; + break; + } + } + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + + /* TX captures one-step packets, discard them if needed. 
*/ + if (ts_skb) { + if (BCM_SKB_CB(ts_skb)->discard) { + kfree_skb(ts_skb); + } else { + memset(&hwts, 0, sizeof(hwts)); + hwts.hwtstamp = capts->hwtstamp; + skb_complete_tx_timestamp(ts_skb, &hwts); + } + } + + /* not first match, try and expire entries */ + if (!first) { + while ((skb = skb_dequeue(&priv->tx_queue))) { + if (!time_after(jiffies, BCM_SKB_CB(skb)->timeout)) { + skb_queue_head(&priv->tx_queue, skb); + break; + } + kfree_skb(skb); + } + } +} + +static long bcm_ptp_do_aux_work(struct ptp_clock_info *info) +{ + struct bcm_ptp_private *priv = ptp2priv(info); + struct bcm_ptp_capture capts; + bool reschedule = false; + + while (!skb_queue_empty_lockless(&priv->tx_queue)) { + if (!bcm_ptp_get_tstamp(priv, &capts)) { + reschedule = true; + break; + } + bcm_ptp_match_tstamp(priv, &capts); + } + + return reschedule ? 1 : -1; +} + +static int bcm_ptp_cancel_func(struct bcm_ptp_private *priv) +{ + if (!priv->pin_active) + return 0; + + priv->pin_active = false; + + priv->nse_ctrl &= ~(NSE_SYNC_OUT_MASK | NSE_SYNC1_FRAMESYNC | + NSE_CAPTURE_EN); + bcm_phy_write_exp(priv->phydev, NSE_CTRL, priv->nse_ctrl); + + cancel_delayed_work_sync(&priv->pin_work); + + return 0; +} + +static void bcm_ptp_perout_work(struct work_struct *pin_work) +{ + struct bcm_ptp_private *priv = + container_of(pin_work, struct bcm_ptp_private, pin_work.work); + struct phy_device *phydev = priv->phydev; + struct timespec64 ts; + u64 ns, next; + u16 ctrl; + + mutex_lock(&priv->mutex); + + /* no longer running */ + if (!priv->pin_active) { + mutex_unlock(&priv->mutex); + return; + } + + bcm_ptp_framesync_ts(phydev, NULL, &ts, priv->nse_ctrl); + + /* this is 1PPS only */ + next = NSEC_PER_SEC - ts.tv_nsec; + ts.tv_sec += next < NSEC_PER_MSEC ? 2 : 1; + ts.tv_nsec = 0; + + ns = timespec64_to_ns(&ts); + + /* force 0->1 transition for ONESHOT */ + ctrl = bcm_ptp_framesync_disable(phydev, + priv->nse_ctrl & ~NSE_ONESHOT_EN); + + bcm_phy_write_exp(phydev, SYNOUT_TS_0, ns & 0xfff0); + bcm_phy_write_exp(phydev, SYNOUT_TS_1, ns >> 16); + bcm_phy_write_exp(phydev, SYNOUT_TS_2, ns >> 32); + + /* load values on next framesync */ + bcm_phy_write_exp(phydev, SHADOW_LOAD, SYNC_OUT_LOAD); + + bcm_ptp_framesync(phydev, ctrl | NSE_ONESHOT_EN | NSE_INIT); + + priv->nse_ctrl |= NSE_ONESHOT_EN; + bcm_ptp_framesync_restore(phydev, priv->nse_ctrl); + + mutex_unlock(&priv->mutex); + + next = next + NSEC_PER_MSEC; + schedule_delayed_work(&priv->pin_work, nsecs_to_jiffies(next)); +} + +static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv, + struct ptp_perout_request *req, int on) +{ + struct phy_device *phydev = priv->phydev; + u64 period, pulse; + u16 val; + + if (!on) + return bcm_ptp_cancel_func(priv); + + /* 1PPS */ + if (req->period.sec != 1 || req->period.nsec != 0) + return -EINVAL; + + period = BCM_MAX_PERIOD_8NS; /* write nonzero value */ + + if (req->flags & PTP_PEROUT_PHASE) + return -EOPNOTSUPP; + + if (req->flags & PTP_PEROUT_DUTY_CYCLE) + pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec)); + else + pulse = (u64)BCM_MAX_PULSE_8NS << 3; + + /* convert to 8ns units */ + pulse >>= 3; + + if (!pulse || pulse > period || pulse > BCM_MAX_PULSE_8NS) + return -EINVAL; + + bcm_phy_write_exp(phydev, SYNC_OUT_0, period); + + val = ((pulse & 0x3) << 14) | ((period >> 16) & 0x3fff); + bcm_phy_write_exp(phydev, SYNC_OUT_1, val); + + val = ((pulse >> 2) & 0x7f) | (pulse << 7); + bcm_phy_write_exp(phydev, SYNC_OUT_2, val); + + if (priv->pin_active) + cancel_delayed_work_sync(&priv->pin_work); + + priv->pin_active = true; + 
INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_perout_work); + schedule_delayed_work(&priv->pin_work, 0); + + return 0; +} + +static void bcm_ptp_extts_work(struct work_struct *pin_work) +{ + struct bcm_ptp_private *priv = + container_of(pin_work, struct bcm_ptp_private, pin_work.work); + struct phy_device *phydev = priv->phydev; + struct ptp_clock_event event; + struct timespec64 ts; + u16 reg; + + mutex_lock(&priv->mutex); + + /* no longer running */ + if (!priv->pin_active) { + mutex_unlock(&priv->mutex); + return; + } + + reg = bcm_phy_read_exp(phydev, INTR_STATUS); + if ((reg & INTC_FSYNC) == 0) + goto out; + + bcm_ptp_get_framesync_ts(phydev, &ts); + + event.index = 0; + event.type = PTP_CLOCK_EXTTS; + event.timestamp = timespec64_to_ns(&ts); + ptp_clock_event(priv->ptp_clock, &event); + +out: + mutex_unlock(&priv->mutex); + schedule_delayed_work(&priv->pin_work, HZ / 4); +} + +static int bcm_ptp_extts_locked(struct bcm_ptp_private *priv, int on) +{ + struct phy_device *phydev = priv->phydev; + + if (!on) + return bcm_ptp_cancel_func(priv); + + if (priv->pin_active) + cancel_delayed_work_sync(&priv->pin_work); + + bcm_ptp_framesync_disable(phydev, priv->nse_ctrl); + + priv->nse_ctrl |= NSE_SYNC1_FRAMESYNC | NSE_CAPTURE_EN; + + bcm_ptp_framesync_restore(phydev, priv->nse_ctrl); + + priv->pin_active = true; + INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_extts_work); + schedule_delayed_work(&priv->pin_work, 0); + + return 0; +} + +static int bcm_ptp_enable(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) +{ + struct bcm_ptp_private *priv = ptp2priv(info); + int err = -EBUSY; + + mutex_lock(&priv->mutex); + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + if (priv->pin.func == PTP_PF_PEROUT) + err = bcm_ptp_perout_locked(priv, &rq->perout, on); + break; + case PTP_CLK_REQ_EXTTS: + if (priv->pin.func == PTP_PF_EXTTS) + err = bcm_ptp_extts_locked(priv, on); + break; + default: + err = -EOPNOTSUPP; + break; + } + + mutex_unlock(&priv->mutex); + + return err; +} + +static int bcm_ptp_verify(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static const struct ptp_clock_info bcm_ptp_clock_info = { + .owner = THIS_MODULE, + .name = KBUILD_MODNAME, + .max_adj = 100000000, + .gettimex64 = bcm_ptp_gettimex, + .settime64 = bcm_ptp_settime, + .adjtime = bcm_ptp_adjtime, + .adjfine = bcm_ptp_adjfine, + .enable = bcm_ptp_enable, + .verify = bcm_ptp_verify, + .do_aux_work = bcm_ptp_do_aux_work, + .n_pins = 1, + .n_per_out = 1, + .n_ext_ts = 1, +}; + +static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts, + struct sk_buff *skb, int type) +{ + struct bcm_ptp_private *priv = mii2priv(mii_ts); + struct ptp_header *hdr; + bool discard = false; + int msgtype; + + hdr = ptp_parse_header(skb, type); + if (!hdr) + goto out; + msgtype = ptp_get_msgtype(hdr, type); + + switch (priv->tx_type) { + case HWTSTAMP_TX_ONESTEP_P2P: + if (msgtype == PTP_MSGTYPE_PDELAY_RESP) + discard = true; + fallthrough; + case HWTSTAMP_TX_ONESTEP_SYNC: + if (msgtype == PTP_MSGTYPE_SYNC) + discard = true; + fallthrough; + case HWTSTAMP_TX_ON: + BCM_SKB_CB(skb)->timeout = jiffies + SKB_TS_TIMEOUT; + BCM_SKB_CB(skb)->seq_id = be16_to_cpu(hdr->sequence_id); + BCM_SKB_CB(skb)->msgtype = msgtype; + BCM_SKB_CB(skb)->discard = discard; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_queue_tail(&priv->tx_queue, skb); + 
ptp_schedule_worker(priv->ptp_clock, 0); + return; + default: + break; + } + +out: + kfree_skb(skb); +} + +static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts, + struct ifreq *ifr) +{ + struct bcm_ptp_private *priv = mii2priv(mii_ts); + struct hwtstamp_config cfg; + u16 mode, ctrl; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->hwts_rx = false; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + priv->hwts_rx = true; + break; + default: + return -ERANGE; + } + + priv->tx_type = cfg.tx_type; + + ctrl = priv->hwts_rx ? SLICE_RX_EN : 0; + ctrl |= priv->tx_type != HWTSTAMP_TX_OFF ? SLICE_TX_EN : 0; + + mode = TX_MODE_SEL(PORT, SYNC, REPLACE_TS) | + TX_MODE_SEL(PORT, DELAY_REQ, REPLACE_TS) | + TX_MODE_SEL(PORT, PDELAY_REQ, REPLACE_TS) | + TX_MODE_SEL(PORT, PDELAY_RESP, REPLACE_TS); + + bcm_phy_write_exp(priv->phydev, TX_EVENT_MODE, mode); + + mode = RX_MODE_SEL(PORT, SYNC, INSERT_TS_64) | + RX_MODE_SEL(PORT, DELAY_REQ, INSERT_TS_64) | + RX_MODE_SEL(PORT, PDELAY_REQ, INSERT_TS_64) | + RX_MODE_SEL(PORT, PDELAY_RESP, INSERT_TS_64); + + bcm_phy_write_exp(priv->phydev, RX_EVENT_MODE, mode); + + bcm_phy_write_exp(priv->phydev, SLICE_CTRL, ctrl); + + if (ctrl & SLICE_TX_EN) + bcm_phy_write_exp(priv->phydev, TX_TS_CAPTURE, TX_TS_CAP_EN); + else + ptp_cancel_worker_sync(priv->ptp_clock); + + /* purge existing data */ + skb_queue_purge(&priv->tx_queue); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0;
+}
+
+static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
+			   struct ethtool_ts_info *ts_info)
+{
+	struct bcm_ptp_private *priv = mii2priv(mii_ts);
+
+	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
+	ts_info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	ts_info->tx_types =
+		BIT(HWTSTAMP_TX_ON) |
+		BIT(HWTSTAMP_TX_OFF) |
+		BIT(HWTSTAMP_TX_ONESTEP_SYNC) |
+		BIT(HWTSTAMP_TX_ONESTEP_P2P);
+	ts_info->rx_filters =
+		BIT(HWTSTAMP_FILTER_NONE) |
+		BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+	return 0;
+}
+
+void bcm_ptp_stop(struct bcm_ptp_private *priv)
+{
+	ptp_cancel_worker_sync(priv->ptp_clock);
+	bcm_ptp_cancel_func(priv);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_stop);
+
+void bcm_ptp_config_init(struct phy_device *phydev)
+{
+	/* init network sync engine */
+	bcm_phy_write_exp(phydev, NSE_CTRL, NSE_GMODE_EN | NSE_INIT);
+
+	/* enable time sync (TX/RX SOP capture) */
+	bcm_phy_write_exp(phydev, TIME_SYNC, TIME_SYNC_EN);
+
+	/* use sec.nsec heartbeat capture */
+	bcm_phy_write_exp(phydev, DPLL_SELECT, DPLL_HB_MODE2);
+
+	/* use 64 bit timecode for TX */
+	bcm_phy_write_exp(phydev, TIMECODE_CTRL, TX_TIMECODE_SEL);
+
+	/* always allow FREQ_LOAD on framesync */
+	bcm_phy_write_exp(phydev, SHADOW_CTRL, FREQ_LOAD);
+
+	bcm_phy_write_exp(phydev, SYNC_IN_DIVIDER, 1);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_config_init);
+
+static void bcm_ptp_init(struct bcm_ptp_private *priv)
+{
+	priv->nse_ctrl = NSE_GMODE_EN;
+
+	mutex_init(&priv->mutex);
+	skb_queue_head_init(&priv->tx_queue);
+
+	priv->mii_ts.rxtstamp = bcm_ptp_rxtstamp;
+	priv->mii_ts.txtstamp = bcm_ptp_txtstamp;
+	priv->mii_ts.hwtstamp = bcm_ptp_hwtstamp;
+	priv->mii_ts.ts_info = bcm_ptp_ts_info;
+
+	priv->phydev->mii_ts = &priv->mii_ts;
+}
+
+struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
+{
+	struct bcm_ptp_private *priv;
+	struct ptp_clock *clock;
+
+	switch (BRCM_PHY_MODEL(phydev)) {
+	case PHY_ID_BCM54210E:
+#ifdef PHY_ID_BCM54213PE
+	case PHY_ID_BCM54213PE:
+#endif
+		break;
+	default:
+		return NULL;
+	}
+
+	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return ERR_PTR(-ENOMEM);
+
+	priv->ptp_info = bcm_ptp_clock_info;
+
+	snprintf(priv->pin.name, sizeof(priv->pin.name), "SYNC_OUT");
+	priv->ptp_info.pin_config = &priv->pin;
+
+	clock = ptp_clock_register(&priv->ptp_info, &phydev->mdio.dev);
+	if (IS_ERR(clock))
+		return ERR_CAST(clock);
+	priv->ptp_clock = clock;
+
+	priv->phydev = phydev;
+	bcm_ptp_init(priv);
+
+	return priv;
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_probe);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
new file mode 100644
index 0000000000..3cd9a77f95
--- /dev/null
+++ b/drivers/net/phy/dp83td510.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for the Texas Instruments DP83TD510 PHY
+ * Copyright (c) 2022 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ */
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83TD510E_PHY_ID			0x20000181
+
+/* MDIO_MMD_VEND2 registers */
+#define DP83TD510E_PHY_STS			0x10
+#define DP83TD510E_STS_MII_INT			BIT(7)
+#define DP83TD510E_LINK_STATUS			BIT(0)
+
+#define DP83TD510E_GEN_CFG			0x11
+#define DP83TD510E_GENCFG_INT_POLARITY		BIT(3)
+#define DP83TD510E_GENCFG_INT_EN		BIT(1)
+#define DP83TD510E_GENCFG_INT_OE		BIT(0)
+
+#define DP83TD510E_INTERRUPT_REG_1		0x12
+#define DP83TD510E_INT1_LINK			BIT(13)
+#define DP83TD510E_INT1_LINK_EN			BIT(5)
+
+#define DP83TD510E_AN_STAT_1			0x60c
+#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL	BIT(15)
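+
+/* Illustrative derivation, not part of the driver: the thresholds in
+ * dp83td510_mse_sqi_map below follow from inverting the SNR formula quoted
+ * there, SNR(dB) = -10 * log10(VAL / 2^17) - 1.76, which gives
+ *
+ *	VAL = 2^17 * 10^(-(SNR + 1.76) / 10)
+ *
+ * For the 18 dB boundary, for example:
+ *
+ *	VAL = 131072 * 10^(-1.976) ~= 1385 = 0x569
+ *
+ * which is the first table entry; an MSE reading at or above it therefore
+ * reads as SNR < 18 dB, i.e. SQI 0.
+ */
+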
+#define DP83TD510E_MSE_DETECT 0xa85 + +#define DP83TD510_SQI_MAX 7 + +/* Register values are converted to SNR(dB) as suggested by + * "Application Report - DP83TD510E Cable Diagnostics Toolkit": + * SNR(dB) = -10 * log10 (VAL/2^17) - 1.76 dB. + * SQI ranges are implemented according to "OPEN ALLIANCE - Advanced diagnostic + * features for 100BASE-T1 automotive Ethernet PHYs" + */ +static const u16 dp83td510_mse_sqi_map[] = { + 0x0569, /* < 18dB */ + 0x044c, /* 18dB =< SNR < 19dB */ + 0x0369, /* 19dB =< SNR < 20dB */ + 0x02b6, /* 20dB =< SNR < 21dB */ + 0x0227, /* 21dB =< SNR < 22dB */ + 0x01b6, /* 22dB =< SNR < 23dB */ + 0x015b, /* 23dB =< SNR < 24dB */ + 0x0000 /* 24dB =< SNR */ +}; + +static int dp83td510_config_intr(struct phy_device *phydev) +{ + int ret; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* Clear any pending interrupts */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS, + 0x0); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_INTERRUPT_REG_1, + DP83TD510E_INT1_LINK_EN); + if (ret) + return ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_GEN_CFG, + DP83TD510E_GENCFG_INT_POLARITY | + DP83TD510E_GENCFG_INT_EN | + DP83TD510E_GENCFG_INT_OE); + } else { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_INTERRUPT_REG_1, 0x0); + if (ret) + return ret; + + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_GEN_CFG, + DP83TD510E_GENCFG_INT_EN); + if (ret) + return ret; + + /* Clear any pending interrupts */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS, + 0x0); + } + + return ret; +} + +static irqreturn_t dp83td510_handle_interrupt(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS); + if (ret < 0) { + phy_error(phydev); + return IRQ_NONE; + } else if (!(ret & DP83TD510E_STS_MII_INT)) { + return IRQ_NONE; + } + + /* Read the current enabled interrupts */ + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1); + if (ret < 0) { + phy_error(phydev); + return IRQ_NONE; + } else if (!(ret & DP83TD510E_INT1_LINK_EN) || + !(ret & DP83TD510E_INT1_LINK)) { + return IRQ_NONE; + } + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static int dp83td510_read_status(struct phy_device *phydev) +{ + u16 phy_sts; + int ret; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + linkmode_zero(phydev->lp_advertising); + + phy_sts = phy_read(phydev, DP83TD510E_PHY_STS); + + phydev->link = !!(phy_sts & DP83TD510E_LINK_STATUS); + if (phydev->link) { + /* This PHY supports only one link mode: 10BaseT1L_Full */ + phydev->duplex = DUPLEX_FULL; + phydev->speed = SPEED_10; + + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = genphy_c45_read_lpa(phydev); + if (ret) + return ret; + + phy_resolve_aneg_linkmode(phydev); + } + } + + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = genphy_c45_baset1_read_status(phydev); + if (ret < 0) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_AN_STAT_1); + if (ret < 0) + return ret; + + if (ret & DP83TD510E_MASTER_SLAVE_RESOL_FAIL) + phydev->master_slave_state = MASTER_SLAVE_STATE_ERR; + } else { + return genphy_c45_pma_baset1_read_master_slave(phydev); + } + + return 0; +} + +static int dp83td510_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + int ret; + + ret = genphy_c45_pma_baset1_setup_master_slave(phydev); + if (ret < 0) + return ret; + + if 
(phydev->autoneg == AUTONEG_DISABLE)
+		return genphy_c45_an_disable_aneg(phydev);
+
+	ret = genphy_c45_an_config_aneg(phydev);
+	if (ret < 0)
+		return ret;
+	if (ret > 0)
+		changed = true;
+
+	return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int dp83td510_get_sqi(struct phy_device *phydev)
+{
+	int sqi, ret;
+	u16 mse_val;
+
+	if (!phydev->link)
+		return 0;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_MSE_DETECT);
+	if (ret < 0)
+		return ret;
+
+	mse_val = 0xFFFF & ret;
+	for (sqi = 0; sqi < ARRAY_SIZE(dp83td510_mse_sqi_map); sqi++) {
+		if (mse_val >= dp83td510_mse_sqi_map[sqi])
+			return sqi;
+	}
+
+	return -EINVAL;
+}
+
+static int dp83td510_get_sqi_max(struct phy_device *phydev)
+{
+	return DP83TD510_SQI_MAX;
+}
+
+static int dp83td510_get_features(struct phy_device *phydev)
+{
+	/* This PHY can't respond on the MDIO bus if no RMII clock is enabled.
+	 * In case RMII mode is used (the most meaningful mode for this PHY),
+	 * the PHY does not have its own XTAL, and the clock-providing MAC is
+	 * not probed, we won't be able to read all of the needed ability
+	 * registers, so provide them manually.
+	 */
+
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+			 phydev->supported);
+
+	return 0;
+}
+
+static struct phy_driver dp83td510_driver[] = {
+{
+	PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
+	.name = "TI DP83TD510E",
+
+	.config_aneg = dp83td510_config_aneg,
+	.read_status = dp83td510_read_status,
+	.get_features = dp83td510_get_features,
+	.config_intr = dp83td510_config_intr,
+	.handle_interrupt = dp83td510_handle_interrupt,
+	.get_sqi = dp83td510_get_sqi,
+	.get_sqi_max = dp83td510_get_sqi_max,
+
+	.suspend = genphy_suspend,
+	.resume = genphy_resume,
+} };
+module_phy_driver(dp83td510_driver);
+
+static struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
+	{ PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) },
+	{ }
+};
+MODULE_DEVICE_TABLE(mdio, dp83td510_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83TD510E PHY driver");
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
new file mode 100644
index 0000000000..1adf20ebef
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include "core.h" +#include "pcic.h" +#include "debug.h" + +static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { + "bhi", + "mhi-er0", + "mhi-er1", + "ce0", + "ce1", + "ce2", + "ce3", + "ce4", + "ce5", + "ce6", + "ce7", + "ce8", + "ce9", + "ce10", + "ce11", + "host2wbm-desc-feed", + "host2reo-re-injection", + "host2reo-command", + "host2rxdma-monitor-ring3", + "host2rxdma-monitor-ring2", + "host2rxdma-monitor-ring1", + "reo2ost-exception", + "wbm2host-rx-release", + "reo2host-status", + "reo2host-destination-ring4", + "reo2host-destination-ring3", + "reo2host-destination-ring2", + "reo2host-destination-ring1", + "rxdma2host-monitor-destination-mac3", + "rxdma2host-monitor-destination-mac2", + "rxdma2host-monitor-destination-mac1", + "ppdu-end-interrupts-mac3", + "ppdu-end-interrupts-mac2", + "ppdu-end-interrupts-mac1", + "rxdma2host-monitor-status-ring-mac3", + "rxdma2host-monitor-status-ring-mac2", + "rxdma2host-monitor-status-ring-mac1", + "host2rxdma-host-buf-ring-mac3", + "host2rxdma-host-buf-ring-mac2", + "host2rxdma-host-buf-ring-mac1", + "rxdma2host-destination-ring-mac3", + "rxdma2host-destination-ring-mac2", + "rxdma2host-destination-ring-mac1", + "host2tcl-input-ring4", + "host2tcl-input-ring3", + "host2tcl-input-ring2", + "host2tcl-input-ring1", + "wbm2host-tx-completions-ring3", + "wbm2host-tx-completions-ring2", + "wbm2host-tx-completions-ring1", + "tcl2host-status-ring", +}; + +static const struct ath11k_msi_config ath11k_msi_config[] = { + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_QCA6390_HW20, + }, + { + .total_vectors = 16, + .total_users = 3, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 5, .base_vector = 3 }, + { .name = "DP", .num_vectors = 8, .base_vector = 8 }, + }, + .hw_rev = ATH11K_HW_QCN9074_HW10, + }, + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_WCN6855_HW20, + }, + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_WCN6855_HW21, + }, + { + .total_vectors = 28, + .total_users = 2, + .users = (struct ath11k_msi_user[]) { + { .name = "CE", .num_vectors = 10, .base_vector = 0 }, + { .name = "DP", .num_vectors = 18, .base_vector = 10 }, + }, + .hw_rev = ATH11K_HW_WCN6750_HW10, + }, +}; + +int ath11k_pcic_init_msi_config(struct ath11k_base *ab) +{ + const struct ath11k_msi_config *msi_config; + int i; + + for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) { + msi_config = &ath11k_msi_config[i]; + + if (msi_config->hw_rev == ab->hw_rev) + break; + } + + if (i == ARRAY_SIZE(ath11k_msi_config)) { + ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n", + ab->hw_rev); + return 
-EINVAL; + } + + ab->pci.msi.config = msi_config; + return 0; +} +EXPORT_SYMBOL(ath11k_pcic_init_msi_config); + +void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) +{ + int ret = 0; + + /* for offset beyond BAR + 4K - 32, may + * need to wakeup the device to access. + */ + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup) + ret = ab->pci.ops->wakeup(ab); + + if (offset < ATH11K_PCI_WINDOW_START) + iowrite32(value, ab->mem + offset); + else + ab->pci.ops->window_write32(ab, offset, value); + + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release && + !ret) + ab->pci.ops->release(ab); +} +EXPORT_SYMBOL(ath11k_pcic_write32); + +u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) +{ + int ret = 0; + u32 val; + + /* for offset beyond BAR + 4K - 32, may + * need to wakeup the device to access. + */ + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup) + ret = ab->pci.ops->wakeup(ab); + + if (offset < ATH11K_PCI_WINDOW_START) + val = ioread32(ab->mem + offset); + else + val = ab->pci.ops->window_read32(ab, offset); + + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release && + !ret) + ab->pci.ops->release(ab); + + return val; +} +EXPORT_SYMBOL(ath11k_pcic_read32); + +void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi) +{ + *msi_addr_lo = ab->pci.msi.addr_lo; + *msi_addr_hi = ab->pci.msi.addr_hi; +} +EXPORT_SYMBOL(ath11k_pcic_get_msi_address); + +int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector) +{ + const struct ath11k_msi_config *msi_config = ab->pci.msi.config; + int idx; + + for (idx = 0; idx < msi_config->total_users; idx++) { + if (strcmp(user_name, msi_config->users[idx].name) == 0) { + *num_vectors = msi_config->users[idx].num_vectors; + *base_vector = msi_config->users[idx].base_vector; + *user_base_data = *base_vector + ab->pci.msi.ep_base_data; + + ath11k_dbg(ab, ATH11K_DBG_PCI, + "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", + user_name, *num_vectors, *user_base_data, + *base_vector); + + return 0; + } + } + + ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); + + return -EINVAL; +} +EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment); + +void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) +{ + u32 i, msi_data_idx; + + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + if (ce_id == i) + break; + + msi_data_idx++; + } + *msi_idx = msi_data_idx; +} +EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx); + +static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab) +{ + int i, j; + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) + free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + + netif_napi_del(&irq_grp->napi); + } +} + +void ath11k_pcic_free_irq(struct ath11k_base *ab) +{ + int i, irq_idx; + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; + 
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); + } + + ath11k_pcic_free_ext_irq(ab); +} +EXPORT_SYMBOL(ath11k_pcic_free_irq); + +static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) +{ + u32 irq_idx; + + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + return; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; + enable_irq(ab->irq_num[irq_idx]); +} + +static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) +{ + u32 irq_idx; + + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + return; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; + disable_irq_nosync(ab->irq_num[irq_idx]); +} + +static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab) +{ + int i; + + clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath11k_pcic_ce_irq_disable(ab, i); + } +} + +static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab) +{ + int i; + int irq_idx; + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; + synchronize_irq(ab->irq_num[irq_idx]); + } +} + +static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t) +{ + struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); + int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; + + ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); + + enable_irq(ce_pipe->ab->irq_num[irq_idx]); +} + +static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg) +{ + struct ath11k_ce_pipe *ce_pipe = arg; + struct ath11k_base *ab = ce_pipe->ab; + int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; + + if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) + return IRQ_HANDLED; + + /* last interrupt received for this CE */ + ce_pipe->timestamp = jiffies; + + disable_irq_nosync(ab->irq_num[irq_idx]); + + tasklet_schedule(&ce_pipe->intr_tq); + + return IRQ_HANDLED; +} + +static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) +{ + struct ath11k_base *ab = irq_grp->ab; + int i; + + /* In case of one MSI vector, we handle irq enable/disable + * in a uniform way since we only have one irq + */ + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + return; + + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc) +{ + int i; + + clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags); + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + + ath11k_pcic_ext_grp_disable(irq_grp); + + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } + } +} + +static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) +{ + struct ath11k_base *ab = irq_grp->ab; + int i; + + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + return; + + for (i = 0; i < irq_grp->num_irq; i++) + 
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab) +{ + int i; + + set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } + ath11k_pcic_ext_grp_enable(irq_grp); + } +} +EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable); + +static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab) +{ + int i, j, irq_idx; + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) { + irq_idx = irq_grp->irqs[j]; + synchronize_irq(ab->irq_num[irq_idx]); + } + } +} + +void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab) +{ + __ath11k_pcic_ext_irq_disable(ab); + ath11k_pcic_sync_ext_irqs(ab); +} +EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable); + +static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget) +{ + struct ath11k_ext_irq_grp *irq_grp = container_of(napi, + struct ath11k_ext_irq_grp, + napi); + struct ath11k_base *ab = irq_grp->ab; + int work_done; + int i; + + work_done = ath11k_dp_service_srng(ab, irq_grp, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); + } + + if (work_done > budget) + work_done = budget; + + return work_done; +} + +static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg) +{ + struct ath11k_ext_irq_grp *irq_grp = arg; + struct ath11k_base *ab = irq_grp->ab; + int i; + + if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) + return IRQ_HANDLED; + + ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); + + /* last interrupt received for this group */ + irq_grp->timestamp = jiffies; + + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); + + napi_schedule(&irq_grp->napi); + + return IRQ_HANDLED; +} + +static int +ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector) +{ + return ab->pci.ops->get_msi_irq(ab, vector); +} + +static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) +{ + int i, j, ret, num_vectors = 0; + u32 user_base_data = 0, base_vector = 0; + unsigned long irq_flags; + + ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors, + &user_base_data, + &base_vector); + if (ret < 0) + return ret; + + irq_flags = IRQF_SHARED; + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + irq_flags |= IRQF_NOBALANCING; + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + u32 num_irq = 0; + + irq_grp->ab = ab; + irq_grp->grp_id = i; + init_dummy_netdev(&irq_grp->napi_ndev); + netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + ath11k_pcic_ext_grp_napi_poll, NAPI_POLL_WEIGHT); + + if (ab->hw_params.ring_mask->tx[i] || + ab->hw_params.ring_mask->rx[i] || + ab->hw_params.ring_mask->rx_err[i] || + ab->hw_params.ring_mask->rx_wbm_rel[i] || + ab->hw_params.ring_mask->reo_status[i] || + ab->hw_params.ring_mask->rxdma2host[i] || + ab->hw_params.ring_mask->host2rxdma[i] || + ab->hw_params.ring_mask->rx_mon_status[i]) { + num_irq = 1; + } + + irq_grp->num_irq = num_irq; + irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i; + + for (j = 0; j < irq_grp->num_irq; j++) { + int irq_idx = irq_grp->irqs[j]; + int vector 
= (i % num_vectors) + base_vector; + int irq = ath11k_pcic_get_msi_irq(ab, vector); + + if (irq < 0) + return irq; + + ab->irq_num[irq_idx] = irq; + + ath11k_dbg(ab, ATH11K_DBG_PCI, + "irq:%d group:%d\n", irq, i); + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler, + irq_flags, "DP_EXT_IRQ", irq_grp); + if (ret) { + ath11k_err(ab, "failed request irq %d: %d\n", + vector, ret); + return ret; + } + } + ath11k_pcic_ext_grp_disable(irq_grp); + } + + return 0; +} + +int ath11k_pcic_config_irq(struct ath11k_base *ab) +{ + struct ath11k_ce_pipe *ce_pipe; + u32 msi_data_start; + u32 msi_data_count, msi_data_idx; + u32 msi_irq_start; + unsigned int msi_data; + int irq, i, ret, irq_idx; + unsigned long irq_flags; + + ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count, + &msi_data_start, &msi_irq_start); + if (ret) + return ret; + + irq_flags = IRQF_SHARED; + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + irq_flags |= IRQF_NOBALANCING; + + /* Configure CE irqs */ + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; + irq = ath11k_pcic_get_msi_irq(ab, msi_data); + if (irq < 0) + return irq; + + ce_pipe = &ab->ce.ce_pipe[i]; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; + + tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet); + + ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler, + irq_flags, irq_name[irq_idx], ce_pipe); + if (ret) { + ath11k_err(ab, "failed to request irq %d: %d\n", + irq_idx, ret); + return ret; + } + + ab->irq_num[irq_idx] = irq; + msi_data_idx++; + + ath11k_pcic_ce_irq_disable(ab, i); + } + + ret = ath11k_pcic_ext_irq_config(ab); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(ath11k_pcic_config_irq); + +void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab) +{ + int i; + + set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath11k_pcic_ce_irq_enable(ab, i); + } +} +EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable); + +static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params.ce_count; i++) { + struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; + + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + tasklet_kill(&ce_pipe->intr_tq); + } +} + +void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab) +{ + ath11k_pcic_ce_irqs_disable(ab); + ath11k_pcic_sync_ce_irqs(ab); + ath11k_pcic_kill_tasklets(ab); +} +EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync); + +void ath11k_pcic_stop(struct ath11k_base *ab) +{ + ath11k_pcic_ce_irq_disable_sync(ab); + ath11k_ce_cleanup_pipes(ab); +} +EXPORT_SYMBOL(ath11k_pcic_stop); + +int ath11k_pcic_start(struct ath11k_base *ab) +{ + set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); + + ath11k_pcic_ce_irqs_enable(ab); + ath11k_ce_rx_post_buf(ab); + + return 0; +} +EXPORT_SYMBOL(ath11k_pcic_start); + +int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { + entry = &ab->hw_params.svc_to_ce_map[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + 
case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (WARN_ON(!ul_set || !dl_set)) + return -ENOENT; + + return 0; +} +EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe); + +int ath11k_pcic_register_pci_ops(struct ath11k_base *ab, + const struct ath11k_pci_ops *pci_ops) +{ + if (!pci_ops) + return 0; + + /* Return error if mandatory pci_ops callbacks are missing */ + if (!pci_ops->get_msi_irq || !pci_ops->window_write32 || + !pci_ops->window_read32) + return -EINVAL; + + ab->pci.ops = pci_ops; + return 0; +} +EXPORT_SYMBOL(ath11k_pcic_register_pci_ops); diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h new file mode 100644 index 0000000000..0afbb34510 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/pcic.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _ATH11K_PCI_CMN_H +#define _ATH11K_PCI_CMN_H + +#include "core.h" + +#define ATH11K_PCI_IRQ_CE0_OFFSET 3 +#define ATH11K_PCI_IRQ_DP_OFFSET 14 + +#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000 +#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c +#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19) +#define ATH11K_PCI_WINDOW_START 0x80000 +#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0) + +/* BAR0 + 4k is always accessible, and no + * need to force wakeup. 
+ * 4K - 32 = 0xFE0 + */ +#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0 + +int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector); +void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value); +u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset); +void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi); +void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx); +void ath11k_pcic_free_irq(struct ath11k_base *ab); +int ath11k_pcic_config_irq(struct ath11k_base *ab); +void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab); +void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab); +void ath11k_pcic_stop(struct ath11k_base *ab); +int ath11k_pcic_start(struct ath11k_base *ab); +int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab); +void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab); +int ath11k_pcic_init_msi_config(struct ath11k_base *ab); +int ath11k_pcic_register_pci_ops(struct ath11k_base *ab, + const struct ath11k_pci_ops *pci_ops); +#endif diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.c b/drivers/net/wireless/ath/wcn36xx/firmware.c new file mode 100644 index 0000000000..4b7f439e4d --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/firmware.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "wcn36xx.h" +#include "firmware.h" + +#define DEFINE(s)[s] = #s + +static const char * const wcn36xx_firmware_caps_names[] = { + DEFINE(MCC), + DEFINE(P2P), + DEFINE(DOT11AC), + DEFINE(SLM_SESSIONIZATION), + DEFINE(DOT11AC_OPMODE), + DEFINE(SAP32STA), + DEFINE(TDLS), + DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN), + DEFINE(WLANACTIVE_OFFLOAD), + DEFINE(BEACON_OFFLOAD), + DEFINE(SCAN_OFFLOAD), + DEFINE(ROAM_OFFLOAD), + DEFINE(BCN_MISS_OFFLOAD), + DEFINE(STA_POWERSAVE), + DEFINE(STA_ADVANCED_PWRSAVE), + DEFINE(AP_UAPSD), + DEFINE(AP_DFS), + DEFINE(BLOCKACK), + DEFINE(PHY_ERR), + DEFINE(BCN_FILTER), + DEFINE(RTT), + DEFINE(RATECTRL), + DEFINE(WOW), + DEFINE(WLAN_ROAM_SCAN_OFFLOAD), + DEFINE(SPECULATIVE_PS_POLL), + DEFINE(SCAN_SCH), + DEFINE(IBSS_HEARTBEAT_OFFLOAD), + DEFINE(WLAN_SCAN_OFFLOAD), + DEFINE(WLAN_PERIODIC_TX_PTRN), + DEFINE(ADVANCE_TDLS), + DEFINE(BATCH_SCAN), + DEFINE(FW_IN_TX_PATH), + DEFINE(EXTENDED_NSOFFLOAD_SLOT), + DEFINE(CH_SWITCH_V1), + DEFINE(HT40_OBSS_SCAN), + DEFINE(UPDATE_CHANNEL_LIST), + DEFINE(WLAN_MCADDR_FLT), + DEFINE(WLAN_CH144), + DEFINE(NAN), + DEFINE(TDLS_SCAN_COEXISTENCE), + DEFINE(LINK_LAYER_STATS_MEAS), + DEFINE(MU_MIMO), + DEFINE(EXTENDED_SCAN), + DEFINE(DYNAMIC_WMM_PS), + DEFINE(MAC_SPOOFED_SCAN), + DEFINE(BMU_ERROR_GENERIC_RECOVERY), + DEFINE(DISA), + DEFINE(FW_STATS), + DEFINE(WPS_PRBRSP_TMPL), + DEFINE(BCN_IE_FLT_DELTA), + DEFINE(TDLS_OFF_CHANNEL), + DEFINE(RTT3), + DEFINE(MGMT_FRAME_LOGGING), + DEFINE(ENHANCED_TXBD_COMPLETION), + DEFINE(LOGGING_ENHANCEMENT), + DEFINE(EXT_SCAN_ENHANCED), + DEFINE(MEMORY_DUMP_SUPPORTED), + DEFINE(PER_PKT_STATS_SUPPORTED), + DEFINE(EXT_LL_STAT), + DEFINE(WIFI_CONFIG), + DEFINE(ANTENNA_DIVERSITY_SELECTION), +}; + +#undef DEFINE + +const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x) +{ + if (x >= ARRAY_SIZE(wcn36xx_firmware_caps_names)) + return "UNKNOWN"; + return wcn36xx_firmware_caps_names[x]; +} + +void wcn36xx_firmware_set_feat_caps(u32 *bitmap, + enum wcn36xx_firmware_feat_caps cap) +{ + int arr_idx, 
bit_idx; + + if (cap < 0 || cap > 127) { + wcn36xx_warn("error cap idx %d\n", cap); + return; + } + + arr_idx = cap / 32; + bit_idx = cap % 32; + bitmap[arr_idx] |= (1 << bit_idx); +} + +int wcn36xx_firmware_get_feat_caps(u32 *bitmap, + enum wcn36xx_firmware_feat_caps cap) +{ + int arr_idx, bit_idx; + + if (cap < 0 || cap > 127) { + wcn36xx_warn("error cap idx %d\n", cap); + return -EINVAL; + } + + arr_idx = cap / 32; + bit_idx = cap % 32; + + return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0; +} + +void wcn36xx_firmware_clear_feat_caps(u32 *bitmap, + enum wcn36xx_firmware_feat_caps cap) +{ + int arr_idx, bit_idx; + + if (cap < 0 || cap > 127) { + wcn36xx_warn("error cap idx %d\n", cap); + return; + } + + arr_idx = cap / 32; + bit_idx = cap % 32; + bitmap[arr_idx] &= ~(1 << bit_idx); +} diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.h b/drivers/net/wireless/ath/wcn36xx/firmware.h new file mode 100644 index 0000000000..f991cf959f --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/firmware.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _FIRMWARE_H_ +#define _FIRMWARE_H_ + +/* Capability bitmap exchange definitions and macros starts */ + +enum wcn36xx_firmware_feat_caps { + MCC = 0, + P2P = 1, + DOT11AC = 2, + SLM_SESSIONIZATION = 3, + DOT11AC_OPMODE = 4, + SAP32STA = 5, + TDLS = 6, + P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7, + WLANACTIVE_OFFLOAD = 8, + BEACON_OFFLOAD = 9, + SCAN_OFFLOAD = 10, + ROAM_OFFLOAD = 11, + BCN_MISS_OFFLOAD = 12, + STA_POWERSAVE = 13, + STA_ADVANCED_PWRSAVE = 14, + AP_UAPSD = 15, + AP_DFS = 16, + BLOCKACK = 17, + PHY_ERR = 18, + BCN_FILTER = 19, + RTT = 20, + RATECTRL = 21, + WOW = 22, + WLAN_ROAM_SCAN_OFFLOAD = 23, + SPECULATIVE_PS_POLL = 24, + SCAN_SCH = 25, + IBSS_HEARTBEAT_OFFLOAD = 26, + WLAN_SCAN_OFFLOAD = 27, + WLAN_PERIODIC_TX_PTRN = 28, + ADVANCE_TDLS = 29, + BATCH_SCAN = 30, + FW_IN_TX_PATH = 31, + EXTENDED_NSOFFLOAD_SLOT = 32, + CH_SWITCH_V1 = 33, + HT40_OBSS_SCAN = 34, + UPDATE_CHANNEL_LIST = 35, + WLAN_MCADDR_FLT = 36, + WLAN_CH144 = 37, + NAN = 38, + TDLS_SCAN_COEXISTENCE = 39, + LINK_LAYER_STATS_MEAS = 40, + MU_MIMO = 41, + EXTENDED_SCAN = 42, + DYNAMIC_WMM_PS = 43, + MAC_SPOOFED_SCAN = 44, + BMU_ERROR_GENERIC_RECOVERY = 45, + DISA = 46, + FW_STATS = 47, + WPS_PRBRSP_TMPL = 48, + BCN_IE_FLT_DELTA = 49, + TDLS_OFF_CHANNEL = 51, + RTT3 = 52, + MGMT_FRAME_LOGGING = 53, + ENHANCED_TXBD_COMPLETION = 54, + LOGGING_ENHANCEMENT = 55, + EXT_SCAN_ENHANCED = 56, + MEMORY_DUMP_SUPPORTED = 57, + PER_PKT_STATS_SUPPORTED = 58, + EXT_LL_STAT = 60, + WIFI_CONFIG = 61, + ANTENNA_DIVERSITY_SELECTION = 62, + + MAX_FEATURE_SUPPORTED = 128, +}; + +void wcn36xx_firmware_set_feat_caps(u32 *bitmap, + enum wcn36xx_firmware_feat_caps cap); +int wcn36xx_firmware_get_feat_caps(u32 *bitmap, + enum wcn36xx_firmware_feat_caps cap); +void wcn36xx_firmware_clear_feat_caps(u32 *bitmap, + enum wcn36xx_firmware_feat_caps cap); + +const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x); + +#endif /* _FIRMWARE_H_ */ + diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h new file mode 100644 index 0000000000..67ce216fb5 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2022 MediaTek Inc. 
*/ + +#ifndef __MT76_CONNAC2_MAC_H +#define __MT76_CONNAC2_MAC_H + +enum tx_header_format { + MT_HDR_FORMAT_802_3, + MT_HDR_FORMAT_CMD, + MT_HDR_FORMAT_802_11, + MT_HDR_FORMAT_802_11_EXT, +}; + +enum tx_pkt_type { + MT_TX_TYPE_CT, + MT_TX_TYPE_SF, + MT_TX_TYPE_CMD, + MT_TX_TYPE_FW, +}; + +enum { + MT_CTX0, + MT_HIF0 = 0x0, + + MT_LMAC_AC00 = 0x0, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, + MT_LMAC_PSMP0, +}; + +#define MT_TXD0_Q_IDX GENMASK(31, 25) +#define MT_TXD0_PKT_FMT GENMASK(24, 23) +#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16) +#define MT_TXD0_TX_BYTES GENMASK(15, 0) + +#define MT_TXD1_LONG_FORMAT BIT(31) +#define MT_TXD1_TGID BIT(30) +#define MT_TXD1_OWN_MAC GENMASK(29, 24) +#define MT_TXD1_AMSDU BIT(23) +#define MT_TXD1_TID GENMASK(22, 20) +#define MT_TXD1_HDR_PAD GENMASK(19, 18) +#define MT_TXD1_HDR_FORMAT GENMASK(17, 16) +#define MT_TXD1_HDR_INFO GENMASK(15, 11) +#define MT_TXD1_ETH_802_3 BIT(15) +#define MT_TXD1_VTA BIT(10) +#define MT_TXD1_WLAN_IDX GENMASK(9, 0) + +#define MT_TXD2_FIX_RATE BIT(31) +#define MT_TXD2_FIXED_RATE BIT(30) +#define MT_TXD2_POWER_OFFSET GENMASK(29, 24) +#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16) +#define MT_TXD2_FRAG GENMASK(15, 14) +#define MT_TXD2_HTC_VLD BIT(13) +#define MT_TXD2_DURATION BIT(12) +#define MT_TXD2_BIP BIT(11) +#define MT_TXD2_MULTICAST BIT(10) +#define MT_TXD2_RTS BIT(9) +#define MT_TXD2_SOUNDING BIT(8) +#define MT_TXD2_NDPA BIT(7) +#define MT_TXD2_NDP BIT(6) +#define MT_TXD2_FRAME_TYPE GENMASK(5, 4) +#define MT_TXD2_SUB_TYPE GENMASK(3, 0) + +#define MT_TXD3_SN_VALID BIT(31) +#define MT_TXD3_PN_VALID BIT(30) +#define MT_TXD3_SW_POWER_MGMT BIT(29) +#define MT_TXD3_BA_DISABLE BIT(28) +#define MT_TXD3_SEQ GENMASK(27, 16) +#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11) +#define MT_TXD3_TX_COUNT GENMASK(10, 6) +#define MT_TXD3_TIMING_MEASURE BIT(5) +#define MT_TXD3_DAS BIT(4) +#define MT_TXD3_EEOSP BIT(3) +#define MT_TXD3_EMRD BIT(2) +#define MT_TXD3_PROTECT_FRAME BIT(1) +#define MT_TXD3_NO_ACK BIT(0) + +#define MT_TXD4_PN_LOW GENMASK(31, 0) + +#define MT_TXD5_PN_HIGH GENMASK(31, 16) +#define MT_TXD5_MD BIT(15) +#define MT_TXD5_ADD_BA BIT(14) +#define MT_TXD5_TX_STATUS_HOST BIT(10) +#define MT_TXD5_TX_STATUS_MCU BIT(9) +#define MT_TXD5_TX_STATUS_FMT BIT(8) +#define MT_TXD5_PID GENMASK(7, 0) + +#define MT_TXD6_TX_IBF BIT(31) +#define MT_TXD6_TX_EBF BIT(30) +#define MT_TXD6_TX_RATE GENMASK(29, 16) +#define MT_TXD6_SGI GENMASK(15, 14) +#define MT_TXD6_HELTF GENMASK(13, 12) +#define MT_TXD6_LDPC BIT(11) +#define MT_TXD6_SPE_ID_IDX BIT(10) +#define MT_TXD6_ANT_ID GENMASK(7, 4) +#define MT_TXD6_DYN_BW BIT(3) +#define MT_TXD6_FIXED_BW BIT(2) +#define MT_TXD6_BW GENMASK(1, 0) + +#define MT_TXD7_TXD_LEN GENMASK(31, 30) +#define MT_TXD7_UDP_TCP_SUM BIT(29) +#define MT_TXD7_IP_SUM BIT(28) +#define MT_TXD7_TYPE GENMASK(21, 20) +#define MT_TXD7_SUB_TYPE GENMASK(19, 16) + +#define MT_TXD7_PSE_FID GENMASK(27, 16) +#define MT_TXD7_SPE_IDX GENMASK(15, 11) +#define MT_TXD7_HW_AMSDU BIT(10) +#define MT_TXD7_TX_TIME GENMASK(9, 0) + +#define MT_TXD8_L_TYPE GENMASK(5, 4) +#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0) + +#define MT_TX_RATE_STBC BIT(13) +#define MT_TX_RATE_NSS GENMASK(12, 10) +#define MT_TX_RATE_MODE GENMASK(9, 6) +#define MT_TX_RATE_SU_EXT_TONE BIT(5) +#define MT_TX_RATE_DCM BIT(4) +/* VHT/HE only use bits 0-3 */ +#define MT_TX_RATE_IDX GENMASK(5, 0) + +#define MT_TXS0_FIXED_RATE BIT(31) +#define MT_TXS0_BW GENMASK(30, 29) +#define MT_TXS0_TID GENMASK(28, 26) +#define MT_TXS0_AMPDU 
BIT(25) +#define MT_TXS0_TXS_FORMAT GENMASK(24, 23) +#define MT_TXS0_BA_ERROR BIT(22) +#define MT_TXS0_PS_FLAG BIT(21) +#define MT_TXS0_TXOP_TIMEOUT BIT(20) +#define MT_TXS0_BIP_ERROR BIT(19) + +#define MT_TXS0_QUEUE_TIMEOUT BIT(18) +#define MT_TXS0_RTS_TIMEOUT BIT(17) +#define MT_TXS0_ACK_TIMEOUT BIT(16) +#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16) + +#define MT_TXS0_TX_STATUS_HOST BIT(15) +#define MT_TXS0_TX_STATUS_MCU BIT(14) +#define MT_TXS0_TX_RATE GENMASK(13, 0) + +#define MT_TXS1_SEQNO GENMASK(31, 20) +#define MT_TXS1_RESP_RATE GENMASK(19, 16) +#define MT_TXS1_RXV_SEQNO GENMASK(15, 8) +#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0) + +#define MT_TXS2_BF_STATUS GENMASK(31, 30) +#define MT_TXS2_LAST_TX_RATE GENMASK(29, 27) +#define MT_TXS2_SHARED_ANTENNA BIT(26) +#define MT_TXS2_WCID GENMASK(25, 16) +#define MT_TXS2_TX_DELAY GENMASK(15, 0) + +#define MT_TXS3_PID GENMASK(31, 24) +#define MT_TXS3_ANT_ID GENMASK(23, 0) + +#define MT_TXS4_TIMESTAMP GENMASK(31, 0) + +/* RXD DW1 */ +#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0) +#define MT_RXD1_NORMAL_GROUP_1 BIT(11) +#define MT_RXD1_NORMAL_GROUP_2 BIT(12) +#define MT_RXD1_NORMAL_GROUP_3 BIT(13) +#define MT_RXD1_NORMAL_GROUP_4 BIT(14) +#define MT_RXD1_NORMAL_GROUP_5 BIT(15) +#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16) +#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21) +#define MT_RXD1_NORMAL_CM BIT(23) +#define MT_RXD1_NORMAL_CLM BIT(24) +#define MT_RXD1_NORMAL_ICV_ERR BIT(25) +#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26) +#define MT_RXD1_NORMAL_FCS_ERR BIT(27) +#define MT_RXD1_NORMAL_BAND_IDX BIT(28) +#define MT_RXD1_NORMAL_SPP_EN BIT(29) +#define MT_RXD1_NORMAL_ADD_OM BIT(30) +#define MT_RXD1_NORMAL_SEC_DONE BIT(31) + +/* RXD DW2 */ +#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0) +#define MT_RXD2_NORMAL_CO_ANT BIT(6) +#define MT_RXD2_NORMAL_BF_CQI BIT(7) +#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8) +#define MT_RXD2_NORMAL_HDR_TRANS BIT(13) +#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14) +#define MT_RXD2_NORMAL_TID GENMASK(19, 16) +#define MT_RXD2_NORMAL_MU_BAR BIT(21) +#define MT_RXD2_NORMAL_SW_BIT BIT(22) +#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23) +#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24) +#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25) +#define MT_RXD2_NORMAL_INT_FRAME BIT(26) +#define MT_RXD2_NORMAL_FRAG BIT(27) +#define MT_RXD2_NORMAL_NULL_FRAME BIT(28) +#define MT_RXD2_NORMAL_NDATA BIT(29) +#define MT_RXD2_NORMAL_NON_AMPDU BIT(30) +#define MT_RXD2_NORMAL_BF_REPORT BIT(31) + +/* RXD DW4 */ +#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0) +#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0) +#define MT_RXD4_MID_AMSDU_FRAME BIT(1) +#define MT_RXD4_LAST_AMSDU_FRAME BIT(0) +#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9) +#define MT_RXD4_NORMAL_CLS BIT(10) +#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11) +#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13) +#define MT_RXD4_NORMAL_WOL GENMASK(18, 14) +#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19) +#define MT_RXD3_NORMAL_PF_MODE BIT(29) +#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30) + +#define MT_RXV_HDR_BAND_IDX BIT(24) + +/* RXD DW3 */ +#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0) +#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8) +#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16) +#define MT_RXD3_NORMAL_U2M BIT(0) +#define MT_RXD3_NORMAL_HTC_VLD BIT(0) +#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19) +#define MT_RXD3_NORMAL_BEACON_MC BIT(20) +#define MT_RXD3_NORMAL_BEACON_UC BIT(21) +#define MT_RXD3_NORMAL_AMSDU BIT(22) +#define MT_RXD3_NORMAL_MESH BIT(23) +#define MT_RXD3_NORMAL_MHCP BIT(24) +#define 
MT_RXD3_NORMAL_NO_INFO_WB	BIT(25)
+#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS	BIT(26)
+#define MT_RXD3_NORMAL_POWER_SAVE_STAT	BIT(27)
+#define MT_RXD3_NORMAL_MORE	BIT(28)
+#define MT_RXD3_NORMAL_UNWANT	BIT(29)
+#define MT_RXD3_NORMAL_RX_DROP	BIT(30)
+#define MT_RXD3_NORMAL_VLAN2ETH	BIT(31)
+
+/* RXD GROUP4 */
+#define MT_RXD6_FRAME_CONTROL	GENMASK(15, 0)
+#define MT_RXD6_TA_LO		GENMASK(31, 16)
+
+#define MT_RXD7_TA_HI		GENMASK(31, 0)
+
+#define MT_RXD8_SEQ_CTRL	GENMASK(15, 0)
+#define MT_RXD8_QOS_CTL		GENMASK(31, 16)
+
+#define MT_RXD9_HT_CONTROL	GENMASK(31, 0)
+
+/* P-RXV DW0 */
+#define MT_PRXV_TX_RATE		GENMASK(6, 0)
+#define MT_PRXV_TX_DCM		BIT(4)
+#define MT_PRXV_TX_ER_SU_106T	BIT(5)
+#define MT_PRXV_NSTS		GENMASK(9, 7)
+#define MT_PRXV_TXBF		BIT(10)
+#define MT_PRXV_HT_AD_CODE	BIT(11)
+#define MT_PRXV_HE_RU_ALLOC_L	GENMASK(31, 28)
+
+#define MT_PRXV_FRAME_MODE	GENMASK(14, 12)
+#define MT_PRXV_HT_SGI		GENMASK(16, 15)
+#define MT_PRXV_HT_STBC		GENMASK(23, 22)
+#define MT_PRXV_TX_MODE		GENMASK(27, 24)
+#define MT_PRXV_DCM		BIT(17)
+#define MT_PRXV_NUM_RX		GENMASK(20, 18)
+
+/* P-RXV DW1 */
+#define MT_PRXV_RCPI3		GENMASK(31, 24)
+#define MT_PRXV_RCPI2		GENMASK(23, 16)
+#define MT_PRXV_RCPI1		GENMASK(15, 8)
+#define MT_PRXV_RCPI0		GENMASK(7, 0)
+#define MT_PRXV_HE_RU_ALLOC_H	GENMASK(3, 0)
+
+/* C-RXV */
+#define MT_CRXV_HT_STBC		GENMASK(1, 0)
+#define MT_CRXV_TX_MODE		GENMASK(7, 4)
+#define MT_CRXV_FRAME_MODE	GENMASK(10, 8)
+#define MT_CRXV_HT_SHORT_GI	GENMASK(14, 13)
+#define MT_CRXV_HE_LTF_SIZE	GENMASK(18, 17)
+#define MT_CRXV_HE_LDPC_EXT_SYM	BIT(20)
+#define MT_CRXV_HE_PE_DISAMBIG	BIT(23)
+#define MT_CRXV_HE_NUM_USER	GENMASK(30, 24)
+#define MT_CRXV_HE_UPLINK	BIT(31)
+
+#define MT_CRXV_HE_RU0		GENMASK(7, 0)
+#define MT_CRXV_HE_RU1		GENMASK(15, 8)
+#define MT_CRXV_HE_RU2		GENMASK(23, 16)
+#define MT_CRXV_HE_RU3		GENMASK(31, 24)
+
+#define MT_CRXV_HE_MU_AID	GENMASK(30, 20)
+
+#define MT_CRXV_HE_SR_MASK	GENMASK(11, 8)
+#define MT_CRXV_HE_SR1_MASK	GENMASK(16, 12)
+#define MT_CRXV_HE_SR2_MASK	GENMASK(20, 17)
+#define MT_CRXV_HE_SR3_MASK	GENMASK(24, 21)
+
+#define MT_CRXV_HE_BSS_COLOR	GENMASK(5, 0)
+#define MT_CRXV_HE_TXOP_DUR	GENMASK(12, 6)
+#define MT_CRXV_HE_BEAM_CHNG	BIT(13)
+#define MT_CRXV_HE_DOPPLER	BIT(16)
+
+#define MT_CRXV_SNR		GENMASK(18, 13)
+#define MT_CRXV_FOE_LO		GENMASK(31, 19)
+#define MT_CRXV_FOE_HI		GENMASK(6, 0)
+#define MT_CRXV_FOE_SHIFT	13
+
+#define MT_CT_INFO_APPLY_TXD		BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL	BIT(1)
+#define MT_CT_INFO_MGMT_FRAME		BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME	BIT(3)
+#define MT_CT_INFO_HSR2_TX		BIT(4)
+#define MT_CT_INFO_FROM_HOST		BIT(7)
+
+enum tx_mcu_port_q_idx {
+	MT_TX_MCU_PORT_RX_Q0 = 0x20,
+	MT_TX_MCU_PORT_RX_Q1,
+	MT_TX_MCU_PORT_RX_Q2,
+	MT_TX_MCU_PORT_RX_Q3,
+	MT_TX_MCU_PORT_RX_FWDL = 0x3e
+};
+
+enum tx_port_idx {
+	MT_TX_PORT_IDX_LMAC,
+	MT_TX_PORT_IDX_MCU
+};
+
+#endif /* __MT76_CONNAC2_MAC_H */
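The TXD/TXS/RXD/RXV words above are little-endian 32-bit descriptor words; consumers extract the individual fields with the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, which is why the fields are declared as GENMASK()/BIT() masks rather than shift counts. A minimal sketch of such a consumer (the helper name and signature are illustrative, not part of this series):

    #include <linux/bitfield.h>

    /* Pull the queue index and packet format out of TXD word 0. */
    static void parse_txd0(__le32 txd0, u32 *q_idx, u32 *pkt_fmt)
    {
            u32 v = le32_to_cpu(txd0);

            *q_idx = FIELD_GET(MT_TXD0_Q_IDX, v);
            *pkt_fmt = FIELD_GET(MT_TXD0_PKT_FMT, v);
    }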
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
new file mode 100644
index 0000000000..be4f07ad3a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/acpi.h>
+#include "mt7921.h"
+
+static int
+mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
+{
+	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *sar_root, *sar_unit;
+	struct mt76_dev *mdev = &dev->mt76;
+	acpi_handle root, handle;
+	acpi_status status;
+	u32 i = 0;
+	int ret;
+
+	root = ACPI_HANDLE(mdev->dev);
+	if (!root)
+		return -EOPNOTSUPP;
+
+	status = acpi_get_handle(root, method, &handle);
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	sar_root = buf.pointer;
+	if (sar_root->type != ACPI_TYPE_PACKAGE ||
+	    sar_root->package.count < 4 ||
+	    sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
+		dev_err(mdev->dev, "sar cnt = %d\n",
+			sar_root->package.count);
+		goto free;
+	}
+
+	if (!*tbl) {
+		*tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
+				    GFP_KERNEL);
+		if (!*tbl)
+			goto free;
+	}
+	if (len)
+		*len = sar_root->package.count;
+
+	for (i = 0; i < sar_root->package.count; i++) {
+		sar_unit = &sar_root->package.elements[i];
+
+		if (sar_unit->type != ACPI_TYPE_INTEGER)
+			break;
+		*(*tbl + i) = (u8)sar_unit->integer.value;
+	}
+free:
+	/* compute the result before sar_root is freed */
+	ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+	kfree(sar_root);
+
+	return ret;
+}
+
+/* MTCL : Country List Table for 6G band */
+static int
+mt7921_asar_acpi_read_mtcl(struct mt7921_dev *dev, u8 **table, u8 *version)
+{
+	*version = (mt7921_acpi_read(dev, MT7921_ACPI_MTCL, table, NULL) < 0)
+		   ? 1 : 2;
+	return 0;
+}
+
+/* MTDS : Dynamic SAR Power Table */
+static int
+mt7921_asar_acpi_read_mtds(struct mt7921_dev *dev, u8 **table, u8 version)
+{
+	int len, ret, sarlen, prelen, tblcnt;
+	bool enable;
+
+	ret = mt7921_acpi_read(dev, MT7921_ACPI_MTDS, table, &len);
+	if (ret)
+		return ret;
+
+	/* Table content validation */
+	switch (version) {
+	case 1:
+		enable = ((struct mt7921_asar_dyn *)*table)->enable;
+		sarlen = sizeof(struct mt7921_asar_dyn_limit);
+		prelen = sizeof(struct mt7921_asar_dyn);
+		break;
+	case 2:
+		enable = ((struct mt7921_asar_dyn_v2 *)*table)->enable;
+		sarlen = sizeof(struct mt7921_asar_dyn_limit_v2);
+		prelen = sizeof(struct mt7921_asar_dyn_v2);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	tblcnt = (len - prelen) / sarlen;
+	if (!enable ||
+	    tblcnt > MT7921_ASAR_MAX_DYN || tblcnt < MT7921_ASAR_MIN_DYN)
+		ret = -EINVAL;
+
+	return ret;
+}
+
+/* MTGS : Geo SAR Power Table */
+static int
+mt7921_asar_acpi_read_mtgs(struct mt7921_dev *dev, u8 **table, u8 version)
+{
+	int len, ret = 0, sarlen, prelen, tblcnt;
+
+	ret = mt7921_acpi_read(dev, MT7921_ACPI_MTGS, table, &len);
+	if (ret)
+		return ret;
+
+	/* Table content validation */
+	switch (version) {
+	case 1:
+		sarlen = sizeof(struct mt7921_asar_geo_limit);
+		prelen = sizeof(struct mt7921_asar_geo);
+		break;
+	case 2:
+		sarlen = sizeof(struct mt7921_asar_geo_limit_v2);
+		prelen = sizeof(struct mt7921_asar_geo_v2);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	tblcnt = (len - prelen) / sarlen;
+	if (tblcnt > MT7921_ASAR_MAX_GEO || tblcnt < MT7921_ASAR_MIN_GEO)
+		ret = -EINVAL;
+
+	return ret;
+}
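+
+/* Worked example of the mt7921_asar_acpi_read_mtds() validation above:
+ * with the packed v1 layouts from acpi_sar.h,
+ * sizeof(struct mt7921_asar_dyn) == 6 and
+ * sizeof(struct mt7921_asar_dyn_limit) == 6, so a hypothetical 24-byte
+ * MTDS package gives tblcnt = (24 - 6) / 6 = 3 entries, inside the
+ * [MT7921_ASAR_MIN_DYN, MT7921_ASAR_MAX_DYN] = [1, 8] window.
+ */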
+int mt7921_init_acpi_sar(struct mt7921_dev *dev)
+{
+	struct mt7921_acpi_sar *asar;
+	int ret;
+
+	asar = devm_kzalloc(dev->mt76.dev, sizeof(*asar), GFP_KERNEL);
+	if (!asar)
+		return -ENOMEM;
+
+	mt7921_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+
+	/* MTDS is mandatory. Return an error if the table is invalid */
+	ret = mt7921_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
+	if (ret) {
+		devm_kfree(dev->mt76.dev, asar->dyn);
+		devm_kfree(dev->mt76.dev, asar->countrylist);
+		devm_kfree(dev->mt76.dev, asar);
+		return ret;
+	}
+
+	/* MTGS is optional */
+	ret = mt7921_asar_acpi_read_mtgs(dev, (u8 **)&asar->geo, asar->ver);
+	if (ret) {
+		devm_kfree(dev->mt76.dev, asar->geo);
+		asar->geo = NULL;
+	}
+
+	dev->phy.acpisar = asar;
+
+	return 0;
+}
+
+static s8
+mt7921_asar_get_geo_pwr(struct mt7921_phy *phy,
+			enum nl80211_band band, s8 dyn_power)
+{
+	struct mt7921_acpi_sar *asar = phy->acpisar;
+	struct mt7921_asar_geo_band *band_pwr;
+	s8 geo_power;
+	u8 idx, max;
+
+	if (!asar->geo)
+		return dyn_power;
+
+	switch (phy->mt76->dev->region) {
+	case NL80211_DFS_FCC:
+		idx = 0;
+		break;
+	case NL80211_DFS_ETSI:
+		idx = 1;
+		break;
+	default: /* WW */
+		idx = 2;
+		break;
+	}
+
+	if (asar->ver == 1) {
+		band_pwr = &asar->geo->tbl[idx].band[0];
+		max = ARRAY_SIZE(asar->geo->tbl[idx].band);
+	} else {
+		band_pwr = &asar->geo_v2->tbl[idx].band[0];
+		max = ARRAY_SIZE(asar->geo_v2->tbl[idx].band);
+	}
+
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		idx = 0;
+		break;
+	case NL80211_BAND_5GHZ:
+		idx = 1;
+		break;
+	case NL80211_BAND_6GHZ:
+		idx = 2;
+		break;
+	default:
+		return dyn_power;
+	}
+
+	if (idx >= max)
+		return dyn_power;
+
+	geo_power = (band_pwr + idx)->pwr;
+	dyn_power += (band_pwr + idx)->offset;
+
+	return min(geo_power, dyn_power);
+}
+
+static s8
+mt7921_asar_range_pwr(struct mt7921_phy *phy,
+		      const struct cfg80211_sar_freq_ranges *range,
+		      u8 idx)
+{
+	const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
+	struct mt7921_acpi_sar *asar = phy->acpisar;
+	u8 *limit, band, max;
+
+	if (!capa)
+		return 127;
+
+	if (asar->ver == 1) {
+		limit = &asar->dyn->tbl[0].frp[0];
+		max = ARRAY_SIZE(asar->dyn->tbl[0].frp);
+	} else {
+		limit = &asar->dyn_v2->tbl[0].frp[0];
+		max = ARRAY_SIZE(asar->dyn_v2->tbl[0].frp);
+	}
+
+	if (idx >= max)
+		return 127;
+
+	if (range->start_freq >= 5945)
+		band = NL80211_BAND_6GHZ;
+	else if (range->start_freq >= 5150)
+		band = NL80211_BAND_5GHZ;
+	else
+		band = NL80211_BAND_2GHZ;
+
+	return mt7921_asar_get_geo_pwr(phy, band, limit[idx]);
+}
+
+int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
+{
+	const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
+	int i;
+
+	if (!phy->acpisar)
+		return 0;
+
+	/* When ACPI SAR is enabled in hardware, apply these rules for .frp:
+	 * 1. w/o .sar_specs : use the ACPI SAR power as the default value
+	 * 2. w/  .sar_specs : set the power to min(.sar_specs, ACPI_SAR)
+	 */
+	for (i = 0; i < capa->num_freq_ranges; i++) {
+		struct mt76_freq_range_power *frp = &phy->mt76->frp[i];
+
+		frp->range = set_default ? &capa->freq_ranges[i] : frp->range;
+		if (!frp->range)
+			continue;
+
+		frp->power = min_t(s8, set_default ? 127 : frp->power,
+				   mt7921_asar_range_pwr(phy, frp->range, i));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
new file mode 100644
index 0000000000..23f86bfae0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2022 MediaTek Inc.
*/ + +#ifndef __MT7921_ACPI_SAR_H +#define __MT7921_ACPI_SAR_H + +#define MT7921_ASAR_MIN_DYN 1 +#define MT7921_ASAR_MAX_DYN 8 +#define MT7921_ASAR_MIN_GEO 3 +#define MT7921_ASAR_MAX_GEO 8 + +#define MT7921_ACPI_MTCL "MTCL" +#define MT7921_ACPI_MTDS "MTDS" +#define MT7921_ACPI_MTGS "MTGS" + +struct mt7921_asar_dyn_limit { + u8 idx; + u8 frp[5]; +} __packed; + +struct mt7921_asar_dyn { + u8 names[4]; + u8 enable; + u8 nr_tbl; + struct mt7921_asar_dyn_limit tbl[0]; +} __packed; + +struct mt7921_asar_dyn_limit_v2 { + u8 idx; + u8 frp[11]; +} __packed; + +struct mt7921_asar_dyn_v2 { + u8 names[4]; + u8 enable; + u8 rsvd; + u8 nr_tbl; + struct mt7921_asar_dyn_limit_v2 tbl[0]; +} __packed; + +struct mt7921_asar_geo_band { + u8 pwr; + u8 offset; +} __packed; + +struct mt7921_asar_geo_limit { + u8 idx; + /* 0:2G, 1:5G */ + struct mt7921_asar_geo_band band[2]; +} __packed; + +struct mt7921_asar_geo { + u8 names[4]; + u8 version; + u8 nr_tbl; + struct mt7921_asar_geo_limit tbl[0]; +} __packed; + +struct mt7921_asar_geo_limit_v2 { + u8 idx; + /* 0:2G, 1:5G, 2:6G */ + struct mt7921_asar_geo_band band[3]; +} __packed; + +struct mt7921_asar_geo_v2 { + u8 names[4]; + u8 version; + u8 rsvd; + u8 nr_tbl; + struct mt7921_asar_geo_limit_v2 tbl[0]; +} __packed; + +struct mt7921_asar_cl { + u8 names[4]; + u8 version; + u8 mode_6g; + u8 cl6g[6]; +} __packed; + +struct mt7921_acpi_sar { + u8 ver; + union { + struct mt7921_asar_dyn *dyn; + struct mt7921_asar_dyn_v2 *dyn_v2; + }; + union { + struct mt7921_asar_geo *geo; + struct mt7921_asar_geo_v2 *geo_v2; + }; + struct mt7921_asar_cl *countrylist; +}; + +#endif diff --git a/drivers/net/wireless/purelifi/Kconfig b/drivers/net/wireless/purelifi/Kconfig new file mode 100644 index 0000000000..e39afec3dc --- /dev/null +++ b/drivers/net/wireless/purelifi/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +config WLAN_VENDOR_PURELIFI + bool "pureLiFi devices" + default y + help + If you have a pureLiFi device, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_PURELIFI + +source "drivers/net/wireless/purelifi/plfxlc/Kconfig" + +endif # WLAN_VENDOR_PURELIFI diff --git a/drivers/net/wireless/purelifi/Makefile b/drivers/net/wireless/purelifi/Makefile new file mode 100644 index 0000000000..0bd6880b02 --- /dev/null +++ b/drivers/net/wireless/purelifi/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PLFXLC) := plfxlc/ diff --git a/drivers/net/wireless/purelifi/plfxlc/Kconfig b/drivers/net/wireless/purelifi/plfxlc/Kconfig new file mode 100644 index 0000000000..4e0be27a5e --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config PLFXLC + tristate "pureLiFi X, XL, XC device support" + depends on CFG80211 && MAC80211 && USB + help + This option adds support for pureLiFi LiFi wireless USB + adapters. The pureLiFi X, XL, XC USB devices are based on + 802.11 OFDM PHY but uses light as the transmission medium. + The driver supports common 802.11 encryption/authentication + methods including Open, WPA, WPA2-Personal and + WPA2-Enterprise (802.1X). + + To compile this driver as a module, choose m here. The module will + be called plfxlc. 
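With the two symbols above, an illustrative .config fragment that builds the driver as a module is:

    CONFIG_WLAN_VENDOR_PURELIFI=y
    CONFIG_PLFXLC=m

Loading the resulting plfxlc module also requires the firmware images it requests (for example plfxlc/lifi-x.bin, used by firmware.c below) to be available under /lib/firmware.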
diff --git a/drivers/net/wireless/purelifi/plfxlc/Makefile b/drivers/net/wireless/purelifi/plfxlc/Makefile new file mode 100644 index 0000000000..7ed954e871 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PLFXLC) := plfxlc.o +plfxlc-objs += chip.o firmware.o usb.o mac.o diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.c b/drivers/net/wireless/purelifi/plfxlc/chip.c new file mode 100644 index 0000000000..f4ef9ff971 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/chip.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include + +#include "chip.h" +#include "mac.h" +#include "usb.h" + +void plfxlc_chip_init(struct plfxlc_chip *chip, + struct ieee80211_hw *hw, + struct usb_interface *intf) +{ + memset(chip, 0, sizeof(*chip)); + mutex_init(&chip->mutex); + plfxlc_usb_init(&chip->usb, hw, intf); +} + +void plfxlc_chip_release(struct plfxlc_chip *chip) +{ + plfxlc_usb_release(&chip->usb); + mutex_destroy(&chip->mutex); +} + +int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval, + u8 dtim_period, int type) +{ + if (!interval || + (chip->beacon_set && chip->beacon_interval == interval)) + return 0; + + chip->beacon_interval = interval; + chip->beacon_set = true; + return plfxlc_usb_wreq(chip->usb.ez_usb, + &chip->beacon_interval, + sizeof(chip->beacon_interval), + USB_REQ_BEACON_INTERVAL_WR); +} + +int plfxlc_chip_init_hw(struct plfxlc_chip *chip) +{ + unsigned char *addr = plfxlc_mac_get_perm_addr(plfxlc_chip_to_mac(chip)); + struct usb_device *udev = interface_to_usbdev(chip->usb.intf); + + pr_info("plfxlc chip %04x:%04x v%02x %pM %s\n", + le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct), + le16_to_cpu(udev->descriptor.bcdDevice), + addr, + plfxlc_speed(udev->speed)); + + return plfxlc_set_beacon_interval(chip, 100, 0, 0); +} + +int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value) +{ + int r; + __le16 radio_on = cpu_to_le16(value); + + r = plfxlc_usb_wreq(chip->usb.ez_usb, &radio_on, + sizeof(value), USB_REQ_POWER_WR); + if (r) + dev_err(plfxlc_chip_dev(chip), "POWER_WR failed (%d)\n", r); + return r; +} + +int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip) +{ + plfxlc_usb_enable_tx(&chip->usb); + return plfxlc_usb_enable_rx(&chip->usb); +} + +void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip) +{ + u8 value = 0; + + plfxlc_usb_wreq(chip->usb.ez_usb, + &value, sizeof(value), USB_REQ_RXTX_WR); + plfxlc_usb_disable_rx(&chip->usb); + plfxlc_usb_disable_tx(&chip->usb); +} + +int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate) +{ + int r; + + if (!chip) + return -EINVAL; + + r = plfxlc_usb_wreq(chip->usb.ez_usb, + &rate, sizeof(rate), USB_REQ_RATE_WR); + if (r) + dev_err(plfxlc_chip_dev(chip), "RATE_WR failed (%d)\n", r); + return r; +} diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.h b/drivers/net/wireless/purelifi/plfxlc/chip.h new file mode 100644 index 0000000000..dc04bbed07 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/chip.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 pureLiFi + */ + +#ifndef PLFXLC_CHIP_H +#define PLFXLC_CHIP_H + +#include + +#include "usb.h" + +enum unit_type { + STA = 0, + AP = 1, +}; + +enum { + PLFXLC_RADIO_OFF = 0, + PLFXLC_RADIO_ON = 1, +}; + +struct plfxlc_chip { + struct plfxlc_usb usb; + struct mutex mutex; /* lock to protect chip data */ + enum unit_type 
unit_type; + u16 link_led; + u8 beacon_set; + u16 beacon_interval; +}; + +struct plfxlc_mc_hash { + u32 low; + u32 high; +}; + +#define plfxlc_chip_dev(chip) (&(chip)->usb.intf->dev) + +void plfxlc_chip_init(struct plfxlc_chip *chip, + struct ieee80211_hw *hw, + struct usb_interface *intf); + +void plfxlc_chip_release(struct plfxlc_chip *chip); + +void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip); + +int plfxlc_chip_init_hw(struct plfxlc_chip *chip); + +int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip); + +int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate); + +int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval, + u8 dtim_period, int type); + +int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value); + +static inline struct plfxlc_chip *plfxlc_usb_to_chip(struct plfxlc_usb + *usb) +{ + return container_of(usb, struct plfxlc_chip, usb); +} + +static inline void plfxlc_mc_add_all(struct plfxlc_mc_hash *hash) +{ + hash->low = 0xffffffff; + hash->high = 0xffffffff; +} + +#endif /* PLFXLC_CHIP_H */ diff --git a/drivers/net/wireless/purelifi/plfxlc/firmware.c b/drivers/net/wireless/purelifi/plfxlc/firmware.c new file mode 100644 index 0000000000..8a3529d079 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/firmware.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include + +#include "mac.h" +#include "usb.h" + +static int send_vendor_request(struct usb_device *udev, int request, + unsigned char *buffer, int buffer_size) +{ + return usb_control_msg(udev, + usb_rcvctrlpipe(udev, 0), + request, 0xC0, 0, 0, + buffer, buffer_size, PLF_USB_TIMEOUT); +} + +static int send_vendor_command(struct usb_device *udev, int request, + unsigned char *buffer, int buffer_size) +{ + return usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + request, USB_TYPE_VENDOR /*0x40*/, 0, 0, + buffer, buffer_size, PLF_USB_TIMEOUT); +} + +int plfxlc_download_fpga(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + unsigned char *fpga_dmabuff = NULL; + const struct firmware *fw = NULL; + int blk_tran_len = PLF_BULK_TLEN; + unsigned char *fw_data; + const char *fw_name; + int r, actual_length; + int fw_data_i = 0; + + if ((le16_to_cpu(udev->descriptor.idVendor) == + PURELIFI_X_VENDOR_ID_0) && + (le16_to_cpu(udev->descriptor.idProduct) == + PURELIFI_X_PRODUCT_ID_0)) { + fw_name = "plfxlc/lifi-x.bin"; + dev_dbg(&intf->dev, "bin file for X selected\n"); + + } else if ((le16_to_cpu(udev->descriptor.idVendor)) == + PURELIFI_XC_VENDOR_ID_0 && + (le16_to_cpu(udev->descriptor.idProduct) == + PURELIFI_XC_PRODUCT_ID_0)) { + fw_name = "plfxlc/lifi-xc.bin"; + dev_dbg(&intf->dev, "bin file for XC selected\n"); + + } else { + r = -EINVAL; + goto error; + } + + r = request_firmware(&fw, fw_name, &intf->dev); + if (r) { + dev_err(&intf->dev, "request_firmware failed (%d)\n", r); + goto error; + } + fpga_dmabuff = kmalloc(PLF_FPGA_STATUS_LEN, GFP_KERNEL); + + if (!fpga_dmabuff) { + r = -ENOMEM; + goto error_free_fw; + } + send_vendor_request(udev, PLF_VNDR_FPGA_SET_REQ, + fpga_dmabuff, PLF_FPGA_STATUS_LEN); + + send_vendor_command(udev, PLF_VNDR_FPGA_SET_CMD, NULL, 0); + + if (fpga_dmabuff[0] != PLF_FPGA_MG) { + dev_err(&intf->dev, "fpga_dmabuff[0] is wrong\n"); + r = -EINVAL; + goto error_free_fw; + } + + for (fw_data_i = 0; fw_data_i < fw->size;) { + int tbuf_idx; + + if ((fw->size - fw_data_i) < blk_tran_len) + blk_tran_len = fw->size - fw_data_i; + + fw_data = 
kmemdup(&fw->data[fw_data_i], blk_tran_len, + GFP_KERNEL); + if (!fw_data) { + r = -ENOMEM; + goto error_free_fw; + } + + for (tbuf_idx = 0; tbuf_idx < blk_tran_len; tbuf_idx++) { + /* u8 bit reverse */ + fw_data[tbuf_idx] = bitrev8(fw_data[tbuf_idx]); + } + r = usb_bulk_msg(udev, + usb_sndbulkpipe(interface_to_usbdev(intf), + fpga_dmabuff[0] & 0xff), + fw_data, + blk_tran_len, + &actual_length, + 2 * PLF_USB_TIMEOUT); + + if (r) + dev_err(&intf->dev, "Bulk msg failed (%d)\n", r); + + kfree(fw_data); + fw_data_i += blk_tran_len; + } + + kfree(fpga_dmabuff); + fpga_dmabuff = kmalloc(PLF_FPGA_STATE_LEN, GFP_KERNEL); + if (!fpga_dmabuff) { + r = -ENOMEM; + goto error_free_fw; + } + memset(fpga_dmabuff, 0xff, PLF_FPGA_STATE_LEN); + + send_vendor_request(udev, PLF_VNDR_FPGA_STATE_REQ, fpga_dmabuff, + PLF_FPGA_STATE_LEN); + + dev_dbg(&intf->dev, "%*ph\n", 8, fpga_dmabuff); + + if (fpga_dmabuff[0] != 0) { + r = -EINVAL; + goto error_free_fw; + } + + send_vendor_command(udev, PLF_VNDR_FPGA_STATE_CMD, NULL, 0); + + msleep(PLF_MSLEEP_TIME); + +error_free_fw: + kfree(fpga_dmabuff); + release_firmware(fw); +error: + return r; +} + +int plfxlc_download_xl_firmware(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + const struct firmware *fwp = NULL; + struct plfxlc_firmware_file file = {0}; + const char *fw_pack; + int s, r; + u8 *buf; + u32 i; + + r = send_vendor_command(udev, PLF_VNDR_XL_FW_CMD, NULL, 0); + msleep(PLF_MSLEEP_TIME); + + if (r) { + dev_err(&intf->dev, "vendor command failed (%d)\n", r); + return -EINVAL; + } + /* Code for single pack file download */ + + fw_pack = "plfxlc/lifi-xl.bin"; + + r = request_firmware(&fwp, fw_pack, &intf->dev); + if (r) { + dev_err(&intf->dev, "Request_firmware failed (%d)\n", r); + return -EINVAL; + } + file.total_files = get_unaligned_le32(&fwp->data[0]); + file.total_size = get_unaligned_le32(&fwp->size); + + dev_dbg(&intf->dev, "XL Firmware (%d, %d)\n", + file.total_files, file.total_size); + + buf = kzalloc(PLF_XL_BUF_LEN, GFP_KERNEL); + if (!buf) { + release_firmware(fwp); + return -ENOMEM; + } + + if (file.total_files > 10) { + dev_err(&intf->dev, "Too many files (%d)\n", file.total_files); + release_firmware(fwp); + kfree(buf); + return -EINVAL; + } + + /* Download firmware files in multiple steps */ + for (s = 0; s < file.total_files; s++) { + buf[0] = s; + r = send_vendor_command(udev, PLF_VNDR_XL_FILE_CMD, buf, + PLF_XL_BUF_LEN); + + if (s < file.total_files - 1) + file.size = get_unaligned_le32(&fwp->data[4 + ((s + 1) * 4)]) + - get_unaligned_le32(&fwp->data[4 + (s) * 4]); + else + file.size = file.total_size - + get_unaligned_le32(&fwp->data[4 + (s) * 4]); + + if (file.size > file.total_size || file.size > 60000) { + dev_err(&intf->dev, "File size is too large (%d)\n", file.size); + break; + } + + file.start_addr = get_unaligned_le32(&fwp->data[4 + (s * 4)]); + + if (file.size % PLF_XL_BUF_LEN && s < 2) + file.size += PLF_XL_BUF_LEN - file.size % PLF_XL_BUF_LEN; + + file.control_packets = file.size / PLF_XL_BUF_LEN; + + for (i = 0; i < file.control_packets; i++) { + memcpy(buf, + &fwp->data[file.start_addr + (i * PLF_XL_BUF_LEN)], + PLF_XL_BUF_LEN); + r = send_vendor_command(udev, PLF_VNDR_XL_DATA_CMD, buf, + PLF_XL_BUF_LEN); + } + dev_dbg(&intf->dev, "fw-dw step=%d,r=%d size=%d\n", s, r, + file.size); + } + release_firmware(fwp); + kfree(buf); + + /* Code for single pack file download ends fw download finish */ + + r = send_vendor_command(udev, PLF_VNDR_XL_EX_CMD, NULL, 0); + dev_dbg(&intf->dev, "Download fpga 
(4) (%d)\n", r); + + return 0; +} + +int plfxlc_upload_mac_and_serial(struct usb_interface *intf, + unsigned char *hw_address, + unsigned char *serial_number) +{ + struct usb_device *udev = interface_to_usbdev(intf); + unsigned long long firmware_version; + unsigned char *dma_buffer = NULL; + + dma_buffer = kmalloc(PLF_SERIAL_LEN, GFP_KERNEL); + if (!dma_buffer) + return -ENOMEM; + + BUILD_BUG_ON(ETH_ALEN > PLF_SERIAL_LEN); + BUILD_BUG_ON(PLF_FW_VER_LEN > PLF_SERIAL_LEN); + + send_vendor_request(udev, PLF_MAC_VENDOR_REQUEST, dma_buffer, + ETH_ALEN); + + memcpy(hw_address, dma_buffer, ETH_ALEN); + + send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST, + dma_buffer, PLF_SERIAL_LEN); + + send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST, + dma_buffer, PLF_SERIAL_LEN); + + memcpy(serial_number, dma_buffer, PLF_SERIAL_LEN); + + memset(dma_buffer, 0x00, PLF_SERIAL_LEN); + + send_vendor_request(udev, PLF_FIRMWARE_VERSION_VENDOR_REQUEST, + (unsigned char *)dma_buffer, PLF_FW_VER_LEN); + + memcpy(&firmware_version, dma_buffer, PLF_FW_VER_LEN); + + dev_info(&intf->dev, "Firmware Version: %llu\n", firmware_version); + kfree(dma_buffer); + + dev_dbg(&intf->dev, "Mac: %pM\n", hw_address); + + return 0; +} + diff --git a/drivers/net/wireless/purelifi/plfxlc/intf.h b/drivers/net/wireless/purelifi/plfxlc/intf.h new file mode 100644 index 0000000000..5ae89343b5 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/intf.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 pureLiFi + */ + +#define PURELIFI_BYTE_NUM_ALIGNMENT 4 +#define ETH_ALEN 6 +#define AP_USER_LIMIT 8 + +#define PLF_VNDR_FPGA_STATE_REQ 0x30 +#define PLF_VNDR_FPGA_SET_REQ 0x33 +#define PLF_VNDR_FPGA_SET_CMD 0x34 +#define PLF_VNDR_FPGA_STATE_CMD 0x35 + +#define PLF_VNDR_XL_FW_CMD 0x80 +#define PLF_VNDR_XL_DATA_CMD 0x81 +#define PLF_VNDR_XL_FILE_CMD 0x82 +#define PLF_VNDR_XL_EX_CMD 0x83 + +#define PLF_MAC_VENDOR_REQUEST 0x36 +#define PLF_SERIAL_NUMBER_VENDOR_REQUEST 0x37 +#define PLF_FIRMWARE_VERSION_VENDOR_REQUEST 0x39 +#define PLF_SERIAL_LEN 14 +#define PLF_FW_VER_LEN 8 + +struct rx_status { + __be16 rssi; + u8 rate_idx; + u8 pad; + __be64 crc_error_count; +} __packed; + +enum plf_usb_req_enum { + USB_REQ_TEST_WR = 0, + USB_REQ_MAC_WR = 1, + USB_REQ_POWER_WR = 2, + USB_REQ_RXTX_WR = 3, + USB_REQ_BEACON_WR = 4, + USB_REQ_BEACON_INTERVAL_WR = 5, + USB_REQ_RTS_CTS_RATE_WR = 6, + USB_REQ_HASH_WR = 7, + USB_REQ_DATA_TX = 8, + USB_REQ_RATE_WR = 9, + USB_REQ_SET_FREQ = 15 +}; + +struct plf_usb_req { + __be32 id; /* should be plf_usb_req_enum */ + __be32 len; + u8 buf[512]; +}; + diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c new file mode 100644 index 0000000000..d3cdffbded --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/mac.c @@ -0,0 +1,754 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "chip.h" +#include "mac.h" +#include "usb.h" + +static const struct ieee80211_rate plfxlc_rates[] = { + { .bitrate = 10, + .hw_value = PURELIFI_CCK_RATE_1M, + .flags = 0 }, + { .bitrate = 20, + .hw_value = PURELIFI_CCK_RATE_2M, + .hw_value_short = PURELIFI_CCK_RATE_2M + | PURELIFI_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = PURELIFI_CCK_RATE_5_5M, + .hw_value_short = PURELIFI_CCK_RATE_5_5M + | PURELIFI_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 
110, + .hw_value = PURELIFI_CCK_RATE_11M, + .hw_value_short = PURELIFI_CCK_RATE_11M + | PURELIFI_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = PURELIFI_OFDM_RATE_6M, + .flags = 0 }, + { .bitrate = 90, + .hw_value = PURELIFI_OFDM_RATE_9M, + .flags = 0 }, + { .bitrate = 120, + .hw_value = PURELIFI_OFDM_RATE_12M, + .flags = 0 }, + { .bitrate = 180, + .hw_value = PURELIFI_OFDM_RATE_18M, + .flags = 0 }, + { .bitrate = 240, + .hw_value = PURELIFI_OFDM_RATE_24M, + .flags = 0 }, + { .bitrate = 360, + .hw_value = PURELIFI_OFDM_RATE_36M, + .flags = 0 }, + { .bitrate = 480, + .hw_value = PURELIFI_OFDM_RATE_48M, + .flags = 0 }, + { .bitrate = 540, + .hw_value = PURELIFI_OFDM_RATE_54M, + .flags = 0 } +}; + +static const struct ieee80211_channel plfxlc_channels[] = { + { .center_freq = 2412, .hw_value = 1 }, + { .center_freq = 2417, .hw_value = 2 }, + { .center_freq = 2422, .hw_value = 3 }, + { .center_freq = 2427, .hw_value = 4 }, + { .center_freq = 2432, .hw_value = 5 }, + { .center_freq = 2437, .hw_value = 6 }, + { .center_freq = 2442, .hw_value = 7 }, + { .center_freq = 2447, .hw_value = 8 }, + { .center_freq = 2452, .hw_value = 9 }, + { .center_freq = 2457, .hw_value = 10 }, + { .center_freq = 2462, .hw_value = 11 }, + { .center_freq = 2467, .hw_value = 12 }, + { .center_freq = 2472, .hw_value = 13 }, + { .center_freq = 2484, .hw_value = 14 }, +}; + +int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address) +{ + SET_IEEE80211_PERM_ADDR(hw, hw_address); + return 0; +} + +int plfxlc_mac_init_hw(struct ieee80211_hw *hw) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct plfxlc_chip *chip = &mac->chip; + int r; + + r = plfxlc_chip_init_hw(chip); + if (r) { + dev_warn(plfxlc_mac_dev(mac), "init hw failed (%d)\n", r); + return r; + } + + dev_dbg(plfxlc_mac_dev(mac), "irq_disabled (%d)\n", irqs_disabled()); + regulatory_hint(hw->wiphy, "00"); + return r; +} + +void plfxlc_mac_release(struct plfxlc_mac *mac) +{ + plfxlc_chip_release(&mac->chip); + lockdep_assert_held(&mac->lock); +} + +int plfxlc_op_start(struct ieee80211_hw *hw) +{ + plfxlc_hw_mac(hw)->chip.usb.initialized = 1; + return 0; +} + +void plfxlc_op_stop(struct ieee80211_hw *hw) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + + clear_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); +} + +int plfxlc_restore_settings(struct plfxlc_mac *mac) +{ + int beacon_interval, beacon_period; + struct sk_buff *beacon; + + spin_lock_irq(&mac->lock); + beacon_interval = mac->beacon.interval; + beacon_period = mac->beacon.period; + spin_unlock_irq(&mac->lock); + + if (mac->type != NL80211_IFTYPE_ADHOC) + return 0; + + if (mac->vif) { + beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0); + if (beacon) { + /*beacon is hardcoded in firmware */ + kfree_skb(beacon); + /* Returned skb is used only once and lowlevel + * driver is responsible for freeing it. 
+ */ + } + } + + plfxlc_set_beacon_interval(&mac->chip, beacon_interval, + beacon_period, mac->type); + + spin_lock_irq(&mac->lock); + mac->beacon.last_update = jiffies; + spin_unlock_irq(&mac->lock); + + return 0; +} + +static void plfxlc_mac_tx_status(struct ieee80211_hw *hw, + struct sk_buff *skb, + int ackssi, + struct tx_status *tx_status) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int success = 1; + + ieee80211_tx_info_clear_status(info); + if (tx_status) + success = !tx_status->failure; + + if (success) + info->flags |= IEEE80211_TX_STAT_ACK; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; + + info->status.ack_signal = 50; + ieee80211_tx_status_irqsafe(hw, skb); +} + +void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hw *hw = info->rate_driver_data[0]; + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct sk_buff_head *q = NULL; + + ieee80211_tx_info_clear_status(info); + skb_pull(skb, sizeof(struct plfxlc_ctrlset)); + + if (unlikely(error || + (info->flags & IEEE80211_TX_CTL_NO_ACK))) { + ieee80211_tx_status_irqsafe(hw, skb); + return; + } + + q = &mac->ack_wait_queue; + + skb_queue_tail(q, skb); + while (skb_queue_len(q)/* > PURELIFI_MAC_MAX_ACK_WAITERS*/) { + plfxlc_mac_tx_status(hw, skb_dequeue(q), + mac->ack_pending ? + mac->ack_signal : 0, + NULL); + mac->ack_pending = 0; + } +} + +static int plfxlc_fill_ctrlset(struct plfxlc_mac *mac, struct sk_buff *skb) +{ + unsigned int frag_len = skb->len; + struct plfxlc_ctrlset *cs; + u32 temp_payload_len = 0; + unsigned int tmp; + u32 temp_len = 0; + + if (skb_headroom(skb) < sizeof(struct plfxlc_ctrlset)) { + dev_dbg(plfxlc_mac_dev(mac), "Not enough hroom(1)\n"); + return 1; + } + + cs = (void *)skb_push(skb, sizeof(struct plfxlc_ctrlset)); + temp_payload_len = frag_len; + temp_len = temp_payload_len + + sizeof(struct plfxlc_ctrlset) - + sizeof(cs->id) - sizeof(cs->len); + + /* Data packet lengths must be multiple of four bytes and must + * not be a multiple of 512 bytes. First, it is attempted to + * append the data packet in the tailroom of the skb. In rare + * occasions, the tailroom is too small. In this case, the + * content of the packet is shifted into the headroom of the skb + * by memcpy. Headroom is allocated at startup (below in this + * file). Therefore, there will be always enough headroom. The + * call skb_headroom is an additional safety which might be + * dropped. 
+ */ + /* check if 32 bit aligned and align data */ + tmp = skb->len & 3; + if (tmp) { + if (skb_tailroom(skb) < (3 - tmp)) { + if (skb_headroom(skb) >= 4 - tmp) { + u8 len; + u8 *src_pt; + u8 *dest_pt; + + len = skb->len; + src_pt = skb->data; + dest_pt = skb_push(skb, 4 - tmp); + memmove(dest_pt, src_pt, len); + } else { + return -ENOBUFS; + } + } else { + skb_put(skb, 4 - tmp); + } + temp_len += 4 - tmp; + } + + /* check if not multiple of 512 and align data */ + tmp = skb->len & 0x1ff; + if (!tmp) { + if (skb_tailroom(skb) < 4) { + if (skb_headroom(skb) >= 4) { + u8 len = skb->len; + u8 *src_pt = skb->data; + u8 *dest_pt = skb_push(skb, 4); + + memmove(dest_pt, src_pt, len); + } else { + /* should never happen because + * sufficient headroom was reserved + */ + return -ENOBUFS; + } + } else { + skb_put(skb, 4); + } + temp_len += 4; + } + + cs->id = cpu_to_be32(USB_REQ_DATA_TX); + cs->len = cpu_to_be32(temp_len); + cs->payload_len_nw = cpu_to_be32(temp_payload_len); + + return 0; +} + +static void plfxlc_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct plfxlc_header *plhdr = (void *)skb->data; + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct plfxlc_usb *usb = &mac->chip.usb; + unsigned long flags; + int r; + + r = plfxlc_fill_ctrlset(mac, skb); + if (r) + goto fail; + + info->rate_driver_data[0] = hw; + + if (plhdr->frametype == IEEE80211_FTYPE_DATA) { + u8 *dst_mac = plhdr->dmac; + u8 sidx; + bool found = false; + struct plfxlc_usb_tx *tx = &usb->tx; + + for (sidx = 0; sidx < MAX_STA_NUM; sidx++) { + if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG)) + continue; + if (memcmp(tx->station[sidx].mac, dst_mac, ETH_ALEN)) + continue; + found = true; + break; + } + + /* Default to broadcast address for unknown MACs */ + if (!found) + sidx = STA_BROADCAST_INDEX; + + /* Stop OS from sending packets, if the queue is half full */ + if (skb_queue_len(&tx->station[sidx].data_list) > 60) + ieee80211_stop_queues(plfxlc_usb_to_hw(usb)); + + /* Schedule packet for transmission if queue is not full */ + if (skb_queue_len(&tx->station[sidx].data_list) > 256) + goto fail; + skb_queue_tail(&tx->station[sidx].data_list, skb); + plfxlc_send_packet_from_data_queue(usb); + + } else { + spin_lock_irqsave(&usb->tx.lock, flags); + r = plfxlc_usb_wreq_async(&mac->chip.usb, skb->data, skb->len, + USB_REQ_DATA_TX, plfxlc_tx_urb_complete, skb); + spin_unlock_irqrestore(&usb->tx.lock, flags); + if (r) + goto fail; + } + return; + +fail: + dev_kfree_skb(skb); +} + +static int plfxlc_filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr, + struct ieee80211_rx_status *stats) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct sk_buff_head *q; + int i, position = 0; + unsigned long flags; + struct sk_buff *skb; + bool found = false; + + if (!ieee80211_is_ack(rx_hdr->frame_control)) + return 0; + + dev_dbg(plfxlc_mac_dev(mac), "ACK Received\n"); + + /* code based on zy driver, this logic may need fix */ + q = &mac->ack_wait_queue; + spin_lock_irqsave(&q->lock, flags); + + skb_queue_walk(q, skb) { + struct ieee80211_hdr *tx_hdr; + + position++; + + if (mac->ack_pending && skb_queue_is_first(q, skb)) + continue; + if (mac->ack_pending == 0) + break; + + tx_hdr = (struct ieee80211_hdr *)skb->data; + if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1))) { + found = 1; + break; + } + } + + if (found) { + for (i = 1; i < position; i++) + skb = __skb_dequeue(q); + if (i == position) 
{ + plfxlc_mac_tx_status(hw, skb, + mac->ack_pending ? + mac->ack_signal : 0, + NULL); + mac->ack_pending = 0; + } + + mac->ack_pending = skb_queue_len(q) ? 1 : 0; + mac->ack_signal = stats->signal; + } + + spin_unlock_irqrestore(&q->lock, flags); + return 1; +} + +int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, + unsigned int length) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct ieee80211_rx_status stats; + const struct rx_status *status; + unsigned int payload_length; + struct plfxlc_usb_tx *tx; + struct sk_buff *skb; + int need_padding; + __le16 fc; + int sidx; + + /* Packet blockade during disabled interface. */ + if (!mac->vif) + return 0; + + status = (struct rx_status *)buffer; + + memset(&stats, 0, sizeof(stats)); + + stats.flag = 0; + stats.freq = 2412; + stats.band = NL80211_BAND_LC; + mac->rssi = -15 * be16_to_cpu(status->rssi) / 10; + + stats.signal = mac->rssi; + + if (status->rate_idx > 7) + stats.rate_idx = 0; + else + stats.rate_idx = status->rate_idx; + + mac->crc_errors = be64_to_cpu(status->crc_error_count); + + /* TODO bad frame check for CRC error*/ + if (plfxlc_filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats) && + !mac->pass_ctrl) + return 0; + + buffer += sizeof(struct rx_status); + payload_length = get_unaligned_be32(buffer); + + if (payload_length > 1560) { + dev_err(plfxlc_mac_dev(mac), " > MTU %u\n", payload_length); + return 0; + } + buffer += sizeof(u32); + + fc = get_unaligned((__le16 *)buffer); + need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); + + tx = &mac->chip.usb.tx; + + for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { + if (memcmp(&buffer[10], tx->station[sidx].mac, ETH_ALEN)) + continue; + if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) { + tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG; + break; + } + } + + if (sidx == MAX_STA_NUM - 1) { + for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { + if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) + continue; + memcpy(tx->station[sidx].mac, &buffer[10], ETH_ALEN); + tx->station[sidx].flag |= STATION_CONNECTED_FLAG; + tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG; + break; + } + } + + switch (buffer[0]) { + case IEEE80211_STYPE_PROBE_REQ: + dev_dbg(plfxlc_mac_dev(mac), "Probe request\n"); + break; + case IEEE80211_STYPE_ASSOC_REQ: + dev_dbg(plfxlc_mac_dev(mac), "Association request\n"); + break; + case IEEE80211_STYPE_AUTH: + dev_dbg(plfxlc_mac_dev(mac), "Authentication req\n"); + break; + case IEEE80211_FTYPE_DATA: + dev_dbg(plfxlc_mac_dev(mac), "802.11 data frame\n"); + break; + } + + skb = dev_alloc_skb(payload_length + (need_padding ? 2 : 0)); + if (!skb) + return -ENOMEM; + + if (need_padding) + /* Make sure that the payload data is 4 byte aligned. 
*/ + skb_reserve(skb, 2); + + skb_put_data(skb, buffer, payload_length); + memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); + ieee80211_rx_irqsafe(hw, skb); + return 0; +} + +static int plfxlc_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + static const char * const iftype80211[] = { + [NL80211_IFTYPE_STATION] = "Station", + [NL80211_IFTYPE_ADHOC] = "Adhoc" + }; + + if (mac->type != NL80211_IFTYPE_UNSPECIFIED) + return -EOPNOTSUPP; + + if (vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_STATION) { + dev_dbg(plfxlc_mac_dev(mac), "%s %s\n", __func__, + iftype80211[vif->type]); + mac->type = vif->type; + mac->vif = vif; + return 0; + } + dev_dbg(plfxlc_mac_dev(mac), "unsupported iftype\n"); + return -EOPNOTSUPP; +} + +static void plfxlc_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + + mac->type = NL80211_IFTYPE_UNSPECIFIED; + mac->vif = NULL; +} + +static int plfxlc_op_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} + +#define SUPPORTED_FIF_FLAGS \ + (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \ + FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC) +static void plfxlc_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct plfxlc_mc_hash hash = { + .low = multicast, + .high = multicast >> 32, + }; + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + unsigned long flags; + + /* Only deal with supported flags */ + *new_flags &= SUPPORTED_FIF_FLAGS; + + /* If multicast parameter + * (as returned by plfxlc_op_prepare_multicast) + * has changed, no bit in changed_flags is set. To handle this + * situation, we do not return if changed_flags is 0. If we do so, + * we will have some issue with IPv6 which uses multicast for link + * layer address resolution. + */ + if (*new_flags & (FIF_ALLMULTI)) + plfxlc_mc_add_all(&hash); + + spin_lock_irqsave(&mac->lock, flags); + mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL); + mac->pass_ctrl = !!(*new_flags & FIF_CONTROL); + mac->multicast_hash = hash; + spin_unlock_irqrestore(&mac->lock, flags); + + /* no handling required for FIF_OTHER_BSS as we don't currently + * do BSSID filtering + */ + /* FIXME: in future it would be nice to enable the probe response + * filter (so that the driver doesn't see them) until + * FIF_BCN_PRBRESP_PROMISC is set. however due to atomicity here, we'd + * have to schedule work to enable prbresp reception, which might + * happen too late. For now we'll just listen and forward them all the + * time. + */ +} + +static void plfxlc_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u64 changes) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + int associated; + + dev_dbg(plfxlc_mac_dev(mac), "changes: %llx\n", changes); + + if (mac->type != NL80211_IFTYPE_ADHOC) { /* for STATION */ + associated = is_valid_ether_addr(bss_conf->bssid); + goto exit_all; + } + /* for ADHOC */ + associated = true; + if (changes & BSS_CHANGED_BEACON) { + struct sk_buff *beacon = ieee80211_beacon_get(hw, vif, 0); + + if (beacon) { + /*beacon is hardcoded in firmware */ + kfree_skb(beacon); + /*Returned skb is used only once and + * low-level driver is + * responsible for freeing it. 
+ */ + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + u16 interval = 0; + u8 period = 0; + + if (bss_conf->enable_beacon) { + period = bss_conf->dtim_period; + interval = bss_conf->beacon_int; + } + + spin_lock_irq(&mac->lock); + mac->beacon.period = period; + mac->beacon.interval = interval; + mac->beacon.last_update = jiffies; + spin_unlock_irq(&mac->lock); + + plfxlc_set_beacon_interval(&mac->chip, interval, + period, mac->type); + } +exit_all: + spin_lock_irq(&mac->lock); + mac->associated = associated; + spin_unlock_irq(&mac->lock); +} + +static int plfxlc_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + stats->dot11ACKFailureCount = 0; + stats->dot11RTSFailureCount = 0; + stats->dot11FCSErrorCount = 0; + stats->dot11RTSSuccessCount = 0; + return 0; +} + +static const char et_strings[][ETH_GSTRING_LEN] = { + "phy_rssi", + "phy_rx_crc_err" +}; + +static int plfxlc_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(et_strings); + + return 0; +} + +static void plfxlc_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) + memcpy(data, *et_strings, sizeof(et_strings)); +} + +static void plfxlc_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + + data[0] = mac->rssi; + data[1] = mac->crc_errors; +} + +static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + return 0; +} + +static const struct ieee80211_ops plfxlc_ops = { + .tx = plfxlc_op_tx, + .start = plfxlc_op_start, + .stop = plfxlc_op_stop, + .add_interface = plfxlc_op_add_interface, + .remove_interface = plfxlc_op_remove_interface, + .set_rts_threshold = plfxlc_set_rts_threshold, + .config = plfxlc_op_config, + .configure_filter = plfxlc_op_configure_filter, + .bss_info_changed = plfxlc_op_bss_info_changed, + .get_stats = plfxlc_get_stats, + .get_et_sset_count = plfxlc_get_et_sset_count, + .get_et_stats = plfxlc_get_et_stats, + .get_et_strings = plfxlc_get_et_strings, +}; + +struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf) +{ + struct ieee80211_hw *hw; + struct plfxlc_mac *mac; + + hw = ieee80211_alloc_hw(sizeof(struct plfxlc_mac), &plfxlc_ops); + if (!hw) { + dev_dbg(&intf->dev, "out of memory\n"); + return NULL; + } + set_wiphy_dev(hw->wiphy, &intf->dev); + + mac = plfxlc_hw_mac(hw); + memset(mac, 0, sizeof(*mac)); + spin_lock_init(&mac->lock); + mac->hw = hw; + + mac->type = NL80211_IFTYPE_UNSPECIFIED; + + memcpy(mac->channels, plfxlc_channels, sizeof(plfxlc_channels)); + memcpy(mac->rates, plfxlc_rates, sizeof(plfxlc_rates)); + mac->band.n_bitrates = ARRAY_SIZE(plfxlc_rates); + mac->band.bitrates = mac->rates; + mac->band.n_channels = ARRAY_SIZE(plfxlc_channels); + mac->band.channels = mac->channels; + hw->wiphy->bands[NL80211_BAND_LC] = &mac->band; + hw->conf.chandef.width = NL80211_CHAN_WIDTH_20; + + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, MFP_CAPABLE); + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + hw->max_signal = 100; + hw->queues = 1; + /* 4 for 32 bit alignment if no tailroom */ + hw->extra_tx_headroom = sizeof(struct plfxlc_ctrlset) + 4; + /* Tell mac80211 that we support multi rate retries */ + hw->max_rates = 
IEEE80211_TX_MAX_RATES; + hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */ + + skb_queue_head_init(&mac->ack_wait_queue); + mac->ack_pending = 0; + + plfxlc_chip_init(&mac->chip, hw, intf); + + SET_IEEE80211_DEV(hw, &intf->dev); + return hw; +} diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h new file mode 100644 index 0000000000..49b9241372 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/mac.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 pureLiFi + */ + +#ifndef PLFXLC_MAC_H +#define PLFXLC_MAC_H + +#include +#include + +#include "chip.h" + +#define PURELIFI_CCK 0x00 +#define PURELIFI_OFDM 0x10 +#define PURELIFI_CCK_PREA_SHORT 0x20 + +#define PURELIFI_OFDM_PLCP_RATE_6M 0xb +#define PURELIFI_OFDM_PLCP_RATE_9M 0xf +#define PURELIFI_OFDM_PLCP_RATE_12M 0xa +#define PURELIFI_OFDM_PLCP_RATE_18M 0xe +#define PURELIFI_OFDM_PLCP_RATE_24M 0x9 +#define PURELIFI_OFDM_PLCP_RATE_36M 0xd +#define PURELIFI_OFDM_PLCP_RATE_48M 0x8 +#define PURELIFI_OFDM_PLCP_RATE_54M 0xc + +#define PURELIFI_CCK_RATE_1M (PURELIFI_CCK | 0x00) +#define PURELIFI_CCK_RATE_2M (PURELIFI_CCK | 0x01) +#define PURELIFI_CCK_RATE_5_5M (PURELIFI_CCK | 0x02) +#define PURELIFI_CCK_RATE_11M (PURELIFI_CCK | 0x03) +#define PURELIFI_OFDM_RATE_6M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_6M) +#define PURELIFI_OFDM_RATE_9M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_9M) +#define PURELIFI_OFDM_RATE_12M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_12M) +#define PURELIFI_OFDM_RATE_18M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_18M) +#define PURELIFI_OFDM_RATE_24M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_24M) +#define PURELIFI_OFDM_RATE_36M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_36M) +#define PURELIFI_OFDM_RATE_48M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_48M) +#define PURELIFI_OFDM_RATE_54M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_54M) + +#define PURELIFI_RX_ERROR 0x80 +#define PURELIFI_RX_CRC32_ERROR 0x10 + +#define PLF_REGDOMAIN_FCC 0x10 +#define PLF_REGDOMAIN_IC 0x20 +#define PLF_REGDOMAIN_ETSI 0x30 +#define PLF_REGDOMAIN_SPAIN 0x31 +#define PLF_REGDOMAIN_FRANCE 0x32 +#define PLF_REGDOMAIN_JAPAN_2 0x40 +#define PLF_REGDOMAIN_JAPAN 0x41 +#define PLF_REGDOMAIN_JAPAN_3 0x49 + +#define PLF_RX_ERROR 0x80 +#define PLF_RX_CRC32_ERROR 0x10 + +enum { + MODULATION_RATE_BPSK_1_2 = 0, + MODULATION_RATE_BPSK_3_4, + MODULATION_RATE_QPSK_1_2, + MODULATION_RATE_QPSK_3_4, + MODULATION_RATE_QAM16_1_2, + MODULATION_RATE_QAM16_3_4, + MODULATION_RATE_QAM64_1_2, + MODULATION_RATE_QAM64_3_4, + MODULATION_RATE_AUTO, + MODULATION_RATE_NUM +}; + +#define plfxlc_mac_dev(mac) plfxlc_chip_dev(&(mac)->chip) + +#define PURELIFI_MAC_STATS_BUFFER_SIZE 16 +#define PURELIFI_MAC_MAX_ACK_WAITERS 50 + +struct plfxlc_ctrlset { + /* id should be plf_usb_req_enum */ + __be32 id; + __be32 len; + u8 modulation; + u8 control; + u8 service; + u8 pad; + __le16 packet_length; + __le16 current_length; + __le16 next_frame_length; + __le16 tx_length; + __be32 payload_len_nw; +} __packed; + +/* overlay */ +struct plfxlc_header { + struct plfxlc_ctrlset plf_ctrl; + u32 frametype; + u8 *dmac; +} __packed; + +struct tx_status { + u8 type; + u8 id; + u8 rate; + u8 pad; + u8 mac[ETH_ALEN]; + u8 retry; + u8 failure; +} __packed; + +struct beacon { + struct delayed_work watchdog_work; + struct sk_buff *cur_beacon; + unsigned long last_update; + u16 interval; + u8 period; +}; + +enum plfxlc_device_flags { + PURELIFI_DEVICE_RUNNING, +}; + +struct plfxlc_mac { + struct ieee80211_hw *hw; + struct 
ieee80211_vif *vif; + struct beacon beacon; + struct work_struct set_rts_cts_work; + struct work_struct process_intr; + struct plfxlc_mc_hash multicast_hash; + struct sk_buff_head ack_wait_queue; + struct ieee80211_channel channels[14]; + struct ieee80211_rate rates[12]; + struct ieee80211_supported_band band; + struct plfxlc_chip chip; + spinlock_t lock; /* lock for mac data */ + u8 intr_buffer[USB_MAX_EP_INT_BUFFER]; + char serial_number[PURELIFI_SERIAL_LEN]; + unsigned char hw_address[ETH_ALEN]; + u8 default_regdomain; + unsigned long flags; + bool pass_failed_fcs; + bool pass_ctrl; + bool ack_pending; + int ack_signal; + int associated; + u8 regdomain; + u8 channel; + int type; + u64 crc_errors; + u64 rssi; +}; + +static inline struct plfxlc_mac * +plfxlc_hw_mac(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +static inline struct plfxlc_mac * +plfxlc_chip_to_mac(struct plfxlc_chip *chip) +{ + return container_of(chip, struct plfxlc_mac, chip); +} + +static inline struct plfxlc_mac * +plfxlc_usb_to_mac(struct plfxlc_usb *usb) +{ + return plfxlc_chip_to_mac(plfxlc_usb_to_chip(usb)); +} + +static inline u8 *plfxlc_mac_get_perm_addr(struct plfxlc_mac *mac) +{ + return mac->hw->wiphy->perm_addr; +} + +struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf); +void plfxlc_mac_release(struct plfxlc_mac *mac); + +int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address); +int plfxlc_mac_init_hw(struct ieee80211_hw *hw); + +int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, + unsigned int length); +void plfxlc_mac_tx_failed(struct urb *urb); +void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error); +int plfxlc_op_start(struct ieee80211_hw *hw); +void plfxlc_op_stop(struct ieee80211_hw *hw); +int plfxlc_restore_settings(struct plfxlc_mac *mac); + +#endif /* PLFXLC_MAC_H */ diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c new file mode 100644 index 0000000000..39e54b3787 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -0,0 +1,891 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mac.h" +#include "usb.h" +#include "chip.h" + +static const struct usb_device_id usb_ids[] = { + { USB_DEVICE(PURELIFI_X_VENDOR_ID_0, PURELIFI_X_PRODUCT_ID_0), + .driver_info = DEVICE_LIFI_X }, + { USB_DEVICE(PURELIFI_XC_VENDOR_ID_0, PURELIFI_XC_PRODUCT_ID_0), + .driver_info = DEVICE_LIFI_XC }, + { USB_DEVICE(PURELIFI_XL_VENDOR_ID_0, PURELIFI_XL_PRODUCT_ID_0), + .driver_info = DEVICE_LIFI_XL }, + {} +}; + +void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_tx *tx = &usb->tx; + struct sk_buff *skb = NULL; + unsigned long flags; + u8 last_served_sidx; + + spin_lock_irqsave(&tx->lock, flags); + last_served_sidx = usb->sidx; + do { + usb->sidx = (usb->sidx + 1) % MAX_STA_NUM; + if (!(tx->station[usb->sidx].flag & STATION_CONNECTED_FLAG)) + continue; + if (!(tx->station[usb->sidx].flag & STATION_FIFO_FULL_FLAG)) + skb = skb_peek(&tx->station[usb->sidx].data_list); + } while ((usb->sidx != last_served_sidx) && (!skb)); + + if (skb) { + skb = skb_dequeue(&tx->station[usb->sidx].data_list); + plfxlc_usb_wreq_async(usb, skb->data, skb->len, USB_REQ_DATA_TX, + plfxlc_tx_urb_complete, skb); + if (skb_queue_len(&tx->station[usb->sidx].data_list) <= 60) + 
ieee80211_wake_queues(plfxlc_usb_to_hw(usb)); + } + spin_unlock_irqrestore(&tx->lock, flags); +} + +static void handle_rx_packet(struct plfxlc_usb *usb, const u8 *buffer, + unsigned int length) +{ + plfxlc_mac_rx(plfxlc_usb_to_hw(usb), buffer, length); +} + +static void rx_urb_complete(struct urb *urb) +{ + struct plfxlc_usb_tx *tx; + struct plfxlc_usb *usb; + unsigned int length; + const u8 *buffer; + u16 status; + u8 sidx; + int r; + + if (!urb) { + pr_err("urb is NULL\n"); + return; + } + if (!urb->context) { + pr_err("urb ctx is NULL\n"); + return; + } + usb = urb->context; + + if (usb->initialized != 1) { + pr_err("usb is not initialized\n"); + return; + } + + tx = &usb->tx; + switch (urb->status) { + case 0: + break; + case -ESHUTDOWN: + case -EINVAL: + case -ENODEV: + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + return; + default: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + if (tx->submitted_urbs++ < PURELIFI_URB_RETRY_MAX) { + dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit %d", urb, + tx->submitted_urbs++); + goto resubmit; + } else { + dev_dbg(plfxlc_urb_dev(urb), "urb %p max resubmits reached", urb); + tx->submitted_urbs = 0; + return; + } + } + + buffer = urb->transfer_buffer; + length = le32_to_cpu(*(__le32 *)(buffer + sizeof(struct rx_status))) + + sizeof(u32); + + if (urb->actual_length != (PLF_MSG_STATUS_OFFSET + 1)) { + if (usb->initialized && usb->link_up) + handle_rx_packet(usb, buffer, length); + goto resubmit; + } + + status = buffer[PLF_MSG_STATUS_OFFSET]; + + switch (status) { + case STATION_FIFO_ALMOST_FULL_NOT_MESSAGE: + dev_dbg(&usb->intf->dev, + "FIFO full not packet receipt\n"); + tx->mac_fifo_full = 1; + for (sidx = 0; sidx < MAX_STA_NUM; sidx++) + tx->station[sidx].flag |= STATION_FIFO_FULL_FLAG; + break; + case STATION_FIFO_ALMOST_FULL_MESSAGE: + dev_dbg(&usb->intf->dev, "FIFO full packet receipt\n"); + + for (sidx = 0; sidx < MAX_STA_NUM; sidx++) + tx->station[sidx].flag &= STATION_ACTIVE_FLAG; + + plfxlc_send_packet_from_data_queue(usb); + break; + case STATION_CONNECT_MESSAGE: + usb->link_up = 1; + dev_dbg(&usb->intf->dev, "ST_CONNECT_MSG packet receipt\n"); + break; + case STATION_DISCONNECT_MESSAGE: + usb->link_up = 0; + dev_dbg(&usb->intf->dev, "ST_DISCONN_MSG packet receipt\n"); + break; + default: + dev_dbg(&usb->intf->dev, "Unknown packet receipt\n"); + break; + } + +resubmit: + r = usb_submit_urb(urb, GFP_ATOMIC); + if (r) + dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit fail (%d)\n", urb, r); +} + +static struct urb *alloc_rx_urb(struct plfxlc_usb *usb) +{ + struct usb_device *udev = plfxlc_usb_to_usbdev(usb); + struct urb *urb; + void *buffer; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return NULL; + + buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL, + &urb->transfer_dma); + if (!buffer) { + usb_free_urb(urb); + return NULL; + } + + usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN), + buffer, USB_MAX_RX_SIZE, + rx_urb_complete, usb); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + return urb; +} + +static void free_rx_urb(struct urb *urb) +{ + if (!urb) + return; + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); +} + +static int __lf_x_usb_enable_rx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_rx *rx = &usb->rx; + struct urb **urbs; + int i, r; + + r = -ENOMEM; + urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL); + 
+	if (!urbs)
+		goto error;
+
+	for (i = 0; i < RX_URBS_COUNT; i++) {
+		urbs[i] = alloc_rx_urb(usb);
+		if (!urbs[i])
+			goto error;
+	}
+
+	spin_lock_irq(&rx->lock);
+
+	dev_dbg(plfxlc_usb_dev(usb), "irq_disabled %d\n", irqs_disabled());
+
+	if (rx->urbs) {
+		spin_unlock_irq(&rx->lock);
+		r = 0;
+		goto error;
+	}
+	rx->urbs = urbs;
+	rx->urbs_count = RX_URBS_COUNT;
+	spin_unlock_irq(&rx->lock);
+
+	for (i = 0; i < RX_URBS_COUNT; i++) {
+		r = usb_submit_urb(urbs[i], GFP_KERNEL);
+		if (r)
+			goto error_submit;
+	}
+
+	return 0;
+
+error_submit:
+	for (i = 0; i < RX_URBS_COUNT; i++)
+		usb_kill_urb(urbs[i]);
+	spin_lock_irq(&rx->lock);
+	rx->urbs = NULL;
+	rx->urbs_count = 0;
+	spin_unlock_irq(&rx->lock);
+error:
+	if (urbs) {
+		for (i = 0; i < RX_URBS_COUNT; i++)
+			free_rx_urb(urbs[i]);
+	}
+	return r;
+}
+
+int plfxlc_usb_enable_rx(struct plfxlc_usb *usb)
+{
+	struct plfxlc_usb_rx *rx = &usb->rx;
+	int r;
+
+	mutex_lock(&rx->setup_mutex);
+	r = __lf_x_usb_enable_rx(usb);
+	if (!r)
+		usb->rx_usb_enabled = 1;
+
+	mutex_unlock(&rx->setup_mutex);
+
+	return r;
+}
+
+static void __lf_x_usb_disable_rx(struct plfxlc_usb *usb)
+{
+	struct plfxlc_usb_rx *rx = &usb->rx;
+	unsigned long flags;
+	unsigned int count;
+	struct urb **urbs;
+	int i;
+
+	spin_lock_irqsave(&rx->lock, flags);
+	urbs = rx->urbs;
+	count = rx->urbs_count;
+	spin_unlock_irqrestore(&rx->lock, flags);
+
+	if (!urbs)
+		return;
+
+	for (i = 0; i < count; i++) {
+		usb_kill_urb(urbs[i]);
+		free_rx_urb(urbs[i]);
+	}
+	kfree(urbs);
+	rx->urbs = NULL;
+	rx->urbs_count = 0;
+}
+
+void plfxlc_usb_disable_rx(struct plfxlc_usb *usb)
+{
+	struct plfxlc_usb_rx *rx = &usb->rx;
+
+	mutex_lock(&rx->setup_mutex);
+	__lf_x_usb_disable_rx(usb);
+	usb->rx_usb_enabled = 0;
+	mutex_unlock(&rx->setup_mutex);
+}
+
+void plfxlc_usb_disable_tx(struct plfxlc_usb *usb)
+{
+	struct plfxlc_usb_tx *tx = &usb->tx;
+	unsigned long flags;
+
+	clear_bit(PLF_BIT_ENABLED, &tx->enabled);
+
+	/* kill all submitted tx-urbs */
+	usb_kill_anchored_urbs(&tx->submitted);
+
+	spin_lock_irqsave(&tx->lock, flags);
+	WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+	WARN_ON(tx->submitted_urbs != 0);
+	tx->submitted_urbs = 0;
+	spin_unlock_irqrestore(&tx->lock, flags);
+
+	/* The stopped state is ignored, relying on ieee80211_wake_queues()
+	 * in a potentially following plfxlc_usb_enable_tx().
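+	 * That call re-arms the queues and resets submitted_urbs under
+	 * tx->lock, so nothing else needs to be preserved here.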
+ */ +} + +void plfxlc_usb_enable_tx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_tx *tx = &usb->tx; + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); + set_bit(PLF_BIT_ENABLED, &tx->enabled); + tx->submitted_urbs = 0; + ieee80211_wake_queues(plfxlc_usb_to_hw(usb)); + tx->stopped = 0; + spin_unlock_irqrestore(&tx->lock, flags); +} + +void plfxlc_tx_urb_complete(struct urb *urb) +{ + struct ieee80211_tx_info *info; + struct plfxlc_usb *usb; + struct sk_buff *skb; + + skb = urb->context; + info = IEEE80211_SKB_CB(skb); + /* grab 'usb' pointer before handing off the skb (since + * it might be freed by plfxlc_mac_tx_to_dev or mac80211) + */ + usb = &plfxlc_hw_mac(info->rate_driver_data[0])->chip.usb; + + switch (urb->status) { + case 0: + break; + case -ESHUTDOWN: + case -EINVAL: + case -ENODEV: + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + break; + default: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + return; + } + + plfxlc_mac_tx_to_dev(skb, urb->status); + plfxlc_send_packet_from_data_queue(usb); + usb_free_urb(urb); +} + +static inline void init_usb_rx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_rx *rx = &usb->rx; + + spin_lock_init(&rx->lock); + mutex_init(&rx->setup_mutex); + + if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) + rx->usb_packet_size = 512; + else + rx->usb_packet_size = 64; + + if (rx->fragment_length != 0) + dev_dbg(plfxlc_usb_dev(usb), "fragment_length error\n"); +} + +static inline void init_usb_tx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_tx *tx = &usb->tx; + + spin_lock_init(&tx->lock); + clear_bit(PLF_BIT_ENABLED, &tx->enabled); + tx->stopped = 0; + skb_queue_head_init(&tx->submitted_skbs); + init_usb_anchor(&tx->submitted); +} + +void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw, + struct usb_interface *intf) +{ + memset(usb, 0, sizeof(*usb)); + usb->intf = usb_get_intf(intf); + usb_set_intfdata(usb->intf, hw); + init_usb_tx(usb); + init_usb_rx(usb); +} + +void plfxlc_usb_release(struct plfxlc_usb *usb) +{ + plfxlc_op_stop(plfxlc_usb_to_hw(usb)); + plfxlc_usb_disable_tx(usb); + plfxlc_usb_disable_rx(usb); + usb_set_intfdata(usb->intf, NULL); + usb_put_intf(usb->intf); +} + +const char *plfxlc_speed(enum usb_device_speed speed) +{ + switch (speed) { + case USB_SPEED_LOW: + return "low"; + case USB_SPEED_FULL: + return "full"; + case USB_SPEED_HIGH: + return "high"; + default: + return "unknown"; + } +} + +int plfxlc_usb_init_hw(struct plfxlc_usb *usb) +{ + int r; + + r = usb_reset_configuration(plfxlc_usb_to_usbdev(usb)); + if (r) { + dev_err(plfxlc_usb_dev(usb), "cfg reset failed (%d)\n", r); + return r; + } + return 0; +} + +static void get_usb_req(struct usb_device *udev, void *buffer, + u32 buffer_len, enum plf_usb_req_enum usb_req_id, + struct plf_usb_req *usb_req) +{ + __be32 payload_len_nw = cpu_to_be32(buffer_len + FCS_LEN); + const u8 *buffer_src_p = buffer; + u8 *buffer_dst = usb_req->buf; + u32 temp_usb_len = 0; + + usb_req->id = cpu_to_be32(usb_req_id); + usb_req->len = cpu_to_be32(0); + + /* Copy buffer length into the transmitted buffer, as it is important + * for the Rx MAC to know its exact length. 
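+	 * Only USB_REQ_BEACON_WR requests carry this explicit length prefix;
+	 * all other request types start directly with the payload.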
+	 */
+	if (usb_req->id == cpu_to_be32(USB_REQ_BEACON_WR)) {
+		memcpy(buffer_dst, &payload_len_nw, sizeof(payload_len_nw));
+		buffer_dst += sizeof(payload_len_nw);
+		temp_usb_len += sizeof(payload_len_nw);
+	}
+
+	memcpy(buffer_dst, buffer_src_p, buffer_len);
+	buffer_dst += buffer_len;
+	buffer_src_p += buffer_len;
+	temp_usb_len += buffer_len;
+
+	/* Set the FCS_LEN (4) bytes as 0 for CRC checking. */
+	memset(buffer_dst, 0, FCS_LEN);
+	buffer_dst += FCS_LEN;
+	temp_usb_len += FCS_LEN;
+
+	/* Round the packet to be transmitted to 4 bytes. */
+	if (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT) {
+		memset(buffer_dst, 0, PURELIFI_BYTE_NUM_ALIGNMENT -
+		       (temp_usb_len %
+			PURELIFI_BYTE_NUM_ALIGNMENT));
+		buffer_dst += PURELIFI_BYTE_NUM_ALIGNMENT -
+			      (temp_usb_len %
+			       PURELIFI_BYTE_NUM_ALIGNMENT);
+		temp_usb_len += PURELIFI_BYTE_NUM_ALIGNMENT -
+				(temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT);
+	}
+
+	usb_req->len = cpu_to_be32(temp_usb_len);
+}
+
+int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+			  int buffer_len, enum plf_usb_req_enum usb_req_id,
+			  usb_complete_t complete_fn,
+			  void *context)
+{
+	struct usb_device *udev = interface_to_usbdev(usb->ez_usb);
+	struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
+	int r;
+
+	if (!urb)
+		return -ENOMEM;
+
+	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
+			  (void *)buffer, buffer_len, complete_fn, context);
+
+	r = usb_submit_urb(urb, GFP_ATOMIC);
+	if (r) {
+		usb_free_urb(urb);
+		dev_err(&udev->dev, "Async write submit failed (%d)\n", r);
+	}
+
+	return r;
+}
+
+int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len,
+		    enum plf_usb_req_enum usb_req_id)
+{
+	struct usb_device *udev = interface_to_usbdev(ez_usb);
+	unsigned char *dma_buffer = NULL;
+	struct plf_usb_req usb_req;
+	int usb_bulk_msg_len;
+	int actual_length;
+	int r;
+
+	get_usb_req(udev, buffer, buffer_len, usb_req_id, &usb_req);
+	usb_bulk_msg_len = sizeof(__le32) + sizeof(__le32) +
+			   be32_to_cpu(usb_req.len);
+
+	dma_buffer = kmemdup(&usb_req, usb_bulk_msg_len, GFP_KERNEL);
+
+	if (!dma_buffer) {
+		r = -ENOMEM;
+		goto error;
+	}
+
+	r = usb_bulk_msg(udev,
+			 usb_sndbulkpipe(udev, EP_DATA_OUT),
+			 dma_buffer, usb_bulk_msg_len,
+			 &actual_length, USB_BULK_MSG_TIMEOUT_MS);
+	kfree(dma_buffer);
+error:
+	if (r)
+		dev_err(&udev->dev, "usb_bulk_msg failed (%d)\n", r);
+
+	return r;
+}
+
+static void slif_data_plane_sap_timer_callb(struct timer_list *t)
+{
+	struct plfxlc_usb *usb = from_timer(usb, t, tx.tx_retry_timer);
+
+	plfxlc_send_packet_from_data_queue(usb);
+	timer_setup(&usb->tx.tx_retry_timer,
+		    slif_data_plane_sap_timer_callb, 0);
+	mod_timer(&usb->tx.tx_retry_timer, jiffies + TX_RETRY_BACKOFF_JIFF);
+}
+
+static void sta_queue_cleanup_timer_callb(struct timer_list *t)
+{
+	struct plfxlc_usb *usb = from_timer(usb, t, sta_queue_cleanup);
+	struct plfxlc_usb_tx *tx = &usb->tx;
+	int sidx;
+
+	for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) {
+		if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG))
+			continue;
+		if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) {
+			tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG;
+		} else {
+			eth_zero_addr(tx->station[sidx].mac);
+			tx->station[sidx].flag = 0;
+		}
+	}
+	timer_setup(&usb->sta_queue_cleanup,
+		    sta_queue_cleanup_timer_callb, 0);
+	mod_timer(&usb->sta_queue_cleanup, jiffies + STA_QUEUE_CLEANUP_JIFF);
+}
+
+static int probe(struct usb_interface *intf,
+		 const struct usb_device_id *id)
+{
+	u8 serial_number[PURELIFI_SERIAL_LEN];
+	struct ieee80211_hw *hw = NULL;
+	struct plfxlc_usb_tx *tx;
+	struct plfxlc_chip *chip;
+	struct plfxlc_usb *usb;
+	u8 hw_address[ETH_ALEN];
+	bool hw_registered = false;
+	unsigned int i;
+	int r = 0;
+
+	hw = plfxlc_mac_alloc_hw(intf);
+
+	if (!hw) {
+		r = -ENOMEM;
+		goto error;
+	}
+
+	chip = &plfxlc_hw_mac(hw)->chip;
+	usb = &chip->usb;
+	usb->ez_usb = intf;
+	tx = &usb->tx;
+
+	r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number);
+	if (r) {
+		dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r);
+		goto error;
+	}
+
+	chip->unit_type = STA;
+	dev_dbg(&intf->dev, "Unit type is station\n");
+
+	r = plfxlc_mac_preinit_hw(hw, hw_address);
+	if (r) {
+		dev_err(&intf->dev, "Init mac failed (%d)\n", r);
+		goto error;
+	}
+
+	r = ieee80211_register_hw(hw);
+	if (r) {
+		dev_err(&intf->dev, "Register device failed (%d)\n", r);
+		goto error;
+	}
+	hw_registered = true;
+
+	if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) ==
+	     PURELIFI_XL_VENDOR_ID_0) &&
+	    (le16_to_cpu(interface_to_usbdev(intf)->descriptor.idProduct) ==
+	     PURELIFI_XL_PRODUCT_ID_0)) {
+		r = plfxlc_download_xl_firmware(intf);
+	} else {
+		r = plfxlc_download_fpga(intf);
+	}
+	if (r != 0) {
+		dev_err(&intf->dev, "FPGA download failed (%d)\n", r);
+		goto error;
+	}
+
+	tx->mac_fifo_full = 0;
+	spin_lock_init(&tx->lock);
+
+	msleep(PLF_MSLEEP_TIME);
+	r = plfxlc_usb_init_hw(usb);
+	if (r < 0) {
+		dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r);
+		goto error;
+	}
+
+	msleep(PLF_MSLEEP_TIME);
+	r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON);
+	if (r < 0) {
+		dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r);
+		goto error;
+	}
+
+	msleep(PLF_MSLEEP_TIME);
+	r = plfxlc_chip_set_rate(chip, 8);
+	if (r < 0) {
+		dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r);
+		goto error;
+	}
+
+	msleep(PLF_MSLEEP_TIME);
+	r = plfxlc_usb_wreq(usb->ez_usb,
+			    hw_address, ETH_ALEN, USB_REQ_MAC_WR);
+	if (r < 0) {
+		dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r);
+		goto error;
+	}
+
+	plfxlc_chip_enable_rxtx(chip);
+
+	/* Initialise the data plane Tx queue */
+	for (i = 0; i < MAX_STA_NUM; i++) {
+		skb_queue_head_init(&tx->station[i].data_list);
+		tx->station[i].flag = 0;
+	}
+
+	tx->station[STA_BROADCAST_INDEX].flag |= STATION_CONNECTED_FLAG;
+	for (i = 0; i < ETH_ALEN; i++)
+		tx->station[STA_BROADCAST_INDEX].mac[i] = 0xFF;
+
+	timer_setup(&tx->tx_retry_timer, slif_data_plane_sap_timer_callb, 0);
+	tx->tx_retry_timer.expires = jiffies + TX_RETRY_BACKOFF_JIFF;
+	add_timer(&tx->tx_retry_timer);
+
+	timer_setup(&usb->sta_queue_cleanup,
+		    sta_queue_cleanup_timer_callb, 0);
+	usb->sta_queue_cleanup.expires = jiffies + STA_QUEUE_CLEANUP_JIFF;
+	add_timer(&usb->sta_queue_cleanup);
+
+	plfxlc_mac_init_hw(hw);
+	usb->initialized = true;
+	return 0;
+error:
+	if (hw) {
+		if (hw_registered)
+			ieee80211_unregister_hw(hw);
+		plfxlc_mac_release(plfxlc_hw_mac(hw));
+		ieee80211_free_hw(hw);
+	}
+	dev_err(&intf->dev, "pureLifi: Device error\n");
+	return r;
+}
+
+static void disconnect(struct usb_interface *intf)
+{
+	struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf);
+	struct plfxlc_mac *mac;
+	struct plfxlc_usb *usb;
+
+	/* Either something really bad happened, or
+	 * we're just dealing with a DEVICE_INSTALLER.
+	 */
+	if (!hw)
+		return;
+
+	mac = plfxlc_hw_mac(hw);
+	usb = &mac->chip.usb;
+
+	del_timer_sync(&usb->tx.tx_retry_timer);
+	del_timer_sync(&usb->sta_queue_cleanup);
+
+	ieee80211_unregister_hw(hw);
+
+	plfxlc_chip_disable_rxtx(&mac->chip);
+
+	/* If the disconnect has been caused by a removal of the
+	 * driver module, the reset allows reloading of the driver. If the
+	 * reset is not executed here, the firmware upload in probe() that
+	 * follows a driver reload will fail.
+	 */
+	usb_reset_device(interface_to_usbdev(intf));
+
+	plfxlc_mac_release(mac);
+	ieee80211_free_hw(hw);
+}
+
+static void plfxlc_usb_resume(struct plfxlc_usb *usb)
+{
+	struct plfxlc_mac *mac = plfxlc_usb_to_mac(usb);
+	int r;
+
+	r = plfxlc_op_start(plfxlc_usb_to_hw(usb));
+	if (r < 0) {
+		dev_warn(plfxlc_usb_dev(usb),
+			 "Device resume failed (%d)\n", r);
+
+		if (usb->was_running)
+			set_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+
+		usb_queue_reset_device(usb->intf);
+		return;
+	}
+
+	if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+		r = plfxlc_restore_settings(mac);
+		if (r < 0) {
+			dev_dbg(plfxlc_usb_dev(usb),
+				"Restore failed (%d)\n", r);
+			return;
+		}
+	}
+}
+
+static void plfxlc_usb_stop(struct plfxlc_usb *usb)
+{
+	plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+	plfxlc_usb_disable_tx(usb);
+	plfxlc_usb_disable_rx(usb);
+
+	usb->initialized = false;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+	struct ieee80211_hw *hw = usb_get_intfdata(intf);
+	struct plfxlc_mac *mac;
+	struct plfxlc_usb *usb;
+
+	if (!hw || intf->condition != USB_INTERFACE_BOUND)
+		return 0;
+
+	mac = plfxlc_hw_mac(hw);
+	usb = &mac->chip.usb;
+
+	usb->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+
+	plfxlc_usb_stop(usb);
+
+	return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+	struct ieee80211_hw *hw = usb_get_intfdata(intf);
+	struct plfxlc_mac *mac;
+	struct plfxlc_usb *usb;
+
+	if (!hw || intf->condition != USB_INTERFACE_BOUND)
+		return 0;
+
+	mac = plfxlc_hw_mac(hw);
+	usb = &mac->chip.usb;
+
+	if (usb->was_running)
+		plfxlc_usb_resume(usb);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static struct plfxlc_usb *get_plfxlc_usb(struct usb_interface *intf)
+{
+	struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf);
+	struct plfxlc_mac *mac;
+
+	/* Either something really bad happened, or
+	 * we're just dealing with a DEVICE_INSTALLER.
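+	 * In both cases no ieee80211_hw is attached to the interface,
+	 * so there is no usb state to hand back.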
+	 */
+	if (!hw)
+		return NULL;
+
+	mac = plfxlc_hw_mac(hw);
+	return &mac->chip.usb;
+}
+
+static int suspend(struct usb_interface *interface,
+		   pm_message_t message)
+{
+	struct plfxlc_usb *pl = get_plfxlc_usb(interface);
+	struct plfxlc_mac *mac;
+
+	if (!pl)
+		return -ENODEV;
+	mac = plfxlc_usb_to_mac(pl);
+	if (pl->initialized == 0)
+		return 0;
+	pl->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+	plfxlc_usb_stop(pl);
+	return 0;
+}
+
+static int resume(struct usb_interface *interface)
+{
+	struct plfxlc_usb *pl = get_plfxlc_usb(interface);
+
+	if (!pl)
+		return -ENODEV;
+	if (pl->was_running)
+		plfxlc_usb_resume(pl);
+	return 0;
+}
+
+#endif
+
+static struct usb_driver driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = usb_ids,
+	.probe = probe,
+	.disconnect = disconnect,
+	.pre_reset = pre_reset,
+	.post_reset = post_reset,
+#ifdef CONFIG_PM
+	.suspend = suspend,
+	.resume = resume,
+#endif
+	.disable_hub_initiated_lpm = 1,
+};
+
+static int __init usb_init(void)
+{
+	int r;
+
+	r = usb_register(&driver);
+	if (r) {
+		pr_err("%s usb_register() failed %d\n", driver.name, r);
+		return r;
+	}
+
+	pr_debug("Driver initialized: %s\n", driver.name);
+	return 0;
+}
+
+static void __exit usb_exit(void)
+{
+	usb_deregister(&driver);
+	pr_debug("%s %s\n", driver.name, __func__);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB driver for pureLiFi devices");
+MODULE_AUTHOR("pureLiFi");
+MODULE_VERSION("1.0");
+MODULE_FIRMWARE("plfxlc/lifi-x.bin");
+MODULE_DEVICE_TABLE(usb, usb_ids);
+
+module_init(usb_init);
+module_exit(usb_exit);
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.h b/drivers/net/wireless/purelifi/plfxlc/usb.h
new file mode 100644
index 0000000000..ba2defd593
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#ifndef PLFXLC_USB_H
+#define PLFXLC_USB_H
+
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/usb.h>
+
+#include "intf.h"
+
+#define USB_BULK_MSG_TIMEOUT_MS 2000
+
+#define PURELIFI_X_VENDOR_ID_0 0x16C1
+#define PURELIFI_X_PRODUCT_ID_0 0x1CDE
+#define PURELIFI_XC_VENDOR_ID_0 0x2EF5
+#define PURELIFI_XC_PRODUCT_ID_0 0x0008
+#define PURELIFI_XL_VENDOR_ID_0 0x2EF5
+#define PURELIFI_XL_PRODUCT_ID_0 0x000A /* Station */
+
+#define PLF_FPGA_STATUS_LEN 2
+#define PLF_FPGA_STATE_LEN 9
+#define PLF_BULK_TLEN 16384
+#define PLF_FPGA_MG 6 /* Magic check */
+#define PLF_XL_BUF_LEN 64
+#define PLF_MSG_STATUS_OFFSET 7
+
+#define PLF_USB_TIMEOUT 1000
+#define PLF_MSLEEP_TIME 200
+
+#define PURELIFI_URB_RETRY_MAX 5
+
+#define plfxlc_usb_dev(usb) (&(usb)->intf->dev)
+
+/* Tx retry backoff timer (in milliseconds) */
+#define TX_RETRY_BACKOFF_MS 10
+#define STA_QUEUE_CLEANUP_MS 5000
+
+/* Tx retry backoff timer (in jiffies) */
+#define TX_RETRY_BACKOFF_JIFF ((TX_RETRY_BACKOFF_MS * HZ) / 1000)
+#define STA_QUEUE_CLEANUP_JIFF ((STA_QUEUE_CLEANUP_MS * HZ) / 1000)
+
+/* Ensures that MAX_TRANSFER_SIZE is even.
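+ * (Clearing bit 0 rounds USB_MAX_TRANSFER_SIZE down to the nearest
+ * even value.)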
+ */
+#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1)
+#define plfxlc_urb_dev(urb) (&(urb)->dev->dev)
+
+#define STATION_FIFO_ALMOST_FULL_MESSAGE     0
+#define STATION_FIFO_ALMOST_FULL_NOT_MESSAGE 1
+#define STATION_CONNECT_MESSAGE              2
+#define STATION_DISCONNECT_MESSAGE           3
+
+int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len,
+		    enum plf_usb_req_enum usb_req_id);
+void plfxlc_tx_urb_complete(struct urb *urb);
+
+enum {
+	USB_MAX_RX_SIZE       = 4800,
+	USB_MAX_EP_INT_BUFFER = 64,
+};
+
+struct plfxlc_usb_interrupt {
+	spinlock_t lock;	/* spin lock for usb interrupt buffer */
+	struct urb *urb;
+	void *buffer;
+	int interval;
+};
+
+#define RX_URBS_COUNT 5
+
+struct plfxlc_usb_rx {
+	spinlock_t lock;	/* spin lock for rx urb */
+	struct mutex setup_mutex; /* mutex lock for rx urb */
+	u8 fragment[2 * USB_MAX_RX_SIZE];
+	unsigned int fragment_length;
+	unsigned int usb_packet_size;
+	struct urb **urbs;
+	int urbs_count;
+};
+
+struct plf_station {
+	/* 7...3    | 2         | 1         | 0         |
+	 * Reserved | Heartbeat | FIFO full | Connected |
+	 */
+	unsigned char flag;
+	unsigned char mac[ETH_ALEN];
+	struct sk_buff_head data_list;
+};
+
+struct plfxlc_firmware_file {
+	u32 total_files;
+	u32 total_size;
+	u32 size;
+	u32 start_addr;
+	u32 control_packets;
+} __packed;
+
+#define STATION_CONNECTED_FLAG 0x1
+#define STATION_FIFO_FULL_FLAG 0x2
+#define STATION_HEARTBEAT_FLAG 0x4
+#define STATION_ACTIVE_FLAG    0xFD
+
+#define PURELIFI_SERIAL_LEN 256
+#define STA_BROADCAST_INDEX (AP_USER_LIMIT)
+#define MAX_STA_NUM         (AP_USER_LIMIT + 1)
+
+struct plfxlc_usb_tx {
+	unsigned long enabled;
+	spinlock_t lock;	/* spinlock for USB tx */
+	u8 mac_fifo_full;
+	struct sk_buff_head submitted_skbs;
+	struct usb_anchor submitted;
+	int submitted_urbs;
+	bool stopped;
+	struct timer_list tx_retry_timer;
+	struct plf_station station[MAX_STA_NUM];
+};
+
+/* Contains the usb parts. The structure doesn't require a lock because intf
+ * will not be changed after initialization.
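+ * Fields that do change at runtime are protected by the locks embedded
+ * in the rx and tx members.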
+ */ +struct plfxlc_usb { + struct timer_list sta_queue_cleanup; + struct plfxlc_usb_rx rx; + struct plfxlc_usb_tx tx; + struct usb_interface *intf; + struct usb_interface *ez_usb; + u8 req_buf[64]; /* plfxlc_usb_iowrite16v needs 62 bytes */ + u8 sidx; /* store last served */ + bool rx_usb_enabled; + bool initialized; + bool was_running; + bool link_up; +}; + +enum endpoints { + EP_DATA_IN = 2, + EP_DATA_OUT = 8, +}; + +enum devicetype { + DEVICE_LIFI_X = 0, + DEVICE_LIFI_XC = 1, + DEVICE_LIFI_XL = 1, +}; + +enum { + PLF_BIT_ENABLED = 1, + PLF_BIT_MAX = 2, +}; + +int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer, + int buffer_len, enum plf_usb_req_enum usb_req_id, + usb_complete_t complete_fn, void *context); + +static inline struct usb_device * +plfxlc_usb_to_usbdev(struct plfxlc_usb *usb) +{ + return interface_to_usbdev(usb->intf); +} + +static inline struct ieee80211_hw * +plfxlc_intf_to_hw(struct usb_interface *intf) +{ + return usb_get_intfdata(intf); +} + +static inline struct ieee80211_hw * +plfxlc_usb_to_hw(struct plfxlc_usb *usb) +{ + return plfxlc_intf_to_hw(usb->intf); +} + +void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw, + struct usb_interface *intf); +void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb); +void plfxlc_usb_release(struct plfxlc_usb *usb); +void plfxlc_usb_disable_rx(struct plfxlc_usb *usb); +void plfxlc_usb_enable_tx(struct plfxlc_usb *usb); +void plfxlc_usb_disable_tx(struct plfxlc_usb *usb); +int plfxlc_usb_tx(struct plfxlc_usb *usb, struct sk_buff *skb); +int plfxlc_usb_enable_rx(struct plfxlc_usb *usb); +int plfxlc_usb_init_hw(struct plfxlc_usb *usb); +const char *plfxlc_speed(enum usb_device_speed speed); + +/* Firmware declarations */ +int plfxlc_download_xl_firmware(struct usb_interface *intf); +int plfxlc_download_fpga(struct usb_interface *intf); + +int plfxlc_upload_mac_and_serial(struct usb_interface *intf, + unsigned char *hw_address, + unsigned char *serial_number); + +#endif /* PLFXLC_USB_H */ diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c new file mode 100644 index 0000000000..4186d825d1 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -0,0 +1,4068 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852c.h" +#include "rtw8852c_rfk.h" +#include "rtw8852c_rfk_table.h" +#include "rtw8852c_table.h" + +#define _TSSI_DE_MASK GENMASK(21, 12) +static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858}; +static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860}; +static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838}; +static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840}; +static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848}; +static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850}; +static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828}; +static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830}; + +static const u32 rtw8852c_backup_bb_regs[] = { + 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x823c, 0x8224, 0x8220, + 0xc1d4, 0xc1d8, 0xc1e8 +}; + +static const u32 rtw8852c_backup_rf_regs[] = { + 0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005 +}; + +#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs) +#define BACKUP_RF_REGS_NR 
ARRAY_SIZE(rtw8852c_backup_rf_regs) + +#define RXK_GROUP_NR 4 +static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316}; +static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00}; +static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318}; +static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00}; +static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360}; +static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3}; + +#define TXK_GROUP_NR 3 +static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; +static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7}; +static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e}; +static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12}; +static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; +static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7}; +static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e}; +static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12}; +static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; +static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6}; +static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e}; +static const u32 _txk_g_itqt[TXK_GROUP_NR] = { 0x12, 0x12, 0x12}; + +static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = { + {0x8190, 0x8194, 0x8198, 0x81a4}, + {0x81a8, 0x81c4, 0x81c8, 0x81e8}, +}; + +static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n", + rtwdev->dbcc_en, phy_idx); + + if (!rtwdev->dbcc_en) + return RF_AB; + + if (phy_idx == RTW89_PHY_0) + return RF_A; + else + return RF_B; +} + +static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + backup_bb_reg_val[i] = + rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i], + MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]backup bb reg : %x, value =%x\n", + rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], + u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + backup_rf_reg_val[i] = + rtw89_read_rf(rtwdev, rf_path, + rtw8852c_backup_rf_regs[i], RFREG_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path, + rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i], + MASKDWORD, backup_bb_reg_val[i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]restore bb reg : %x, value =%x\n", + rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], + u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i], + RFREG_MASK, backup_rf_reg_val[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path, + rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) +{ + u8 path; + u32 rf_mode; + int ret; + + for (path = 0; path < RF_PATH_MAX; path++) { + if 
(!(kpath & BIT(path))) + continue; + + ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2, + 2, 5000, false, rtwdev, path, 0x00, + RR_MOD_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", + path, ret); + } +} + +static void _dack_dump(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + u8 t; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[0][0], dack->addck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[1][0], dack->addck_d[1][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[0][0], dack->dadck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[1][0], dack->dadck_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[0][0], dack->biask_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[1][0], dack->biask_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } +} + +static void _addck_backup(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); + dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, + B_ADDCKR0_A0); + dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, + B_ADDCKR0_A1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0); + dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, + B_ADDCKR1_A0); + dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, + B_ADDCKR1_A1); +} + +static void _addck_reload(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, + dack->addck_d[0][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, + dack->addck_d[0][1]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, + dack->addck_d[1][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, + dack->addck_d[1][1]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3); +} + +static void _dack_backup_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); + dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev, + R_DACK_S0P2, + B_DACK_S0M0); + rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); + dack->msbk_d[0][1][i] = 
rtw89_phy_read32_mask(rtwdev, + R_DACK_S0P3, + B_DACK_S0M1); + } + dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, + B_DACK_BIAS00); + dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, + B_DACK_BIAS01); + dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, + B_DACK_DADCK00); + dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, + B_DACK_DADCK01); +} + +static void _dack_backup_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i); + dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev, + R_DACK10S, + B_DACK10S); + rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i); + dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev, + R_DACK11S, + B_DACK11S); + } + dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, + B_DACK_BIAS10); + dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, + B_DACK_BIAS11); + dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, + B_DACK_DADCK10); + dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, + B_DACK_DADCK11); +} + +static void _dack_reload_by_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 index) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 idx_offset, path_offset; + u32 val32, offset, addr; + u8 i; + + idx_offset = (index == 0 ? 0 : 0x14); + path_offset = (path == RF_PATH_A ? 0 : 0x28); + offset = idx_offset + path_offset; + + rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl); + + /* msbk_d: 15/14/13/12 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i + 12] << (i * 8); + addr = 0xc200 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* msbk_d: 11/10/9/8 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i + 8] << (i * 8); + addr = 0xc204 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* msbk_d: 7/6/5/4 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i + 4] << (i * 8); + addr = 0xc208 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* msbk_d: 3/2/1/0 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i] << (i * 8); + addr = 0xc20c + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* dadak_d/biask_d */ + val32 = (dack->biask_d[path][index] << 22) | + (dack->dadck_d[path][index] << 14); + addr = 0xc210 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_phy_write32_set(rtwdev, addr, BIT(1)); +} + +static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u8 i; + + for (i = 0; i < 2; i++) + _dack_reload_by_path(rtwdev, path, i); +} + +static void _addck(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 val; + int ret; + + /* S0 */ + 
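/* Kick off the S0 ADDCK: reset the block, pulse the enable bit, then
+	 * trigger the calibration and poll for completion below; a timeout
+	 * is only logged and flagged in dack->addck_timeout.
+	 */
+	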
rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
+	fsleep(1);
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
+
+	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+				       1, 10000, false, rtwdev, 0xc0fc, BIT(0));
+	if (ret) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+		dack->addck_timeout[0] = true;
+	}
+
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
+
+	/* S1 */
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
+	udelay(1);
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
+
+	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+				       1, 10000, false, rtwdev, 0xc1fc, BIT(0));
+	if (ret) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
+		dack->addck_timeout[1] = true;
+	}
+	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
+}
+
+static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
+{
+	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+				 &rtw8852c_dack_reset_defs_a_tbl,
+				 &rtw8852c_dack_reset_defs_b_tbl);
+}
+
+enum adc_ck {
+	ADC_NA = 0,
+	ADC_480M = 1,
+	ADC_960M = 2,
+	ADC_1920M = 3,
+};
+
+enum dac_ck {
+	DAC_40M = 0,
+	DAC_80M = 1,
+	DAC_120M = 2,
+	DAC_160M = 3,
+	DAC_240M = 4,
+	DAC_320M = 5,
+	DAC_480M = 6,
+	DAC_960M = 7,
+};
+
+enum rf_mode {
+	RF_SHUT_DOWN = 0x0,
+	RF_STANDBY = 0x1,
+	RF_TX = 0x2,
+	RF_RX = 0x3,
+	RF_TXIQK = 0x4,
+	RF_DPK = 0x5,
+	RF_RXK1 = 0x6,
+	RF_RXK2 = 0x7,
+};
+
+static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
+				enum dac_ck ck)
+{
+	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
+
+	if (!force)
+		return;
+
+	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
+	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
+}
+
+static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
+				enum adc_ck ck)
+{
+	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
+
+	if (!force)
+		return;
+
+	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
+	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+}
+
+static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
+{
+	if (s0) {
+		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
+		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
+		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
+		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
+			return false;
+	} else {
+		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
+		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
+		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
+		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
+			return false;
+	}
+
+	return true;
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_dack_info *dack = &rtwdev->dack;
+	bool done;
+	int ret;
+
+	rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
+	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);
+
+	_dack_reset(rtwdev, RF_PATH_A);
+
+	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
+	ret = read_poll_timeout_atomic(_check_dack_done, done, done,
+				       1, 10000, false, rtwdev, true);
+	if (ret) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
+		dack->msbk_timeout[0] = true;
+	}
+	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
+	rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
+
+	_dack_backup_s0(rtwdev);
+	_dack_reload(rtwdev, RF_PATH_A);
+	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+}
+
+static void _dack_s1(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_dack_info *dack = &rtwdev->dack;
+	bool done;
+	int ret;
+
+	rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
+	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);
+
+	_dack_reset(rtwdev, RF_PATH_B);
+
+	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
+	ret = read_poll_timeout_atomic(_check_dack_done, done, done,
+				       1, 10000, false, rtwdev, false);
+	if (ret) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
+		dack->msbk_timeout[1] = true;
+	}
+	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
+	rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
+
+	_dack_backup_s1(rtwdev);
+	_dack_reload(rtwdev, RF_PATH_B);
+	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+	_dack_s0(rtwdev);
+	_dack_s1(rtwdev);
+}
+
+static void _drck(struct rtw89_dev *rtwdev)
+{
+	u32 val;
+	int ret;
+
+	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
+	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+				       1, 10000, false, rtwdev, 0xc0c8, BIT(3));
+	if (ret)
+		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
+
+	rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);
+
+	val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
+	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
+	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
+		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
+}
+
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+	struct rtw89_dack_info *dack = &rtwdev->dack;
+	u32 rf0_0, rf1_0;
+	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+
+	dack->dack_done = false;
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
+	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
+	_drck(rtwdev);
+
+	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+	_addck(rtwdev);
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+
+	_addck_backup(rtwdev);
+	_addck_reload(rtwdev);
+	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+	_dack(rtwdev);
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+
+	_dack_dump(rtwdev);
+	dack->dack_done = true;
+	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD,
RFREG_MASK, rf1_0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1); + dack->dack_cnt++; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); +} + +#define RTW8852C_NCTL_VER 0xd +#define RTW8852C_IQK_VER 0x2a +#define RTW8852C_IQK_SS 2 +#define RTW8852C_IQK_THR_REK 8 +#define RTW8852C_IQK_CFIR_GROUP_NR 4 + +enum rtw8852c_iqk_type { + ID_TXAGC, + ID_G_FLOK_COARSE, + ID_A_FLOK_COARSE, + ID_G_FLOK_FINE, + ID_A_FLOK_FINE, + ID_FLOK_VBUFFER, + ID_TXK, + ID_RXAGC, + ID_RXK, + ID_NBTXK, + ID_NBRXK, +}; + +static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxgac) +{ + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxgac); + else + rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxgac); +} + +static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); + else + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202); + + switch (iqk_info->iqk_bw[path]) { + case RTW89_CHANNEL_WIDTH_20: + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); + rtw8852c_rxck_force(rtwdev, path, true, ADC_480M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); + rtw8852c_rxck_force(rtwdev, path, true, ADC_960M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); + break; + case RTW89_CHANNEL_WIDTH_160: + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); + rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); + break; + default: + break; + } + + rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl); + + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101); + else + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202); +} + +static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype) +{ + u32 tmp; + u32 val; + int ret; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 1, 8200, false, rtwdev, 0xbff8, MASKBYTE0); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n"); + + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, 
"[IQK]S%x, ret=%d\n", path, ret); + tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp); + + return false; +} + +static bool _iqk_one_shot(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path, u8 ktype) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 addr_rfc_ctl = R_UPD_CLK + (path << 13); + u32 iqk_cmd; + bool fail; + + switch (ktype) { + case ID_TXAGC: + iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1); + break; + case ID_A_FLOK_COARSE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x008 | (1 << (4 + path)); + break; + case ID_G_FLOK_COARSE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x108 | (1 << (4 + path)); + break; + case ID_A_FLOK_FINE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x508 | (1 << (4 + path)); + break; + case ID_G_FLOK_FINE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x208 | (1 << (4 + path)); + break; + case ID_FLOK_VBUFFER: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x308 | (1 << (4 + path)); + break; + case ID_TXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); + iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8); + break; + case ID_RXAGC: + iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1); + break; + case ID_RXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8); + break; + case ID_NBTXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); + iqk_cmd = 0x408 | (1 << (4 + path)); + break; + case ID_NBRXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x608 | (1 << (4 + path)); + break; + default: + return false; + } + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); + fsleep(15); + fail = _iqk_check_cal(rtwdev, path, ktype); + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); + + return fail; +} + +static bool _rxk_group_sel(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u32 tmp; + u32 bkrf0; + u8 gp; + + bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW); + if (path == RF_PATH_B) { + rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp); + } + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9); + break; + } + + fsleep(10); + + for (gp = 0; gp < RXK_GROUP_NR; gp++) { + switch (iqk_info->iqk_band[path]) { + case 
RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, + _rxk_g_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, + _rxk_g_idxattc2[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, + _rxk_a_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, + _rxk_a_idxattc2[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, + _rxk_a6_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, + _rxk_a6_idxattc2[gp]); + break; + } + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SET, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_GP_V1, gp); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + } + + if (path == RF_PATH_B) + rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0); + + if (fail) { + iqk_info->nb_rxcfir[path] = 0x40000002; + iqk_info->is_wb_rxiqk[path] = false; + } else { + iqk_info->nb_rxcfir[path] = 0x40000000; + iqk_info->is_wb_rxiqk[path] = true; + } + + return false; +} + +static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u32 tmp; + u32 bkrf0; + u8 gp = 0x2; + + bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW); + if (path == RF_PATH_B) { + rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp); + } + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9); + break; + } + + fsleep(10); + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]); + break; + } + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + + if (path == RF_PATH_B) + rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0); + + 
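/* Restore the RF bandwidth field saved at the top of this function. */
+	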
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0); + + if (fail) + iqk_info->nb_rxcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD) | 0x2; + else + iqk_info->nb_rxcfir[path] = 0x40000002; + + iqk_info->is_wb_rxiqk[path] = false; + return fail; +} + +static bool _txk_group_sel(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u8 gp; + + for (gp = 0; gp < TXK_GROUP_NR; gp++) { + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + _txk_g_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + _txk_g_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + _txk_g_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, + R_KIP_IQP + (path << 8), + MASKDWORD, _txk_g_itqt[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + _txk_a_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + _txk_a_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + _txk_a_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, + R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a_itqt[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + _txk_a6_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + _txk_a6_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + _txk_a6_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, + R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a6_itqt[gp]); + break; + default: + break; + } + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SET, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_G2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_GP, gp + 1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + } + + if (fail) { + iqk_info->nb_txcfir[path] = 0x40000002; + iqk_info->is_wb_txiqk[path] = false; + } else { + iqk_info->nb_txcfir[path] = 0x40000000; + iqk_info->is_wb_txiqk[path] = true; + } + + return fail; +} + +static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u8 gp = 0x2; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, _txk_g_itqt[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a_itqt[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]); + 
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a6_itqt[gp]); + break; + default: + break; + } + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + + if (!fail) + iqk_info->nb_txcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), + MASKDWORD) | 0x2; + else + iqk_info->nb_txcfir[path] = 0x40000002; + + iqk_info->is_wb_txiqk[path] = false; + + return fail; +} + +static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 idx = mcc_info->table_idx; + bool is_fail1, is_fail2; + u32 val; + u32 core_i; + u32 core_q; + u32 vbuff_i; + u32 vbuff_q; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK); + core_i = FIELD_GET(RR_TXMO_COI, val); + core_q = FIELD_GET(RR_TXMO_COQ, val); + + if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d) + is_fail1 = true; + else + is_fail1 = false; + + iqk_info->lok_idac[idx][path] = val; + + val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK); + vbuff_i = FIELD_GET(RR_LOKVB_COI, val); + vbuff_q = FIELD_GET(RR_LOKVB_COQ, val); + + if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d) + is_fail2 = true; + else + is_fail2 = false; + + iqk_info->lok_vbuf[idx][path] = val; + + return is_fail1 || is_fail2; +} + +static bool _iqk_lok(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 tmp_id = 0x0; + bool fail = false; + bool tmp = false; + + /* Step 0: Init RF gain & tone idx= 8.25Mhz */ + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ); + + /* Step 1 START: _lok_coarse_fine_wi_swap */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_G_FLOK_COARSE; + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_A_FLOK_COARSE; + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_A_FLOK_COARSE; + break; + default: + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id); + iqk_info->lok_cor_fail[0][path] = tmp; + + /* Step 2 */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path 
<< 8), + B_KIP_IQP_IQSW, 0x1b); + break; + default: + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); + + /* Step 3 */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_G_FLOK_FINE; + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_A_FLOK_FINE; + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_A_FLOK_FINE; + break; + default: + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id); + iqk_info->lok_fin_fail[0][path] = tmp; + + /* Step 4 large rf gain */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); + fail = _lok_finetune_check(rtwdev, path); + + return fail; +} + +static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + fsleep(10); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + fsleep(10); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + fsleep(10); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); + break; + } +} + +static void 
_iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 tmp; + bool flag; + + iqk_info->thermal[path] = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + iqk_info->thermal_rek_en = false; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path, + iqk_info->thermal[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path, + iqk_info->lok_cor_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path, + iqk_info->lok_fin_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path, + iqk_info->iqk_tx_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path, + iqk_info->iqk_rx_fail[0][path]); + + flag = iqk_info->lok_cor_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag); + flag = iqk_info->lok_fin_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag); + flag = iqk_info->iqk_tx_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag); + flag = iqk_info->iqk_rx_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag); + + tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD); + iqk_info->bp_iqkenable[path] = tmp; + tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + iqk_info->bp_txkresult[path] = tmp; + tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD); + iqk_info->bp_rxkresult[path] = tmp; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, + iqk_info->iqk_times); + + tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4)); + if (tmp != 0x0) + iqk_info->iqk_fail_cnt++; + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4), + iqk_info->iqk_fail_cnt); +} + +static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + _iqk_txk_setting(rtwdev, path); + iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path); + + if (iqk_info->is_nbiqk) + iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path); + else + iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path); + + _iqk_rxk_setting(rtwdev, path); + if (iqk_info->is_nbiqk) + iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path); + else + iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path); + + _iqk_info_iqk(rtwdev, phy_idx, path); +} + +static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + struct rtw89_hal *hal = &rtwdev->hal; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + iqk_info->iqk_band[path] = hal->current_band_type; + iqk_info->iqk_bw[path] = hal->current_band_width; + iqk_info->iqk_ch[path] = hal->current_channel; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path, + iqk_info->iqk_band[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n", + path, iqk_info->iqk_bw[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n", + path, iqk_info->iqk_ch[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy, + rtwdev->dbcc_en ? "on" : "off", + iqk_info->iqk_band[path] == 0 ? 
"2G" : + iqk_info->iqk_band[path] == 1 ? "5G" : "6G", + iqk_info->iqk_ch[path], + iqk_info->iqk_bw[path] == 0 ? "20M" : + iqk_info->iqk_bw[path] == 1 ? "40M" : "80M"); + if (!rtwdev->dbcc_en) + iqk_info->syn1to2 = 0x1; + else + iqk_info->syn1to2 = 0x3; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16), + iqk_info->iqk_band[path]); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16), + iqk_info->iqk_bw[path]); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16), + iqk_info->iqk_ch[path]); + + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER); +} + +static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path) +{ + _iqk_by_path(rtwdev, phy_idx, path); +} + +static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, + iqk_info->nb_txcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, + iqk_info->nb_rxcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, + 0x00001219 + (path << 4)); + fsleep(200); + fail = _iqk_check_cal(rtwdev, path, 0x12); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); +} + +static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_iqk_afebb_restore_defs_a_tbl, + &rtw8852c_iqk_afebb_restore_defs_b_tbl); + + rtw8852c_disable_rxagc(rtwdev, path, 0x1); +} + +static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + u8 idx = 0; + + idx = mcc_info->table_idx; + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); +} + +static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__); + + /* 01_BB_AFE_for DPK_S0_20210820 */ + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); + + /* disable rxgac */ + rtw8852c_disable_rxagc(rtwdev, path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1); + + rtw8852c_txck_force(rtwdev, path, true, DAC_960M); + 
+	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);
+
+	rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
+	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);
+
+	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
+	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
+}
+
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+	u32 rf_reg5, rck_val = 0;
+	u32 val;
+	int ret;
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
+		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+	/* RCK trigger */
+	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
+				       false, rtwdev, path, 0x1c, BIT(3));
+	if (ret)
+		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
+
+	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+
+	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK,
+		    "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
+		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
+		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
+}
+
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+	u8 ch, path;
+
+	rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
+	if (iqk_info->is_iqk_init)
+		return;
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+	iqk_info->is_iqk_init = true;
+	iqk_info->is_nbiqk = false;
+	iqk_info->iqk_fft_en = false;
+	iqk_info->iqk_sram_en = false;
+	iqk_info->iqk_cfir_en = false;
+	iqk_info->iqk_xym_en = false;
+	iqk_info->thermal_rek_en = false;
+	iqk_info->iqk_times = 0x0;
+
+	for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
+		iqk_info->iqk_channel[ch] = 0x0;
+		for (path = 0; path < RTW8852C_IQK_SS; path++) {
+			iqk_info->lok_cor_fail[ch][path] = false;
+			iqk_info->lok_fin_fail[ch][path] = false;
+			iqk_info->iqk_tx_fail[ch][path] = false;
+			iqk_info->iqk_rx_fail[ch][path] = false;
+			iqk_info->iqk_mcc_ch[ch][path] = 0x0;
+			iqk_info->iqk_table_idx[path] = 0x0;
+		}
+	}
+}
+
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+		   enum rtw89_phy_idx phy_idx, u8 path)
+{
+	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+	u32 backup_bb_val[BACKUP_BB_REGS_NR];
+	u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
+	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK,
+		    "[IQK]==========IQK start!!!!!==========\n");
+	iqk_info->iqk_times++;
+	iqk_info->kcount = 0;
+	iqk_info->version = RTW8852C_IQK_VER;
+
+	rtw89_debug(rtwdev,
RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); + _iqk_get_ch_info(rtwdev, phy_idx, path); + _rfk_backup_bb_reg(rtwdev, backup_bb_val); + _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); + _iqk_macbb_setting(rtwdev, phy_idx, path); + _iqk_preset(rtwdev, path); + _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_restore(rtwdev, path); + _iqk_afebb_restore(rtwdev, phy_idx, path); + _rfk_restore_bb_reg(rtwdev, backup_bb_val); + _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); +} + +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +{ + switch (_kpath(rtwdev, phy_idx)) { + case RF_A: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + break; + case RF_B: + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + case RF_AB: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + default: + break; + } +} + +static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path) +{ + int ret; + u32 val; + + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); + + ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, + 2, 1000, false, rtwdev, path, 0x93, BIT(5)); + if (ret) + rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path); + else + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path); + + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); +} + +static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path, + bool is_afe) +{ + u8 res; + + rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0); + + _rx_dck_toggle(rtwdev, path); + if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0) + return; + res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE); + if (res > 1) { + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res); + _rx_dck_toggle(rtwdev, path); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1); + } +} + +#define RTW8852C_RF_REL_VERSION 34 +#define RTW8852C_DPK_VER 0x10 +#define RTW8852C_DPK_TH_AVG_NUM 4 +#define RTW8852C_DPK_RF_PATH 2 +#define RTW8852C_DPK_KIP_REG_NUM 5 +#define RTW8852C_DPK_RXSRAM_DBG 0 + +enum rtw8852c_dpk_id { + LBK_RXIQK = 0x06, + SYNC = 0x10, + MDPK_IDL = 0x11, + MDPK_MPA = 0x12, + GAIN_LOSS = 0x13, + GAIN_CAL = 0x14, + DPK_RXAGC = 0x15, + KIP_PRESET = 0x16, + KIP_RESTORE = 0x17, + DPK_TXAGC = 0x19, + D_KIP_PRESET = 0x28, + D_TXAGC = 0x29, + D_RXAGC = 0x2a, + D_SYNC = 0x2b, + D_GAIN_LOSS = 0x2c, + D_MDPK_IDL = 0x2d, + D_GAIN_NORM = 0x2f, + D_KIP_THERMAL = 0x30, + D_KIP_RESTORE = 0x31 +}; + +#define DPK_TXAGC_LOWER 0x2e +#define DPK_TXAGC_UPPER 0x3f +#define DPK_TXAGC_INVAL 0xff + +enum dpk_agc_step { + DPK_AGC_STEP_SYNC_DGAIN, + DPK_AGC_STEP_GAIN_LOSS_IDX, + DPK_AGC_STEP_GL_GT_CRITERION, + DPK_AGC_STEP_GL_LT_CRITERION, + DPK_AGC_STEP_SET_TX_GAIN, +}; + +static void _rf_direct_cntrl(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool off); + +static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[], + u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path) +{ + u8 i; + + for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) { + reg_bkup[path][i] = + rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); + + rtw89_debug(rtwdev, 
RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", + reg[i] + (path << 8), reg_bkup[path][i]); + } +} + +static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[], + u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path) +{ + u8 i; + + for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) { + rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), + MASKDWORD, reg_bkup[path][i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n", + reg[i] + (path << 8), reg_bkup[path][i]); + } +} + +static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, enum rtw8852c_dpk_id id) +{ + u16 dpk_cmd; + u32 val; + int ret; + + dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12)); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0); + mdelay(10); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot for %s = 0x%x (ret=%d)\n", + id == 0x06 ? "LBK_RXIQK" : + id == 0x10 ? "SYNC" : + id == 0x11 ? "MDPK_IDL" : + id == 0x12 ? "MDPK_MPA" : + id == 0x13 ? "GAIN_LOSS" : "PWR_CAL", + dpk_cmd, ret); + + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot over 20ms!!!!\n"); + return 1; + } + + return 0; +} + +static void _dpk_information(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + struct rtw89_hal *hal = &rtwdev->hal; + + u8 kidx = dpk->cur_idx[path]; + + dpk->bp[path][kidx].band = hal->current_band_type; + dpk->bp[path][kidx].ch = hal->current_channel; + dpk->bp[path][kidx].bw = hal->current_band_width; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", + path, dpk->cur_idx[path], phy, + rtwdev->is_tssi_mode[path] ? "on" : "off", + rtwdev->dbcc_en ? "on" : "off", + dpk->bp[path][kidx].band == 0 ? "2G" : + dpk->bp[path][kidx].band == 1 ? "5G" : "6G", + dpk->bp[path][kidx].ch, + dpk->bp[path][kidx].bw == 0 ? "20M" : + dpk->bp[path][kidx].bw == 1 ? "40M" : "80M"); +} + +static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + /*1. Keep ADC_fifo reset*/ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); + + /*2. BB for IQK DBG mode*/ + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd); + + /*3.Set DAC clk*/ + rtw8852c_txck_force(rtwdev, path, true, DAC_960M); + + /*4. Set ADC clk*/ + rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), + B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041); + + /*5. 
ADDA fifo rst*/ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path); +} + +static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), + B_P0_NRBW_DBG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path); +} + +static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_pause) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, is_pause); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, + is_pause ? "pause" : "resume"); +} + +static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip) +{ + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n", + ctrl_by_kip ? "KIP" : "BB"); +} + +static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force) +{ + rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force); + rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n", + path, force ? 
"on" : "off"); +} + +static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE); + _dpk_kip_control_rfc(rtwdev, path, false); + _dpk_txpwr_bb_force(rtwdev, path, false); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); +} + +static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ +#define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */ + u8 cur_rxbb; + u32 rf_11, reg_81cc; + + rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); + + _dpk_kip_control_rfc(rtwdev, path, false); + + cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); + rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK); + reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_SW); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f); + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3); + + _dpk_kip_control_rfc(rtwdev, path, true); + + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX); + + _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD)); + + _dpk_kip_control_rfc(rtwdev, path, false); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc); + + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); + + _dpk_kip_control_rfc(rtwdev, path, true); +} + +static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, + enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x50121 | BIT(rtwdev->dbcc_en)); + rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK), + rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK)); + } else { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x50101 | BIT(rtwdev->dbcc_en)); + rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); + + if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) { + rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8); + rtw89_write_rf(rtwdev, 
path, RR_LOGEN, RR_LOGEN_RPT, 0xd); + } else { + rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd); + } + + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + + if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0); + } +} + +static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30); + } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00); + } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0); + } else { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" : + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); +} + +static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ +#define DPK_SYNC_TH_DC_I 200 +#define DPK_SYNC_TH_DC_Q 200 +#define DPK_SYNC_TH_CORR 170 + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u16 dc_i, dc_q; + u8 corr_val, corr_idx, rxbb; + u8 rxbb_ov; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + + corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); + corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); + + dpk->corr_idx[path][kidx] = corr_idx; + dpk->corr_val[path][kidx] = corr_val; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); + + dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); + + dc_i = abs(sign_extend32(dc_i, 11)); + dc_q = abs(sign_extend32(dc_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n", + path, corr_idx, corr_val, dc_i, dc_q); + + dpk->dc_i[path][kidx] = dc_i; + dpk->dc_q[path][kidx] = dc_q; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8); + rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB); + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31); + rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n", + path, rxbb, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE), + rxbb_ov); + + if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || + corr_val < DPK_SYNC_TH_CORR) + return true; + else + return false; +} + +static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) +{ + u16 dgain = 0x0; + + rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); + + dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x 
(%d)\n", dgain, dgain); + + return dgain; +} + +static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) +{ + u8 result; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); + + result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result); + + return result; +} + +static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10); + dpk->cur_k_set = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1; +} + +static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 dbm, bool set_from_bb) +{ + if (set_from_bb) { + dbm = clamp_t(u8, dbm, 7, 24); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm); + rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2); + } + _dpk_one_shot(rtwdev, phy, path, D_TXAGC); + _dpk_kset_query(rtwdev, path); +} + +static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS); + _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false); + + rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0); + + return _dpk_gainloss_read(rtwdev); +} + +static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) +{ + u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); + + if (is_check) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); + val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val1_i = abs(sign_extend32(val1_i, 11)); + val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val1_q = abs(sign_extend32(val1_q, 11)); + + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); + val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val2_i = abs(sign_extend32(val2_i, 11)); + val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val2_q = abs(sign_extend32(val2_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", + phy_div(val1_i * val1_i + val1_q * val1_q, + val2_i * val2_i + val2_q * val2_q)); + } else { + for (i = 0; i < 32; i++) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); + } + } + + if (val1_i * val1_i + val1_q * val1_q >= (val2_i * val2_i + val2_q * val2_q) * 8 / 5) + return true; + else + return false; +} + +static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, D_RXAGC); + + return _dpk_sync_check(rtwdev, path, kidx); +} + +static void _dpk_read_rxsram(struct rtw89_dev *rtwdev) +{ + u32 addr; + + rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl); + + for (addr = 0; addr < 0x200; addr++) { + rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 
0x%07x\n", addr, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); + } + + rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl); +} + +static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n"); +} + +static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 step = DPK_AGC_STEP_SYNC_DGAIN; + u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0; + u8 tmp_rxbb; + u8 goout = 0, agc_cnt = 0; + u16 dgain = 0; + bool is_fail = false; + int limit = 200; + + do { + switch (step) { + case DPK_AGC_STEP_SYNC_DGAIN: + is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx); + + if (RTW8852C_DPK_RXSRAM_DBG) + _dpk_read_rxsram(rtwdev); + + if (is_fail) { + goout = 1; + break; + } + + dgain = _dpk_dgain_read(rtwdev); + + if (dgain > 0x5fc || dgain < 0x556) { + _dpk_one_shot(rtwdev, phy, path, D_SYNC); + dgain = _dpk_dgain_read(rtwdev); + } + + if (agc_cnt == 0) { + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) + _dpk_bypass_rxiqc(rtwdev, path); + else + _dpk_lbk_rxiqk(rtwdev, phy, path); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + break; + + case DPK_AGC_STEP_GAIN_LOSS_IDX: + tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx); + + if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) || + tmp_gl_idx >= 7) + step = DPK_AGC_STEP_GL_GT_CRITERION; + else if (tmp_gl_idx == 0) + step = DPK_AGC_STEP_GL_LT_CRITERION; + else + step = DPK_AGC_STEP_SET_TX_GAIN; + break; + + case DPK_AGC_STEP_GL_GT_CRITERION: + if (tmp_dbm <= 7) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n"); + } else { + tmp_dbm = max_t(u8, tmp_dbm - 3, 7); + _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); + } + step = DPK_AGC_STEP_SYNC_DGAIN; + agc_cnt++; + break; + + case DPK_AGC_STEP_GL_LT_CRITERION: + if (tmp_dbm >= 24) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n"); + } else { + tmp_dbm = min_t(u8, tmp_dbm + 2, 24); + _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); + } + step = DPK_AGC_STEP_SYNC_DGAIN; + agc_cnt++; + break; + + case DPK_AGC_STEP_SET_TX_GAIN: + _dpk_kip_control_rfc(rtwdev, path, false); + tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); + if (tmp_rxbb + tmp_gl_idx > 0x1f) + tmp_rxbb = 0x1f; + else + tmp_rxbb = tmp_rxbb + tmp_gl_idx; + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n", + tmp_gl_idx, tmp_rxbb); + _dpk_kip_control_rfc(rtwdev, path, true); + goout = 1; + break; + default: + goout = 1; + break; + } + } while (!goout && agc_cnt < 6 && --limit > 0); + + if (limit <= 0) + rtw89_warn(rtwdev, "[DPK] exceed loop limit\n"); + + return is_fail; +} + +static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) +{ + static const struct rtw89_rfk_tbl *order_tbls[] = { + &rtw8852c_dpk_mdpd_order0_defs_tbl, + &rtw8852c_dpk_mdpd_order1_defs_tbl, + &rtw8852c_dpk_mdpd_order2_defs_tbl, + &rtw8852c_dpk_mdpd_order3_defs_tbl, + }; + + if (order >= ARRAY_SIZE(order_tbls)) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order); + return; + } + + rtw89_rfk_parser(rtwdev, order_tbls[order]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", + order 
== 0x0 ? "(5,3,1)" : + order == 0x1 ? "(5,3,0)" : + order == 0x2 ? "(5,0,0)" : "(7,3,1)"); +} + +static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 cnt; + u8 ov_flag; + u32 dpk_sync; + + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1); + + if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1) + _dpk_set_mdpd_para(rtwdev, 0x2); + else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1) + _dpk_set_mdpd_para(rtwdev, 0x1); + else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1) + _dpk_set_mdpd_para(rtwdev, 0x0); + else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 || + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 || + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20) + _dpk_set_mdpd_para(rtwdev, 0x2); + else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 || + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) + _dpk_set_mdpd_para(rtwdev, 0x1); + else + _dpk_set_mdpd_para(rtwdev, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0); + fsleep(1000); + + _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync); + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf); + ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); + for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n"); + _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf); + ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); + } + + if (ov_flag) { + _dpk_set_mdpd_para(rtwdev, 0x2); + _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); + } +} + +static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + bool is_reload = false; + u8 idx, cur_band, cur_ch; + + cur_band = rtwdev->hal.current_band_type; + cur_ch = rtwdev->hal.current_channel; + + for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { + if (cur_band != dpk->bp[path][idx].band || + cur_ch != dpk->bp[path][idx].ch) + continue; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), + B_COEF_SEL_MDPD, idx); + dpk->cur_idx[path] = idx; + is_reload = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] reload S%d[%d] success\n", path, idx); + } + + return is_reload; +} + +static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on) +{ + rtw89_rfk_parser(rtwdev, turn_on ? 
&rtw8852c_dpk_kip_pwr_clk_on_defs_tbl :
+				 &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
+}
+
+static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+				  enum rtw89_rf_path path, u8 kidx)
+{
+	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
+			       rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+	if (rtwdev->hal.cv == CHIP_CAV)
+		rtw89_phy_write32_mask(rtwdev,
+				       R_DPD_CH0A + (path << 8) + (kidx << 2),
+				       B_DPD_SEL, 0x01);
+	else
+		rtw89_phy_write32_mask(rtwdev,
+				       R_DPD_CH0A + (path << 8) + (kidx << 2),
+				       B_DPD_SEL, 0x0c);
+
+	_dpk_kip_control_rfc(rtwdev, path, true);
+	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
+
+	_dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
+}
+
+static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define _DPK_PARA_TXAGC GENMASK(15, 10)
+#define _DPK_PARA_THER GENMASK(31, 26)
+	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+	u32 para;
+
+	para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+				     MASKDWORD);
+
+	dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
+	dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
+		    dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
+}
+
+static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+				      enum rtw89_rf_path path, u8 kidx, bool is_execute)
+{
+	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+	if (is_execute) {
+		rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
+		rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);
+
+		_dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
+	} else {
+		rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+				       0x0000007F, 0x5b);
+	}
+	dpk->bp[path][kidx].gs =
+		rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+				      0x0000007F);
+}
+
+static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
+{
+	u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
+	u8 val;
+
+	switch (val32) {
+	case 0:
+		val = 0x6;
+		break;
+	case 1:
+		val = 0x2;
+		break;
+	case 2:
+		val = 0x0;
+		break;
+	case 3:
+		val = 0x7;
+		break;
+	default:
+		val = 0xff;
+		break;
+	}
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
+
+	return val;
+}
+
+static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+		    enum rtw89_rf_path path, u8 kidx)
+{
+	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
+	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+			       B_DPD_ORDER, _dpk_order_convert(rtwdev));
+
+	dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
+	dpk->bp[path][kidx].path_ok = true;
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
+		    path, kidx, dpk->bp[path][kidx].mdpd_en);
+
+	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+			       B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);
+
+	_dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
+}
+
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+		      enum rtw89_rf_path path, u8 gain)
+{
+	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+	u8 kidx = dpk->cur_idx[path];
+	u8 init_xdbm = 15;
+	bool is_fail;
+
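+	/* Editor's note, not from the original patch: one DPK pass below is
+	 * RF/BB setup -> RX DCK -> KIP preset -> BB TX power forced to
+	 * init_xdbm -> AGC search (_dpk_agc) -> IDL model fit (_dpk_idl_mpa)
+	 * -> parameter query -> enable (_dpk_on); an AGC failure jumps
+	 * straight to the restore path at the _error label.
+	 */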
+	rtw89_debug(rtwdev, RTW89_DBG_RFK,
+		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
+	_dpk_kip_control_rfc(rtwdev, path, false);
+	_rf_direct_cntrl(rtwdev, path, false);
+	rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
+	_dpk_rf_setting(rtwdev, gain, path, kidx);
+	_set_rx_dck(rtwdev, phy, path, false);
+	_dpk_kip_pwr_clk_onoff(rtwdev, true);
+	_dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
+	_dpk_txpwr_bb_force(rtwdev, path, true);
+	_dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
+	_dpk_tpg_sel(rtwdev, path, kidx);
+
+	is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
+	if (is_fail)
+		goto _error;
+
+	_dpk_idl_mpa(rtwdev, phy, path, kidx);
+	_dpk_para_query(rtwdev, path, kidx);
+	_dpk_on(rtwdev, phy, path, kidx);
+
+_error:
+	_dpk_kip_control_rfc(rtwdev, path, false);
+	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
+		    dpk->cur_k_set, is_fail ? "needs check" : "succeeded");
+
+	return is_fail;
+}
+
+static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
+{
+	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+	u8 kidx = dpk->cur_idx[path];
+
+	dpk->bp[path][kidx].path_ok = false;
+}
+
+static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
+{
+	if (is_bybb)
+		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+	else
+		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+}
+
+static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
+			    enum rtw89_phy_idx phy, u8 kpath)
+{
+	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8};
+	u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
+	u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
+	u8 path;
+	bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
+
+	if (dpk->is_dpk_reload_en) {
+		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+			if (!(kpath & BIT(path)))
+				continue;
+
+			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+			if (!reloaded[path] && dpk->bp[path][0].ch != 0)
+				dpk->cur_idx[path] = !dpk->cur_idx[path];
+			else
+				_dpk_onoff(rtwdev, path, false);
+		}
+	} else {
+		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
+			dpk->cur_idx[path] = 0;
+	}
+
+	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK,
+			    "[DPK] ========= S%d[%d] DPK Init =========\n",
+			    path, dpk->cur_idx[path]);
+		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
+		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+		_dpk_information(rtwdev, phy, path);
+		_dpk_init(rtwdev, path);
+		if (rtwdev->is_tssi_mode[path])
+			_dpk_tssi_pause(rtwdev, path, true);
+	}
+
+	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK,
+			    "[DPK] ========= S%d[%d] DPK Start =========\n",
+			    path, dpk->cur_idx[path]);
+		rtw8852c_disable_rxagc(rtwdev, path, 0x0);
+		_dpk_drf_direct_cntrl(rtwdev, path, false);
+		_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+		is_fail = _dpk_main(rtwdev, phy, path, 1);
+		_dpk_onoff(rtwdev, path, is_fail);
+	}
+
+	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK,
+			    "[DPK] ========= S%d[%d] DPK Restore =========\n",
+			    path, dpk->cur_idx[path]);
+		_dpk_kip_restore(rtwdev, phy, path);
+		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
+		_rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
+		_dpk_bb_afe_restore(rtwdev, path);
+		rtw8852c_disable_rxagc(rtwdev, path, 0x1);
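+		/* Editor's note, not from the original patch: TSSI tracking
+		 * was paused in the init loop above, so it is resumed only
+		 * here, once KIP, RF and BB/AFE state for this path has been
+		 * fully restored.
+		 */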
if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, false); + } + + _dpk_kip_pwr_clk_onoff(rtwdev, false); +} + +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_fem_info *fem = &rtwdev->fem; + + if (rtwdev->hal.cv == CHIP_CAV && rtwdev->hal.current_band_type != RTW89_BAND_2G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n"); + return true; + } else if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_6g && rtwdev->hal.current_band_type == RTW89_BAND_6G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n"); + return true; + } + + return false; +} + +static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 path, kpath; + + kpath = _kpath(rtwdev, phy); + + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + if (kpath & BIT(path)) + _dpk_onoff(rtwdev, path, true); + } +} + +static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", + RTW8852C_DPK_VER, rtwdev->hal.cv, + RTW8852C_RF_REL_VERSION); + + if (_dpk_bypass_check(rtwdev, phy)) + _dpk_force_bypass(rtwdev, phy); + else + _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); + + if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1) + rtw8852c_rx_dck(rtwdev, phy, false); +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool off) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 val, kidx = dpk->cur_idx[path]; + + val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ? + dpk->bp[path][kidx].mdpd_en : 0; + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_MEN, val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, + kidx, dpk->is_dpk_enable && !off ? 
"enable" : "disable"); +} + +static void _dpk_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 path, kidx; + u8 txagc_rf = 0; + s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0; + u8 cur_ther; + s8 delta_ther = 0; + s16 pwsf_tssi_ofst; + + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + kidx = dpk->cur_idx[path]; + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", + path, kidx, dpk->bp[path][kidx].ch); + + txagc_rf = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f); + txagc_bb = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2); + txagc_bb_tp = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP); + + /* report from KIP */ + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf); + cur_ther = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH); + txagc_ofst = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF); + pwsf_tssi_ofst = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI); + pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12); + + cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] thermal now = %d\n", cur_ther); + + if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0) + delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther; + + delta_ther = delta_ther * 1 / 2; + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n", + delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk); + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n", + txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf, + dpk->bp[path][kidx].txagc_dpk); + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n", + txagc_ofst, pwsf_tssi_ofst); + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", + txagc_bb_tp, txagc_bb); + + if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 && + txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) { + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther); + + rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), + 0x07FC0000, 0x78 - delta_ther); + } + } +} + +static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl); + + if (path == RF_PATH_A) + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_sys_defs_2g_a_tbl, + &rtw8852c_tssi_sys_defs_5g_a_tbl); + else + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_sys_defs_2g_b_tbl, + &rtw8852c_tssi_sys_defs_5g_b_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl, + &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl, + &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl); +} + +static void _tssi_set_dck(struct rtw89_dev *rtwdev, 
enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + if (path == RF_PATH_A) { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl); + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_dck_defs_2g_a_tbl, + &rtw8852c_tssi_dck_defs_5g_a_tbl); + } else { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl); + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_dck_defs_2g_b_tbl, + &rtw8852c_tssi_dck_defs_5g_b_tbl); + } +} + +static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_set_bbgain_split_a_tbl, + &rtw8852c_tssi_set_bbgain_split_b_tbl); +} + +static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ +#define RTW8852C_TSSI_GET_VAL(ptr, idx) \ +({ \ + s8 *__ptr = (ptr); \ + u8 __idx = (idx), __i, __v; \ + u32 __val = 0; \ + for (__i = 0; __i < 4; __i++) { \ + __v = (__ptr[__idx + __i]); \ + __val |= (__v << (8 * __i)); \ + } \ + __val; \ +}) + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel; + u8 subband = rtwdev->hal.current_subband; + const s8 *thm_up_a = NULL; + const s8 *thm_down_a = NULL; + const s8 *thm_up_b = NULL; + const s8 *thm_down_b = NULL; + u8 thermal = 0xff; + s8 thm_ofst[64] = {0}; + u32 tmp = 0; + u8 i, j; + + switch (subband) { + default: + case RTW89_CH_2G: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n; + break; + case RTW89_CH_5G_BAND_1: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0]; + break; + case RTW89_CH_5G_BAND_3: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1]; + break; + case RTW89_CH_5G_BAND_4: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2]; + break; + case RTW89_CH_6G_BAND_IDX0: + case RTW89_CH_6G_BAND_IDX1: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0]; + break; + case RTW89_CH_6G_BAND_IDX2: + case RTW89_CH_6G_BAND_IDX3: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1]; + break; + case RTW89_CH_6G_BAND_IDX4: + case RTW89_CH_6G_BAND_IDX5: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2]; + break; + case RTW89_CH_6G_BAND_IDX6: + case 
RTW89_CH_6G_BAND_IDX7: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3]; + break; + } + + if (path == RF_PATH_A) { + thermal = tssi_info->thermal[RF_PATH_A]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); + + if (thermal == 0xff) { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_a[i++] : + -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + thm_up_a[i++] : + thm_up_a[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); + + } else { + thermal = tssi_info->thermal[RF_PATH_B]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1); + + if (thermal == 0xff) { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_b[i++] : + -thm_down_b[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 
+ thm_up_b[i++] : + thm_up_b[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0); + } +#undef RTW8852C_TSSI_GET_VAL +} + +static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + if (path == RF_PATH_A) { + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl, + &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl); + } else { + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl, + &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl); + } +} + +static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + const struct rtw89_rfk_tbl *tbl; + + if (path == RF_PATH_A) { + if (band == RTW89_BAND_2G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl; + else if (band == RTW89_BAND_6G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl; + else + tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl; + } else { + if (band == RTW89_BAND_2G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl; + else if (band == RTW89_BAND_6G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl; + else + tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl; + } + + rtw89_rfk_parser(rtwdev, tbl); +} + +static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_slope_defs_a_tbl, + &rtw8852c_tssi_slope_defs_b_tbl); +} + +static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_run_slope_defs_a_tbl, + &rtw8852c_tssi_run_slope_defs_b_tbl); +} + +static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_track_defs_a_tbl, + &rtw8852c_tssi_track_defs_b_tbl); +} + +static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl, + &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl); +} + +static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + for (i = path; i < path_max; i++) { + _tssi_set_track(rtwdev, phy, i); + _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i); + + rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A, + &rtw8852c_tssi_enable_defs_a_tbl, + &rtw8852c_tssi_enable_defs_b_tbl); + + tssi_info->base_thermal[i] = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]); + rtwdev->is_tssi_mode[i] = true; + } +} + +static void _tssi_disable(struct 
rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + for (i = path; i < path_max; i++) { + if (i == RF_PATH_A) { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl); + rtwdev->is_tssi_mode[RF_PATH_A] = false; + } else if (i == RF_PATH_B) { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl); + rtwdev->is_tssi_mode[RF_PATH_B] = false; + } + } +} + +static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 13: + return 4; + case 14: + return 5; + } + + return 0; +} + +#define TSSI_EXTRA_GROUP_BIT (BIT(31)) +#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) +#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) + +static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 14: + return 4; + case 36 ... 40: + return 5; + case 41 ... 43: + return TSSI_EXTRA_GROUP(5); + case 44 ... 48: + return 6; + case 49 ... 51: + return TSSI_EXTRA_GROUP(6); + case 52 ... 56: + return 7; + case 57 ... 59: + return TSSI_EXTRA_GROUP(7); + case 60 ... 64: + return 8; + case 100 ... 104: + return 9; + case 105 ... 107: + return TSSI_EXTRA_GROUP(9); + case 108 ... 112: + return 10; + case 113 ... 115: + return TSSI_EXTRA_GROUP(10); + case 116 ... 120: + return 11; + case 121 ... 123: + return TSSI_EXTRA_GROUP(11); + case 124 ... 128: + return 12; + case 129 ... 131: + return TSSI_EXTRA_GROUP(12); + case 132 ... 136: + return 13; + case 137 ... 139: + return TSSI_EXTRA_GROUP(13); + case 140 ... 144: + return 14; + case 149 ... 153: + return 15; + case 154 ... 156: + return TSSI_EXTRA_GROUP(15); + case 157 ... 161: + return 16; + case 162 ... 164: + return TSSI_EXTRA_GROUP(16); + case 165 ... 169: + return 17; + case 170 ... 172: + return TSSI_EXTRA_GROUP(17); + case 173 ... 177: + return 18; + } + + return 0; +} + +static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 5: + return 0; + case 6 ... 8: + return TSSI_EXTRA_GROUP(0); + case 9 ... 13: + return 1; + case 14 ... 16: + return TSSI_EXTRA_GROUP(1); + case 17 ... 21: + return 2; + case 22 ... 24: + return TSSI_EXTRA_GROUP(2); + case 25 ... 29: + return 3; + case 33 ... 37: + return 4; + case 38 ... 40: + return TSSI_EXTRA_GROUP(4); + case 41 ... 45: + return 5; + case 46 ... 48: + return TSSI_EXTRA_GROUP(5); + case 49 ... 53: + return 6; + case 54 ... 56: + return TSSI_EXTRA_GROUP(6); + case 57 ... 61: + return 7; + case 65 ... 69: + return 8; + case 70 ... 72: + return TSSI_EXTRA_GROUP(8); + case 73 ... 77: + return 9; + case 78 ... 80: + return TSSI_EXTRA_GROUP(9); + case 81 ... 85: + return 10; + case 86 ... 88: + return TSSI_EXTRA_GROUP(10); + case 89 ... 93: + return 11; + case 97 ... 101: + return 12; + case 102 ... 104: + return TSSI_EXTRA_GROUP(12); + case 105 ... 109: + return 13; + case 110 ... 112: + return TSSI_EXTRA_GROUP(13); + case 113 ... 
117: + return 14; + case 118 ... 120: + return TSSI_EXTRA_GROUP(14); + case 121 ... 125: + return 15; + case 129 ... 133: + return 16; + case 134 ... 136: + return TSSI_EXTRA_GROUP(16); + case 137 ... 141: + return 17; + case 142 ... 144: + return TSSI_EXTRA_GROUP(17); + case 145 ... 149: + return 18; + case 150 ... 152: + return TSSI_EXTRA_GROUP(18); + case 153 ... 157: + return 19; + case 161 ... 165: + return 20; + case 166 ... 168: + return TSSI_EXTRA_GROUP(20); + case 169 ... 173: + return 21; + case 174 ... 176: + return TSSI_EXTRA_GROUP(21); + case 177 ... 181: + return 22; + case 182 ... 184: + return TSSI_EXTRA_GROUP(22); + case 185 ... 189: + return 23; + case 193 ... 197: + return 24; + case 198 ... 200: + return TSSI_EXTRA_GROUP(24); + case 201 ... 205: + return 25; + case 206 ... 208: + return TSSI_EXTRA_GROUP(25); + case 209 ... 213: + return 26; + case 214 ... 216: + return TSSI_EXTRA_GROUP(26); + case 217 ... 221: + return 27; + case 225 ... 229: + return 28; + case 230 ... 232: + return TSSI_EXTRA_GROUP(28); + case 233 ... 237: + return 29; + case 238 ... 240: + return TSSI_EXTRA_GROUP(29); + case 241 ... 245: + return 30; + case 246 ... 248: + return TSSI_EXTRA_GROUP(30); + case 249 ... 253: + return 31; + } + + return 0; +} + +static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 8: + return 0; + case 9 ... 14: + return 1; + case 36 ... 48: + return 2; + case 49 ... 51: + return TSSI_EXTRA_GROUP(2); + case 52 ... 64: + return 3; + case 100 ... 112: + return 4; + case 113 ... 115: + return TSSI_EXTRA_GROUP(4); + case 116 ... 128: + return 5; + case 132 ... 144: + return 6; + case 149 ... 177: + return 7; + } + + return 0; +} + +static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 13: + return 0; + case 14 ... 16: + return TSSI_EXTRA_GROUP(0); + case 17 ... 29: + return 1; + case 33 ... 45: + return 2; + case 46 ... 48: + return TSSI_EXTRA_GROUP(2); + case 49 ... 61: + return 3; + case 65 ... 77: + return 4; + case 78 ... 80: + return TSSI_EXTRA_GROUP(4); + case 81 ... 93: + return 5; + case 97 ... 109: + return 6; + case 110 ... 112: + return TSSI_EXTRA_GROUP(6); + case 113 ... 125: + return 7; + case 129 ... 141: + return 8; + case 142 ... 144: + return TSSI_EXTRA_GROUP(8); + case 145 ... 157: + return 9; + case 161 ... 173: + return 10; + case 174 ... 176: + return TSSI_EXTRA_GROUP(10); + case 177 ... 189: + return 11; + case 193 ... 205: + return 12; + case 206 ... 208: + return TSSI_EXTRA_GROUP(12); + case 209 ... 221: + return 13; + case 225 ... 237: + return 14; + case 238 ... 240: + return TSSI_EXTRA_GROUP(14); + case 241 ... 
253: + return 15; + } + + return 0; +} + +static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + enum rtw89_band band = rtwdev->hal.current_band_type; + u8 ch = rtwdev->hal.current_channel; + u32 gidx, gidx_1st, gidx_2nd; + s8 de_1st; + s8 de_2nd; + s8 val; + + if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) { + gidx = _tssi_get_ofdm_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", + path, gidx); + + if (IS_TSSI_EXTRA_GROUP(gidx)) { + gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); + gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); + de_1st = tssi_info->tssi_mcs[path][gidx_1st]; + de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; + val = (de_1st + de_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", + path, val, de_1st, de_2nd); + } else { + val = tssi_info->tssi_mcs[path][gidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); + } + } else { + gidx = _tssi_get_6g_ofdm_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", + path, gidx); + + if (IS_TSSI_EXTRA_GROUP(gidx)) { + gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); + gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); + de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st]; + de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd]; + val = (de_1st + de_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", + path, val, de_1st, de_2nd); + } else { + val = tssi_info->tssi_6g_mcs[path][gidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); + } + } + + return val; +} + +static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + enum rtw89_band band = rtwdev->hal.current_band_type; + u8 ch = rtwdev->hal.current_channel; + u32 tgidx, tgidx_1st, tgidx_2nd; + s8 tde_1st = 0; + s8 tde_2nd = 0; + s8 val; + + if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) { + tgidx = _tssi_get_trim_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", + path, tgidx); + + if (IS_TSSI_EXTRA_GROUP(tgidx)) { + tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); + tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); + tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; + tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; + val = (tde_1st + tde_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", + path, val, tde_1st, tde_2nd); + } else { + val = tssi_info->tssi_trim[path][tgidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", + path, val); + } + } else { + tgidx = _tssi_get_6g_trim_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", + path, tgidx); + + if (IS_TSSI_EXTRA_GROUP(tgidx)) { + tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); + tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); + tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st]; + tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd]; + val = (tde_1st + tde_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", + path, val, tde_1st, tde_2nd); + } else { + val = 
tssi_info->tssi_trim_6g[path][tgidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", + path, val); + } + } + + return val; +} + +static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel; + u8 gidx; + s8 ofdm_de; + s8 trim_de; + s32 val; + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", + phy, ch); + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + for (i = path; i < path_max; i++) { + gidx = _tssi_get_cck_group(rtwdev, ch); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = tssi_info->tssi_cck[i][gidx] + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", + i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", + _tssi_de_cck_long[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], + _TSSI_DE_MASK)); + + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = ofdm_de + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", + i, ofdm_de, trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", + _tssi_de_mcs_20m[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], + _TSSI_DE_MASK)); + } +} + +static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en, + enum rtw89_rf_path path) +{ + static const u32 tssi_trk[2] = {0x5818, 0x7818}; + static const u32 tssi_en[2] = {0x5820, 0x7820}; + + if (en) { + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0); + rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0); + if (rtwdev->dbcc_en && path == RF_PATH_B) + _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1); + else + _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0); + } else { + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1); + rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1); + } +} + +void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx) +{ + if (!rtwdev->dbcc_en) { + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); + } else { + if (phy_idx == RTW89_PHY_0) + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); + else + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); + } +} + +static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + enum rtw89_bandwidth bw, bool is_dav) +{ + u32 rf_reg18; + u32 reg_reg18_addr; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); + if (is_dav) + reg_reg18_addr = RR_CFGCH; + else + 
reg_reg18_addr = RR_CFGCH_V1;
+
+	rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
+	rf_reg18 &= ~RR_CFGCH_BW;
+
+	switch (bw) {
+	case RTW89_CHANNEL_WIDTH_5:
+	case RTW89_CHANNEL_WIDTH_10:
+	case RTW89_CHANNEL_WIDTH_20:
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
+		break;
+	case RTW89_CHANNEL_WIDTH_40:
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
+		break;
+	case RTW89_CHANNEL_WIDTH_80:
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd);
+		break;
+	case RTW89_CHANNEL_WIDTH_160:
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
+		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
+		break;
+	default:
+		break;
+	}
+
+	rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
+}
+
+static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+		     enum rtw89_bandwidth bw)
+{
+	bool is_dav;
+	u8 kpath, path;
+	u32 tmp = 0;
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+	kpath = _kpath(rtwdev, phy);
+
+	for (path = 0; path < 2; path++) {
+		if (!(kpath & BIT(path)))
+			continue;
+
+		/* write both copies of the BW config; is_dav selects which
+		 * CFGCH register set gets programmed
+		 */
+		is_dav = true;
+		_bw_setting(rtwdev, path, bw, is_dav);
+		is_dav = false;
+		_bw_setting(rtwdev, path, bw, is_dav);
+		if (rtwdev->dbcc_en)
+			continue;
+
+		/* For CHIP_CAV parts, copy path A's RR_CFGCH over to path B
+		 * (with path B quiesced via RR_RSV1) so both paths share the
+		 * same channel configuration.
+		 */
+		if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
+			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+			tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+			rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
+			rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
+			fsleep(100);
+			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
+		}
+	}
+}
+
+static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+			u8 central_ch, enum rtw89_band band, bool is_dav)
+{
+	u32 rf_reg18;
+	u32 reg_reg18_addr;
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+	if (is_dav)
+		reg_reg18_addr = RR_CFGCH;
+	else
+		reg_reg18_addr = RR_CFGCH_V1;
+
+	rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
+	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
+	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
+
+	switch (band) {
+	case RTW89_BAND_2G:
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
+		break;
+	case RTW89_BAND_5G:
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
+		break;
+	case RTW89_BAND_6G:
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
+		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
+		break;
+	default:
+		break;
+	}
+	rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
+	fsleep(100);
+}
+
+static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+		     u8 central_ch, enum rtw89_band band)
+{
+	u8 kpath, path;
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+	if (band != RTW89_BAND_6G) {
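+		/* Center channels 15-35, 65-99 and 145-148 fall in the gaps
+		 * between the 2G/5G sub-bands, and nothing above 177 is
+		 * valid; bail out on any of them.
+		 */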
+		if ((central_ch > 14 && central_ch < 36) ||
+		    (central_ch > 64 && central_ch < 100) ||
+		    (central_ch > 144 && central_ch < 149) || central_ch > 177)
+			return;
+	} else {
+		if (central_ch > 253 || central_ch == 2)
+			return;
+	}
+
+	kpath = _kpath(rtwdev, phy);
+
+	for (path = 0; path < 2; path++) {
+		if (kpath & BIT(path)) {
+			_ch_setting(rtwdev, path, central_ch, band, true);
+			_ch_setting(rtwdev, path, central_ch, band, false);
+		}
+	}
+}
+
+static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+		     enum rtw89_bandwidth bw)
+{
+	u8 kpath;
+	u8 path;
+	u32 val;
+
+	kpath = _kpath(rtwdev, phy);
+	for (path = 0; path < 2; path++) {
+		if (!(kpath & BIT(path)))
+			continue;
+
+		rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
+		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
+		switch (bw) {
+		case RTW89_CHANNEL_WIDTH_20:
+			val = 0x1b;
+			break;
+		case RTW89_CHANNEL_WIDTH_40:
+			val = 0x13;
+			break;
+		case RTW89_CHANNEL_WIDTH_80:
+			val = 0xb;
+			break;
+		case RTW89_CHANNEL_WIDTH_160:
+		default:
+			val = 0x3;
+			break;
+		}
+		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
+		rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
+	}
+}
+
+static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_lck_info *lck = &rtwdev->lck;
+	int path;
+
+	/* snapshot the thermal reading the next LCK decision is based on */
+	for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+		lck->thermal[path] =
+			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+			    "[LCK] path=%d thermal=0x%x\n", path, lck->thermal[path]);
+	}
+}
+
+static void _lck(struct rtw89_dev *rtwdev)
+{
+	u32 tmp18[2];
+	int path = rtwdev->dbcc_en ? 2 : 1;
+	int i;
+
+	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");
+
+	tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+	tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+
+	/* re-trigger LO calibration by re-writing the current channel
+	 * config with RR_LCK_TRG asserted
+	 */
+	for (i = 0; i < path; i++) {
+		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+		rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
+		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+	}
+
+	_lck_keep_thermal(rtwdev);
+}
+
+/* thermal delta that re-triggers LCK */
+#define RTW8852C_LCK_TH 8
+
+void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_lck_info *lck = &rtwdev->lck;
+	u8 cur_thermal;
+	int delta;
+	int path;
+
+	for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+		cur_thermal =
+			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+		delta = abs((int)cur_thermal - lck->thermal[path]);
+
+		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+			    "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
+			    path, cur_thermal, delta);
+
+		if (delta >= RTW8852C_LCK_TH) {
+			_lck(rtwdev);
+			return;
+		}
+	}
+}
+
+void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
+{
+	_lck_keep_thermal(rtwdev);
+}
+
+static
+void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+			 u8 central_ch, enum rtw89_band band,
+			 enum rtw89_bandwidth bw)
+{
+	_ctrl_ch(rtwdev, phy, central_ch, band);
+	_ctrl_bw(rtwdev, phy, bw);
+	_rxbb_bw(rtwdev, phy, bw);
+}
+
+void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
+			     struct rtw89_channel_params *param,
+			     enum rtw89_phy_idx phy_idx)
+{
+	rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, param->center_chan, param->band_type,
+			    param->bandwidth);
+}
+
+void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+	struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+	u8 idx = mcc_info->table_idx;
+	int i;
+
+	for (i = 0; i < RTW89_IQK_CHS_NR; i++) {
+		if (mcc_info->ch[idx] == 0)
+			break;
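+		/* slot already used: advance circularly to find a free one */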
+		if (++idx >= RTW89_IQK_CHS_NR)
+			idx = 0;
+	}
+
+	mcc_info->table_idx = idx;
+	mcc_info->ch[idx] = rtwdev->hal.current_channel;
+	mcc_info->band[idx] = rtwdev->hal.current_band_type;
+}
+
+void rtw8852c_rck(struct rtw89_dev *rtwdev)
+{
+	u8 path;
+
+	for (path = 0; path < 2; path++)
+		_rck(rtwdev, path);
+}
+
+void rtw8852c_dack(struct rtw89_dev *rtwdev)
+{
+	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+
+	/* bracket the calibration with BT-coexistence notifications */
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
+	_dac_cal(rtwdev, false);
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
+}
+
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+	u32 tx_en;
+	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+	_iqk_init(rtwdev);
+	_iqk(rtwdev, phy_idx, false);
+
+	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+#define RXDCK_VER_8852C 0xe
+
+void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+{
+	struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
+	u8 path, kpath;
+	u32 rf_reg5;
+
+	kpath = _kpath(rtwdev, phy);
+	rtw89_debug(rtwdev, RTW89_DBG_RFK,
+		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
+		    RXDCK_VER_8852C, rtwdev->hal.cv);
+
+	for (path = 0; path < 2; path++) {
+		if (!(kpath & BIT(path)))
+			continue;
+
+		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+		if (rtwdev->is_tssi_mode[path])
+			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+					       B_P0_TSSI_TRK_EN, 0x1);
+		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+		_set_rx_dck(rtwdev, phy, path, is_afe);
+		rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+		if (rtwdev->is_tssi_mode[path])
+			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+					       B_P0_TSSI_TRK_EN, 0x0);
+	}
+}
+
+/* thermal delta that re-triggers RX DCK */
+#define RTW8852C_RX_DCK_TH 8
+
+void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
+	u8 cur_thermal;
+	int delta;
+	int path;
+
+	for (path = 0; path < RF_PATH_NUM_8852C; path++) {
+		cur_thermal =
+			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+		delta = abs((int)cur_thermal - rx_dck->thermal[path]);
+
+		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+			    "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
+			    path, cur_thermal, delta);
+
+		if (delta >= RTW8852C_RX_DCK_TH) {
+			rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
+			return;
+		}
+	}
+}
+
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+	u32 tx_en;
+	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
+	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+	rtwdev->dpk.is_dpk_enable = true;
+	rtwdev->dpk.is_dpk_reload_en = false;
+	_dpk(rtwdev, phy_idx, false);
+
+	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
+}
+
+void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
+{
+	_dpk_track(rtwdev);
+}
+
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+	u32 i, path 
= RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + _tssi_disable(rtwdev, phy); + + for (i = path; i < path_max; i++) { + _tssi_set_sys(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i); + _tssi_set_bbgain_split(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_slope_cal_org(rtwdev, phy, i); + _tssi_set_aligk_default(rtwdev, phy, i); + _tssi_set_slope(rtwdev, phy, i); + _tssi_run_slope(rtwdev, phy, i); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); +} + +void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", + __func__, phy); + + if (!rtwdev->is_tssi_mode[RF_PATH_A]) + return; + if (!rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + _tssi_disable(rtwdev, phy); + + for (i = path; i < path_max; i++) { + _tssi_set_sys(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_slope_cal_org(rtwdev, phy, i); + _tssi_set_aligk_default(rtwdev, phy, i); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); +} + +static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, bool enable) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 i; + + if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + if (enable) { + /* SCAN_START */ + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_A] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, + B_TXAGC_BB); + if (tssi_info->default_txagc_offset[RF_PATH_A]) + break; + } + } + + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_B] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, + B_TXAGC_BB_S1); + if (tssi_info->default_txagc_offset[RF_PATH_B]) + break; + } + } + } else { + /* SCAN_END */ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, + tssi_info->default_txagc_offset[RF_PATH_A]); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, + tssi_info->default_txagc_offset[RF_PATH_B]); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); + } +} + +void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, + bool scan_start, enum rtw89_phy_idx phy_idx) +{ + if (scan_start) + rtw8852c_tssi_default_txagc(rtwdev, phy_idx, true); + else + rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false); +} diff --git 
a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h new file mode 100644 index 0000000000..5118a49da8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#ifndef __RTW89_8852C_RFK_H__ +#define __RTW89_8852C_RFK_H__ + +#include "core.h" + +void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_rck(struct rtw89_dev *rtwdev); +void rtw8852c_dack(struct rtw89_dev *rtwdev); +void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe); +void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev); +void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_dpk_track(struct rtw89_dev *rtwdev); +void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx); +void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, + enum rtw89_phy_idx phy_idx); +void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx); +void rtw8852c_lck_init(struct rtw89_dev *rtwdev); +void rtw8852c_lck_track(struct rtw89_dev *rtwdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c new file mode 100644 index 0000000000..d727d528b3 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#include "rtw8852c_rfk_table.h" + +static const struct rtw89_reg5_def rtw8852c_dack_reload_defs[] = { + RTW89_DECL_RFK_WM(0xc004, BIT(17), 0x1), + RTW89_DECL_RFK_WM(0xc024, BIT(17), 0x1), + RTW89_DECL_RFK_WM(0xc104, BIT(17), 0x1), + RTW89_DECL_RFK_WM(0xc124, BIT(17), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reload_defs); + +static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_a[] = { + RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x0), + RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_a); + +static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_b[] = { + RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x0), + RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_b); + +static const struct rtw89_reg5_def rtw8852c_dack_defs_s0[] = { + RTW89_DECL_RFK_WM(0x12b8, BIT(30), 0x1), + RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1), + RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0xc004, 0xfff00000, 0x30), + RTW89_DECL_RFK_WM(0xc024, 0xfff00000, 0x30), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s0); + +static const struct rtw89_reg5_def rtw8852c_dack_defs_s1[] = { + RTW89_DECL_RFK_WM(0x32b8, BIT(30), 0x1), + RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1), + RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0xc104, 0xfff00000, 0x30), + RTW89_DECL_RFK_WM(0xc124, 0xfff00000, 0x30), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s1); + +static const struct rtw89_reg5_def rtw8852c_drck_defs[] = { + RTW89_DECL_RFK_WM(0xc0c4, BIT(6), 0x0), + RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x1), + RTW89_DECL_RFK_DELAY(1), + 
RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_drck_defs); + +static const struct rtw89_reg5_def rtw8852c_iqk_rxk_cfg_defs[] = { + RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0f), + RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x03), + RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0001), + RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0041), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_rxk_cfg_defs); + +static const struct rtw89_reg5_def rtw8852c_iqk_afebb_restore_defs_a[] = { + RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x00100000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x10000000, 0x0), + RTW89_DECL_RFK_WM(0x5670, MASKDWORD, 0x00000000), + RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00), + RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x0), + RTW89_DECL_RFK_WRF(RF_PATH_A, 0x10005, 0x00001, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_a); + +static const struct rtw89_reg5_def rtw8852c_iqk_afebb_restore_defs_b[] = { + RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x00200000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x20000000, 0x0), + RTW89_DECL_RFK_WM(0x7670, MASKDWORD, 0x00000000), + RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00), + RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x0), + RTW89_DECL_RFK_WRF(RF_PATH_B, 0x10005, 0x00001, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_b); + +static const struct rtw89_reg5_def rtw8852c_read_rxsram_pre_defs[] = { + RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x1), + RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x1), + RTW89_DECL_RFK_WM(0x80d4, MASKDWORD, 0x00020000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_pre_defs); + +static const struct rtw89_reg5_def rtw8852c_read_rxsram_post_defs[] = { + RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x0), + RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_post_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order0_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x0), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x2), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order0_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order1_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x1), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x1), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order1_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order2_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x2), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x0), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order2_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order3_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x3), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x3), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order3_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_on_defs[] = { + 
RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000080), + RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x807f030a), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_on_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_off_defs[] = { + RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000000), + RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x80000000), + RTW89_DECL_RFK_WM(0x80f4, BIT(18), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_off_defs); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs[] = { + RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0xb5b5), + RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0xb5b5), + RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16), + RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1f19), + RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x1c), + RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041), + RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x2001), + RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e), + RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e), + RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0), + RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_b[] = { + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_a[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_b[] = { + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x026d000), + RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x3dc80280), + RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00000080), + RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03), + RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5854, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 
0x00000000), + RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000), + RTW89_DECL_RFK_WM(0x58f8, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x026d000), + RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x3dc80280), + RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00000080), + RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03), + RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000), + RTW89_DECL_RFK_WM(0x78f8, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = { + RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe), + RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = { + RTW89_DECL_RFK_WM(0x78a0, 
0xffffffff, 0x000000fe), + RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_a[] = { + RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x0), + RTW89_DECL_RFK_WM(0x58c8, 0x00000fff, 0x0), + RTW89_DECL_RFK_WM(0x58c8, 0x00fff000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_b[] = { + RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x0), + RTW89_DECL_RFK_WM(0x78c8, 0x00000fff, 0x0), + RTW89_DECL_RFK_WM(0x78c8, 0x00fff000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x1af), + RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_b[] = { + RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x1af), + RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_a[] = { + RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x1), + RTW89_DECL_RFK_WM(0x5814, 0x0003c000, 0xb), + RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x1), + RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x6), + RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_b[] = { + RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x1), + RTW89_DECL_RFK_WM(0x7814, 0x0003c000, 0xb), + RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x1), + RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x6), + RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_a[] = { + RTW89_DECL_RFK_WM(0x5818, 0x08000000, 0x1), + RTW89_DECL_RFK_WM(0x58d4, 0xf0000000, 0x7), + RTW89_DECL_RFK_WM(0x58f0, 0x000c0000, 0x1), + RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x400), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_b[] = { + RTW89_DECL_RFK_WM(0x7818, 0x08000000, 0x1), + RTW89_DECL_RFK_WM(0x78d4, 0xf0000000, 0x7), + RTW89_DECL_RFK_WM(0x78f0, 0x000c0000, 0x1), + RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x400), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201020), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x0808081e), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x081d), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_b[] = { + 
RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0204020), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x020), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08081e21), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x1d23), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_a[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_b[] = { + RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x2d2721), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x3b8), + RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3d2), + RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x042), + RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x06b), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x3bc), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3d6), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x03e), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x06b), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_b[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x2d2721), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x3c0), + RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3da), + RTW89_DECL_RFK_WM(0x7638, 
0x000003ff, 0x002),
+	RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x071),
+	RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x3c8),
+	RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e2),
+	RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x00c),
+	RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x071),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_a[] = {
+	RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+	RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+	RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600),
+	RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9),
+	RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039),
+	RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x07d),
+	RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000),
+	RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_b[] = {
+	RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+	RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+	RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600),
+	RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9),
+	RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039),
+	RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x07d),
+	RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000),
+	RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_a[] = {
+	RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+	RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+	RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600),
+	RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9),
+	RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039),
+	RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x080),
+	RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000),
+	RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_b[] = {
+	RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+	RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+	RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600),
+	RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9),
+	RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039),
+	RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x080),
+	RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+	RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000),
+	RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000),
+	RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000),
+	RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_a[] = {
+	RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0),
+	RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1),
+	RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+	RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x0f),
+	RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x280),
+	RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x200),
+	RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00),
+	RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00),
+	RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0a),
+	RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x28),
+	RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x76),
+	RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x0),
+	RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1),
+	RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+	RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+	RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_b[] = {
+	RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0),
+	RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1),
+	RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+	RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x0f),
+	RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x280),
+	RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x200),
+	RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00),
+	RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00),
+	RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0a),
+	RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x28),
+	RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x76),
+	RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x0),
+	RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1),
+	RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+	RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+	RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x000),
+	RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x115f2),
+	RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x000),
+	RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x121),
+	RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_a[] = {
+	RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_b[] = {
+	RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_a[] = {
+	RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0),
+	RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x0),
+	RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+	RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x1ff),
+	RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x200),
+	RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x080),
+	RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_b[] = {
+	RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0),
+	RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x0),
+	RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+	RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x1ff),
+	RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x200),
+	RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x080),
+	RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_a[] = {
+	RTW89_DECL_RFK_WM(0x58e4, 0x00003800, 0x1),
+	RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0),
+	RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x1),
+	RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x0),
+	RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_b[] = {
+	RTW89_DECL_RFK_WM(0x78e4, 0x00003800, 0x1),
+	RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0),
+	RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x1),
+	RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x0),
+	RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_a[] = {
+	RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0),
+	RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+	RTW89_DECL_RFK_WRF(0x0, 0x10055, 0x00080, 0x1),
+	RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_b[] = {
+	RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0),
+	RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+	RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+	RTW89_DECL_RFK_WRF(0x1, 0x10055, 0x00080, 0x1),
+	RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_a[] = {
+	RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+	RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+	RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_b[] = {
+	RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+	RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+	RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_b);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h
new file mode 100644
index 0000000000..953a960ef1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_RFK_TABLE_H__
+#define __RTW89_8852C_RFK_TABLE_H__
+
+#include "phy.h"
+
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reload_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s0_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s1_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_drck_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_rxk_cfg_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_pre_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_post_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order0_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order1_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order2_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order3_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_on_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_off_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_bbgain_split_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_bbgain_split_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_b_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
new file mode 100644
index 0000000000..feaa83b161
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -0,0 +1,19470 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c_table.h"
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
+	{0xF0FF0000, 0x00000000},
+	{0xF03300FF, 0x00000001},
+	{0xF03400FF, 0x00000002},
+	{0x70C, 0x00000020},
+	{0x704, 0x601E0100},
+	{0x4000, 0x00000000},
+	{0x4004, 0xCA014000},
+	{0x4008, 0xC751D4F0},
+	{0x400C, 0x44511475},
+	{0x4010, 0x00000000},
+	{0x4014, 0x00000000},
+	{0x44AC, 0x01F60380},
+	{0x4018, 0x4F4C4B4B},
+	{0x401C, 0x494A4E52}, +
{0x4020, 0x4D504E4B}, + {0x4024, 0x4F4C4949}, + {0x4028, 0x49484C50}, + {0x402C, 0x4C50504C}, + {0x4030, 0x54544D4A}, + {0x4034, 0x504B5654}, + {0x4038, 0x6A6C605A}, + {0x403C, 0x48484848}, + {0x4040, 0x48483D47}, + {0x4044, 0x3D474848}, + {0x4048, 0x51484848}, + {0x404C, 0x4A4A404F}, + {0x4050, 0x514F4C4A}, + {0x4054, 0x524E4A4A}, + {0x4058, 0x4A4A5154}, + {0x405C, 0x53555554}, + {0x4060, 0x45454545}, + {0x4064, 0x45454144}, + {0x4068, 0x40434445}, + {0x406C, 0x44454545}, + {0x4070, 0x44444043}, + {0x4074, 0x42434444}, + {0x4078, 0x46454444}, + {0x407C, 0x44444843}, + {0x4080, 0x4B4E4A47}, + {0x4084, 0x514D4A49}, + {0x4088, 0x4A495454}, + {0x408C, 0x5454514D}, + {0x4090, 0x524E4B4A}, + {0x4094, 0x4C4B5455}, + {0x4098, 0x55565550}, + {0x409C, 0x5959504D}, + {0x40A0, 0x544E5D5A}, + {0x40A4, 0x7975665F}, + {0x40A8, 0x48484848}, + {0x40AC, 0x48483D47}, + {0x40B0, 0x3D474848}, + {0x40B4, 0x48484848}, + {0x40B8, 0x48483E48}, + {0x40BC, 0x3E4A4A49}, + {0x40C0, 0x514E4948}, + {0x40C4, 0x4A49404F}, + {0x40C8, 0x42525555}, + {0x40CC, 0x47474747}, + {0x40D0, 0x47474747}, + {0x40D4, 0x47474747}, + {0x40D8, 0x48484848}, + {0x40DC, 0x48474848}, + {0x40E0, 0x4A484848}, + {0x40E4, 0x49484847}, + {0x40E8, 0x4847524D}, + {0x40EC, 0x55544F4B}, + {0x40F0, 0x00000000}, + {0x4604, 0x4C4C4D4E}, + {0x4608, 0x3D3D6A56}, + {0x460C, 0x53515140}, + {0x4610, 0x42404041}, + {0x4614, 0x54544B48}, + {0x4618, 0x795D5554}, + {0x461C, 0x3E3E3D3D}, + {0x4620, 0x47474240}, + {0x4624, 0x55524A48}, + {0x4ED4, 0x00000000}, + {0x40F4, 0x00000006}, + {0x4628, 0x00000000}, + {0x4E9C, 0x26663333}, + {0x4EA0, 0x6EDA4148}, + {0x4EA4, 0x599A0000}, + {0x4EA8, 0x40000000}, + {0x4ED0, 0x00000001}, + {0x40F8, 0x00000000}, + {0x40FC, 0x8C30C30C}, + {0x4100, 0x4C30C30C}, + {0x4104, 0x0C30C30C}, + {0x4108, 0x0C30C30C}, + {0x410C, 0x0C30C30C}, + {0x4110, 0x0C30C30C}, + {0x4114, 0x28A28A28}, + {0x4118, 0x28A28A28}, + {0x411C, 0x28A28A28}, + {0x4120, 0x28A28A28}, + {0x4124, 0x28A28A28}, + {0x4128, 0x28A28A28}, + {0x412C, 0x06666666}, + {0x4130, 0x33333333}, + {0x4134, 0x33333333}, + {0x4138, 0x33333333}, + {0x413C, 0x00000031}, + {0x462C, 0x0C30C30C}, + {0x4630, 0x0C30C30C}, + {0x4634, 0x28A28A28}, + {0x4638, 0x28A28A28}, + {0x463C, 0x33333333}, + {0x4640, 0x00000033}, + {0x4140, 0x5100600A}, + {0x4144, 0x18363113}, + {0x4148, 0x1D976DDC}, + {0x414C, 0x1C072DD7}, + {0x4150, 0x1127CDF4}, + {0x4154, 0x1E37BDF1}, + {0x4158, 0x1FB7F1D6}, + {0x415C, 0x1EA7DDF9}, + {0x4160, 0x1FE445DD}, + {0x4164, 0x1F97F1FE}, + {0x4168, 0x1FF781ED}, + {0x416C, 0x1FA7F5FE}, + {0x4170, 0x1E07B913}, + {0x4174, 0x1FD7FDFF}, + {0x4178, 0x1E17B9FA}, + {0x417C, 0x19A66914}, + {0x4180, 0x10F65598}, + {0x4184, 0x14A5A111}, + {0x4188, 0x1D3765DB}, + {0x418C, 0x17C685CA}, + {0x4190, 0x1107C5F3}, + {0x4194, 0x1B5785EB}, + {0x4198, 0x1F97ED8F}, + {0x419C, 0x1BC7A5F3}, + {0x41A0, 0x1FE43595}, + {0x41A4, 0x1EB7D9FC}, + {0x41A8, 0x1FE65DBE}, + {0x41AC, 0x1EC7D9FC}, + {0x41B0, 0x1976FCFF}, + {0x41B4, 0x1F77F5FF}, + {0x41B8, 0x1976FDEC}, + {0x41BC, 0x198664EF}, + {0x41C0, 0x11062D93}, + {0x41C4, 0x10C4E910}, + {0x41C8, 0x1CA759DB}, + {0x41CC, 0x1335A9B5}, + {0x41D0, 0x1097B9F3}, + {0x41D4, 0x17B72DE1}, + {0x41D8, 0x1F67ED42}, + {0x41DC, 0x18074DE9}, + {0x41E0, 0x1FD40547}, + {0x41E4, 0x1D57ADF9}, + {0x41E8, 0x1FE52182}, + {0x41EC, 0x1D67B1F9}, + {0x41F0, 0x14860CE1}, + {0x41F4, 0x1EC7E9FE}, + {0x41F8, 0x14860DD6}, + {0x41FC, 0x195664C7}, + {0x4200, 0x0005E58A}, + {0x4204, 0x00000000}, + {0x4208, 0x00000000}, + {0x420C, 0x7A000000}, + {0x4210, 0x0F9F3D7A}, + {0x4214, 0x0040817C}, + 
{0x4218, 0x00E10204}, + {0x421C, 0x257D94CD}, + {0x4220, 0x0802DB6D}, + {0x4224, 0x00000200}, + {0x4228, 0x04688000}, + {0x4644, 0x00000000}, + {0x4648, 0x00000000}, + {0x464C, 0x00000000}, + {0x4650, 0x00000020}, + {0x4ECC, 0x00000001}, + {0x422C, 0x0060B002}, + {0x4230, 0x9A8249A8}, + {0x4234, 0x26A1469E}, + {0x4238, 0x2099A824}, + {0x423C, 0x2359461C}, + {0x4240, 0x1631A675}, + {0x4244, 0x2C6B1D63}, + {0x4248, 0x0000000E}, + {0x424C, 0x00000001}, + {0x4250, 0x00000001}, + {0x4254, 0x00000000}, + {0x4258, 0x00000000}, + {0x425C, 0x00000000}, + {0x4260, 0x01E0000C}, + {0x4654, 0x00000000}, + {0x4658, 0x00000000}, + {0x465C, 0x0000001E}, + {0x4E74, 0x00000000}, + {0x4264, 0x00000000}, + {0x4268, 0x00000000}, + {0x426C, 0x0418317C}, + {0x46C0, 0x00000001}, + {0x4270, 0x00D6135C}, + {0x46C4, 0x00000033}, + {0x4274, 0x00000000}, + {0x4278, 0x00000000}, + {0x427C, 0x00000000}, + {0x4280, 0x00000000}, + {0x4284, 0x00000000}, + {0x4288, 0x00000000}, + {0x46D8, 0x00000000}, + {0x46DC, 0x00000000}, + {0x46E0, 0x00000000}, + {0x46E4, 0x00000000}, + {0x46E8, 0x00000000}, + {0x428C, 0x00000000}, + {0x4290, 0x00000000}, + {0x4294, 0x00000000}, + {0x4298, 0x84026000}, + {0x429C, 0x0051AC20}, + {0x46EC, 0x1020C040}, + {0x46F0, 0xB8BEBEB8}, + {0x46F4, 0x021102BE}, + {0x46F8, 0x14221142}, + {0x46FC, 0x18C4098C}, + {0x4700, 0x00021084}, + {0x42A0, 0x02024008}, + {0x42A4, 0x00000000}, + {0x42A8, 0x00000000}, + {0x42AC, 0x22CE803C}, + {0x42B0, 0x32000000}, + {0x42B4, 0x996FD67D}, + {0x42B8, 0xBD67D67D}, + {0x42BC, 0x7D67D65B}, + {0x42C0, 0x28029F59}, + {0x42C4, 0x00280280}, + {0x4704, 0x00000000}, + {0x42C8, 0x00000000}, + {0x42CC, 0x00000000}, + {0x42D0, 0x00000003}, + {0x4708, 0x00280000}, + {0x42D4, 0x00000001}, + {0x42D8, 0x61861800}, + {0x42DC, 0x830C30C3}, + {0x42E0, 0xC30C30C3}, + {0x42E4, 0x830C30C3}, + {0x42E8, 0x451450C3}, + {0x42EC, 0x05145145}, + {0x42F0, 0x05145145}, + {0x42F4, 0x05145145}, + {0x42F8, 0x03207145}, + {0x42FC, 0x041C32C6}, + {0x4300, 0x031C5247}, + {0x4304, 0x030C5143}, + {0x4308, 0x030C30C3}, + {0x430C, 0x0F3CF3C3}, + {0x4310, 0x0F3CF3CF}, + {0x4314, 0x0F3CF3CF}, + {0x4318, 0x0F3CF3CF}, + {0x431C, 0x0F3CF3CF}, + {0x4320, 0x030C10C3}, + {0x4324, 0x051430C3}, + {0x4328, 0x051490CB}, + {0x432C, 0x030C70D1}, + {0x4330, 0x050C50C7}, + {0x4334, 0x051492CB}, + {0x4338, 0x05145145}, + {0x433C, 0x05145145}, + {0x4340, 0x05145145}, + {0x4344, 0x05145145}, + {0x4348, 0x090CD243}, + {0x434C, 0x0918A1C5}, + {0x4350, 0x071C3143}, + {0x4354, 0x071431C3}, + {0x4358, 0x0F3CF1C5}, + {0x435C, 0x0F3CF3CF}, + {0x4360, 0x0F3CF3CF}, + {0x4364, 0x0F3CF3CF}, + {0x4368, 0x0F3CF3CF}, + {0x436C, 0x090C91CF}, + {0x4370, 0x11243143}, + {0x4374, 0x9777A777}, + {0x4378, 0xBB7BAC95}, + {0x437C, 0xB667B889}, + {0x4380, 0x7B9B8899}, + {0x4384, 0x7A5567C8}, + {0x4388, 0x2278CCCC}, + {0x438C, 0x7C222222}, + {0x4390, 0x0000049B}, + {0x470C, 0x00000888}, + {0x4EB4, 0x00000002}, + {0x4394, 0x001CCCCC}, + {0x4710, 0xCCCCCAAC}, + {0x4714, 0x0000AACC}, + {0x4398, 0x00000000}, + {0x439C, 0x00000008}, + {0x49A4, 0x00000000}, + {0x43A0, 0x00000000}, + {0x43A4, 0x00000000}, + {0x43A8, 0x00000000}, + {0x43AC, 0x10000000}, + {0x43B0, 0x00401001}, + {0x43B4, 0x00061003}, + {0x4718, 0x00003000}, + {0x43B8, 0x000024D8}, + {0x43BC, 0x00000000}, + {0x43C0, 0x10000020}, + {0x43C4, 0x20000200}, + {0x43C8, 0x00000000}, + {0x43CC, 0x04000000}, + {0x43D0, 0x44000100}, + {0x43D4, 0x60804060}, + {0x43D8, 0x44204210}, + {0x43DC, 0x82108082}, + {0x43E0, 0x82108402}, + {0x43E4, 0xC8082108}, + {0x43E8, 0xC8202084}, + {0x43EC, 0x44208208}, + 
{0x43F0, 0x84108204}, + {0x43F4, 0xD0108104}, + {0x43F8, 0xF8210108}, + {0x43FC, 0x6431E930}, + {0x4400, 0x02109468}, + {0x4404, 0x10C61C22}, + {0x4408, 0x02109469}, + {0x440C, 0x10C61C22}, + {0x4410, 0x00041049}, + {0x471C, 0x0B02C080}, + {0x4414, 0x00000000}, + {0x4418, 0x00000000}, + {0x441C, 0x80000000}, + {0x4420, 0xB0200000}, + {0x4424, 0x00001FF0}, + {0x4780, 0xEC000000}, + {0x4784, 0x8C400020}, + {0x4964, 0x51089104}, + {0x4968, 0x88448844}, + {0x496C, 0x07000044}, + {0x4E4C, 0x00000000}, + {0x4428, 0x00000000}, + {0x442C, 0x00000000}, + {0x4430, 0x00000000}, + {0x4434, 0x00000000}, + {0x4438, 0x590642D0}, + {0x443C, 0x398668A0}, + {0x4440, 0x6C100808}, + {0x4444, 0x4A145344}, + {0x4448, 0x0C5B008F}, + {0x444C, 0x6E30498A}, + {0x4450, 0x656E371B}, + {0x4454, 0x00000F53}, + {0x49A8, 0x68120000}, + {0x49AC, 0xDA0681E0}, + {0x49BC, 0x14060180}, + {0x49D8, 0x600603FF}, + {0x49DC, 0x3C502000}, + {0x49E0, 0x2C580050}, + {0x49E4, 0x45B055EF}, + {0x49E8, 0x00000290}, + {0x4A0C, 0x00000001}, + {0x4A28, 0x0DAC1B58}, + {0x4A2C, 0x0000001E}, + {0x4E50, 0x16878003}, + {0x4E54, 0x0F00F078}, + {0x4E58, 0x03C1E0B4}, + {0x4E5C, 0x78584830}, + {0x4E60, 0x88C0140C}, + {0x4E64, 0x90302C24}, + {0x4E68, 0x0F84A00A}, + {0x4E6C, 0x00000011}, + {0x4E78, 0x00003039}, + {0x4E7C, 0x0000D431}, + {0x4E80, 0x00008235}, + {0x4E84, 0x00000000}, + {0x4E88, 0x000056CE}, + {0x4E8C, 0x00002B67}, + {0x4E90, 0x00000237}, + {0x4EB8, 0x00004624}, + {0x4A30, 0x00000000}, + {0x4458, 0x00000000}, + {0x445C, 0x4801442E}, + {0x4460, 0x0051A0B8}, + {0x4A34, 0x0000011F}, + {0x4EBC, 0x00000000}, + {0x4A38, 0x0000011F}, + {0x4EC0, 0x00000000}, + {0x4464, 0x00000000}, + {0x4468, 0x00000000}, + {0x446C, 0x00000000}, + {0x4470, 0x00000000}, + {0x4474, 0x00000000}, + {0x4478, 0x00000000}, + {0x447C, 0x00000000}, + {0x4480, 0x2A0AA040}, + {0x4484, 0x0A886926}, + {0x4488, 0x00000004}, + {0x4A3C, 0x00002B1C}, + {0x448C, 0x00000000}, + {0x4490, 0x88000000}, + {0x4494, 0x10000000}, + {0x4498, 0xE0000000}, + {0x4A08, 0x00000FE6}, + {0x4A40, 0x00000000}, + {0x4A44, 0x00000000}, + {0x4A48, 0x00000000}, + {0x4A4C, 0x00000000}, + {0x4A50, 0x00000000}, + {0x4A54, 0x00000000}, + {0x449C, 0x00000019}, + {0x44A0, 0x02B2E394}, + {0x44A4, 0x00000400}, + {0x4A58, 0x14285208}, + {0x4A84, 0x02850A14}, + {0x4A88, 0x048D0A14}, + {0x4A8C, 0x01123401}, + {0x4A90, 0x34011234}, + {0x4A94, 0x23450112}, + {0x4A98, 0x45123451}, + {0x4AAC, 0x12345123}, + {0x4AB0, 0x00000000}, + {0x44A8, 0x00000001}, + {0x44B0, 0x00000000}, + {0x44B4, 0x00000000}, + {0x44B8, 0x00000000}, + {0x44BC, 0x00000000}, + {0x44C0, 0x00000000}, + {0x44C4, 0x00000000}, + {0x44C8, 0x00000000}, + {0x44CC, 0x00000000}, + {0x44D0, 0x00000000}, + {0x44D4, 0x00000000}, + {0x44D8, 0x00000000}, + {0x44DC, 0x00000000}, + {0x44E0, 0x00000000}, + {0x44E4, 0x00000000}, + {0x44E8, 0x00000000}, + {0x44EC, 0x00000000}, + {0x44F0, 0x00000000}, + {0x44F4, 0x00000000}, + {0x44F8, 0x00000000}, + {0x44FC, 0x00000000}, + {0x4500, 0x00000000}, + {0x4504, 0x00000000}, + {0x4508, 0x00000000}, + {0x450C, 0x00000000}, + {0x4510, 0x00000000}, + {0x4514, 0x00000000}, + {0x4518, 0x00000000}, + {0x451C, 0x00000000}, + {0x4520, 0x00000000}, + {0x4524, 0x00000000}, + {0x4528, 0x00000000}, + {0x452C, 0x00000000}, + {0x4530, 0x4ED80C81}, + {0x4534, 0x00001808}, + {0x4538, 0x000000FF}, + {0x453C, 0x00000000}, + {0x4540, 0x00000000}, + {0x4544, 0x00000000}, + {0x4548, 0x00000000}, + {0x454C, 0x00000000}, + {0x4550, 0x00000000}, + {0x4554, 0x00000000}, + {0x4558, 0x00000000}, + {0x455C, 0x00000000}, + {0x4560, 0x40600033}, + 
{0x4564, 0x40000000}, + {0x4568, 0x00000000}, + {0x456C, 0x20000000}, + {0x4570, 0x04AAA407}, + {0x4574, 0x0001A2B4}, + {0x4578, 0x0002024B}, + {0x457C, 0x00200000}, + {0x4580, 0x00001B40}, + {0x4584, 0x00000000}, + {0x4588, 0x000000C8}, + {0x458C, 0x30000000}, + {0x4590, 0x00000000}, + {0x4594, 0x00000000}, + {0x4598, 0x00000001}, + {0x459C, 0x0003FE00}, + {0x45A0, 0x00000000}, + {0x45A4, 0x00000000}, + {0x45A8, 0xC00002C0}, + {0x45AC, 0x78028000}, + {0x45B0, 0x80000048}, + {0x45B4, 0x00098800}, + {0x45B8, 0x00200002}, + {0x4AB4, 0x00000000}, + {0x4AB8, 0x00000000}, + {0x4ABC, 0x00000000}, + {0x4AC0, 0x00000000}, + {0x4AC4, 0x00000000}, + {0x4AC8, 0x00000000}, + {0x4AF4, 0x00000000}, + {0x4AF8, 0x00000000}, + {0x4AFC, 0x00000000}, + {0x4B00, 0x00000000}, + {0x4B04, 0x00000000}, + {0x4B08, 0x00000000}, + {0x4B0C, 0x00000000}, + {0x4B10, 0x00000000}, + {0x4B14, 0x00000000}, + {0x4B18, 0xB0000000}, + {0x4B1C, 0x00000000}, + {0x4B20, 0x00000000}, + {0x4B24, 0x00000000}, + {0x4B28, 0x00000000}, + {0x4B2C, 0x00000000}, + {0x4B30, 0x00000000}, + {0x4B34, 0x00000000}, + {0x4B38, 0x00000000}, + {0x4B3C, 0x00000000}, + {0x4B40, 0x00000000}, + {0x45BC, 0x06748790}, + {0x45C0, 0x80000000}, + {0x45C4, 0x00000000}, + {0x45C8, 0x00000000}, + {0x45CC, 0x00558670}, + {0x45D0, 0x002883F0}, + {0x45D4, 0x00090120}, + {0x45D8, 0x00000000}, + {0x4B44, 0x00000100}, + {0x4B48, 0xA6DBC4B1}, + {0x4B4C, 0x64F624C3}, + {0x4B50, 0x00D4EF15}, + {0x49B0, 0x11110F0A}, + {0x49B4, 0x00000003}, + {0x49B8, 0x0000000A}, + {0x4B54, 0xBE9007FF}, + {0x4B58, 0x00000001}, + {0x49C0, 0x00000007}, + {0x49C4, 0x000003D9}, + {0x4A10, 0x00000001}, + {0x49C8, 0x002B1CB0}, + {0x4A00, 0xC0000000}, + {0x4A04, 0x00001000}, + {0x4B5C, 0x00000005}, + {0x4A18, 0x00000007}, + {0x4B60, 0x00000024}, + {0x49CC, 0x00000001}, + {0x49D0, 0x00000010}, + {0x49D4, 0x00000001}, + {0x4B64, 0x927FBFBF}, + {0x4B68, 0x1D07BDD0}, + {0x4B6C, 0x318A4DEF}, + {0x4B70, 0x158C5318}, + {0x4B74, 0x18C5318C}, + {0x4B78, 0x4E7394EC}, + {0x4B7C, 0xD9081CE5}, + {0x4B80, 0x00000001}, + {0x49EC, 0x00000001}, + {0x4B84, 0x00000000}, + {0x4B88, 0x00000000}, + {0x4B8C, 0x00000000}, + {0x4B90, 0x00000000}, + {0x4B94, 0x00000000}, + {0x4B98, 0x00000000}, + {0x4B9C, 0x00000000}, + {0x4BA0, 0x00000000}, + {0x4BA4, 0x00EA99A2}, + {0x49F8, 0x0000C4C3}, + {0x4A1C, 0x00020800}, + {0x4A20, 0x0002CC00}, + {0x4BA8, 0x002B6456}, + {0x45E0, 0x00000000}, + {0x45E4, 0x00000000}, + {0x45E8, 0x00E2E1E1}, + {0x45EC, 0xCBCBB6B6}, + {0x45F0, 0x59100FCA}, + {0x4BAC, 0x12CAB6DE}, + {0x4BB0, 0x00001110}, + {0x45F4, 0x08882550}, + {0x45F8, 0x08CC2660}, + {0x45FC, 0x09102660}, + {0x4600, 0x00000154}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x45DC, 0xE1CB38E8}, + {0x4660, 0x4A2E1800}, + {0x4664, 0x6750E462}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x45DC, 0xD1B942F4}, + {0x4660, 0x41250EF4}, + {0x4664, 0x6750E458}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x45DC, 0xE1CB38E8}, + {0x4660, 0x4A2E1800}, + {0x4664, 0x6750E462}, + {0xA0000000, 0x00000000}, + {0x45DC, 0xE1CB38E8}, + {0x4660, 0x4A2E1800}, + {0x4664, 0x6750E462}, + {0xB0000000, 0x00000000}, + {0x4668, 0x0E0CFB0A}, + {0x466C, 0x30100F06}, + {0x4670, 0x34333333}, + {0x4674, 0x34343434}, + {0x4678, 0xC39D38E8}, + {0x467C, 0x482800E3}, + {0x4680, 0x5836E46A}, + {0x4684, 0xFBEBDA00}, + {0x4688, 0x1A10FF04}, + {0x468C, 0x282A3000}, + {0x4690, 0x2A29292A}, + {0x4694, 0x04FA2A2A}, + {0x4698, 0xEE0F04D1}, + {0x469C, 0x89291436}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A0, 
0x0701E79E}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A0, 0x0701E79E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A0, 0x0701E79E}, + {0xA0000000, 0x00000000}, + {0x46A0, 0x0701E79E}, + {0xB0000000, 0x00000000}, + {0x46A4, 0x08D07CFF}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A8, 0x2212FF14}, + {0x46AC, 0x60423537}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A8, 0x4D1E7F14}, + {0x46AC, 0x60B37C4E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A8, 0x2212FF14}, + {0x46AC, 0x60423537}, + {0xA0000000, 0x00000000}, + {0x46A8, 0x2212FF14}, + {0x46AC, 0x60423537}, + {0xB0000000, 0x00000000}, + {0x46B0, 0x63666666}, + {0x46B4, 0x35374425}, + {0x46B8, 0x25883043}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x3FFFFD63}, + {0x4724, 0xB58D11FF}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x27795843}, + {0x4724, 0xB58D11F5}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x27795303}, + {0x4724, 0xB58D11F5}, + {0xA0000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x3FFFFD63}, + {0x4724, 0xB58D11FF}, + {0xB0000000, 0x00000000}, + {0x4728, 0x07FFFFFF}, + {0x472C, 0x0E7893B6}, + {0x4730, 0xE0399201}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4734, 0x00000020}, + {0x4738, 0x8325C500}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4734, 0x003D4C20}, + {0x4738, 0x8F25C500}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4734, 0x003D5420}, + {0x4738, 0x8725C500}, + {0xA0000000, 0x00000000}, + {0x4734, 0x00000020}, + {0x4738, 0x8325C500}, + {0xB0000000, 0x00000000}, + {0x473C, 0x00000B7F}, + {0x4ACC, 0x000F7D00}, + {0x4AD0, 0x00000000}, + {0x4AD4, 0x00000040}, + {0x4AE4, 0x5379E99E}, + {0x4AE8, 0x00000744}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4BB4, 0xFBD5B89F}, + {0x4BB8, 0x99563918}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4BB4, 0x05EBC8AF}, + {0x4BB8, 0x99543D24}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4BB4, 0xFBD5B89F}, + {0x4BB8, 0x99563918}, + {0xA0000000, 0x00000000}, + {0x4BB4, 0xFBD5B89F}, + {0x4BB8, 0x99563918}, + {0xB0000000, 0x00000000}, + {0x4BBC, 0x12EED5B8}, + {0x4BC0, 0x80C4542F}, + {0x4BC4, 0x005A007F}, + {0x4BC8, 0x40000000}, + {0x4BCC, 0x40000000}, + {0x4BD0, 0x00000000}, + {0x4BD4, 0x40000000}, + {0x4BD8, 0xC0000000}, + {0x4BDC, 0x40000000}, + {0x4BE0, 0x80000000}, + {0x4BE4, 0xBAAC8000}, + {0x4BE8, 0x638A88C5}, + {0x4BEC, 0x00900000}, + {0x4EAC, 0x00000000}, + {0x4BF0, 0x00000000}, + {0x4BF4, 0x00000000}, + {0x4BF8, 0x00000219}, + {0x4EC4, 0x00000001}, + {0x4EE8, 0x00002020}, + {0x4BFC, 0x00000000}, + {0x4C00, 0x00000010}, + {0x4C04, 0x00000001}, + {0x4C08, 0x00000001}, + {0x4C0C, 0x00000000}, + {0x4C10, 0x00000000}, + {0x4C14, 0x00000151}, + {0x4C18, 0x00000000}, + {0x4C1C, 0x00000000}, + {0x4C20, 0x00000151}, + {0x4C24, 0x00000498}, + {0x4C28, 0x00000498}, + {0x4C2C, 0x00000498}, + {0x4C30, 0x00000498}, + {0x4C34, 0x00000498}, + {0x4C38, 0x00000498}, + {0x4C3C, 0x00000498}, + {0x4C40, 0x00000498}, + {0x4C44, 0x00000000}, + {0x4C48, 0x00000000}, + {0x4C4C, 0x00001146}, + {0x4C50, 0x00000000}, + {0x4C54, 0x00000000}, + {0x4C58, 0x00001146}, + {0x4C5C, 0x00000000}, + {0x4C60, 0x00000000}, + {0x4C64, 0xE2E1E1DE}, + {0x4C68, 0xB6B600B6}, + {0x4C6C, 0xCACBCBCA}, + {0x4C70, 0x8091010F}, + {0x4C74, 0x00000B11}, + {0x46C8, 0x08882550}, + {0x46CC, 0x08CC2660}, + 
{0x46D0, 0x09102660}, + {0x46D4, 0x00000154}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4740, 0xE4CD38E8}, + {0x4744, 0x4C321B04}, + {0x4748, 0x6750E466}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4740, 0xC5AD42F4}, + {0x4744, 0x412504E8}, + {0x4748, 0x6850E459}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4740, 0xE4CD38E8}, + {0x4744, 0x4C321B04}, + {0x4748, 0x6750E466}, + {0xA0000000, 0x00000000}, + {0x4740, 0xE4CD38E8}, + {0x4744, 0x4C321B04}, + {0x4748, 0x6750E466}, + {0xB0000000, 0x00000000}, + {0x474C, 0x0E0CFB0A}, + {0x4750, 0x30100F06}, + {0x4754, 0x34333333}, + {0x4758, 0x34343434}, + {0x475C, 0xC49E38E8}, + {0x4760, 0x482800E2}, + {0x4764, 0x5636E466}, + {0x4768, 0xFBEBDA00}, + {0x476C, 0x1A10FF04}, + {0x4770, 0x282A3000}, + {0x4774, 0x2A29292A}, + {0x4778, 0x04FA2A2A}, + {0x477C, 0xEE0F04D1}, + {0x49F0, 0x89291436}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0xA0000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0xB0000000, 0x00000000}, + {0x49FC, 0x08D07CFF}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A5C, 0x2212FF14}, + {0x4A60, 0x60423537}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A5C, 0x4D1E7F14}, + {0x4A60, 0x60B37C4E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A5C, 0x2212FF14}, + {0x4A60, 0x60423537}, + {0xA0000000, 0x00000000}, + {0x4A5C, 0x2212FF14}, + {0x4A60, 0x60423537}, + {0xB0000000, 0x00000000}, + {0x4A64, 0x63666666}, + {0x4A68, 0x35374425}, + {0x4A6C, 0x25883043}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x3FFFFD63}, + {0x4A78, 0xB58D11FF}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x27795843}, + {0x4A78, 0xB58D11F5}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x27795303}, + {0x4A78, 0xB58D11F5}, + {0xA0000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x3FFFFD63}, + {0x4A78, 0xB58D11FF}, + {0xB0000000, 0x00000000}, + {0x4A7C, 0x07FFFFFF}, + {0x4A80, 0x0E7893B6}, + {0x4A9C, 0xE0399201}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4AA0, 0x00000020}, + {0x4AA4, 0x8325C500}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4AA0, 0x003D4C20}, + {0x4AA4, 0x8F25C500}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4AA0, 0x003D5420}, + {0x4AA4, 0x8725C500}, + {0xA0000000, 0x00000000}, + {0x4AA0, 0x00000020}, + {0x4AA4, 0x8325C500}, + {0xB0000000, 0x00000000}, + {0x4AA8, 0x00000B7F}, + {0x4AD8, 0x000F7D00}, + {0x4ADC, 0x00000000}, + {0x4AE0, 0x00000040}, + {0x4AEC, 0x5379E99E}, + {0x4AF0, 0x00000744}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4C78, 0xFBD5B89F}, + {0x4C7C, 0x99563918}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4C78, 0x07ECC9B0}, + {0x4C7C, 0x995B4126}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4C78, 0xFBD5B89F}, + {0x4C7C, 0x99563918}, + {0xA0000000, 0x00000000}, + {0x4C78, 0xFBD5B89F}, + {0x4C7C, 0x99563918}, + {0xB0000000, 0x00000000}, + {0x4C80, 0x12EED5B8}, + {0x4C84, 0x80C4542F}, + {0x4C88, 0x005A007F}, + {0x4C8C, 0x40000000}, + {0x4C90, 0x40000000}, + {0x4C94, 0x00000000}, + {0x4C98, 0x40000000}, + {0x4C9C, 0xC0000000}, + {0x4CA0, 0x40000000}, + {0x4CA4, 0x80000000}, + {0x4CA8, 0xBAAC8000}, + {0x4CAC, 0x638A88C5}, + {0x4CB0, 
0x00900000}, + {0x4EB0, 0x00000000}, + {0x4CB4, 0x00000000}, + {0x4CB8, 0x00000000}, + {0x4CBC, 0x00000219}, + {0x4EC8, 0x00000001}, + {0x4EEC, 0x00002020}, + {0x4CC0, 0x00000000}, + {0x4CC4, 0x00000010}, + {0x4CC8, 0x00000001}, + {0x4CCC, 0x00000001}, + {0x4CD0, 0x00000000}, + {0x4CD4, 0x00000000}, + {0x4CD8, 0x00000151}, + {0x4CDC, 0x00000000}, + {0x4CE0, 0x00000000}, + {0x4CE4, 0x00000151}, + {0x4CE8, 0x00000498}, + {0x4CEC, 0x00000498}, + {0x4CF0, 0x00000498}, + {0x4CF4, 0x00000498}, + {0x4CF8, 0x00000498}, + {0x4CFC, 0x00000498}, + {0x4D00, 0x00000498}, + {0x4D04, 0x00000498}, + {0x4D08, 0x00000000}, + {0x4D0C, 0x00000000}, + {0x4D10, 0x00001146}, + {0x4D14, 0x00000000}, + {0x4D18, 0x00000000}, + {0x4D1C, 0x00001146}, + {0x4788, 0x00000000}, + {0x478C, 0xA32103FE}, + {0x4790, 0xB20A7B28}, + {0x4794, 0xC6A7B14F}, + {0x4798, 0x000000D3}, + {0x4D20, 0x00000000}, + {0x4D24, 0x0C442416}, + {0x4D28, 0x00000000}, + {0x479C, 0x009B902A}, + {0x47A0, 0x009B902A}, + {0x47A4, 0x98682C18}, + {0x47A8, 0x6318C4C1}, + {0x47AC, 0x6248C631}, + {0x47B0, 0x922A8253}, + {0x47B4, 0x00000005}, + {0x4D2C, 0x0008C0C1}, + {0x47B8, 0x00001759}, + {0x47BC, 0x4B702400}, + {0x47C0, 0x831508BA}, + {0x4A14, 0x000000E9}, + {0x4D30, 0x00000001}, + {0x4E94, 0x000000FC}, + {0x47C4, 0x9ABBCACB}, + {0x47C8, 0x56767578}, + {0x47CC, 0xBBCCBBB3}, + {0x47D0, 0x57889989}, + {0x47D4, 0x00000F45}, + {0x4D34, 0x7BB167AB}, + {0x4D38, 0xBBBBBB05}, + {0x4D3C, 0x777777BB}, + {0x4D40, 0x00015277}, + {0x47D8, 0x27039CE9}, + {0x47DC, 0x41414432}, + {0x47E0, 0x36058342}, + {0x47E4, 0x00000006}, + {0x4D44, 0x00000687}, + {0x47E8, 0x00000001}, + {0x47EC, 0x00000001}, + {0x47F0, 0xC7013016}, + {0x47F4, 0x84413016}, + {0x47F8, 0x84413016}, + {0x47FC, 0x8C413016}, + {0x4800, 0x8C40B028}, + {0x4804, 0x3140B028}, + {0x4808, 0x2940B028}, + {0x480C, 0x8440B028}, + {0x4810, 0x6318C610}, + {0x4814, 0x45334753}, + {0x4818, 0x236A6A88}, + {0x4D48, 0x8C413016}, + {0x4D4C, 0xA140B028}, + {0x4D50, 0x00150A31}, + {0x481C, 0x576DF814}, + {0x4820, 0xA08877AC}, + {0x4824, 0x0000007A}, + {0x4D54, 0x00001184}, + {0x4828, 0xBCEB4A14}, + {0x482C, 0x000A3A4A}, + {0x4830, 0xBCEB4A14}, + {0x4834, 0x000A3A4A}, + {0x4D58, 0x2F63DD3A}, + {0x4838, 0xBCBDBD85}, + {0x483C, 0x0CABB99A}, + {0x4D5C, 0x000000BC}, + {0x4840, 0x38384242}, + {0x4844, 0x0086102E}, + {0x4848, 0xCA24C82A}, + {0x4D60, 0x00000000}, + {0x4D64, 0x0000F49D}, + {0x4ED8, 0x00000001}, + {0x4D68, 0x000001C4}, + {0x4D6C, 0x00000000}, + {0x4D70, 0x38384242}, + {0x4D74, 0x030E902E}, + {0x4D78, 0x994C1502}, + {0x4D7C, 0x00017912}, + {0x4EDC, 0x00000001}, + {0x484C, 0x00008A62}, + {0x4D80, 0x00000002}, + {0x4850, 0x00000008}, + {0x4854, 0x009B902A}, + {0x4858, 0x009B902A}, + {0x485C, 0x98682C18}, + {0x4860, 0x6318C4C1}, + {0x4864, 0x6248C631}, + {0x4868, 0x922A8253}, + {0x486C, 0x00000005}, + {0x4D84, 0x0008C0C1}, + {0x4870, 0x00001759}, + {0x4874, 0x4B702400}, + {0x4878, 0x831508BA}, + {0x4A24, 0x000000E9}, + {0x4D88, 0x00000001}, + {0x4E98, 0x000000FC}, + {0x487C, 0x9898A8BB}, + {0x4880, 0x54535368}, + {0x4884, 0x999999B3}, + {0x4888, 0x35555589}, + {0x488C, 0x00000745}, + {0x4D8C, 0x6AB14487}, + {0x4D90, 0xBBBBBB04}, + {0x4D94, 0x777777BB}, + {0x4D98, 0x00015277}, + {0x4890, 0x27039CE9}, + {0x4894, 0x41414432}, + {0x4898, 0x36058342}, + {0x489C, 0x00000006}, + {0x4D9C, 0x00000687}, + {0x48A0, 0x00000001}, + {0x48A4, 0x00000001}, + {0x48A8, 0xC7013016}, + {0x48AC, 0x84413016}, + {0x48B0, 0x84413016}, + {0x48B4, 0x8C413016}, + {0x48B8, 0x8C40B028}, + {0x48BC, 0x3140B028}, + {0x48C0, 0x2940B028}, + {0x48C4, 
0x8440B028}, + {0x48C8, 0x6318C610}, + {0x48CC, 0x45334753}, + {0x48D0, 0x236A6A88}, + {0x4DA0, 0x8C413016}, + {0x4DA4, 0xA140B028}, + {0x4DA8, 0x00150A31}, + {0x48D4, 0x576DF814}, + {0x48D8, 0xA08877AC}, + {0x48DC, 0x0000007A}, + {0x4DAC, 0x00001184}, + {0x48E0, 0xBCEB4A14}, + {0x48E4, 0x000A3A4A}, + {0x48E8, 0xBCEB4A14}, + {0x48EC, 0x000A3A4A}, + {0x4DB0, 0x2F63DD3A}, + {0x48F0, 0x9A8A8A85}, + {0x48F4, 0x0C9BB99A}, + {0x4DB4, 0x0000009A}, + {0x48F8, 0x38384242}, + {0x48FC, 0x0086102E}, + {0x4900, 0xCA24C82A}, + {0x4DB8, 0x00000000}, + {0x4DBC, 0x0000F49D}, + {0x4EE0, 0x00000001}, + {0x4DC0, 0x000001C4}, + {0x4DC4, 0x00000000}, + {0x4DC8, 0x38384242}, + {0x4DCC, 0x030E902E}, + {0x4DD0, 0x994C1502}, + {0x4DD4, 0x00017912}, + {0x4EE4, 0x00000001}, + {0x4904, 0x00008A62}, + {0x4DD8, 0x00000002}, + {0x4908, 0x00000008}, + {0x490C, 0x80040000}, + {0x4910, 0x80040000}, + {0x4914, 0xFE800000}, + {0x4918, 0x834C0000}, + {0x491C, 0x00000000}, + {0x4920, 0x00000000}, + {0x4924, 0x000003FF}, + {0x4928, 0x00000000}, + {0x492C, 0x00000000}, + {0x4930, 0x00000000}, + {0x4934, 0x40000000}, + {0x4938, 0x00000000}, + {0x493C, 0x00000000}, + {0x4940, 0x00000000}, + {0x4944, 0x00000000}, + {0x4948, 0x04065800}, + {0x494C, 0x02010080}, + {0x4950, 0x0E1E3E05}, + {0x4954, 0x0A163068}, + {0x4958, 0x00206040}, + {0x495C, 0x02020202}, + {0x4960, 0x00002020}, + {0x4DDC, 0x18002000}, + {0x4DE0, 0x00004001}, + {0x4DE4, 0x00040004}, + {0x4DE8, 0x00400040}, + {0x4DEC, 0x04000400}, + {0x4DF0, 0x08080618}, + {0x4DF4, 0x08081616}, + {0x4DF8, 0x08080808}, + {0x4DFC, 0x18180808}, + {0x4E00, 0x01020100}, + {0x4E04, 0x05020502}, + {0x4E08, 0x00020E0F}, + {0x4E0C, 0x00000000}, + {0x4E10, 0x16080806}, + {0x4E14, 0x08080816}, + {0x4E18, 0x08080808}, + {0x4E1C, 0x00181808}, + {0x4E20, 0x02010201}, + {0x4E24, 0x0F050205}, + {0x4E28, 0x0000020E}, + {0x4E2C, 0x00000000}, + {0x4E70, 0x00000001}, + {0x4970, 0x00000000}, + {0x4974, 0xC00CD62D}, + {0x4978, 0x00000103}, + {0x4E30, 0x02E416A8}, + {0x497C, 0x00000000}, + {0x4980, 0x00000000}, + {0x4984, 0x00000000}, + {0x4988, 0x00000000}, + {0x498C, 0x00000000}, + {0x4E34, 0x00FC0000}, + {0x4E38, 0x0000F800}, + {0x4E3C, 0x00000001}, + {0x4990, 0x00000000}, + {0x4994, 0x00000000}, + {0x4998, 0x00000000}, + {0x499C, 0x00000000}, + {0x49A0, 0x00000000}, + {0x4E40, 0x00FC0000}, + {0x4E44, 0x0000F800}, + {0x4E48, 0x00000001}, + {0xC54, 0x10014368}, + {0xC58, 0x61000000}, + {0xC5C, 0x805580F0}, + {0xC64, 0x0010A030}, + {0x189C, 0x000003FF}, + {0xC6C, 0x00060020}, + {0xC3C, 0x2840E1BF}, + {0xC40, 0x00000000}, + {0xC44, 0x00000007}, + {0xC48, 0x410E4000}, + {0xC54, 0x1EE1436A}, + {0xC58, 0x61000000}, + {0x730, 0x00000002}, + {0xC60, 0x017FFFF2}, + {0xC64, 0x0010A170}, + {0xC64, 0x0010A170}, + {0xC68, 0x000000FF}, + {0xC64, 0x0010A130}, + {0xC54, 0x1AE1436A}, + {0xC6C, 0x00060020}, + {0xC58, 0x41000000}, + {0x708, 0x00000000}, + {0xC6C, 0x00061020}, + {0x884, 0x0043F01D}, + {0x704, 0x601E0100}, + {0x710, 0xEF810000}, + {0xC54, 0x1AE1436A}, + {0xC58, 0x41000000}, + {0xC68, 0x10000050}, + {0xC6C, 0x20061020}, + {0x704, 0x601E0100}, + {0xC74, 0x00000000}, + {0x90C, 0x00300000}, + {0xC70, 0x071BFC00}, + {0xC74, 0x3FFFFFFF}, + {0xC78, 0x3FFFFFFF}, + {0xC7C, 0x0000BFFF}, + {0xD40, 0xF64FA0F7}, + {0xD44, 0x0400463F}, + {0xD48, 0x0003FFFF}, + {0xD4C, 0x00000000}, + {0xD50, 0xF64FA0F7}, + {0xD54, 0x04100437}, + {0xD58, 0x0000FF7F}, + {0xD5C, 0x00000000}, + {0xD60, 0x00000000}, + {0xD64, 0x00000000}, + {0xD70, 0x00000015}, + {0xD90, 0x000003FF}, + {0xD94, 0x00000000}, + {0xD98, 0x0000003F}, + {0xD9C, 
0x00000000}, + {0xDA0, 0x000003FE}, + {0xDA4, 0x00000000}, + {0xDA8, 0x0000003F}, + {0xDAC, 0x00000000}, + {0xD00, 0x77777777}, + {0xD04, 0xBBBBBBBB}, + {0xD08, 0xBBBBBBBB}, + {0xD0C, 0x00000070}, + {0xD10, 0x20110900}, + {0xD10, 0x20110FFF}, + {0xD78, 0x00000001}, + {0xD7C, 0x001C040A}, + {0xD84, 0x00006007}, + {0xD84, 0x00006607}, + {0xD10, 0x28110FFF}, + {0xD18, 0x50209900}, + {0xD80, 0x00804100}, + {0xD80, 0x00804200}, + {0x718, 0x1333233F}, + {0x604, 0x041E1E1E}, + {0x714, 0x00010000}, + {0x586C, 0x000000F0}, + {0x586C, 0x000000E0}, + {0x586C, 0x000000D0}, + {0x586C, 0x000000C0}, + {0x586C, 0x000000B0}, + {0x586C, 0x000000A0}, + {0x586C, 0x00000090}, + {0x586C, 0x00000080}, + {0x586C, 0x00000070}, + {0x586C, 0x00000060}, + {0x586C, 0x00000050}, + {0x586C, 0x00000040}, + {0x586C, 0x00000030}, + {0x586C, 0x00000020}, + {0x586C, 0x00000010}, + {0x586C, 0x00000000}, + {0x786C, 0x000000F0}, + {0x786C, 0x000000E0}, + {0x786C, 0x000000D0}, + {0x786C, 0x000000C0}, + {0x786C, 0x000000B0}, + {0x786C, 0x000000A0}, + {0x786C, 0x00000090}, + {0x786C, 0x00000080}, + {0x786C, 0x00000070}, + {0x786C, 0x00000060}, + {0x786C, 0x00000050}, + {0x786C, 0x00000040}, + {0x786C, 0x00000030}, + {0x786C, 0x00000020}, + {0x786C, 0x00000010}, + {0x786C, 0x00000000}, + {0x304, 0x0CE31333}, + {0x300, 0xF30CE31C}, + {0x304, 0x13EF1F19}, + {0x308, 0x0C13E3F3}, + {0x30C, 0x130C0C0C}, + {0x310, 0x80496000}, + {0x314, 0x0041E000}, + {0x318, 0x20022042}, + {0x31C, 0x20448009}, + {0x320, 0x00490040}, + {0x324, 0xE0000070}, + {0x328, 0xE000E000}, + {0x32C, 0x0041E000}, + {0x35C, 0x000004C4}, + {0xC0D4, 0xA7C41460}, + {0xC0D8, 0xC6BA7F67}, + {0xC0DC, 0x30C52868}, + {0xC0E0, 0x75008128}, + {0xC0E4, 0x0000272B}, + {0xC1D4, 0xA7C41460}, + {0xC1D8, 0xC6BA7F67}, + {0xC1DC, 0x30C52868}, + {0xC1E0, 0x75008128}, + {0xC1E4, 0x0000272B}, + {0xC0EC, 0x00030003}, + {0xC1EC, 0x00030003}, + {0xC004, 0x03020000}, + {0xC024, 0x03020000}, + {0xC104, 0x03020000}, + {0xC124, 0x03020000}, + {0xC0E8, 0x000A0C81}, + {0xC0F0, 0x00000024}, + {0xC1E8, 0x000A0C81}, + {0xC1F0, 0x00000024}, + {0x334, 0xFFFFFFFF}, + {0x33C, 0x55000000}, + {0x340, 0x00005555}, + {0x724, 0x00111201}, + {0x5868, 0xA9550000}, + {0x5870, 0x33221100}, + {0x5874, 0x77665544}, + {0x5878, 0xBBAA9988}, + {0x587C, 0xFFEEDDCC}, + {0x5880, 0x76543210}, + {0x5884, 0xFEDCBA98}, + {0x5888, 0x00000000}, + {0x588C, 0x00000000}, + {0x5894, 0x00000008}, + {0x7868, 0xA9550000}, + {0x7870, 0x33221100}, + {0x7874, 0x77665544}, + {0x7878, 0xBBAA9988}, + {0x787C, 0xFFEEDDCC}, + {0x7880, 0x76543210}, + {0x7884, 0xFEDCBA98}, + {0x7888, 0x00000000}, + {0x788C, 0x00000000}, + {0x7894, 0x00000008}, + {0x650, 0x00200888}, + {0x710, 0xF3810000}, + {0x020, 0x0000F381}, + {0x024, 0x0000F381}, + {0xC0A8, 0x00000080}, + {0xC0AC, 0x00000100}, + {0xC0B8, 0x00020000}, + {0xC1A8, 0x00000080}, + {0xC1AC, 0x00000100}, + {0xC1B8, 0x00020000}, + {0x1038, 0x00003100}, + {0x1038, 0x00003100}, + {0x3038, 0x00003100}, + {0x3038, 0x00003100}, + {0xC14, 0xA5000000}, + {0x908, 0x00000001}, + {0xC54, 0x1EE14368}, + {0xC88, 0xC2AC8000}, + {0xC8C, 0x02F2FC08}, + {0xC70, 0x071BFC00}, + {0x980, 0x10002251}, + {0x988, 0x3C3C4107}, + {0x904, 0x00000005}, + {0x994, 0x00000010}, + {0x000, 0x0580801F}, + {0x240C, 0x00000000}, + {0x010, 0x000C01FF}, + {0x010, 0x001C01FF}, + {0x2424, 0x00000008}, + {0x620, 0x00141A30}, + {0x660, 0x00000004}, + {0x2620, 0x00141A30}, + {0x2660, 0x00000000}, + {0x640, 0x180A141E}, + {0x640, 0x1814141E}, + {0x640, 0x1814141E}, + {0x640, 0x14141414}, + {0x644, 0x3C14283C}, + {0x644, 0x3C29283C}, + 
{0x644, 0x3C29203C}, + {0x644, 0x3C29201A}, + {0x2640, 0x180A141E}, + {0x2640, 0x1814141E}, + {0x2640, 0x1814141E}, + {0x2640, 0x14141414}, + {0x2644, 0x3C14283C}, + {0x2644, 0x3C29283C}, + {0x2644, 0x3C29203C}, + {0x2644, 0x3C29201A}, + {0x620, 0x00141A40}, + {0x64C, 0x1D0A141E}, + {0x64C, 0x1D1D141E}, + {0x64C, 0x1D1D1D1E}, + {0x2620, 0x00141A40}, + {0x264C, 0x1D0A141E}, + {0x264C, 0x1D1D141E}, + {0x264C, 0x1D1D1D1E}, + {0x2300, 0x03020100}, + {0x2304, 0x07060504}, + {0x2308, 0x0B0A0908}, + {0x230C, 0x0F0E0D0C}, + {0x2310, 0x13121110}, + {0x2314, 0x17161514}, + {0x2318, 0x00000018}, + {0x231C, 0x00C00000}, + {0x2320, 0x00000000}, + {0x2324, 0x0005298F}, + {0x2328, 0x0015296E}, + {0x232C, 0x0D3B5200}, + {0x2330, 0x00000000}, + {0x2334, 0x00000000}, + {0x2338, 0x00000000}, + {0x233C, 0x00000402}, + {0x2340, 0x00020080}, + {0x2344, 0x03C00000}, + {0x2348, 0x0001FFFF}, + {0x234C, 0x00C80064}, + {0x2350, 0x0190012C}, + {0x2354, 0x000032FE}, + {0x2358, 0xF0203C28}, + {0x235C, 0xF027C000}, + {0x2360, 0x01210C00}, + {0x2320, 0x00000001}, + {0x2300, 0x0C811B40}, + {0x2304, 0xF3FC4ED8}, + {0x2308, 0x08FF808F}, + {0x230C, 0xFCBC80C8}, + {0x2310, 0xBC80536C}, + {0x2314, 0x0363A0F3}, + {0x2318, 0x000000BB}, + {0x724, 0x00111200}, + {0x704, 0x601E0D00}, + {0xC78, 0xBFFFFFFF}, + {0x704, 0x601E0D02}, + {0x704, 0x601E0D02}, + {0x5864, 0x080801FF}, + {0x7864, 0x080801FF}, + {0xC60, 0x017FFFF3}, + {0xC6C, 0x20061021}, + {0x58AC, 0x08000000}, + {0x78AC, 0x08000000}, + {0x8088, 0x007F0000}, + {0x81A4, 0x003F3A00}, + {0x81B4, 0x0100007F}, + {0x81C0, 0x0060010B}, + {0x81A0, 0x00000010}, + {0x8138, 0x40000002}, + {0x82A4, 0x003F3A00}, + {0x82B4, 0x0100007F}, + {0x82C0, 0x0060010B}, + {0x82A0, 0x00000010}, + {0x81A0, 0x00000010}, + {0x8238, 0x40000002}, + {0x8088, 0x00000000}, + {0x8020, 0x00000000}, + {0x8120, 0x00000000}, + {0x8220, 0x00000000}, + {0x8124, 0x00000F0F}, + {0x8224, 0x00000F0F}, + {0x5864, 0x180801FF}, + {0x7864, 0x180801FF}, + {0xC60, 0x017FFFF3}, + {0xC70, 0x071BFE00}, + {0xC70, 0x071BFE60}, + {0xC6C, 0x20061021}, + {0x58AC, 0x08000000}, + {0x78AC, 0x08000000}, + {0x8120, 0x10000000}, + {0x8120, 0x10030000}, + {0x8124, 0x00000F0F}, + {0x8124, 0x00000F0F}, + {0x8224, 0x00000F0F}, + {0x8224, 0x00000F0F}, + {0x8220, 0x10000000}, + {0x8220, 0x10030000}, + {0x704, 0x601E0D00}, + {0x5864, 0x100801FF}, + {0x7864, 0x100801FF}, + {0x5864, 0x180801FF}, + {0x7864, 0x180801FF}, + {0xC60, 0x017FFFF3}, + {0x58D4, 0x7401FE00}, + {0x78D4, 0x7401FE00}, + {0x58F0, 0x400401FF}, + {0x78F0, 0x400401FF}, + {0x58F0, 0x400401FF}, + {0x78F0, 0x400401FF}, + {0x704, 0x601E0D02}, + {0xC7C, 0x0020BFFF}, + {0x58C0, 0x00FE0000}, + {0x58FC, 0x00000000}, + {0x566C, 0x00010005}, + {0x566C, 0x00011005}, + {0x700, 0x00000030}, + {0x9D0, 0x00001001}, + {0x704, 0x601E0D02}, + {0x704, 0x601E0D00}, + {0x704, 0x601C0502}, + {0x000, 0x0580801F}, + {0x980, 0x10002250}, + {0x010, 0x001C01FF}, + {0xC3C, 0x2840E1BF}, + {0x12A8, 0x33337824}, + {0x32A8, 0x33337824}, + {0x620, 0x00141A40}, + {0x2320, 0x00000000}, + {0x664, 0x0000000C}, + {0xC0F8, 0x00000001}, + {0xC1F8, 0x00000001}, + {0x2D7C, 0x739C040A}, + {0x1010, 0x00000000}, + {0x3010, 0x00000000}, + {0x2C14, 0x80000005}, + {0x5818, 0x082C1800}, + {0x7818, 0x082C1800}, + {0x624, 0x0101030A}, + {0x028, 0x0000F381}, + {0x02C, 0x0000F381}, + {0x720, 0x20000000}, + {0x1200, 0x00010142}, + {0x12A0, 0x24903056}, + {0x12AC, 0x12333121}, + {0x12B8, 0x30020000}, + {0x2000, 0x18BBBF84}, + {0x2C14, 0x85000005}, + {0x3200, 0x00010142}, + {0x32A0, 0x24903056}, + {0x32AC, 0x12333121}, + {0x32B8, 
0x30020000}, + {0x5800, 0x03FF807F}, + {0x5804, 0x04237040}, + {0x5808, 0x04237040}, + {0x7800, 0x03FF807F}, + {0x7804, 0x04237040}, + {0x7808, 0x04237040}, + {0x010, 0x001C61FF}, + {0x56C8, 0x0E800400}, + {0x76C8, 0x0E800400}, + {0x984, 0x000000E0}, + {0x2008, 0x000FFFFF}, + {0x58B0, 0x00000800}, + {0x5A00, 0x00000000}, + {0x5A04, 0x00000000}, + {0x5A08, 0x00000000}, + {0x5A0C, 0x00000000}, + {0x5A10, 0x00000000}, + {0x5A14, 0x00000000}, + {0x5A18, 0x00000000}, + {0x5A1C, 0x00000000}, + {0x5A20, 0x00000000}, + {0x5A24, 0x00050000}, + {0x5A28, 0x00000000}, + {0x5A2C, 0x00000000}, + {0x5A30, 0x00000000}, + {0x5A34, 0x00000000}, + {0x5A38, 0x00000000}, + {0x5A3C, 0x00000000}, + {0x5A40, 0x00000000}, + {0x5A44, 0x00000005}, + {0x5A48, 0x00000000}, + {0x5A4C, 0x00000000}, + {0x5A50, 0x00000000}, + {0x5A54, 0x00000000}, + {0x5A58, 0x00000000}, + {0x5A5C, 0x00000000}, + {0x5A60, 0x00050000}, + {0x5A64, 0x00000000}, + {0x5A68, 0x00000000}, + {0x5A6C, 0x00000000}, + {0x5A70, 0x00000000}, + {0x5A74, 0x00000000}, + {0x5A78, 0x00000000}, + {0x5A7C, 0x00000000}, + {0x5A80, 0x00000000}, + {0x5A84, 0x00000000}, + {0x5A88, 0x00000000}, + {0x5A8C, 0x00000000}, + {0x5A90, 0x00000000}, + {0x5A94, 0x00000000}, + {0x5A98, 0x00000000}, + {0x5A9C, 0x00000000}, + {0x5AA0, 0x00000000}, + {0x5AA4, 0x00000000}, + {0x5AA8, 0x00000000}, + {0x5AAC, 0x00000000}, + {0x5AB0, 0x00050005}, + {0x5AB4, 0x00050005}, + {0x5AB8, 0x00050005}, + {0x5ABC, 0x00050005}, + {0x5AC0, 0x00000005}, + {0x78B0, 0x00000800}, + {0x7A00, 0x00000000}, + {0x7A04, 0x00000000}, + {0x7A08, 0x00000000}, + {0x7A0C, 0x00000000}, + {0x7A10, 0x00000000}, + {0x7A14, 0x00000000}, + {0x7A18, 0x00000000}, + {0x7A1C, 0x00000000}, + {0x7A20, 0x00000000}, + {0x7A24, 0x00050000}, + {0x7A28, 0x00000000}, + {0x7A2C, 0x00000000}, + {0x7A30, 0x00000000}, + {0x7A34, 0x00000000}, + {0x7A38, 0x00000000}, + {0x7A3C, 0x00000000}, + {0x7A40, 0x00000000}, + {0x7A44, 0x00000005}, + {0x7A48, 0x00000000}, + {0x7A4C, 0x00000000}, + {0x7A50, 0x00000000}, + {0x7A54, 0x00000000}, + {0x7A58, 0x00000000}, + {0x7A5C, 0x00000000}, + {0x7A60, 0x00050000}, + {0x7A64, 0x00000000}, + {0x7A68, 0x00000000}, + {0x7A6C, 0x00000000}, + {0x7A70, 0x00000000}, + {0x7A74, 0x00000000}, + {0x7A78, 0x00000000}, + {0x7A7C, 0x00000000}, + {0x7A80, 0x00000000}, + {0x7A84, 0x00000000}, + {0x7A88, 0x00000000}, + {0x7A8C, 0x00000000}, + {0x7A90, 0x00000000}, + {0x7A94, 0x00000000}, + {0x7A98, 0x00000000}, + {0x7A9C, 0x00000000}, + {0x7AA0, 0x00000000}, + {0x7AA4, 0x00000000}, + {0x7AA8, 0x00000000}, + {0x7AAC, 0x00000000}, + {0x7AB0, 0x00050005}, + {0x7AB4, 0x00050005}, + {0x7AB8, 0x00050005}, + {0x7ABC, 0x00050005}, + {0x7AC0, 0x00000005}, + {0x0F0, 0x00010000}, + {0x0F4, 0x00000018}, + {0x0F8, 0x20220120}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = { + {0xF0FF0000, 0x00000000}, + {0xF03300FF, 0x00000001}, + {0x000, 0x01E3C39F}, + {0x001, 0x00694727}, + {0x002, 0x00005536}, + {0x100, 0x02E3C39F}, + {0x101, 0x0069472A}, + {0x102, 0x00005536}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10000, 0x1A02E1C9}, + {0x10001, 0x00644A30}, + {0x10002, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x10000, 0x0EF4D1B9}, + {0x10001, 0x00584125}, + {0x10002, 0x00006750}, + {0xA0000000, 0x00000000}, + {0x10000, 0x1A02E1C9}, + {0x10001, 0x00644A30}, + {0x10002, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10100, 0x1901E1C8}, + {0x10101, 0x0061482D}, + {0x10102, 0x00006750}, + {0x903300ff, 
0x00000000}, {0x40000000, 0x00000000}, + {0x10100, 0x04E8C5AD}, + {0x10101, 0x00594125}, + {0x10102, 0x00006850}, + {0xA0000000, 0x00000000}, + {0x10100, 0x1901E1C8}, + {0x10101, 0x0061482D}, + {0x10102, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x20000, 0x1601E2CA}, + {0x20001, 0x005D452A}, + {0x20002, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x20000, 0x0EF4D3BB}, + {0x20001, 0x00563F25}, + {0x20002, 0x00006850}, + {0xA0000000, 0x00000000}, + {0x20000, 0x1601E2CA}, + {0x20001, 0x005D452A}, + {0x20002, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x20100, 0x1901E1C8}, + {0x20101, 0x0061482D}, + {0x20102, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x20100, 0x0BF1CFB7}, + {0x20101, 0x00574025}, + {0x20102, 0x00006750}, + {0xA0000000, 0x00000000}, + {0x20100, 0x1901E1C8}, + {0x20101, 0x0061482D}, + {0x20102, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x30000, 0x1700E1CA}, + {0x30001, 0x005E472B}, + {0x30002, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x30000, 0x05EFCEB7}, + {0x30001, 0x004B351A}, + {0x30002, 0x00006850}, + {0xA0000000, 0x00000000}, + {0x30000, 0x1700E1CA}, + {0x30001, 0x005E472B}, + {0x30002, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x30100, 0x14FEE0C9}, + {0x30101, 0x00594428}, + {0x30102, 0x00006650}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x30100, 0x0CF2D1B9}, + {0x30101, 0x00563F24}, + {0x30102, 0x00006750}, + {0xA0000000, 0x00000000}, + {0x30100, 0x14FEE0C9}, + {0x30101, 0x00594428}, + {0x30102, 0x00006650}, + {0xB0000000, 0x00000000}, + {0x40000, 0x13FCDDC8}, + {0x40001, 0x005D4328}, + {0x40002, 0x00006850}, + {0x40100, 0x14FEE3CF}, + {0x40101, 0x00583E24}, + {0x40102, 0x00006850}, + {0x50000, 0x0DF4D6C6}, + {0x50001, 0x00604227}, + {0x50002, 0x00006850}, + {0x50100, 0x1903E7D5}, + {0x50101, 0x0061462B}, + {0x50102, 0x00006850}, + {0x60000, 0x0FF5D7C6}, + {0x60001, 0x005D4429}, + {0x60002, 0x00006850}, + {0x60100, 0x12FADECF}, + {0x60101, 0x005B4126}, + {0x60102, 0x00006850}, + {0x70000, 0x09F1D2C3}, + {0x70001, 0x00554026}, + {0x70002, 0x00006750}, + {0x70100, 0x0CF5DACC}, + {0x70101, 0x00563E25}, + {0x70102, 0x00006750}, + {0x2000000, 0x02E4C4A0}, + {0x2000001, 0x006A4828}, + {0x2000100, 0x02E4C5A1}, + {0x2000101, 0x00664629}, + {0x2010000, 0x05EBC8AF}, + {0x2010001, 0x00543D24}, + {0x2010100, 0x07ECC9B0}, + {0x2010101, 0x005B4126}, + {0x2020000, 0x05EDCCB2}, + {0x2020001, 0x004D361C}, + {0x2020100, 0x06ECCBB2}, + {0x2020101, 0x00553D22}, + {0x2030000, 0x02ECCCB3}, + {0x2030001, 0x00483118}, + {0x2030100, 0x04ECCCB2}, + {0x2030101, 0x004F381C}, + {0x3000000, 0x00000000}, + {0x3000001, 0x00000000}, + {0x3000002, 0x00000000}, + {0x3000003, 0x00000000}, + {0x3000100, 0x00000000}, + {0x3000101, 0x00000000}, + {0x3000102, 0x00000000}, + {0x3000103, 0x00000000}, + {0x3010000, 0x0E0CFB0A}, + {0x3010001, 0x00100F06}, + {0x3010002, 0x34333333}, + {0x3010003, 0x3434343C}, + {0x3010100, 0x0E0CFB0A}, + {0x3010101, 0x00100F06}, + {0x3010102, 0x34333333}, + {0x3010103, 0x3434343C}, + {0x3020000, 0x0E0CFB0A}, + {0x3020001, 0x00100F06}, + {0x3020002, 0x34333333}, + {0x3020003, 0x3434343C}, + {0x3020100, 0x0E0CFB0A}, + {0x3020101, 0x00100F06}, + {0x3020102, 0x34333333}, + {0x3020103, 0x3434343C}, + {0x3030000, 0x0E0CFB0A}, + {0x3030001, 0x00100F06}, + {0x3030002, 
0x34333333}, + {0x3030003, 0x3434343C}, + {0x3030100, 0x0E0CFB0A}, + {0x3030101, 0x00100F06}, + {0x3030102, 0x34333333}, + {0x3030103, 0x3434343C}, + {0x3040000, 0x0E0CFB0A}, + {0x3040001, 0x00100F06}, + {0x3040002, 0x343B3333}, + {0x3040003, 0x34343C3C}, + {0x3040100, 0x0E0CFB0A}, + {0x3040101, 0x00100F06}, + {0x3040102, 0x343B3333}, + {0x3040103, 0x34343C3C}, + {0x3050000, 0x0E0CFB0A}, + {0x3050001, 0x00100F06}, + {0x3050002, 0x343B3333}, + {0x3050003, 0x34343C3C}, + {0x3050100, 0x0E0CFB0A}, + {0x3050101, 0x00100F06}, + {0x3050102, 0x343B3333}, + {0x3050103, 0x34343C3C}, + {0x3060000, 0x0E0CFB0A}, + {0x3060001, 0x00100F06}, + {0x3060002, 0x3C3B3333}, + {0x3060003, 0x34343C3C}, + {0x3060100, 0x0E0CFB0A}, + {0x3060101, 0x00100F06}, + {0x3060102, 0x3C3B3333}, + {0x3060103, 0x34343C3C}, + {0x3070000, 0x0E0CFB0A}, + {0x3070001, 0x00100F06}, + {0x3070002, 0x3C3B3333}, + {0x3070003, 0x34343C3C}, + {0x3070100, 0x0E0CFB0A}, + {0x3070101, 0x00100F06}, + {0x3070102, 0x3C3B3333}, + {0x3070103, 0x34343C3C}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = { + {0xF0010000, 0x00000000}, + {0xF0020000, 0x00000001}, + {0xF0320000, 0x00000002}, + {0xF0330000, 0x00000003}, + {0xF0340000, 0x00000004}, + {0xF0350000, 0x00000005}, + {0xF0360000, 0x00000006}, + {0xF0010001, 0x00000007}, + {0xF0020001, 0x00000008}, + {0xF0320001, 0x00000009}, + {0xF0330001, 0x0000000A}, + {0xF0340001, 0x0000000B}, + {0xF0350001, 0x0000000C}, + {0xF0360001, 0x0000000D}, + {0xF03F0001, 0x0000000E}, + {0xF0400001, 0x0000000F}, + {0x005, 0x00000000}, + {0x10005, 0x00000000}, + {0x000, 0x00030001}, + {0x10000, 0x00030000}, + {0x018, 0x00011124}, + {0x10018, 0x00011124}, + {0x0EF, 0x00080000}, + {0x033, 0x00000001}, + {0x03E, 0x00000620}, + {0x03F, 0x0000020C}, + {0x0EF, 0x00000000}, + {0x05F, 0x00000032}, + {0x097, 0x00043200}, + {0x0A6, 0x00066DB7}, + {0x0EF, 0x00004000}, + {0x033, 0x00000005}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x00000003}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x00000002}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x033, 0x0000000D}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x0000000B}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x0000000A}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x0EF, 0x00000000}, + {0x000, 0x00033C01}, + {0x10000, 0x00033C00}, + {0x01A, 0x00040004}, + {0x0FE, 0x00000000}, + {0x096, 0x00015200}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + 
{0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0xA0000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0xB0000000, 0x00000000}, + {0x057, 0x0000D589}, + {0x05A, 0x0007FFFF}, + {0x043, 0x00005000}, + {0x0B5, 0x00001720}, + {0x0ED, 0x00000080}, + {0x033, 0x00000000}, + {0x03E, 0x00013FAB}, + {0x03F, 0x000FD800}, + {0x033, 0x00000010}, + {0x03E, 0x00013FAB}, + {0x03F, 0x000FD800}, + {0x033, 0x00000020}, + {0x03E, 0x00013FAB}, + {0x03F, 0x000FD800}, + {0x0ED, 0x00000000}, + {0x0ED, 0x00000200}, + {0x033, 0x00000000}, + {0x03F, 0x000000FA}, + {0x033, 0x00000001}, + {0x03F, 0x000000F2}, + {0x033, 0x00000002}, + {0x03F, 0x000000EA}, + {0x033, 0x00000003}, + {0x03F, 0x000000E2}, + {0x033, 0x00000004}, + {0x03F, 0x000000DA}, + {0x033, 0x00000005}, + {0x03F, 0x000000D2}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90350001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90360000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000000A8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000098}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03F, 0x00000098}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000098}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000088}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000088}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90360000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000098}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00000090}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000090}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000088}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + 
{0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000080}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000038}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000030}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90360000, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000028}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000020}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000018}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000010}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000008}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000004}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000000}, + {0xB0000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x0B9, 0x00020440}, + {0x018, 0x00001001}, + {0x10018, 0x00001001}, + {0x002, 0x0000000D}, + {0x10002, 0x0000000D}, + {0x0EE, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x0EE, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0xA0000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0xB0000000, 0x00000000}, + {0x0EB, 0x00000000}, + {0x030, 0x000109B0}, + {0x030, 0x000189B0}, + {0x0EB, 0x00000000}, + {0x0EE, 0x00000010}, + {0x033, 0x00000006}, + {0x03F, 0x00000003}, + {0x033, 0x00000007}, + {0x03F, 0x00000003}, + {0x033, 0x00000008}, + {0x03F, 0x00000001}, + {0x0EE, 0x00000000}, + {0x0EF, 0x00001000}, + {0x033, 0x00000000}, + {0x03F, 0x00000015}, + {0x033, 0x00000001}, + {0x03F, 0x00000017}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00060001}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00000001}, + {0x033, 0x00000025}, + {0x03F, 0x00000002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00060001}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00000001}, + {0x033, 0x0000002D}, + {0x03F, 0x00000002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00060001}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00000001}, + {0x033, 0x00000035}, + {0x03F, 0x00000002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, 
+ {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00060001}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00000001}, + {0x033, 0x00000065}, + {0x03F, 0x00000002}, + {0x033, 0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00060001}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00000001}, + {0x033, 0x0000006D}, + {0x03F, 0x00000002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + {0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00060001}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00000001}, + {0x033, 0x00000075}, + {0x03F, 0x00000002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00060001}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00000001}, + {0x033, 0x0000007D}, + {0x03F, 0x00000002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00060001}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00000001}, + {0x033, 0x000000A5}, + {0x03F, 0x00000002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00060001}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00000001}, + {0x033, 0x000000AD}, + {0x03F, 0x00000002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00060001}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, + {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00000001}, + {0x033, 0x000000B5}, + {0x03F, 0x00000002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00060001}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00000001}, + {0x033, 0x000000E5}, + {0x03F, 0x00000002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00060001}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00000001}, + {0x033, 0x000000ED}, + {0x03F, 0x00000002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00060001}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 
0x00000001}, + {0x033, 0x000000F5}, + {0x03F, 0x00000002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00060001}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, + {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00000001}, + {0x033, 0x000000FD}, + {0x03F, 0x00000002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00060001}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 0x00000001}, + {0x033, 0x00000125}, + {0x03F, 0x00000002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00060001}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00000001}, + {0x033, 0x0000012D}, + {0x03F, 0x00000002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00060001}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00000001}, + {0x033, 0x00000135}, + {0x03F, 0x00000002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00060001}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00000001}, + {0x033, 0x00000165}, + {0x03F, 0x00000002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00060001}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00000001}, + {0x033, 0x0000016D}, + {0x03F, 0x00000002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00060001}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00000001}, + {0x033, 0x00000175}, + {0x03F, 0x00000002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00060001}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00000001}, + {0x033, 0x0000017D}, + {0x03F, 0x00000002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00060001}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00000001}, + {0x033, 0x000001A5}, + {0x03F, 0x00000002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00060001}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, 
+ {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00000001}, + {0x033, 0x000001AD}, + {0x03F, 0x00000002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 0x00060001}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00000001}, + {0x033, 0x000001B5}, + {0x03F, 0x00000002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00060001}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, + {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00000001}, + {0x033, 0x000001E5}, + {0x03F, 0x00000002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00060001}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00000001}, + {0x033, 0x000001ED}, + {0x03F, 0x00000002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00060001}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00000001}, + {0x033, 0x000001F5}, + {0x03F, 0x00000002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00060001}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00000001}, + {0x033, 0x000001FD}, + {0x03F, 0x00000002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000100}, + {0x033, 0x00000001}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000002}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000003}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000004}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000005}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000006}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000007}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000008}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000009}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000B}, + {0x03F, 0x0000AFFF}, + {0x033, 0x0000000C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000010}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000011}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000012}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000013}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000014}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0000E3FF}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000017}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000018}, + {0x03F, 0x0000FBFF}, + {0x033, 0x00000019}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000020}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000021}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000022}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000023}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000024}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000025}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000026}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000027}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000028}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000029}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000030}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000031}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000032}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000033}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000034}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000035}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000036}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000037}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000038}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000039}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000003A}, + {0x03F, 0x0000EFFF}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000040}, + {0x033, 0x00000000}, + {0x03F, 0x00004344}, + {0x033, 0x00000001}, + {0x03F, 0x00004344}, + {0x033, 0x00000002}, + {0x03F, 0x00004344}, + {0x033, 0x00000003}, + {0x03F, 0x00004344}, + {0x033, 0x00000004}, + {0x03F, 0x00004344}, + {0x033, 0x00000005}, + {0x03F, 0x00004344}, + {0x033, 0x00000006}, + {0x03F, 0x00004324}, + {0x033, 0x00000007}, + {0x03F, 0x00004344}, + {0x033, 0x00000008}, + {0x03F, 0x00004344}, + {0x033, 0x00000009}, + {0x03F, 0x00004344}, + {0x033, 0x0000000A}, + {0x03F, 0x00004344}, + {0x033, 0x0000000B}, + {0x03F, 0x00004344}, + {0x033, 0x00000010}, + {0x03F, 0x00004344}, + {0x033, 0x00000011}, + {0x03F, 0x00004344}, + {0x033, 0x00000012}, + {0x03F, 0x00004344}, + {0x033, 0x00000013}, + {0x03F, 0x00004344}, + {0x033, 0x00000014}, + {0x03F, 0x00004344}, + {0x033, 0x00000015}, + {0x03F, 0x00004344}, + {0x033, 0x00000016}, + {0x03F, 0x00004344}, + {0x033, 0x00000017}, + {0x03F, 0x00004344}, + {0x033, 0x00000018}, + 
{0x03F, 0x00004344}, + {0x033, 0x00000019}, + {0x03F, 0x00004344}, + {0x033, 0x0000001A}, + {0x03F, 0x00004344}, + {0x033, 0x0000001B}, + {0x03F, 0x00004344}, + {0x033, 0x0000001C}, + {0x03F, 0x00004344}, + {0x033, 0x0000001D}, + {0x03F, 0x00004344}, + {0x033, 0x0000001E}, + {0x03F, 0x00004344}, + {0x033, 0x0000001F}, + {0x03F, 0x00004344}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000020}, + {0x033, 0x00000010}, + {0x03F, 0x00000200}, + {0x033, 0x00000011}, + {0x03F, 0x00000200}, + {0x033, 0x00000012}, + {0x03F, 0x00000200}, + {0x033, 0x00000013}, + {0x03F, 0x00000200}, + {0x033, 0x00000020}, + {0x03F, 0x00000200}, + {0x033, 0x00000021}, + {0x03F, 0x00000200}, + {0x033, 0x00000022}, + {0x03F, 0x00000200}, + {0x033, 0x00000023}, + {0x03F, 0x00000200}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000010}, + {0x030, 0x000084DC}, + {0x030, 0x000103C9}, + {0x030, 0x00018399}, + {0x030, 0x00020287}, + {0x030, 0x00028277}, + {0x030, 0x00030165}, + {0x030, 0x00038144}, + {0x030, 0x00040044}, + {0x030, 0x00048022}, + {0x030, 0x00050011}, + {0x030, 0x00058000}, + {0x030, 0x00060000}, + {0x030, 0x00068000}, + {0x030, 0x00070000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000080}, + {0x033, 0x00000004}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000005}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000006}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x00000007}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x00000008}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000009}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x0000000A}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x0000000B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x0000000C}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x0000000D}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x0000000E}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x0000000F}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x00000010}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000011}, + {0x03E, 0x0000001B}, + {0x03F, 0x00023C58}, + {0x033, 0x00000012}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x00000013}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x00000014}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000015}, + {0x03E, 0x0000001B}, + {0x03F, 0x00025A58}, + {0x033, 0x00000016}, + {0x03E, 0x0000001C}, + {0x03F, 0x00021C58}, + {0x033, 0x00000017}, + {0x03E, 0x00000014}, + {0x03F, 0x00022A58}, + {0x033, 0x00000018}, + {0x03E, 0x00000013}, + {0x03F, 0x00025A58}, + {0x033, 0x00000019}, + {0x03E, 0x0000001B}, + {0x03F, 0x00025A58}, + {0x033, 0x0000001A}, + {0x03E, 0x00000014}, + {0x03F, 0x00022A58}, + {0x033, 0x0000001B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022A58}, + {0x033, 0x0000001C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000001D}, + {0x03E, 0x0000001B}, + {0x03F, 0x00025A58}, + {0x033, 0x0000001E}, + {0x03E, 0x00000013}, + {0x03F, 0x00021E58}, + {0x033, 0x0000001F}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000020}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000021}, + {0x03E, 0x0000001C}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000022}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000023}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000024}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000025}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000026}, + {0x03E, 
0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000027}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000028}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000029}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000002A}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000002B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x0000002C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000002D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000002E}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000002F}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000030}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000031}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000032}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000033}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000034}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000035}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000036}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000037}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000038}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000039}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000003A}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000003B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x0000003C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000003D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000003E}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000003F}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x0EF, 0x00000000}, + {0x0EE, 0x00000800}, + {0x033, 0x00000000}, + {0x03F, 0x00000031}, + {0x033, 0x00000001}, + {0x03F, 0x00000023}, + {0x033, 0x00000002}, + {0x03F, 0x00000015}, + {0x033, 0x00000003}, + {0x03F, 0x00000007}, + {0x0EE, 0x00000000}, + {0x0EC, 0x00000400}, + {0x033, 0x00000003}, + {0x03F, 0x00000030}, + {0x033, 0x00000004}, + {0x03F, 0x00000021}, + {0x0EC, 0x00000000}, + {0x0DE, 0x00000000}, + {0x0EF, 0x00000000}, + {0x033, 0x00000000}, + {0x008, 0x00060280}, + {0x009, 0x00030400}, + {0x0EF, 0x00000000}, + {0x0A7, 0x00080308}, + {0x066, 0x00006000}, + {0x0EF, 0x00000400}, + {0x030, 0x000001FF}, + {0x030, 0x000081FF}, + {0x030, 0x000101FF}, + {0x030, 0x000181FF}, + {0x030, 0x000201FF}, + {0x030, 0x000281FF}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x030, 0x000300FF}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0xA0000000, 0x00000000}, + {0x030, 0x0003017F}, + {0xB0000000, 0x00000000}, + {0x030, 0x000380FB}, + {0x0EF, 0x00000000}, + {0x06E, 0x00077A18}, + {0x06D, 0x00000C31}, + {0x06A, 0x000E0F8A}, + {0x06B, 0x000018A0}, + {0x06F, 0x000F81FC}, + {0x05E, 0x0000001F}, + {0x0EF, 0x00000200}, + {0x030, 0x0003D407}, + {0x030, 0x00035A87}, + {0x030, 0x0002CF07}, + {0x030, 0x00024F07}, + {0x030, 0x0001CF07}, + {0x030, 0x00014F07}, + {0x030, 0x0000CF07}, + {0x030, 0x00004F07}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00080000}, + {0x030, 0x00008038}, + {0x030, 0x00010038}, + {0x030, 0x00018038}, + {0x030, 0x00020038}, + {0x030, 0x00028038}, + {0x030, 0x00030038}, + {0x030, 0x0003803C}, + {0x030, 0x0004003C}, + {0x030, 0x0004803C}, + {0x030, 0x0005003C}, + {0x030, 0x0005803C}, + {0x030, 0x0006003C}, + {0x030, 0x0006803C}, + {0x030, 0x0007003C}, + {0x0EB, 0x00000000}, + {0x094, 0x000000FC}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0xA0000000, 0x00000000}, + {0x095, 0x00000000}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00001000}, + {0x033, 0x00000020}, + {0x03F, 0x00000052}, + {0x033, 0x00000024}, + {0x03F, 0x0000005A}, + {0x033, 0x00000028}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000030}, + {0x03F, 0x000001A4}, + {0x033, 0x00000034}, + {0x03F, 0x000001E7}, + {0x033, 0x00000038}, + {0x03F, 0x000002E7}, + {0x033, 0x0000003C}, + {0x03F, 0x000003E7}, + {0x033, 0x00000021}, + {0x03F, 0x00000052}, + {0x033, 0x00000025}, + {0x03F, 0x0000005A}, + {0x033, 0x00000029}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000031}, + {0x03F, 0x000001A4}, + {0x033, 0x00000035}, + {0x03F, 0x000001E6}, + {0x033, 0x00000039}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000022}, + {0x03F, 0x00000052}, + {0x033, 0x00000026}, + {0x03F, 0x0000005A}, + {0x033, 0x0000002A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000032}, + {0x03F, 0x000001A4}, + {0x033, 0x00000036}, + {0x03F, 0x000001E6}, + {0x033, 0x0000003A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000060}, + {0x03F, 0x00000052}, + {0x033, 
0x00000064}, + {0x03F, 0x0000005A}, + {0x033, 0x00000068}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000070}, + {0x03F, 0x000001A4}, + {0x033, 0x00000074}, + {0x03F, 0x000001E6}, + {0x033, 0x00000078}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007C}, + {0x03F, 0x000003E6}, + {0x033, 0x00000061}, + {0x03F, 0x00000052}, + {0x033, 0x00000065}, + {0x03F, 0x0000005A}, + {0x033, 0x00000069}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000071}, + {0x03F, 0x000001A4}, + {0x033, 0x00000075}, + {0x03F, 0x000001E6}, + {0x033, 0x00000079}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000062}, + {0x03F, 0x00000052}, + {0x033, 0x00000066}, + {0x03F, 0x0000005A}, + {0x033, 0x0000006A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000072}, + {0x03F, 0x000001A4}, + {0x033, 0x00000076}, + {0x03F, 0x000001E6}, + {0x033, 0x0000007A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000052}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0000015A}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + 
{0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000077}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007B}, + {0x03F, 0x000002E7}, + {0x033, 0x0000007F}, + {0x03F, 0x000003E7}, + {0x0EE, 0x00000000}, + {0x100EE, 0x00004000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + 
{0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + 
{0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + 
{0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + 
{0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + 
{0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + 
{0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + 
{0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + 
{0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + 
{0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + 
{0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + 
{0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + 
{0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + 
{0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + 
{0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + 
{0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + 
{0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xA0000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + 
{0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xB0000000, 0x00000000}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000000FC}, + {0x10030, 0x000004F9}, + {0x10030, 0x000008F6}, + {0x10030, 0x00000CF3}, + {0x10030, 0x000010F0}, + {0x10030, 0x000014ED}, + {0x10030, 0x000018AC}, + {0x10030, 0x00001CA9}, + {0x10030, 0x00002069}, + {0x10030, 0x00002466}, + {0x10030, 0x00002829}, + {0x10030, 0x00002C26}, + {0x10030, 0x00003023}, + {0x10030, 0x00003420}, + {0x10030, 0x0000381D}, + {0x10030, 0x00003C1A}, + {0x10030, 0x00004017}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000780F4}, + {0x10030, 0x000784F1}, + {0x10030, 0x000788EE}, + {0x10030, 0x00078CEB}, + {0x10030, 0x000790E8}, + {0x10030, 0x000794E5}, + {0x10030, 0x000798E2}, + {0x10030, 0x00079CDF}, + {0x10030, 0x0007A0DC}, + {0x10030, 0x0007A4D9}, + {0x10030, 0x0007A8D6}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B0D0}, + {0x10030, 0x0007B4CD}, + {0x10030, 0x0007B8CA}, + {0x10030, 0x0007BC07}, + {0x10030, 0x0007C004}, + {0x100EE, 0x00000000}, + {0x0EF, 0x00002000}, + {0x033, 0x00000008}, + {0x03F, 0x00000004}, + {0x033, 0x00000009}, + {0x03F, 0x00000003}, + {0x033, 0x0000000A}, + {0x03F, 0x00000003}, + {0x033, 0x0000000B}, + {0x03F, 0x00000002}, + {0x033, 0x0000000C}, + {0x03F, 0x00000002}, + {0x033, 0x0000000D}, + {0x03F, 0x00000002}, + {0x033, 0x0000000E}, + {0x03F, 0x00000002}, + {0x033, 0x0000000F}, + {0x03F, 0x00000002}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00040000}, + {0x030, 0x000109B7}, + {0x0EB, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00050002}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00008001}, + {0x033, 0x00000025}, + {0x03F, 0x00008002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00050002}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00008001}, + {0x033, 0x0000002D}, + {0x03F, 0x00008002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00050002}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00008001}, + {0x033, 0x00000035}, + {0x03F, 0x00008002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, + {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00050002}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00008001}, + {0x033, 0x00000065}, + {0x03F, 0x00008002}, + {0x033, 0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00050002}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00008001}, + {0x033, 0x0000006D}, + {0x03F, 0x00008002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + 
{0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00050002}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00008001}, + {0x033, 0x00000075}, + {0x03F, 0x00008002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00050002}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00008001}, + {0x033, 0x0000007D}, + {0x03F, 0x00008002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00050002}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00008001}, + {0x033, 0x000000A5}, + {0x03F, 0x00008002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00050002}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00008001}, + {0x033, 0x000000AD}, + {0x03F, 0x00008002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00050002}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, + {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00008001}, + {0x033, 0x000000B5}, + {0x03F, 0x00008002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00050002}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00008001}, + {0x033, 0x000000E5}, + {0x03F, 0x00008002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00050002}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00008001}, + {0x033, 0x000000ED}, + {0x03F, 0x00008002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00050002}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 0x00008001}, + {0x033, 0x000000F5}, + {0x03F, 0x00008002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00050002}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, + {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00008001}, + {0x033, 0x000000FD}, + {0x03F, 0x00008002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00050002}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 
0x00008001}, + {0x033, 0x00000125}, + {0x03F, 0x00008002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00050002}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00008001}, + {0x033, 0x0000012D}, + {0x03F, 0x00008002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00050002}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00008001}, + {0x033, 0x00000135}, + {0x03F, 0x00008002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00050002}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00008001}, + {0x033, 0x00000165}, + {0x03F, 0x00008002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00050002}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00008001}, + {0x033, 0x0000016D}, + {0x03F, 0x00008002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00050002}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00008001}, + {0x033, 0x00000175}, + {0x03F, 0x00008002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00050002}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00008001}, + {0x033, 0x0000017D}, + {0x03F, 0x00008002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00050002}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00008001}, + {0x033, 0x000001A5}, + {0x03F, 0x00008002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00050002}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, + {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00008001}, + {0x033, 0x000001AD}, + {0x03F, 0x00008002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 0x00050002}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00008001}, + {0x033, 0x000001B5}, + {0x03F, 0x00008002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00050002}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, 
+ {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00008001}, + {0x033, 0x000001E5}, + {0x03F, 0x00008002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00050002}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00008001}, + {0x033, 0x000001ED}, + {0x03F, 0x00008002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00050002}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00008001}, + {0x033, 0x000001F5}, + {0x03F, 0x00008002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00050002}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00008001}, + {0x033, 0x000001FD}, + {0x03F, 0x00008002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x005, 0x00000001}, + {0x10005, 0x00000001}, + {0x100EE, 0x00000400}, + {0x10030, 0x00000000}, + {0x10030, 0x00001000}, + {0x10030, 0x00002000}, + {0x10030, 0x00003000}, + {0x10030, 0x00004000}, + {0x10030, 0x00005000}, + {0x10030, 0x00006003}, + {0x10030, 0x00007003}, + {0x10030, 0x00008000}, + {0x10030, 0x00009000}, + {0x10030, 0x0000A000}, + {0x10030, 0x0000B000}, + {0x10030, 0x0000C000}, + {0x10030, 0x0000D000}, + {0x10030, 0x0000E003}, + {0x10030, 0x0000F003}, + {0x10030, 0x00010000}, + {0x10030, 0x00011000}, + {0x10030, 0x00012000}, + {0x10030, 0x00013000}, + {0x10030, 0x00014000}, + {0x10030, 0x00015000}, + {0x10030, 0x00016003}, + {0x10030, 0x00017003}, + {0x10030, 0x00018000}, + {0x10030, 0x00019000}, + {0x10030, 0x0001A000}, + {0x10030, 0x0001B000}, + {0x10030, 0x0001C000}, + {0x10030, 0x0001D000}, + {0x10030, 0x0001E003}, + {0x10030, 0x0001F003}, + {0x10030, 0x00020000}, + {0x10030, 0x00021000}, + {0x10030, 0x00022000}, + {0x10030, 0x00023000}, + {0x10030, 0x00024000}, + {0x10030, 0x00025000}, + {0x10030, 0x00026003}, + {0x10030, 0x00027003}, + {0x10030, 0x00028000}, + {0x10030, 0x00029000}, + {0x10030, 0x0002A000}, + {0x10030, 0x0002B000}, + {0x10030, 0x0002C000}, + {0x10030, 0x0002D000}, + {0x10030, 0x0002E003}, + {0x10030, 0x0002F003}, + {0x10030, 0x00030000}, + {0x10030, 0x00031000}, + {0x10030, 0x00032000}, + {0x10030, 0x00033000}, + {0x10030, 0x00034000}, + {0x10030, 0x00035000}, + {0x10030, 0x00036003}, + {0x10030, 0x00037003}, + {0x10030, 0x00038000}, + {0x10030, 0x00039000}, + {0x10030, 0x0003A000}, + {0x10030, 0x0003B000}, + {0x10030, 0x0003C000}, + {0x10030, 0x0003D000}, + {0x10030, 0x0003E003}, + {0x10030, 0x0003F003}, + {0x10030, 0x00060000}, + {0x10030, 0x00061000}, + {0x10030, 0x00062000}, + {0x10030, 0x00063000}, + {0x10030, 0x00064000}, + {0x10030, 0x00065000}, + {0x10030, 0x00066000}, + {0x10030, 0x00067003}, + {0x10030, 0x00068000}, + {0x10030, 0x00069000}, + {0x10030, 0x0006A000}, + {0x10030, 0x0006B000}, + {0x10030, 0x0006C000}, + {0x10030, 0x0006D000}, + {0x10030, 0x0006E000}, + {0x10030, 0x0006F003}, + {0x10030, 0x00070000}, + {0x10030, 0x00071000}, + {0x10030, 0x00072000}, + 
{0x10030, 0x00073000}, + {0x10030, 0x00074000}, + {0x10030, 0x00075000}, + {0x10030, 0x00076000}, + {0x10030, 0x00077003}, + {0x10030, 0x00078000}, + {0x10030, 0x00079000}, + {0x10030, 0x0007A000}, + {0x10030, 0x0007B000}, + {0x10030, 0x0007C000}, + {0x10030, 0x0007D000}, + {0x10030, 0x0007E000}, + {0x10030, 0x0007F003}, + {0x100EE, 0x00000000}, + {0x0FE, 0x00000031}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = { + {0xF0010000, 0x00000000}, + {0xF0020000, 0x00000001}, + {0xF0320000, 0x00000002}, + {0xF0330000, 0x00000003}, + {0xF0340000, 0x00000004}, + {0xF0350000, 0x00000005}, + {0xF0360000, 0x00000006}, + {0xF0010001, 0x00000007}, + {0xF0020001, 0x00000008}, + {0xF0320001, 0x00000009}, + {0xF0330001, 0x0000000A}, + {0xF0340001, 0x0000000B}, + {0xF0350001, 0x0000000C}, + {0xF0360001, 0x0000000D}, + {0xF03F0001, 0x0000000E}, + {0xF0400001, 0x0000000F}, + {0x005, 0x00000000}, + {0x10005, 0x00000000}, + {0x0B9, 0x00020440}, + {0x000, 0x00030001}, + {0x10000, 0x00030000}, + {0x018, 0x00011124}, + {0x10018, 0x00011124}, + {0x05F, 0x00000032}, + {0x097, 0x00043200}, + {0x0A6, 0x00066DB7}, + {0x0EF, 0x00004000}, + {0x033, 0x00000005}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x00000003}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x00000002}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x033, 0x0000000D}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x0000000B}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x0000000A}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x033, 0x00000015}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x00000013}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x00000012}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x0EF, 0x00000000}, + {0x000, 0x00033C01}, + {0x10000, 0x00033C00}, + {0x01A, 0x00040004}, + {0x0FE, 0x00000000}, + {0x096, 0x00015200}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0xA0000000, 0x00000000}, + {0x067, 
0x0004D000}, + {0x0DA, 0x000D4009}, + {0xB0000000, 0x00000000}, + {0x057, 0x0000D589}, + {0x05A, 0x0007FFFF}, + {0x043, 0x00005000}, + {0x018, 0x00001001}, + {0x10018, 0x00001001}, + {0x002, 0x0000000D}, + {0x10002, 0x0000000D}, + {0x0EE, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x0EE, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0xA0000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0xB0000000, 0x00000000}, + {0x0EB, 0x00000000}, + {0x030, 0x000109B0}, + {0x030, 0x000189B0}, + {0x0EB, 0x00000000}, + {0x0EE, 0x00000010}, + {0x033, 0x00000006}, + {0x03F, 0x00000003}, + {0x033, 0x00000007}, + {0x03F, 0x00000003}, + {0x033, 0x00000008}, + {0x03F, 0x00000001}, + {0x0EE, 0x00000000}, + {0x0EF, 0x00001000}, + {0x033, 0x00000000}, + {0x03F, 0x00000015}, + {0x033, 0x00000001}, + {0x03F, 0x00000017}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00060001}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00000001}, + {0x033, 0x00000025}, + {0x03F, 0x00000002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00060001}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00000001}, + {0x033, 0x0000002D}, + {0x03F, 0x00000002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00060001}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00000001}, + {0x033, 0x00000035}, + {0x03F, 0x00000002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, + {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00060001}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00000001}, + {0x033, 0x00000065}, + {0x03F, 0x00000002}, + {0x033, 
0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00060001}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00000001}, + {0x033, 0x0000006D}, + {0x03F, 0x00000002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + {0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00060001}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00000001}, + {0x033, 0x00000075}, + {0x03F, 0x00000002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00060001}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00000001}, + {0x033, 0x0000007D}, + {0x03F, 0x00000002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00060001}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00000001}, + {0x033, 0x000000A5}, + {0x03F, 0x00000002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00060001}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00000001}, + {0x033, 0x000000AD}, + {0x03F, 0x00000002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00060001}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, + {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00000001}, + {0x033, 0x000000B5}, + {0x03F, 0x00000002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00060001}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00000001}, + {0x033, 0x000000E5}, + {0x03F, 0x00000002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00060001}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00000001}, + {0x033, 0x000000ED}, + {0x03F, 0x00000002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00060001}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 0x00000001}, + {0x033, 0x000000F5}, + {0x03F, 0x00000002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00060001}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, 
+ {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00000001}, + {0x033, 0x000000FD}, + {0x03F, 0x00000002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00060001}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 0x00000001}, + {0x033, 0x00000125}, + {0x03F, 0x00000002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00060001}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00000001}, + {0x033, 0x0000012D}, + {0x03F, 0x00000002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00060001}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00000001}, + {0x033, 0x00000135}, + {0x03F, 0x00000002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00060001}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00000001}, + {0x033, 0x00000165}, + {0x03F, 0x00000002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00060001}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00000001}, + {0x033, 0x0000016D}, + {0x03F, 0x00000002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00060001}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00000001}, + {0x033, 0x00000175}, + {0x03F, 0x00000002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00060001}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00000001}, + {0x033, 0x0000017D}, + {0x03F, 0x00000002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00060001}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00000001}, + {0x033, 0x000001A5}, + {0x03F, 0x00000002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00060001}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, + {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00000001}, + {0x033, 0x000001AD}, + {0x03F, 0x00000002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 
0x00060001}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00000001}, + {0x033, 0x000001B5}, + {0x03F, 0x00000002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00060001}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, + {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00000001}, + {0x033, 0x000001E5}, + {0x03F, 0x00000002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00060001}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00000001}, + {0x033, 0x000001ED}, + {0x03F, 0x00000002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00060001}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00000001}, + {0x033, 0x000001F5}, + {0x03F, 0x00000002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00060001}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00000001}, + {0x033, 0x000001FD}, + {0x03F, 0x00000002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000100}, + {0x033, 0x00000001}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000002}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000003}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000004}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000005}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000006}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000007}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000008}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000009}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000B}, + {0x03F, 0x0000AFFF}, + {0x033, 0x0000000C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000010}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000011}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000012}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000013}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000014}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0000EFFF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000017}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000018}, + {0x03F, 0x0000FBFF}, + {0x033, 0x00000019}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000020}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000021}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000022}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000023}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000024}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000025}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000026}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000027}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000028}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000029}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000030}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000031}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000032}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000033}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000034}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000035}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000036}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000037}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000038}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000039}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000003A}, + {0x03F, 0x0000EFFF}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000040}, + {0x033, 0x00000000}, + {0x03F, 0x00004344}, + {0x033, 0x00000001}, + {0x03F, 0x00004344}, + {0x033, 0x00000002}, + {0x03F, 0x00004344}, + {0x033, 0x00000003}, + {0x03F, 0x00004344}, + {0x033, 0x00000004}, + {0x03F, 0x00004344}, + {0x033, 0x00000005}, + {0x03F, 0x00004344}, + {0x033, 0x00000006}, + {0x03F, 0x00004324}, + {0x033, 0x00000007}, + {0x03F, 0x00004344}, + {0x033, 0x00000008}, + {0x03F, 0x00004344}, + {0x033, 0x00000009}, + {0x03F, 0x00004344}, + {0x033, 0x0000000A}, + {0x03F, 0x00004344}, + {0x033, 0x0000000B}, + {0x03F, 0x00004344}, + {0x033, 0x00000010}, + {0x03F, 0x00004344}, + {0x033, 0x00000011}, + {0x03F, 0x00004344}, + {0x033, 0x00000012}, + {0x03F, 0x00004344}, + {0x033, 0x00000013}, + {0x03F, 0x00004344}, + {0x033, 0x00000014}, + {0x03F, 0x00004344}, + {0x033, 0x00000015}, + {0x03F, 0x00004344}, + {0x033, 0x00000016}, + {0x03F, 0x00004344}, + {0x033, 0x00000017}, + {0x03F, 0x00004344}, + {0x033, 0x00000018}, + {0x03F, 0x00004344}, + {0x033, 0x00000019}, + {0x03F, 0x00004344}, + {0x033, 0x0000001A}, + {0x03F, 0x00004344}, + {0x033, 0x0000001B}, + {0x03F, 0x00004344}, + {0x033, 0x0000001C}, + {0x03F, 0x00004344}, + {0x033, 0x0000001D}, + {0x03F, 0x00004344}, + {0x033, 0x0000001E}, + {0x03F, 0x00004344}, + {0x033, 
0x0000001F}, + {0x03F, 0x00004344}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000020}, + {0x033, 0x00000010}, + {0x03F, 0x00000200}, + {0x033, 0x00000011}, + {0x03F, 0x00000200}, + {0x033, 0x00000012}, + {0x03F, 0x00000200}, + {0x033, 0x00000013}, + {0x03F, 0x00000200}, + {0x033, 0x00000020}, + {0x03F, 0x00000200}, + {0x033, 0x00000021}, + {0x03F, 0x00000200}, + {0x033, 0x00000022}, + {0x03F, 0x00000200}, + {0x033, 0x00000023}, + {0x03F, 0x00000200}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000010}, + {0x030, 0x000084DC}, + {0x030, 0x000103C9}, + {0x030, 0x00018399}, + {0x030, 0x00020287}, + {0x030, 0x00028277}, + {0x030, 0x00030165}, + {0x030, 0x00038144}, + {0x030, 0x00040044}, + {0x030, 0x00048022}, + {0x030, 0x00050011}, + {0x030, 0x00058000}, + {0x030, 0x00060000}, + {0x030, 0x00068000}, + {0x030, 0x00070000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000080}, + {0x033, 0x00000004}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000005}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000006}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x00000007}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000008}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000009}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x0000000A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x0000000B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000000C}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x0000000D}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x0000000E}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x0000000F}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000010}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000011}, + {0x03E, 0x0000001B}, + {0x03F, 0x00022A58}, + {0x033, 0x00000012}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x00000013}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000014}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000015}, + {0x03E, 0x0000001B}, + {0x03F, 0x00029858}, + {0x033, 0x00000016}, + {0x03E, 0x0000001C}, + {0x03F, 0x00023958}, + {0x033, 0x00000017}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000018}, + {0x03E, 0x00000013}, + {0x03F, 0x00029858}, + {0x033, 0x00000019}, + {0x03E, 0x0000001B}, + {0x03F, 0x00029858}, + {0x033, 0x0000001A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000001B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000001C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000001D}, + {0x03E, 0x0000001B}, + {0x03F, 0x00029858}, + {0x033, 0x0000001E}, + {0x03E, 0x00000013}, + {0x03F, 0x00023A58}, + {0x033, 0x0000001F}, + {0x03E, 0x00000013}, + {0x03F, 0x00023A58}, + {0x033, 0x00000020}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000021}, + {0x03E, 0x0000001C}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000022}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000023}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000024}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000025}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000026}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000027}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000028}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000029}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000002A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, 
+ {0x033, 0x0000002B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000002C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000002D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000002E}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000002F}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000030}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000031}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000032}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000033}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000034}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000035}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000036}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000037}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000038}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000039}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000003A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000003B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000003C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000003D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000003E}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000003F}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x0EF, 0x00000000}, + {0x0EE, 0x00000800}, + {0x033, 0x00000000}, + {0x03F, 0x00000031}, + {0x033, 0x00000001}, + {0x03F, 0x00000023}, + {0x033, 0x00000002}, + {0x03F, 0x00000015}, + {0x033, 0x00000003}, + {0x03F, 0x00000007}, + {0x0EE, 0x00000000}, + {0x0EC, 0x00000400}, + {0x033, 0x00000003}, + {0x03F, 0x00000030}, + {0x033, 0x00000004}, + {0x03F, 0x00000021}, + {0x0EC, 0x00000000}, + {0x0DE, 0x00000000}, + {0x0EF, 0x00000000}, + {0x033, 0x00000000}, + {0x008, 0x00060280}, + {0x009, 0x00030400}, + {0x0EF, 0x00000000}, + {0x0A7, 0x00080308}, + {0x066, 0x00006000}, + {0x0EF, 0x00000400}, + {0x030, 0x000001FF}, + {0x030, 0x000081FF}, + {0x030, 0x000101FF}, + {0x030, 0x000181FF}, + {0x030, 0x000201FF}, + {0x030, 0x000281FF}, + {0x030, 0x0003017F}, + {0x030, 0x000380FB}, + {0x0EF, 0x00000000}, + {0x06E, 0x00077A18}, + {0x06D, 0x00000C31}, + {0x06A, 0x000E0F8A}, + {0x06B, 0x000018A0}, + {0x06F, 0x000F81FC}, + {0x05E, 0x0000001F}, + {0x0EF, 0x00000200}, + {0x030, 0x0003D407}, + {0x030, 0x00035A87}, + {0x030, 0x0002CF07}, + {0x030, 0x00024F07}, + {0x030, 0x0001CF07}, + {0x030, 0x00014F07}, + {0x030, 0x0000CF07}, + {0x030, 0x00004F07}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00080000}, + {0x030, 0x00008038}, + {0x030, 0x00010038}, + {0x030, 0x00018038}, + {0x030, 0x00020038}, + {0x030, 0x00028038}, + {0x030, 0x00030038}, + {0x030, 0x0003803C}, + {0x030, 0x0004003C}, + {0x030, 0x0004803C}, + {0x030, 0x0005003C}, + {0x030, 0x0005803C}, + {0x030, 0x0006003C}, + {0x030, 0x0006803C}, + {0x030, 0x0007003C}, + {0x0EB, 0x00000000}, + {0x094, 0x000000FC}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + 
{0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0xA0000000, 0x00000000}, + {0x095, 0x00000000}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00001000}, + {0x033, 0x00000020}, + {0x03F, 0x00000052}, + {0x033, 0x00000024}, + {0x03F, 0x0000005A}, + {0x033, 0x00000028}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000030}, + {0x03F, 0x000001A4}, + {0x033, 0x00000034}, + {0x03F, 0x000001E7}, + {0x033, 0x00000038}, + {0x03F, 0x000002E7}, + {0x033, 0x0000003C}, + {0x03F, 0x000003E7}, + {0x033, 0x00000021}, + {0x03F, 0x00000052}, + {0x033, 0x00000025}, + {0x03F, 0x0000005A}, + {0x033, 0x00000029}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000031}, + {0x03F, 0x000001A4}, + {0x033, 0x00000035}, + {0x03F, 0x000001E6}, + {0x033, 0x00000039}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000022}, + {0x03F, 0x00000052}, + {0x033, 0x00000026}, + {0x03F, 0x0000005A}, + {0x033, 0x0000002A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000032}, + {0x03F, 0x000001A4}, + {0x033, 0x00000036}, + {0x03F, 0x000001E6}, + {0x033, 0x0000003A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000060}, + {0x03F, 0x00000052}, + {0x033, 0x00000064}, + {0x03F, 0x0000005A}, + {0x033, 0x00000068}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000070}, + {0x03F, 0x000001A4}, + {0x033, 0x00000074}, + {0x03F, 0x000001E6}, + {0x033, 0x00000078}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007C}, + {0x03F, 0x000003E6}, + {0x033, 0x00000061}, + {0x03F, 0x00000052}, + {0x033, 0x00000065}, + {0x03F, 0x0000005A}, + {0x033, 0x00000069}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000071}, + {0x03F, 0x000001A4}, + {0x033, 0x00000075}, + {0x03F, 0x000001E6}, + {0x033, 0x00000079}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000062}, + {0x03F, 0x00000052}, + {0x033, 0x00000066}, + {0x03F, 0x0000005A}, + {0x033, 0x0000006A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000072}, + {0x03F, 0x000001A4}, + {0x033, 0x00000076}, + {0x03F, 0x000001E6}, + {0x033, 0x0000007A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000052}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000052}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + 
{0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000077}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90350001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000002E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007B}, + {0x03F, 0x000002E7}, + {0x033, 0x0000007F}, + {0x03F, 0x000003E7}, + {0x0EE, 0x00000000}, + {0x100EE, 0x00004000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 
0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 
0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 
0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 
0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 
0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 
0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 
0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 
0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 
0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 
0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 
0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 
0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 
0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 
0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 
0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 
0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xA0000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 
0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xB0000000, 0x00000000}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000000FC}, + {0x10030, 0x000004F9}, + {0x10030, 0x000008F6}, + {0x10030, 0x00000CF3}, + {0x10030, 0x000010F0}, + {0x10030, 0x000014ED}, + {0x10030, 0x000018AC}, + {0x10030, 0x00001CA9}, + {0x10030, 0x00002069}, + {0x10030, 0x00002466}, + {0x10030, 0x00002829}, + {0x10030, 0x00002C26}, + {0x10030, 0x00003023}, + {0x10030, 0x00003420}, + {0x10030, 0x0000381D}, + {0x10030, 0x00003C1A}, + {0x10030, 0x00004017}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000780F4}, + {0x10030, 0x000784F1}, + {0x10030, 0x000788EE}, + {0x10030, 0x00078CEB}, + {0x10030, 0x000790E8}, + {0x10030, 0x000794E5}, + {0x10030, 0x000798E2}, + {0x10030, 0x00079CDF}, + {0x10030, 0x0007A0DC}, + {0x10030, 0x0007A4D9}, + {0x10030, 0x0007A8D6}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B0D0}, + {0x10030, 0x0007B4CD}, + {0x10030, 0x0007B8CA}, + {0x10030, 0x0007BC07}, + {0x10030, 0x0007C004}, + {0x100EE, 0x00000000}, + {0x0EF, 0x00002000}, + {0x033, 0x00000008}, + {0x03F, 0x00000004}, + {0x033, 0x00000009}, + {0x03F, 0x00000003}, + {0x033, 0x0000000A}, + {0x03F, 0x00000003}, + {0x033, 0x0000000B}, + {0x03F, 0x00000002}, + {0x033, 0x0000000C}, + {0x03F, 0x00000002}, + {0x033, 0x0000000D}, + {0x03F, 0x00000002}, + {0x033, 0x0000000E}, + {0x03F, 0x00000002}, + {0x033, 0x0000000F}, + {0x03F, 
0x00000002}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00040000}, + {0x030, 0x000109B7}, + {0x0EB, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00050002}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00008001}, + {0x033, 0x00000025}, + {0x03F, 0x00008002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00050002}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00008001}, + {0x033, 0x0000002D}, + {0x03F, 0x00008002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00050002}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00008001}, + {0x033, 0x00000035}, + {0x03F, 0x00008002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, + {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00050002}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00008001}, + {0x033, 0x00000065}, + {0x03F, 0x00008002}, + {0x033, 0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00050002}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00008001}, + {0x033, 0x0000006D}, + {0x03F, 0x00008002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + {0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00050002}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00008001}, + {0x033, 0x00000075}, + {0x03F, 0x00008002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00050002}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00008001}, + {0x033, 0x0000007D}, + {0x03F, 0x00008002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00050002}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00008001}, + {0x033, 0x000000A5}, + {0x03F, 0x00008002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00050002}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00008001}, + {0x033, 0x000000AD}, + {0x03F, 0x00008002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00050002}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, 
+ {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00008001}, + {0x033, 0x000000B5}, + {0x03F, 0x00008002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00050002}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00008001}, + {0x033, 0x000000E5}, + {0x03F, 0x00008002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00050002}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00008001}, + {0x033, 0x000000ED}, + {0x03F, 0x00008002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00050002}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 0x00008001}, + {0x033, 0x000000F5}, + {0x03F, 0x00008002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00050002}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, + {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00008001}, + {0x033, 0x000000FD}, + {0x03F, 0x00008002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00050002}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 0x00008001}, + {0x033, 0x00000125}, + {0x03F, 0x00008002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00050002}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00008001}, + {0x033, 0x0000012D}, + {0x03F, 0x00008002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00050002}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00008001}, + {0x033, 0x00000135}, + {0x03F, 0x00008002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00050002}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00008001}, + {0x033, 0x00000165}, + {0x03F, 0x00008002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00050002}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00008001}, + {0x033, 0x0000016D}, + {0x03F, 0x00008002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 
0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00050002}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00008001}, + {0x033, 0x00000175}, + {0x03F, 0x00008002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00050002}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00008001}, + {0x033, 0x0000017D}, + {0x03F, 0x00008002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00050002}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00008001}, + {0x033, 0x000001A5}, + {0x03F, 0x00008002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00050002}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, + {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00008001}, + {0x033, 0x000001AD}, + {0x03F, 0x00008002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 0x00050002}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00008001}, + {0x033, 0x000001B5}, + {0x03F, 0x00008002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00050002}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, + {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00008001}, + {0x033, 0x000001E5}, + {0x03F, 0x00008002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00050002}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00008001}, + {0x033, 0x000001ED}, + {0x03F, 0x00008002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00050002}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00008001}, + {0x033, 0x000001F5}, + {0x03F, 0x00008002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00050002}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00008001}, + {0x033, 0x000001FD}, + {0x03F, 0x00008002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x005, 0x00000001}, + {0x10005, 0x00000001}, + {0x100EE, 0x00000400}, + {0x10030, 0x00000000}, + {0x10030, 0x00001000}, + {0x10030, 0x00002000}, + {0x10030, 0x00003000}, + {0x10030, 0x00004000}, + 
{0x10030, 0x00005000}, + {0x10030, 0x00006003}, + {0x10030, 0x00007003}, + {0x10030, 0x00008000}, + {0x10030, 0x00009000}, + {0x10030, 0x0000A000}, + {0x10030, 0x0000B000}, + {0x10030, 0x0000C000}, + {0x10030, 0x0000D000}, + {0x10030, 0x0000E003}, + {0x10030, 0x0000F003}, + {0x10030, 0x00010000}, + {0x10030, 0x00011000}, + {0x10030, 0x00012000}, + {0x10030, 0x00013000}, + {0x10030, 0x00014000}, + {0x10030, 0x00015000}, + {0x10030, 0x00016003}, + {0x10030, 0x00017003}, + {0x10030, 0x00018000}, + {0x10030, 0x00019000}, + {0x10030, 0x0001A000}, + {0x10030, 0x0001B000}, + {0x10030, 0x0001C000}, + {0x10030, 0x0001D000}, + {0x10030, 0x0001E003}, + {0x10030, 0x0001F003}, + {0x10030, 0x00020000}, + {0x10030, 0x00021000}, + {0x10030, 0x00022000}, + {0x10030, 0x00023000}, + {0x10030, 0x00024000}, + {0x10030, 0x00025000}, + {0x10030, 0x00026003}, + {0x10030, 0x00027003}, + {0x10030, 0x00028000}, + {0x10030, 0x00029000}, + {0x10030, 0x0002A000}, + {0x10030, 0x0002B000}, + {0x10030, 0x0002C000}, + {0x10030, 0x0002D000}, + {0x10030, 0x0002E003}, + {0x10030, 0x0002F003}, + {0x10030, 0x00030000}, + {0x10030, 0x00031000}, + {0x10030, 0x00032000}, + {0x10030, 0x00033000}, + {0x10030, 0x00034000}, + {0x10030, 0x00035000}, + {0x10030, 0x00036003}, + {0x10030, 0x00037003}, + {0x10030, 0x00038000}, + {0x10030, 0x00039000}, + {0x10030, 0x0003A000}, + {0x10030, 0x0003B000}, + {0x10030, 0x0003C000}, + {0x10030, 0x0003D000}, + {0x10030, 0x0003E003}, + {0x10030, 0x0003F003}, + {0x10030, 0x00060000}, + {0x10030, 0x00061000}, + {0x10030, 0x00062000}, + {0x10030, 0x00063000}, + {0x10030, 0x00064000}, + {0x10030, 0x00065000}, + {0x10030, 0x00066000}, + {0x10030, 0x00067003}, + {0x10030, 0x00068000}, + {0x10030, 0x00069000}, + {0x10030, 0x0006A000}, + {0x10030, 0x0006B000}, + {0x10030, 0x0006C000}, + {0x10030, 0x0006D000}, + {0x10030, 0x0006E000}, + {0x10030, 0x0006F003}, + {0x10030, 0x00070000}, + {0x10030, 0x00071000}, + {0x10030, 0x00072000}, + {0x10030, 0x00073000}, + {0x10030, 0x00074000}, + {0x10030, 0x00075000}, + {0x10030, 0x00076000}, + {0x10030, 0x00077003}, + {0x10030, 0x00078000}, + {0x10030, 0x00079000}, + {0x10030, 0x0007A000}, + {0x10030, 0x0007B000}, + {0x10030, 0x0007C000}, + {0x10030, 0x0007D000}, + {0x10030, 0x0007E000}, + {0x10030, 0x0007F003}, + {0x0ED, 0x00000010}, + {0x033, 0x00000001}, + {0x03F, 0x0000000A}, + {0x033, 0x00000002}, + {0x03F, 0x0000000A}, + {0x033, 0x00000003}, + {0x03F, 0x0000000A}, + {0x033, 0x00000005}, + {0x03F, 0x0000000A}, + {0x033, 0x00000006}, + {0x03F, 0x0000000A}, + {0x033, 0x00000007}, + {0x03F, 0x0000000A}, + {0x0ED, 0x00000000}, + {0x100EE, 0x00000000}, + {0x0FE, 0x00000031}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_nctl_regs[] = { + {0x8008, 0x00000000}, + {0x8000, 0x00000008}, + {0x8004, 0xf0862966}, + {0x800c, 0x78000000}, + {0x8010, 0x88015000}, + {0x8014, 0x80010100}, + {0x8018, 0x10010100}, + {0x801c, 0xa210bc00}, + {0x8020, 0x000403e0}, + {0x8024, 0x00072160}, + {0x8028, 0x00180e00}, + {0x8030, 0x400000c0}, + {0x8034, 0x11000830}, + {0x8038, 0x00000009}, + {0x803c, 0x00000008}, + {0x8040, 0x00000046}, + {0x8044, 0x0010001f}, + {0x8048, 0xf0000003}, + {0x804c, 0x62ac6162}, + {0x8050, 0xf2acf162}, + {0x8054, 0x62ac6162}, + {0x8058, 0xf2acf162}, + {0x805c, 0x150c0b02}, + {0x8060, 0x150c0b02}, + {0x8064, 0x2aa00047}, + {0x8074, 0x80000000}, + {0x807c, 0x000000ee}, + {0x8088, 0x80000000}, + {0x808c, 0x00000000}, + {0x80b0, 0x00000000}, + {0x80d0, 0x00000000}, + {0x80ec, 0x00000002}, + {0x8098, 0x0000ff00}, + {0x8070, 0x00e80000}, + {0x80b0, 
0xffe00fff}, + {0x809c, 0x0000001f}, + {0x80b8, 0x00001000}, + {0x80bc, 0x0005001d}, + {0x810c, 0x33112211}, + {0x8110, 0x33112211}, + {0x8114, 0x00000000}, + {0x8120, 0x10010000}, + {0x8124, 0x00000000}, + {0x8128, 0x00000200}, + {0x812c, 0x0000c000}, + {0x8138, 0x40000000}, + {0x813c, 0x40000000}, + {0x8140, 0x00000000}, + {0x8144, 0x0b040b03}, + {0x8148, 0x0a040b04}, + {0x814c, 0x0a040b04}, + {0x8150, 0xe4e40000}, + {0x8158, 0xffffffff}, + {0x815c, 0xffffffff}, + {0x8160, 0xffffffff}, + {0x8164, 0xffffffff}, + {0x8168, 0xffffffff}, + {0x816c, 0x1fffffff}, + {0x81cc, 0x00000000}, + {0x81dc, 0x00000002}, + {0x81e0, 0x00000000}, + {0x81e4, 0x00000001}, + {0x81a0, 0x00000000}, + {0x81ac, 0x3fc20400}, + {0x81b0, 0x3f914100}, + {0x81bc, 0x0000005b}, + {0x81c0, 0x0000005b}, + {0x81b4, 0x01e0f078}, + {0x81b8, 0x01e0f078}, + {0x81f0, 0x0000f078}, + {0x820c, 0x33112211}, + {0x8210, 0x33112211}, + {0x8214, 0x00000000}, + {0x8220, 0x10010000}, + {0x8224, 0x00000000}, + {0x8228, 0x00000200}, + {0x822c, 0x0000d000}, + {0x8238, 0x40000000}, + {0x823c, 0x40000000}, + {0x8240, 0x00000000}, + {0x8244, 0x0b040b03}, + {0x8248, 0x0a040b04}, + {0x824c, 0x0a040b04}, + {0x8250, 0xe4e40000}, + {0x8258, 0xffffffff}, + {0x825c, 0xffffffff}, + {0x8260, 0xffffffff}, + {0x8264, 0xffffffff}, + {0x8268, 0xffffffff}, + {0x826c, 0x1fffffff}, + {0x82cc, 0x00000000}, + {0x82dc, 0x00000002}, + {0x82e0, 0x00100000}, + {0x82e4, 0x00000001}, + {0x82a0, 0x00000000}, + {0x82ac, 0x3fc20400}, + {0x82b0, 0x3f914100}, + {0x82bc, 0x0000005b}, + {0x82c0, 0x0000005b}, + {0x82b4, 0x01e0f078}, + {0x82b8, 0x01e0f078}, + {0x82f0, 0x0000f078}, + {0x81d8, 0x00000001}, + {0x82d8, 0x00000001}, + {0x9500, 0x00000000}, + {0x9504, 0x00000000}, + {0x9508, 0x00000000}, + {0x950c, 0x00000000}, + {0x9510, 0x00000000}, + {0x9514, 0x00000000}, + {0x9518, 0x00000000}, + {0x951c, 0x00000000}, + {0x9520, 0x00000000}, + {0x9524, 0x00000000}, + {0x9528, 0x00000000}, + {0x952c, 0x00000000}, + {0x9530, 0x00000000}, + {0x9534, 0x00000000}, + {0x9538, 0x00000000}, + {0x953c, 0x00000000}, + {0x9540, 0x04000000}, + {0x9544, 0x00000000}, + {0x9548, 0x00000000}, + {0x954c, 0x00000000}, + {0x9550, 0x00000000}, + {0x9554, 0x00000000}, + {0x9558, 0x00000000}, + {0x955c, 0x00000000}, + {0x9560, 0x00000000}, + {0x9564, 0x00000000}, + {0x9568, 0x00000000}, + {0x956c, 0x00000000}, + {0x9570, 0x00000000}, + {0x9574, 0x00000000}, + {0x9578, 0x00000000}, + {0x957c, 0x00000000}, + {0x9580, 0x00000000}, + {0x9584, 0x04000000}, + {0x9588, 0x00000000}, + {0x958c, 0x00000000}, + {0x9590, 0x00000000}, + {0x9594, 0x00000000}, + {0x9598, 0x00000000}, + {0x959c, 0x00000000}, + {0x95a0, 0x00000000}, + {0x95a4, 0x00000000}, + {0x95a8, 0x00000000}, + {0x95ac, 0x00000000}, + {0x95b0, 0x00000000}, + {0x95b4, 0x00000000}, + {0x95b8, 0x00000000}, + {0x95bc, 0x00000000}, + {0x95c0, 0x00000000}, + {0x95c4, 0x00000000}, + {0x95c8, 0x04000000}, + {0x95cc, 0x00000000}, + {0x95d0, 0x00000000}, + {0x95d4, 0x00000000}, + {0x95d8, 0x00000000}, + {0x95dc, 0x00000000}, + {0x95e0, 0x00000000}, + {0x95e4, 0x00000000}, + {0x95e8, 0x00000000}, + {0x95ec, 0x00000000}, + {0x95f0, 0x00000000}, + {0x95f4, 0x00000000}, + {0x95f8, 0x00000000}, + {0x95fc, 0x00000000}, + {0x9600, 0x00000000}, + {0x9604, 0x00000000}, + {0x9608, 0x00000000}, + {0x960c, 0x04000000}, + {0x9610, 0x00000000}, + {0x9614, 0x00000000}, + {0x9618, 0x00000000}, + {0x961c, 0x00000000}, + {0x9620, 0x00000000}, + {0x9624, 0x00000000}, + {0x9628, 0x00000000}, + {0x962c, 0x00000000}, + {0x9630, 0x00000000}, + {0x9634, 0x00000000}, + {0x9638, 
0x00000000}, + {0x963c, 0x00000000}, + {0x9640, 0x00000000}, + {0x9644, 0x00000000}, + {0x9648, 0x00000000}, + {0x964c, 0x00000000}, + {0x9650, 0x04000000}, + {0x9654, 0x00000000}, + {0x9658, 0x00000000}, + {0x965c, 0x00000000}, + {0x9660, 0x00000000}, + {0x9664, 0x00000000}, + {0x9668, 0x00000000}, + {0x966c, 0x00000000}, + {0x9670, 0x00000000}, + {0x9674, 0x00000000}, + {0x9678, 0x00000000}, + {0x967c, 0x00000000}, + {0x9680, 0x00000000}, + {0x9684, 0x00000000}, + {0x9688, 0x00000000}, + {0x968c, 0x00000000}, + {0x9690, 0x00000000}, + {0x9694, 0x04000000}, + {0x9698, 0x00000000}, + {0x969c, 0x00000000}, + {0x96a0, 0x00000000}, + {0x96a4, 0x00000000}, + {0x96a8, 0x00000000}, + {0x96ac, 0x00000000}, + {0x96b0, 0x00000000}, + {0x96b4, 0x00000000}, + {0x96b8, 0x00000000}, + {0x96bc, 0x00000000}, + {0x96c0, 0x00000000}, + {0x96c4, 0x00000000}, + {0x96c8, 0x00000000}, + {0x96cc, 0x00000000}, + {0x96d0, 0x00000000}, + {0x96d4, 0x00000000}, + {0x96d8, 0x04000000}, + {0x96dc, 0x00000000}, + {0x96e0, 0x00000000}, + {0x96e4, 0x00000000}, + {0x96e8, 0x00000000}, + {0x96ec, 0x00000000}, + {0x96f0, 0x00000000}, + {0x96f4, 0x00000000}, + {0x96f8, 0x00000000}, + {0x96fc, 0x00000000}, + {0x9700, 0x00000000}, + {0x9704, 0x00000000}, + {0x9708, 0x00000000}, + {0x970c, 0x00000000}, + {0x9710, 0x00000000}, + {0x9714, 0x00000000}, + {0x9718, 0x00000000}, + {0x971c, 0x04000000}, + {0x9720, 0x00000000}, + {0x9724, 0x00000000}, + {0x9728, 0x00000000}, + {0x972c, 0x00000000}, + {0x9730, 0x00000000}, + {0x9734, 0x00000000}, + {0x9738, 0x00000000}, + {0x973c, 0x00000000}, + {0x9740, 0x00000000}, + {0x9744, 0x00000000}, + {0x9748, 0x00000000}, + {0x974c, 0x00000000}, + {0x9750, 0x00000000}, + {0x9754, 0x00000000}, + {0x9758, 0x00000000}, + {0x975c, 0x00000000}, + {0x9760, 0x04000000}, + {0x9764, 0x00000000}, + {0x9768, 0x00000000}, + {0x976c, 0x00000000}, + {0x9770, 0x00000000}, + {0x9774, 0x00000000}, + {0x9778, 0x00000000}, + {0x977c, 0x00000000}, + {0x9780, 0x00000000}, + {0x9784, 0x00000000}, + {0x9788, 0x00000000}, + {0x978c, 0x00000000}, + {0x9790, 0x00000000}, + {0x9794, 0x00000000}, + {0x9798, 0x00000000}, + {0x979c, 0x00000000}, + {0x97a0, 0x00000000}, + {0x97a4, 0x04000000}, + {0x97a8, 0x00000000}, + {0x97ac, 0x00000000}, + {0x97b0, 0x00000000}, + {0x97b4, 0x00000000}, + {0x97b8, 0x00000000}, + {0x97bc, 0x00000000}, + {0x97c0, 0x00000000}, + {0x97c4, 0x00000000}, + {0x97c8, 0x00000000}, + {0x97cc, 0x00000000}, + {0x97d0, 0x00000000}, + {0x97d4, 0x00000000}, + {0x97d8, 0x00000000}, + {0x97dc, 0x00000000}, + {0x97e0, 0x00000000}, + {0x97e4, 0x00000000}, + {0x97e8, 0x04000000}, + {0x97ec, 0x00000000}, + {0x97f0, 0x00000000}, + {0x97f4, 0x00000000}, + {0x97f8, 0x00000000}, + {0x97fc, 0x00000000}, + {0x9800, 0x00000000}, + {0x9804, 0x00000000}, + {0x9808, 0x00000000}, + {0x980c, 0x00000000}, + {0x9810, 0x00000000}, + {0x9814, 0x00000000}, + {0x9818, 0x00000000}, + {0x981c, 0x00000000}, + {0x9820, 0x00000000}, + {0x9824, 0x00000000}, + {0x9828, 0x00000000}, + {0x982c, 0x04000000}, + {0x9830, 0x00000000}, + {0x9834, 0x00000000}, + {0x9838, 0x00000000}, + {0x983c, 0x00000000}, + {0x9840, 0x00000000}, + {0x9844, 0x00000000}, + {0x9848, 0x00000000}, + {0x984c, 0x00000000}, + {0x9850, 0x00000000}, + {0x9854, 0x00000000}, + {0x9858, 0x00000000}, + {0x985c, 0x00000000}, + {0x9860, 0x00000000}, + {0x9864, 0x00000000}, + {0x9868, 0x00000000}, + {0x986c, 0x00000000}, + {0x9870, 0x04000000}, + {0x9874, 0x00000000}, + {0x9878, 0x00000000}, + {0x987c, 0x00000000}, + {0x9880, 0x00000000}, + {0x9884, 0x00000000}, + {0x9888, 
0x00000000}, + {0x988c, 0x00000000}, + {0x9890, 0x00000000}, + {0x9894, 0x00000000}, + {0x9898, 0x00000000}, + {0x989c, 0x00000000}, + {0x98a0, 0x00000000}, + {0x98a4, 0x00000000}, + {0x98a8, 0x00000000}, + {0x98ac, 0x00000000}, + {0x98b0, 0x00000000}, + {0x98b4, 0x04000000}, + {0x98b8, 0x00000000}, + {0x98bc, 0x00000000}, + {0x98c0, 0x00000000}, + {0x98c4, 0x00000000}, + {0x98c8, 0x00000000}, + {0x98cc, 0x00000000}, + {0x98d0, 0x00000000}, + {0x98d4, 0x00000000}, + {0x98d8, 0x00000000}, + {0x98dc, 0x00000000}, + {0x98e0, 0x00000000}, + {0x98e4, 0x00000000}, + {0x98e8, 0x00000000}, + {0x98ec, 0x00000000}, + {0x98f0, 0x00000000}, + {0x98f4, 0x00000000}, + {0x98f8, 0x04000000}, + {0x98fc, 0x00000000}, + {0x9900, 0x00000000}, + {0x9904, 0x00000000}, + {0x9908, 0x00000000}, + {0x990c, 0x00000000}, + {0x9910, 0x00000000}, + {0x9914, 0x00000000}, + {0x9918, 0x00000000}, + {0x991c, 0x00000000}, + {0x9920, 0x00000000}, + {0x9924, 0x00000000}, + {0x9928, 0x00000000}, + {0x992c, 0x00000000}, + {0x9930, 0x00000000}, + {0x9934, 0x00000000}, + {0x9938, 0x00000000}, + {0x993c, 0x04000000}, + {0x9940, 0x00000000}, + {0x9944, 0x00000000}, + {0x9948, 0x00000000}, + {0x994c, 0x00000000}, + {0x9950, 0x00000000}, + {0x9954, 0x00000000}, + {0x9958, 0x00000000}, + {0x995c, 0x00000000}, + {0x9960, 0x00000000}, + {0x9964, 0x00000000}, + {0x9968, 0x00000000}, + {0x996c, 0x00000000}, + {0x9970, 0x00000000}, + {0x9974, 0x00000000}, + {0x9978, 0x00000000}, + {0x997c, 0x00000000}, + {0x9980, 0x04000000}, + {0x9984, 0x00000000}, + {0x9988, 0x00000000}, + {0x998c, 0x00000000}, + {0x9990, 0x00000000}, + {0x9994, 0x00000000}, + {0x9998, 0x00000000}, + {0x999c, 0x00000000}, + {0x99a0, 0x00000000}, + {0x99a4, 0x00000000}, + {0x99a8, 0x00000000}, + {0x99ac, 0x00000000}, + {0x99b0, 0x00000000}, + {0x99b4, 0x00000000}, + {0x99b8, 0x00000000}, + {0x99bc, 0x00000000}, + {0x99c0, 0x00000000}, + {0x99c4, 0x04000000}, + {0x99c8, 0x00000000}, + {0x99cc, 0x00000000}, + {0x99d0, 0x00000000}, + {0x99d4, 0x00000000}, + {0x99d8, 0x00000000}, + {0x99dc, 0x00000000}, + {0x99e0, 0x00000000}, + {0x99e4, 0x00000000}, + {0x99e8, 0x00000000}, + {0x99ec, 0x00000000}, + {0x99f0, 0x00000000}, + {0x99f4, 0x00000000}, + {0x99f8, 0x00000000}, + {0x99fc, 0x00000000}, + {0x9a00, 0x00000000}, + {0x9a04, 0x00000000}, + {0x9a08, 0x04000000}, + {0x9a0c, 0x00000000}, + {0x9a10, 0x00000000}, + {0x9a14, 0x00000000}, + {0x9a18, 0x00000000}, + {0x9a1c, 0x00000000}, + {0x9a20, 0x00000000}, + {0x9a24, 0x00000000}, + {0x9a28, 0x00000000}, + {0x9a2c, 0x00000000}, + {0x9a30, 0x00000000}, + {0x9a34, 0x00000000}, + {0x9a38, 0x00000000}, + {0x9a3c, 0x00000000}, + {0x9a40, 0x00000000}, + {0x9a44, 0x00000000}, + {0x9a48, 0x00000000}, + {0x9a4c, 0x04000000}, + {0x9a50, 0x00000000}, + {0x9a54, 0x00000000}, + {0x9a58, 0x00000000}, + {0x9a5c, 0x00000000}, + {0x9a60, 0x00000000}, + {0x9a64, 0x00000000}, + {0x9a68, 0x00000000}, + {0x9a6c, 0x00000000}, + {0x9a70, 0x00000000}, + {0x9a74, 0x00000000}, + {0x9a78, 0x00000000}, + {0x9a7c, 0x00000000}, + {0x9a80, 0x00000000}, + {0x9a84, 0x00000000}, + {0x9a88, 0x00000000}, + {0x9a8c, 0x00000000}, + {0x9a90, 0x04000000}, + {0x9a94, 0x00000000}, + {0x9a98, 0x00000000}, + {0x9a9c, 0x00000000}, + {0x9aa0, 0x00000000}, + {0x9aa4, 0x00000000}, + {0x9aa8, 0x00000000}, + {0x9aac, 0x00000000}, + {0x9ab0, 0x00000000}, + {0x9ab4, 0x00000000}, + {0x9ab8, 0x00000000}, + {0x9abc, 0x00000000}, + {0x9ac0, 0x00000000}, + {0x9ac4, 0x00000000}, + {0x9ac8, 0x00000000}, + {0x9acc, 0x00000000}, + {0x9ad0, 0x00000000}, + {0x9ad4, 0x04000000}, + {0x9ad8, 
0x00000000}, + {0x9adc, 0x00000000}, + {0x9ae0, 0x00000000}, + {0x9ae4, 0x00000000}, + {0x9ae8, 0x00000000}, + {0x9aec, 0x00000000}, + {0x9af0, 0x00000000}, + {0x9af4, 0x00000000}, + {0x9af8, 0x00000000}, + {0x9afc, 0x00000000}, + {0x9b00, 0x00000000}, + {0x9b04, 0x00000000}, + {0x9b08, 0x00000000}, + {0x9b0c, 0x00000000}, + {0x9b10, 0x00000000}, + {0x9b14, 0x00000000}, + {0x9b18, 0x04000000}, + {0x9b1c, 0x00000000}, + {0x9b20, 0x00000000}, + {0x9b24, 0x00000000}, + {0x9b28, 0x00000000}, + {0x9b2c, 0x00000000}, + {0x9b30, 0x00000000}, + {0x9b34, 0x00000000}, + {0x9b38, 0x00000000}, + {0x9b3c, 0x00000000}, + {0x9b40, 0x00000000}, + {0x9b44, 0x00000000}, + {0x9b48, 0x00000000}, + {0x9b4c, 0x00000000}, + {0x9b50, 0x00000000}, + {0x9b54, 0x00000000}, + {0x9b58, 0x00000000}, + {0x9b5c, 0x04000000}, + {0x9d00, 0x00000000}, + {0x9d04, 0x00000000}, + {0x9d08, 0x00000000}, + {0x9d0c, 0x00000000}, + {0x9d10, 0x00000000}, + {0x9d14, 0x00000000}, + {0x9d18, 0x00000000}, + {0x9d1c, 0x00000000}, + {0x9d20, 0x00000000}, + {0x9d24, 0x00000000}, + {0x9d28, 0x00000000}, + {0x9d2c, 0x00000000}, + {0x9d30, 0x00000000}, + {0x9d34, 0x00000000}, + {0x9d38, 0x00000000}, + {0x9d3c, 0x00000000}, + {0x9d40, 0x04000000}, + {0x9d44, 0x00000000}, + {0x9d48, 0x00000000}, + {0x9d4c, 0x00000000}, + {0x9d50, 0x00000000}, + {0x9d54, 0x00000000}, + {0x9d58, 0x00000000}, + {0x9d5c, 0x00000000}, + {0x9d60, 0x00000000}, + {0x9d64, 0x00000000}, + {0x9d68, 0x00000000}, + {0x9d6c, 0x00000000}, + {0x9d70, 0x00000000}, + {0x9d74, 0x00000000}, + {0x9d78, 0x00000000}, + {0x9d7c, 0x00000000}, + {0x9d80, 0x00000000}, + {0x9d84, 0x04000000}, + {0x9d88, 0x00000000}, + {0x9d8c, 0x00000000}, + {0x9d90, 0x00000000}, + {0x9d94, 0x00000000}, + {0x9d98, 0x00000000}, + {0x9d9c, 0x00000000}, + {0x9da0, 0x00000000}, + {0x9da4, 0x00000000}, + {0x9da8, 0x00000000}, + {0x9dac, 0x00000000}, + {0x9db0, 0x00000000}, + {0x9db4, 0x00000000}, + {0x9db8, 0x00000000}, + {0x9dbc, 0x00000000}, + {0x9dc0, 0x00000000}, + {0x9dc4, 0x00000000}, + {0x9dc8, 0x04000000}, + {0x9dcc, 0x00000000}, + {0x9dd0, 0x00000000}, + {0x9dd4, 0x00000000}, + {0x9dd8, 0x00000000}, + {0x9ddc, 0x00000000}, + {0x9de0, 0x00000000}, + {0x9de4, 0x00000000}, + {0x9de8, 0x00000000}, + {0x9dec, 0x00000000}, + {0x9df0, 0x00000000}, + {0x9df4, 0x00000000}, + {0x9df8, 0x00000000}, + {0x9dfc, 0x00000000}, + {0x9e00, 0x00000000}, + {0x9e04, 0x00000000}, + {0x9e08, 0x00000000}, + {0x9e0c, 0x04000000}, + {0x9e10, 0x00000000}, + {0x9e14, 0x00000000}, + {0x9e18, 0x00000000}, + {0x9e1c, 0x00000000}, + {0x9e20, 0x00000000}, + {0x9e24, 0x00000000}, + {0x9e28, 0x00000000}, + {0x9e2c, 0x00000000}, + {0x9e30, 0x00000000}, + {0x9e34, 0x00000000}, + {0x9e38, 0x00000000}, + {0x9e3c, 0x00000000}, + {0x9e40, 0x00000000}, + {0x9e44, 0x00000000}, + {0x9e48, 0x00000000}, + {0x9e4c, 0x00000000}, + {0x9e50, 0x04000000}, + {0x9e54, 0x00000000}, + {0x9e58, 0x00000000}, + {0x9e5c, 0x00000000}, + {0x9e60, 0x00000000}, + {0x9e64, 0x00000000}, + {0x9e68, 0x00000000}, + {0x9e6c, 0x00000000}, + {0x9e70, 0x00000000}, + {0x9e74, 0x00000000}, + {0x9e78, 0x00000000}, + {0x9e7c, 0x00000000}, + {0x9e80, 0x00000000}, + {0x9e84, 0x00000000}, + {0x9e88, 0x00000000}, + {0x9e8c, 0x00000000}, + {0x9e90, 0x00000000}, + {0x9e94, 0x04000000}, + {0x9e98, 0x00000000}, + {0x9e9c, 0x00000000}, + {0x9ea0, 0x00000000}, + {0x9ea4, 0x00000000}, + {0x9ea8, 0x00000000}, + {0x9eac, 0x00000000}, + {0x9eb0, 0x00000000}, + {0x9eb4, 0x00000000}, + {0x9eb8, 0x00000000}, + {0x9ebc, 0x00000000}, + {0x9ec0, 0x00000000}, + {0x9ec4, 0x00000000}, + {0x9ec8, 
0x00000000}, + {0x9ecc, 0x00000000}, + {0x9ed0, 0x00000000}, + {0x9ed4, 0x00000000}, + {0x9ed8, 0x04000000}, + {0x9edc, 0x00000000}, + {0x9ee0, 0x00000000}, + {0x9ee4, 0x00000000}, + {0x9ee8, 0x00000000}, + {0x9eec, 0x00000000}, + {0x9ef0, 0x00000000}, + {0x9ef4, 0x00000000}, + {0x9ef8, 0x00000000}, + {0x9efc, 0x00000000}, + {0x9f00, 0x00000000}, + {0x9f04, 0x00000000}, + {0x9f08, 0x00000000}, + {0x9f0c, 0x00000000}, + {0x9f10, 0x00000000}, + {0x9f14, 0x00000000}, + {0x9f18, 0x00000000}, + {0x9f1c, 0x04000000}, + {0x9f20, 0x00000000}, + {0x9f24, 0x00000000}, + {0x9f28, 0x00000000}, + {0x9f2c, 0x00000000}, + {0x9f30, 0x00000000}, + {0x9f34, 0x00000000}, + {0x9f38, 0x00000000}, + {0x9f3c, 0x00000000}, + {0x9f40, 0x00000000}, + {0x9f44, 0x00000000}, + {0x9f48, 0x00000000}, + {0x9f4c, 0x00000000}, + {0x9f50, 0x00000000}, + {0x9f54, 0x00000000}, + {0x9f58, 0x00000000}, + {0x9f5c, 0x00000000}, + {0x9f60, 0x04000000}, + {0x9f64, 0x00000000}, + {0x9f68, 0x00000000}, + {0x9f6c, 0x00000000}, + {0x9f70, 0x00000000}, + {0x9f74, 0x00000000}, + {0x9f78, 0x00000000}, + {0x9f7c, 0x00000000}, + {0x9f80, 0x00000000}, + {0x9f84, 0x00000000}, + {0x9f88, 0x00000000}, + {0x9f8c, 0x00000000}, + {0x9f90, 0x00000000}, + {0x9f94, 0x00000000}, + {0x9f98, 0x00000000}, + {0x9f9c, 0x00000000}, + {0x9fa0, 0x00000000}, + {0x9fa4, 0x04000000}, + {0x9fa8, 0x00000000}, + {0x9fac, 0x00000000}, + {0x9fb0, 0x00000000}, + {0x9fb4, 0x00000000}, + {0x9fb8, 0x00000000}, + {0x9fbc, 0x00000000}, + {0x9fc0, 0x00000000}, + {0x9fc4, 0x00000000}, + {0x9fc8, 0x00000000}, + {0x9fcc, 0x00000000}, + {0x9fd0, 0x00000000}, + {0x9fd4, 0x00000000}, + {0x9fd8, 0x00000000}, + {0x9fdc, 0x00000000}, + {0x9fe0, 0x00000000}, + {0x9fe4, 0x00000000}, + {0x9fe8, 0x04000000}, + {0x9fec, 0x00000000}, + {0x9ff0, 0x00000000}, + {0x9ff4, 0x00000000}, + {0x9ff8, 0x00000000}, + {0x9ffc, 0x00000000}, + {0xa000, 0x00000000}, + {0xa004, 0x00000000}, + {0xa008, 0x00000000}, + {0xa00c, 0x00000000}, + {0xa010, 0x00000000}, + {0xa014, 0x00000000}, + {0xa018, 0x00000000}, + {0xa01c, 0x00000000}, + {0xa020, 0x00000000}, + {0xa024, 0x00000000}, + {0xa028, 0x00000000}, + {0xa02c, 0x04000000}, + {0xa030, 0x00000000}, + {0xa034, 0x00000000}, + {0xa038, 0x00000000}, + {0xa03c, 0x00000000}, + {0xa040, 0x00000000}, + {0xa044, 0x00000000}, + {0xa048, 0x00000000}, + {0xa04c, 0x00000000}, + {0xa050, 0x00000000}, + {0xa054, 0x00000000}, + {0xa058, 0x00000000}, + {0xa05c, 0x00000000}, + {0xa060, 0x00000000}, + {0xa064, 0x00000000}, + {0xa068, 0x00000000}, + {0xa06c, 0x00000000}, + {0xa070, 0x04000000}, + {0xa074, 0x00000000}, + {0xa078, 0x00000000}, + {0xa07c, 0x00000000}, + {0xa080, 0x00000000}, + {0xa084, 0x00000000}, + {0xa088, 0x00000000}, + {0xa08c, 0x00000000}, + {0xa090, 0x00000000}, + {0xa094, 0x00000000}, + {0xa098, 0x00000000}, + {0xa09c, 0x00000000}, + {0xa0a0, 0x00000000}, + {0xa0a4, 0x00000000}, + {0xa0a8, 0x00000000}, + {0xa0ac, 0x00000000}, + {0xa0b0, 0x00000000}, + {0xa0b4, 0x04000000}, + {0xa0b8, 0x00000000}, + {0xa0bc, 0x00000000}, + {0xa0c0, 0x00000000}, + {0xa0c4, 0x00000000}, + {0xa0c8, 0x00000000}, + {0xa0cc, 0x00000000}, + {0xa0d0, 0x00000000}, + {0xa0d4, 0x00000000}, + {0xa0d8, 0x00000000}, + {0xa0dc, 0x00000000}, + {0xa0e0, 0x00000000}, + {0xa0e4, 0x00000000}, + {0xa0e8, 0x00000000}, + {0xa0ec, 0x00000000}, + {0xa0f0, 0x00000000}, + {0xa0f4, 0x00000000}, + {0xa0f8, 0x04000000}, + {0xa0fc, 0x00000000}, + {0xa100, 0x00000000}, + {0xa104, 0x00000000}, + {0xa108, 0x00000000}, + {0xa10c, 0x00000000}, + {0xa110, 0x00000000}, + {0xa114, 0x00000000}, + {0xa118, 
0x00000000}, + {0xa11c, 0x00000000}, + {0xa120, 0x00000000}, + {0xa124, 0x00000000}, + {0xa128, 0x00000000}, + {0xa12c, 0x00000000}, + {0xa130, 0x00000000}, + {0xa134, 0x00000000}, + {0xa138, 0x00000000}, + {0xa13c, 0x04000000}, + {0xa140, 0x00000000}, + {0xa144, 0x00000000}, + {0xa148, 0x00000000}, + {0xa14c, 0x00000000}, + {0xa150, 0x00000000}, + {0xa154, 0x00000000}, + {0xa158, 0x00000000}, + {0xa15c, 0x00000000}, + {0xa160, 0x00000000}, + {0xa164, 0x00000000}, + {0xa168, 0x00000000}, + {0xa16c, 0x00000000}, + {0xa170, 0x00000000}, + {0xa174, 0x00000000}, + {0xa178, 0x00000000}, + {0xa17c, 0x00000000}, + {0xa180, 0x04000000}, + {0xa184, 0x00000000}, + {0xa188, 0x00000000}, + {0xa18c, 0x00000000}, + {0xa190, 0x00000000}, + {0xa194, 0x00000000}, + {0xa198, 0x00000000}, + {0xa19c, 0x00000000}, + {0xa1a0, 0x00000000}, + {0xa1a4, 0x00000000}, + {0xa1a8, 0x00000000}, + {0xa1ac, 0x00000000}, + {0xa1b0, 0x00000000}, + {0xa1b4, 0x00000000}, + {0xa1b8, 0x00000000}, + {0xa1bc, 0x00000000}, + {0xa1c0, 0x00000000}, + {0xa1c4, 0x04000000}, + {0xa1c8, 0x00000000}, + {0xa1cc, 0x00000000}, + {0xa1d0, 0x00000000}, + {0xa1d4, 0x00000000}, + {0xa1d8, 0x00000000}, + {0xa1dc, 0x00000000}, + {0xa1e0, 0x00000000}, + {0xa1e4, 0x00000000}, + {0xa1e8, 0x00000000}, + {0xa1ec, 0x00000000}, + {0xa1f0, 0x00000000}, + {0xa1f4, 0x00000000}, + {0xa1f8, 0x00000000}, + {0xa1fc, 0x00000000}, + {0xa200, 0x00000000}, + {0xa204, 0x00000000}, + {0xa208, 0x04000000}, + {0xa20c, 0x00000000}, + {0xa210, 0x00000000}, + {0xa214, 0x00000000}, + {0xa218, 0x00000000}, + {0xa21c, 0x00000000}, + {0xa220, 0x00000000}, + {0xa224, 0x00000000}, + {0xa228, 0x00000000}, + {0xa22c, 0x00000000}, + {0xa230, 0x00000000}, + {0xa234, 0x00000000}, + {0xa238, 0x00000000}, + {0xa23c, 0x00000000}, + {0xa240, 0x00000000}, + {0xa244, 0x00000000}, + {0xa248, 0x00000000}, + {0xa24c, 0x04000000}, + {0xa250, 0x00000000}, + {0xa254, 0x00000000}, + {0xa258, 0x00000000}, + {0xa25c, 0x00000000}, + {0xa260, 0x00000000}, + {0xa264, 0x00000000}, + {0xa268, 0x00000000}, + {0xa26c, 0x00000000}, + {0xa270, 0x00000000}, + {0xa274, 0x00000000}, + {0xa278, 0x00000000}, + {0xa27c, 0x00000000}, + {0xa280, 0x00000000}, + {0xa284, 0x00000000}, + {0xa288, 0x00000000}, + {0xa28c, 0x00000000}, + {0xa290, 0x04000000}, + {0xa294, 0x00000000}, + {0xa298, 0x00000000}, + {0xa29c, 0x00000000}, + {0xa2a0, 0x00000000}, + {0xa2a4, 0x00000000}, + {0xa2a8, 0x00000000}, + {0xa2ac, 0x00000000}, + {0xa2b0, 0x00000000}, + {0xa2b4, 0x00000000}, + {0xa2b8, 0x00000000}, + {0xa2bc, 0x00000000}, + {0xa2c0, 0x00000000}, + {0xa2c4, 0x00000000}, + {0xa2c8, 0x00000000}, + {0xa2cc, 0x00000000}, + {0xa2d0, 0x00000000}, + {0xa2d4, 0x04000000}, + {0xa2d8, 0x00000000}, + {0xa2dc, 0x00000000}, + {0xa2e0, 0x00000000}, + {0xa2e4, 0x00000000}, + {0xa2e8, 0x00000000}, + {0xa2ec, 0x00000000}, + {0xa2f0, 0x00000000}, + {0xa2f4, 0x00000000}, + {0xa2f8, 0x00000000}, + {0xa2fc, 0x00000000}, + {0xa300, 0x00000000}, + {0xa304, 0x00000000}, + {0xa308, 0x00000000}, + {0xa30c, 0x00000000}, + {0xa310, 0x00000000}, + {0xa314, 0x00000000}, + {0xa318, 0x04000000}, + {0xa31c, 0x00000000}, + {0xa320, 0x00000000}, + {0xa324, 0x00000000}, + {0xa328, 0x00000000}, + {0xa32c, 0x00000000}, + {0xa330, 0x00000000}, + {0xa334, 0x00000000}, + {0xa338, 0x00000000}, + {0xa33c, 0x00000000}, + {0xa340, 0x00000000}, + {0xa344, 0x00000000}, + {0xa348, 0x00000000}, + {0xa34c, 0x00000000}, + {0xa350, 0x00000000}, + {0xa354, 0x00000000}, + {0xa358, 0x00000000}, + {0xa35c, 0x04000000}, + {0x81d8, 0x00000000}, + {0x82d8, 0x00000000}, + {0xb104, 
0x2b251f19}, + {0xb108, 0x433d3731}, + {0xb10c, 0x5b554f49}, + {0xb110, 0x736d6761}, + {0xb114, 0x7f7f7f79}, + {0xb118, 0x120f7f7f}, + {0xb11c, 0x1e1b1815}, + {0xb120, 0x2a272421}, + {0xb124, 0x3633302d}, + {0xb128, 0x3f3f3c39}, + {0xb12c, 0x3f3f3f3f}, + {0x8088, 0x00000110}, + {0x8000, 0x00000008}, + {0x8080, 0x00000005}, + {0x8500, 0x80000008}, + {0x8504, 0x43000004}, + {0x8508, 0x4b044a00}, + {0x850c, 0x40098604}, + {0x8510, 0x0004e024}, + {0x8514, 0x87044b05}, + {0x8518, 0xe024400b}, + {0x851c, 0x4b000004}, + {0x8520, 0x21e07410}, + {0x8524, 0x16580000}, + {0x8528, 0x00047430}, + {0x852c, 0x00074380}, + {0x8530, 0x00044c00}, + {0x8534, 0x00074300}, + {0x8538, 0x00045603}, + {0x853c, 0x42fe5700}, + {0x8540, 0x42004000}, + {0x8544, 0x30005055}, + {0x8548, 0xa512b41c}, + {0x854c, 0xf02fe66f}, + {0x8550, 0xf22ff12f}, + {0x8554, 0xf42ff32f}, + {0x8558, 0xf62ff52f}, + {0x855c, 0xf82ff72f}, + {0x8560, 0xfa2ff92f}, + {0x8564, 0xfc2ffb2f}, + {0x8568, 0xfe2ffd2f}, + {0x856c, 0xe66fff2f}, + {0x8570, 0xf12ef02e}, + {0x8574, 0xf32ef22e}, + {0x8578, 0xf52ef42e}, + {0x857c, 0xff2ef62e}, + {0x8580, 0xa511000b}, + {0x8584, 0xf12cf02c}, + {0x8588, 0xf32cf22c}, + {0x858c, 0xf52cf42c}, + {0x8590, 0xf72cf62c}, + {0x8594, 0xf92cf82c}, + {0x8598, 0xfb2cfa2c}, + {0x859c, 0xfd2cfc2c}, + {0x85a0, 0xff2cfe2c}, + {0x85a4, 0xf12cf02c}, + {0x85a8, 0x0001f22c}, + {0x85ac, 0x30b330b3}, + {0x85b0, 0x310c3125}, + {0x85b4, 0x31253161}, + {0x85b8, 0x3081316f}, + {0x85bc, 0x317f3172}, + {0x85c0, 0x3192318c}, + {0x85c4, 0x32b832a6}, + {0x85c8, 0x31fd32c2}, + {0x85cc, 0x330732cc}, + {0x85d0, 0x33193343}, + {0x85d4, 0x331d3312}, + {0x85d8, 0x31663316}, + {0x85dc, 0x3365335b}, + {0x85e0, 0x3379336f}, + {0x85e4, 0x338d3383}, + {0x85e8, 0x33a13397}, + {0x85ec, 0x33b833ab}, + {0x85f0, 0x33d733c9}, + {0x85f4, 0x342333db}, + {0x85f8, 0x343c343b}, + {0x85fc, 0x3471346f}, + {0x8600, 0xe493347c}, + {0x8604, 0x20887410}, + {0x8608, 0x140f0200}, + {0x860c, 0x02002098}, + {0x8610, 0x20a8140f}, + {0x8614, 0x140f0200}, + {0x8618, 0xe4df7430}, + {0x861c, 0x74105b10}, + {0x8620, 0x000120a0}, + {0x8624, 0x140f140f}, + {0x8628, 0x56e15507}, + {0x862c, 0xe4c95c06}, + {0x8630, 0x20a87410}, + {0x8634, 0x140f0201}, + {0x8638, 0xe4c95517}, + {0x863c, 0x20a87410}, + {0x8640, 0x140f0200}, + {0x8644, 0x56c15517}, + {0x8648, 0xe4c95c02}, + {0x864c, 0x20a07410}, + {0x8650, 0x140f0000}, + {0x8654, 0x55071407}, + {0x8658, 0xe47ee4c9}, + {0x865c, 0x4686750a}, + {0x8660, 0xe159e4d3}, + {0x8664, 0xe4930001}, + {0x8668, 0x20a87410}, + {0x866c, 0x140f0200}, + {0x8670, 0x02002098}, + {0x8674, 0x2088140f}, + {0x8678, 0x140f0200}, + {0x867c, 0xe4df7430}, + {0x8680, 0x74105b10}, + {0x8684, 0x020120a8}, + {0x8688, 0x2080140f}, + {0x868c, 0x140f0000}, + {0x8690, 0x56615507}, + {0x8694, 0xe4c95c06}, + {0x8698, 0x20887410}, + {0x869c, 0x140f0200}, + {0x86a0, 0xe4c95517}, + {0x86a4, 0x20a87410}, + {0x86a8, 0x140f0200}, + {0x86ac, 0x56415517}, + {0x86b0, 0xe4c95c02}, + {0x86b4, 0x20807410}, + {0x86b8, 0x140f0000}, + {0x86bc, 0x55071407}, + {0x86c0, 0xe47ee4c9}, + {0x86c4, 0x468e7508}, + {0x86c8, 0xe159e4d3}, + {0x86cc, 0x5b10f025}, + {0x86d0, 0x20a87410}, + {0x86d4, 0x140f0201}, + {0x86d8, 0x00002090}, + {0x86dc, 0x5507140f}, + {0x86e0, 0x5c065661}, + {0x86e4, 0x7410e4c9}, + {0x86e8, 0x02002098}, + {0x86ec, 0x5517140f}, + {0x86f0, 0x7410e4c9}, + {0x86f4, 0x020020a8}, + {0x86f8, 0x5517140f}, + {0x86fc, 0x5c025641}, + {0x8700, 0x7410e4c9}, + {0x8704, 0x00002090}, + {0x8708, 0x5507140f}, + {0x870c, 0x7509e4c9}, + {0x8710, 0xe4d34696}, + {0x8714, 0x0001e159}, + {0x8718, 
0x74105b10}, + {0x871c, 0x000020a0}, + {0x8720, 0x5507140f}, + {0x8724, 0xe4c95601}, + {0x8728, 0x20a87410}, + {0x872c, 0x140f0200}, + {0x8730, 0xe4c95517}, + {0x8734, 0x750ae47e}, + {0x8738, 0xe4d34686}, + {0x873c, 0x5500e159}, + {0x8740, 0x5501e4c5}, + {0x8744, 0xe4930001}, + {0x8748, 0x5b10e4df}, + {0x874c, 0x20807410}, + {0x8750, 0x140f0000}, + {0x8754, 0x02002098}, + {0x8758, 0xf205140f}, + {0x875c, 0x20a8f504}, + {0x8760, 0x140f0200}, + {0x8764, 0x56015507}, + {0x8768, 0x7410e4c9}, + {0x876c, 0x02002088}, + {0x8770, 0x5517140f}, + {0x8774, 0xe47ee4c9}, + {0x8778, 0x468e7508}, + {0x877c, 0xe159e4d3}, + {0x8780, 0x7410f512}, + {0x8784, 0x00002090}, + {0x8788, 0x5507140f}, + {0x878c, 0x7410e4c9}, + {0x8790, 0x02002098}, + {0x8794, 0x5517140f}, + {0x8798, 0x7509e4c9}, + {0x879c, 0xe4d34696}, + {0x87a0, 0x0001e159}, + {0x87a4, 0x46965b90}, + {0x87a8, 0xe4c55500}, + {0x87ac, 0x5b105501}, + {0x87b0, 0x79000001}, + {0x87b4, 0x57107420}, + {0x87b8, 0x140f5700}, + {0x87bc, 0x74309700}, + {0x87c0, 0xe4930001}, + {0x87c4, 0x0bbde4df}, + {0x87c8, 0x0001e662}, + {0x87cc, 0x5720e493}, + {0x87d0, 0x540054fd}, + {0x87d4, 0x70005700}, + {0x87d8, 0x70c0e4dd}, + {0x87dc, 0xe4a90001}, + {0x87e0, 0x0001e512}, + {0x87e4, 0x31abe493}, + {0x87e8, 0xe6620023}, + {0x87ec, 0x54ed0002}, + {0x87f0, 0x00230baa}, + {0x87f4, 0x0002e662}, + {0x87f8, 0xe486e52e}, + {0x87fc, 0xe4930001}, + {0x8800, 0x002231a1}, + {0x8804, 0x0002e662}, + {0x8808, 0x0baa54ec}, + {0x880c, 0xe6620022}, + {0x8810, 0xe52e0002}, + {0x8814, 0x0001e486}, + {0x8818, 0x0baae493}, + {0x881c, 0xe52e3194}, + {0x8820, 0x0001e486}, + {0x8824, 0x0babe493}, + {0x8828, 0x6d0f6c67}, + {0x882c, 0xe662e4df}, + {0x8830, 0x6c8bfb04}, + {0x8834, 0xe662e4df}, + {0x8838, 0x6c95fa04}, + {0x883c, 0xe662e4df}, + {0x8840, 0x0bacfb06}, + {0x8844, 0x6d0f6cb3}, + {0x8848, 0xe662e4df}, + {0x884c, 0xf904fa05}, + {0x8850, 0xe4df6ccb}, + {0x8854, 0xfb06e662}, + {0x8858, 0x6cdb0bad}, + {0x885c, 0xe4df6d0f}, + {0x8860, 0x6cf5e662}, + {0x8864, 0xe4df6d0f}, + {0x8868, 0x6c0be662}, + {0x886c, 0xe4df6d00}, + {0x8870, 0xfb04e662}, + {0x8874, 0xe4df6c25}, + {0x8878, 0xf8b7e662}, + {0x887c, 0xf904fa05}, + {0x8880, 0xe4df6c35}, + {0x8884, 0xfb04e662}, + {0x8888, 0xe4df6c4d}, + {0x888c, 0xf9bae662}, + {0x8890, 0x6c6bfa04}, + {0x8894, 0xe662e4df}, + {0x8898, 0x6c75fb04}, + {0x889c, 0xe662e4df}, + {0x88a0, 0xe4df6c99}, + {0x88a4, 0xfabce662}, + {0x88a8, 0x57200ba8}, + {0x88ac, 0x540054f0}, + {0x88b0, 0x7c355700}, + {0x88b4, 0x70007d00}, + {0x88b8, 0x6d0e6cc5}, + {0x88bc, 0xe662e4dd}, + {0x88c0, 0xe4dd6cf5}, + {0x88c4, 0x6c29e662}, + {0x88c8, 0xe4dd6d0f}, + {0x88cc, 0x0bb3e662}, + {0x88d0, 0x54ed5720}, + {0x88d4, 0x57005400}, + {0x88d8, 0x7d0f7ccb}, + {0x88dc, 0x6d006cd7}, + {0x88e0, 0xe662e4dd}, + {0x88e4, 0x6d016c0b}, + {0x88e8, 0xe662e4dd}, + {0x88ec, 0xe4dd6c3b}, + {0x88f0, 0x70c0e662}, + {0x88f4, 0xe486e52e}, + {0x88f8, 0xe4a90001}, + {0x88fc, 0x63424380}, + {0x8900, 0x43006887}, + {0x8904, 0x74100ba6}, + {0x8908, 0x000121e8}, + {0x890c, 0x6ec71658}, + {0x8910, 0xe5126f0e}, + {0x8914, 0x7410e667}, + {0x8918, 0x000321e8}, + {0x891c, 0x6eeb1658}, + {0x8920, 0xe667e512}, + {0x8924, 0x21e87410}, + {0x8928, 0x16580005}, + {0x892c, 0x6f0f6e13}, + {0x8930, 0xe667e512}, + {0x8934, 0x21e87410}, + {0x8938, 0x16580007}, + {0x893c, 0xe5126e3b}, + {0x8940, 0x7410e667}, + {0x8944, 0x000921e8}, + {0x8948, 0x6e671658}, + {0x894c, 0xe5126f0f}, + {0x8950, 0x7410e667}, + {0x8954, 0x000b21e8}, + {0x8958, 0x6e8b1658}, + {0x895c, 0xe667e512}, + {0x8960, 0x21e87410}, + {0x8964, 0x1658000d}, + {0x8968, 
0x6f0f6eb3}, + {0x896c, 0xe667e512}, + {0x8970, 0xfe08ff09}, + {0x8974, 0x21e87410}, + {0x8978, 0x1658000e}, + {0x897c, 0xe5126ec7}, + {0x8980, 0x7410e667}, + {0x8984, 0x000f21e8}, + {0x8988, 0x6edb1658}, + {0x898c, 0xe5126f0f}, + {0x8990, 0x7410e667}, + {0x8994, 0x001021e8}, + {0x8998, 0x6eef1658}, + {0x899c, 0xe667e512}, + {0x89a0, 0xfe02ff03}, + {0x89a4, 0x7410e667}, + {0x89a8, 0x001321e8}, + {0x89ac, 0x6e111658}, + {0x89b0, 0xe5126f00}, + {0x89b4, 0xff03e667}, + {0x89b8, 0xe667fe02}, + {0x89bc, 0x21e87410}, + {0x89c0, 0x16580014}, + {0x89c4, 0xe5126e25}, + {0x89c8, 0xfc48e667}, + {0x89cc, 0xfe08ff09}, + {0x89d0, 0x21e87410}, + {0x89d4, 0x16580015}, + {0x89d8, 0xe5126e39}, + {0x89dc, 0x7410e667}, + {0x89e0, 0x001621e8}, + {0x89e4, 0x6e4d1658}, + {0x89e8, 0xe667e512}, + {0x89ec, 0x7410fd49}, + {0x89f0, 0x001821e8}, + {0x89f4, 0x6e751658}, + {0x89f8, 0xe667e512}, + {0x89fc, 0x21e87410}, + {0x8a00, 0x1658001a}, + {0x8a04, 0xe5126e99}, + {0x8a08, 0xfe44e667}, + {0x8a0c, 0x21e87410}, + {0x8a10, 0x1658001c}, + {0x8a14, 0xe5126ec5}, + {0x8a18, 0x7410e667}, + {0x8a1c, 0x001e21e8}, + {0x8a20, 0x6eed1658}, + {0x8a24, 0xe667e512}, + {0x8a28, 0x21e87410}, + {0x8a2c, 0x16580020}, + {0x8a30, 0x6f016e15}, + {0x8a34, 0xe667e512}, + {0x8a38, 0x21e87410}, + {0x8a3c, 0x16580022}, + {0x8a40, 0xe5126e39}, + {0x8a44, 0xe52ee667}, + {0x8a48, 0x0001e49c}, + {0x8a4c, 0x4380e4a9}, + {0x8a50, 0x68806340}, + {0x8a54, 0x0bac4300}, + {0x8a58, 0x00223241}, + {0x8a5c, 0x0002e667}, + {0x8a60, 0x0baa54ec}, + {0x8a64, 0xe6670022}, + {0x8a68, 0xe52e0002}, + {0x8a6c, 0x0001e49c}, + {0x8a70, 0x4380e4a9}, + {0x8a74, 0x68816340}, + {0x8a78, 0x0baa4300}, + {0x8a7c, 0xe52e3230}, + {0x8a80, 0x0001e49c}, + {0x8a84, 0x4380e4a9}, + {0x8a88, 0x68826341}, + {0x8a8c, 0x0baa4300}, + {0x8a90, 0xe52e3221}, + {0x8a94, 0x0001e49c}, + {0x8a98, 0x42fc0004}, + {0x8a9c, 0x60010007}, + {0x8aa0, 0x42000004}, + {0x8aa4, 0x62200007}, + {0x8aa8, 0x00046200}, + {0x8aac, 0x5b405501}, + {0x8ab0, 0x00076605}, + {0x8ab4, 0x63006200}, + {0x8ab8, 0x0004e54f}, + {0x8abc, 0x0a010900}, + {0x8ac0, 0x0d000b40}, + {0x8ac4, 0x00320e01}, + {0x8ac8, 0x95090004}, + {0x8acc, 0x790442fb}, + {0x8ad0, 0x43804200}, + {0x8ad4, 0x4d010007}, + {0x8ad8, 0x43000004}, + {0x8adc, 0x05620007}, + {0x8ae0, 0x961d05a3}, + {0x8ae4, 0x0004e54f}, + {0x8ae8, 0x0007e4c5}, + {0x8aec, 0x07a306a2}, + {0x8af0, 0x0004e54f}, + {0x8af4, 0xe53fe4c5}, + {0x8af8, 0xe5470002}, + {0x8afc, 0x00074380}, + {0x8b00, 0x00044d00}, + {0x8b04, 0x42fe4300}, + {0x8b08, 0x42007900}, + {0x8b0c, 0x00040001}, + {0x8b10, 0x000742fc}, + {0x8b14, 0x00046003}, + {0x8b18, 0x32d24200}, + {0x8b1c, 0x06a20007}, + {0x8b20, 0x32fc07a3}, + {0x8b24, 0xe32ee320}, + {0x8b28, 0x0001e333}, + {0x8b2c, 0xe333e320}, + {0x8b30, 0xe3270001}, + {0x8b34, 0xe333e32e}, + {0x8b38, 0xe3270001}, + {0x8b3c, 0x0001e333}, + {0x8b40, 0x42fc0004}, + {0x8b44, 0x60030007}, + {0x8b48, 0x42000004}, + {0x8b4c, 0x00040001}, + {0x8b50, 0x000742fc}, + {0x8b54, 0x00046001}, + {0x8b58, 0x00014200}, + {0x8b5c, 0x62200007}, + {0x8b60, 0xe5476200}, + {0x8b64, 0x00070001}, + {0x8b68, 0x00046300}, + {0x8b6c, 0x0a000900}, + {0x8b70, 0x00320e01}, + {0x8b74, 0x06a20007}, + {0x8b78, 0xe559e54f}, + {0x8b7c, 0x42fe0002}, + {0x8b80, 0x42007900}, + {0x8b84, 0x00050001}, + {0x8b88, 0x00077700}, + {0x8b8c, 0x00045200}, + {0x8b90, 0x000742fe}, + {0x8b94, 0x00046000}, + {0x8b98, 0x43804200}, + {0x8b9c, 0x61006000}, + {0x8ba0, 0x63106201}, + {0x8ba4, 0x00056804}, + {0x8ba8, 0x55004100}, + {0x8bac, 0x5c020007}, + {0x8bb0, 0x43000004}, + {0x8bb4, 0x00050001}, + {0x8bb8, 
0xe3c96c06}, + {0x8bbc, 0xe3b8e3db}, + {0x8bc0, 0xe423e567}, + {0x8bc4, 0xe43ce56f}, + {0x8bc8, 0xe3b80001}, + {0x8bcc, 0x6c060005}, + {0x8bd0, 0xe5e6e3c9}, + {0x8bd4, 0xe423e567}, + {0x8bd8, 0xe43ce56f}, + {0x8bdc, 0x00050001}, + {0x8be0, 0xe3c96c00}, + {0x8be4, 0xe3b8e3db}, + {0x8be8, 0xe423e582}, + {0x8bec, 0xe43ce58a}, + {0x8bf0, 0xe3b80001}, + {0x8bf4, 0x6c000005}, + {0x8bf8, 0xe5e6e3c9}, + {0x8bfc, 0xe423e582}, + {0x8c00, 0xe43ce58a}, + {0x8c04, 0x00050001}, + {0x8c08, 0xe3c96c04}, + {0x8c0c, 0xe3b8e3db}, + {0x8c10, 0xe423e59d}, + {0x8c14, 0xe43ce5a5}, + {0x8c18, 0xe3b80001}, + {0x8c1c, 0x6c040005}, + {0x8c20, 0xe5e6e3c9}, + {0x8c24, 0xe423e59d}, + {0x8c28, 0xe43ce5a5}, + {0x8c2c, 0x00050001}, + {0x8c30, 0xe3c96c02}, + {0x8c34, 0xe3b8e3db}, + {0x8c38, 0xe423e5b8}, + {0x8c3c, 0xe43ce5c0}, + {0x8c40, 0xe3b80001}, + {0x8c44, 0x6c020005}, + {0x8c48, 0xe5e6e3c9}, + {0x8c4c, 0xe423e5b8}, + {0x8c50, 0xe43ce5c0}, + {0x8c54, 0x00040001}, + {0x8c58, 0x60084380}, + {0x8c5c, 0x6200610a}, + {0x8c60, 0x000663ce}, + {0x8c64, 0x7f006080}, + {0x8c68, 0x43000004}, + {0x8c6c, 0x0001e618}, + {0x8c70, 0x55000007}, + {0x8c74, 0x74200004}, + {0x8c78, 0x77117901}, + {0x8c7c, 0x57005710}, + {0x8c80, 0x7430140f}, + {0x8c84, 0x43800004}, + {0x8c88, 0x72000007}, + {0x8c8c, 0x43000004}, + {0x8c90, 0x00040001}, + {0x8c94, 0x00057420}, + {0x8c98, 0x7e067700}, + {0x8c9c, 0x73807388}, + {0x8ca0, 0x140f8f00}, + {0x8ca4, 0x74300004}, + {0x8ca8, 0x73000005}, + {0x8cac, 0xe5d30001}, + {0x8cb0, 0x73000005}, + {0x8cb4, 0x00040001}, + {0x8cb8, 0xb1034380}, + {0x8cbc, 0x7cdb0006}, + {0x8cc0, 0x00079103}, + {0x8cc4, 0x000440db}, + {0x8cc8, 0xe5d34300}, + {0x8ccc, 0x73800005}, + {0x8cd0, 0x5d010006}, + {0x8cd4, 0x62006002}, + {0x8cd8, 0x0005e5f7}, + {0x8cdc, 0x00077300}, + {0x8ce0, 0x75787608}, + {0x8ce4, 0x43800004}, + {0x8ce8, 0x5e010007}, + {0x8cec, 0x140a5e00}, + {0x8cf0, 0x63800006}, + {0x8cf4, 0x00077f00}, + {0x8cf8, 0x4e204c3f}, + {0x8cfc, 0x73047280}, + {0x8d00, 0x140a7300}, + {0x8d04, 0x00044d20}, + {0x8d08, 0x00064300}, + {0x8d0c, 0x00077402}, + {0x8d10, 0x40004001}, + {0x8d14, 0x0006ab00}, + {0x8d18, 0x00077404}, + {0x8d1c, 0x40004001}, + {0x8d20, 0x140aab00}, + {0x8d24, 0x43800004}, + {0x8d28, 0x52800007}, + {0x8d2c, 0x140a5200}, + {0x8d30, 0x4d004c00}, + {0x8d34, 0x00064e00}, + {0x8d38, 0x63006080}, + {0x8d3c, 0x43000004}, + {0x8d40, 0x76000007}, + {0x8d44, 0x00040001}, + {0x8d48, 0xb1034380}, + {0x8d4c, 0x7cdb0006}, + {0x8d50, 0x00079103}, + {0x8d54, 0x000440db}, + {0x8d58, 0xe5d34300}, + {0x8d5c, 0xe5f77e03}, + {0x8d60, 0x43800004}, + {0x8d64, 0x0006b103}, + {0x8d68, 0x91037c5b}, + {0x8d6c, 0x405b0007}, + {0x8d70, 0x43000004}, + {0x8d74, 0x00010001}, + {0x8d78, 0x43800004}, + {0x8d7c, 0x4e200007}, + {0x8d80, 0x63800006}, + {0x8d84, 0x5f807cdb}, + {0x8d88, 0x43000004}, + {0x8d8c, 0x76080007}, + {0x8d90, 0x00057560}, + {0x8d94, 0x00047380}, + {0x8d98, 0x0005420e}, + {0x8d9c, 0x92006c01}, + {0x8da0, 0x6c001432}, + {0x8da4, 0x42000004}, + {0x8da8, 0x43800004}, + {0x8dac, 0x5f000006}, + {0x8db0, 0x73010007}, + {0x8db4, 0x00047300}, + {0x8db8, 0x0007420f}, + {0x8dbc, 0x52005280}, + {0x8dc0, 0x0004140a}, + {0x8dc4, 0x00064200}, + {0x8dc8, 0x7c5b6300}, + {0x8dcc, 0x4e000007}, + {0x8dd0, 0x43000004}, + {0x8dd4, 0x73000005}, + {0x8dd8, 0x76000007}, + {0x8ddc, 0xe5fb0001}, + {0x8de0, 0x00040001}, + {0x8de4, 0x60004380}, + {0x8de8, 0x62016100}, + {0x8dec, 0x00066310}, + {0x8df0, 0x00046000}, + {0x8df4, 0x00014300}, + {0x8df8, 0x0001e618}, + {0x8dfc, 0x4e004f02}, + {0x8e00, 0x52015302}, + {0x8e04, 0x140f0001}, + {0x8e08, 
0x00019700}, + {0x8e0c, 0x8a084380}, + {0x8e10, 0x7800aa09}, + {0x8e14, 0x7a007900}, + {0x8e18, 0x43007b40}, + {0x8e1c, 0x65010001}, + {0x8e20, 0x67013489}, + {0x8e24, 0x43803489}, + {0x8e28, 0xaa058a04}, + {0x8e2c, 0x00014300}, + {0x8e30, 0x34966500}, + {0x8e34, 0x34966700}, + {0x8e38, 0x8a084380}, + {0x8e3c, 0x7c00aa09}, + {0x8e40, 0x7e007d00}, + {0x8e44, 0x43007f40}, + {0x8e48, 0x64010001}, + {0x8e4c, 0x6601349f}, + {0x8e50, 0x4380349f}, + {0x8e54, 0xaa058a04}, + {0x8e58, 0x00014300}, + {0x8e5c, 0x34ac6400}, + {0x8e60, 0x34ac6600}, + {0x8e64, 0x7b484380}, + {0x8e68, 0x79007a90}, + {0x8e6c, 0x43007802}, + {0x8e70, 0x34c95503}, + {0x8e74, 0x7b384380}, + {0x8e78, 0x43007a80}, + {0x8e7c, 0x34c95513}, + {0x8e80, 0x7b404380}, + {0x8e84, 0x43007a00}, + {0x8e88, 0x74015523}, + {0x8e8c, 0x8e007400}, + {0x8e90, 0x00070001}, + {0x8e94, 0x00045230}, + {0x8e98, 0x74307431}, + {0x8e9c, 0x00078e00}, + {0x8ea0, 0x00045220}, + {0x8ea4, 0x57020001}, + {0x8ea8, 0x8e005700}, + {0x8eac, 0x42ef0001}, + {0x8eb0, 0x56005610}, + {0x8eb4, 0x8c004200}, + {0x8eb8, 0x5b500001}, + {0x8ebc, 0x5b2034e0}, + {0x8ec0, 0x4e004f78}, + {0x8ec4, 0x52015388}, + {0x8ec8, 0x4e004f78}, + {0x8ecc, 0x52015388}, + {0x8ed0, 0x5480e4f2}, + {0x8ed4, 0x54815400}, + {0x8ed8, 0x54825400}, + {0x8edc, 0xe4fd5400}, + {0x8ee0, 0x3010bf1d}, + {0x8ee4, 0xe4bae4b2}, + {0x8ee8, 0xe4d3e4c0}, + {0x8eec, 0x5523e65b}, + {0x8ef0, 0x5525e4c9}, + {0x8ef4, 0xe65be4d3}, + {0x8ef8, 0x54bf0001}, + {0x8efc, 0x54a354c0}, + {0x8f00, 0x54a454c1}, + {0x8f04, 0xbf074c18}, + {0x8f08, 0x54a454c2}, + {0x8f0c, 0x54c1bf04}, + {0x8f10, 0xbf0154a3}, + {0x8f14, 0x54dfe66c}, + {0x8f18, 0x54bf0001}, + {0x8f1c, 0x050a54e5}, + {0x8f20, 0x000154df}, + {0x8f24, 0x7b801657}, + {0x8f28, 0x43807430}, + {0x8f2c, 0x7e007f40}, + {0x8f30, 0x7c027d00}, + {0x8f34, 0x5b404300}, + {0x8f38, 0x5c015501}, + {0x8f3c, 0x5480e4d7}, + {0x8f40, 0x54815400}, + {0x8f44, 0x54825400}, + {0x8f48, 0x7b005400}, + {0x8f4c, 0xe4fd7410}, + {0x8f50, 0x3010bfe5}, + {0x8f54, 0x56005610}, + {0x8f58, 0x00018c00}, + {0x8f5c, 0x57005704}, + {0x8f60, 0x57088e00}, + {0x8f64, 0x8e005700}, + {0x8f68, 0x57805781}, + {0x8f6c, 0x43808e00}, + {0x8f70, 0x5c010007}, + {0x8f74, 0x14035c00}, + {0x8f78, 0x43000004}, + {0x8f7c, 0x427f0001}, + {0x8f80, 0x62800007}, + {0x8f84, 0x92006200}, + {0x8f88, 0x42000004}, + {0x8f8c, 0x427f0001}, + {0x8f90, 0x63940007}, + {0x8f94, 0x92006314}, + {0x8f98, 0x42000004}, + {0x8f9c, 0x00040001}, + {0x8fa0, 0x790142fe}, + {0x8fa4, 0x74204200}, + {0x8fa8, 0x5710140f}, + {0x8fac, 0x141f5700}, + {0x8fb0, 0x00040001}, + {0x8fb4, 0x790142fe}, + {0x8fb8, 0x74204200}, + {0x8fbc, 0x42bf140f}, + {0x8fc0, 0x62400007}, + {0x8fc4, 0x141f6200}, + {0x8fc8, 0x42000004}, + {0x8fcc, 0x00060001}, + {0x8fd0, 0x60035d06}, + {0x8fd4, 0x62016104}, + {0x8fd8, 0x73100005}, + {0x8fdc, 0x00040001}, + {0x8fe0, 0x00074380}, + {0x8fe4, 0x5e005e01}, + {0x8fe8, 0xb103140a}, + {0x8fec, 0x7f070006}, + {0x8ff0, 0x00079103}, + {0x8ff4, 0x00064307}, + {0x8ff8, 0x5d025c00}, + {0x8ffc, 0x00045e03}, + {0x9000, 0x00014300}, + {0x9004, 0x5d040006}, + {0x9008, 0x61046000}, + {0x900c, 0x00056201}, + {0x9010, 0x00017310}, + {0x9014, 0x43800004}, + {0x9018, 0x5e010007}, + {0x901c, 0x140a5e00}, + {0x9020, 0x0006b103}, + {0x9024, 0x91037fc6}, + {0x9028, 0x43c60007}, + {0x902c, 0x5c000006}, + {0x9030, 0x5e035d02}, + {0x9034, 0x43000004}, + {0x9038, 0x00060001}, + {0x903c, 0x60005d04}, + {0x9040, 0x62016104}, + {0x9044, 0x73100005}, + {0x9048, 0x00040001}, + {0x904c, 0x00074380}, + {0x9050, 0x5e005e01}, + {0x9054, 0xb103140a}, + {0x9058, 
0x7fc60006}, + {0x905c, 0x00079103}, + {0x9060, 0x000643c6}, + {0x9064, 0x5d025c00}, + {0x9068, 0x00045e03}, + {0x906c, 0x00014300}, + {0x9070, 0x5d000006}, + {0x9074, 0x61006002}, + {0x9078, 0x00056201}, + {0x907c, 0x00017300}, + {0x9080, 0x43800004}, + {0x9084, 0x5e010007}, + {0x9088, 0x140a5e00}, + {0x908c, 0x0006b103}, + {0x9090, 0x91037fc0}, + {0x9094, 0x43c00007}, + {0x9098, 0x5c000006}, + {0x909c, 0x5e035d02}, + {0x90a0, 0x43000004}, + {0x90a4, 0x00050001}, + {0x90a8, 0x00047e02}, + {0x90ac, 0x000542f7}, + {0x90b0, 0x00046c08}, + {0x90b4, 0x00054270}, + {0x90b8, 0x73807381}, + {0x90bc, 0x00049300}, + {0x90c0, 0x000542f7}, + {0x90c4, 0x00046c00}, + {0x90c8, 0x00014200}, + {0x90cc, 0x43800004}, + {0x90d0, 0x73040007}, + {0x90d4, 0x14057300}, + {0x90d8, 0x00047240}, + {0x90dc, 0x00064300}, + {0x90e0, 0x00077404}, + {0x90e4, 0x40004001}, + {0x90e8, 0x140fab00}, + {0x90ec, 0xe64f0001}, + {0x90f0, 0xe656e5fb}, + {0x90f4, 0x00040001}, + {0x90f8, 0x00047410}, + {0x90fc, 0x42f04380}, + {0x9100, 0x62080007}, + {0x9104, 0x24206301}, + {0x9108, 0x14c80000}, + {0x910c, 0x00002428}, + {0x9110, 0x1a4215f4}, + {0x9114, 0x6300000b}, + {0x9118, 0x42000004}, + {0x911c, 0x74304300}, + {0x9120, 0x4380140f}, + {0x9124, 0x73080007}, + {0x9128, 0x00047300}, + {0x912c, 0x00014300}, + {0x9130, 0x4bf00007}, + {0x9134, 0x490b4a8f}, + {0x9138, 0x4a8e48f1}, + {0x913c, 0x48a5490a}, + {0x9140, 0x49094a8d}, + {0x9144, 0x4a8c487d}, + {0x9148, 0x48754908}, + {0x914c, 0x49074a8b}, + {0x9150, 0x4a8a4889}, + {0x9154, 0x48b74906}, + {0x9158, 0x49054a89}, + {0x915c, 0x4a8848fc}, + {0x9160, 0x48564905}, + {0x9164, 0x49044a87}, + {0x9168, 0x4a8648c1}, + {0x916c, 0x483d4904}, + {0x9170, 0x49034a85}, + {0x9174, 0x4a8448c7}, + {0x9178, 0x485e4903}, + {0x917c, 0x49024a83}, + {0x9180, 0x4a8248ac}, + {0x9184, 0x48624902}, + {0x9188, 0x49024a81}, + {0x918c, 0x4a804820}, + {0x9190, 0x48004900}, + {0x9194, 0x49014a90}, + {0x9198, 0x4a10481f}, + {0x919c, 0x00060001}, + {0x91a0, 0x5f005f80}, + {0x91a4, 0x00059900}, + {0x91a8, 0x00017300}, + {0x91ac, 0x63800006}, + {0x91b0, 0x98006300}, + {0x91b4, 0x549f0001}, + {0x91b8, 0x5c015400}, + {0x91bc, 0x540054df}, + {0x91c0, 0x00015c02}, + {0x91c4, 0x07145c01}, + {0x91c8, 0x5c025400}, + {0x91cc, 0x5c020001}, + {0x91d0, 0x54000714}, + {0x91d4, 0x00015c01}, + {0x91d8, 0x4c184c98}, + {0x91dc, 0x00080001}, + {0x91e0, 0x5c020004}, + {0x91e4, 0x09017430}, + {0x91e8, 0x0ba60c01}, + {0x91ec, 0x77800005}, + {0x91f0, 0x52200007}, + {0x91f4, 0x43800004}, + {0x91f8, 0x610a6008}, + {0x91fc, 0x63c26200}, + {0x9200, 0x5c000007}, + {0x9204, 0x43000004}, + {0x9208, 0x00000001}, + {0x8080, 0x00000004}, + {0x8080, 0x00000000}, + {0x8088, 0x00000000}, +}; + +static const struct rtw89_txpwr_byrate_cfg rtw89_8852c_txpwr_byrate[] = { + { 0, 0, 0, 0, 4, 0x50505050, }, + { 0, 0, 1, 0, 4, 0x50505050, }, + { 0, 0, 1, 4, 4, 0x484c5050, }, + { 0, 0, 2, 0, 4, 0x50505050, }, + { 0, 0, 2, 4, 4, 0x44484c50, }, + { 0, 0, 2, 8, 4, 0x34383c40, }, + { 0, 0, 3, 0, 4, 0x50505050, }, + { 0, 1, 2, 0, 4, 0x50505050, }, + { 0, 1, 2, 4, 4, 0x44484c50, }, + { 0, 1, 2, 8, 4, 0x34383c40, }, + { 0, 1, 3, 0, 4, 0x50505050, }, + { 0, 0, 4, 1, 4, 0x00000000, }, + { 0, 0, 4, 0, 1, 0x00000000, }, + { 1, 0, 1, 0, 4, 0x48484848, }, + { 1, 0, 1, 4, 4, 0x40444848, }, + { 1, 0, 2, 0, 4, 0x48484848, }, + { 1, 0, 2, 4, 4, 0x3c404448, }, + { 1, 0, 2, 8, 4, 0x2c303438, }, + { 1, 0, 3, 0, 4, 0x48484848, }, + { 1, 1, 2, 0, 4, 0x48484848, }, + { 1, 1, 2, 4, 4, 0x3c404448, }, + { 1, 1, 2, 8, 4, 0x2c303438, }, + { 1, 1, 3, 0, 4, 0x48484848, }, + { 1, 0, 4, 
0, 4, 0x00000000, }, + { 2, 0, 1, 0, 4, 0x40404040, }, + { 2, 0, 1, 4, 4, 0x383c4040, }, + { 2, 0, 2, 0, 4, 0x40404040, }, + { 2, 0, 2, 4, 4, 0x34383c40, }, + { 2, 0, 2, 8, 4, 0x24282c30, }, + { 2, 0, 3, 0, 4, 0x40404040, }, + { 2, 1, 2, 0, 4, 0x40404040, }, + { 2, 1, 2, 4, 4, 0x34383c40, }, + { 2, 1, 2, 8, 4, 0x24282c30, }, + { 2, 1, 3, 0, 4, 0x40404040, }, + { 2, 0, 4, 0, 4, 0x00000000, }, +}; + +static const s8 _txpwr_track_delta_swingidx_6gb_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, +}; + +static const s8 _txpwr_track_delta_swingidx_6gb_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, + 11, 12, 12, 13, 14, 14, 15, 15, 16, 17, 17, 18}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16}, +}; + +static const s8 _txpwr_track_delta_swingidx_6ga_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, +}; + +static const s8 _txpwr_track_delta_swingidx_6ga_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, +}; + +static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8}, + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, + 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12}, +}; + +static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16}, +}; + +static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3}, + {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8}, +}; + +static 
const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14}, +}; + +static const s8 _txpwr_track_delta_swingidx_2gb_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 +}; + +static const s8 _txpwr_track_delta_swingidx_2gb_p[] = { + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 +}; + +static const s8 _txpwr_track_delta_swingidx_2ga_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2, + -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3 +}; + +static const s8 _txpwr_track_delta_swingidx_2ga_p[] = { + 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, + 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = { + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2, + -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { + 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, + 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 +}; + +const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { + [0][0][RTW89_ACMA] = 0, + [0][0][RTW89_ETSI] = 0, + [0][0][RTW89_FCC] = 1, + [0][0][RTW89_IC] = 1, + [0][0][RTW89_MKK] = 0, + [0][1][RTW89_ACMA] = 0, + [0][1][RTW89_ETSI] = 0, + [0][1][RTW89_FCC] = 3, + [0][1][RTW89_IC] = 3, + [0][1][RTW89_MKK] = 0, + [1][1][RTW89_ACMA] = 0, + [1][1][RTW89_ETSI] = 0, + [1][1][RTW89_FCC] = 3, + [1][1][RTW89_IC] = 3, + [1][1][RTW89_MKK] = 0, + [2][1][RTW89_FCC] = 1, +}; + +const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { + [0][0][0][0][RTW89_WW][0] = 60, + [0][0][0][0][RTW89_WW][1] = 60, + [0][0][0][0][RTW89_WW][2] = 60, + [0][0][0][0][RTW89_WW][3] = 60, + [0][0][0][0][RTW89_WW][4] = 60, + [0][0][0][0][RTW89_WW][5] = 60, + [0][0][0][0][RTW89_WW][6] = 60, + [0][0][0][0][RTW89_WW][7] = 60, + [0][0][0][0][RTW89_WW][8] = 60, + [0][0][0][0][RTW89_WW][9] = 60, + [0][0][0][0][RTW89_WW][10] = 60, + [0][0][0][0][RTW89_WW][11] = 60, + [0][0][0][0][RTW89_WW][12] = 48, + [0][0][0][0][RTW89_WW][13] = 72, + [0][1][0][0][RTW89_WW][0] = 48, + [0][1][0][0][RTW89_WW][1] = 48, + [0][1][0][0][RTW89_WW][2] = 48, + [0][1][0][0][RTW89_WW][3] = 48, + [0][1][0][0][RTW89_WW][4] = 48, + [0][1][0][0][RTW89_WW][5] = 48, + [0][1][0][0][RTW89_WW][6] = 48, + [0][1][0][0][RTW89_WW][7] = 48, + [0][1][0][0][RTW89_WW][8] = 48, + [0][1][0][0][RTW89_WW][9] = 48, + [0][1][0][0][RTW89_WW][10] = 48, + [0][1][0][0][RTW89_WW][11] = 46, + [0][1][0][0][RTW89_WW][12] = 34, + [0][1][0][0][RTW89_WW][13] = 60, + [1][0][0][0][RTW89_WW][0] = 0, + [1][0][0][0][RTW89_WW][1] = 0, + [1][0][0][0][RTW89_WW][2] = 42, + [1][0][0][0][RTW89_WW][3] = 42, + [1][0][0][0][RTW89_WW][4] = 42, + [1][0][0][0][RTW89_WW][5] = 58, + 
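The rtw89_8852c_txpwr_byrate entries above each pack up to four per-rate power values into the trailing u32. A standalone decoding sketch, assuming the field order (band, nss, rate section, shift within the section, count, packed data) and least-significant-byte-first packing; the struct and helper names here are stand-ins, not the driver's own:

	#include <stdint.h>
	#include <stdio.h>

	struct byrate_cfg {		/* simplified stand-in for the     */
		uint8_t band, nss, rs;	/* kernel's rtw89_txpwr_byrate_cfg */
		uint8_t shf, len;
		uint32_t data;
	};

	static void decode_byrate(const struct byrate_cfg *c)
	{
		uint32_t data = c->data;
		unsigned int i;

		/* one s8 power value per byte, lowest byte = lowest rate */
		for (i = 0; i < c->len; i++, data >>= 8)
			printf("band %u nss %u rs %u rate %u -> %d\n",
			       c->band, c->nss, c->rs, c->shf + i,
			       (int8_t)(data & 0xff));
	}

	int main(void)
	{
		/* the { 1, 0, 1, 4, 4, 0x40444848 } entry from above */
		const struct byrate_cfg c = { 1, 0, 1, 4, 4, 0x40444848 };

		decode_byrate(&c);
		return 0;
	}
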
[1][0][0][0][RTW89_WW][6] = 42, + [1][0][0][0][RTW89_WW][7] = 42, + [1][0][0][0][RTW89_WW][8] = 42, + [1][0][0][0][RTW89_WW][9] = 34, + [1][0][0][0][RTW89_WW][10] = 22, + [1][0][0][0][RTW89_WW][11] = 0, + [1][0][0][0][RTW89_WW][12] = 0, + [1][0][0][0][RTW89_WW][13] = 0, + [1][1][0][0][RTW89_WW][0] = 0, + [1][1][0][0][RTW89_WW][1] = 0, + [1][1][0][0][RTW89_WW][2] = 38, + [1][1][0][0][RTW89_WW][3] = 38, + [1][1][0][0][RTW89_WW][4] = 38, + [1][1][0][0][RTW89_WW][5] = 48, + [1][1][0][0][RTW89_WW][6] = 26, + [1][1][0][0][RTW89_WW][7] = 26, + [1][1][0][0][RTW89_WW][8] = 26, + [1][1][0][0][RTW89_WW][9] = 22, + [1][1][0][0][RTW89_WW][10] = 22, + [1][1][0][0][RTW89_WW][11] = 0, + [1][1][0][0][RTW89_WW][12] = 0, + [1][1][0][0][RTW89_WW][13] = 0, + [0][0][1][0][RTW89_WW][0] = 60, + [0][0][1][0][RTW89_WW][1] = 60, + [0][0][1][0][RTW89_WW][2] = 60, + [0][0][1][0][RTW89_WW][3] = 60, + [0][0][1][0][RTW89_WW][4] = 60, + [0][0][1][0][RTW89_WW][5] = 60, + [0][0][1][0][RTW89_WW][6] = 60, + [0][0][1][0][RTW89_WW][7] = 60, + [0][0][1][0][RTW89_WW][8] = 60, + [0][0][1][0][RTW89_WW][9] = 60, + [0][0][1][0][RTW89_WW][10] = 60, + [0][0][1][0][RTW89_WW][11] = 46, + [0][0][1][0][RTW89_WW][12] = 42, + [0][0][1][0][RTW89_WW][13] = 0, + [0][1][1][0][RTW89_WW][0] = 48, + [0][1][1][0][RTW89_WW][1] = 48, + [0][1][1][0][RTW89_WW][2] = 48, + [0][1][1][0][RTW89_WW][3] = 48, + [0][1][1][0][RTW89_WW][4] = 48, + [0][1][1][0][RTW89_WW][5] = 48, + [0][1][1][0][RTW89_WW][6] = 48, + [0][1][1][0][RTW89_WW][7] = 48, + [0][1][1][0][RTW89_WW][8] = 48, + [0][1][1][0][RTW89_WW][9] = 48, + [0][1][1][0][RTW89_WW][10] = 48, + [0][1][1][0][RTW89_WW][11] = 38, + [0][1][1][0][RTW89_WW][12] = 34, + [0][1][1][0][RTW89_WW][13] = 0, + [0][0][2][0][RTW89_WW][0] = 60, + [0][0][2][0][RTW89_WW][1] = 60, + [0][0][2][0][RTW89_WW][2] = 60, + [0][0][2][0][RTW89_WW][3] = 60, + [0][0][2][0][RTW89_WW][4] = 60, + [0][0][2][0][RTW89_WW][5] = 60, + [0][0][2][0][RTW89_WW][6] = 60, + [0][0][2][0][RTW89_WW][7] = 60, + [0][0][2][0][RTW89_WW][8] = 60, + [0][0][2][0][RTW89_WW][9] = 60, + [0][0][2][0][RTW89_WW][10] = 60, + [0][0][2][0][RTW89_WW][11] = 46, + [0][0][2][0][RTW89_WW][12] = 42, + [0][0][2][0][RTW89_WW][13] = 0, + [0][1][2][0][RTW89_WW][0] = 48, + [0][1][2][0][RTW89_WW][1] = 48, + [0][1][2][0][RTW89_WW][2] = 48, + [0][1][2][0][RTW89_WW][3] = 48, + [0][1][2][0][RTW89_WW][4] = 48, + [0][1][2][0][RTW89_WW][5] = 48, + [0][1][2][0][RTW89_WW][6] = 48, + [0][1][2][0][RTW89_WW][7] = 48, + [0][1][2][0][RTW89_WW][8] = 48, + [0][1][2][0][RTW89_WW][9] = 48, + [0][1][2][0][RTW89_WW][10] = 48, + [0][1][2][0][RTW89_WW][11] = 38, + [0][1][2][0][RTW89_WW][12] = 34, + [0][1][2][0][RTW89_WW][13] = 0, + [0][1][2][1][RTW89_WW][0] = 36, + [0][1][2][1][RTW89_WW][1] = 36, + [0][1][2][1][RTW89_WW][2] = 36, + [0][1][2][1][RTW89_WW][3] = 36, + [0][1][2][1][RTW89_WW][4] = 36, + [0][1][2][1][RTW89_WW][5] = 36, + [0][1][2][1][RTW89_WW][6] = 36, + [0][1][2][1][RTW89_WW][7] = 36, + [0][1][2][1][RTW89_WW][8] = 36, + [0][1][2][1][RTW89_WW][9] = 36, + [0][1][2][1][RTW89_WW][10] = 36, + [0][1][2][1][RTW89_WW][11] = 36, + [0][1][2][1][RTW89_WW][12] = 34, + [0][1][2][1][RTW89_WW][13] = 0, + [1][0][2][0][RTW89_WW][0] = 0, + [1][0][2][0][RTW89_WW][1] = 0, + [1][0][2][0][RTW89_WW][2] = 60, + [1][0][2][0][RTW89_WW][3] = 60, + [1][0][2][0][RTW89_WW][4] = 60, + [1][0][2][0][RTW89_WW][5] = 60, + [1][0][2][0][RTW89_WW][6] = 60, + [1][0][2][0][RTW89_WW][7] = 60, + [1][0][2][0][RTW89_WW][8] = 60, + [1][0][2][0][RTW89_WW][9] = 60, + [1][0][2][0][RTW89_WW][10] = 58, + [1][0][2][0][RTW89_WW][11] = 0, + 
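The _txpwr_track_delta_swingidx_* tables further up feed thermal TX-power tracking: one row per band sub-range, one column per unit of difference between the current thermal-sensor reading and the calibration value, with the _p tables consulted when the die reads hotter and the _n tables when it reads cooler. A minimal index-selection sketch (hypothetical helper; how the returned delta is applied per band is left to the driver's tracking code):

	#include <stdint.h>
	#include <stdio.h>

	#define DELTA_SWINGIDX_SIZE 30

	static int8_t pick_swing_delta(const int8_t (*tbl_p)[DELTA_SWINGIDX_SIZE],
				       const int8_t (*tbl_n)[DELTA_SWINGIDX_SIZE],
				       unsigned int subband,
				       int thermal_now, int thermal_base)
	{
		int delta = thermal_now - thermal_base;
		unsigned int idx = delta >= 0 ? delta : -delta;

		/* saturate at the last column rather than read out of bounds */
		if (idx >= DELTA_SWINGIDX_SIZE)
			idx = DELTA_SWINGIDX_SIZE - 1;

		return delta >= 0 ? tbl_p[subband][idx] : tbl_n[subband][idx];
	}

	int main(void)
	{
		/* single-row demo using _txpwr_track_delta_swingidx_2ga_p/_n */
		static const int8_t p[1][DELTA_SWINGIDX_SIZE] = { {
			0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3,
			3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5,
		} };
		static const int8_t n[1][DELTA_SWINGIDX_SIZE] = { {
			0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2,
			-2, -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3,
		} };

		/* ten units hotter than calibration -> column 10 of the _p row */
		printf("delta = %d\n", pick_swing_delta(p, n, 0, 40, 30));
		return 0;
	}
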
[1][0][2][0][RTW89_WW][12] = 0, + [1][0][2][0][RTW89_WW][13] = 0, + [1][1][2][0][RTW89_WW][0] = 0, + [1][1][2][0][RTW89_WW][1] = 0, + [1][1][2][0][RTW89_WW][2] = 46, + [1][1][2][0][RTW89_WW][3] = 46, + [1][1][2][0][RTW89_WW][4] = 48, + [1][1][2][0][RTW89_WW][5] = 48, + [1][1][2][0][RTW89_WW][6] = 48, + [1][1][2][0][RTW89_WW][7] = 46, + [1][1][2][0][RTW89_WW][8] = 46, + [1][1][2][0][RTW89_WW][9] = 34, + [1][1][2][0][RTW89_WW][10] = 30, + [1][1][2][0][RTW89_WW][11] = 0, + [1][1][2][0][RTW89_WW][12] = 0, + [1][1][2][0][RTW89_WW][13] = 0, + [1][1][2][1][RTW89_WW][0] = 0, + [1][1][2][1][RTW89_WW][1] = 0, + [1][1][2][1][RTW89_WW][2] = 36, + [1][1][2][1][RTW89_WW][3] = 36, + [1][1][2][1][RTW89_WW][4] = 36, + [1][1][2][1][RTW89_WW][5] = 36, + [1][1][2][1][RTW89_WW][6] = 36, + [1][1][2][1][RTW89_WW][7] = 36, + [1][1][2][1][RTW89_WW][8] = 36, + [1][1][2][1][RTW89_WW][9] = 34, + [1][1][2][1][RTW89_WW][10] = 30, + [1][1][2][1][RTW89_WW][11] = 0, + [1][1][2][1][RTW89_WW][12] = 0, + [1][1][2][1][RTW89_WW][13] = 0, + [0][0][0][0][RTW89_FCC][0] = 70, + [0][0][0][0][RTW89_ETSI][0] = 60, + [0][0][0][0][RTW89_MKK][0] = 68, + [0][0][0][0][RTW89_IC][0] = 74, + [0][0][0][0][RTW89_ACMA][0] = 60, + [0][0][0][0][RTW89_FCC][1] = 70, + [0][0][0][0][RTW89_ETSI][1] = 60, + [0][0][0][0][RTW89_MKK][1] = 68, + [0][0][0][0][RTW89_IC][1] = 74, + [0][0][0][0][RTW89_ACMA][1] = 60, + [0][0][0][0][RTW89_FCC][2] = 70, + [0][0][0][0][RTW89_ETSI][2] = 60, + [0][0][0][0][RTW89_MKK][2] = 68, + [0][0][0][0][RTW89_IC][2] = 74, + [0][0][0][0][RTW89_ACMA][2] = 60, + [0][0][0][0][RTW89_FCC][3] = 70, + [0][0][0][0][RTW89_ETSI][3] = 60, + [0][0][0][0][RTW89_MKK][3] = 68, + [0][0][0][0][RTW89_IC][3] = 74, + [0][0][0][0][RTW89_ACMA][3] = 60, + [0][0][0][0][RTW89_FCC][4] = 70, + [0][0][0][0][RTW89_ETSI][4] = 60, + [0][0][0][0][RTW89_MKK][4] = 68, + [0][0][0][0][RTW89_IC][4] = 74, + [0][0][0][0][RTW89_ACMA][4] = 60, + [0][0][0][0][RTW89_FCC][5] = 70, + [0][0][0][0][RTW89_ETSI][5] = 60, + [0][0][0][0][RTW89_MKK][5] = 68, + [0][0][0][0][RTW89_IC][5] = 74, + [0][0][0][0][RTW89_ACMA][5] = 60, + [0][0][0][0][RTW89_FCC][6] = 70, + [0][0][0][0][RTW89_ETSI][6] = 60, + [0][0][0][0][RTW89_MKK][6] = 68, + [0][0][0][0][RTW89_IC][6] = 74, + [0][0][0][0][RTW89_ACMA][6] = 60, + [0][0][0][0][RTW89_FCC][7] = 70, + [0][0][0][0][RTW89_ETSI][7] = 60, + [0][0][0][0][RTW89_MKK][7] = 68, + [0][0][0][0][RTW89_IC][7] = 74, + [0][0][0][0][RTW89_ACMA][7] = 60, + [0][0][0][0][RTW89_FCC][8] = 70, + [0][0][0][0][RTW89_ETSI][8] = 60, + [0][0][0][0][RTW89_MKK][8] = 68, + [0][0][0][0][RTW89_IC][8] = 74, + [0][0][0][0][RTW89_ACMA][8] = 60, + [0][0][0][0][RTW89_FCC][9] = 70, + [0][0][0][0][RTW89_ETSI][9] = 60, + [0][0][0][0][RTW89_MKK][9] = 68, + [0][0][0][0][RTW89_IC][9] = 74, + [0][0][0][0][RTW89_ACMA][9] = 60, + [0][0][0][0][RTW89_FCC][10] = 70, + [0][0][0][0][RTW89_ETSI][10] = 60, + [0][0][0][0][RTW89_MKK][10] = 68, + [0][0][0][0][RTW89_IC][10] = 74, + [0][0][0][0][RTW89_ACMA][10] = 60, + [0][0][0][0][RTW89_FCC][11] = 62, + [0][0][0][0][RTW89_ETSI][11] = 60, + [0][0][0][0][RTW89_MKK][11] = 68, + [0][0][0][0][RTW89_IC][11] = 72, + [0][0][0][0][RTW89_ACMA][11] = 60, + [0][0][0][0][RTW89_FCC][12] = 48, + [0][0][0][0][RTW89_ETSI][12] = 60, + [0][0][0][0][RTW89_MKK][12] = 68, + [0][0][0][0][RTW89_IC][12] = 58, + [0][0][0][0][RTW89_ACMA][12] = 60, + [0][0][0][0][RTW89_FCC][13] = 127, + [0][0][0][0][RTW89_ETSI][13] = 127, + [0][0][0][0][RTW89_MKK][13] = 72, + [0][0][0][0][RTW89_IC][13] = 127, + [0][0][0][0][RTW89_ACMA][13] = 127, + [0][1][0][0][RTW89_FCC][0] = 66, + 
[0][1][0][0][RTW89_ETSI][0] = 48, + [0][1][0][0][RTW89_MKK][0] = 58, + [0][1][0][0][RTW89_IC][0] = 74, + [0][1][0][0][RTW89_ACMA][0] = 48, + [0][1][0][0][RTW89_FCC][1] = 66, + [0][1][0][0][RTW89_ETSI][1] = 48, + [0][1][0][0][RTW89_MKK][1] = 58, + [0][1][0][0][RTW89_IC][1] = 74, + [0][1][0][0][RTW89_ACMA][1] = 48, + [0][1][0][0][RTW89_FCC][2] = 66, + [0][1][0][0][RTW89_ETSI][2] = 48, + [0][1][0][0][RTW89_MKK][2] = 58, + [0][1][0][0][RTW89_IC][2] = 74, + [0][1][0][0][RTW89_ACMA][2] = 48, + [0][1][0][0][RTW89_FCC][3] = 66, + [0][1][0][0][RTW89_ETSI][3] = 48, + [0][1][0][0][RTW89_MKK][3] = 58, + [0][1][0][0][RTW89_IC][3] = 74, + [0][1][0][0][RTW89_ACMA][3] = 48, + [0][1][0][0][RTW89_FCC][4] = 66, + [0][1][0][0][RTW89_ETSI][4] = 48, + [0][1][0][0][RTW89_MKK][4] = 58, + [0][1][0][0][RTW89_IC][4] = 74, + [0][1][0][0][RTW89_ACMA][4] = 48, + [0][1][0][0][RTW89_FCC][5] = 66, + [0][1][0][0][RTW89_ETSI][5] = 48, + [0][1][0][0][RTW89_MKK][5] = 58, + [0][1][0][0][RTW89_IC][5] = 74, + [0][1][0][0][RTW89_ACMA][5] = 48, + [0][1][0][0][RTW89_FCC][6] = 66, + [0][1][0][0][RTW89_ETSI][6] = 48, + [0][1][0][0][RTW89_MKK][6] = 58, + [0][1][0][0][RTW89_IC][6] = 74, + [0][1][0][0][RTW89_ACMA][6] = 48, + [0][1][0][0][RTW89_FCC][7] = 66, + [0][1][0][0][RTW89_ETSI][7] = 48, + [0][1][0][0][RTW89_MKK][7] = 58, + [0][1][0][0][RTW89_IC][7] = 74, + [0][1][0][0][RTW89_ACMA][7] = 48, + [0][1][0][0][RTW89_FCC][8] = 66, + [0][1][0][0][RTW89_ETSI][8] = 48, + [0][1][0][0][RTW89_MKK][8] = 58, + [0][1][0][0][RTW89_IC][8] = 74, + [0][1][0][0][RTW89_ACMA][8] = 48, + [0][1][0][0][RTW89_FCC][9] = 66, + [0][1][0][0][RTW89_ETSI][9] = 48, + [0][1][0][0][RTW89_MKK][9] = 58, + [0][1][0][0][RTW89_IC][9] = 74, + [0][1][0][0][RTW89_ACMA][9] = 48, + [0][1][0][0][RTW89_FCC][10] = 66, + [0][1][0][0][RTW89_ETSI][10] = 48, + [0][1][0][0][RTW89_MKK][10] = 58, + [0][1][0][0][RTW89_IC][10] = 74, + [0][1][0][0][RTW89_ACMA][10] = 48, + [0][1][0][0][RTW89_FCC][11] = 46, + [0][1][0][0][RTW89_ETSI][11] = 48, + [0][1][0][0][RTW89_MKK][11] = 58, + [0][1][0][0][RTW89_IC][11] = 56, + [0][1][0][0][RTW89_ACMA][11] = 48, + [0][1][0][0][RTW89_FCC][12] = 34, + [0][1][0][0][RTW89_ETSI][12] = 48, + [0][1][0][0][RTW89_MKK][12] = 58, + [0][1][0][0][RTW89_IC][12] = 44, + [0][1][0][0][RTW89_ACMA][12] = 48, + [0][1][0][0][RTW89_FCC][13] = 127, + [0][1][0][0][RTW89_ETSI][13] = 127, + [0][1][0][0][RTW89_MKK][13] = 60, + [0][1][0][0][RTW89_IC][13] = 127, + [0][1][0][0][RTW89_ACMA][13] = 127, + [1][0][0][0][RTW89_FCC][0] = 127, + [1][0][0][0][RTW89_ETSI][0] = 127, + [1][0][0][0][RTW89_MKK][0] = 127, + [1][0][0][0][RTW89_IC][0] = 127, + [1][0][0][0][RTW89_ACMA][0] = 127, + [1][0][0][0][RTW89_FCC][1] = 127, + [1][0][0][0][RTW89_ETSI][1] = 127, + [1][0][0][0][RTW89_MKK][1] = 127, + [1][0][0][0][RTW89_IC][1] = 127, + [1][0][0][0][RTW89_ACMA][1] = 127, + [1][0][0][0][RTW89_FCC][2] = 42, + [1][0][0][0][RTW89_ETSI][2] = 60, + [1][0][0][0][RTW89_MKK][2] = 66, + [1][0][0][0][RTW89_IC][2] = 52, + [1][0][0][0][RTW89_ACMA][2] = 60, + [1][0][0][0][RTW89_FCC][3] = 42, + [1][0][0][0][RTW89_ETSI][3] = 60, + [1][0][0][0][RTW89_MKK][3] = 66, + [1][0][0][0][RTW89_IC][3] = 52, + [1][0][0][0][RTW89_ACMA][3] = 60, + [1][0][0][0][RTW89_FCC][4] = 42, + [1][0][0][0][RTW89_ETSI][4] = 60, + [1][0][0][0][RTW89_MKK][4] = 66, + [1][0][0][0][RTW89_IC][4] = 52, + [1][0][0][0][RTW89_ACMA][4] = 60, + [1][0][0][0][RTW89_FCC][5] = 58, + [1][0][0][0][RTW89_ETSI][5] = 60, + [1][0][0][0][RTW89_MKK][5] = 66, + [1][0][0][0][RTW89_IC][5] = 68, + [1][0][0][0][RTW89_ACMA][5] = 60, + [1][0][0][0][RTW89_FCC][6] = 42, + 
[1][0][0][0][RTW89_ETSI][6] = 60, + [1][0][0][0][RTW89_MKK][6] = 66, + [1][0][0][0][RTW89_IC][6] = 52, + [1][0][0][0][RTW89_ACMA][6] = 60, + [1][0][0][0][RTW89_FCC][7] = 42, + [1][0][0][0][RTW89_ETSI][7] = 60, + [1][0][0][0][RTW89_MKK][7] = 66, + [1][0][0][0][RTW89_IC][7] = 52, + [1][0][0][0][RTW89_ACMA][7] = 60, + [1][0][0][0][RTW89_FCC][8] = 42, + [1][0][0][0][RTW89_ETSI][8] = 60, + [1][0][0][0][RTW89_MKK][8] = 66, + [1][0][0][0][RTW89_IC][8] = 52, + [1][0][0][0][RTW89_ACMA][8] = 60, + [1][0][0][0][RTW89_FCC][9] = 34, + [1][0][0][0][RTW89_ETSI][9] = 60, + [1][0][0][0][RTW89_MKK][9] = 66, + [1][0][0][0][RTW89_IC][9] = 44, + [1][0][0][0][RTW89_ACMA][9] = 60, + [1][0][0][0][RTW89_FCC][10] = 22, + [1][0][0][0][RTW89_ETSI][10] = 60, + [1][0][0][0][RTW89_MKK][10] = 66, + [1][0][0][0][RTW89_IC][10] = 32, + [1][0][0][0][RTW89_ACMA][10] = 60, + [1][0][0][0][RTW89_FCC][11] = 127, + [1][0][0][0][RTW89_ETSI][11] = 127, + [1][0][0][0][RTW89_MKK][11] = 127, + [1][0][0][0][RTW89_IC][11] = 127, + [1][0][0][0][RTW89_ACMA][11] = 127, + [1][0][0][0][RTW89_FCC][12] = 127, + [1][0][0][0][RTW89_ETSI][12] = 127, + [1][0][0][0][RTW89_MKK][12] = 127, + [1][0][0][0][RTW89_IC][12] = 127, + [1][0][0][0][RTW89_ACMA][12] = 127, + [1][0][0][0][RTW89_FCC][13] = 127, + [1][0][0][0][RTW89_ETSI][13] = 127, + [1][0][0][0][RTW89_MKK][13] = 127, + [1][0][0][0][RTW89_IC][13] = 127, + [1][0][0][0][RTW89_ACMA][13] = 127, + [1][1][0][0][RTW89_FCC][0] = 127, + [1][1][0][0][RTW89_ETSI][0] = 127, + [1][1][0][0][RTW89_MKK][0] = 127, + [1][1][0][0][RTW89_IC][0] = 127, + [1][1][0][0][RTW89_ACMA][0] = 127, + [1][1][0][0][RTW89_FCC][1] = 127, + [1][1][0][0][RTW89_ETSI][1] = 127, + [1][1][0][0][RTW89_MKK][1] = 127, + [1][1][0][0][RTW89_IC][1] = 127, + [1][1][0][0][RTW89_ACMA][1] = 127, + [1][1][0][0][RTW89_FCC][2] = 38, + [1][1][0][0][RTW89_ETSI][2] = 48, + [1][1][0][0][RTW89_MKK][2] = 58, + [1][1][0][0][RTW89_IC][2] = 48, + [1][1][0][0][RTW89_ACMA][2] = 48, + [1][1][0][0][RTW89_FCC][3] = 38, + [1][1][0][0][RTW89_ETSI][3] = 48, + [1][1][0][0][RTW89_MKK][3] = 58, + [1][1][0][0][RTW89_IC][3] = 48, + [1][1][0][0][RTW89_ACMA][3] = 48, + [1][1][0][0][RTW89_FCC][4] = 38, + [1][1][0][0][RTW89_ETSI][4] = 48, + [1][1][0][0][RTW89_MKK][4] = 58, + [1][1][0][0][RTW89_IC][4] = 48, + [1][1][0][0][RTW89_ACMA][4] = 48, + [1][1][0][0][RTW89_FCC][5] = 54, + [1][1][0][0][RTW89_ETSI][5] = 48, + [1][1][0][0][RTW89_MKK][5] = 58, + [1][1][0][0][RTW89_IC][5] = 64, + [1][1][0][0][RTW89_ACMA][5] = 48, + [1][1][0][0][RTW89_FCC][6] = 26, + [1][1][0][0][RTW89_ETSI][6] = 48, + [1][1][0][0][RTW89_MKK][6] = 58, + [1][1][0][0][RTW89_IC][6] = 36, + [1][1][0][0][RTW89_ACMA][6] = 48, + [1][1][0][0][RTW89_FCC][7] = 26, + [1][1][0][0][RTW89_ETSI][7] = 48, + [1][1][0][0][RTW89_MKK][7] = 58, + [1][1][0][0][RTW89_IC][7] = 36, + [1][1][0][0][RTW89_ACMA][7] = 48, + [1][1][0][0][RTW89_FCC][8] = 26, + [1][1][0][0][RTW89_ETSI][8] = 48, + [1][1][0][0][RTW89_MKK][8] = 58, + [1][1][0][0][RTW89_IC][8] = 36, + [1][1][0][0][RTW89_ACMA][8] = 48, + [1][1][0][0][RTW89_FCC][9] = 22, + [1][1][0][0][RTW89_ETSI][9] = 48, + [1][1][0][0][RTW89_MKK][9] = 58, + [1][1][0][0][RTW89_IC][9] = 32, + [1][1][0][0][RTW89_ACMA][9] = 48, + [1][1][0][0][RTW89_FCC][10] = 22, + [1][1][0][0][RTW89_ETSI][10] = 48, + [1][1][0][0][RTW89_MKK][10] = 56, + [1][1][0][0][RTW89_IC][10] = 32, + [1][1][0][0][RTW89_ACMA][10] = 48, + [1][1][0][0][RTW89_FCC][11] = 127, + [1][1][0][0][RTW89_ETSI][11] = 127, + [1][1][0][0][RTW89_MKK][11] = 127, + [1][1][0][0][RTW89_IC][11] = 127, + [1][1][0][0][RTW89_ACMA][11] = 127, + 
[1][1][0][0][RTW89_FCC][12] = 127, + [1][1][0][0][RTW89_ETSI][12] = 127, + [1][1][0][0][RTW89_MKK][12] = 127, + [1][1][0][0][RTW89_IC][12] = 127, + [1][1][0][0][RTW89_ACMA][12] = 127, + [1][1][0][0][RTW89_FCC][13] = 127, + [1][1][0][0][RTW89_ETSI][13] = 127, + [1][1][0][0][RTW89_MKK][13] = 127, + [1][1][0][0][RTW89_IC][13] = 127, + [1][1][0][0][RTW89_ACMA][13] = 127, + [0][0][1][0][RTW89_FCC][0] = 68, + [0][0][1][0][RTW89_ETSI][0] = 60, + [0][0][1][0][RTW89_MKK][0] = 76, + [0][0][1][0][RTW89_IC][0] = 78, + [0][0][1][0][RTW89_ACMA][0] = 60, + [0][0][1][0][RTW89_FCC][1] = 68, + [0][0][1][0][RTW89_ETSI][1] = 60, + [0][0][1][0][RTW89_MKK][1] = 78, + [0][0][1][0][RTW89_IC][1] = 78, + [0][0][1][0][RTW89_ACMA][1] = 60, + [0][0][1][0][RTW89_FCC][2] = 70, + [0][0][1][0][RTW89_ETSI][2] = 60, + [0][0][1][0][RTW89_MKK][2] = 78, + [0][0][1][0][RTW89_IC][2] = 78, + [0][0][1][0][RTW89_ACMA][2] = 60, + [0][0][1][0][RTW89_FCC][3] = 70, + [0][0][1][0][RTW89_ETSI][3] = 60, + [0][0][1][0][RTW89_MKK][3] = 78, + [0][0][1][0][RTW89_IC][3] = 78, + [0][0][1][0][RTW89_ACMA][3] = 60, + [0][0][1][0][RTW89_FCC][4] = 70, + [0][0][1][0][RTW89_ETSI][4] = 60, + [0][0][1][0][RTW89_MKK][4] = 78, + [0][0][1][0][RTW89_IC][4] = 78, + [0][0][1][0][RTW89_ACMA][4] = 60, + [0][0][1][0][RTW89_FCC][5] = 70, + [0][0][1][0][RTW89_ETSI][5] = 60, + [0][0][1][0][RTW89_MKK][5] = 78, + [0][0][1][0][RTW89_IC][5] = 78, + [0][0][1][0][RTW89_ACMA][5] = 60, + [0][0][1][0][RTW89_FCC][6] = 70, + [0][0][1][0][RTW89_ETSI][6] = 60, + [0][0][1][0][RTW89_MKK][6] = 76, + [0][0][1][0][RTW89_IC][6] = 78, + [0][0][1][0][RTW89_ACMA][6] = 60, + [0][0][1][0][RTW89_FCC][7] = 70, + [0][0][1][0][RTW89_ETSI][7] = 60, + [0][0][1][0][RTW89_MKK][7] = 78, + [0][0][1][0][RTW89_IC][7] = 78, + [0][0][1][0][RTW89_ACMA][7] = 60, + [0][0][1][0][RTW89_FCC][8] = 70, + [0][0][1][0][RTW89_ETSI][8] = 60, + [0][0][1][0][RTW89_MKK][8] = 78, + [0][0][1][0][RTW89_IC][8] = 78, + [0][0][1][0][RTW89_ACMA][8] = 60, + [0][0][1][0][RTW89_FCC][9] = 66, + [0][0][1][0][RTW89_ETSI][9] = 60, + [0][0][1][0][RTW89_MKK][9] = 78, + [0][0][1][0][RTW89_IC][9] = 76, + [0][0][1][0][RTW89_ACMA][9] = 60, + [0][0][1][0][RTW89_FCC][10] = 66, + [0][0][1][0][RTW89_ETSI][10] = 60, + [0][0][1][0][RTW89_MKK][10] = 78, + [0][0][1][0][RTW89_IC][10] = 76, + [0][0][1][0][RTW89_ACMA][10] = 60, + [0][0][1][0][RTW89_FCC][11] = 46, + [0][0][1][0][RTW89_ETSI][11] = 60, + [0][0][1][0][RTW89_MKK][11] = 78, + [0][0][1][0][RTW89_IC][11] = 56, + [0][0][1][0][RTW89_ACMA][11] = 60, + [0][0][1][0][RTW89_FCC][12] = 42, + [0][0][1][0][RTW89_ETSI][12] = 60, + [0][0][1][0][RTW89_MKK][12] = 78, + [0][0][1][0][RTW89_IC][12] = 52, + [0][0][1][0][RTW89_ACMA][12] = 60, + [0][0][1][0][RTW89_FCC][13] = 127, + [0][0][1][0][RTW89_ETSI][13] = 127, + [0][0][1][0][RTW89_MKK][13] = 127, + [0][0][1][0][RTW89_IC][13] = 127, + [0][0][1][0][RTW89_ACMA][13] = 127, + [0][1][1][0][RTW89_FCC][0] = 54, + [0][1][1][0][RTW89_ETSI][0] = 48, + [0][1][1][0][RTW89_MKK][0] = 66, + [0][1][1][0][RTW89_IC][0] = 64, + [0][1][1][0][RTW89_ACMA][0] = 48, + [0][1][1][0][RTW89_FCC][1] = 54, + [0][1][1][0][RTW89_ETSI][1] = 48, + [0][1][1][0][RTW89_MKK][1] = 66, + [0][1][1][0][RTW89_IC][1] = 64, + [0][1][1][0][RTW89_ACMA][1] = 48, + [0][1][1][0][RTW89_FCC][2] = 58, + [0][1][1][0][RTW89_ETSI][2] = 48, + [0][1][1][0][RTW89_MKK][2] = 66, + [0][1][1][0][RTW89_IC][2] = 68, + [0][1][1][0][RTW89_ACMA][2] = 48, + [0][1][1][0][RTW89_FCC][3] = 62, + [0][1][1][0][RTW89_ETSI][3] = 48, + [0][1][1][0][RTW89_MKK][3] = 66, + [0][1][1][0][RTW89_IC][3] = 72, + 
[0][1][1][0][RTW89_ACMA][3] = 48, + [0][1][1][0][RTW89_FCC][4] = 70, + [0][1][1][0][RTW89_ETSI][4] = 48, + [0][1][1][0][RTW89_MKK][4] = 66, + [0][1][1][0][RTW89_IC][4] = 78, + [0][1][1][0][RTW89_ACMA][4] = 48, + [0][1][1][0][RTW89_FCC][5] = 70, + [0][1][1][0][RTW89_ETSI][5] = 48, + [0][1][1][0][RTW89_MKK][5] = 66, + [0][1][1][0][RTW89_IC][5] = 78, + [0][1][1][0][RTW89_ACMA][5] = 48, + [0][1][1][0][RTW89_FCC][6] = 70, + [0][1][1][0][RTW89_ETSI][6] = 48, + [0][1][1][0][RTW89_MKK][6] = 66, + [0][1][1][0][RTW89_IC][6] = 78, + [0][1][1][0][RTW89_ACMA][6] = 48, + [0][1][1][0][RTW89_FCC][7] = 62, + [0][1][1][0][RTW89_ETSI][7] = 48, + [0][1][1][0][RTW89_MKK][7] = 66, + [0][1][1][0][RTW89_IC][7] = 72, + [0][1][1][0][RTW89_ACMA][7] = 48, + [0][1][1][0][RTW89_FCC][8] = 58, + [0][1][1][0][RTW89_ETSI][8] = 48, + [0][1][1][0][RTW89_MKK][8] = 66, + [0][1][1][0][RTW89_IC][8] = 68, + [0][1][1][0][RTW89_ACMA][8] = 48, + [0][1][1][0][RTW89_FCC][9] = 54, + [0][1][1][0][RTW89_ETSI][9] = 48, + [0][1][1][0][RTW89_MKK][9] = 66, + [0][1][1][0][RTW89_IC][9] = 64, + [0][1][1][0][RTW89_ACMA][9] = 48, + [0][1][1][0][RTW89_FCC][10] = 54, + [0][1][1][0][RTW89_ETSI][10] = 48, + [0][1][1][0][RTW89_MKK][10] = 66, + [0][1][1][0][RTW89_IC][10] = 64, + [0][1][1][0][RTW89_ACMA][10] = 48, + [0][1][1][0][RTW89_FCC][11] = 38, + [0][1][1][0][RTW89_ETSI][11] = 48, + [0][1][1][0][RTW89_MKK][11] = 66, + [0][1][1][0][RTW89_IC][11] = 48, + [0][1][1][0][RTW89_ACMA][11] = 48, + [0][1][1][0][RTW89_FCC][12] = 34, + [0][1][1][0][RTW89_ETSI][12] = 48, + [0][1][1][0][RTW89_MKK][12] = 66, + [0][1][1][0][RTW89_IC][12] = 44, + [0][1][1][0][RTW89_ACMA][12] = 48, + [0][1][1][0][RTW89_FCC][13] = 127, + [0][1][1][0][RTW89_ETSI][13] = 127, + [0][1][1][0][RTW89_MKK][13] = 127, + [0][1][1][0][RTW89_IC][13] = 127, + [0][1][1][0][RTW89_ACMA][13] = 127, + [0][0][2][0][RTW89_FCC][0] = 68, + [0][0][2][0][RTW89_ETSI][0] = 60, + [0][0][2][0][RTW89_MKK][0] = 78, + [0][0][2][0][RTW89_IC][0] = 78, + [0][0][2][0][RTW89_ACMA][0] = 60, + [0][0][2][0][RTW89_FCC][1] = 68, + [0][0][2][0][RTW89_ETSI][1] = 60, + [0][0][2][0][RTW89_MKK][1] = 78, + [0][0][2][0][RTW89_IC][1] = 78, + [0][0][2][0][RTW89_ACMA][1] = 60, + [0][0][2][0][RTW89_FCC][2] = 70, + [0][0][2][0][RTW89_ETSI][2] = 60, + [0][0][2][0][RTW89_MKK][2] = 78, + [0][0][2][0][RTW89_IC][2] = 78, + [0][0][2][0][RTW89_ACMA][2] = 60, + [0][0][2][0][RTW89_FCC][3] = 70, + [0][0][2][0][RTW89_ETSI][3] = 60, + [0][0][2][0][RTW89_MKK][3] = 78, + [0][0][2][0][RTW89_IC][3] = 78, + [0][0][2][0][RTW89_ACMA][3] = 60, + [0][0][2][0][RTW89_FCC][4] = 70, + [0][0][2][0][RTW89_ETSI][4] = 60, + [0][0][2][0][RTW89_MKK][4] = 78, + [0][0][2][0][RTW89_IC][4] = 78, + [0][0][2][0][RTW89_ACMA][4] = 60, + [0][0][2][0][RTW89_FCC][5] = 70, + [0][0][2][0][RTW89_ETSI][5] = 60, + [0][0][2][0][RTW89_MKK][5] = 78, + [0][0][2][0][RTW89_IC][5] = 78, + [0][0][2][0][RTW89_ACMA][5] = 60, + [0][0][2][0][RTW89_FCC][6] = 70, + [0][0][2][0][RTW89_ETSI][6] = 60, + [0][0][2][0][RTW89_MKK][6] = 78, + [0][0][2][0][RTW89_IC][6] = 78, + [0][0][2][0][RTW89_ACMA][6] = 60, + [0][0][2][0][RTW89_FCC][7] = 70, + [0][0][2][0][RTW89_ETSI][7] = 60, + [0][0][2][0][RTW89_MKK][7] = 78, + [0][0][2][0][RTW89_IC][7] = 78, + [0][0][2][0][RTW89_ACMA][7] = 60, + [0][0][2][0][RTW89_FCC][8] = 68, + [0][0][2][0][RTW89_ETSI][8] = 60, + [0][0][2][0][RTW89_MKK][8] = 78, + [0][0][2][0][RTW89_IC][8] = 78, + [0][0][2][0][RTW89_ACMA][8] = 60, + [0][0][2][0][RTW89_FCC][9] = 64, + [0][0][2][0][RTW89_ETSI][9] = 60, + [0][0][2][0][RTW89_MKK][9] = 78, + [0][0][2][0][RTW89_IC][9] = 74, + 
[0][0][2][0][RTW89_ACMA][9] = 60, + [0][0][2][0][RTW89_FCC][10] = 64, + [0][0][2][0][RTW89_ETSI][10] = 60, + [0][0][2][0][RTW89_MKK][10] = 78, + [0][0][2][0][RTW89_IC][10] = 74, + [0][0][2][0][RTW89_ACMA][10] = 60, + [0][0][2][0][RTW89_FCC][11] = 46, + [0][0][2][0][RTW89_ETSI][11] = 60, + [0][0][2][0][RTW89_MKK][11] = 78, + [0][0][2][0][RTW89_IC][11] = 56, + [0][0][2][0][RTW89_ACMA][11] = 60, + [0][0][2][0][RTW89_FCC][12] = 42, + [0][0][2][0][RTW89_ETSI][12] = 60, + [0][0][2][0][RTW89_MKK][12] = 78, + [0][0][2][0][RTW89_IC][12] = 52, + [0][0][2][0][RTW89_ACMA][12] = 60, + [0][0][2][0][RTW89_FCC][13] = 127, + [0][0][2][0][RTW89_ETSI][13] = 127, + [0][0][2][0][RTW89_MKK][13] = 127, + [0][0][2][0][RTW89_IC][13] = 127, + [0][0][2][0][RTW89_ACMA][13] = 127, + [0][1][2][0][RTW89_FCC][0] = 50, + [0][1][2][0][RTW89_ETSI][0] = 48, + [0][1][2][0][RTW89_MKK][0] = 68, + [0][1][2][0][RTW89_IC][0] = 60, + [0][1][2][0][RTW89_ACMA][0] = 48, + [0][1][2][0][RTW89_FCC][1] = 50, + [0][1][2][0][RTW89_ETSI][1] = 48, + [0][1][2][0][RTW89_MKK][1] = 68, + [0][1][2][0][RTW89_IC][1] = 60, + [0][1][2][0][RTW89_ACMA][1] = 48, + [0][1][2][0][RTW89_FCC][2] = 54, + [0][1][2][0][RTW89_ETSI][2] = 48, + [0][1][2][0][RTW89_MKK][2] = 68, + [0][1][2][0][RTW89_IC][2] = 64, + [0][1][2][0][RTW89_ACMA][2] = 48, + [0][1][2][0][RTW89_FCC][3] = 58, + [0][1][2][0][RTW89_ETSI][3] = 48, + [0][1][2][0][RTW89_MKK][3] = 68, + [0][1][2][0][RTW89_IC][3] = 68, + [0][1][2][0][RTW89_ACMA][3] = 48, + [0][1][2][0][RTW89_FCC][4] = 64, + [0][1][2][0][RTW89_ETSI][4] = 48, + [0][1][2][0][RTW89_MKK][4] = 68, + [0][1][2][0][RTW89_IC][4] = 74, + [0][1][2][0][RTW89_ACMA][4] = 48, + [0][1][2][0][RTW89_FCC][5] = 70, + [0][1][2][0][RTW89_ETSI][5] = 48, + [0][1][2][0][RTW89_MKK][5] = 68, + [0][1][2][0][RTW89_IC][5] = 78, + [0][1][2][0][RTW89_ACMA][5] = 48, + [0][1][2][0][RTW89_FCC][6] = 66, + [0][1][2][0][RTW89_ETSI][6] = 48, + [0][1][2][0][RTW89_MKK][6] = 68, + [0][1][2][0][RTW89_IC][6] = 76, + [0][1][2][0][RTW89_ACMA][6] = 48, + [0][1][2][0][RTW89_FCC][7] = 58, + [0][1][2][0][RTW89_ETSI][7] = 48, + [0][1][2][0][RTW89_MKK][7] = 68, + [0][1][2][0][RTW89_IC][7] = 68, + [0][1][2][0][RTW89_ACMA][7] = 48, + [0][1][2][0][RTW89_FCC][8] = 54, + [0][1][2][0][RTW89_ETSI][8] = 48, + [0][1][2][0][RTW89_MKK][8] = 68, + [0][1][2][0][RTW89_IC][8] = 64, + [0][1][2][0][RTW89_ACMA][8] = 48, + [0][1][2][0][RTW89_FCC][9] = 50, + [0][1][2][0][RTW89_ETSI][9] = 48, + [0][1][2][0][RTW89_MKK][9] = 68, + [0][1][2][0][RTW89_IC][9] = 60, + [0][1][2][0][RTW89_ACMA][9] = 48, + [0][1][2][0][RTW89_FCC][10] = 50, + [0][1][2][0][RTW89_ETSI][10] = 48, + [0][1][2][0][RTW89_MKK][10] = 68, + [0][1][2][0][RTW89_IC][10] = 60, + [0][1][2][0][RTW89_ACMA][10] = 48, + [0][1][2][0][RTW89_FCC][11] = 38, + [0][1][2][0][RTW89_ETSI][11] = 48, + [0][1][2][0][RTW89_MKK][11] = 68, + [0][1][2][0][RTW89_IC][11] = 48, + [0][1][2][0][RTW89_ACMA][11] = 48, + [0][1][2][0][RTW89_FCC][12] = 34, + [0][1][2][0][RTW89_ETSI][12] = 48, + [0][1][2][0][RTW89_MKK][12] = 68, + [0][1][2][0][RTW89_IC][12] = 44, + [0][1][2][0][RTW89_ACMA][12] = 48, + [0][1][2][0][RTW89_FCC][13] = 127, + [0][1][2][0][RTW89_ETSI][13] = 127, + [0][1][2][0][RTW89_MKK][13] = 127, + [0][1][2][0][RTW89_IC][13] = 127, + [0][1][2][0][RTW89_ACMA][13] = 127, + [0][1][2][1][RTW89_FCC][0] = 50, + [0][1][2][1][RTW89_ETSI][0] = 36, + [0][1][2][1][RTW89_MKK][0] = 68, + [0][1][2][1][RTW89_IC][0] = 60, + [0][1][2][1][RTW89_ACMA][0] = 36, + [0][1][2][1][RTW89_FCC][1] = 50, + [0][1][2][1][RTW89_ETSI][1] = 36, + [0][1][2][1][RTW89_MKK][1] = 68, + 
[0][1][2][1][RTW89_IC][1] = 60, + [0][1][2][1][RTW89_ACMA][1] = 36, + [0][1][2][1][RTW89_FCC][2] = 54, + [0][1][2][1][RTW89_ETSI][2] = 36, + [0][1][2][1][RTW89_MKK][2] = 68, + [0][1][2][1][RTW89_IC][2] = 64, + [0][1][2][1][RTW89_ACMA][2] = 36, + [0][1][2][1][RTW89_FCC][3] = 58, + [0][1][2][1][RTW89_ETSI][3] = 36, + [0][1][2][1][RTW89_MKK][3] = 68, + [0][1][2][1][RTW89_IC][3] = 68, + [0][1][2][1][RTW89_ACMA][3] = 36, + [0][1][2][1][RTW89_FCC][4] = 64, + [0][1][2][1][RTW89_ETSI][4] = 36, + [0][1][2][1][RTW89_MKK][4] = 68, + [0][1][2][1][RTW89_IC][4] = 74, + [0][1][2][1][RTW89_ACMA][4] = 36, + [0][1][2][1][RTW89_FCC][5] = 70, + [0][1][2][1][RTW89_ETSI][5] = 36, + [0][1][2][1][RTW89_MKK][5] = 68, + [0][1][2][1][RTW89_IC][5] = 78, + [0][1][2][1][RTW89_ACMA][5] = 36, + [0][1][2][1][RTW89_FCC][6] = 66, + [0][1][2][1][RTW89_ETSI][6] = 36, + [0][1][2][1][RTW89_MKK][6] = 68, + [0][1][2][1][RTW89_IC][6] = 76, + [0][1][2][1][RTW89_ACMA][6] = 36, + [0][1][2][1][RTW89_FCC][7] = 58, + [0][1][2][1][RTW89_ETSI][7] = 36, + [0][1][2][1][RTW89_MKK][7] = 68, + [0][1][2][1][RTW89_IC][7] = 68, + [0][1][2][1][RTW89_ACMA][7] = 36, + [0][1][2][1][RTW89_FCC][8] = 54, + [0][1][2][1][RTW89_ETSI][8] = 36, + [0][1][2][1][RTW89_MKK][8] = 68, + [0][1][2][1][RTW89_IC][8] = 64, + [0][1][2][1][RTW89_ACMA][8] = 36, + [0][1][2][1][RTW89_FCC][9] = 50, + [0][1][2][1][RTW89_ETSI][9] = 36, + [0][1][2][1][RTW89_MKK][9] = 68, + [0][1][2][1][RTW89_IC][9] = 60, + [0][1][2][1][RTW89_ACMA][9] = 36, + [0][1][2][1][RTW89_FCC][10] = 50, + [0][1][2][1][RTW89_ETSI][10] = 36, + [0][1][2][1][RTW89_MKK][10] = 68, + [0][1][2][1][RTW89_IC][10] = 60, + [0][1][2][1][RTW89_ACMA][10] = 36, + [0][1][2][1][RTW89_FCC][11] = 38, + [0][1][2][1][RTW89_ETSI][11] = 36, + [0][1][2][1][RTW89_MKK][11] = 68, + [0][1][2][1][RTW89_IC][11] = 48, + [0][1][2][1][RTW89_ACMA][11] = 36, + [0][1][2][1][RTW89_FCC][12] = 34, + [0][1][2][1][RTW89_ETSI][12] = 36, + [0][1][2][1][RTW89_MKK][12] = 68, + [0][1][2][1][RTW89_IC][12] = 44, + [0][1][2][1][RTW89_ACMA][12] = 36, + [0][1][2][1][RTW89_FCC][13] = 127, + [0][1][2][1][RTW89_ETSI][13] = 127, + [0][1][2][1][RTW89_MKK][13] = 127, + [0][1][2][1][RTW89_IC][13] = 127, + [0][1][2][1][RTW89_ACMA][13] = 127, + [1][0][2][0][RTW89_FCC][0] = 127, + [1][0][2][0][RTW89_ETSI][0] = 127, + [1][0][2][0][RTW89_MKK][0] = 127, + [1][0][2][0][RTW89_IC][0] = 127, + [1][0][2][0][RTW89_ACMA][0] = 127, + [1][0][2][0][RTW89_FCC][1] = 127, + [1][0][2][0][RTW89_ETSI][1] = 127, + [1][0][2][0][RTW89_MKK][1] = 127, + [1][0][2][0][RTW89_IC][1] = 127, + [1][0][2][0][RTW89_ACMA][1] = 127, + [1][0][2][0][RTW89_FCC][2] = 62, + [1][0][2][0][RTW89_ETSI][2] = 60, + [1][0][2][0][RTW89_MKK][2] = 74, + [1][0][2][0][RTW89_IC][2] = 72, + [1][0][2][0][RTW89_ACMA][2] = 60, + [1][0][2][0][RTW89_FCC][3] = 62, + [1][0][2][0][RTW89_ETSI][3] = 60, + [1][0][2][0][RTW89_MKK][3] = 74, + [1][0][2][0][RTW89_IC][3] = 72, + [1][0][2][0][RTW89_ACMA][3] = 60, + [1][0][2][0][RTW89_FCC][4] = 64, + [1][0][2][0][RTW89_ETSI][4] = 60, + [1][0][2][0][RTW89_MKK][4] = 74, + [1][0][2][0][RTW89_IC][4] = 74, + [1][0][2][0][RTW89_ACMA][4] = 60, + [1][0][2][0][RTW89_FCC][5] = 64, + [1][0][2][0][RTW89_ETSI][5] = 60, + [1][0][2][0][RTW89_MKK][5] = 74, + [1][0][2][0][RTW89_IC][5] = 74, + [1][0][2][0][RTW89_ACMA][5] = 60, + [1][0][2][0][RTW89_FCC][6] = 64, + [1][0][2][0][RTW89_ETSI][6] = 60, + [1][0][2][0][RTW89_MKK][6] = 74, + [1][0][2][0][RTW89_IC][6] = 74, + [1][0][2][0][RTW89_ACMA][6] = 60, + [1][0][2][0][RTW89_FCC][7] = 60, + [1][0][2][0][RTW89_ETSI][7] = 60, + [1][0][2][0][RTW89_MKK][7] = 74, + 
[1][0][2][0][RTW89_IC][7] = 70, + [1][0][2][0][RTW89_ACMA][7] = 60, + [1][0][2][0][RTW89_FCC][8] = 60, + [1][0][2][0][RTW89_ETSI][8] = 60, + [1][0][2][0][RTW89_MKK][8] = 74, + [1][0][2][0][RTW89_IC][8] = 70, + [1][0][2][0][RTW89_ACMA][8] = 60, + [1][0][2][0][RTW89_FCC][9] = 60, + [1][0][2][0][RTW89_ETSI][9] = 60, + [1][0][2][0][RTW89_MKK][9] = 74, + [1][0][2][0][RTW89_IC][9] = 70, + [1][0][2][0][RTW89_ACMA][9] = 60, + [1][0][2][0][RTW89_FCC][10] = 58, + [1][0][2][0][RTW89_ETSI][10] = 60, + [1][0][2][0][RTW89_MKK][10] = 74, + [1][0][2][0][RTW89_IC][10] = 68, + [1][0][2][0][RTW89_ACMA][10] = 60, + [1][0][2][0][RTW89_FCC][11] = 127, + [1][0][2][0][RTW89_ETSI][11] = 127, + [1][0][2][0][RTW89_MKK][11] = 127, + [1][0][2][0][RTW89_IC][11] = 127, + [1][0][2][0][RTW89_ACMA][11] = 127, + [1][0][2][0][RTW89_FCC][12] = 127, + [1][0][2][0][RTW89_ETSI][12] = 127, + [1][0][2][0][RTW89_MKK][12] = 127, + [1][0][2][0][RTW89_IC][12] = 127, + [1][0][2][0][RTW89_ACMA][12] = 127, + [1][0][2][0][RTW89_FCC][13] = 127, + [1][0][2][0][RTW89_ETSI][13] = 127, + [1][0][2][0][RTW89_MKK][13] = 127, + [1][0][2][0][RTW89_IC][13] = 127, + [1][0][2][0][RTW89_ACMA][13] = 127, + [1][1][2][0][RTW89_FCC][0] = 127, + [1][1][2][0][RTW89_ETSI][0] = 127, + [1][1][2][0][RTW89_MKK][0] = 127, + [1][1][2][0][RTW89_IC][0] = 127, + [1][1][2][0][RTW89_ACMA][0] = 127, + [1][1][2][0][RTW89_FCC][1] = 127, + [1][1][2][0][RTW89_ETSI][1] = 127, + [1][1][2][0][RTW89_MKK][1] = 127, + [1][1][2][0][RTW89_IC][1] = 127, + [1][1][2][0][RTW89_ACMA][1] = 127, + [1][1][2][0][RTW89_FCC][2] = 46, + [1][1][2][0][RTW89_ETSI][2] = 48, + [1][1][2][0][RTW89_MKK][2] = 68, + [1][1][2][0][RTW89_IC][2] = 56, + [1][1][2][0][RTW89_ACMA][2] = 48, + [1][1][2][0][RTW89_FCC][3] = 46, + [1][1][2][0][RTW89_ETSI][3] = 48, + [1][1][2][0][RTW89_MKK][3] = 68, + [1][1][2][0][RTW89_IC][3] = 56, + [1][1][2][0][RTW89_ACMA][3] = 48, + [1][1][2][0][RTW89_FCC][4] = 50, + [1][1][2][0][RTW89_ETSI][4] = 48, + [1][1][2][0][RTW89_MKK][4] = 68, + [1][1][2][0][RTW89_IC][4] = 60, + [1][1][2][0][RTW89_ACMA][4] = 48, + [1][1][2][0][RTW89_FCC][5] = 58, + [1][1][2][0][RTW89_ETSI][5] = 48, + [1][1][2][0][RTW89_MKK][5] = 68, + [1][1][2][0][RTW89_IC][5] = 68, + [1][1][2][0][RTW89_ACMA][5] = 48, + [1][1][2][0][RTW89_FCC][6] = 50, + [1][1][2][0][RTW89_ETSI][6] = 48, + [1][1][2][0][RTW89_MKK][6] = 68, + [1][1][2][0][RTW89_IC][6] = 60, + [1][1][2][0][RTW89_ACMA][6] = 48, + [1][1][2][0][RTW89_FCC][7] = 46, + [1][1][2][0][RTW89_ETSI][7] = 48, + [1][1][2][0][RTW89_MKK][7] = 68, + [1][1][2][0][RTW89_IC][7] = 56, + [1][1][2][0][RTW89_ACMA][7] = 48, + [1][1][2][0][RTW89_FCC][8] = 46, + [1][1][2][0][RTW89_ETSI][8] = 48, + [1][1][2][0][RTW89_MKK][8] = 68, + [1][1][2][0][RTW89_IC][8] = 56, + [1][1][2][0][RTW89_ACMA][8] = 48, + [1][1][2][0][RTW89_FCC][9] = 34, + [1][1][2][0][RTW89_ETSI][9] = 48, + [1][1][2][0][RTW89_MKK][9] = 68, + [1][1][2][0][RTW89_IC][9] = 44, + [1][1][2][0][RTW89_ACMA][9] = 48, + [1][1][2][0][RTW89_FCC][10] = 30, + [1][1][2][0][RTW89_ETSI][10] = 48, + [1][1][2][0][RTW89_MKK][10] = 68, + [1][1][2][0][RTW89_IC][10] = 40, + [1][1][2][0][RTW89_ACMA][10] = 48, + [1][1][2][0][RTW89_FCC][11] = 127, + [1][1][2][0][RTW89_ETSI][11] = 127, + [1][1][2][0][RTW89_MKK][11] = 127, + [1][1][2][0][RTW89_IC][11] = 127, + [1][1][2][0][RTW89_ACMA][11] = 127, + [1][1][2][0][RTW89_FCC][12] = 127, + [1][1][2][0][RTW89_ETSI][12] = 127, + [1][1][2][0][RTW89_MKK][12] = 127, + [1][1][2][0][RTW89_IC][12] = 127, + [1][1][2][0][RTW89_ACMA][12] = 127, + [1][1][2][0][RTW89_FCC][13] = 127, + [1][1][2][0][RTW89_ETSI][13] = 
127, + [1][1][2][0][RTW89_MKK][13] = 127, + [1][1][2][0][RTW89_IC][13] = 127, + [1][1][2][0][RTW89_ACMA][13] = 127, + [1][1][2][1][RTW89_FCC][0] = 127, + [1][1][2][1][RTW89_ETSI][0] = 127, + [1][1][2][1][RTW89_MKK][0] = 127, + [1][1][2][1][RTW89_IC][0] = 127, + [1][1][2][1][RTW89_ACMA][0] = 127, + [1][1][2][1][RTW89_FCC][1] = 127, + [1][1][2][1][RTW89_ETSI][1] = 127, + [1][1][2][1][RTW89_MKK][1] = 127, + [1][1][2][1][RTW89_IC][1] = 127, + [1][1][2][1][RTW89_ACMA][1] = 127, + [1][1][2][1][RTW89_FCC][2] = 46, + [1][1][2][1][RTW89_ETSI][2] = 36, + [1][1][2][1][RTW89_MKK][2] = 68, + [1][1][2][1][RTW89_IC][2] = 56, + [1][1][2][1][RTW89_ACMA][2] = 36, + [1][1][2][1][RTW89_FCC][3] = 46, + [1][1][2][1][RTW89_ETSI][3] = 36, + [1][1][2][1][RTW89_MKK][3] = 68, + [1][1][2][1][RTW89_IC][3] = 56, + [1][1][2][1][RTW89_ACMA][3] = 36, + [1][1][2][1][RTW89_FCC][4] = 50, + [1][1][2][1][RTW89_ETSI][4] = 36, + [1][1][2][1][RTW89_MKK][4] = 68, + [1][1][2][1][RTW89_IC][4] = 60, + [1][1][2][1][RTW89_ACMA][4] = 36, + [1][1][2][1][RTW89_FCC][5] = 58, + [1][1][2][1][RTW89_ETSI][5] = 36, + [1][1][2][1][RTW89_MKK][5] = 68, + [1][1][2][1][RTW89_IC][5] = 68, + [1][1][2][1][RTW89_ACMA][5] = 36, + [1][1][2][1][RTW89_FCC][6] = 50, + [1][1][2][1][RTW89_ETSI][6] = 36, + [1][1][2][1][RTW89_MKK][6] = 68, + [1][1][2][1][RTW89_IC][6] = 60, + [1][1][2][1][RTW89_ACMA][6] = 36, + [1][1][2][1][RTW89_FCC][7] = 46, + [1][1][2][1][RTW89_ETSI][7] = 36, + [1][1][2][1][RTW89_MKK][7] = 68, + [1][1][2][1][RTW89_IC][7] = 56, + [1][1][2][1][RTW89_ACMA][7] = 36, + [1][1][2][1][RTW89_FCC][8] = 46, + [1][1][2][1][RTW89_ETSI][8] = 36, + [1][1][2][1][RTW89_MKK][8] = 68, + [1][1][2][1][RTW89_IC][8] = 56, + [1][1][2][1][RTW89_ACMA][8] = 36, + [1][1][2][1][RTW89_FCC][9] = 34, + [1][1][2][1][RTW89_ETSI][9] = 36, + [1][1][2][1][RTW89_MKK][9] = 68, + [1][1][2][1][RTW89_IC][9] = 44, + [1][1][2][1][RTW89_ACMA][9] = 36, + [1][1][2][1][RTW89_FCC][10] = 30, + [1][1][2][1][RTW89_ETSI][10] = 36, + [1][1][2][1][RTW89_MKK][10] = 68, + [1][1][2][1][RTW89_IC][10] = 40, + [1][1][2][1][RTW89_ACMA][10] = 36, + [1][1][2][1][RTW89_FCC][11] = 127, + [1][1][2][1][RTW89_ETSI][11] = 127, + [1][1][2][1][RTW89_MKK][11] = 127, + [1][1][2][1][RTW89_IC][11] = 127, + [1][1][2][1][RTW89_ACMA][11] = 127, + [1][1][2][1][RTW89_FCC][12] = 127, + [1][1][2][1][RTW89_ETSI][12] = 127, + [1][1][2][1][RTW89_MKK][12] = 127, + [1][1][2][1][RTW89_IC][12] = 127, + [1][1][2][1][RTW89_ACMA][12] = 127, + [1][1][2][1][RTW89_FCC][13] = 127, + [1][1][2][1][RTW89_ETSI][13] = 127, + [1][1][2][1][RTW89_MKK][13] = 127, + [1][1][2][1][RTW89_IC][13] = 127, + [1][1][2][1][RTW89_ACMA][13] = 127, +}; + +const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = { + [0][0][1][0][RTW89_WW][0] = 60, + [0][0][1][0][RTW89_WW][2] = 60, + [0][0][1][0][RTW89_WW][4] = 60, + [0][0][1][0][RTW89_WW][6] = 60, + [0][0][1][0][RTW89_WW][8] = 60, + [0][0][1][0][RTW89_WW][10] = 60, + [0][0][1][0][RTW89_WW][12] = 60, + [0][0][1][0][RTW89_WW][14] = 60, + [0][0][1][0][RTW89_WW][15] = 60, + [0][0][1][0][RTW89_WW][17] = 60, + [0][0][1][0][RTW89_WW][19] = 60, + [0][0][1][0][RTW89_WW][21] = 60, + [0][0][1][0][RTW89_WW][23] = 60, + [0][0][1][0][RTW89_WW][25] = 66, + [0][0][1][0][RTW89_WW][27] = 66, + [0][0][1][0][RTW89_WW][29] = 66, + [0][0][1][0][RTW89_WW][31] = 60, + [0][0][1][0][RTW89_WW][33] = 60, + [0][0][1][0][RTW89_WW][35] = 60, + [0][0][1][0][RTW89_WW][37] = 70, + [0][0][1][0][RTW89_WW][38] = 30, + [0][0][1][0][RTW89_WW][40] = 30, + 
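One invariant worth noting in rtw89_8852c_txpwr_lmt_2g above, and in the 5 GHz table being filled in here: every RTW89_WW ("worldwide") entry equals the minimum of the usable per-domain values at the same index, where 127 (S8_MAX) marks a blocked combination (2.4 GHz channel 14, for instance, is 127 for every domain and rate section except CCK under MKK), and the worldwide entry degrades to 0 when every domain blocks. A standalone sketch that recomputes one worldwide entry from the per-domain rows above; the helper is ours, not the driver's:

	#include <stdint.h>
	#include <stdio.h>

	#define BLOCKED 127	/* sentinel used throughout these tables */

	/* 2.4 GHz CCK limits for channel 14 (index 13), taken from above:
	 * FCC, ETSI, MKK, IC, ACMA */
	static const int8_t ch14_cck[] = { 127, 127, 72, 127, 127 };

	/* minimum over domains, ignoring blocked entries; 0 if all block */
	static int8_t worldwide_min(const int8_t *lmt, unsigned int n)
	{
		int8_t ww = 0;
		unsigned int i;

		for (i = 0; i < n; i++)
			if (lmt[i] != BLOCKED && (!ww || lmt[i] < ww))
				ww = lmt[i];
		return ww;
	}

	int main(void)
	{
		/* matches [0][0][0][0][RTW89_WW][13] = 72 above */
		printf("WW = %d\n",
		       worldwide_min(ch14_cck,
				     sizeof(ch14_cck) / sizeof(ch14_cck[0])));
		return 0;
	}
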
[0][0][1][0][RTW89_WW][42] = 30, + [0][0][1][0][RTW89_WW][44] = 30, + [0][0][1][0][RTW89_WW][46] = 30, + [0][0][1][0][RTW89_WW][48] = 70, + [0][0][1][0][RTW89_WW][50] = 70, + [0][0][1][0][RTW89_WW][52] = 70, + [0][1][1][0][RTW89_WW][0] = 42, + [0][1][1][0][RTW89_WW][2] = 42, + [0][1][1][0][RTW89_WW][4] = 42, + [0][1][1][0][RTW89_WW][6] = 42, + [0][1][1][0][RTW89_WW][8] = 48, + [0][1][1][0][RTW89_WW][10] = 48, + [0][1][1][0][RTW89_WW][12] = 48, + [0][1][1][0][RTW89_WW][14] = 48, + [0][1][1][0][RTW89_WW][15] = 48, + [0][1][1][0][RTW89_WW][17] = 48, + [0][1][1][0][RTW89_WW][19] = 48, + [0][1][1][0][RTW89_WW][21] = 48, + [0][1][1][0][RTW89_WW][23] = 48, + [0][1][1][0][RTW89_WW][25] = 54, + [0][1][1][0][RTW89_WW][27] = 54, + [0][1][1][0][RTW89_WW][29] = 54, + [0][1][1][0][RTW89_WW][31] = 48, + [0][1][1][0][RTW89_WW][33] = 48, + [0][1][1][0][RTW89_WW][35] = 48, + [0][1][1][0][RTW89_WW][37] = 60, + [0][1][1][0][RTW89_WW][38] = 18, + [0][1][1][0][RTW89_WW][40] = 16, + [0][1][1][0][RTW89_WW][42] = 18, + [0][1][1][0][RTW89_WW][44] = 16, + [0][1][1][0][RTW89_WW][46] = 18, + [0][1][1][0][RTW89_WW][48] = 48, + [0][1][1][0][RTW89_WW][50] = 48, + [0][1][1][0][RTW89_WW][52] = 48, + [0][0][2][0][RTW89_WW][0] = 62, + [0][0][2][0][RTW89_WW][2] = 62, + [0][0][2][0][RTW89_WW][4] = 62, + [0][0][2][0][RTW89_WW][6] = 60, + [0][0][2][0][RTW89_WW][8] = 58, + [0][0][2][0][RTW89_WW][10] = 62, + [0][0][2][0][RTW89_WW][12] = 62, + [0][0][2][0][RTW89_WW][14] = 62, + [0][0][2][0][RTW89_WW][15] = 62, + [0][0][2][0][RTW89_WW][17] = 62, + [0][0][2][0][RTW89_WW][19] = 62, + [0][0][2][0][RTW89_WW][21] = 62, + [0][0][2][0][RTW89_WW][23] = 62, + [0][0][2][0][RTW89_WW][25] = 66, + [0][0][2][0][RTW89_WW][27] = 66, + [0][0][2][0][RTW89_WW][29] = 66, + [0][0][2][0][RTW89_WW][31] = 62, + [0][0][2][0][RTW89_WW][33] = 62, + [0][0][2][0][RTW89_WW][35] = 62, + [0][0][2][0][RTW89_WW][37] = 70, + [0][0][2][0][RTW89_WW][38] = 30, + [0][0][2][0][RTW89_WW][40] = 30, + [0][0][2][0][RTW89_WW][42] = 30, + [0][0][2][0][RTW89_WW][44] = 30, + [0][0][2][0][RTW89_WW][46] = 30, + [0][0][2][0][RTW89_WW][48] = 70, + [0][0][2][0][RTW89_WW][50] = 70, + [0][0][2][0][RTW89_WW][52] = 70, + [0][1][2][0][RTW89_WW][0] = 44, + [0][1][2][0][RTW89_WW][2] = 44, + [0][1][2][0][RTW89_WW][4] = 44, + [0][1][2][0][RTW89_WW][6] = 44, + [0][1][2][0][RTW89_WW][8] = 42, + [0][1][2][0][RTW89_WW][10] = 50, + [0][1][2][0][RTW89_WW][12] = 50, + [0][1][2][0][RTW89_WW][14] = 50, + [0][1][2][0][RTW89_WW][15] = 50, + [0][1][2][0][RTW89_WW][17] = 50, + [0][1][2][0][RTW89_WW][19] = 50, + [0][1][2][0][RTW89_WW][21] = 50, + [0][1][2][0][RTW89_WW][23] = 50, + [0][1][2][0][RTW89_WW][25] = 54, + [0][1][2][0][RTW89_WW][27] = 54, + [0][1][2][0][RTW89_WW][29] = 54, + [0][1][2][0][RTW89_WW][31] = 50, + [0][1][2][0][RTW89_WW][33] = 50, + [0][1][2][0][RTW89_WW][35] = 50, + [0][1][2][0][RTW89_WW][37] = 62, + [0][1][2][0][RTW89_WW][38] = 18, + [0][1][2][0][RTW89_WW][40] = 18, + [0][1][2][0][RTW89_WW][42] = 18, + [0][1][2][0][RTW89_WW][44] = 18, + [0][1][2][0][RTW89_WW][46] = 18, + [0][1][2][0][RTW89_WW][48] = 50, + [0][1][2][0][RTW89_WW][50] = 50, + [0][1][2][0][RTW89_WW][52] = 50, + [0][1][2][1][RTW89_WW][0] = 38, + [0][1][2][1][RTW89_WW][2] = 38, + [0][1][2][1][RTW89_WW][4] = 38, + [0][1][2][1][RTW89_WW][6] = 38, + [0][1][2][1][RTW89_WW][8] = 38, + [0][1][2][1][RTW89_WW][10] = 38, + [0][1][2][1][RTW89_WW][12] = 38, + [0][1][2][1][RTW89_WW][14] = 38, + [0][1][2][1][RTW89_WW][15] = 38, + [0][1][2][1][RTW89_WW][17] = 38, + [0][1][2][1][RTW89_WW][19] = 38, + [0][1][2][1][RTW89_WW][21] = 38, + 
[0][1][2][1][RTW89_WW][23] = 38, + [0][1][2][1][RTW89_WW][25] = 40, + [0][1][2][1][RTW89_WW][27] = 40, + [0][1][2][1][RTW89_WW][29] = 40, + [0][1][2][1][RTW89_WW][31] = 38, + [0][1][2][1][RTW89_WW][33] = 38, + [0][1][2][1][RTW89_WW][35] = 38, + [0][1][2][1][RTW89_WW][37] = 60, + [0][1][2][1][RTW89_WW][38] = 6, + [0][1][2][1][RTW89_WW][40] = 6, + [0][1][2][1][RTW89_WW][42] = 6, + [0][1][2][1][RTW89_WW][44] = 6, + [0][1][2][1][RTW89_WW][46] = 6, + [0][1][2][1][RTW89_WW][48] = 50, + [0][1][2][1][RTW89_WW][50] = 50, + [0][1][2][1][RTW89_WW][52] = 50, + [1][0][2][0][RTW89_WW][1] = 58, + [1][0][2][0][RTW89_WW][5] = 66, + [1][0][2][0][RTW89_WW][9] = 66, + [1][0][2][0][RTW89_WW][13] = 58, + [1][0][2][0][RTW89_WW][16] = 56, + [1][0][2][0][RTW89_WW][20] = 66, + [1][0][2][0][RTW89_WW][24] = 66, + [1][0][2][0][RTW89_WW][28] = 66, + [1][0][2][0][RTW89_WW][32] = 66, + [1][0][2][0][RTW89_WW][36] = 66, + [1][0][2][0][RTW89_WW][39] = 30, + [1][0][2][0][RTW89_WW][43] = 30, + [1][0][2][0][RTW89_WW][47] = 68, + [1][0][2][0][RTW89_WW][51] = 68, + [1][1][2][0][RTW89_WW][1] = 48, + [1][1][2][0][RTW89_WW][5] = 52, + [1][1][2][0][RTW89_WW][9] = 52, + [1][1][2][0][RTW89_WW][13] = 52, + [1][1][2][0][RTW89_WW][16] = 48, + [1][1][2][0][RTW89_WW][20] = 54, + [1][1][2][0][RTW89_WW][24] = 54, + [1][1][2][0][RTW89_WW][28] = 54, + [1][1][2][0][RTW89_WW][32] = 54, + [1][1][2][0][RTW89_WW][36] = 66, + [1][1][2][0][RTW89_WW][39] = 18, + [1][1][2][0][RTW89_WW][43] = 18, + [1][1][2][0][RTW89_WW][47] = 60, + [1][1][2][0][RTW89_WW][51] = 58, + [1][1][2][1][RTW89_WW][1] = 40, + [1][1][2][1][RTW89_WW][5] = 40, + [1][1][2][1][RTW89_WW][9] = 40, + [1][1][2][1][RTW89_WW][13] = 40, + [1][1][2][1][RTW89_WW][16] = 40, + [1][1][2][1][RTW89_WW][20] = 40, + [1][1][2][1][RTW89_WW][24] = 40, + [1][1][2][1][RTW89_WW][28] = 40, + [1][1][2][1][RTW89_WW][32] = 40, + [1][1][2][1][RTW89_WW][36] = 60, + [1][1][2][1][RTW89_WW][39] = 6, + [1][1][2][1][RTW89_WW][43] = 6, + [1][1][2][1][RTW89_WW][47] = 60, + [1][1][2][1][RTW89_WW][51] = 58, + [2][0][2][0][RTW89_WW][3] = 56, + [2][0][2][0][RTW89_WW][11] = 58, + [2][0][2][0][RTW89_WW][18] = 54, + [2][0][2][0][RTW89_WW][26] = 60, + [2][0][2][0][RTW89_WW][34] = 60, + [2][0][2][0][RTW89_WW][41] = 30, + [2][0][2][0][RTW89_WW][49] = 56, + [2][1][2][0][RTW89_WW][3] = 48, + [2][1][2][0][RTW89_WW][11] = 52, + [2][1][2][0][RTW89_WW][18] = 48, + [2][1][2][0][RTW89_WW][26] = 54, + [2][1][2][0][RTW89_WW][34] = 60, + [2][1][2][0][RTW89_WW][41] = 18, + [2][1][2][0][RTW89_WW][49] = 50, + [2][1][2][1][RTW89_WW][3] = 40, + [2][1][2][1][RTW89_WW][11] = 40, + [2][1][2][1][RTW89_WW][18] = 40, + [2][1][2][1][RTW89_WW][26] = 42, + [2][1][2][1][RTW89_WW][34] = 60, + [2][1][2][1][RTW89_WW][41] = 6, + [2][1][2][1][RTW89_WW][49] = 50, + [3][0][2][0][RTW89_WW][7] = 38, + [3][0][2][0][RTW89_WW][22] = 50, + [3][0][2][0][RTW89_WW][45] = 0, + [3][1][2][0][RTW89_WW][7] = 26, + [3][1][2][0][RTW89_WW][22] = 42, + [3][1][2][0][RTW89_WW][45] = 0, + [3][1][2][1][RTW89_WW][7] = 14, + [3][1][2][1][RTW89_WW][22] = 30, + [3][1][2][1][RTW89_WW][45] = 0, + [0][0][1][0][RTW89_FCC][0] = 70, + [0][0][1][0][RTW89_ETSI][0] = 66, + [0][0][1][0][RTW89_MKK][0] = 66, + [0][0][1][0][RTW89_IC][0] = 62, + [0][0][1][0][RTW89_ACMA][0] = 60, + [0][0][1][0][RTW89_FCC][2] = 70, + [0][0][1][0][RTW89_ETSI][2] = 66, + [0][0][1][0][RTW89_MKK][2] = 66, + [0][0][1][0][RTW89_IC][2] = 62, + [0][0][1][0][RTW89_ACMA][2] = 60, + [0][0][1][0][RTW89_FCC][4] = 70, + [0][0][1][0][RTW89_ETSI][4] = 66, + [0][0][1][0][RTW89_MKK][4] = 66, + [0][0][1][0][RTW89_IC][4] = 62, + 
[0][0][1][0][RTW89_ACMA][4] = 60, + [0][0][1][0][RTW89_FCC][6] = 70, + [0][0][1][0][RTW89_ETSI][6] = 66, + [0][0][1][0][RTW89_MKK][6] = 66, + [0][0][1][0][RTW89_IC][6] = 62, + [0][0][1][0][RTW89_ACMA][6] = 60, + [0][0][1][0][RTW89_FCC][8] = 70, + [0][0][1][0][RTW89_ETSI][8] = 66, + [0][0][1][0][RTW89_MKK][8] = 66, + [0][0][1][0][RTW89_IC][8] = 66, + [0][0][1][0][RTW89_ACMA][8] = 60, + [0][0][1][0][RTW89_FCC][10] = 70, + [0][0][1][0][RTW89_ETSI][10] = 66, + [0][0][1][0][RTW89_MKK][10] = 66, + [0][0][1][0][RTW89_IC][10] = 66, + [0][0][1][0][RTW89_ACMA][10] = 60, + [0][0][1][0][RTW89_FCC][12] = 70, + [0][0][1][0][RTW89_ETSI][12] = 66, + [0][0][1][0][RTW89_MKK][12] = 66, + [0][0][1][0][RTW89_IC][12] = 66, + [0][0][1][0][RTW89_ACMA][12] = 60, + [0][0][1][0][RTW89_FCC][14] = 70, + [0][0][1][0][RTW89_ETSI][14] = 66, + [0][0][1][0][RTW89_MKK][14] = 66, + [0][0][1][0][RTW89_IC][14] = 66, + [0][0][1][0][RTW89_ACMA][14] = 60, + [0][0][1][0][RTW89_FCC][15] = 68, + [0][0][1][0][RTW89_ETSI][15] = 66, + [0][0][1][0][RTW89_MKK][15] = 70, + [0][0][1][0][RTW89_IC][15] = 70, + [0][0][1][0][RTW89_ACMA][15] = 60, + [0][0][1][0][RTW89_FCC][17] = 70, + [0][0][1][0][RTW89_ETSI][17] = 66, + [0][0][1][0][RTW89_MKK][17] = 70, + [0][0][1][0][RTW89_IC][17] = 70, + [0][0][1][0][RTW89_ACMA][17] = 60, + [0][0][1][0][RTW89_FCC][19] = 70, + [0][0][1][0][RTW89_ETSI][19] = 66, + [0][0][1][0][RTW89_MKK][19] = 70, + [0][0][1][0][RTW89_IC][19] = 70, + [0][0][1][0][RTW89_ACMA][19] = 60, + [0][0][1][0][RTW89_FCC][21] = 70, + [0][0][1][0][RTW89_ETSI][21] = 66, + [0][0][1][0][RTW89_MKK][21] = 70, + [0][0][1][0][RTW89_IC][21] = 70, + [0][0][1][0][RTW89_ACMA][21] = 60, + [0][0][1][0][RTW89_FCC][23] = 70, + [0][0][1][0][RTW89_ETSI][23] = 66, + [0][0][1][0][RTW89_MKK][23] = 70, + [0][0][1][0][RTW89_IC][23] = 70, + [0][0][1][0][RTW89_ACMA][23] = 60, + [0][0][1][0][RTW89_FCC][25] = 70, + [0][0][1][0][RTW89_ETSI][25] = 66, + [0][0][1][0][RTW89_MKK][25] = 70, + [0][0][1][0][RTW89_IC][25] = 127, + [0][0][1][0][RTW89_ACMA][25] = 127, + [0][0][1][0][RTW89_FCC][27] = 70, + [0][0][1][0][RTW89_ETSI][27] = 66, + [0][0][1][0][RTW89_MKK][27] = 70, + [0][0][1][0][RTW89_IC][27] = 127, + [0][0][1][0][RTW89_ACMA][27] = 127, + [0][0][1][0][RTW89_FCC][29] = 70, + [0][0][1][0][RTW89_ETSI][29] = 66, + [0][0][1][0][RTW89_MKK][29] = 70, + [0][0][1][0][RTW89_IC][29] = 127, + [0][0][1][0][RTW89_ACMA][29] = 127, + [0][0][1][0][RTW89_FCC][31] = 70, + [0][0][1][0][RTW89_ETSI][31] = 66, + [0][0][1][0][RTW89_MKK][31] = 70, + [0][0][1][0][RTW89_IC][31] = 70, + [0][0][1][0][RTW89_ACMA][31] = 60, + [0][0][1][0][RTW89_FCC][33] = 70, + [0][0][1][0][RTW89_ETSI][33] = 66, + [0][0][1][0][RTW89_MKK][33] = 70, + [0][0][1][0][RTW89_IC][33] = 70, + [0][0][1][0][RTW89_ACMA][33] = 60, + [0][0][1][0][RTW89_FCC][35] = 62, + [0][0][1][0][RTW89_ETSI][35] = 66, + [0][0][1][0][RTW89_MKK][35] = 70, + [0][0][1][0][RTW89_IC][35] = 70, + [0][0][1][0][RTW89_ACMA][35] = 60, + [0][0][1][0][RTW89_FCC][37] = 70, + [0][0][1][0][RTW89_ETSI][37] = 127, + [0][0][1][0][RTW89_MKK][37] = 70, + [0][0][1][0][RTW89_IC][37] = 70, + [0][0][1][0][RTW89_ACMA][37] = 70, + [0][0][1][0][RTW89_FCC][38] = 70, + [0][0][1][0][RTW89_ETSI][38] = 30, + [0][0][1][0][RTW89_MKK][38] = 127, + [0][0][1][0][RTW89_IC][38] = 70, + [0][0][1][0][RTW89_ACMA][38] = 70, + [0][0][1][0][RTW89_FCC][40] = 70, + [0][0][1][0][RTW89_ETSI][40] = 30, + [0][0][1][0][RTW89_MKK][40] = 127, + [0][0][1][0][RTW89_IC][40] = 70, + [0][0][1][0][RTW89_ACMA][40] = 70, + [0][0][1][0][RTW89_FCC][42] = 70, + [0][0][1][0][RTW89_ETSI][42] = 30, + 
[0][0][1][0][RTW89_MKK][42] = 127, + [0][0][1][0][RTW89_IC][42] = 70, + [0][0][1][0][RTW89_ACMA][42] = 70, + [0][0][1][0][RTW89_FCC][44] = 70, + [0][0][1][0][RTW89_ETSI][44] = 30, + [0][0][1][0][RTW89_MKK][44] = 127, + [0][0][1][0][RTW89_IC][44] = 70, + [0][0][1][0][RTW89_ACMA][44] = 70, + [0][0][1][0][RTW89_FCC][46] = 70, + [0][0][1][0][RTW89_ETSI][46] = 30, + [0][0][1][0][RTW89_MKK][46] = 127, + [0][0][1][0][RTW89_IC][46] = 70, + [0][0][1][0][RTW89_ACMA][46] = 70, + [0][0][1][0][RTW89_FCC][48] = 70, + [0][0][1][0][RTW89_ETSI][48] = 127, + [0][0][1][0][RTW89_MKK][48] = 127, + [0][0][1][0][RTW89_IC][48] = 127, + [0][0][1][0][RTW89_ACMA][48] = 127, + [0][0][1][0][RTW89_FCC][50] = 70, + [0][0][1][0][RTW89_ETSI][50] = 127, + [0][0][1][0][RTW89_MKK][50] = 127, + [0][0][1][0][RTW89_IC][50] = 127, + [0][0][1][0][RTW89_ACMA][50] = 127, + [0][0][1][0][RTW89_FCC][52] = 70, + [0][0][1][0][RTW89_ETSI][52] = 127, + [0][0][1][0][RTW89_MKK][52] = 127, + [0][0][1][0][RTW89_IC][52] = 127, + [0][0][1][0][RTW89_ACMA][52] = 127, + [0][1][1][0][RTW89_FCC][0] = 60, + [0][1][1][0][RTW89_ETSI][0] = 54, + [0][1][1][0][RTW89_MKK][0] = 54, + [0][1][1][0][RTW89_IC][0] = 42, + [0][1][1][0][RTW89_ACMA][0] = 48, + [0][1][1][0][RTW89_FCC][2] = 60, + [0][1][1][0][RTW89_ETSI][2] = 54, + [0][1][1][0][RTW89_MKK][2] = 54, + [0][1][1][0][RTW89_IC][2] = 42, + [0][1][1][0][RTW89_ACMA][2] = 48, + [0][1][1][0][RTW89_FCC][4] = 60, + [0][1][1][0][RTW89_ETSI][4] = 54, + [0][1][1][0][RTW89_MKK][4] = 54, + [0][1][1][0][RTW89_IC][4] = 42, + [0][1][1][0][RTW89_ACMA][4] = 48, + [0][1][1][0][RTW89_FCC][6] = 60, + [0][1][1][0][RTW89_ETSI][6] = 54, + [0][1][1][0][RTW89_MKK][6] = 54, + [0][1][1][0][RTW89_IC][6] = 42, + [0][1][1][0][RTW89_ACMA][6] = 48, + [0][1][1][0][RTW89_FCC][8] = 60, + [0][1][1][0][RTW89_ETSI][8] = 54, + [0][1][1][0][RTW89_MKK][8] = 52, + [0][1][1][0][RTW89_IC][8] = 54, + [0][1][1][0][RTW89_ACMA][8] = 48, + [0][1][1][0][RTW89_FCC][10] = 60, + [0][1][1][0][RTW89_ETSI][10] = 54, + [0][1][1][0][RTW89_MKK][10] = 54, + [0][1][1][0][RTW89_IC][10] = 54, + [0][1][1][0][RTW89_ACMA][10] = 48, + [0][1][1][0][RTW89_FCC][12] = 60, + [0][1][1][0][RTW89_ETSI][12] = 54, + [0][1][1][0][RTW89_MKK][12] = 54, + [0][1][1][0][RTW89_IC][12] = 54, + [0][1][1][0][RTW89_ACMA][12] = 48, + [0][1][1][0][RTW89_FCC][14] = 60, + [0][1][1][0][RTW89_ETSI][14] = 54, + [0][1][1][0][RTW89_MKK][14] = 54, + [0][1][1][0][RTW89_IC][14] = 54, + [0][1][1][0][RTW89_ACMA][14] = 48, + [0][1][1][0][RTW89_FCC][15] = 58, + [0][1][1][0][RTW89_ETSI][15] = 54, + [0][1][1][0][RTW89_MKK][15] = 70, + [0][1][1][0][RTW89_IC][15] = 68, + [0][1][1][0][RTW89_ACMA][15] = 48, + [0][1][1][0][RTW89_FCC][17] = 60, + [0][1][1][0][RTW89_ETSI][17] = 54, + [0][1][1][0][RTW89_MKK][17] = 70, + [0][1][1][0][RTW89_IC][17] = 70, + [0][1][1][0][RTW89_ACMA][17] = 48, + [0][1][1][0][RTW89_FCC][19] = 60, + [0][1][1][0][RTW89_ETSI][19] = 54, + [0][1][1][0][RTW89_MKK][19] = 70, + [0][1][1][0][RTW89_IC][19] = 70, + [0][1][1][0][RTW89_ACMA][19] = 48, + [0][1][1][0][RTW89_FCC][21] = 60, + [0][1][1][0][RTW89_ETSI][21] = 54, + [0][1][1][0][RTW89_MKK][21] = 70, + [0][1][1][0][RTW89_IC][21] = 70, + [0][1][1][0][RTW89_ACMA][21] = 48, + [0][1][1][0][RTW89_FCC][23] = 60, + [0][1][1][0][RTW89_ETSI][23] = 54, + [0][1][1][0][RTW89_MKK][23] = 70, + [0][1][1][0][RTW89_IC][23] = 70, + [0][1][1][0][RTW89_ACMA][23] = 48, + [0][1][1][0][RTW89_FCC][25] = 60, + [0][1][1][0][RTW89_ETSI][25] = 54, + [0][1][1][0][RTW89_MKK][25] = 70, + [0][1][1][0][RTW89_IC][25] = 127, + [0][1][1][0][RTW89_ACMA][25] = 127, + 
[0][1][1][0][RTW89_FCC][27] = 60, + [0][1][1][0][RTW89_ETSI][27] = 54, + [0][1][1][0][RTW89_MKK][27] = 70, + [0][1][1][0][RTW89_IC][27] = 127, + [0][1][1][0][RTW89_ACMA][27] = 127, + [0][1][1][0][RTW89_FCC][29] = 60, + [0][1][1][0][RTW89_ETSI][29] = 54, + [0][1][1][0][RTW89_MKK][29] = 70, + [0][1][1][0][RTW89_IC][29] = 127, + [0][1][1][0][RTW89_ACMA][29] = 127, + [0][1][1][0][RTW89_FCC][31] = 60, + [0][1][1][0][RTW89_ETSI][31] = 54, + [0][1][1][0][RTW89_MKK][31] = 70, + [0][1][1][0][RTW89_IC][31] = 70, + [0][1][1][0][RTW89_ACMA][31] = 48, + [0][1][1][0][RTW89_FCC][33] = 60, + [0][1][1][0][RTW89_ETSI][33] = 54, + [0][1][1][0][RTW89_MKK][33] = 70, + [0][1][1][0][RTW89_IC][33] = 70, + [0][1][1][0][RTW89_ACMA][33] = 48, + [0][1][1][0][RTW89_FCC][35] = 58, + [0][1][1][0][RTW89_ETSI][35] = 54, + [0][1][1][0][RTW89_MKK][35] = 70, + [0][1][1][0][RTW89_IC][35] = 68, + [0][1][1][0][RTW89_ACMA][35] = 48, + [0][1][1][0][RTW89_FCC][37] = 60, + [0][1][1][0][RTW89_ETSI][37] = 127, + [0][1][1][0][RTW89_MKK][37] = 70, + [0][1][1][0][RTW89_IC][37] = 70, + [0][1][1][0][RTW89_ACMA][37] = 70, + [0][1][1][0][RTW89_FCC][38] = 70, + [0][1][1][0][RTW89_ETSI][38] = 18, + [0][1][1][0][RTW89_MKK][38] = 127, + [0][1][1][0][RTW89_IC][38] = 70, + [0][1][1][0][RTW89_ACMA][38] = 70, + [0][1][1][0][RTW89_FCC][40] = 70, + [0][1][1][0][RTW89_ETSI][40] = 18, + [0][1][1][0][RTW89_MKK][40] = 127, + [0][1][1][0][RTW89_IC][40] = 70, + [0][1][1][0][RTW89_ACMA][40] = 16, + [0][1][1][0][RTW89_FCC][42] = 70, + [0][1][1][0][RTW89_ETSI][42] = 18, + [0][1][1][0][RTW89_MKK][42] = 127, + [0][1][1][0][RTW89_IC][42] = 70, + [0][1][1][0][RTW89_ACMA][42] = 70, + [0][1][1][0][RTW89_FCC][44] = 70, + [0][1][1][0][RTW89_ETSI][44] = 18, + [0][1][1][0][RTW89_MKK][44] = 127, + [0][1][1][0][RTW89_IC][44] = 70, + [0][1][1][0][RTW89_ACMA][44] = 16, + [0][1][1][0][RTW89_FCC][46] = 70, + [0][1][1][0][RTW89_ETSI][46] = 18, + [0][1][1][0][RTW89_MKK][46] = 127, + [0][1][1][0][RTW89_IC][46] = 70, + [0][1][1][0][RTW89_ACMA][46] = 70, + [0][1][1][0][RTW89_FCC][48] = 48, + [0][1][1][0][RTW89_ETSI][48] = 127, + [0][1][1][0][RTW89_MKK][48] = 127, + [0][1][1][0][RTW89_IC][48] = 127, + [0][1][1][0][RTW89_ACMA][48] = 127, + [0][1][1][0][RTW89_FCC][50] = 48, + [0][1][1][0][RTW89_ETSI][50] = 127, + [0][1][1][0][RTW89_MKK][50] = 127, + [0][1][1][0][RTW89_IC][50] = 127, + [0][1][1][0][RTW89_ACMA][50] = 127, + [0][1][1][0][RTW89_FCC][52] = 48, + [0][1][1][0][RTW89_ETSI][52] = 127, + [0][1][1][0][RTW89_MKK][52] = 127, + [0][1][1][0][RTW89_IC][52] = 127, + [0][1][1][0][RTW89_ACMA][52] = 127, + [0][0][2][0][RTW89_FCC][0] = 70, + [0][0][2][0][RTW89_ETSI][0] = 66, + [0][0][2][0][RTW89_MKK][0] = 68, + [0][0][2][0][RTW89_IC][0] = 66, + [0][0][2][0][RTW89_ACMA][0] = 62, + [0][0][2][0][RTW89_FCC][2] = 70, + [0][0][2][0][RTW89_ETSI][2] = 66, + [0][0][2][0][RTW89_MKK][2] = 68, + [0][0][2][0][RTW89_IC][2] = 66, + [0][0][2][0][RTW89_ACMA][2] = 62, + [0][0][2][0][RTW89_FCC][4] = 70, + [0][0][2][0][RTW89_ETSI][4] = 66, + [0][0][2][0][RTW89_MKK][4] = 68, + [0][0][2][0][RTW89_IC][4] = 66, + [0][0][2][0][RTW89_ACMA][4] = 62, + [0][0][2][0][RTW89_FCC][6] = 70, + [0][0][2][0][RTW89_ETSI][6] = 66, + [0][0][2][0][RTW89_MKK][6] = 60, + [0][0][2][0][RTW89_IC][6] = 66, + [0][0][2][0][RTW89_ACMA][6] = 62, + [0][0][2][0][RTW89_FCC][8] = 70, + [0][0][2][0][RTW89_ETSI][8] = 66, + [0][0][2][0][RTW89_MKK][8] = 58, + [0][0][2][0][RTW89_IC][8] = 66, + [0][0][2][0][RTW89_ACMA][8] = 62, + [0][0][2][0][RTW89_FCC][10] = 70, + [0][0][2][0][RTW89_ETSI][10] = 66, + [0][0][2][0][RTW89_MKK][10] = 70, + 
[0][0][2][0][RTW89_IC][10] = 66,
+	[0][0][2][0][RTW89_ACMA][10] = 62,
+	[0][0][2][0][RTW89_FCC][12] = 70,
+	[0][0][2][0][RTW89_ETSI][12] = 66,
+	[0][0][2][0][RTW89_MKK][12] = 70,
+	[0][0][2][0][RTW89_IC][12] = 66,
+	[0][0][2][0][RTW89_ACMA][12] = 62,
+	[0][0][2][0][RTW89_FCC][14] = 70,
+	[0][0][2][0][RTW89_ETSI][14] = 66,
+	[0][0][2][0][RTW89_MKK][14] = 70,
+	[0][0][2][0][RTW89_IC][14] = 66,
+	[0][0][2][0][RTW89_ACMA][14] = 62,
+	[0][0][2][0][RTW89_FCC][15] = 66,
+	[0][0][2][0][RTW89_ETSI][15] = 66,
+	[0][0][2][0][RTW89_MKK][15] = 70,
+	[0][0][2][0][RTW89_IC][15] = 70,
+	[0][0][2][0][RTW89_ACMA][15] = 62,
+	[0][0][2][0][RTW89_FCC][17] = 70,
+	[0][0][2][0][RTW89_ETSI][17] = 66,
+	[0][0][2][0][RTW89_MKK][17] = 70,
+	[0][0][2][0][RTW89_IC][17] = 70,
+	[0][0][2][0][RTW89_ACMA][17] = 62,
+	[0][0][2][0][RTW89_FCC][19] = 70,
+	[0][0][2][0][RTW89_ETSI][19] = 66,
+	[0][0][2][0][RTW89_MKK][19] = 70,
+	[0][0][2][0][RTW89_IC][19] = 70,
+	[0][0][2][0][RTW89_ACMA][19] = 62,
+	[0][0][2][0][RTW89_FCC][21] = 70,
+	[0][0][2][0][RTW89_ETSI][21] = 66,
+	[0][0][2][0][RTW89_MKK][21] = 70,
+	[0][0][2][0][RTW89_IC][21] = 70,
+	[0][0][2][0][RTW89_ACMA][21] = 62,
+	[0][0][2][0][RTW89_FCC][23] = 70,
+	[0][0][2][0][RTW89_ETSI][23] = 66,
+	[0][0][2][0][RTW89_MKK][23] = 70,
+	[0][0][2][0][RTW89_IC][23] = 70,
+	[0][0][2][0][RTW89_ACMA][23] = 62,
+	[0][0][2][0][RTW89_FCC][25] = 70,
+	[0][0][2][0][RTW89_ETSI][25] = 66,
+	[0][0][2][0][RTW89_MKK][25] = 70,
+	[0][0][2][0][RTW89_IC][25] = 127,
+	[0][0][2][0][RTW89_ACMA][25] = 127,
+	[0][0][2][0][RTW89_FCC][27] = 70,
+	[0][0][2][0][RTW89_ETSI][27] = 66,
+	[0][0][2][0][RTW89_MKK][27] = 70,
+	[0][0][2][0][RTW89_IC][27] = 127,
+	[0][0][2][0][RTW89_ACMA][27] = 127,
+	[0][0][2][0][RTW89_FCC][29] = 70,
+	[0][0][2][0][RTW89_ETSI][29] = 66,
+	[0][0][2][0][RTW89_MKK][29] = 70,
+	[0][0][2][0][RTW89_IC][29] = 127,
+	[0][0][2][0][RTW89_ACMA][29] = 127,
+	[0][0][2][0][RTW89_FCC][31] = 70,
+	[0][0][2][0][RTW89_ETSI][31] = 66,
+	[0][0][2][0][RTW89_MKK][31] = 70,
+	[0][0][2][0][RTW89_IC][31] = 70,
+	[0][0][2][0][RTW89_ACMA][31] = 62,
+	[0][0][2][0][RTW89_FCC][33] = 70,
+	[0][0][2][0][RTW89_ETSI][33] = 66,
+	[0][0][2][0][RTW89_MKK][33] = 70,
+	[0][0][2][0][RTW89_IC][33] = 70,
+	[0][0][2][0][RTW89_ACMA][33] = 62,
+	[0][0][2][0][RTW89_FCC][35] = 62,
+	[0][0][2][0][RTW89_ETSI][35] = 66,
+	[0][0][2][0][RTW89_MKK][35] = 70,
+	[0][0][2][0][RTW89_IC][35] = 70,
+	[0][0][2][0][RTW89_ACMA][35] = 62,
+	[0][0][2][0][RTW89_FCC][37] = 70,
+	[0][0][2][0][RTW89_ETSI][37] = 127,
+	[0][0][2][0][RTW89_MKK][37] = 70,
+	[0][0][2][0][RTW89_IC][37] = 70,
+	[0][0][2][0][RTW89_ACMA][37] = 70,
+	[0][0][2][0][RTW89_FCC][38] = 70,
+	[0][0][2][0][RTW89_ETSI][38] = 30,
+	[0][0][2][0][RTW89_MKK][38] = 127,
+	[0][0][2][0][RTW89_IC][38] = 70,
+	[0][0][2][0][RTW89_ACMA][38] = 70,
+	[0][0][2][0][RTW89_FCC][40] = 70,
+	[0][0][2][0][RTW89_ETSI][40] = 30,
+	[0][0][2][0][RTW89_MKK][40] = 127,
+	[0][0][2][0][RTW89_IC][40] = 70,
+	[0][0][2][0][RTW89_ACMA][40] = 70,
+	[0][0][2][0][RTW89_FCC][42] = 70,
+	[0][0][2][0][RTW89_ETSI][42] = 30,
+	[0][0][2][0][RTW89_MKK][42] = 127,
+	[0][0][2][0][RTW89_IC][42] = 70,
+	[0][0][2][0][RTW89_ACMA][42] = 70,
+	[0][0][2][0][RTW89_FCC][44] = 70,
+	[0][0][2][0][RTW89_ETSI][44] = 30,
+	[0][0][2][0][RTW89_MKK][44] = 127,
+	[0][0][2][0][RTW89_IC][44] = 70,
+	[0][0][2][0][RTW89_ACMA][44] = 70,
+	[0][0][2][0][RTW89_FCC][46] = 70,
+	[0][0][2][0][RTW89_ETSI][46] = 30,
+	[0][0][2][0][RTW89_MKK][46] = 127,
+	[0][0][2][0][RTW89_IC][46] = 70,
+	[0][0][2][0][RTW89_ACMA][46] = 70,
+	[0][0][2][0][RTW89_FCC][48] = 70,
+	[0][0][2][0][RTW89_ETSI][48] = 127,
+	[0][0][2][0][RTW89_MKK][48] = 127,
+	[0][0][2][0][RTW89_IC][48] = 127,
+	[0][0][2][0][RTW89_ACMA][48] = 127,
+	[0][0][2][0][RTW89_FCC][50] = 70,
+	[0][0][2][0][RTW89_ETSI][50] = 127,
+	[0][0][2][0][RTW89_MKK][50] = 127,
+	[0][0][2][0][RTW89_IC][50] = 127,
+	[0][0][2][0][RTW89_ACMA][50] = 127,
+	[0][0][2][0][RTW89_FCC][52] = 70,
+	[0][0][2][0][RTW89_ETSI][52] = 127,
+	[0][0][2][0][RTW89_MKK][52] = 127,
+	[0][0][2][0][RTW89_IC][52] = 127,
+	[0][0][2][0][RTW89_ACMA][52] = 127,
+	[0][1][2][0][RTW89_FCC][0] = 62,
+	[0][1][2][0][RTW89_ETSI][0] = 54,
+	[0][1][2][0][RTW89_MKK][0] = 54,
+	[0][1][2][0][RTW89_IC][0] = 44,
+	[0][1][2][0][RTW89_ACMA][0] = 50,
+	[0][1][2][0][RTW89_FCC][2] = 62,
+	[0][1][2][0][RTW89_ETSI][2] = 54,
+	[0][1][2][0][RTW89_MKK][2] = 54,
+	[0][1][2][0][RTW89_IC][2] = 44,
+	[0][1][2][0][RTW89_ACMA][2] = 50,
+	[0][1][2][0][RTW89_FCC][4] = 62,
+	[0][1][2][0][RTW89_ETSI][4] = 54,
+	[0][1][2][0][RTW89_MKK][4] = 54,
+	[0][1][2][0][RTW89_IC][4] = 44,
+	[0][1][2][0][RTW89_ACMA][4] = 50,
+	[0][1][2][0][RTW89_FCC][6] = 62,
+	[0][1][2][0][RTW89_ETSI][6] = 54,
+	[0][1][2][0][RTW89_MKK][6] = 50,
+	[0][1][2][0][RTW89_IC][6] = 44,
+	[0][1][2][0][RTW89_ACMA][6] = 50,
+	[0][1][2][0][RTW89_FCC][8] = 62,
+	[0][1][2][0][RTW89_ETSI][8] = 54,
+	[0][1][2][0][RTW89_MKK][8] = 42,
+	[0][1][2][0][RTW89_IC][8] = 54,
+	[0][1][2][0][RTW89_ACMA][8] = 50,
+	[0][1][2][0][RTW89_FCC][10] = 62,
+	[0][1][2][0][RTW89_ETSI][10] = 54,
+	[0][1][2][0][RTW89_MKK][10] = 54,
+	[0][1][2][0][RTW89_IC][10] = 54,
+	[0][1][2][0][RTW89_ACMA][10] = 50,
+	[0][1][2][0][RTW89_FCC][12] = 62,
+	[0][1][2][0][RTW89_ETSI][12] = 54,
+	[0][1][2][0][RTW89_MKK][12] = 54,
+	[0][1][2][0][RTW89_IC][12] = 54,
+	[0][1][2][0][RTW89_ACMA][12] = 50,
+	[0][1][2][0][RTW89_FCC][14] = 62,
+	[0][1][2][0][RTW89_ETSI][14] = 54,
+	[0][1][2][0][RTW89_MKK][14] = 54,
+	[0][1][2][0][RTW89_IC][14] = 54,
+	[0][1][2][0][RTW89_ACMA][14] = 50,
+	[0][1][2][0][RTW89_FCC][15] = 60,
+	[0][1][2][0][RTW89_ETSI][15] = 54,
+	[0][1][2][0][RTW89_MKK][15] = 68,
+	[0][1][2][0][RTW89_IC][15] = 70,
+	[0][1][2][0][RTW89_ACMA][15] = 50,
+	[0][1][2][0][RTW89_FCC][17] = 62,
+	[0][1][2][0][RTW89_ETSI][17] = 54,
+	[0][1][2][0][RTW89_MKK][17] = 68,
+	[0][1][2][0][RTW89_IC][17] = 70,
+	[0][1][2][0][RTW89_ACMA][17] = 50,
+	[0][1][2][0][RTW89_FCC][19] = 62,
+	[0][1][2][0][RTW89_ETSI][19] = 54,
+	[0][1][2][0][RTW89_MKK][19] = 68,
+	[0][1][2][0][RTW89_IC][19] = 70,
+	[0][1][2][0][RTW89_ACMA][19] = 50,
+	[0][1][2][0][RTW89_FCC][21] = 62,
+	[0][1][2][0][RTW89_ETSI][21] = 54,
+	[0][1][2][0][RTW89_MKK][21] = 68,
+	[0][1][2][0][RTW89_IC][21] = 70,
+	[0][1][2][0][RTW89_ACMA][21] = 50,
+	[0][1][2][0][RTW89_FCC][23] = 62,
+	[0][1][2][0][RTW89_ETSI][23] = 54,
+	[0][1][2][0][RTW89_MKK][23] = 68,
+	[0][1][2][0][RTW89_IC][23] = 70,
+	[0][1][2][0][RTW89_ACMA][23] = 50,
+	[0][1][2][0][RTW89_FCC][25] = 62,
+	[0][1][2][0][RTW89_ETSI][25] = 54,
+	[0][1][2][0][RTW89_MKK][25] = 68,
+	[0][1][2][0][RTW89_IC][25] = 127,
+	[0][1][2][0][RTW89_ACMA][25] = 127,
+	[0][1][2][0][RTW89_FCC][27] = 62,
+	[0][1][2][0][RTW89_ETSI][27] = 54,
+	[0][1][2][0][RTW89_MKK][27] = 68,
+	[0][1][2][0][RTW89_IC][27] = 127,
+	[0][1][2][0][RTW89_ACMA][27] = 127,
+	[0][1][2][0][RTW89_FCC][29] = 62,
+	[0][1][2][0][RTW89_ETSI][29] = 54,
+	[0][1][2][0][RTW89_MKK][29] = 68,
+	[0][1][2][0][RTW89_IC][29] = 127,
+	[0][1][2][0][RTW89_ACMA][29] = 127,
+	[0][1][2][0][RTW89_FCC][31] = 62,
+	[0][1][2][0][RTW89_ETSI][31] = 54,
+	[0][1][2][0][RTW89_MKK][31] = 68,
+	[0][1][2][0][RTW89_IC][31] = 70,
+
[0][1][2][0][RTW89_ACMA][31] = 50, + [0][1][2][0][RTW89_FCC][33] = 62, + [0][1][2][0][RTW89_ETSI][33] = 54, + [0][1][2][0][RTW89_MKK][33] = 68, + [0][1][2][0][RTW89_IC][33] = 70, + [0][1][2][0][RTW89_ACMA][33] = 50, + [0][1][2][0][RTW89_FCC][35] = 58, + [0][1][2][0][RTW89_ETSI][35] = 54, + [0][1][2][0][RTW89_MKK][35] = 68, + [0][1][2][0][RTW89_IC][35] = 68, + [0][1][2][0][RTW89_ACMA][35] = 50, + [0][1][2][0][RTW89_FCC][37] = 62, + [0][1][2][0][RTW89_ETSI][37] = 127, + [0][1][2][0][RTW89_MKK][37] = 68, + [0][1][2][0][RTW89_IC][37] = 70, + [0][1][2][0][RTW89_ACMA][37] = 70, + [0][1][2][0][RTW89_FCC][38] = 70, + [0][1][2][0][RTW89_ETSI][38] = 18, + [0][1][2][0][RTW89_MKK][38] = 127, + [0][1][2][0][RTW89_IC][38] = 70, + [0][1][2][0][RTW89_ACMA][38] = 70, + [0][1][2][0][RTW89_FCC][40] = 70, + [0][1][2][0][RTW89_ETSI][40] = 18, + [0][1][2][0][RTW89_MKK][40] = 127, + [0][1][2][0][RTW89_IC][40] = 70, + [0][1][2][0][RTW89_ACMA][40] = 70, + [0][1][2][0][RTW89_FCC][42] = 70, + [0][1][2][0][RTW89_ETSI][42] = 18, + [0][1][2][0][RTW89_MKK][42] = 127, + [0][1][2][0][RTW89_IC][42] = 70, + [0][1][2][0][RTW89_ACMA][42] = 70, + [0][1][2][0][RTW89_FCC][44] = 70, + [0][1][2][0][RTW89_ETSI][44] = 18, + [0][1][2][0][RTW89_MKK][44] = 127, + [0][1][2][0][RTW89_IC][44] = 70, + [0][1][2][0][RTW89_ACMA][44] = 70, + [0][1][2][0][RTW89_FCC][46] = 70, + [0][1][2][0][RTW89_ETSI][46] = 18, + [0][1][2][0][RTW89_MKK][46] = 127, + [0][1][2][0][RTW89_IC][46] = 70, + [0][1][2][0][RTW89_ACMA][46] = 70, + [0][1][2][0][RTW89_FCC][48] = 50, + [0][1][2][0][RTW89_ETSI][48] = 127, + [0][1][2][0][RTW89_MKK][48] = 127, + [0][1][2][0][RTW89_IC][48] = 127, + [0][1][2][0][RTW89_ACMA][48] = 127, + [0][1][2][0][RTW89_FCC][50] = 50, + [0][1][2][0][RTW89_ETSI][50] = 127, + [0][1][2][0][RTW89_MKK][50] = 127, + [0][1][2][0][RTW89_IC][50] = 127, + [0][1][2][0][RTW89_ACMA][50] = 127, + [0][1][2][0][RTW89_FCC][52] = 50, + [0][1][2][0][RTW89_ETSI][52] = 127, + [0][1][2][0][RTW89_MKK][52] = 127, + [0][1][2][0][RTW89_IC][52] = 127, + [0][1][2][0][RTW89_ACMA][52] = 127, + [0][1][2][1][RTW89_FCC][0] = 60, + [0][1][2][1][RTW89_ETSI][0] = 40, + [0][1][2][1][RTW89_MKK][0] = 54, + [0][1][2][1][RTW89_IC][0] = 42, + [0][1][2][1][RTW89_ACMA][0] = 38, + [0][1][2][1][RTW89_FCC][2] = 60, + [0][1][2][1][RTW89_ETSI][2] = 40, + [0][1][2][1][RTW89_MKK][2] = 54, + [0][1][2][1][RTW89_IC][2] = 42, + [0][1][2][1][RTW89_ACMA][2] = 38, + [0][1][2][1][RTW89_FCC][4] = 60, + [0][1][2][1][RTW89_ETSI][4] = 40, + [0][1][2][1][RTW89_MKK][4] = 54, + [0][1][2][1][RTW89_IC][4] = 42, + [0][1][2][1][RTW89_ACMA][4] = 38, + [0][1][2][1][RTW89_FCC][6] = 60, + [0][1][2][1][RTW89_ETSI][6] = 40, + [0][1][2][1][RTW89_MKK][6] = 50, + [0][1][2][1][RTW89_IC][6] = 42, + [0][1][2][1][RTW89_ACMA][6] = 38, + [0][1][2][1][RTW89_FCC][8] = 60, + [0][1][2][1][RTW89_ETSI][8] = 40, + [0][1][2][1][RTW89_MKK][8] = 42, + [0][1][2][1][RTW89_IC][8] = 42, + [0][1][2][1][RTW89_ACMA][8] = 38, + [0][1][2][1][RTW89_FCC][10] = 60, + [0][1][2][1][RTW89_ETSI][10] = 40, + [0][1][2][1][RTW89_MKK][10] = 66, + [0][1][2][1][RTW89_IC][10] = 42, + [0][1][2][1][RTW89_ACMA][10] = 38, + [0][1][2][1][RTW89_FCC][12] = 60, + [0][1][2][1][RTW89_ETSI][12] = 40, + [0][1][2][1][RTW89_MKK][12] = 66, + [0][1][2][1][RTW89_IC][12] = 42, + [0][1][2][1][RTW89_ACMA][12] = 38, + [0][1][2][1][RTW89_FCC][14] = 60, + [0][1][2][1][RTW89_ETSI][14] = 40, + [0][1][2][1][RTW89_MKK][14] = 66, + [0][1][2][1][RTW89_IC][14] = 42, + [0][1][2][1][RTW89_ACMA][14] = 38, + [0][1][2][1][RTW89_FCC][15] = 60, + [0][1][2][1][RTW89_ETSI][15] = 40, + 
[0][1][2][1][RTW89_MKK][15] = 68, + [0][1][2][1][RTW89_IC][15] = 70, + [0][1][2][1][RTW89_ACMA][15] = 38, + [0][1][2][1][RTW89_FCC][17] = 60, + [0][1][2][1][RTW89_ETSI][17] = 40, + [0][1][2][1][RTW89_MKK][17] = 68, + [0][1][2][1][RTW89_IC][17] = 70, + [0][1][2][1][RTW89_ACMA][17] = 38, + [0][1][2][1][RTW89_FCC][19] = 60, + [0][1][2][1][RTW89_ETSI][19] = 40, + [0][1][2][1][RTW89_MKK][19] = 68, + [0][1][2][1][RTW89_IC][19] = 70, + [0][1][2][1][RTW89_ACMA][19] = 38, + [0][1][2][1][RTW89_FCC][21] = 60, + [0][1][2][1][RTW89_ETSI][21] = 40, + [0][1][2][1][RTW89_MKK][21] = 68, + [0][1][2][1][RTW89_IC][21] = 70, + [0][1][2][1][RTW89_ACMA][21] = 38, + [0][1][2][1][RTW89_FCC][23] = 60, + [0][1][2][1][RTW89_ETSI][23] = 40, + [0][1][2][1][RTW89_MKK][23] = 68, + [0][1][2][1][RTW89_IC][23] = 70, + [0][1][2][1][RTW89_ACMA][23] = 38, + [0][1][2][1][RTW89_FCC][25] = 58, + [0][1][2][1][RTW89_ETSI][25] = 40, + [0][1][2][1][RTW89_MKK][25] = 68, + [0][1][2][1][RTW89_IC][25] = 127, + [0][1][2][1][RTW89_ACMA][25] = 127, + [0][1][2][1][RTW89_FCC][27] = 58, + [0][1][2][1][RTW89_ETSI][27] = 40, + [0][1][2][1][RTW89_MKK][27] = 68, + [0][1][2][1][RTW89_IC][27] = 127, + [0][1][2][1][RTW89_ACMA][27] = 127, + [0][1][2][1][RTW89_FCC][29] = 58, + [0][1][2][1][RTW89_ETSI][29] = 40, + [0][1][2][1][RTW89_MKK][29] = 68, + [0][1][2][1][RTW89_IC][29] = 127, + [0][1][2][1][RTW89_ACMA][29] = 127, + [0][1][2][1][RTW89_FCC][31] = 58, + [0][1][2][1][RTW89_ETSI][31] = 40, + [0][1][2][1][RTW89_MKK][31] = 68, + [0][1][2][1][RTW89_IC][31] = 68, + [0][1][2][1][RTW89_ACMA][31] = 38, + [0][1][2][1][RTW89_FCC][33] = 58, + [0][1][2][1][RTW89_ETSI][33] = 40, + [0][1][2][1][RTW89_MKK][33] = 68, + [0][1][2][1][RTW89_IC][33] = 68, + [0][1][2][1][RTW89_ACMA][33] = 38, + [0][1][2][1][RTW89_FCC][35] = 58, + [0][1][2][1][RTW89_ETSI][35] = 40, + [0][1][2][1][RTW89_MKK][35] = 68, + [0][1][2][1][RTW89_IC][35] = 68, + [0][1][2][1][RTW89_ACMA][35] = 38, + [0][1][2][1][RTW89_FCC][37] = 60, + [0][1][2][1][RTW89_ETSI][37] = 127, + [0][1][2][1][RTW89_MKK][37] = 68, + [0][1][2][1][RTW89_IC][37] = 70, + [0][1][2][1][RTW89_ACMA][37] = 70, + [0][1][2][1][RTW89_FCC][38] = 70, + [0][1][2][1][RTW89_ETSI][38] = 6, + [0][1][2][1][RTW89_MKK][38] = 127, + [0][1][2][1][RTW89_IC][38] = 70, + [0][1][2][1][RTW89_ACMA][38] = 70, + [0][1][2][1][RTW89_FCC][40] = 70, + [0][1][2][1][RTW89_ETSI][40] = 6, + [0][1][2][1][RTW89_MKK][40] = 127, + [0][1][2][1][RTW89_IC][40] = 70, + [0][1][2][1][RTW89_ACMA][40] = 70, + [0][1][2][1][RTW89_FCC][42] = 70, + [0][1][2][1][RTW89_ETSI][42] = 6, + [0][1][2][1][RTW89_MKK][42] = 127, + [0][1][2][1][RTW89_IC][42] = 70, + [0][1][2][1][RTW89_ACMA][42] = 70, + [0][1][2][1][RTW89_FCC][44] = 70, + [0][1][2][1][RTW89_ETSI][44] = 6, + [0][1][2][1][RTW89_MKK][44] = 127, + [0][1][2][1][RTW89_IC][44] = 70, + [0][1][2][1][RTW89_ACMA][44] = 70, + [0][1][2][1][RTW89_FCC][46] = 70, + [0][1][2][1][RTW89_ETSI][46] = 6, + [0][1][2][1][RTW89_MKK][46] = 127, + [0][1][2][1][RTW89_IC][46] = 70, + [0][1][2][1][RTW89_ACMA][46] = 70, + [0][1][2][1][RTW89_FCC][48] = 50, + [0][1][2][1][RTW89_ETSI][48] = 127, + [0][1][2][1][RTW89_MKK][48] = 127, + [0][1][2][1][RTW89_IC][48] = 127, + [0][1][2][1][RTW89_ACMA][48] = 127, + [0][1][2][1][RTW89_FCC][50] = 50, + [0][1][2][1][RTW89_ETSI][50] = 127, + [0][1][2][1][RTW89_MKK][50] = 127, + [0][1][2][1][RTW89_IC][50] = 127, + [0][1][2][1][RTW89_ACMA][50] = 127, + [0][1][2][1][RTW89_FCC][52] = 50, + [0][1][2][1][RTW89_ETSI][52] = 127, + [0][1][2][1][RTW89_MKK][52] = 127, + [0][1][2][1][RTW89_IC][52] = 127, + 
[0][1][2][1][RTW89_ACMA][52] = 127, + [1][0][2][0][RTW89_FCC][1] = 58, + [1][0][2][0][RTW89_ETSI][1] = 66, + [1][0][2][0][RTW89_MKK][1] = 66, + [1][0][2][0][RTW89_IC][1] = 66, + [1][0][2][0][RTW89_ACMA][1] = 66, + [1][0][2][0][RTW89_FCC][5] = 68, + [1][0][2][0][RTW89_ETSI][5] = 66, + [1][0][2][0][RTW89_MKK][5] = 66, + [1][0][2][0][RTW89_IC][5] = 66, + [1][0][2][0][RTW89_ACMA][5] = 66, + [1][0][2][0][RTW89_FCC][9] = 68, + [1][0][2][0][RTW89_ETSI][9] = 66, + [1][0][2][0][RTW89_MKK][9] = 66, + [1][0][2][0][RTW89_IC][9] = 66, + [1][0][2][0][RTW89_ACMA][9] = 66, + [1][0][2][0][RTW89_FCC][13] = 58, + [1][0][2][0][RTW89_ETSI][13] = 66, + [1][0][2][0][RTW89_MKK][13] = 66, + [1][0][2][0][RTW89_IC][13] = 66, + [1][0][2][0][RTW89_ACMA][13] = 66, + [1][0][2][0][RTW89_FCC][16] = 56, + [1][0][2][0][RTW89_ETSI][16] = 66, + [1][0][2][0][RTW89_MKK][16] = 66, + [1][0][2][0][RTW89_IC][16] = 66, + [1][0][2][0][RTW89_ACMA][16] = 66, + [1][0][2][0][RTW89_FCC][20] = 68, + [1][0][2][0][RTW89_ETSI][20] = 66, + [1][0][2][0][RTW89_MKK][20] = 66, + [1][0][2][0][RTW89_IC][20] = 66, + [1][0][2][0][RTW89_ACMA][20] = 66, + [1][0][2][0][RTW89_FCC][24] = 68, + [1][0][2][0][RTW89_ETSI][24] = 66, + [1][0][2][0][RTW89_MKK][24] = 66, + [1][0][2][0][RTW89_IC][24] = 127, + [1][0][2][0][RTW89_ACMA][24] = 127, + [1][0][2][0][RTW89_FCC][28] = 68, + [1][0][2][0][RTW89_ETSI][28] = 66, + [1][0][2][0][RTW89_MKK][28] = 66, + [1][0][2][0][RTW89_IC][28] = 127, + [1][0][2][0][RTW89_ACMA][28] = 127, + [1][0][2][0][RTW89_FCC][32] = 68, + [1][0][2][0][RTW89_ETSI][32] = 66, + [1][0][2][0][RTW89_MKK][32] = 66, + [1][0][2][0][RTW89_IC][32] = 66, + [1][0][2][0][RTW89_ACMA][32] = 66, + [1][0][2][0][RTW89_FCC][36] = 68, + [1][0][2][0][RTW89_ETSI][36] = 127, + [1][0][2][0][RTW89_MKK][36] = 66, + [1][0][2][0][RTW89_IC][36] = 66, + [1][0][2][0][RTW89_ACMA][36] = 66, + [1][0][2][0][RTW89_FCC][39] = 68, + [1][0][2][0][RTW89_ETSI][39] = 30, + [1][0][2][0][RTW89_MKK][39] = 127, + [1][0][2][0][RTW89_IC][39] = 66, + [1][0][2][0][RTW89_ACMA][39] = 66, + [1][0][2][0][RTW89_FCC][43] = 68, + [1][0][2][0][RTW89_ETSI][43] = 30, + [1][0][2][0][RTW89_MKK][43] = 127, + [1][0][2][0][RTW89_IC][43] = 66, + [1][0][2][0][RTW89_ACMA][43] = 66, + [1][0][2][0][RTW89_FCC][47] = 68, + [1][0][2][0][RTW89_ETSI][47] = 127, + [1][0][2][0][RTW89_MKK][47] = 127, + [1][0][2][0][RTW89_IC][47] = 127, + [1][0][2][0][RTW89_ACMA][47] = 127, + [1][0][2][0][RTW89_FCC][51] = 68, + [1][0][2][0][RTW89_ETSI][51] = 127, + [1][0][2][0][RTW89_MKK][51] = 127, + [1][0][2][0][RTW89_IC][51] = 127, + [1][0][2][0][RTW89_ACMA][51] = 127, + [1][1][2][0][RTW89_FCC][1] = 54, + [1][1][2][0][RTW89_ETSI][1] = 54, + [1][1][2][0][RTW89_MKK][1] = 48, + [1][1][2][0][RTW89_IC][1] = 60, + [1][1][2][0][RTW89_ACMA][1] = 60, + [1][1][2][0][RTW89_FCC][5] = 68, + [1][1][2][0][RTW89_ETSI][5] = 54, + [1][1][2][0][RTW89_MKK][5] = 52, + [1][1][2][0][RTW89_IC][5] = 60, + [1][1][2][0][RTW89_ACMA][5] = 60, + [1][1][2][0][RTW89_FCC][9] = 68, + [1][1][2][0][RTW89_ETSI][9] = 54, + [1][1][2][0][RTW89_MKK][9] = 52, + [1][1][2][0][RTW89_IC][9] = 60, + [1][1][2][0][RTW89_ACMA][9] = 60, + [1][1][2][0][RTW89_FCC][13] = 54, + [1][1][2][0][RTW89_ETSI][13] = 54, + [1][1][2][0][RTW89_MKK][13] = 52, + [1][1][2][0][RTW89_IC][13] = 60, + [1][1][2][0][RTW89_ACMA][13] = 60, + [1][1][2][0][RTW89_FCC][16] = 48, + [1][1][2][0][RTW89_ETSI][16] = 54, + [1][1][2][0][RTW89_MKK][16] = 66, + [1][1][2][0][RTW89_IC][16] = 58, + [1][1][2][0][RTW89_ACMA][16] = 60, + [1][1][2][0][RTW89_FCC][20] = 68, + [1][1][2][0][RTW89_ETSI][20] = 54, + 
[1][1][2][0][RTW89_MKK][20] = 66, + [1][1][2][0][RTW89_IC][20] = 66, + [1][1][2][0][RTW89_ACMA][20] = 60, + [1][1][2][0][RTW89_FCC][24] = 68, + [1][1][2][0][RTW89_ETSI][24] = 54, + [1][1][2][0][RTW89_MKK][24] = 66, + [1][1][2][0][RTW89_IC][24] = 127, + [1][1][2][0][RTW89_ACMA][24] = 127, + [1][1][2][0][RTW89_FCC][28] = 68, + [1][1][2][0][RTW89_ETSI][28] = 54, + [1][1][2][0][RTW89_MKK][28] = 66, + [1][1][2][0][RTW89_IC][28] = 127, + [1][1][2][0][RTW89_ACMA][28] = 127, + [1][1][2][0][RTW89_FCC][32] = 60, + [1][1][2][0][RTW89_ETSI][32] = 54, + [1][1][2][0][RTW89_MKK][32] = 66, + [1][1][2][0][RTW89_IC][32] = 66, + [1][1][2][0][RTW89_ACMA][32] = 54, + [1][1][2][0][RTW89_FCC][36] = 68, + [1][1][2][0][RTW89_ETSI][36] = 127, + [1][1][2][0][RTW89_MKK][36] = 66, + [1][1][2][0][RTW89_IC][36] = 66, + [1][1][2][0][RTW89_ACMA][36] = 66, + [1][1][2][0][RTW89_FCC][39] = 68, + [1][1][2][0][RTW89_ETSI][39] = 18, + [1][1][2][0][RTW89_MKK][39] = 127, + [1][1][2][0][RTW89_IC][39] = 66, + [1][1][2][0][RTW89_ACMA][39] = 66, + [1][1][2][0][RTW89_FCC][43] = 68, + [1][1][2][0][RTW89_ETSI][43] = 18, + [1][1][2][0][RTW89_MKK][43] = 127, + [1][1][2][0][RTW89_IC][43] = 66, + [1][1][2][0][RTW89_ACMA][43] = 66, + [1][1][2][0][RTW89_FCC][47] = 60, + [1][1][2][0][RTW89_ETSI][47] = 127, + [1][1][2][0][RTW89_MKK][47] = 127, + [1][1][2][0][RTW89_IC][47] = 127, + [1][1][2][0][RTW89_ACMA][47] = 127, + [1][1][2][0][RTW89_FCC][51] = 58, + [1][1][2][0][RTW89_ETSI][51] = 127, + [1][1][2][0][RTW89_MKK][51] = 127, + [1][1][2][0][RTW89_IC][51] = 127, + [1][1][2][0][RTW89_ACMA][51] = 127, + [1][1][2][1][RTW89_FCC][1] = 54, + [1][1][2][1][RTW89_ETSI][1] = 40, + [1][1][2][1][RTW89_MKK][1] = 48, + [1][1][2][1][RTW89_IC][1] = 48, + [1][1][2][1][RTW89_ACMA][1] = 48, + [1][1][2][1][RTW89_FCC][5] = 60, + [1][1][2][1][RTW89_ETSI][5] = 40, + [1][1][2][1][RTW89_MKK][5] = 52, + [1][1][2][1][RTW89_IC][5] = 48, + [1][1][2][1][RTW89_ACMA][5] = 48, + [1][1][2][1][RTW89_FCC][9] = 60, + [1][1][2][1][RTW89_ETSI][9] = 40, + [1][1][2][1][RTW89_MKK][9] = 52, + [1][1][2][1][RTW89_IC][9] = 48, + [1][1][2][1][RTW89_ACMA][9] = 48, + [1][1][2][1][RTW89_FCC][13] = 54, + [1][1][2][1][RTW89_ETSI][13] = 40, + [1][1][2][1][RTW89_MKK][13] = 52, + [1][1][2][1][RTW89_IC][13] = 48, + [1][1][2][1][RTW89_ACMA][13] = 48, + [1][1][2][1][RTW89_FCC][16] = 48, + [1][1][2][1][RTW89_ETSI][16] = 40, + [1][1][2][1][RTW89_MKK][16] = 66, + [1][1][2][1][RTW89_IC][16] = 58, + [1][1][2][1][RTW89_ACMA][16] = 48, + [1][1][2][1][RTW89_FCC][20] = 60, + [1][1][2][1][RTW89_ETSI][20] = 40, + [1][1][2][1][RTW89_MKK][20] = 66, + [1][1][2][1][RTW89_IC][20] = 66, + [1][1][2][1][RTW89_ACMA][20] = 48, + [1][1][2][1][RTW89_FCC][24] = 60, + [1][1][2][1][RTW89_ETSI][24] = 40, + [1][1][2][1][RTW89_MKK][24] = 66, + [1][1][2][1][RTW89_IC][24] = 127, + [1][1][2][1][RTW89_ACMA][24] = 127, + [1][1][2][1][RTW89_FCC][28] = 60, + [1][1][2][1][RTW89_ETSI][28] = 40, + [1][1][2][1][RTW89_MKK][28] = 66, + [1][1][2][1][RTW89_IC][28] = 127, + [1][1][2][1][RTW89_ACMA][28] = 127, + [1][1][2][1][RTW89_FCC][32] = 60, + [1][1][2][1][RTW89_ETSI][32] = 40, + [1][1][2][1][RTW89_MKK][32] = 66, + [1][1][2][1][RTW89_IC][32] = 66, + [1][1][2][1][RTW89_ACMA][32] = 42, + [1][1][2][1][RTW89_FCC][36] = 60, + [1][1][2][1][RTW89_ETSI][36] = 127, + [1][1][2][1][RTW89_MKK][36] = 66, + [1][1][2][1][RTW89_IC][36] = 66, + [1][1][2][1][RTW89_ACMA][36] = 66, + [1][1][2][1][RTW89_FCC][39] = 68, + [1][1][2][1][RTW89_ETSI][39] = 6, + [1][1][2][1][RTW89_MKK][39] = 127, + [1][1][2][1][RTW89_IC][39] = 66, + [1][1][2][1][RTW89_ACMA][39] = 66, + 
[1][1][2][1][RTW89_FCC][43] = 68, + [1][1][2][1][RTW89_ETSI][43] = 6, + [1][1][2][1][RTW89_MKK][43] = 127, + [1][1][2][1][RTW89_IC][43] = 66, + [1][1][2][1][RTW89_ACMA][43] = 66, + [1][1][2][1][RTW89_FCC][47] = 60, + [1][1][2][1][RTW89_ETSI][47] = 127, + [1][1][2][1][RTW89_MKK][47] = 127, + [1][1][2][1][RTW89_IC][47] = 127, + [1][1][2][1][RTW89_ACMA][47] = 127, + [1][1][2][1][RTW89_FCC][51] = 58, + [1][1][2][1][RTW89_ETSI][51] = 127, + [1][1][2][1][RTW89_MKK][51] = 127, + [1][1][2][1][RTW89_IC][51] = 127, + [1][1][2][1][RTW89_ACMA][51] = 127, + [2][0][2][0][RTW89_FCC][3] = 56, + [2][0][2][0][RTW89_ETSI][3] = 60, + [2][0][2][0][RTW89_MKK][3] = 60, + [2][0][2][0][RTW89_IC][3] = 60, + [2][0][2][0][RTW89_ACMA][3] = 60, + [2][0][2][0][RTW89_FCC][11] = 58, + [2][0][2][0][RTW89_ETSI][11] = 60, + [2][0][2][0][RTW89_MKK][11] = 60, + [2][0][2][0][RTW89_IC][11] = 60, + [2][0][2][0][RTW89_ACMA][11] = 60, + [2][0][2][0][RTW89_FCC][18] = 54, + [2][0][2][0][RTW89_ETSI][18] = 60, + [2][0][2][0][RTW89_MKK][18] = 60, + [2][0][2][0][RTW89_IC][18] = 60, + [2][0][2][0][RTW89_ACMA][18] = 60, + [2][0][2][0][RTW89_FCC][26] = 62, + [2][0][2][0][RTW89_ETSI][26] = 60, + [2][0][2][0][RTW89_MKK][26] = 60, + [2][0][2][0][RTW89_IC][26] = 127, + [2][0][2][0][RTW89_ACMA][26] = 127, + [2][0][2][0][RTW89_FCC][34] = 62, + [2][0][2][0][RTW89_ETSI][34] = 127, + [2][0][2][0][RTW89_MKK][34] = 60, + [2][0][2][0][RTW89_IC][34] = 60, + [2][0][2][0][RTW89_ACMA][34] = 60, + [2][0][2][0][RTW89_FCC][41] = 62, + [2][0][2][0][RTW89_ETSI][41] = 30, + [2][0][2][0][RTW89_MKK][41] = 127, + [2][0][2][0][RTW89_IC][41] = 60, + [2][0][2][0][RTW89_ACMA][41] = 60, + [2][0][2][0][RTW89_FCC][49] = 56, + [2][0][2][0][RTW89_ETSI][49] = 127, + [2][0][2][0][RTW89_MKK][49] = 127, + [2][0][2][0][RTW89_IC][49] = 127, + [2][0][2][0][RTW89_ACMA][49] = 127, + [2][1][2][0][RTW89_FCC][3] = 48, + [2][1][2][0][RTW89_ETSI][3] = 54, + [2][1][2][0][RTW89_MKK][3] = 56, + [2][1][2][0][RTW89_IC][3] = 52, + [2][1][2][0][RTW89_ACMA][3] = 52, + [2][1][2][0][RTW89_FCC][11] = 54, + [2][1][2][0][RTW89_ETSI][11] = 54, + [2][1][2][0][RTW89_MKK][11] = 54, + [2][1][2][0][RTW89_IC][11] = 52, + [2][1][2][0][RTW89_ACMA][11] = 52, + [2][1][2][0][RTW89_FCC][18] = 48, + [2][1][2][0][RTW89_ETSI][18] = 54, + [2][1][2][0][RTW89_MKK][18] = 60, + [2][1][2][0][RTW89_IC][18] = 58, + [2][1][2][0][RTW89_ACMA][18] = 52, + [2][1][2][0][RTW89_FCC][26] = 62, + [2][1][2][0][RTW89_ETSI][26] = 54, + [2][1][2][0][RTW89_MKK][26] = 56, + [2][1][2][0][RTW89_IC][26] = 127, + [2][1][2][0][RTW89_ACMA][26] = 127, + [2][1][2][0][RTW89_FCC][34] = 62, + [2][1][2][0][RTW89_ETSI][34] = 127, + [2][1][2][0][RTW89_MKK][34] = 60, + [2][1][2][0][RTW89_IC][34] = 60, + [2][1][2][0][RTW89_ACMA][34] = 60, + [2][1][2][0][RTW89_FCC][41] = 62, + [2][1][2][0][RTW89_ETSI][41] = 18, + [2][1][2][0][RTW89_MKK][41] = 127, + [2][1][2][0][RTW89_IC][41] = 60, + [2][1][2][0][RTW89_ACMA][41] = 60, + [2][1][2][0][RTW89_FCC][49] = 50, + [2][1][2][0][RTW89_ETSI][49] = 127, + [2][1][2][0][RTW89_MKK][49] = 127, + [2][1][2][0][RTW89_IC][49] = 127, + [2][1][2][0][RTW89_ACMA][49] = 127, + [2][1][2][1][RTW89_FCC][3] = 48, + [2][1][2][1][RTW89_ETSI][3] = 40, + [2][1][2][1][RTW89_MKK][3] = 56, + [2][1][2][1][RTW89_IC][3] = 40, + [2][1][2][1][RTW89_ACMA][3] = 40, + [2][1][2][1][RTW89_FCC][11] = 54, + [2][1][2][1][RTW89_ETSI][11] = 40, + [2][1][2][1][RTW89_MKK][11] = 54, + [2][1][2][1][RTW89_IC][11] = 40, + [2][1][2][1][RTW89_ACMA][11] = 40, + [2][1][2][1][RTW89_FCC][18] = 48, + [2][1][2][1][RTW89_ETSI][18] = 40, + [2][1][2][1][RTW89_MKK][18] = 60, 
+	[2][1][2][1][RTW89_IC][18] = 58,
+	[2][1][2][1][RTW89_ACMA][18] = 40,
+	[2][1][2][1][RTW89_FCC][26] = 60,
+	[2][1][2][1][RTW89_ETSI][26] = 42,
+	[2][1][2][1][RTW89_MKK][26] = 56,
+	[2][1][2][1][RTW89_IC][26] = 127,
+	[2][1][2][1][RTW89_ACMA][26] = 127,
+	[2][1][2][1][RTW89_FCC][34] = 60,
+	[2][1][2][1][RTW89_ETSI][34] = 127,
+	[2][1][2][1][RTW89_MKK][34] = 60,
+	[2][1][2][1][RTW89_IC][34] = 60,
+	[2][1][2][1][RTW89_ACMA][34] = 60,
+	[2][1][2][1][RTW89_FCC][41] = 62,
+	[2][1][2][1][RTW89_ETSI][41] = 6,
+	[2][1][2][1][RTW89_MKK][41] = 127,
+	[2][1][2][1][RTW89_IC][41] = 60,
+	[2][1][2][1][RTW89_ACMA][41] = 60,
+	[2][1][2][1][RTW89_FCC][49] = 50,
+	[2][1][2][1][RTW89_ETSI][49] = 127,
+	[2][1][2][1][RTW89_MKK][49] = 127,
+	[2][1][2][1][RTW89_IC][49] = 127,
+	[2][1][2][1][RTW89_ACMA][49] = 127,
+	[3][0][2][0][RTW89_FCC][7] = 38,
+	[3][0][2][0][RTW89_ETSI][7] = 50,
+	[3][0][2][0][RTW89_MKK][7] = 50,
+	[3][0][2][0][RTW89_IC][7] = 50,
+	[3][0][2][0][RTW89_ACMA][7] = 50,
+	[3][0][2][0][RTW89_FCC][22] = 52,
+	[3][0][2][0][RTW89_ETSI][22] = 50,
+	[3][0][2][0][RTW89_MKK][22] = 50,
+	[3][0][2][0][RTW89_IC][22] = 50,
+	[3][0][2][0][RTW89_ACMA][22] = 50,
+	[3][0][2][0][RTW89_FCC][45] = 127,
+	[3][0][2][0][RTW89_ETSI][45] = 127,
+	[3][0][2][0][RTW89_MKK][45] = 127,
+	[3][0][2][0][RTW89_IC][45] = 127,
+	[3][0][2][0][RTW89_ACMA][45] = 127,
+	[3][1][2][0][RTW89_FCC][7] = 26,
+	[3][1][2][0][RTW89_ETSI][7] = 50,
+	[3][1][2][0][RTW89_MKK][7] = 36,
+	[3][1][2][0][RTW89_IC][7] = 44,
+	[3][1][2][0][RTW89_ACMA][7] = 44,
+	[3][1][2][0][RTW89_FCC][22] = 42,
+	[3][1][2][0][RTW89_ETSI][22] = 50,
+	[3][1][2][0][RTW89_MKK][22] = 48,
+	[3][1][2][0][RTW89_IC][22] = 44,
+	[3][1][2][0][RTW89_ACMA][22] = 44,
+	[3][1][2][0][RTW89_FCC][45] = 127,
+	[3][1][2][0][RTW89_ETSI][45] = 127,
+	[3][1][2][0][RTW89_MKK][45] = 127,
+	[3][1][2][0][RTW89_IC][45] = 127,
+	[3][1][2][0][RTW89_ACMA][45] = 127,
+	[3][1][2][1][RTW89_FCC][7] = 14,
+	[3][1][2][1][RTW89_ETSI][7] = 42,
+	[3][1][2][1][RTW89_MKK][7] = 36,
+	[3][1][2][1][RTW89_IC][7] = 32,
+	[3][1][2][1][RTW89_ACMA][7] = 32,
+	[3][1][2][1][RTW89_FCC][22] = 30,
+	[3][1][2][1][RTW89_ETSI][22] = 42,
+	[3][1][2][1][RTW89_MKK][22] = 48,
+	[3][1][2][1][RTW89_IC][22] = 32,
+	[3][1][2][1][RTW89_ACMA][22] = 32,
+	[3][1][2][1][RTW89_FCC][45] = 127,
+	[3][1][2][1][RTW89_ETSI][45] = 127,
+	[3][1][2][1][RTW89_MKK][45] = 127,
+	[3][1][2][1][RTW89_IC][45] = 127,
+	[3][1][2][1][RTW89_ACMA][45] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+				  [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+				  [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
+	[0][0][1][0][RTW89_WW][0] = 72,
+	[0][0][1][0][RTW89_WW][2] = 72,
+	[0][0][1][0][RTW89_WW][4] = 72,
+	[0][0][1][0][RTW89_WW][6] = 72,
+	[0][0][1][0][RTW89_WW][8] = 72,
+	[0][0][1][0][RTW89_WW][10] = 72,
+	[0][0][1][0][RTW89_WW][12] = 72,
+	[0][0][1][0][RTW89_WW][14] = 72,
+	[0][0][1][0][RTW89_WW][15] = 72,
+	[0][0][1][0][RTW89_WW][17] = 72,
+	[0][0][1][0][RTW89_WW][19] = 72,
+	[0][0][1][0][RTW89_WW][21] = 72,
+	[0][0][1][0][RTW89_WW][23] = 72,
+	[0][0][1][0][RTW89_WW][25] = 72,
+	[0][0][1][0][RTW89_WW][27] = 72,
+	[0][0][1][0][RTW89_WW][29] = 72,
+	[0][0][1][0][RTW89_WW][30] = 72,
+	[0][0][1][0][RTW89_WW][32] = 72,
+	[0][0][1][0][RTW89_WW][34] = 72,
+	[0][0][1][0][RTW89_WW][36] = 72,
+	[0][0][1][0][RTW89_WW][38] = 72,
+	[0][0][1][0][RTW89_WW][40] = 72,
+	[0][0][1][0][RTW89_WW][42] = 72,
+	[0][0][1][0][RTW89_WW][44] = 72,
+	[0][0][1][0][RTW89_WW][45] = 72,
+	[0][0][1][0][RTW89_WW][47] = 72,
+	[0][0][1][0][RTW89_WW][49] = 72,
+	[0][0][1][0][RTW89_WW][51] = 72,
+ [0][0][1][0][RTW89_WW][53] = 72, + [0][0][1][0][RTW89_WW][55] = 72, + [0][0][1][0][RTW89_WW][57] = 72, + [0][0][1][0][RTW89_WW][59] = 72, + [0][0][1][0][RTW89_WW][60] = 72, + [0][0][1][0][RTW89_WW][62] = 72, + [0][0][1][0][RTW89_WW][64] = 72, + [0][0][1][0][RTW89_WW][66] = 72, + [0][0][1][0][RTW89_WW][68] = 72, + [0][0][1][0][RTW89_WW][70] = 72, + [0][0][1][0][RTW89_WW][72] = 72, + [0][0][1][0][RTW89_WW][74] = 72, + [0][0][1][0][RTW89_WW][75] = 72, + [0][0][1][0][RTW89_WW][77] = 72, + [0][0][1][0][RTW89_WW][79] = 72, + [0][0][1][0][RTW89_WW][81] = 72, + [0][0][1][0][RTW89_WW][83] = 72, + [0][0][1][0][RTW89_WW][85] = 72, + [0][0][1][0][RTW89_WW][87] = 72, + [0][0][1][0][RTW89_WW][89] = 72, + [0][0][1][0][RTW89_WW][90] = 72, + [0][0][1][0][RTW89_WW][92] = 72, + [0][0][1][0][RTW89_WW][94] = 72, + [0][0][1][0][RTW89_WW][96] = 72, + [0][0][1][0][RTW89_WW][98] = 72, + [0][0][1][0][RTW89_WW][100] = 72, + [0][0][1][0][RTW89_WW][102] = 72, + [0][0][1][0][RTW89_WW][104] = 72, + [0][0][1][0][RTW89_WW][105] = 72, + [0][0][1][0][RTW89_WW][107] = 72, + [0][0][1][0][RTW89_WW][109] = 72, + [0][0][1][0][RTW89_WW][111] = 0, + [0][0][1][0][RTW89_WW][113] = 0, + [0][0][1][0][RTW89_WW][115] = 0, + [0][0][1][0][RTW89_WW][117] = 0, + [0][0][1][0][RTW89_WW][119] = 0, + [0][1][1][0][RTW89_WW][0] = 60, + [0][1][1][0][RTW89_WW][2] = 60, + [0][1][1][0][RTW89_WW][4] = 60, + [0][1][1][0][RTW89_WW][6] = 60, + [0][1][1][0][RTW89_WW][8] = 60, + [0][1][1][0][RTW89_WW][10] = 60, + [0][1][1][0][RTW89_WW][12] = 60, + [0][1][1][0][RTW89_WW][14] = 60, + [0][1][1][0][RTW89_WW][15] = 60, + [0][1][1][0][RTW89_WW][17] = 60, + [0][1][1][0][RTW89_WW][19] = 60, + [0][1][1][0][RTW89_WW][21] = 60, + [0][1][1][0][RTW89_WW][23] = 60, + [0][1][1][0][RTW89_WW][25] = 60, + [0][1][1][0][RTW89_WW][27] = 60, + [0][1][1][0][RTW89_WW][29] = 60, + [0][1][1][0][RTW89_WW][30] = 60, + [0][1][1][0][RTW89_WW][32] = 60, + [0][1][1][0][RTW89_WW][34] = 60, + [0][1][1][0][RTW89_WW][36] = 60, + [0][1][1][0][RTW89_WW][38] = 60, + [0][1][1][0][RTW89_WW][40] = 60, + [0][1][1][0][RTW89_WW][42] = 60, + [0][1][1][0][RTW89_WW][44] = 60, + [0][1][1][0][RTW89_WW][45] = 60, + [0][1][1][0][RTW89_WW][47] = 60, + [0][1][1][0][RTW89_WW][49] = 60, + [0][1][1][0][RTW89_WW][51] = 60, + [0][1][1][0][RTW89_WW][53] = 60, + [0][1][1][0][RTW89_WW][55] = 60, + [0][1][1][0][RTW89_WW][57] = 60, + [0][1][1][0][RTW89_WW][59] = 60, + [0][1][1][0][RTW89_WW][60] = 60, + [0][1][1][0][RTW89_WW][62] = 60, + [0][1][1][0][RTW89_WW][64] = 60, + [0][1][1][0][RTW89_WW][66] = 60, + [0][1][1][0][RTW89_WW][68] = 60, + [0][1][1][0][RTW89_WW][70] = 60, + [0][1][1][0][RTW89_WW][72] = 60, + [0][1][1][0][RTW89_WW][74] = 60, + [0][1][1][0][RTW89_WW][75] = 60, + [0][1][1][0][RTW89_WW][77] = 60, + [0][1][1][0][RTW89_WW][79] = 60, + [0][1][1][0][RTW89_WW][81] = 60, + [0][1][1][0][RTW89_WW][83] = 60, + [0][1][1][0][RTW89_WW][85] = 60, + [0][1][1][0][RTW89_WW][87] = 60, + [0][1][1][0][RTW89_WW][89] = 60, + [0][1][1][0][RTW89_WW][90] = 60, + [0][1][1][0][RTW89_WW][92] = 60, + [0][1][1][0][RTW89_WW][94] = 60, + [0][1][1][0][RTW89_WW][96] = 60, + [0][1][1][0][RTW89_WW][98] = 60, + [0][1][1][0][RTW89_WW][100] = 60, + [0][1][1][0][RTW89_WW][102] = 60, + [0][1][1][0][RTW89_WW][104] = 60, + [0][1][1][0][RTW89_WW][105] = 60, + [0][1][1][0][RTW89_WW][107] = 60, + [0][1][1][0][RTW89_WW][109] = 60, + [0][1][1][0][RTW89_WW][111] = 0, + [0][1][1][0][RTW89_WW][113] = 0, + [0][1][1][0][RTW89_WW][115] = 0, + [0][1][1][0][RTW89_WW][117] = 0, + [0][1][1][0][RTW89_WW][119] = 0, + [0][0][2][0][RTW89_WW][0] = 72, + 
[0][0][2][0][RTW89_WW][2] = 72, + [0][0][2][0][RTW89_WW][4] = 72, + [0][0][2][0][RTW89_WW][6] = 72, + [0][0][2][0][RTW89_WW][8] = 72, + [0][0][2][0][RTW89_WW][10] = 72, + [0][0][2][0][RTW89_WW][12] = 72, + [0][0][2][0][RTW89_WW][14] = 72, + [0][0][2][0][RTW89_WW][15] = 72, + [0][0][2][0][RTW89_WW][17] = 72, + [0][0][2][0][RTW89_WW][19] = 72, + [0][0][2][0][RTW89_WW][21] = 72, + [0][0][2][0][RTW89_WW][23] = 72, + [0][0][2][0][RTW89_WW][25] = 72, + [0][0][2][0][RTW89_WW][27] = 72, + [0][0][2][0][RTW89_WW][29] = 72, + [0][0][2][0][RTW89_WW][30] = 72, + [0][0][2][0][RTW89_WW][32] = 72, + [0][0][2][0][RTW89_WW][34] = 72, + [0][0][2][0][RTW89_WW][36] = 72, + [0][0][2][0][RTW89_WW][38] = 72, + [0][0][2][0][RTW89_WW][40] = 72, + [0][0][2][0][RTW89_WW][42] = 72, + [0][0][2][0][RTW89_WW][44] = 72, + [0][0][2][0][RTW89_WW][45] = 72, + [0][0][2][0][RTW89_WW][47] = 72, + [0][0][2][0][RTW89_WW][49] = 72, + [0][0][2][0][RTW89_WW][51] = 72, + [0][0][2][0][RTW89_WW][53] = 72, + [0][0][2][0][RTW89_WW][55] = 72, + [0][0][2][0][RTW89_WW][57] = 72, + [0][0][2][0][RTW89_WW][59] = 72, + [0][0][2][0][RTW89_WW][60] = 72, + [0][0][2][0][RTW89_WW][62] = 72, + [0][0][2][0][RTW89_WW][64] = 72, + [0][0][2][0][RTW89_WW][66] = 72, + [0][0][2][0][RTW89_WW][68] = 72, + [0][0][2][0][RTW89_WW][70] = 72, + [0][0][2][0][RTW89_WW][72] = 72, + [0][0][2][0][RTW89_WW][74] = 72, + [0][0][2][0][RTW89_WW][75] = 72, + [0][0][2][0][RTW89_WW][77] = 72, + [0][0][2][0][RTW89_WW][79] = 72, + [0][0][2][0][RTW89_WW][81] = 72, + [0][0][2][0][RTW89_WW][83] = 72, + [0][0][2][0][RTW89_WW][85] = 72, + [0][0][2][0][RTW89_WW][87] = 72, + [0][0][2][0][RTW89_WW][89] = 72, + [0][0][2][0][RTW89_WW][90] = 72, + [0][0][2][0][RTW89_WW][92] = 72, + [0][0][2][0][RTW89_WW][94] = 72, + [0][0][2][0][RTW89_WW][96] = 72, + [0][0][2][0][RTW89_WW][98] = 72, + [0][0][2][0][RTW89_WW][100] = 72, + [0][0][2][0][RTW89_WW][102] = 72, + [0][0][2][0][RTW89_WW][104] = 72, + [0][0][2][0][RTW89_WW][105] = 72, + [0][0][2][0][RTW89_WW][107] = 72, + [0][0][2][0][RTW89_WW][109] = 72, + [0][0][2][0][RTW89_WW][111] = 0, + [0][0][2][0][RTW89_WW][113] = 0, + [0][0][2][0][RTW89_WW][115] = 0, + [0][0][2][0][RTW89_WW][117] = 0, + [0][0][2][0][RTW89_WW][119] = 0, + [0][1][2][0][RTW89_WW][0] = 60, + [0][1][2][0][RTW89_WW][2] = 60, + [0][1][2][0][RTW89_WW][4] = 60, + [0][1][2][0][RTW89_WW][6] = 60, + [0][1][2][0][RTW89_WW][8] = 60, + [0][1][2][0][RTW89_WW][10] = 60, + [0][1][2][0][RTW89_WW][12] = 60, + [0][1][2][0][RTW89_WW][14] = 60, + [0][1][2][0][RTW89_WW][15] = 60, + [0][1][2][0][RTW89_WW][17] = 60, + [0][1][2][0][RTW89_WW][19] = 60, + [0][1][2][0][RTW89_WW][21] = 60, + [0][1][2][0][RTW89_WW][23] = 60, + [0][1][2][0][RTW89_WW][25] = 60, + [0][1][2][0][RTW89_WW][27] = 60, + [0][1][2][0][RTW89_WW][29] = 60, + [0][1][2][0][RTW89_WW][30] = 60, + [0][1][2][0][RTW89_WW][32] = 60, + [0][1][2][0][RTW89_WW][34] = 60, + [0][1][2][0][RTW89_WW][36] = 60, + [0][1][2][0][RTW89_WW][38] = 60, + [0][1][2][0][RTW89_WW][40] = 60, + [0][1][2][0][RTW89_WW][42] = 60, + [0][1][2][0][RTW89_WW][44] = 60, + [0][1][2][0][RTW89_WW][45] = 60, + [0][1][2][0][RTW89_WW][47] = 60, + [0][1][2][0][RTW89_WW][49] = 60, + [0][1][2][0][RTW89_WW][51] = 60, + [0][1][2][0][RTW89_WW][53] = 60, + [0][1][2][0][RTW89_WW][55] = 60, + [0][1][2][0][RTW89_WW][57] = 60, + [0][1][2][0][RTW89_WW][59] = 60, + [0][1][2][0][RTW89_WW][60] = 60, + [0][1][2][0][RTW89_WW][62] = 60, + [0][1][2][0][RTW89_WW][64] = 60, + [0][1][2][0][RTW89_WW][66] = 60, + [0][1][2][0][RTW89_WW][68] = 60, + [0][1][2][0][RTW89_WW][70] = 60, + 
[0][1][2][0][RTW89_WW][72] = 60, + [0][1][2][0][RTW89_WW][74] = 60, + [0][1][2][0][RTW89_WW][75] = 60, + [0][1][2][0][RTW89_WW][77] = 60, + [0][1][2][0][RTW89_WW][79] = 60, + [0][1][2][0][RTW89_WW][81] = 60, + [0][1][2][0][RTW89_WW][83] = 60, + [0][1][2][0][RTW89_WW][85] = 60, + [0][1][2][0][RTW89_WW][87] = 60, + [0][1][2][0][RTW89_WW][89] = 60, + [0][1][2][0][RTW89_WW][90] = 60, + [0][1][2][0][RTW89_WW][92] = 60, + [0][1][2][0][RTW89_WW][94] = 60, + [0][1][2][0][RTW89_WW][96] = 60, + [0][1][2][0][RTW89_WW][98] = 60, + [0][1][2][0][RTW89_WW][100] = 60, + [0][1][2][0][RTW89_WW][102] = 60, + [0][1][2][0][RTW89_WW][104] = 60, + [0][1][2][0][RTW89_WW][105] = 60, + [0][1][2][0][RTW89_WW][107] = 60, + [0][1][2][0][RTW89_WW][109] = 60, + [0][1][2][0][RTW89_WW][111] = 0, + [0][1][2][0][RTW89_WW][113] = 0, + [0][1][2][0][RTW89_WW][115] = 0, + [0][1][2][0][RTW89_WW][117] = 0, + [0][1][2][0][RTW89_WW][119] = 0, + [0][1][2][1][RTW89_WW][0] = 48, + [0][1][2][1][RTW89_WW][2] = 48, + [0][1][2][1][RTW89_WW][4] = 48, + [0][1][2][1][RTW89_WW][6] = 48, + [0][1][2][1][RTW89_WW][8] = 48, + [0][1][2][1][RTW89_WW][10] = 48, + [0][1][2][1][RTW89_WW][12] = 48, + [0][1][2][1][RTW89_WW][14] = 48, + [0][1][2][1][RTW89_WW][15] = 48, + [0][1][2][1][RTW89_WW][17] = 48, + [0][1][2][1][RTW89_WW][19] = 48, + [0][1][2][1][RTW89_WW][21] = 48, + [0][1][2][1][RTW89_WW][23] = 48, + [0][1][2][1][RTW89_WW][25] = 48, + [0][1][2][1][RTW89_WW][27] = 48, + [0][1][2][1][RTW89_WW][29] = 48, + [0][1][2][1][RTW89_WW][30] = 48, + [0][1][2][1][RTW89_WW][32] = 48, + [0][1][2][1][RTW89_WW][34] = 48, + [0][1][2][1][RTW89_WW][36] = 48, + [0][1][2][1][RTW89_WW][38] = 48, + [0][1][2][1][RTW89_WW][40] = 48, + [0][1][2][1][RTW89_WW][42] = 48, + [0][1][2][1][RTW89_WW][44] = 48, + [0][1][2][1][RTW89_WW][45] = 48, + [0][1][2][1][RTW89_WW][47] = 48, + [0][1][2][1][RTW89_WW][49] = 48, + [0][1][2][1][RTW89_WW][51] = 48, + [0][1][2][1][RTW89_WW][53] = 48, + [0][1][2][1][RTW89_WW][55] = 48, + [0][1][2][1][RTW89_WW][57] = 48, + [0][1][2][1][RTW89_WW][59] = 48, + [0][1][2][1][RTW89_WW][60] = 48, + [0][1][2][1][RTW89_WW][62] = 48, + [0][1][2][1][RTW89_WW][64] = 48, + [0][1][2][1][RTW89_WW][66] = 48, + [0][1][2][1][RTW89_WW][68] = 48, + [0][1][2][1][RTW89_WW][70] = 48, + [0][1][2][1][RTW89_WW][72] = 48, + [0][1][2][1][RTW89_WW][74] = 48, + [0][1][2][1][RTW89_WW][75] = 48, + [0][1][2][1][RTW89_WW][77] = 48, + [0][1][2][1][RTW89_WW][79] = 48, + [0][1][2][1][RTW89_WW][81] = 48, + [0][1][2][1][RTW89_WW][83] = 48, + [0][1][2][1][RTW89_WW][85] = 48, + [0][1][2][1][RTW89_WW][87] = 48, + [0][1][2][1][RTW89_WW][89] = 48, + [0][1][2][1][RTW89_WW][90] = 48, + [0][1][2][1][RTW89_WW][92] = 48, + [0][1][2][1][RTW89_WW][94] = 48, + [0][1][2][1][RTW89_WW][96] = 48, + [0][1][2][1][RTW89_WW][98] = 48, + [0][1][2][1][RTW89_WW][100] = 48, + [0][1][2][1][RTW89_WW][102] = 48, + [0][1][2][1][RTW89_WW][104] = 48, + [0][1][2][1][RTW89_WW][105] = 48, + [0][1][2][1][RTW89_WW][107] = 48, + [0][1][2][1][RTW89_WW][109] = 48, + [0][1][2][1][RTW89_WW][111] = 0, + [0][1][2][1][RTW89_WW][113] = 0, + [0][1][2][1][RTW89_WW][115] = 0, + [0][1][2][1][RTW89_WW][117] = 0, + [0][1][2][1][RTW89_WW][119] = 0, + [1][0][2][0][RTW89_WW][1] = 72, + [1][0][2][0][RTW89_WW][5] = 72, + [1][0][2][0][RTW89_WW][9] = 72, + [1][0][2][0][RTW89_WW][13] = 72, + [1][0][2][0][RTW89_WW][16] = 72, + [1][0][2][0][RTW89_WW][20] = 72, + [1][0][2][0][RTW89_WW][24] = 72, + [1][0][2][0][RTW89_WW][28] = 72, + [1][0][2][0][RTW89_WW][31] = 72, + [1][0][2][0][RTW89_WW][35] = 72, + [1][0][2][0][RTW89_WW][39] = 72, + 
[1][0][2][0][RTW89_WW][43] = 72, + [1][0][2][0][RTW89_WW][46] = 72, + [1][0][2][0][RTW89_WW][50] = 72, + [1][0][2][0][RTW89_WW][54] = 72, + [1][0][2][0][RTW89_WW][58] = 72, + [1][0][2][0][RTW89_WW][61] = 72, + [1][0][2][0][RTW89_WW][65] = 72, + [1][0][2][0][RTW89_WW][69] = 72, + [1][0][2][0][RTW89_WW][73] = 72, + [1][0][2][0][RTW89_WW][76] = 72, + [1][0][2][0][RTW89_WW][80] = 72, + [1][0][2][0][RTW89_WW][84] = 72, + [1][0][2][0][RTW89_WW][88] = 72, + [1][0][2][0][RTW89_WW][91] = 72, + [1][0][2][0][RTW89_WW][95] = 72, + [1][0][2][0][RTW89_WW][99] = 72, + [1][0][2][0][RTW89_WW][103] = 72, + [1][0][2][0][RTW89_WW][106] = 72, + [1][0][2][0][RTW89_WW][110] = 0, + [1][0][2][0][RTW89_WW][114] = 0, + [1][0][2][0][RTW89_WW][118] = 0, + [1][1][2][0][RTW89_WW][1] = 60, + [1][1][2][0][RTW89_WW][5] = 60, + [1][1][2][0][RTW89_WW][9] = 60, + [1][1][2][0][RTW89_WW][13] = 60, + [1][1][2][0][RTW89_WW][16] = 60, + [1][1][2][0][RTW89_WW][20] = 60, + [1][1][2][0][RTW89_WW][24] = 60, + [1][1][2][0][RTW89_WW][28] = 60, + [1][1][2][0][RTW89_WW][31] = 60, + [1][1][2][0][RTW89_WW][35] = 60, + [1][1][2][0][RTW89_WW][39] = 60, + [1][1][2][0][RTW89_WW][43] = 60, + [1][1][2][0][RTW89_WW][46] = 60, + [1][1][2][0][RTW89_WW][50] = 60, + [1][1][2][0][RTW89_WW][54] = 60, + [1][1][2][0][RTW89_WW][58] = 60, + [1][1][2][0][RTW89_WW][61] = 60, + [1][1][2][0][RTW89_WW][65] = 60, + [1][1][2][0][RTW89_WW][69] = 60, + [1][1][2][0][RTW89_WW][73] = 60, + [1][1][2][0][RTW89_WW][76] = 60, + [1][1][2][0][RTW89_WW][80] = 60, + [1][1][2][0][RTW89_WW][84] = 60, + [1][1][2][0][RTW89_WW][88] = 60, + [1][1][2][0][RTW89_WW][91] = 60, + [1][1][2][0][RTW89_WW][95] = 60, + [1][1][2][0][RTW89_WW][99] = 60, + [1][1][2][0][RTW89_WW][103] = 60, + [1][1][2][0][RTW89_WW][106] = 60, + [1][1][2][0][RTW89_WW][110] = 0, + [1][1][2][0][RTW89_WW][114] = 0, + [1][1][2][0][RTW89_WW][118] = 0, + [1][1][2][1][RTW89_WW][1] = 48, + [1][1][2][1][RTW89_WW][5] = 48, + [1][1][2][1][RTW89_WW][9] = 48, + [1][1][2][1][RTW89_WW][13] = 48, + [1][1][2][1][RTW89_WW][16] = 48, + [1][1][2][1][RTW89_WW][20] = 48, + [1][1][2][1][RTW89_WW][24] = 48, + [1][1][2][1][RTW89_WW][28] = 48, + [1][1][2][1][RTW89_WW][31] = 48, + [1][1][2][1][RTW89_WW][35] = 48, + [1][1][2][1][RTW89_WW][39] = 48, + [1][1][2][1][RTW89_WW][43] = 48, + [1][1][2][1][RTW89_WW][46] = 48, + [1][1][2][1][RTW89_WW][50] = 48, + [1][1][2][1][RTW89_WW][54] = 48, + [1][1][2][1][RTW89_WW][58] = 48, + [1][1][2][1][RTW89_WW][61] = 48, + [1][1][2][1][RTW89_WW][65] = 48, + [1][1][2][1][RTW89_WW][69] = 48, + [1][1][2][1][RTW89_WW][73] = 48, + [1][1][2][1][RTW89_WW][76] = 48, + [1][1][2][1][RTW89_WW][80] = 48, + [1][1][2][1][RTW89_WW][84] = 48, + [1][1][2][1][RTW89_WW][88] = 48, + [1][1][2][1][RTW89_WW][91] = 48, + [1][1][2][1][RTW89_WW][95] = 48, + [1][1][2][1][RTW89_WW][99] = 48, + [1][1][2][1][RTW89_WW][103] = 48, + [1][1][2][1][RTW89_WW][106] = 48, + [1][1][2][1][RTW89_WW][110] = 0, + [1][1][2][1][RTW89_WW][114] = 0, + [1][1][2][1][RTW89_WW][118] = 0, + [2][0][2][0][RTW89_WW][3] = 64, + [2][0][2][0][RTW89_WW][11] = 64, + [2][0][2][0][RTW89_WW][18] = 64, + [2][0][2][0][RTW89_WW][26] = 64, + [2][0][2][0][RTW89_WW][33] = 64, + [2][0][2][0][RTW89_WW][41] = 64, + [2][0][2][0][RTW89_WW][48] = 64, + [2][0][2][0][RTW89_WW][56] = 64, + [2][0][2][0][RTW89_WW][63] = 64, + [2][0][2][0][RTW89_WW][71] = 64, + [2][0][2][0][RTW89_WW][78] = 64, + [2][0][2][0][RTW89_WW][86] = 64, + [2][0][2][0][RTW89_WW][93] = 64, + [2][0][2][0][RTW89_WW][101] = 64, + [2][0][2][0][RTW89_WW][108] = 0, + [2][0][2][0][RTW89_WW][116] = 0, + 
[2][1][2][0][RTW89_WW][3] = 52, + [2][1][2][0][RTW89_WW][11] = 52, + [2][1][2][0][RTW89_WW][18] = 52, + [2][1][2][0][RTW89_WW][26] = 52, + [2][1][2][0][RTW89_WW][33] = 52, + [2][1][2][0][RTW89_WW][41] = 52, + [2][1][2][0][RTW89_WW][48] = 52, + [2][1][2][0][RTW89_WW][56] = 52, + [2][1][2][0][RTW89_WW][63] = 52, + [2][1][2][0][RTW89_WW][71] = 52, + [2][1][2][0][RTW89_WW][78] = 52, + [2][1][2][0][RTW89_WW][86] = 52, + [2][1][2][0][RTW89_WW][93] = 52, + [2][1][2][0][RTW89_WW][101] = 52, + [2][1][2][0][RTW89_WW][108] = 0, + [2][1][2][0][RTW89_WW][116] = 0, + [2][1][2][1][RTW89_WW][3] = 40, + [2][1][2][1][RTW89_WW][11] = 40, + [2][1][2][1][RTW89_WW][18] = 40, + [2][1][2][1][RTW89_WW][26] = 40, + [2][1][2][1][RTW89_WW][33] = 40, + [2][1][2][1][RTW89_WW][41] = 40, + [2][1][2][1][RTW89_WW][48] = 40, + [2][1][2][1][RTW89_WW][56] = 40, + [2][1][2][1][RTW89_WW][63] = 40, + [2][1][2][1][RTW89_WW][71] = 40, + [2][1][2][1][RTW89_WW][78] = 40, + [2][1][2][1][RTW89_WW][86] = 40, + [2][1][2][1][RTW89_WW][93] = 40, + [2][1][2][1][RTW89_WW][101] = 40, + [2][1][2][1][RTW89_WW][108] = 0, + [2][1][2][1][RTW89_WW][116] = 0, + [3][0][2][0][RTW89_WW][7] = 56, + [3][0][2][0][RTW89_WW][22] = 56, + [3][0][2][0][RTW89_WW][37] = 56, + [3][0][2][0][RTW89_WW][52] = 56, + [3][0][2][0][RTW89_WW][67] = 56, + [3][0][2][0][RTW89_WW][82] = 56, + [3][0][2][0][RTW89_WW][97] = 56, + [3][0][2][0][RTW89_WW][112] = 0, + [3][1][2][0][RTW89_WW][7] = 44, + [3][1][2][0][RTW89_WW][22] = 44, + [3][1][2][0][RTW89_WW][37] = 44, + [3][1][2][0][RTW89_WW][52] = 44, + [3][1][2][0][RTW89_WW][67] = 44, + [3][1][2][0][RTW89_WW][82] = 44, + [3][1][2][0][RTW89_WW][97] = 44, + [3][1][2][0][RTW89_WW][112] = 0, + [3][1][2][1][RTW89_WW][7] = 32, + [3][1][2][1][RTW89_WW][22] = 32, + [3][1][2][1][RTW89_WW][37] = 32, + [3][1][2][1][RTW89_WW][52] = 32, + [3][1][2][1][RTW89_WW][67] = 32, + [3][1][2][1][RTW89_WW][82] = 32, + [3][1][2][1][RTW89_WW][97] = 32, + [3][1][2][1][RTW89_WW][112] = 0, + [0][0][1][0][RTW89_FCC][0] = 72, + [0][0][1][0][RTW89_FCC][2] = 72, + [0][0][1][0][RTW89_FCC][4] = 72, + [0][0][1][0][RTW89_FCC][6] = 72, + [0][0][1][0][RTW89_FCC][8] = 72, + [0][0][1][0][RTW89_FCC][10] = 72, + [0][0][1][0][RTW89_FCC][12] = 72, + [0][0][1][0][RTW89_FCC][14] = 72, + [0][0][1][0][RTW89_FCC][15] = 72, + [0][0][1][0][RTW89_FCC][17] = 72, + [0][0][1][0][RTW89_FCC][19] = 72, + [0][0][1][0][RTW89_FCC][21] = 72, + [0][0][1][0][RTW89_FCC][23] = 72, + [0][0][1][0][RTW89_FCC][25] = 72, + [0][0][1][0][RTW89_FCC][27] = 72, + [0][0][1][0][RTW89_FCC][29] = 72, + [0][0][1][0][RTW89_FCC][30] = 72, + [0][0][1][0][RTW89_FCC][32] = 72, + [0][0][1][0][RTW89_FCC][34] = 72, + [0][0][1][0][RTW89_FCC][36] = 72, + [0][0][1][0][RTW89_FCC][38] = 72, + [0][0][1][0][RTW89_FCC][40] = 72, + [0][0][1][0][RTW89_FCC][42] = 72, + [0][0][1][0][RTW89_FCC][44] = 72, + [0][0][1][0][RTW89_FCC][45] = 72, + [0][0][1][0][RTW89_FCC][47] = 72, + [0][0][1][0][RTW89_FCC][49] = 72, + [0][0][1][0][RTW89_FCC][51] = 72, + [0][0][1][0][RTW89_FCC][53] = 72, + [0][0][1][0][RTW89_FCC][55] = 72, + [0][0][1][0][RTW89_FCC][57] = 72, + [0][0][1][0][RTW89_FCC][59] = 72, + [0][0][1][0][RTW89_FCC][60] = 72, + [0][0][1][0][RTW89_FCC][62] = 72, + [0][0][1][0][RTW89_FCC][64] = 72, + [0][0][1][0][RTW89_FCC][66] = 72, + [0][0][1][0][RTW89_FCC][68] = 72, + [0][0][1][0][RTW89_FCC][70] = 72, + [0][0][1][0][RTW89_FCC][72] = 72, + [0][0][1][0][RTW89_FCC][74] = 72, + [0][0][1][0][RTW89_FCC][75] = 72, + [0][0][1][0][RTW89_FCC][77] = 72, + [0][0][1][0][RTW89_FCC][79] = 72, + [0][0][1][0][RTW89_FCC][81] = 72, + 
[0][0][1][0][RTW89_FCC][83] = 72, + [0][0][1][0][RTW89_FCC][85] = 72, + [0][0][1][0][RTW89_FCC][87] = 72, + [0][0][1][0][RTW89_FCC][89] = 72, + [0][0][1][0][RTW89_FCC][90] = 72, + [0][0][1][0][RTW89_FCC][92] = 72, + [0][0][1][0][RTW89_FCC][94] = 72, + [0][0][1][0][RTW89_FCC][96] = 72, + [0][0][1][0][RTW89_FCC][98] = 72, + [0][0][1][0][RTW89_FCC][100] = 72, + [0][0][1][0][RTW89_FCC][102] = 72, + [0][0][1][0][RTW89_FCC][104] = 72, + [0][0][1][0][RTW89_FCC][105] = 72, + [0][0][1][0][RTW89_FCC][107] = 72, + [0][0][1][0][RTW89_FCC][109] = 72, + [0][0][1][0][RTW89_FCC][111] = 127, + [0][0][1][0][RTW89_FCC][113] = 127, + [0][0][1][0][RTW89_FCC][115] = 127, + [0][0][1][0][RTW89_FCC][117] = 127, + [0][0][1][0][RTW89_FCC][119] = 127, + [0][1][1][0][RTW89_FCC][0] = 60, + [0][1][1][0][RTW89_FCC][2] = 60, + [0][1][1][0][RTW89_FCC][4] = 60, + [0][1][1][0][RTW89_FCC][6] = 60, + [0][1][1][0][RTW89_FCC][8] = 60, + [0][1][1][0][RTW89_FCC][10] = 60, + [0][1][1][0][RTW89_FCC][12] = 60, + [0][1][1][0][RTW89_FCC][14] = 60, + [0][1][1][0][RTW89_FCC][15] = 60, + [0][1][1][0][RTW89_FCC][17] = 60, + [0][1][1][0][RTW89_FCC][19] = 60, + [0][1][1][0][RTW89_FCC][21] = 60, + [0][1][1][0][RTW89_FCC][23] = 60, + [0][1][1][0][RTW89_FCC][25] = 60, + [0][1][1][0][RTW89_FCC][27] = 60, + [0][1][1][0][RTW89_FCC][29] = 60, + [0][1][1][0][RTW89_FCC][30] = 60, + [0][1][1][0][RTW89_FCC][32] = 60, + [0][1][1][0][RTW89_FCC][34] = 60, + [0][1][1][0][RTW89_FCC][36] = 60, + [0][1][1][0][RTW89_FCC][38] = 60, + [0][1][1][0][RTW89_FCC][40] = 60, + [0][1][1][0][RTW89_FCC][42] = 60, + [0][1][1][0][RTW89_FCC][44] = 60, + [0][1][1][0][RTW89_FCC][45] = 60, + [0][1][1][0][RTW89_FCC][47] = 60, + [0][1][1][0][RTW89_FCC][49] = 60, + [0][1][1][0][RTW89_FCC][51] = 60, + [0][1][1][0][RTW89_FCC][53] = 60, + [0][1][1][0][RTW89_FCC][55] = 60, + [0][1][1][0][RTW89_FCC][57] = 60, + [0][1][1][0][RTW89_FCC][59] = 60, + [0][1][1][0][RTW89_FCC][60] = 60, + [0][1][1][0][RTW89_FCC][62] = 60, + [0][1][1][0][RTW89_FCC][64] = 60, + [0][1][1][0][RTW89_FCC][66] = 60, + [0][1][1][0][RTW89_FCC][68] = 60, + [0][1][1][0][RTW89_FCC][70] = 60, + [0][1][1][0][RTW89_FCC][72] = 60, + [0][1][1][0][RTW89_FCC][74] = 60, + [0][1][1][0][RTW89_FCC][75] = 60, + [0][1][1][0][RTW89_FCC][77] = 60, + [0][1][1][0][RTW89_FCC][79] = 60, + [0][1][1][0][RTW89_FCC][81] = 60, + [0][1][1][0][RTW89_FCC][83] = 60, + [0][1][1][0][RTW89_FCC][85] = 60, + [0][1][1][0][RTW89_FCC][87] = 60, + [0][1][1][0][RTW89_FCC][89] = 60, + [0][1][1][0][RTW89_FCC][90] = 60, + [0][1][1][0][RTW89_FCC][92] = 60, + [0][1][1][0][RTW89_FCC][94] = 60, + [0][1][1][0][RTW89_FCC][96] = 60, + [0][1][1][0][RTW89_FCC][98] = 60, + [0][1][1][0][RTW89_FCC][100] = 60, + [0][1][1][0][RTW89_FCC][102] = 60, + [0][1][1][0][RTW89_FCC][104] = 60, + [0][1][1][0][RTW89_FCC][105] = 60, + [0][1][1][0][RTW89_FCC][107] = 60, + [0][1][1][0][RTW89_FCC][109] = 60, + [0][1][1][0][RTW89_FCC][111] = 127, + [0][1][1][0][RTW89_FCC][113] = 127, + [0][1][1][0][RTW89_FCC][115] = 127, + [0][1][1][0][RTW89_FCC][117] = 127, + [0][1][1][0][RTW89_FCC][119] = 127, + [0][0][2][0][RTW89_FCC][0] = 72, + [0][0][2][0][RTW89_FCC][2] = 72, + [0][0][2][0][RTW89_FCC][4] = 72, + [0][0][2][0][RTW89_FCC][6] = 72, + [0][0][2][0][RTW89_FCC][8] = 72, + [0][0][2][0][RTW89_FCC][10] = 72, + [0][0][2][0][RTW89_FCC][12] = 72, + [0][0][2][0][RTW89_FCC][14] = 72, + [0][0][2][0][RTW89_FCC][15] = 72, + [0][0][2][0][RTW89_FCC][17] = 72, + [0][0][2][0][RTW89_FCC][19] = 72, + [0][0][2][0][RTW89_FCC][21] = 72, + [0][0][2][0][RTW89_FCC][23] = 72, + [0][0][2][0][RTW89_FCC][25] = 72, + 
+	[0][0][2][0][RTW89_FCC][27] = 72,
+	[0][0][2][0][RTW89_FCC][29] = 72,
+	[0][0][2][0][RTW89_FCC][30] = 72,
+	[0][0][2][0][RTW89_FCC][32] = 72,
+	[0][0][2][0][RTW89_FCC][34] = 72,
+	[0][0][2][0][RTW89_FCC][36] = 72,
+	[0][0][2][0][RTW89_FCC][38] = 72,
+	[0][0][2][0][RTW89_FCC][40] = 72,
+	[0][0][2][0][RTW89_FCC][42] = 72,
+	[0][0][2][0][RTW89_FCC][44] = 72,
+	[0][0][2][0][RTW89_FCC][45] = 72,
+	[0][0][2][0][RTW89_FCC][47] = 72,
+	[0][0][2][0][RTW89_FCC][49] = 72,
+	[0][0][2][0][RTW89_FCC][51] = 72,
+	[0][0][2][0][RTW89_FCC][53] = 72,
+	[0][0][2][0][RTW89_FCC][55] = 72,
+	[0][0][2][0][RTW89_FCC][57] = 72,
+	[0][0][2][0][RTW89_FCC][59] = 72,
+	[0][0][2][0][RTW89_FCC][60] = 72,
+	[0][0][2][0][RTW89_FCC][62] = 72,
+	[0][0][2][0][RTW89_FCC][64] = 72,
+	[0][0][2][0][RTW89_FCC][66] = 72,
+	[0][0][2][0][RTW89_FCC][68] = 72,
+	[0][0][2][0][RTW89_FCC][70] = 72,
+	[0][0][2][0][RTW89_FCC][72] = 72,
+	[0][0][2][0][RTW89_FCC][74] = 72,
+	[0][0][2][0][RTW89_FCC][75] = 72,
+	[0][0][2][0][RTW89_FCC][77] = 72,
+	[0][0][2][0][RTW89_FCC][79] = 72,
+	[0][0][2][0][RTW89_FCC][81] = 72,
+	[0][0][2][0][RTW89_FCC][83] = 72,
+	[0][0][2][0][RTW89_FCC][85] = 72,
+	[0][0][2][0][RTW89_FCC][87] = 72,
+	[0][0][2][0][RTW89_FCC][89] = 72,
+	[0][0][2][0][RTW89_FCC][90] = 72,
+	[0][0][2][0][RTW89_FCC][92] = 72,
+	[0][0][2][0][RTW89_FCC][94] = 72,
+	[0][0][2][0][RTW89_FCC][96] = 72,
+	[0][0][2][0][RTW89_FCC][98] = 72,
+	[0][0][2][0][RTW89_FCC][100] = 72,
+	[0][0][2][0][RTW89_FCC][102] = 72,
+	[0][0][2][0][RTW89_FCC][104] = 72,
+	[0][0][2][0][RTW89_FCC][105] = 72,
+	[0][0][2][0][RTW89_FCC][107] = 72,
+	[0][0][2][0][RTW89_FCC][109] = 72,
+	[0][0][2][0][RTW89_FCC][111] = 127,
+	[0][0][2][0][RTW89_FCC][113] = 127,
+	[0][0][2][0][RTW89_FCC][115] = 127,
+	[0][0][2][0][RTW89_FCC][117] = 127,
+	[0][0][2][0][RTW89_FCC][119] = 127,
+	[0][1][2][0][RTW89_FCC][0] = 60,
+	[0][1][2][0][RTW89_FCC][2] = 60,
+	[0][1][2][0][RTW89_FCC][4] = 60,
+	[0][1][2][0][RTW89_FCC][6] = 60,
+	[0][1][2][0][RTW89_FCC][8] = 60,
+	[0][1][2][0][RTW89_FCC][10] = 60,
+	[0][1][2][0][RTW89_FCC][12] = 60,
+	[0][1][2][0][RTW89_FCC][14] = 60,
+	[0][1][2][0][RTW89_FCC][15] = 60,
+	[0][1][2][0][RTW89_FCC][17] = 60,
+	[0][1][2][0][RTW89_FCC][19] = 60,
+	[0][1][2][0][RTW89_FCC][21] = 60,
+	[0][1][2][0][RTW89_FCC][23] = 60,
+	[0][1][2][0][RTW89_FCC][25] = 60,
+	[0][1][2][0][RTW89_FCC][27] = 60,
+	[0][1][2][0][RTW89_FCC][29] = 60,
+	[0][1][2][0][RTW89_FCC][30] = 60,
+	[0][1][2][0][RTW89_FCC][32] = 60,
+	[0][1][2][0][RTW89_FCC][34] = 60,
+	[0][1][2][0][RTW89_FCC][36] = 60,
+	[0][1][2][0][RTW89_FCC][38] = 60,
+	[0][1][2][0][RTW89_FCC][40] = 60,
+	[0][1][2][0][RTW89_FCC][42] = 60,
+	[0][1][2][0][RTW89_FCC][44] = 60,
+	[0][1][2][0][RTW89_FCC][45] = 60,
+	[0][1][2][0][RTW89_FCC][47] = 60,
+	[0][1][2][0][RTW89_FCC][49] = 60,
+	[0][1][2][0][RTW89_FCC][51] = 60,
+	[0][1][2][0][RTW89_FCC][53] = 60,
+	[0][1][2][0][RTW89_FCC][55] = 60,
+	[0][1][2][0][RTW89_FCC][57] = 60,
+	[0][1][2][0][RTW89_FCC][59] = 60,
+	[0][1][2][0][RTW89_FCC][60] = 60,
+	[0][1][2][0][RTW89_FCC][62] = 60,
+	[0][1][2][0][RTW89_FCC][64] = 60,
+	[0][1][2][0][RTW89_FCC][66] = 60,
+	[0][1][2][0][RTW89_FCC][68] = 60,
+	[0][1][2][0][RTW89_FCC][70] = 60,
+	[0][1][2][0][RTW89_FCC][72] = 60,
+	[0][1][2][0][RTW89_FCC][74] = 60,
+	[0][1][2][0][RTW89_FCC][75] = 60,
+	[0][1][2][0][RTW89_FCC][77] = 60,
+	[0][1][2][0][RTW89_FCC][79] = 60,
+	[0][1][2][0][RTW89_FCC][81] = 60,
+	[0][1][2][0][RTW89_FCC][83] = 60,
+	[0][1][2][0][RTW89_FCC][85] = 60,
+	[0][1][2][0][RTW89_FCC][87] = 60,
+	[0][1][2][0][RTW89_FCC][89] = 60,
+	[0][1][2][0][RTW89_FCC][90] = 60,
+	[0][1][2][0][RTW89_FCC][92] = 60,
+	[0][1][2][0][RTW89_FCC][94] = 60,
+	[0][1][2][0][RTW89_FCC][96] = 60,
+	[0][1][2][0][RTW89_FCC][98] = 60,
+	[0][1][2][0][RTW89_FCC][100] = 60,
+	[0][1][2][0][RTW89_FCC][102] = 60,
+	[0][1][2][0][RTW89_FCC][104] = 60,
+	[0][1][2][0][RTW89_FCC][105] = 60,
+	[0][1][2][0][RTW89_FCC][107] = 60,
+	[0][1][2][0][RTW89_FCC][109] = 60,
+	[0][1][2][0][RTW89_FCC][111] = 127,
+	[0][1][2][0][RTW89_FCC][113] = 127,
+	[0][1][2][0][RTW89_FCC][115] = 127,
+	[0][1][2][0][RTW89_FCC][117] = 127,
+	[0][1][2][0][RTW89_FCC][119] = 127,
+	[0][1][2][1][RTW89_FCC][0] = 48,
+	[0][1][2][1][RTW89_FCC][2] = 48,
+	[0][1][2][1][RTW89_FCC][4] = 48,
+	[0][1][2][1][RTW89_FCC][6] = 48,
+	[0][1][2][1][RTW89_FCC][8] = 48,
+	[0][1][2][1][RTW89_FCC][10] = 48,
+	[0][1][2][1][RTW89_FCC][12] = 48,
+	[0][1][2][1][RTW89_FCC][14] = 48,
+	[0][1][2][1][RTW89_FCC][15] = 48,
+	[0][1][2][1][RTW89_FCC][17] = 48,
+	[0][1][2][1][RTW89_FCC][19] = 48,
+	[0][1][2][1][RTW89_FCC][21] = 48,
+	[0][1][2][1][RTW89_FCC][23] = 48,
+	[0][1][2][1][RTW89_FCC][25] = 48,
+	[0][1][2][1][RTW89_FCC][27] = 48,
+	[0][1][2][1][RTW89_FCC][29] = 48,
+	[0][1][2][1][RTW89_FCC][30] = 48,
+	[0][1][2][1][RTW89_FCC][32] = 48,
+	[0][1][2][1][RTW89_FCC][34] = 48,
+	[0][1][2][1][RTW89_FCC][36] = 48,
+	[0][1][2][1][RTW89_FCC][38] = 48,
+	[0][1][2][1][RTW89_FCC][40] = 48,
+	[0][1][2][1][RTW89_FCC][42] = 48,
+	[0][1][2][1][RTW89_FCC][44] = 48,
+	[0][1][2][1][RTW89_FCC][45] = 48,
+	[0][1][2][1][RTW89_FCC][47] = 48,
+	[0][1][2][1][RTW89_FCC][49] = 48,
+	[0][1][2][1][RTW89_FCC][51] = 48,
+	[0][1][2][1][RTW89_FCC][53] = 48,
+	[0][1][2][1][RTW89_FCC][55] = 48,
+	[0][1][2][1][RTW89_FCC][57] = 48,
+	[0][1][2][1][RTW89_FCC][59] = 48,
+	[0][1][2][1][RTW89_FCC][60] = 48,
+	[0][1][2][1][RTW89_FCC][62] = 48,
+	[0][1][2][1][RTW89_FCC][64] = 48,
+	[0][1][2][1][RTW89_FCC][66] = 48,
+	[0][1][2][1][RTW89_FCC][68] = 48,
+	[0][1][2][1][RTW89_FCC][70] = 48,
+	[0][1][2][1][RTW89_FCC][72] = 48,
+	[0][1][2][1][RTW89_FCC][74] = 48,
+	[0][1][2][1][RTW89_FCC][75] = 48,
+	[0][1][2][1][RTW89_FCC][77] = 48,
+	[0][1][2][1][RTW89_FCC][79] = 48,
+	[0][1][2][1][RTW89_FCC][81] = 48,
+	[0][1][2][1][RTW89_FCC][83] = 48,
+	[0][1][2][1][RTW89_FCC][85] = 48,
+	[0][1][2][1][RTW89_FCC][87] = 48,
+	[0][1][2][1][RTW89_FCC][89] = 48,
+	[0][1][2][1][RTW89_FCC][90] = 48,
+	[0][1][2][1][RTW89_FCC][92] = 48,
+	[0][1][2][1][RTW89_FCC][94] = 48,
+	[0][1][2][1][RTW89_FCC][96] = 48,
+	[0][1][2][1][RTW89_FCC][98] = 48,
+	[0][1][2][1][RTW89_FCC][100] = 48,
+	[0][1][2][1][RTW89_FCC][102] = 48,
+	[0][1][2][1][RTW89_FCC][104] = 48,
+	[0][1][2][1][RTW89_FCC][105] = 48,
+	[0][1][2][1][RTW89_FCC][107] = 48,
+	[0][1][2][1][RTW89_FCC][109] = 48,
+	[0][1][2][1][RTW89_FCC][111] = 127,
+	[0][1][2][1][RTW89_FCC][113] = 127,
+	[0][1][2][1][RTW89_FCC][115] = 127,
+	[0][1][2][1][RTW89_FCC][117] = 127,
+	[0][1][2][1][RTW89_FCC][119] = 127,
+	[1][0][2][0][RTW89_FCC][1] = 72,
+	[1][0][2][0][RTW89_FCC][5] = 72,
+	[1][0][2][0][RTW89_FCC][9] = 72,
+	[1][0][2][0][RTW89_FCC][13] = 72,
+	[1][0][2][0][RTW89_FCC][16] = 72,
+	[1][0][2][0][RTW89_FCC][20] = 72,
+	[1][0][2][0][RTW89_FCC][24] = 72,
+	[1][0][2][0][RTW89_FCC][28] = 72,
+	[1][0][2][0][RTW89_FCC][31] = 72,
+	[1][0][2][0][RTW89_FCC][35] = 72,
+	[1][0][2][0][RTW89_FCC][39] = 72,
+	[1][0][2][0][RTW89_FCC][43] = 72,
+	[1][0][2][0][RTW89_FCC][46] = 72,
+	[1][0][2][0][RTW89_FCC][50] = 72,
+	[1][0][2][0][RTW89_FCC][54] = 72,
+	[1][0][2][0][RTW89_FCC][58] = 72,
+	[1][0][2][0][RTW89_FCC][61] = 72,
+	[1][0][2][0][RTW89_FCC][65] = 72,
+	[1][0][2][0][RTW89_FCC][69] = 72,
+	[1][0][2][0][RTW89_FCC][73] = 72,
+	[1][0][2][0][RTW89_FCC][76] = 72,
+	[1][0][2][0][RTW89_FCC][80] = 72,
+	[1][0][2][0][RTW89_FCC][84] = 72,
+	[1][0][2][0][RTW89_FCC][88] = 72,
+	[1][0][2][0][RTW89_FCC][91] = 72,
+	[1][0][2][0][RTW89_FCC][95] = 72,
+	[1][0][2][0][RTW89_FCC][99] = 72,
+	[1][0][2][0][RTW89_FCC][103] = 72,
+	[1][0][2][0][RTW89_FCC][106] = 72,
+	[1][0][2][0][RTW89_FCC][110] = 127,
+	[1][0][2][0][RTW89_FCC][114] = 127,
+	[1][0][2][0][RTW89_FCC][118] = 127,
+	[1][1][2][0][RTW89_FCC][1] = 60,
+	[1][1][2][0][RTW89_FCC][5] = 60,
+	[1][1][2][0][RTW89_FCC][9] = 60,
+	[1][1][2][0][RTW89_FCC][13] = 60,
+	[1][1][2][0][RTW89_FCC][16] = 60,
+	[1][1][2][0][RTW89_FCC][20] = 60,
+	[1][1][2][0][RTW89_FCC][24] = 60,
+	[1][1][2][0][RTW89_FCC][28] = 60,
+	[1][1][2][0][RTW89_FCC][31] = 60,
+	[1][1][2][0][RTW89_FCC][35] = 60,
+	[1][1][2][0][RTW89_FCC][39] = 60,
+	[1][1][2][0][RTW89_FCC][43] = 60,
+	[1][1][2][0][RTW89_FCC][46] = 60,
+	[1][1][2][0][RTW89_FCC][50] = 60,
+	[1][1][2][0][RTW89_FCC][54] = 60,
+	[1][1][2][0][RTW89_FCC][58] = 60,
+	[1][1][2][0][RTW89_FCC][61] = 60,
+	[1][1][2][0][RTW89_FCC][65] = 60,
+	[1][1][2][0][RTW89_FCC][69] = 60,
+	[1][1][2][0][RTW89_FCC][73] = 60,
+	[1][1][2][0][RTW89_FCC][76] = 60,
+	[1][1][2][0][RTW89_FCC][80] = 60,
+	[1][1][2][0][RTW89_FCC][84] = 60,
+	[1][1][2][0][RTW89_FCC][88] = 60,
+	[1][1][2][0][RTW89_FCC][91] = 60,
+	[1][1][2][0][RTW89_FCC][95] = 60,
+	[1][1][2][0][RTW89_FCC][99] = 60,
+	[1][1][2][0][RTW89_FCC][103] = 60,
+	[1][1][2][0][RTW89_FCC][106] = 60,
+	[1][1][2][0][RTW89_FCC][110] = 127,
+	[1][1][2][0][RTW89_FCC][114] = 127,
+	[1][1][2][0][RTW89_FCC][118] = 127,
+	[1][1][2][1][RTW89_FCC][1] = 48,
+	[1][1][2][1][RTW89_FCC][5] = 48,
+	[1][1][2][1][RTW89_FCC][9] = 48,
+	[1][1][2][1][RTW89_FCC][13] = 48,
+	[1][1][2][1][RTW89_FCC][16] = 48,
+	[1][1][2][1][RTW89_FCC][20] = 48,
+	[1][1][2][1][RTW89_FCC][24] = 48,
+	[1][1][2][1][RTW89_FCC][28] = 48,
+	[1][1][2][1][RTW89_FCC][31] = 48,
+	[1][1][2][1][RTW89_FCC][35] = 48,
+	[1][1][2][1][RTW89_FCC][39] = 48,
+	[1][1][2][1][RTW89_FCC][43] = 48,
+	[1][1][2][1][RTW89_FCC][46] = 48,
+	[1][1][2][1][RTW89_FCC][50] = 48,
+	[1][1][2][1][RTW89_FCC][54] = 48,
+	[1][1][2][1][RTW89_FCC][58] = 48,
+	[1][1][2][1][RTW89_FCC][61] = 48,
+	[1][1][2][1][RTW89_FCC][65] = 48,
+	[1][1][2][1][RTW89_FCC][69] = 48,
+	[1][1][2][1][RTW89_FCC][73] = 48,
+	[1][1][2][1][RTW89_FCC][76] = 48,
+	[1][1][2][1][RTW89_FCC][80] = 48,
+	[1][1][2][1][RTW89_FCC][84] = 48,
+	[1][1][2][1][RTW89_FCC][88] = 48,
+	[1][1][2][1][RTW89_FCC][91] = 48,
+	[1][1][2][1][RTW89_FCC][95] = 48,
+	[1][1][2][1][RTW89_FCC][99] = 48,
+	[1][1][2][1][RTW89_FCC][103] = 48,
+	[1][1][2][1][RTW89_FCC][106] = 48,
+	[1][1][2][1][RTW89_FCC][110] = 127,
+	[1][1][2][1][RTW89_FCC][114] = 127,
+	[1][1][2][1][RTW89_FCC][118] = 127,
+	[2][0][2][0][RTW89_FCC][3] = 64,
+	[2][0][2][0][RTW89_FCC][11] = 64,
+	[2][0][2][0][RTW89_FCC][18] = 64,
+	[2][0][2][0][RTW89_FCC][26] = 64,
+	[2][0][2][0][RTW89_FCC][33] = 64,
+	[2][0][2][0][RTW89_FCC][41] = 64,
+	[2][0][2][0][RTW89_FCC][48] = 64,
+	[2][0][2][0][RTW89_FCC][56] = 64,
+	[2][0][2][0][RTW89_FCC][63] = 64,
+	[2][0][2][0][RTW89_FCC][71] = 64,
+	[2][0][2][0][RTW89_FCC][78] = 64,
+	[2][0][2][0][RTW89_FCC][86] = 64,
+	[2][0][2][0][RTW89_FCC][93] = 64,
+	[2][0][2][0][RTW89_FCC][101] = 64,
+	[2][0][2][0][RTW89_FCC][108] = 127,
+	[2][0][2][0][RTW89_FCC][116] = 127,
+	[2][1][2][0][RTW89_FCC][3] = 52,
+	[2][1][2][0][RTW89_FCC][11] = 52,
+	[2][1][2][0][RTW89_FCC][18] = 52,
+	[2][1][2][0][RTW89_FCC][26] = 52,
+	[2][1][2][0][RTW89_FCC][33] = 52,
+	[2][1][2][0][RTW89_FCC][41] = 52,
+	[2][1][2][0][RTW89_FCC][48] = 52,
+	[2][1][2][0][RTW89_FCC][56] = 52,
+	[2][1][2][0][RTW89_FCC][63] = 52,
+	[2][1][2][0][RTW89_FCC][71] = 52,
+	[2][1][2][0][RTW89_FCC][78] = 52,
+	[2][1][2][0][RTW89_FCC][86] = 52,
+	[2][1][2][0][RTW89_FCC][93] = 52,
+	[2][1][2][0][RTW89_FCC][101] = 52,
+	[2][1][2][0][RTW89_FCC][108] = 127,
+	[2][1][2][0][RTW89_FCC][116] = 127,
+	[2][1][2][1][RTW89_FCC][3] = 40,
+	[2][1][2][1][RTW89_FCC][11] = 40,
+	[2][1][2][1][RTW89_FCC][18] = 40,
+	[2][1][2][1][RTW89_FCC][26] = 40,
+	[2][1][2][1][RTW89_FCC][33] = 40,
+	[2][1][2][1][RTW89_FCC][41] = 40,
+	[2][1][2][1][RTW89_FCC][48] = 40,
+	[2][1][2][1][RTW89_FCC][56] = 40,
+	[2][1][2][1][RTW89_FCC][63] = 40,
+	[2][1][2][1][RTW89_FCC][71] = 40,
+	[2][1][2][1][RTW89_FCC][78] = 40,
+	[2][1][2][1][RTW89_FCC][86] = 40,
+	[2][1][2][1][RTW89_FCC][93] = 40,
+	[2][1][2][1][RTW89_FCC][101] = 40,
+	[2][1][2][1][RTW89_FCC][108] = 127,
+	[2][1][2][1][RTW89_FCC][116] = 127,
+	[3][0][2][0][RTW89_FCC][7] = 56,
+	[3][0][2][0][RTW89_FCC][22] = 56,
+	[3][0][2][0][RTW89_FCC][37] = 56,
+	[3][0][2][0][RTW89_FCC][52] = 56,
+	[3][0][2][0][RTW89_FCC][67] = 56,
+	[3][0][2][0][RTW89_FCC][82] = 56,
+	[3][0][2][0][RTW89_FCC][97] = 56,
+	[3][0][2][0][RTW89_FCC][112] = 127,
+	[3][1][2][0][RTW89_FCC][7] = 44,
+	[3][1][2][0][RTW89_FCC][22] = 44,
+	[3][1][2][0][RTW89_FCC][37] = 44,
+	[3][1][2][0][RTW89_FCC][52] = 44,
+	[3][1][2][0][RTW89_FCC][67] = 44,
+	[3][1][2][0][RTW89_FCC][82] = 44,
+	[3][1][2][0][RTW89_FCC][97] = 44,
+	[3][1][2][0][RTW89_FCC][112] = 127,
+	[3][1][2][1][RTW89_FCC][7] = 32,
+	[3][1][2][1][RTW89_FCC][22] = 32,
+	[3][1][2][1][RTW89_FCC][37] = 32,
+	[3][1][2][1][RTW89_FCC][52] = 32,
+	[3][1][2][1][RTW89_FCC][67] = 32,
+	[3][1][2][1][RTW89_FCC][82] = 32,
+	[3][1][2][1][RTW89_FCC][97] = 32,
+	[3][1][2][1][RTW89_FCC][112] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+				    [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+	[0][0][RTW89_WW][0] = 32,
+	[0][0][RTW89_WW][1] = 32,
+	[0][0][RTW89_WW][2] = 32,
+	[0][0][RTW89_WW][3] = 32,
+	[0][0][RTW89_WW][4] = 32,
+	[0][0][RTW89_WW][5] = 32,
+	[0][0][RTW89_WW][6] = 32,
+	[0][0][RTW89_WW][7] = 32,
+	[0][0][RTW89_WW][8] = 32,
+	[0][0][RTW89_WW][9] = 32,
+	[0][0][RTW89_WW][10] = 32,
+	[0][0][RTW89_WW][11] = 32,
+	[0][0][RTW89_WW][12] = 24,
+	[0][0][RTW89_WW][13] = 0,
+	[0][1][RTW89_WW][0] = 20,
+	[0][1][RTW89_WW][1] = 22,
+	[0][1][RTW89_WW][2] = 22,
+	[0][1][RTW89_WW][3] = 22,
+	[0][1][RTW89_WW][4] = 22,
+	[0][1][RTW89_WW][5] = 22,
+	[0][1][RTW89_WW][6] = 22,
+	[0][1][RTW89_WW][7] = 22,
+	[0][1][RTW89_WW][8] = 22,
+	[0][1][RTW89_WW][9] = 22,
+	[0][1][RTW89_WW][10] = 22,
+	[0][1][RTW89_WW][11] = 22,
+	[0][1][RTW89_WW][12] = 20,
+	[0][1][RTW89_WW][13] = 0,
+	[1][0][RTW89_WW][0] = 42,
+	[1][0][RTW89_WW][1] = 44,
+	[1][0][RTW89_WW][2] = 44,
+	[1][0][RTW89_WW][3] = 44,
+	[1][0][RTW89_WW][4] = 44,
+	[1][0][RTW89_WW][5] = 44,
+	[1][0][RTW89_WW][6] = 44,
+	[1][0][RTW89_WW][7] = 44,
+	[1][0][RTW89_WW][8] = 44,
+	[1][0][RTW89_WW][9] = 44,
+	[1][0][RTW89_WW][10] = 44,
+	[1][0][RTW89_WW][11] = 42,
+	[1][0][RTW89_WW][12] = 30,
+	[1][0][RTW89_WW][13] = 0,
+	[1][1][RTW89_WW][0] = 32,
+	[1][1][RTW89_WW][1] = 32,
+	[1][1][RTW89_WW][2] = 32,
+	[1][1][RTW89_WW][3] = 32,
+	[1][1][RTW89_WW][4] = 32,
+	[1][1][RTW89_WW][5] = 32,
+	[1][1][RTW89_WW][6] = 32,
+	[1][1][RTW89_WW][7] = 32,
+	[1][1][RTW89_WW][8] = 32,
+	[1][1][RTW89_WW][9] = 32,
+	[1][1][RTW89_WW][10] = 32,
+	[1][1][RTW89_WW][11] = 30,
+	[1][1][RTW89_WW][12] = 24,
+	[1][1][RTW89_WW][13] = 0,
+	[2][0][RTW89_WW][0] = 56,
+	[2][0][RTW89_WW][1] = 56,
+	[2][0][RTW89_WW][2] = 56,
+	[2][0][RTW89_WW][3] = 56,
+	[2][0][RTW89_WW][4] = 56,
+	[2][0][RTW89_WW][5] = 56,
+	[2][0][RTW89_WW][6] = 56,
+	[2][0][RTW89_WW][7] = 56,
+	[2][0][RTW89_WW][8] = 56,
+	[2][0][RTW89_WW][9] = 56,
+	[2][0][RTW89_WW][10] = 56,
+	[2][0][RTW89_WW][11] = 42,
+	[2][0][RTW89_WW][12] = 38,
+	[2][0][RTW89_WW][13] = 0,
+	[2][1][RTW89_WW][0] = 44,
+	[2][1][RTW89_WW][1] = 44,
+	[2][1][RTW89_WW][2] = 44,
+	[2][1][RTW89_WW][3] = 44,
+	[2][1][RTW89_WW][4] = 44,
+	[2][1][RTW89_WW][5] = 44,
+	[2][1][RTW89_WW][6] = 44,
+	[2][1][RTW89_WW][7] = 44,
+	[2][1][RTW89_WW][8] = 44,
+	[2][1][RTW89_WW][9] = 44,
+	[2][1][RTW89_WW][10] = 44,
+	[2][1][RTW89_WW][11] = 30,
+	[2][1][RTW89_WW][12] = 26,
+	[2][1][RTW89_WW][13] = 0,
+	[0][0][RTW89_FCC][0] = 60,
+	[0][0][RTW89_ETSI][0] = 34,
+	[0][0][RTW89_MKK][0] = 36,
+	[0][0][RTW89_IC][0] = 68,
+	[0][0][RTW89_ACMA][0] = 32,
+	[0][0][RTW89_FCC][1] = 60,
+	[0][0][RTW89_ETSI][1] = 38,
+	[0][0][RTW89_MKK][1] = 40,
+	[0][0][RTW89_IC][1] = 68,
+	[0][0][RTW89_ACMA][1] = 32,
+	[0][0][RTW89_FCC][2] = 64,
+	[0][0][RTW89_ETSI][2] = 38,
+	[0][0][RTW89_MKK][2] = 40,
+	[0][0][RTW89_IC][2] = 72,
+	[0][0][RTW89_ACMA][2] = 32,
+	[0][0][RTW89_FCC][3] = 68,
+	[0][0][RTW89_ETSI][3] = 38,
+	[0][0][RTW89_MKK][3] = 40,
+	[0][0][RTW89_IC][3] = 76,
+	[0][0][RTW89_ACMA][3] = 32,
+	[0][0][RTW89_FCC][4] = 68,
+	[0][0][RTW89_ETSI][4] = 38,
+	[0][0][RTW89_MKK][4] = 40,
+	[0][0][RTW89_IC][4] = 76,
+	[0][0][RTW89_ACMA][4] = 32,
+	[0][0][RTW89_FCC][5] = 76,
+	[0][0][RTW89_ETSI][5] = 38,
+	[0][0][RTW89_MKK][5] = 40,
+	[0][0][RTW89_IC][5] = 84,
+	[0][0][RTW89_ACMA][5] = 32,
+	[0][0][RTW89_FCC][6] = 66,
+	[0][0][RTW89_ETSI][6] = 38,
+	[0][0][RTW89_MKK][6] = 40,
+	[0][0][RTW89_IC][6] = 74,
+	[0][0][RTW89_ACMA][6] = 32,
+	[0][0][RTW89_FCC][7] = 66,
+	[0][0][RTW89_ETSI][7] = 38,
+	[0][0][RTW89_MKK][7] = 40,
+	[0][0][RTW89_IC][7] = 74,
+	[0][0][RTW89_ACMA][7] = 32,
+	[0][0][RTW89_FCC][8] = 62,
+	[0][0][RTW89_ETSI][8] = 38,
+	[0][0][RTW89_MKK][8] = 40,
+	[0][0][RTW89_IC][8] = 70,
+	[0][0][RTW89_ACMA][8] = 32,
+	[0][0][RTW89_FCC][9] = 58,
+	[0][0][RTW89_ETSI][9] = 38,
+	[0][0][RTW89_MKK][9] = 40,
+	[0][0][RTW89_IC][9] = 66,
+	[0][0][RTW89_ACMA][9] = 32,
+	[0][0][RTW89_FCC][10] = 58,
+	[0][0][RTW89_ETSI][10] = 38,
+	[0][0][RTW89_MKK][10] = 40,
+	[0][0][RTW89_IC][10] = 66,
+	[0][0][RTW89_ACMA][10] = 32,
+	[0][0][RTW89_FCC][11] = 42,
+	[0][0][RTW89_ETSI][11] = 38,
+	[0][0][RTW89_MKK][11] = 40,
+	[0][0][RTW89_IC][11] = 56,
+	[0][0][RTW89_ACMA][11] = 32,
+	[0][0][RTW89_FCC][12] = 24,
+	[0][0][RTW89_ETSI][12] = 34,
+	[0][0][RTW89_MKK][12] = 36,
+	[0][0][RTW89_IC][12] = 32,
+	[0][0][RTW89_ACMA][12] = 32,
+	[0][0][RTW89_FCC][13] = 127,
+	[0][0][RTW89_ETSI][13] = 127,
+	[0][0][RTW89_MKK][13] = 127,
+	[0][0][RTW89_IC][13] = 127,
+	[0][0][RTW89_ACMA][13] = 127,
+	[0][1][RTW89_FCC][0] = 46,
+	[0][1][RTW89_ETSI][0] = 22,
+	[0][1][RTW89_MKK][0] = 24,
+	[0][1][RTW89_IC][0] = 62,
+	[0][1][RTW89_ACMA][0] = 20,
+	[0][1][RTW89_FCC][1] = 46,
+	[0][1][RTW89_ETSI][1] = 24,
+	[0][1][RTW89_MKK][1] = 30,
+	[0][1][RTW89_IC][1] = 62,
+	[0][1][RTW89_ACMA][1] = 22,
+	[0][1][RTW89_FCC][2] = 50,
+	[0][1][RTW89_ETSI][2] = 24,
+	[0][1][RTW89_MKK][2] = 30,
+	[0][1][RTW89_IC][2] = 66,
+	[0][1][RTW89_ACMA][2] = 22,
+	[0][1][RTW89_FCC][3] = 54,
+	[0][1][RTW89_ETSI][3] = 24,
+	[0][1][RTW89_MKK][3] = 30,
+	[0][1][RTW89_IC][3] = 70,
+	[0][1][RTW89_ACMA][3] = 22,
+	[0][1][RTW89_FCC][4] = 58,
+	[0][1][RTW89_ETSI][4] = 24,
+	[0][1][RTW89_MKK][4] = 30,
+	[0][1][RTW89_IC][4] = 74,
+	[0][1][RTW89_ACMA][4] = 22,
+	[0][1][RTW89_FCC][5] = 66,
+	[0][1][RTW89_ETSI][5] = 24,
+	[0][1][RTW89_MKK][5] = 30,
+	[0][1][RTW89_IC][5] = 74,
+	[0][1][RTW89_ACMA][5] = 22,
+	[0][1][RTW89_FCC][6] = 58,
+	[0][1][RTW89_ETSI][6] = 24,
+	[0][1][RTW89_MKK][6] = 30,
+	[0][1][RTW89_IC][6] = 72,
+	[0][1][RTW89_ACMA][6] = 22,
+	[0][1][RTW89_FCC][7] = 54,
+	[0][1][RTW89_ETSI][7] = 24,
+	[0][1][RTW89_MKK][7] = 30,
+	[0][1][RTW89_IC][7] = 68,
+	[0][1][RTW89_ACMA][7] = 22,
+	[0][1][RTW89_FCC][8] = 50,
+	[0][1][RTW89_ETSI][8] = 24,
+	[0][1][RTW89_MKK][8] = 30,
+	[0][1][RTW89_IC][8] = 64,
+	[0][1][RTW89_ACMA][8] = 22,
+	[0][1][RTW89_FCC][9] = 46,
+	[0][1][RTW89_ETSI][9] = 24,
+	[0][1][RTW89_MKK][9] = 30,
+	[0][1][RTW89_IC][9] = 60,
+	[0][1][RTW89_ACMA][9] = 22,
+	[0][1][RTW89_FCC][10] = 46,
+	[0][1][RTW89_ETSI][10] = 24,
+	[0][1][RTW89_MKK][10] = 30,
+	[0][1][RTW89_IC][10] = 60,
+	[0][1][RTW89_ACMA][10] = 22,
+	[0][1][RTW89_FCC][11] = 30,
+	[0][1][RTW89_ETSI][11] = 24,
+	[0][1][RTW89_MKK][11] = 30,
+	[0][1][RTW89_IC][11] = 52,
+	[0][1][RTW89_ACMA][11] = 22,
+	[0][1][RTW89_FCC][12] = 22,
+	[0][1][RTW89_ETSI][12] = 20,
+	[0][1][RTW89_MKK][12] = 24,
+	[0][1][RTW89_IC][12] = 30,
+	[0][1][RTW89_ACMA][12] = 20,
+	[0][1][RTW89_FCC][13] = 127,
+	[0][1][RTW89_ETSI][13] = 127,
+	[0][1][RTW89_MKK][13] = 127,
+	[0][1][RTW89_IC][13] = 127,
+	[0][1][RTW89_ACMA][13] = 127,
+	[1][0][RTW89_FCC][0] = 64,
+	[1][0][RTW89_ETSI][0] = 46,
+	[1][0][RTW89_MKK][0] = 48,
+	[1][0][RTW89_IC][0] = 78,
+	[1][0][RTW89_ACMA][0] = 42,
+	[1][0][RTW89_FCC][1] = 64,
+	[1][0][RTW89_ETSI][1] = 46,
+	[1][0][RTW89_MKK][1] = 48,
+	[1][0][RTW89_IC][1] = 78,
+	[1][0][RTW89_ACMA][1] = 44,
+	[1][0][RTW89_FCC][2] = 68,
+	[1][0][RTW89_ETSI][2] = 46,
+	[1][0][RTW89_MKK][2] = 48,
+	[1][0][RTW89_IC][2] = 82,
+	[1][0][RTW89_ACMA][2] = 44,
+	[1][0][RTW89_FCC][3] = 70,
+	[1][0][RTW89_ETSI][3] = 46,
+	[1][0][RTW89_MKK][3] = 48,
+	[1][0][RTW89_IC][3] = 84,
+	[1][0][RTW89_ACMA][3] = 44,
+	[1][0][RTW89_FCC][4] = 70,
+	[1][0][RTW89_ETSI][4] = 46,
+	[1][0][RTW89_MKK][4] = 48,
+	[1][0][RTW89_IC][4] = 84,
+	[1][0][RTW89_ACMA][4] = 44,
+	[1][0][RTW89_FCC][5] = 76,
+	[1][0][RTW89_ETSI][5] = 46,
+	[1][0][RTW89_MKK][5] = 48,
+	[1][0][RTW89_IC][5] = 84,
+	[1][0][RTW89_ACMA][5] = 44,
+	[1][0][RTW89_FCC][6] = 64,
+	[1][0][RTW89_ETSI][6] = 44,
+	[1][0][RTW89_MKK][6] = 48,
+	[1][0][RTW89_IC][6] = 78,
+	[1][0][RTW89_ACMA][6] = 44,
+	[1][0][RTW89_FCC][7] = 64,
+	[1][0][RTW89_ETSI][7] = 46,
+	[1][0][RTW89_MKK][7] = 48,
+	[1][0][RTW89_IC][7] = 78,
+	[1][0][RTW89_ACMA][7] = 44,
+	[1][0][RTW89_FCC][8] = 64,
+	[1][0][RTW89_ETSI][8] = 46,
+	[1][0][RTW89_MKK][8] = 48,
+	[1][0][RTW89_IC][8] = 78,
+	[1][0][RTW89_ACMA][8] = 44,
+	[1][0][RTW89_FCC][9] = 60,
+	[1][0][RTW89_ETSI][9] = 46,
+	[1][0][RTW89_MKK][9] = 48,
+	[1][0][RTW89_IC][9] = 74,
+	[1][0][RTW89_ACMA][9] = 44,
+	[1][0][RTW89_FCC][10] = 60,
+	[1][0][RTW89_ETSI][10] = 46,
+	[1][0][RTW89_MKK][10] = 48,
+	[1][0][RTW89_IC][10] = 74,
+	[1][0][RTW89_ACMA][10] = 44,
+	[1][0][RTW89_FCC][11] = 42,
+	[1][0][RTW89_ETSI][11] = 46,
+	[1][0][RTW89_MKK][11] = 48,
+	[1][0][RTW89_IC][11] = 72,
+	[1][0][RTW89_ACMA][11] = 44,
+	[1][0][RTW89_FCC][12] = 30,
+	[1][0][RTW89_ETSI][12] = 46,
+	[1][0][RTW89_MKK][12] = 46,
+	[1][0][RTW89_IC][12] = 38,
+	[1][0][RTW89_ACMA][12] = 42,
+	[1][0][RTW89_FCC][13] = 127,
+	[1][0][RTW89_ETSI][13] = 127,
+	[1][0][RTW89_MKK][13] = 127,
+	[1][0][RTW89_IC][13] = 127,
+	[1][0][RTW89_ACMA][13] = 127,
+	[1][1][RTW89_FCC][0] = 46,
+	[1][1][RTW89_ETSI][0] = 32,
+	[1][1][RTW89_MKK][0] = 34,
+	[1][1][RTW89_IC][0] = 66,
+	[1][1][RTW89_ACMA][0] = 32,
+	[1][1][RTW89_FCC][1] = 46,
+	[1][1][RTW89_ETSI][1] = 34,
+	[1][1][RTW89_MKK][1] = 34,
+	[1][1][RTW89_IC][1] = 66,
+	[1][1][RTW89_ACMA][1] = 32,
+	[1][1][RTW89_FCC][2] = 50,
+	[1][1][RTW89_ETSI][2] = 34,
+	[1][1][RTW89_MKK][2] = 34,
+	[1][1][RTW89_IC][2] = 70,
+	[1][1][RTW89_ACMA][2] = 32,
+	[1][1][RTW89_FCC][3] = 54,
+	[1][1][RTW89_ETSI][3] = 34,
+	[1][1][RTW89_MKK][3] = 34,
+	[1][1][RTW89_IC][3] = 74,
+	[1][1][RTW89_ACMA][3] = 32,
+	[1][1][RTW89_FCC][4] = 58,
+	[1][1][RTW89_ETSI][4] = 34,
+	[1][1][RTW89_MKK][4] = 34,
+	[1][1][RTW89_IC][4] = 74,
+	[1][1][RTW89_ACMA][4] = 32,
+	[1][1][RTW89_FCC][5] = 66,
+	[1][1][RTW89_ETSI][5] = 34,
+	[1][1][RTW89_MKK][5] = 34,
+	[1][1][RTW89_IC][5] = 74,
+	[1][1][RTW89_ACMA][5] = 32,
+	[1][1][RTW89_FCC][6] = 58,
+	[1][1][RTW89_ETSI][6] = 34,
+	[1][1][RTW89_MKK][6] = 34,
+	[1][1][RTW89_IC][6] = 74,
+	[1][1][RTW89_ACMA][6] = 32,
+	[1][1][RTW89_FCC][7] = 54,
+	[1][1][RTW89_ETSI][7] = 34,
+	[1][1][RTW89_MKK][7] = 34,
+	[1][1][RTW89_IC][7] = 74,
+	[1][1][RTW89_ACMA][7] = 32,
+	[1][1][RTW89_FCC][8] = 50,
+	[1][1][RTW89_ETSI][8] = 34,
+	[1][1][RTW89_MKK][8] = 34,
+	[1][1][RTW89_IC][8] = 70,
+	[1][1][RTW89_ACMA][8] = 32,
+	[1][1][RTW89_FCC][9] = 46,
+	[1][1][RTW89_ETSI][9] = 34,
+	[1][1][RTW89_MKK][9] = 34,
+	[1][1][RTW89_IC][9] = 66,
+	[1][1][RTW89_ACMA][9] = 32,
+	[1][1][RTW89_FCC][10] = 46,
+	[1][1][RTW89_ETSI][10] = 34,
+	[1][1][RTW89_MKK][10] = 34,
+	[1][1][RTW89_IC][10] = 66,
+	[1][1][RTW89_ACMA][10] = 32,
+	[1][1][RTW89_FCC][11] = 30,
+	[1][1][RTW89_ETSI][11] = 34,
+	[1][1][RTW89_MKK][11] = 34,
+	[1][1][RTW89_IC][11] = 48,
+	[1][1][RTW89_ACMA][11] = 32,
+	[1][1][RTW89_FCC][12] = 24,
+	[1][1][RTW89_ETSI][12] = 34,
+	[1][1][RTW89_MKK][12] = 34,
+	[1][1][RTW89_IC][12] = 32,
+	[1][1][RTW89_ACMA][12] = 32,
+	[1][1][RTW89_FCC][13] = 127,
+	[1][1][RTW89_ETSI][13] = 127,
+	[1][1][RTW89_MKK][13] = 127,
+	[1][1][RTW89_IC][13] = 127,
+	[1][1][RTW89_ACMA][13] = 127,
+	[2][0][RTW89_FCC][0] = 64,
+	[2][0][RTW89_ETSI][0] = 58,
+	[2][0][RTW89_MKK][0] = 58,
+	[2][0][RTW89_IC][0] = 78,
+	[2][0][RTW89_ACMA][0] = 56,
+	[2][0][RTW89_FCC][1] = 64,
+	[2][0][RTW89_ETSI][1] = 58,
+	[2][0][RTW89_MKK][1] = 58,
+	[2][0][RTW89_IC][1] = 78,
+	[2][0][RTW89_ACMA][1] = 56,
+	[2][0][RTW89_FCC][2] = 66,
+	[2][0][RTW89_ETSI][2] = 58,
+	[2][0][RTW89_MKK][2] = 58,
+	[2][0][RTW89_IC][2] = 80,
+	[2][0][RTW89_ACMA][2] = 56,
+	[2][0][RTW89_FCC][3] = 66,
+	[2][0][RTW89_ETSI][3] = 58,
+	[2][0][RTW89_MKK][3] = 58,
+	[2][0][RTW89_IC][3] = 80,
+	[2][0][RTW89_ACMA][3] = 56,
+	[2][0][RTW89_FCC][4] = 66,
+	[2][0][RTW89_ETSI][4] = 58,
+	[2][0][RTW89_MKK][4] = 58,
+	[2][0][RTW89_IC][4] = 80,
+	[2][0][RTW89_ACMA][4] = 56,
+	[2][0][RTW89_FCC][5] = 76,
+	[2][0][RTW89_ETSI][5] = 58,
+	[2][0][RTW89_MKK][5] = 58,
+	[2][0][RTW89_IC][5] = 84,
+	[2][0][RTW89_ACMA][5] = 56,
+	[2][0][RTW89_FCC][6] = 62,
+	[2][0][RTW89_ETSI][6] = 56,
+	[2][0][RTW89_MKK][6] = 58,
+	[2][0][RTW89_IC][6] = 76,
+	[2][0][RTW89_ACMA][6] = 56,
+	[2][0][RTW89_FCC][7] = 62,
+	[2][0][RTW89_ETSI][7] = 58,
+	[2][0][RTW89_MKK][7] = 58,
+	[2][0][RTW89_IC][7] = 76,
+	[2][0][RTW89_ACMA][7] = 56,
+	[2][0][RTW89_FCC][8] = 62,
+	[2][0][RTW89_ETSI][8] = 58,
+	[2][0][RTW89_MKK][8] = 58,
+	[2][0][RTW89_IC][8] = 76,
+	[2][0][RTW89_ACMA][8] = 56,
+	[2][0][RTW89_FCC][9] = 60,
+	[2][0][RTW89_ETSI][9] = 58,
+	[2][0][RTW89_MKK][9] = 58,
+	[2][0][RTW89_IC][9] = 74,
+	[2][0][RTW89_ACMA][9] = 56,
+	[2][0][RTW89_FCC][10] = 60,
+	[2][0][RTW89_ETSI][10] = 58,
+	[2][0][RTW89_MKK][10] = 58,
+	[2][0][RTW89_IC][10] = 74,
+	[2][0][RTW89_ACMA][10] = 56,
+	[2][0][RTW89_FCC][11] = 42,
+	[2][0][RTW89_ETSI][11] = 58,
+	[2][0][RTW89_MKK][11] = 58,
+	[2][0][RTW89_IC][11] = 66,
+	[2][0][RTW89_ACMA][11] = 56,
+	[2][0][RTW89_FCC][12] = 38,
+	[2][0][RTW89_ETSI][12] = 58,
+	[2][0][RTW89_MKK][12] = 58,
+	[2][0][RTW89_IC][12] = 56,
+	[2][0][RTW89_ACMA][12] = 56,
+	[2][0][RTW89_FCC][13] = 127,
+	[2][0][RTW89_ETSI][13] = 127,
+	[2][0][RTW89_MKK][13] = 127,
+	[2][0][RTW89_IC][13] = 127,
+	[2][0][RTW89_ACMA][13] = 127,
+	[2][1][RTW89_FCC][0] = 46,
+	[2][1][RTW89_ETSI][0] = 46,
+	[2][1][RTW89_MKK][0] = 46,
+	[2][1][RTW89_IC][0] = 70,
+	[2][1][RTW89_ACMA][0] = 44,
+	[2][1][RTW89_FCC][1] = 46,
+	[2][1][RTW89_ETSI][1] = 46,
+	[2][1][RTW89_MKK][1] = 46,
+	[2][1][RTW89_IC][1] = 70,
+	[2][1][RTW89_ACMA][1] = 44,
+	[2][1][RTW89_FCC][2] = 50,
+	[2][1][RTW89_ETSI][2] = 46,
+	[2][1][RTW89_MKK][2] = 46,
+	[2][1][RTW89_IC][2] = 74,
+	[2][1][RTW89_ACMA][2] = 44,
+	[2][1][RTW89_FCC][3] = 54,
+	[2][1][RTW89_ETSI][3] = 46,
+	[2][1][RTW89_MKK][3] = 46,
+	[2][1][RTW89_IC][3] = 78,
+	[2][1][RTW89_ACMA][3] = 44,
+	[2][1][RTW89_FCC][4] = 56,
+	[2][1][RTW89_ETSI][4] = 46,
+	[2][1][RTW89_MKK][4] = 46,
+	[2][1][RTW89_IC][4] = 80,
+	[2][1][RTW89_ACMA][4] = 44,
+	[2][1][RTW89_FCC][5] = 72,
+	[2][1][RTW89_ETSI][5] = 46,
+	[2][1][RTW89_MKK][5] = 46,
+	[2][1][RTW89_IC][5] = 80,
+	[2][1][RTW89_ACMA][5] = 44,
+	[2][1][RTW89_FCC][6] = 54,
+	[2][1][RTW89_ETSI][6] = 44,
+	[2][1][RTW89_MKK][6] = 46,
+	[2][1][RTW89_IC][6] = 78,
+	[2][1][RTW89_ACMA][6] = 44,
+	[2][1][RTW89_FCC][7] = 54,
+	[2][1][RTW89_ETSI][7] = 46,
+	[2][1][RTW89_MKK][7] = 46,
+	[2][1][RTW89_IC][7] = 78,
+	[2][1][RTW89_ACMA][7] = 44,
+	[2][1][RTW89_FCC][8] = 50,
+	[2][1][RTW89_ETSI][8] = 46,
+	[2][1][RTW89_MKK][8] = 46,
+	[2][1][RTW89_IC][8] = 74,
+	[2][1][RTW89_ACMA][8] = 44,
+	[2][1][RTW89_FCC][9] = 46,
+	[2][1][RTW89_ETSI][9] = 46,
+	[2][1][RTW89_MKK][9] = 46,
+	[2][1][RTW89_IC][9] = 70,
+	[2][1][RTW89_ACMA][9] = 44,
+	[2][1][RTW89_FCC][10] = 46,
+	[2][1][RTW89_ETSI][10] = 46,
+	[2][1][RTW89_MKK][10] = 46,
+	[2][1][RTW89_IC][10] = 70,
+	[2][1][RTW89_ACMA][10] = 44,
+	[2][1][RTW89_FCC][11] = 30,
+	[2][1][RTW89_ETSI][11] = 46,
+	[2][1][RTW89_MKK][11] = 46,
+	[2][1][RTW89_IC][11] = 60,
+	[2][1][RTW89_ACMA][11] = 44,
+	[2][1][RTW89_FCC][12] = 26,
+	[2][1][RTW89_ETSI][12] = 44,
+	[2][1][RTW89_MKK][12] = 46,
+	[2][1][RTW89_IC][12] = 44,
+	[2][1][RTW89_ACMA][12] = 42,
+	[2][1][RTW89_FCC][13] = 127,
+	[2][1][RTW89_ETSI][13] = 127,
+	[2][1][RTW89_MKK][13] = 127,
+	[2][1][RTW89_IC][13] = 127,
+	[2][1][RTW89_ACMA][13] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+				    [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+	[0][0][RTW89_WW][0] = 24,
+	[0][0][RTW89_WW][2] = 24,
+	[0][0][RTW89_WW][4] = 22,
+	[0][0][RTW89_WW][6] = 22,
+	[0][0][RTW89_WW][8] = 18,
+	[0][0][RTW89_WW][10] = 18,
+	[0][0][RTW89_WW][12] = 24,
+	[0][0][RTW89_WW][14] = 24,
+	[0][0][RTW89_WW][15] = 24,
+	[0][0][RTW89_WW][17] = 24,
+	[0][0][RTW89_WW][19] = 24,
+	[0][0][RTW89_WW][21] = 24,
+	[0][0][RTW89_WW][23] = 24,
+	[0][0][RTW89_WW][25] = 30,
+	[0][0][RTW89_WW][27] = 30,
+	[0][0][RTW89_WW][29] = 30,
+	[0][0][RTW89_WW][31] = 24,
+	[0][0][RTW89_WW][33] = 24,
+	[0][0][RTW89_WW][35] = 24,
+	[0][0][RTW89_WW][37] = 44,
+	[0][0][RTW89_WW][38] = 28,
+	[0][0][RTW89_WW][40] = 28,
+	[0][0][RTW89_WW][42] = 28,
+	[0][0][RTW89_WW][44] = 28,
+	[0][0][RTW89_WW][46] = 28,
+	[0][0][RTW89_WW][48] = 24,
+	[0][0][RTW89_WW][50] = 24,
+	[0][0][RTW89_WW][52] = 24,
+	[0][1][RTW89_WW][0] = 0,
+	[0][1][RTW89_WW][2] = 4,
+	[0][1][RTW89_WW][4] = 0,
+	[0][1][RTW89_WW][6] = 0,
+	[0][1][RTW89_WW][8] = 12,
+	[0][1][RTW89_WW][10] = 12,
+	[0][1][RTW89_WW][12] = 12,
+	[0][1][RTW89_WW][14] = 12,
+	[0][1][RTW89_WW][15] = 12,
+	[0][1][RTW89_WW][17] = 12,
+	[0][1][RTW89_WW][19] = 12,
+	[0][1][RTW89_WW][21] = 12,
+	[0][1][RTW89_WW][23] = 12,
+	[0][1][RTW89_WW][25] = 18,
+	[0][1][RTW89_WW][27] = 16,
+	[0][1][RTW89_WW][29] = 16,
+	[0][1][RTW89_WW][31] = 12,
+	[0][1][RTW89_WW][33] = 12,
+	[0][1][RTW89_WW][35] = 12,
+	[0][1][RTW89_WW][37] = 30,
+	[0][1][RTW89_WW][38] = 16,
+	[0][1][RTW89_WW][40] = 16,
+	[0][1][RTW89_WW][42] = 16,
+	[0][1][RTW89_WW][44] = 16,
+	[0][1][RTW89_WW][46] = 16,
+	[0][1][RTW89_WW][48] = 12,
+	[0][1][RTW89_WW][50] = 12,
+	[0][1][RTW89_WW][52] = 12,
+	[1][0][RTW89_WW][0] = 34,
+	[1][0][RTW89_WW][2] = 34,
+	[1][0][RTW89_WW][4] = 34,
+	[1][0][RTW89_WW][6] = 34,
+	[1][0][RTW89_WW][8] = 34,
+	[1][0][RTW89_WW][10] = 34,
+	[1][0][RTW89_WW][12] = 34,
+	[1][0][RTW89_WW][14] = 34,
+	[1][0][RTW89_WW][15] = 34,
+	[1][0][RTW89_WW][17] = 34,
+	[1][0][RTW89_WW][19] = 34,
+	[1][0][RTW89_WW][21] = 34,
+	[1][0][RTW89_WW][23] = 34,
+	[1][0][RTW89_WW][25] = 40,
+	[1][0][RTW89_WW][27] = 42,
+	[1][0][RTW89_WW][29] = 42,
+	[1][0][RTW89_WW][31] = 34,
+	[1][0][RTW89_WW][33] = 34,
+	[1][0][RTW89_WW][35] = 34,
+	[1][0][RTW89_WW][37] = 56,
+	[1][0][RTW89_WW][38] = 28,
+	[1][0][RTW89_WW][40] = 28,
+	[1][0][RTW89_WW][42] = 28,
+	[1][0][RTW89_WW][44] = 28,
+	[1][0][RTW89_WW][46] = 28,
+	[1][0][RTW89_WW][48] = 36,
+	[1][0][RTW89_WW][50] = 36,
+	[1][0][RTW89_WW][52] = 36,
+	[1][1][RTW89_WW][0] = 10,
+	[1][1][RTW89_WW][2] = 14,
+	[1][1][RTW89_WW][4] = 10,
+	[1][1][RTW89_WW][6] = 10,
+	[1][1][RTW89_WW][8] = 20,
+	[1][1][RTW89_WW][10] = 20,
+	[1][1][RTW89_WW][12] = 22,
+	[1][1][RTW89_WW][14] = 22,
+	[1][1][RTW89_WW][15] = 22,
+	[1][1][RTW89_WW][17] = 22,
+	[1][1][RTW89_WW][19] = 22,
+	[1][1][RTW89_WW][21] = 22,
+	[1][1][RTW89_WW][23] = 22,
+	[1][1][RTW89_WW][25] = 28,
+	[1][1][RTW89_WW][27] = 30,
+	[1][1][RTW89_WW][29] = 30,
+	[1][1][RTW89_WW][31] = 22,
+	[1][1][RTW89_WW][33] = 22,
+	[1][1][RTW89_WW][35] = 22,
+	[1][1][RTW89_WW][37] = 40,
+	[1][1][RTW89_WW][38] = 16,
+	[1][1][RTW89_WW][40] = 16,
+	[1][1][RTW89_WW][42] = 16,
+	[1][1][RTW89_WW][44] = 16,
+	[1][1][RTW89_WW][46] = 16,
+	[1][1][RTW89_WW][48] = 24,
+	[1][1][RTW89_WW][50] = 24,
+	[1][1][RTW89_WW][52] = 24,
+	[2][0][RTW89_WW][0] = 46,
+	[2][0][RTW89_WW][2] = 46,
+	[2][0][RTW89_WW][4] = 46,
+	[2][0][RTW89_WW][6] = 46,
+	[2][0][RTW89_WW][8] = 44,
+	[2][0][RTW89_WW][10] = 44,
+	[2][0][RTW89_WW][12] = 48,
+	[2][0][RTW89_WW][14] = 48,
+	[2][0][RTW89_WW][15] = 48,
+	[2][0][RTW89_WW][17] = 48,
+	[2][0][RTW89_WW][19] = 48,
+	[2][0][RTW89_WW][21] = 48,
+	[2][0][RTW89_WW][23] = 48,
+	[2][0][RTW89_WW][25] = 52,
+	[2][0][RTW89_WW][27] = 52,
+	[2][0][RTW89_WW][29] = 52,
+	[2][0][RTW89_WW][31] = 48,
+	[2][0][RTW89_WW][33] = 48,
+	[2][0][RTW89_WW][35] = 48,
+	[2][0][RTW89_WW][37] = 62,
+	[2][0][RTW89_WW][38] = 28,
+	[2][0][RTW89_WW][40] = 28,
+	[2][0][RTW89_WW][42] = 28,
+	[2][0][RTW89_WW][44] = 28,
+	[2][0][RTW89_WW][46] = 28,
+	[2][0][RTW89_WW][48] = 48,
+	[2][0][RTW89_WW][50] = 48,
+	[2][0][RTW89_WW][52] = 48,
+	[2][1][RTW89_WW][0] = 20,
+	[2][1][RTW89_WW][2] = 18,
+	[2][1][RTW89_WW][4] = 22,
+	[2][1][RTW89_WW][6] = 22,
+	[2][1][RTW89_WW][8] = 32,
+	[2][1][RTW89_WW][10] = 32,
+	[2][1][RTW89_WW][12] = 36,
+	[2][1][RTW89_WW][14] = 36,
+	[2][1][RTW89_WW][15] = 36,
+	[2][1][RTW89_WW][17] = 36,
+	[2][1][RTW89_WW][19] = 36,
+	[2][1][RTW89_WW][21] = 36,
+	[2][1][RTW89_WW][23] = 36,
+	[2][1][RTW89_WW][25] = 40,
+	[2][1][RTW89_WW][27] = 40,
+	[2][1][RTW89_WW][29] = 40,
+	[2][1][RTW89_WW][31] = 36,
+	[2][1][RTW89_WW][33] = 36,
+	[2][1][RTW89_WW][35] = 36,
+	[2][1][RTW89_WW][37] = 42,
+	[2][1][RTW89_WW][38] = 16,
+	[2][1][RTW89_WW][40] = 16,
+	[2][1][RTW89_WW][42] = 16,
+	[2][1][RTW89_WW][44] = 16,
+	[2][1][RTW89_WW][46] = 16,
+	[2][1][RTW89_WW][48] = 36,
+	[2][1][RTW89_WW][50] = 36,
+	[2][1][RTW89_WW][52] = 36,
+	[0][0][RTW89_FCC][0] = 44,
+	[0][0][RTW89_ETSI][0] = 30,
+	[0][0][RTW89_MKK][0] = 36,
+	[0][0][RTW89_IC][0] = 24,
+	[0][0][RTW89_ACMA][0] = 24,
+	[0][0][RTW89_FCC][2] = 44,
+	[0][0][RTW89_ETSI][2] = 30,
+	[0][0][RTW89_MKK][2] = 36,
+	[0][0][RTW89_IC][2] = 24,
+	[0][0][RTW89_ACMA][2] = 24,
+	[0][0][RTW89_FCC][4] = 44,
+	[0][0][RTW89_ETSI][4] = 30,
+	[0][0][RTW89_MKK][4] = 22,
+	[0][0][RTW89_IC][4] = 24,
+	[0][0][RTW89_ACMA][4] = 24,
+	[0][0][RTW89_FCC][6] = 44,
+	[0][0][RTW89_ETSI][6] = 30,
+	[0][0][RTW89_MKK][6] = 22,
+	[0][0][RTW89_IC][6] = 24,
+	[0][0][RTW89_ACMA][6] = 24,
+	[0][0][RTW89_FCC][8] = 44,
+	[0][0][RTW89_ETSI][8] = 28,
+	[0][0][RTW89_MKK][8] = 18,
+	[0][0][RTW89_IC][8] = 52,
+	[0][0][RTW89_ACMA][8] = 24,
+	[0][0][RTW89_FCC][10] = 44,
+	[0][0][RTW89_ETSI][10] = 28,
+	[0][0][RTW89_MKK][10] = 18,
+	[0][0][RTW89_IC][10] = 52,
+	[0][0][RTW89_ACMA][10] = 24,
+	[0][0][RTW89_FCC][12] = 44,
+	[0][0][RTW89_ETSI][12] = 28,
+	[0][0][RTW89_MKK][12] = 34,
+	[0][0][RTW89_IC][12] = 52,
+	[0][0][RTW89_ACMA][12] = 24,
+	[0][0][RTW89_FCC][14] = 44,
+	[0][0][RTW89_ETSI][14] = 28,
+	[0][0][RTW89_MKK][14] = 34,
+	[0][0][RTW89_IC][14] = 52,
+	[0][0][RTW89_ACMA][14] = 24,
+	[0][0][RTW89_FCC][15] = 44,
+	[0][0][RTW89_ETSI][15] = 30,
+	[0][0][RTW89_MKK][15] = 56,
+	[0][0][RTW89_IC][15] = 52,
+	[0][0][RTW89_ACMA][15] = 24,
+	[0][0][RTW89_FCC][17] = 44,
+	[0][0][RTW89_ETSI][17] = 30,
+	[0][0][RTW89_MKK][17] = 58,
+	[0][0][RTW89_IC][17] = 52,
+	[0][0][RTW89_ACMA][17] = 24,
+	[0][0][RTW89_FCC][19] = 44,
+	[0][0][RTW89_ETSI][19] = 30,
+	[0][0][RTW89_MKK][19] = 58,
+	[0][0][RTW89_IC][19] = 52,
+	[0][0][RTW89_ACMA][19] = 24,
+	[0][0][RTW89_FCC][21] = 44,
+	[0][0][RTW89_ETSI][21] = 30,
+	[0][0][RTW89_MKK][21] = 58,
+	[0][0][RTW89_IC][21] = 52,
+	[0][0][RTW89_ACMA][21] = 24,
+	[0][0][RTW89_FCC][23] = 44,
+	[0][0][RTW89_ETSI][23] = 30,
+	[0][0][RTW89_MKK][23] = 58,
+	[0][0][RTW89_IC][23] = 52,
+	[0][0][RTW89_ACMA][23] = 24,
+	[0][0][RTW89_FCC][25] = 44,
+	[0][0][RTW89_ETSI][25] = 30,
+	[0][0][RTW89_MKK][25] = 58,
+	[0][0][RTW89_IC][25] = 127,
+	[0][0][RTW89_ACMA][25] = 127,
+	[0][0][RTW89_FCC][27] = 44,
+	[0][0][RTW89_ETSI][27] = 30,
+	[0][0][RTW89_MKK][27] = 58,
+	[0][0][RTW89_IC][27] = 127,
+	[0][0][RTW89_ACMA][27] = 127,
+	[0][0][RTW89_FCC][29] = 44,
+	[0][0][RTW89_ETSI][29] = 30,
+	[0][0][RTW89_MKK][29] = 58,
+	[0][0][RTW89_IC][29] = 127,
+	[0][0][RTW89_ACMA][29] = 127,
+	[0][0][RTW89_FCC][31] = 44,
+	[0][0][RTW89_ETSI][31] = 30,
+	[0][0][RTW89_MKK][31] = 58,
+	[0][0][RTW89_IC][31] = 52,
+	[0][0][RTW89_ACMA][31] = 24,
+	[0][0][RTW89_FCC][33] = 44,
+	[0][0][RTW89_ETSI][33] = 30,
+	[0][0][RTW89_MKK][33] = 58,
+	[0][0][RTW89_IC][33] = 52,
+	[0][0][RTW89_ACMA][33] = 24,
+	[0][0][RTW89_FCC][35] = 44,
+	[0][0][RTW89_ETSI][35] = 30,
+	[0][0][RTW89_MKK][35] = 58,
+	[0][0][RTW89_IC][35] = 52,
+	[0][0][RTW89_ACMA][35] = 24,
+	[0][0][RTW89_FCC][37] = 44,
+	[0][0][RTW89_ETSI][37] = 127,
+	[0][0][RTW89_MKK][37] = 58,
+	[0][0][RTW89_IC][37] = 52,
+	[0][0][RTW89_ACMA][37] = 52,
+	[0][0][RTW89_FCC][38] = 76,
+	[0][0][RTW89_ETSI][38] = 28,
+	[0][0][RTW89_MKK][38] = 127,
+	[0][0][RTW89_IC][38] = 84,
+	[0][0][RTW89_ACMA][38] = 84,
+	[0][0][RTW89_FCC][40] = 76,
+	[0][0][RTW89_ETSI][40] = 28,
+	[0][0][RTW89_MKK][40] = 127,
+	[0][0][RTW89_IC][40] = 84,
+	[0][0][RTW89_ACMA][40] = 84,
+	[0][0][RTW89_FCC][42] = 76,
+	[0][0][RTW89_ETSI][42] = 28,
+	[0][0][RTW89_MKK][42] = 127,
+	[0][0][RTW89_IC][42] = 84,
+	[0][0][RTW89_ACMA][42] = 84,
+	[0][0][RTW89_FCC][44] = 76,
+	[0][0][RTW89_ETSI][44] = 28,
+	[0][0][RTW89_MKK][44] = 127,
+	[0][0][RTW89_IC][44] = 84,
+	[0][0][RTW89_ACMA][44] = 84,
+	[0][0][RTW89_FCC][46] = 76,
+	[0][0][RTW89_ETSI][46] = 28,
+	[0][0][RTW89_MKK][46] = 127,
+	[0][0][RTW89_IC][46] = 84,
+	[0][0][RTW89_ACMA][46] = 84,
+	[0][0][RTW89_FCC][48] = 24,
+	[0][0][RTW89_ETSI][48] = 127,
+	[0][0][RTW89_MKK][48] = 127,
+	[0][0][RTW89_IC][48] = 127,
+	[0][0][RTW89_ACMA][48] = 127,
+	[0][0][RTW89_FCC][50] = 24,
+	[0][0][RTW89_ETSI][50] = 127,
+	[0][0][RTW89_MKK][50] = 127,
+	[0][0][RTW89_IC][50] = 127,
+	[0][0][RTW89_ACMA][50] = 127,
+	[0][0][RTW89_FCC][52] = 24,
+	[0][0][RTW89_ETSI][52] = 127,
+	[0][0][RTW89_MKK][52] = 127,
+	[0][0][RTW89_IC][52] = 127,
+	[0][0][RTW89_ACMA][52] = 127,
+	[0][1][RTW89_FCC][0] = 26,
+	[0][1][RTW89_ETSI][0] = 18,
+	[0][1][RTW89_MKK][0] = 20,
+	[0][1][RTW89_IC][0] = 0,
+	[0][1][RTW89_ACMA][0] = 12,
+	[0][1][RTW89_FCC][2] = 30,
+	[0][1][RTW89_ETSI][2] = 18,
+	[0][1][RTW89_MKK][2] = 20,
+	[0][1][RTW89_IC][2] = 4,
+	[0][1][RTW89_ACMA][2] = 12,
+	[0][1][RTW89_FCC][4] = 26,
+	[0][1][RTW89_ETSI][4] = 18,
+	[0][1][RTW89_MKK][4] = 8,
+	[0][1][RTW89_IC][4] = 0,
+	[0][1][RTW89_ACMA][4] = 12,
+	[0][1][RTW89_FCC][6] = 26,
+	[0][1][RTW89_ETSI][6] = 18,
+	[0][1][RTW89_MKK][6] = 8,
+	[0][1][RTW89_IC][6] = 0,
+	[0][1][RTW89_ACMA][6] = 12,
+	[0][1][RTW89_FCC][8] = 26,
+	[0][1][RTW89_ETSI][8] = 16,
+	[0][1][RTW89_MKK][8] = 20,
+	[0][1][RTW89_IC][8] = 34,
+	[0][1][RTW89_ACMA][8] = 12,
+	[0][1][RTW89_FCC][10] = 26,
+	[0][1][RTW89_ETSI][10] = 16,
+	[0][1][RTW89_MKK][10] = 20,
+	[0][1][RTW89_IC][10] = 34,
+	[0][1][RTW89_ACMA][10] = 12,
+	[0][1][RTW89_FCC][12] = 30,
+	[0][1][RTW89_ETSI][12] = 16,
+	[0][1][RTW89_MKK][12] = 34,
+	[0][1][RTW89_IC][12] = 38,
+	[0][1][RTW89_ACMA][12] = 12,
+	[0][1][RTW89_FCC][14] = 26,
+	[0][1][RTW89_ETSI][14] = 16,
+	[0][1][RTW89_MKK][14] = 34,
+	[0][1][RTW89_IC][14] = 34,
+	[0][1][RTW89_ACMA][14] = 12,
+	[0][1][RTW89_FCC][15] = 26,
+	[0][1][RTW89_ETSI][15] = 18,
+	[0][1][RTW89_MKK][15] = 44,
+	[0][1][RTW89_IC][15] = 34,
+	[0][1][RTW89_ACMA][15] = 12,
+	[0][1][RTW89_FCC][17] = 26,
+	[0][1][RTW89_ETSI][17] = 18,
+	[0][1][RTW89_MKK][17] = 44,
+	[0][1][RTW89_IC][17] = 34,
+	[0][1][RTW89_ACMA][17] = 12,
+	[0][1][RTW89_FCC][19] = 30,
+	[0][1][RTW89_ETSI][19] = 18,
+	[0][1][RTW89_MKK][19] = 44,
+	[0][1][RTW89_IC][19] = 38,
+	[0][1][RTW89_ACMA][19] = 12,
+	[0][1][RTW89_FCC][21] = 30,
+	[0][1][RTW89_ETSI][21] = 18,
+	[0][1][RTW89_MKK][21] = 44,
+	[0][1][RTW89_IC][21] = 38,
+	[0][1][RTW89_ACMA][21] = 12,
+	[0][1][RTW89_FCC][23] = 30,
+	[0][1][RTW89_ETSI][23] = 18,
+	[0][1][RTW89_MKK][23] = 44,
+	[0][1][RTW89_IC][23] = 38,
+	[0][1][RTW89_ACMA][23] = 12,
+	[0][1][RTW89_FCC][25] = 30,
+	[0][1][RTW89_ETSI][25] = 18,
+	[0][1][RTW89_MKK][25] = 44,
+	[0][1][RTW89_IC][25] = 127,
+	[0][1][RTW89_ACMA][25] = 127,
+	[0][1][RTW89_FCC][27] = 30,
+	[0][1][RTW89_ETSI][27] = 16,
+	[0][1][RTW89_MKK][27] = 44,
+	[0][1][RTW89_IC][27] = 127,
+	[0][1][RTW89_ACMA][27] = 127,
+	[0][1][RTW89_FCC][29] = 30,
+	[0][1][RTW89_ETSI][29] = 16,
+	[0][1][RTW89_MKK][29] = 44,
+	[0][1][RTW89_IC][29] = 127,
+	[0][1][RTW89_ACMA][29] = 127,
+	[0][1][RTW89_FCC][31] = 30,
+	[0][1][RTW89_ETSI][31] = 16,
+	[0][1][RTW89_MKK][31] = 44,
+	[0][1][RTW89_IC][31] = 34,
+	[0][1][RTW89_ACMA][31] = 12,
+	[0][1][RTW89_FCC][33] = 26,
+	[0][1][RTW89_ETSI][33] = 16,
+	[0][1][RTW89_MKK][33] = 44,
+	[0][1][RTW89_IC][33] = 34,
+	[0][1][RTW89_ACMA][33] = 12,
+	[0][1][RTW89_FCC][35] = 26,
+	[0][1][RTW89_ETSI][35] = 16,
+	[0][1][RTW89_MKK][35] = 44,
+	[0][1][RTW89_IC][35] = 34,
+	[0][1][RTW89_ACMA][35] = 12,
+	[0][1][RTW89_FCC][37] = 30,
+	[0][1][RTW89_ETSI][37] = 127,
+	[0][1][RTW89_MKK][37] = 44,
+	[0][1][RTW89_IC][37] = 38,
+	[0][1][RTW89_ACMA][37] = 38,
+	[0][1][RTW89_FCC][38] = 74,
+	[0][1][RTW89_ETSI][38] = 16,
+	[0][1][RTW89_MKK][38] = 127,
+	[0][1][RTW89_IC][38] = 82,
+	[0][1][RTW89_ACMA][38] = 84,
+	[0][1][RTW89_FCC][40] = 74,
+	[0][1][RTW89_ETSI][40] = 16,
+	[0][1][RTW89_MKK][40] = 127,
+	[0][1][RTW89_IC][40] = 82,
+	[0][1][RTW89_ACMA][40] = 84,
+	[0][1][RTW89_FCC][42] = 74,
+	[0][1][RTW89_ETSI][42] = 16,
+	[0][1][RTW89_MKK][42] = 127,
+	[0][1][RTW89_IC][42] = 82,
+	[0][1][RTW89_ACMA][42] = 84,
+	[0][1][RTW89_FCC][44] = 74,
+	[0][1][RTW89_ETSI][44] = 16,
+	[0][1][RTW89_MKK][44] = 127,
+	[0][1][RTW89_IC][44] = 82,
+	[0][1][RTW89_ACMA][44] = 84,
+	[0][1][RTW89_FCC][46] = 74,
+	[0][1][RTW89_ETSI][46] = 16,
+	[0][1][RTW89_MKK][46] = 127,
+	[0][1][RTW89_IC][46] = 82,
+	[0][1][RTW89_ACMA][46] = 84,
+	[0][1][RTW89_FCC][48] = 12,
+	[0][1][RTW89_ETSI][48] = 127,
+	[0][1][RTW89_MKK][48] = 127,
+	[0][1][RTW89_IC][48] = 127,
+	[0][1][RTW89_ACMA][48] = 127,
+	[0][1][RTW89_FCC][50] = 12,
+	[0][1][RTW89_ETSI][50] = 127,
+	[0][1][RTW89_MKK][50] = 127,
+	[0][1][RTW89_IC][50] = 127,
+	[0][1][RTW89_ACMA][50] = 127,
+	[0][1][RTW89_FCC][52] = 12,
+	[0][1][RTW89_ETSI][52] = 127,
+	[0][1][RTW89_MKK][52] = 127,
+	[0][1][RTW89_IC][52] = 127,
+	[0][1][RTW89_ACMA][52] = 127,
+	[1][0][RTW89_FCC][0] = 54,
+	[1][0][RTW89_ETSI][0] = 40,
+	[1][0][RTW89_MKK][0] = 48,
+	[1][0][RTW89_IC][0] = 36,
+	[1][0][RTW89_ACMA][0] = 34,
+	[1][0][RTW89_FCC][2] = 54,
+	[1][0][RTW89_ETSI][2] = 40,
+	[1][0][RTW89_MKK][2] = 48,
+	[1][0][RTW89_IC][2] = 36,
+	[1][0][RTW89_ACMA][2] = 34,
+	[1][0][RTW89_FCC][4] = 54,
+	[1][0][RTW89_ETSI][4] = 40,
+	[1][0][RTW89_MKK][4] = 40,
+	[1][0][RTW89_IC][4] = 36,
+	[1][0][RTW89_ACMA][4] = 34,
+	[1][0][RTW89_FCC][6] = 54,
+	[1][0][RTW89_ETSI][6] = 40,
+	[1][0][RTW89_MKK][6] = 40,
+	[1][0][RTW89_IC][6] = 36,
+	[1][0][RTW89_ACMA][6] = 34,
+	[1][0][RTW89_FCC][8] = 54,
+	[1][0][RTW89_ETSI][8] = 40,
+	[1][0][RTW89_MKK][8] = 34,
+	[1][0][RTW89_IC][8] = 62,
+	[1][0][RTW89_ACMA][8] = 34,
+	[1][0][RTW89_FCC][10] = 54,
+	[1][0][RTW89_ETSI][10] = 40,
+	[1][0][RTW89_MKK][10] = 34,
+	[1][0][RTW89_IC][10] = 62,
+	[1][0][RTW89_ACMA][10] = 34,
+	[1][0][RTW89_FCC][12] = 56,
+	[1][0][RTW89_ETSI][12] = 40,
+	[1][0][RTW89_MKK][12] = 46,
+	[1][0][RTW89_IC][12] = 64,
+	[1][0][RTW89_ACMA][12] = 34,
+	[1][0][RTW89_FCC][14] = 54,
+	[1][0][RTW89_ETSI][14] = 40,
+	[1][0][RTW89_MKK][14] = 46,
+	[1][0][RTW89_IC][14] = 62,
+	[1][0][RTW89_ACMA][14] = 34,
+	[1][0][RTW89_FCC][15] = 54,
+	[1][0][RTW89_ETSI][15] = 40,
+	[1][0][RTW89_MKK][15] = 62,
+	[1][0][RTW89_IC][15] = 62,
+	[1][0][RTW89_ACMA][15] = 34,
+	[1][0][RTW89_FCC][17] = 54,
+	[1][0][RTW89_ETSI][17] = 40,
+	[1][0][RTW89_MKK][17] = 68,
+	[1][0][RTW89_IC][17] = 62,
+	[1][0][RTW89_ACMA][17] = 34,
+	[1][0][RTW89_FCC][19] = 54,
+	[1][0][RTW89_ETSI][19] = 40,
+	[1][0][RTW89_MKK][19] = 68,
+	[1][0][RTW89_IC][19] = 62,
+	[1][0][RTW89_ACMA][19] = 34,
+	[1][0][RTW89_FCC][21] = 54,
+	[1][0][RTW89_ETSI][21] = 40,
+	[1][0][RTW89_MKK][21] = 68,
+	[1][0][RTW89_IC][21] = 62,
+	[1][0][RTW89_ACMA][21] = 34,
+	[1][0][RTW89_FCC][23] = 54,
+	[1][0][RTW89_ETSI][23] = 40,
+	[1][0][RTW89_MKK][23] = 68,
+	[1][0][RTW89_IC][23] = 62,
+	[1][0][RTW89_ACMA][23] = 34,
+	[1][0][RTW89_FCC][25] = 54,
+	[1][0][RTW89_ETSI][25] = 40,
+	[1][0][RTW89_MKK][25] = 68,
+	[1][0][RTW89_IC][25] = 127,
+	[1][0][RTW89_ACMA][25] = 127,
+	[1][0][RTW89_FCC][27] = 54,
+	[1][0][RTW89_ETSI][27] = 42,
+	[1][0][RTW89_MKK][27] = 68,
+	[1][0][RTW89_IC][27] = 127,
+	[1][0][RTW89_ACMA][27] = 127,
+	[1][0][RTW89_FCC][29] = 54,
+	[1][0][RTW89_ETSI][29] = 42,
+	[1][0][RTW89_MKK][29] = 68,
+	[1][0][RTW89_IC][29] = 127,
+	[1][0][RTW89_ACMA][29] = 127,
+	[1][0][RTW89_FCC][31] = 54,
+	[1][0][RTW89_ETSI][31] = 42,
+	[1][0][RTW89_MKK][31] = 68,
+	[1][0][RTW89_IC][31] = 62,
+	[1][0][RTW89_ACMA][31] = 34,
+	[1][0][RTW89_FCC][33] = 54,
+	[1][0][RTW89_ETSI][33] = 42,
+	[1][0][RTW89_MKK][33] = 68,
+	[1][0][RTW89_IC][33] = 62,
+	[1][0][RTW89_ACMA][33] = 34,
+	[1][0][RTW89_FCC][35] = 54,
+	[1][0][RTW89_ETSI][35] = 42,
+	[1][0][RTW89_MKK][35] = 68,
+	[1][0][RTW89_IC][35] = 62,
+	[1][0][RTW89_ACMA][35] = 34,
+	[1][0][RTW89_FCC][37] = 56,
+	[1][0][RTW89_ETSI][37] = 127,
+	[1][0][RTW89_MKK][37] = 68,
+	[1][0][RTW89_IC][37] = 64,
+	[1][0][RTW89_ACMA][37] = 64,
+	[1][0][RTW89_FCC][38] = 76,
+	[1][0][RTW89_ETSI][38] = 28,
+	[1][0][RTW89_MKK][38] = 127,
+	[1][0][RTW89_IC][38] = 84,
+	[1][0][RTW89_ACMA][38] = 84,
+	[1][0][RTW89_FCC][40] = 76,
+	[1][0][RTW89_ETSI][40] = 28,
+	[1][0][RTW89_MKK][40] = 127,
+	[1][0][RTW89_IC][40] = 84,
+	[1][0][RTW89_ACMA][40] = 84,
+	[1][0][RTW89_FCC][42] = 76,
+	[1][0][RTW89_ETSI][42] = 28,
+	[1][0][RTW89_MKK][42] = 127,
+	[1][0][RTW89_IC][42] = 84,
+	[1][0][RTW89_ACMA][42] = 84,
+	[1][0][RTW89_FCC][44] = 76,
+	[1][0][RTW89_ETSI][44] = 28,
+	[1][0][RTW89_MKK][44] = 127,
+	[1][0][RTW89_IC][44] = 84,
+	[1][0][RTW89_ACMA][44] = 84,
+	[1][0][RTW89_FCC][46] = 76,
+	[1][0][RTW89_ETSI][46] = 28,
+	[1][0][RTW89_MKK][46] = 127,
+	[1][0][RTW89_IC][46] = 84,
+	[1][0][RTW89_ACMA][46] = 84,
+	[1][0][RTW89_FCC][48] = 36,
+	[1][0][RTW89_ETSI][48] = 127,
+	[1][0][RTW89_MKK][48] = 127,
+	[1][0][RTW89_IC][48] = 127,
+	[1][0][RTW89_ACMA][48] = 127,
+	[1][0][RTW89_FCC][50] = 36,
+	[1][0][RTW89_ETSI][50] = 127,
+	[1][0][RTW89_MKK][50] = 127,
+	[1][0][RTW89_IC][50] = 127,
+	[1][0][RTW89_ACMA][50] = 127,
+	[1][0][RTW89_FCC][52] = 36,
+	[1][0][RTW89_ETSI][52] = 127,
+	[1][0][RTW89_MKK][52] = 127,
+	[1][0][RTW89_IC][52] = 127,
+	[1][0][RTW89_ACMA][52] = 127,
+	[1][1][RTW89_FCC][0] = 34,
+	[1][1][RTW89_ETSI][0] = 30,
+	[1][1][RTW89_MKK][0] = 34,
+	[1][1][RTW89_IC][0] = 10,
+	[1][1][RTW89_ACMA][0] = 22,
+	[1][1][RTW89_FCC][2] = 36,
+	[1][1][RTW89_ETSI][2] = 30,
+	[1][1][RTW89_MKK][2] = 34,
+	[1][1][RTW89_IC][2] = 14,
+	[1][1][RTW89_ACMA][2] = 22,
+	[1][1][RTW89_FCC][4] = 34,
+	[1][1][RTW89_ETSI][4] = 30,
+	[1][1][RTW89_MKK][4] = 26,
+	[1][1][RTW89_IC][4] = 10,
+	[1][1][RTW89_ACMA][4] = 22,
+	[1][1][RTW89_FCC][6] = 34,
+	[1][1][RTW89_ETSI][6] = 30,
+	[1][1][RTW89_MKK][6] = 26,
+	[1][1][RTW89_IC][6] = 10,
+	[1][1][RTW89_ACMA][6] = 22,
+	[1][1][RTW89_FCC][8] = 36,
+	[1][1][RTW89_ETSI][8] = 30,
+	[1][1][RTW89_MKK][8] = 20,
+	[1][1][RTW89_IC][8] = 44,
+	[1][1][RTW89_ACMA][8] = 22,
+	[1][1][RTW89_FCC][10] = 36,
+	[1][1][RTW89_ETSI][10] = 30,
+	[1][1][RTW89_MKK][10] = 20,
+	[1][1][RTW89_IC][10] = 44,
+	[1][1][RTW89_ACMA][10] = 22,
+	[1][1][RTW89_FCC][12] = 38,
+	[1][1][RTW89_ETSI][12] = 30,
+	[1][1][RTW89_MKK][12] = 34,
+	[1][1][RTW89_IC][12] = 46,
+	[1][1][RTW89_ACMA][12] = 22,
+	[1][1][RTW89_FCC][14] = 34,
+	[1][1][RTW89_ETSI][14] = 30,
+	[1][1][RTW89_MKK][14] = 34,
+	[1][1][RTW89_IC][14] = 40,
+	[1][1][RTW89_ACMA][14] = 22,
+	[1][1][RTW89_FCC][15] = 34,
+	[1][1][RTW89_ETSI][15] = 28,
+	[1][1][RTW89_MKK][15] = 56,
+	[1][1][RTW89_IC][15] = 42,
+	[1][1][RTW89_ACMA][15] = 22,
+	[1][1][RTW89_FCC][17] = 34,
+	[1][1][RTW89_ETSI][17] = 28,
+	[1][1][RTW89_MKK][17] = 58,
+	[1][1][RTW89_IC][17] = 42,
+	[1][1][RTW89_ACMA][17] = 22,
+	[1][1][RTW89_FCC][19] = 34,
+	[1][1][RTW89_ETSI][19] = 28,
+	[1][1][RTW89_MKK][19] = 58,
+	[1][1][RTW89_IC][19] = 42,
+	[1][1][RTW89_ACMA][19] = 22,
+	[1][1][RTW89_FCC][21] = 34,
+	[1][1][RTW89_ETSI][21] = 28,
+	[1][1][RTW89_MKK][21] = 58,
+	[1][1][RTW89_IC][21] = 42,
+	[1][1][RTW89_ACMA][21] = 22,
+	[1][1][RTW89_FCC][23] = 34,
+	[1][1][RTW89_ETSI][23] = 28,
+	[1][1][RTW89_MKK][23] = 58,
+	[1][1][RTW89_IC][23] = 42,
+	[1][1][RTW89_ACMA][23] = 22,
+	[1][1][RTW89_FCC][25] = 34,
+	[1][1][RTW89_ETSI][25] = 28,
+	[1][1][RTW89_MKK][25] = 58,
+	[1][1][RTW89_IC][25] = 127,
+	[1][1][RTW89_ACMA][25] = 127,
+	[1][1][RTW89_FCC][27] = 34,
+	[1][1][RTW89_ETSI][27] = 30,
+	[1][1][RTW89_MKK][27] = 58,
+	[1][1][RTW89_IC][27] = 127,
+	[1][1][RTW89_ACMA][27] = 127,
+	[1][1][RTW89_FCC][29] = 34,
+	[1][1][RTW89_ETSI][29] = 30,
+	[1][1][RTW89_MKK][29] = 58,
+	[1][1][RTW89_IC][29] = 127,
+	[1][1][RTW89_ACMA][29] = 127,
+	[1][1][RTW89_FCC][31] = 34,
+	[1][1][RTW89_ETSI][31] = 30,
+	[1][1][RTW89_MKK][31] = 58,
+	[1][1][RTW89_IC][31] = 38,
+	[1][1][RTW89_ACMA][31] = 22,
+	[1][1][RTW89_FCC][33] = 32,
+	[1][1][RTW89_ETSI][33] = 30,
+	[1][1][RTW89_MKK][33] = 58,
+	[1][1][RTW89_IC][33] = 38,
+	[1][1][RTW89_ACMA][33] = 22,
+	[1][1][RTW89_FCC][35] = 32,
+	[1][1][RTW89_ETSI][35] = 30,
+	[1][1][RTW89_MKK][35] = 58,
+	[1][1][RTW89_IC][35] = 38,
+	[1][1][RTW89_ACMA][35] = 22,
+	[1][1][RTW89_FCC][37] = 40,
+	[1][1][RTW89_ETSI][37] = 127,
+	[1][1][RTW89_MKK][37] = 58,
+	[1][1][RTW89_IC][37] = 48,
+	[1][1][RTW89_ACMA][37] = 48,
+	[1][1][RTW89_FCC][38] = 76,
+	[1][1][RTW89_ETSI][38] = 16,
+	[1][1][RTW89_MKK][38] = 127,
+	[1][1][RTW89_IC][38] = 84,
+	[1][1][RTW89_ACMA][38] = 82,
+	[1][1][RTW89_FCC][40] = 76,
+	[1][1][RTW89_ETSI][40] = 16,
+	[1][1][RTW89_MKK][40] = 127,
+	[1][1][RTW89_IC][40] = 84,
+	[1][1][RTW89_ACMA][40] = 82,
+	[1][1][RTW89_FCC][42] = 76,
+	[1][1][RTW89_ETSI][42] = 16,
+	[1][1][RTW89_MKK][42] = 127,
+	[1][1][RTW89_IC][42] = 84,
+	[1][1][RTW89_ACMA][42] = 84,
+	[1][1][RTW89_FCC][44] = 76,
+	[1][1][RTW89_ETSI][44] = 16,
+	[1][1][RTW89_MKK][44] = 127,
+	[1][1][RTW89_IC][44] = 84,
+	[1][1][RTW89_ACMA][44] = 84,
+	[1][1][RTW89_FCC][46] = 76,
+	[1][1][RTW89_ETSI][46] = 16,
+	[1][1][RTW89_MKK][46] = 127,
+	[1][1][RTW89_IC][46] = 84,
+	[1][1][RTW89_ACMA][46] = 84,
+	[1][1][RTW89_FCC][48] = 24,
+	[1][1][RTW89_ETSI][48] = 127,
+	[1][1][RTW89_MKK][48] = 127,
+	[1][1][RTW89_IC][48] = 127,
+	[1][1][RTW89_ACMA][48] = 127,
+	[1][1][RTW89_FCC][50] = 24,
+	[1][1][RTW89_ETSI][50] = 127,
+	[1][1][RTW89_MKK][50] = 127,
+	[1][1][RTW89_IC][50] = 127,
+	[1][1][RTW89_ACMA][50] = 127,
+	[1][1][RTW89_FCC][52] = 24,
+	[1][1][RTW89_ETSI][52] = 127,
+	[1][1][RTW89_MKK][52] = 127,
+	[1][1][RTW89_IC][52] = 127,
+	[1][1][RTW89_ACMA][52] = 127,
+	[2][0][RTW89_FCC][0] = 62,
+	[2][0][RTW89_ETSI][0] = 52,
+	[2][0][RTW89_MKK][0] = 60,
+	[2][0][RTW89_IC][0] = 46,
+	[2][0][RTW89_ACMA][0] = 48,
+	[2][0][RTW89_FCC][2] = 62,
+	[2][0][RTW89_ETSI][2] = 52,
+	[2][0][RTW89_MKK][2] = 60,
+	[2][0][RTW89_IC][2] = 46,
+	[2][0][RTW89_ACMA][2] = 48,
+	[2][0][RTW89_FCC][4] = 62,
+	[2][0][RTW89_ETSI][4] = 52,
+	[2][0][RTW89_MKK][4] = 50,
+	[2][0][RTW89_IC][4] = 46,
+	[2][0][RTW89_ACMA][4] = 48,
+	[2][0][RTW89_FCC][6] = 62,
+	[2][0][RTW89_ETSI][6] = 52,
+	[2][0][RTW89_MKK][6] = 50,
+	[2][0][RTW89_IC][6] = 46,
+	[2][0][RTW89_ACMA][6] = 48,
+	[2][0][RTW89_FCC][8] = 62,
+	[2][0][RTW89_ETSI][8] = 52,
+	[2][0][RTW89_MKK][8] = 44,
+	[2][0][RTW89_IC][8] = 66,
+	[2][0][RTW89_ACMA][8] = 48,
+	[2][0][RTW89_FCC][10] = 62,
+	[2][0][RTW89_ETSI][10] = 52,
+	[2][0][RTW89_MKK][10] = 44,
+	[2][0][RTW89_IC][10] = 66,
+	[2][0][RTW89_ACMA][10] = 48,
+	[2][0][RTW89_FCC][12] = 62,
+	[2][0][RTW89_ETSI][12] = 52,
+	[2][0][RTW89_MKK][12] = 58,
+	[2][0][RTW89_IC][12] = 66,
+	[2][0][RTW89_ACMA][12] = 48,
+	[2][0][RTW89_FCC][14] = 62,
+	[2][0][RTW89_ETSI][14] = 52,
+	[2][0][RTW89_MKK][14] = 58,
+	[2][0][RTW89_IC][14] = 66,
+	[2][0][RTW89_ACMA][14] = 48,
+	[2][0][RTW89_FCC][15] = 62,
+	[2][0][RTW89_ETSI][15] = 52,
+	[2][0][RTW89_MKK][15] = 68,
+	[2][0][RTW89_IC][15] = 70,
+	[2][0][RTW89_ACMA][15] = 48,
+	[2][0][RTW89_FCC][17] = 62,
+	[2][0][RTW89_ETSI][17] = 52,
+	[2][0][RTW89_MKK][17] = 74,
+	[2][0][RTW89_IC][17] = 70,
+	[2][0][RTW89_ACMA][17] = 48,
+	[2][0][RTW89_FCC][19] = 62,
+	[2][0][RTW89_ETSI][19] = 52,
+	[2][0][RTW89_MKK][19] = 74,
+	[2][0][RTW89_IC][19] = 70,
+	[2][0][RTW89_ACMA][19] = 48,
+	[2][0][RTW89_FCC][21] = 62,
+	[2][0][RTW89_ETSI][21] = 52,
+	[2][0][RTW89_MKK][21] = 74,
+	[2][0][RTW89_IC][21] = 70,
+	[2][0][RTW89_ACMA][21] = 48,
+	[2][0][RTW89_FCC][23] = 62,
+	[2][0][RTW89_ETSI][23] = 52,
+	[2][0][RTW89_MKK][23] = 74,
+	[2][0][RTW89_IC][23] = 70,
+	[2][0][RTW89_ACMA][23] = 48,
+	[2][0][RTW89_FCC][25] = 62,
+	[2][0][RTW89_ETSI][25] = 52,
+	[2][0][RTW89_MKK][25] = 74,
+	[2][0][RTW89_IC][25] = 127,
+	[2][0][RTW89_ACMA][25] = 127,
+	[2][0][RTW89_FCC][27] = 62,
+	[2][0][RTW89_ETSI][27] = 52,
+	[2][0][RTW89_MKK][27] = 74,
+	[2][0][RTW89_IC][27] = 127,
+	[2][0][RTW89_ACMA][27] = 127,
+	[2][0][RTW89_FCC][29] = 62,
+	[2][0][RTW89_ETSI][29] = 52,
+	[2][0][RTW89_MKK][29] = 74,
+	[2][0][RTW89_IC][29] = 127,
+	[2][0][RTW89_ACMA][29] = 127,
+	[2][0][RTW89_FCC][31] = 62,
+	[2][0][RTW89_ETSI][31] = 52,
+	[2][0][RTW89_MKK][31] = 74,
+	[2][0][RTW89_IC][31] = 72,
+	[2][0][RTW89_ACMA][31] = 48,
+	[2][0][RTW89_FCC][33] = 64,
+	[2][0][RTW89_ETSI][33] = 52,
+	[2][0][RTW89_MKK][33] = 74,
+	[2][0][RTW89_IC][33] = 72,
+	[2][0][RTW89_ACMA][33] = 48,
+	[2][0][RTW89_FCC][35] = 64,
+	[2][0][RTW89_ETSI][35] = 52,
+	[2][0][RTW89_MKK][35] = 74,
+	[2][0][RTW89_IC][35] = 72,
+	[2][0][RTW89_ACMA][35] = 48,
+	[2][0][RTW89_FCC][37] = 62,
+	[2][0][RTW89_ETSI][37] = 127,
+	[2][0][RTW89_MKK][37] = 74,
+	[2][0][RTW89_IC][37] = 70,
+	[2][0][RTW89_ACMA][37] = 76,
+	[2][0][RTW89_FCC][38] = 76,
+	[2][0][RTW89_ETSI][38] = 28,
+	[2][0][RTW89_MKK][38] = 127,
+	[2][0][RTW89_IC][38] = 84,
+	[2][0][RTW89_ACMA][38] = 84,
+	[2][0][RTW89_FCC][40] = 76,
+	[2][0][RTW89_ETSI][40] = 28,
+	[2][0][RTW89_MKK][40] = 127,
+	[2][0][RTW89_IC][40] = 84,
+	[2][0][RTW89_ACMA][40] = 84,
+	[2][0][RTW89_FCC][42] = 76,
+	[2][0][RTW89_ETSI][42] = 28,
+	[2][0][RTW89_MKK][42] = 127,
+	[2][0][RTW89_IC][42] = 84,
+	[2][0][RTW89_ACMA][42] = 84,
+	[2][0][RTW89_FCC][44] = 76,
+	[2][0][RTW89_ETSI][44] = 28,
+	[2][0][RTW89_MKK][44] = 127,
+	[2][0][RTW89_IC][44] = 84,
+	[2][0][RTW89_ACMA][44] = 84,
+	[2][0][RTW89_FCC][46] = 76,
+	[2][0][RTW89_ETSI][46] = 28,
+	[2][0][RTW89_MKK][46] = 127,
+	[2][0][RTW89_IC][46] = 84,
+	[2][0][RTW89_ACMA][46] = 84,
+	[2][0][RTW89_FCC][48] = 48,
+	[2][0][RTW89_ETSI][48] = 127,
+	[2][0][RTW89_MKK][48] = 127,
+	[2][0][RTW89_IC][48] = 127,
+	[2][0][RTW89_ACMA][48] = 127,
+	[2][0][RTW89_FCC][50] = 48,
+	[2][0][RTW89_ETSI][50] = 127,
+	[2][0][RTW89_MKK][50] = 127,
+	[2][0][RTW89_IC][50] = 127,
+	[2][0][RTW89_ACMA][50] = 127,
+	[2][0][RTW89_FCC][52] = 48,
+	[2][0][RTW89_ETSI][52] = 127,
+	[2][0][RTW89_MKK][52] = 127,
+	[2][0][RTW89_IC][52] = 127,
+	[2][0][RTW89_ACMA][52] = 127,
+	[2][1][RTW89_FCC][0] = 42,
+	[2][1][RTW89_ETSI][0] = 40,
+	[2][1][RTW89_MKK][0] = 44,
+	[2][1][RTW89_IC][0] = 20,
+	[2][1][RTW89_ACMA][0] = 36,
+	[2][1][RTW89_FCC][2] = 42,
+	[2][1][RTW89_ETSI][2] = 40,
+	[2][1][RTW89_MKK][2] = 44,
+	[2][1][RTW89_IC][2] = 18,
+	[2][1][RTW89_ACMA][2] = 36,
+	[2][1][RTW89_FCC][4] = 42,
+	[2][1][RTW89_ETSI][4] = 40,
+	[2][1][RTW89_MKK][4] = 36,
+	[2][1][RTW89_IC][4] = 22,
+	[2][1][RTW89_ACMA][4] = 36,
+	[2][1][RTW89_FCC][6] = 42,
+	[2][1][RTW89_ETSI][6] = 40,
+	[2][1][RTW89_MKK][6] = 36,
+	[2][1][RTW89_IC][6] = 22,
+	[2][1][RTW89_ACMA][6] = 36,
+	[2][1][RTW89_FCC][8] = 42,
+	[2][1][RTW89_ETSI][8] = 40,
+	[2][1][RTW89_MKK][8] = 32,
+	[2][1][RTW89_IC][8] = 50,
+	[2][1][RTW89_ACMA][8] = 36,
+	[2][1][RTW89_FCC][10] = 42,
+	[2][1][RTW89_ETSI][10] = 40,
+	[2][1][RTW89_MKK][10] = 32,
+	[2][1][RTW89_IC][10] = 50,
+	[2][1][RTW89_ACMA][10] = 36,
+	[2][1][RTW89_FCC][12] = 44,
+	[2][1][RTW89_ETSI][12] = 40,
+	[2][1][RTW89_MKK][12] = 44,
+	[2][1][RTW89_IC][12] = 52,
+	[2][1][RTW89_ACMA][12] = 36,
+	[2][1][RTW89_FCC][14] = 44,
+	[2][1][RTW89_ETSI][14] = 40,
+	[2][1][RTW89_MKK][14] = 44,
+	[2][1][RTW89_IC][14] = 52,
+	[2][1][RTW89_ACMA][14] = 36,
+	[2][1][RTW89_FCC][15] = 42,
+	[2][1][RTW89_ETSI][15] = 40,
+	[2][1][RTW89_MKK][15] = 66,
+	[2][1][RTW89_IC][15] = 50,
+	[2][1][RTW89_ACMA][15] = 36,
+	[2][1][RTW89_FCC][17] = 42,
+	[2][1][RTW89_ETSI][17] = 40,
+	[2][1][RTW89_MKK][17] = 66,
+	[2][1][RTW89_IC][17] = 50,
+	[2][1][RTW89_ACMA][17] = 36,
+	[2][1][RTW89_FCC][19] = 42,
+	[2][1][RTW89_ETSI][19] = 40,
+	[2][1][RTW89_MKK][19] = 66,
+	[2][1][RTW89_IC][19] = 50,
+	[2][1][RTW89_ACMA][19] = 36,
+	[2][1][RTW89_FCC][21] = 42,
+	[2][1][RTW89_ETSI][21] = 40,
+	[2][1][RTW89_MKK][21] = 66,
+	[2][1][RTW89_IC][21] = 50,
+	[2][1][RTW89_ACMA][21] = 36,
+	[2][1][RTW89_FCC][23] = 42,
+	[2][1][RTW89_ETSI][23] = 40,
+	[2][1][RTW89_MKK][23] = 66,
+	[2][1][RTW89_IC][23] = 50,
+	[2][1][RTW89_ACMA][23] = 36,
+	[2][1][RTW89_FCC][25] = 42,
+	[2][1][RTW89_ETSI][25] = 40,
+	[2][1][RTW89_MKK][25] = 66,
+	[2][1][RTW89_IC][25] = 127,
+	[2][1][RTW89_ACMA][25] = 127,
+	[2][1][RTW89_FCC][27] = 42,
+	[2][1][RTW89_ETSI][27] = 40,
+	[2][1][RTW89_MKK][27] = 66,
+	[2][1][RTW89_IC][27] = 127,
+	[2][1][RTW89_ACMA][27] = 127,
+	[2][1][RTW89_FCC][29] = 42,
+	[2][1][RTW89_ETSI][29] = 40,
+	[2][1][RTW89_MKK][29] = 66,
+	[2][1][RTW89_IC][29] = 127,
+	[2][1][RTW89_ACMA][29] = 127,
+	[2][1][RTW89_FCC][31] = 42,
+	[2][1][RTW89_ETSI][31] = 40,
+	[2][1][RTW89_MKK][31] = 66,
+	[2][1][RTW89_IC][31] = 50,
+	[2][1][RTW89_ACMA][31] = 36,
+	[2][1][RTW89_FCC][33] = 42,
+	[2][1][RTW89_ETSI][33] = 40,
+	[2][1][RTW89_MKK][33] = 66,
+	[2][1][RTW89_IC][33] = 50,
+	[2][1][RTW89_ACMA][33] = 36,
+	[2][1][RTW89_FCC][35] = 42,
+	[2][1][RTW89_ETSI][35] = 40,
+	[2][1][RTW89_MKK][35] = 66,
+	[2][1][RTW89_IC][35] = 50,
+	[2][1][RTW89_ACMA][35] = 36,
+	[2][1][RTW89_FCC][37] = 42,
+	[2][1][RTW89_ETSI][37] = 127,
+	[2][1][RTW89_MKK][37] = 66,
+	[2][1][RTW89_IC][37] = 50,
+	[2][1][RTW89_ACMA][37] = 60,
+	[2][1][RTW89_FCC][38] = 76,
+	[2][1][RTW89_ETSI][38] = 16,
+	[2][1][RTW89_MKK][38] = 127,
+	[2][1][RTW89_IC][38] = 84,
+	[2][1][RTW89_ACMA][38] = 84,
+	[2][1][RTW89_FCC][40] = 76,
+	[2][1][RTW89_ETSI][40] = 16,
+	[2][1][RTW89_MKK][40] = 127,
+	[2][1][RTW89_IC][40] = 84,
+	[2][1][RTW89_ACMA][40] = 84,
+	[2][1][RTW89_FCC][42] = 76,
+	[2][1][RTW89_ETSI][42] = 16,
+	[2][1][RTW89_MKK][42] = 127,
+	[2][1][RTW89_IC][42] = 84,
+	[2][1][RTW89_ACMA][42] = 84,
+	[2][1][RTW89_FCC][44] = 76,
+	[2][1][RTW89_ETSI][44] = 16,
+	[2][1][RTW89_MKK][44] = 127,
+	[2][1][RTW89_IC][44] = 84,
+	[2][1][RTW89_ACMA][44] = 84,
+	[2][1][RTW89_FCC][46] = 76,
+	[2][1][RTW89_ETSI][46] = 16,
+	[2][1][RTW89_MKK][46] = 127,
+	[2][1][RTW89_IC][46] = 84,
+	[2][1][RTW89_ACMA][46] = 84,
+	[2][1][RTW89_FCC][48] = 36,
+	[2][1][RTW89_ETSI][48] = 127,
+	[2][1][RTW89_MKK][48] = 127,
+	[2][1][RTW89_IC][48] = 127,
+	[2][1][RTW89_ACMA][48] = 127,
+	[2][1][RTW89_FCC][50] = 36,
+	[2][1][RTW89_ETSI][50] = 127,
+	[2][1][RTW89_MKK][50] = 127,
+	[2][1][RTW89_IC][50] = 127,
+	[2][1][RTW89_ACMA][50] = 127,
+	[2][1][RTW89_FCC][52] = 36,
+	[2][1][RTW89_ETSI][52] = 127,
+	[2][1][RTW89_MKK][52] = 127,
+	[2][1][RTW89_IC][52] = 127,
+	[2][1][RTW89_ACMA][52] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
+				    [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
+	[0][0][RTW89_WW][0] = 76,
+	[0][0][RTW89_WW][2] = 76,
+	[0][0][RTW89_WW][4] = 76,
+	[0][0][RTW89_WW][6] = 76,
+	[0][0][RTW89_WW][8] = 76,
+	[0][0][RTW89_WW][10] = 76,
+	[0][0][RTW89_WW][12] = 76,
+	[0][0][RTW89_WW][14] = 76,
+	[0][0][RTW89_WW][15] = 76,
+	[0][0][RTW89_WW][17] = 76,
+	[0][0][RTW89_WW][19] = 76,
+	[0][0][RTW89_WW][21] = 76,
+	[0][0][RTW89_WW][23] = 76,
+	[0][0][RTW89_WW][25] = 76,
+	[0][0][RTW89_WW][27] = 76,
+	[0][0][RTW89_WW][29] = 76,
+	[0][0][RTW89_WW][30] = 76,
+	[0][0][RTW89_WW][32] = 76,
+	[0][0][RTW89_WW][34] = 76,
+	[0][0][RTW89_WW][36] = 76,
+	[0][0][RTW89_WW][38] = 76,
+	[0][0][RTW89_WW][40] = 76,
+	[0][0][RTW89_WW][42] = 76,
+	[0][0][RTW89_WW][44] = 76,
+	[0][0][RTW89_WW][45] = 76,
+	[0][0][RTW89_WW][47] = 76,
+	[0][0][RTW89_WW][49] = 76,
+	[0][0][RTW89_WW][51] = 76,
+	[0][0][RTW89_WW][53] = 76,
+	[0][0][RTW89_WW][55] = 76,
+	[0][0][RTW89_WW][57] = 76,
+	[0][0][RTW89_WW][59] = 76,
+	[0][0][RTW89_WW][60] = 76,
+	[0][0][RTW89_WW][62] = 76,
+	[0][0][RTW89_WW][64] = 76,
+	[0][0][RTW89_WW][66] = 76,
+	[0][0][RTW89_WW][68] = 76,
+	[0][0][RTW89_WW][70] = 76,
+	[0][0][RTW89_WW][72] = 76,
+	[0][0][RTW89_WW][74] = 76,
+	[0][0][RTW89_WW][75] = 76,
+	[0][0][RTW89_WW][77] = 76,
+	[0][0][RTW89_WW][79] = 76,
+	[0][0][RTW89_WW][81] = 76,
+	[0][0][RTW89_WW][83] = 76,
+	[0][0][RTW89_WW][85] = 76,
+	[0][0][RTW89_WW][87] = 76,
+	[0][0][RTW89_WW][89] = 76,
+	[0][0][RTW89_WW][90] = 76,
+	[0][0][RTW89_WW][92] = 76,
+	[0][0][RTW89_WW][94] = 76,
+	[0][0][RTW89_WW][96] = 76,
+	[0][0][RTW89_WW][98] = 76,
+	[0][0][RTW89_WW][100] = 76,
+	[0][0][RTW89_WW][102] = 76,
+	[0][0][RTW89_WW][104] = 76,
+	[0][0][RTW89_WW][105] = 76,
+	[0][0][RTW89_WW][107] = 76,
+	[0][0][RTW89_WW][109] = 76,
+	[0][0][RTW89_WW][111] = 0,
+	[0][0][RTW89_WW][113] = 0,
+	[0][0][RTW89_WW][115] = 0,
+	[0][0][RTW89_WW][117] = 0,
+	[0][0][RTW89_WW][119] = 0,
+	[0][1][RTW89_WW][0] = 76,
+	[0][1][RTW89_WW][2] = 76,
+	[0][1][RTW89_WW][4] = 76,
+	[0][1][RTW89_WW][6] = 76,
+	[0][1][RTW89_WW][8] = 76,
+	[0][1][RTW89_WW][10] = 76,
+	[0][1][RTW89_WW][12] = 76,
+	[0][1][RTW89_WW][14] = 76,
+	[0][1][RTW89_WW][15] = 76,
+	[0][1][RTW89_WW][17] = 76,
+	[0][1][RTW89_WW][19] = 76,
+	[0][1][RTW89_WW][21] = 76,
+	[0][1][RTW89_WW][23] = 76,
+	[0][1][RTW89_WW][25] = 76,
+	[0][1][RTW89_WW][27] = 76,
+	[0][1][RTW89_WW][29] = 76,
+	[0][1][RTW89_WW][30] = 76,
+	[0][1][RTW89_WW][32] = 76,
+	[0][1][RTW89_WW][34] = 76,
+	[0][1][RTW89_WW][36] = 76,
+	[0][1][RTW89_WW][38] = 76,
+	[0][1][RTW89_WW][40] = 76,
+	[0][1][RTW89_WW][42] = 76,
+	[0][1][RTW89_WW][44] = 76,
+	[0][1][RTW89_WW][45] = 76,
+	[0][1][RTW89_WW][47] = 76,
+	[0][1][RTW89_WW][49] = 76,
+	[0][1][RTW89_WW][51] = 76,
+	[0][1][RTW89_WW][53] = 76,
+	[0][1][RTW89_WW][55] = 76,
+	[0][1][RTW89_WW][57] = 76,
+	[0][1][RTW89_WW][59] = 76,
+	[0][1][RTW89_WW][60] = 76,
+	[0][1][RTW89_WW][62] = 76,
+	[0][1][RTW89_WW][64] = 76,
+	[0][1][RTW89_WW][66] = 76,
+	[0][1][RTW89_WW][68] = 76,
+	[0][1][RTW89_WW][70] = 76,
+	[0][1][RTW89_WW][72] = 76,
+	[0][1][RTW89_WW][74] = 76,
+	[0][1][RTW89_WW][75] = 76,
+	[0][1][RTW89_WW][77] = 76,
+	[0][1][RTW89_WW][79] = 76,
+	[0][1][RTW89_WW][81] = 76,
+	[0][1][RTW89_WW][83] = 76,
+	[0][1][RTW89_WW][85] = 76,
+	[0][1][RTW89_WW][87] = 76,
+	[0][1][RTW89_WW][89] = 76,
+	[0][1][RTW89_WW][90] = 76,
+	[0][1][RTW89_WW][92] = 76,
+	[0][1][RTW89_WW][94] = 76,
+	[0][1][RTW89_WW][96] = 76,
+	[0][1][RTW89_WW][98] = 76,
+	[0][1][RTW89_WW][100] = 76,
+	[0][1][RTW89_WW][102] = 76,
+	[0][1][RTW89_WW][104] = 76,
+	[0][1][RTW89_WW][105] = 76,
+	[0][1][RTW89_WW][107] = 76,
+	[0][1][RTW89_WW][109] = 76,
+	[0][1][RTW89_WW][111] = 0,
+	[0][1][RTW89_WW][113] = 0,
+	[0][1][RTW89_WW][115] = 0,
+	[0][1][RTW89_WW][117] = 0,
+	[0][1][RTW89_WW][119] = 0,
+	[1][0][RTW89_WW][0] = 76,
+	[1][0][RTW89_WW][2] = 76,
+	[1][0][RTW89_WW][4] = 76,
+	[1][0][RTW89_WW][6] = 76,
+	[1][0][RTW89_WW][8] = 76,
+	[1][0][RTW89_WW][10] = 76,
+	[1][0][RTW89_WW][12] = 76,
+	[1][0][RTW89_WW][14] = 76,
+	[1][0][RTW89_WW][15] = 76,
+	[1][0][RTW89_WW][17] = 76,
+	[1][0][RTW89_WW][19] = 76,
+	[1][0][RTW89_WW][21] = 76,
+	[1][0][RTW89_WW][23] = 76,
+	[1][0][RTW89_WW][25] = 76,
+	[1][0][RTW89_WW][27] = 76,
+	[1][0][RTW89_WW][29] = 76,
+	[1][0][RTW89_WW][30] = 76,
+	[1][0][RTW89_WW][32] = 76,
+	[1][0][RTW89_WW][34] = 76,
+	[1][0][RTW89_WW][36] = 76,
+	[1][0][RTW89_WW][38] = 76,
+	[1][0][RTW89_WW][40] = 76,
+	[1][0][RTW89_WW][42] = 76,
+	[1][0][RTW89_WW][44] = 76,
+	[1][0][RTW89_WW][45] = 76,
+	[1][0][RTW89_WW][47] = 76,
+	[1][0][RTW89_WW][49] = 76,
+	[1][0][RTW89_WW][51] = 76,
+	[1][0][RTW89_WW][53] = 76,
+	[1][0][RTW89_WW][55] = 76,
+	[1][0][RTW89_WW][57] = 76,
+	[1][0][RTW89_WW][59] = 76,
+	[1][0][RTW89_WW][60] = 76,
+	[1][0][RTW89_WW][62] = 76,
+	[1][0][RTW89_WW][64] = 76,
+	[1][0][RTW89_WW][66] = 76,
+	[1][0][RTW89_WW][68] = 76,
+	[1][0][RTW89_WW][70] = 76,
+	[1][0][RTW89_WW][72] = 76,
+	[1][0][RTW89_WW][74] = 76,
+	[1][0][RTW89_WW][75] = 76,
+	[1][0][RTW89_WW][77] = 76,
+	[1][0][RTW89_WW][79] = 76,
+	[1][0][RTW89_WW][81] = 76,
+	[1][0][RTW89_WW][83] = 76,
+	[1][0][RTW89_WW][85] = 76,
+	[1][0][RTW89_WW][87] = 76,
+	[1][0][RTW89_WW][89] = 76,
+	[1][0][RTW89_WW][90] = 76,
+	[1][0][RTW89_WW][92] = 76,
+	[1][0][RTW89_WW][94] = 76,
+	[1][0][RTW89_WW][96] = 76,
+	[1][0][RTW89_WW][98] = 76,
+	[1][0][RTW89_WW][100] = 76,
+	[1][0][RTW89_WW][102] = 76,
+	[1][0][RTW89_WW][104] = 76,
+	[1][0][RTW89_WW][105] = 76,
+	[1][0][RTW89_WW][107] = 76,
+	[1][0][RTW89_WW][109] = 76,
+	[1][0][RTW89_WW][111] = 0,
+	[1][0][RTW89_WW][113] = 0,
+	[1][0][RTW89_WW][115] = 0,
+	[1][0][RTW89_WW][117] = 0,
+	[1][0][RTW89_WW][119] = 0,
+	[1][1][RTW89_WW][0] = 76,
+	[1][1][RTW89_WW][2] = 76,
+	[1][1][RTW89_WW][4] = 76,
+	[1][1][RTW89_WW][6] = 76,
+	[1][1][RTW89_WW][8] = 76,
+	[1][1][RTW89_WW][10] = 76,
+	[1][1][RTW89_WW][12] = 76,
+	[1][1][RTW89_WW][14] = 76,
+	[1][1][RTW89_WW][15] = 76,
+	[1][1][RTW89_WW][17] = 76,
+	[1][1][RTW89_WW][19] = 76,
+	[1][1][RTW89_WW][21] = 76,
+	[1][1][RTW89_WW][23] = 76,
+	[1][1][RTW89_WW][25] = 76,
+	[1][1][RTW89_WW][27] = 76,
+	[1][1][RTW89_WW][29] = 76,
+	[1][1][RTW89_WW][30] = 76,
+	[1][1][RTW89_WW][32] = 76,
+	[1][1][RTW89_WW][34] = 76,
+	[1][1][RTW89_WW][36] = 76,
+	[1][1][RTW89_WW][38] = 76,
+	[1][1][RTW89_WW][40] = 76,
+	[1][1][RTW89_WW][42] = 76,
+	[1][1][RTW89_WW][44] = 76,
+	[1][1][RTW89_WW][45] = 76,
+	[1][1][RTW89_WW][47] = 76,
+	[1][1][RTW89_WW][49] = 76,
+	[1][1][RTW89_WW][51] = 76,
+	[1][1][RTW89_WW][53] = 76,
+	[1][1][RTW89_WW][55] = 76,
+	[1][1][RTW89_WW][57] = 76,
+	[1][1][RTW89_WW][59] = 76,
+	[1][1][RTW89_WW][60] = 76,
+	[1][1][RTW89_WW][62] = 76,
+	[1][1][RTW89_WW][64] = 76,
+	[1][1][RTW89_WW][66] = 76,
+	[1][1][RTW89_WW][68] = 76,
+	[1][1][RTW89_WW][70] = 76,
+	[1][1][RTW89_WW][72] = 76,
+	[1][1][RTW89_WW][74] = 76,
+	[1][1][RTW89_WW][75] = 76,
+	[1][1][RTW89_WW][77] = 76,
+	[1][1][RTW89_WW][79] = 76,
+	[1][1][RTW89_WW][81] = 76,
+	[1][1][RTW89_WW][83] = 76,
+	[1][1][RTW89_WW][85] = 76,
+	[1][1][RTW89_WW][87] = 76,
+	[1][1][RTW89_WW][89] = 76,
+	[1][1][RTW89_WW][90] = 76,
+	[1][1][RTW89_WW][92] = 76,
+	[1][1][RTW89_WW][94] = 76,
+	[1][1][RTW89_WW][96] = 76,
+	[1][1][RTW89_WW][98] = 76,
+	[1][1][RTW89_WW][100] = 76,
+	[1][1][RTW89_WW][102] = 76,
+	[1][1][RTW89_WW][104] = 76,
+	[1][1][RTW89_WW][105] = 76,
+	[1][1][RTW89_WW][107] = 76,
+	[1][1][RTW89_WW][109] = 76,
+	[1][1][RTW89_WW][111] = 0,
+	[1][1][RTW89_WW][113] = 0,
+	[1][1][RTW89_WW][115] = 0,
+	[1][1][RTW89_WW][117] = 0,
+	[1][1][RTW89_WW][119] = 0,
+	[2][0][RTW89_WW][0] = 76,
+	[2][0][RTW89_WW][2] = 76,
+	[2][0][RTW89_WW][4] = 76,
+	[2][0][RTW89_WW][6] = 76,
+	[2][0][RTW89_WW][8] = 76,
+	[2][0][RTW89_WW][10] = 76,
+	[2][0][RTW89_WW][12] = 76,
+	[2][0][RTW89_WW][14] = 76,
+	[2][0][RTW89_WW][15] = 76,
+	[2][0][RTW89_WW][17] = 76,
+	[2][0][RTW89_WW][19] = 76,
+	[2][0][RTW89_WW][21] = 76,
+	[2][0][RTW89_WW][23] = 76,
+	[2][0][RTW89_WW][25] = 76,
+	[2][0][RTW89_WW][27] = 76,
+	[2][0][RTW89_WW][29] = 76,
+	[2][0][RTW89_WW][30] = 76,
+	[2][0][RTW89_WW][32] = 76,
+	[2][0][RTW89_WW][34] = 76,
+	[2][0][RTW89_WW][36] = 76,
+	[2][0][RTW89_WW][38] = 76,
+	[2][0][RTW89_WW][40] = 76,
+	[2][0][RTW89_WW][42] = 76,
+	[2][0][RTW89_WW][44] = 76,
+	[2][0][RTW89_WW][45] = 76,
+	[2][0][RTW89_WW][47] = 76,
+	[2][0][RTW89_WW][49] = 76,
+	[2][0][RTW89_WW][51] = 76,
+	[2][0][RTW89_WW][53] = 76,
+	[2][0][RTW89_WW][55] = 76,
+	[2][0][RTW89_WW][57] = 76,
+	[2][0][RTW89_WW][59] = 76,
+	[2][0][RTW89_WW][60] = 76,
+	[2][0][RTW89_WW][62] = 76,
+	[2][0][RTW89_WW][64] = 76,
+	[2][0][RTW89_WW][66] = 76,
+	[2][0][RTW89_WW][68] = 76,
+	[2][0][RTW89_WW][70] = 76,
+	[2][0][RTW89_WW][72] = 76,
+	[2][0][RTW89_WW][74] = 76,
+	[2][0][RTW89_WW][75] = 76,
+	[2][0][RTW89_WW][77] = 76,
+	[2][0][RTW89_WW][79] = 76,
+	[2][0][RTW89_WW][81] = 76,
+	[2][0][RTW89_WW][83] = 76,
+	[2][0][RTW89_WW][85] = 76,
+	[2][0][RTW89_WW][87] = 76,
+	[2][0][RTW89_WW][89] = 76,
+	[2][0][RTW89_WW][90] = 76,
+	[2][0][RTW89_WW][92] = 76,
+	[2][0][RTW89_WW][94] = 76,
+	[2][0][RTW89_WW][96] = 76,
+	[2][0][RTW89_WW][98] = 76,
+	[2][0][RTW89_WW][100] = 76,
+	[2][0][RTW89_WW][102] = 76,
+	[2][0][RTW89_WW][104] = 76,
+	[2][0][RTW89_WW][105] = 76,
+	[2][0][RTW89_WW][107] = 76,
+	[2][0][RTW89_WW][109] = 76,
+	[2][0][RTW89_WW][111] = 0,
+	[2][0][RTW89_WW][113] = 0,
+	[2][0][RTW89_WW][115] = 0,
+	[2][0][RTW89_WW][117] = 0,
+	[2][0][RTW89_WW][119] = 0,
+	[2][1][RTW89_WW][0] = 76,
+	[2][1][RTW89_WW][2] = 76,
+	[2][1][RTW89_WW][4] = 76,
+	[2][1][RTW89_WW][6] = 76,
+	[2][1][RTW89_WW][8] = 76,
+	[2][1][RTW89_WW][10] = 76,
+	[2][1][RTW89_WW][12] = 76,
+	[2][1][RTW89_WW][14] = 76,
+	[2][1][RTW89_WW][15] = 76,
+	[2][1][RTW89_WW][17] = 76,
+	[2][1][RTW89_WW][19] = 76,
+	[2][1][RTW89_WW][21] = 76,
+	[2][1][RTW89_WW][23] = 76,
+	[2][1][RTW89_WW][25] = 76,
+	[2][1][RTW89_WW][27] = 76,
+	[2][1][RTW89_WW][29] = 76,
+	[2][1][RTW89_WW][30] = 76,
+	[2][1][RTW89_WW][32] = 76,
+	[2][1][RTW89_WW][34] = 76,
+	[2][1][RTW89_WW][36] = 76,
+	[2][1][RTW89_WW][38] = 76,
+	[2][1][RTW89_WW][40] = 76,
+	[2][1][RTW89_WW][42] = 76,
+	[2][1][RTW89_WW][44] = 76,
+	[2][1][RTW89_WW][45] = 76,
+	[2][1][RTW89_WW][47] = 76,
+	[2][1][RTW89_WW][49] = 76,
+	[2][1][RTW89_WW][51] = 76,
+	[2][1][RTW89_WW][53] = 76,
+	[2][1][RTW89_WW][55] = 76,
+	[2][1][RTW89_WW][57] = 76,
+	[2][1][RTW89_WW][59] = 76,
+	[2][1][RTW89_WW][60] = 76,
+	[2][1][RTW89_WW][62] = 76,
+	[2][1][RTW89_WW][64] = 76,
+	[2][1][RTW89_WW][66] = 76,
+	[2][1][RTW89_WW][68] = 76,
+	[2][1][RTW89_WW][70] = 76,
+	[2][1][RTW89_WW][72] = 76,
+	[2][1][RTW89_WW][74] = 76,
+	[2][1][RTW89_WW][75] = 76,
+	[2][1][RTW89_WW][77] = 76,
+	[2][1][RTW89_WW][79] = 76,
+	[2][1][RTW89_WW][81] = 76,
+	[2][1][RTW89_WW][83] = 76,
+	[2][1][RTW89_WW][85] = 76,
+	[2][1][RTW89_WW][87] = 76,
+	[2][1][RTW89_WW][89] = 76,
+	[2][1][RTW89_WW][90] = 76,
+	[2][1][RTW89_WW][92] = 76,
+	[2][1][RTW89_WW][94] = 76,
+	[2][1][RTW89_WW][96] = 76,
+	[2][1][RTW89_WW][98] = 76,
+	[2][1][RTW89_WW][100] = 76,
+	[2][1][RTW89_WW][102] = 76,
+	[2][1][RTW89_WW][104] = 76,
+	[2][1][RTW89_WW][105] = 76,
+	[2][1][RTW89_WW][107] = 76,
+	[2][1][RTW89_WW][109] = 76,
+	[2][1][RTW89_WW][111] = 0,
+	[2][1][RTW89_WW][113] = 0,
+	[2][1][RTW89_WW][115] = 0,
+	[2][1][RTW89_WW][117] = 0,
+	[2][1][RTW89_WW][119] = 0,
+	[0][0][RTW89_FCC][0] = 76,
+	[0][0][RTW89_FCC][2] = 76,
+	[0][0][RTW89_FCC][4] = 76,
+	[0][0][RTW89_FCC][6] = 76,
+	[0][0][RTW89_FCC][8] = 76,
+	[0][0][RTW89_FCC][10] = 76,
+	[0][0][RTW89_FCC][12] = 76,
+	[0][0][RTW89_FCC][14] = 76,
+	[0][0][RTW89_FCC][15] = 76,
+	[0][0][RTW89_FCC][17] = 76,
+	[0][0][RTW89_FCC][19] = 76,
+	[0][0][RTW89_FCC][21] = 76,
+	[0][0][RTW89_FCC][23] = 76,
+	[0][0][RTW89_FCC][25] = 76,
+	[0][0][RTW89_FCC][27] = 76,
+	[0][0][RTW89_FCC][29] = 76,
+	[0][0][RTW89_FCC][30] = 76,
+	[0][0][RTW89_FCC][32] = 76,
+	[0][0][RTW89_FCC][34] = 76,
+	[0][0][RTW89_FCC][36] = 76,
+	[0][0][RTW89_FCC][38] = 76,
+	[0][0][RTW89_FCC][40] = 76,
+	[0][0][RTW89_FCC][42] = 76,
+	[0][0][RTW89_FCC][44] = 76,
+	[0][0][RTW89_FCC][45] = 76,
+	[0][0][RTW89_FCC][47] = 76,
+	[0][0][RTW89_FCC][49] = 76,
+	[0][0][RTW89_FCC][51] = 76,
+	[0][0][RTW89_FCC][53] = 76,
+	[0][0][RTW89_FCC][55] = 76,
+	[0][0][RTW89_FCC][57] = 76,
+	[0][0][RTW89_FCC][59] = 76,
+	[0][0][RTW89_FCC][60] = 76,
+	[0][0][RTW89_FCC][62] = 76,
+	[0][0][RTW89_FCC][64] = 76,
+	[0][0][RTW89_FCC][66] = 76,
+	[0][0][RTW89_FCC][68] = 76,
+	[0][0][RTW89_FCC][70] = 76,
+	[0][0][RTW89_FCC][72] = 76,
+	[0][0][RTW89_FCC][74] = 76,
+	[0][0][RTW89_FCC][75] = 76,
+	[0][0][RTW89_FCC][77] = 76,
+	[0][0][RTW89_FCC][79] = 76,
+	[0][0][RTW89_FCC][81] = 76,
+	[0][0][RTW89_FCC][83] = 76,
+	[0][0][RTW89_FCC][85] = 76,
+	[0][0][RTW89_FCC][87] = 76,
+	[0][0][RTW89_FCC][89] = 76,
+	[0][0][RTW89_FCC][90] = 76,
+	[0][0][RTW89_FCC][92] = 76,
+	[0][0][RTW89_FCC][94] = 76,
+	[0][0][RTW89_FCC][96] = 76,
+	[0][0][RTW89_FCC][98] = 76,
+	[0][0][RTW89_FCC][100] = 76,
+	[0][0][RTW89_FCC][102] = 76,
+	[0][0][RTW89_FCC][104] = 76,
+	[0][0][RTW89_FCC][105] = 76,
+	[0][0][RTW89_FCC][107] = 76,
+	[0][0][RTW89_FCC][109] = 76,
+	[0][0][RTW89_FCC][111] = 127,
+	[0][0][RTW89_FCC][113] = 127,
+	[0][0][RTW89_FCC][115] = 127,
+	[0][0][RTW89_FCC][117] = 127,
+	[0][0][RTW89_FCC][119] = 127,
+	[0][1][RTW89_FCC][0] =
76, + [0][1][RTW89_FCC][2] = 76, + [0][1][RTW89_FCC][4] = 76, + [0][1][RTW89_FCC][6] = 76, + [0][1][RTW89_FCC][8] = 76, + [0][1][RTW89_FCC][10] = 76, + [0][1][RTW89_FCC][12] = 76, + [0][1][RTW89_FCC][14] = 76, + [0][1][RTW89_FCC][15] = 76, + [0][1][RTW89_FCC][17] = 76, + [0][1][RTW89_FCC][19] = 76, + [0][1][RTW89_FCC][21] = 76, + [0][1][RTW89_FCC][23] = 76, + [0][1][RTW89_FCC][25] = 76, + [0][1][RTW89_FCC][27] = 76, + [0][1][RTW89_FCC][29] = 76, + [0][1][RTW89_FCC][30] = 76, + [0][1][RTW89_FCC][32] = 76, + [0][1][RTW89_FCC][34] = 76, + [0][1][RTW89_FCC][36] = 76, + [0][1][RTW89_FCC][38] = 76, + [0][1][RTW89_FCC][40] = 76, + [0][1][RTW89_FCC][42] = 76, + [0][1][RTW89_FCC][44] = 76, + [0][1][RTW89_FCC][45] = 76, + [0][1][RTW89_FCC][47] = 76, + [0][1][RTW89_FCC][49] = 76, + [0][1][RTW89_FCC][51] = 76, + [0][1][RTW89_FCC][53] = 76, + [0][1][RTW89_FCC][55] = 76, + [0][1][RTW89_FCC][57] = 76, + [0][1][RTW89_FCC][59] = 76, + [0][1][RTW89_FCC][60] = 76, + [0][1][RTW89_FCC][62] = 76, + [0][1][RTW89_FCC][64] = 76, + [0][1][RTW89_FCC][66] = 76, + [0][1][RTW89_FCC][68] = 76, + [0][1][RTW89_FCC][70] = 76, + [0][1][RTW89_FCC][72] = 76, + [0][1][RTW89_FCC][74] = 76, + [0][1][RTW89_FCC][75] = 76, + [0][1][RTW89_FCC][77] = 76, + [0][1][RTW89_FCC][79] = 76, + [0][1][RTW89_FCC][81] = 76, + [0][1][RTW89_FCC][83] = 76, + [0][1][RTW89_FCC][85] = 76, + [0][1][RTW89_FCC][87] = 76, + [0][1][RTW89_FCC][89] = 76, + [0][1][RTW89_FCC][90] = 76, + [0][1][RTW89_FCC][92] = 76, + [0][1][RTW89_FCC][94] = 76, + [0][1][RTW89_FCC][96] = 76, + [0][1][RTW89_FCC][98] = 76, + [0][1][RTW89_FCC][100] = 76, + [0][1][RTW89_FCC][102] = 76, + [0][1][RTW89_FCC][104] = 76, + [0][1][RTW89_FCC][105] = 76, + [0][1][RTW89_FCC][107] = 76, + [0][1][RTW89_FCC][109] = 76, + [0][1][RTW89_FCC][111] = 127, + [0][1][RTW89_FCC][113] = 127, + [0][1][RTW89_FCC][115] = 127, + [0][1][RTW89_FCC][117] = 127, + [0][1][RTW89_FCC][119] = 127, + [1][0][RTW89_FCC][0] = 76, + [1][0][RTW89_FCC][2] = 76, + [1][0][RTW89_FCC][4] = 76, + [1][0][RTW89_FCC][6] = 76, + [1][0][RTW89_FCC][8] = 76, + [1][0][RTW89_FCC][10] = 76, + [1][0][RTW89_FCC][12] = 76, + [1][0][RTW89_FCC][14] = 76, + [1][0][RTW89_FCC][15] = 76, + [1][0][RTW89_FCC][17] = 76, + [1][0][RTW89_FCC][19] = 76, + [1][0][RTW89_FCC][21] = 76, + [1][0][RTW89_FCC][23] = 76, + [1][0][RTW89_FCC][25] = 76, + [1][0][RTW89_FCC][27] = 76, + [1][0][RTW89_FCC][29] = 76, + [1][0][RTW89_FCC][30] = 76, + [1][0][RTW89_FCC][32] = 76, + [1][0][RTW89_FCC][34] = 76, + [1][0][RTW89_FCC][36] = 76, + [1][0][RTW89_FCC][38] = 76, + [1][0][RTW89_FCC][40] = 76, + [1][0][RTW89_FCC][42] = 76, + [1][0][RTW89_FCC][44] = 76, + [1][0][RTW89_FCC][45] = 76, + [1][0][RTW89_FCC][47] = 76, + [1][0][RTW89_FCC][49] = 76, + [1][0][RTW89_FCC][51] = 76, + [1][0][RTW89_FCC][53] = 76, + [1][0][RTW89_FCC][55] = 76, + [1][0][RTW89_FCC][57] = 76, + [1][0][RTW89_FCC][59] = 76, + [1][0][RTW89_FCC][60] = 76, + [1][0][RTW89_FCC][62] = 76, + [1][0][RTW89_FCC][64] = 76, + [1][0][RTW89_FCC][66] = 76, + [1][0][RTW89_FCC][68] = 76, + [1][0][RTW89_FCC][70] = 76, + [1][0][RTW89_FCC][72] = 76, + [1][0][RTW89_FCC][74] = 76, + [1][0][RTW89_FCC][75] = 76, + [1][0][RTW89_FCC][77] = 76, + [1][0][RTW89_FCC][79] = 76, + [1][0][RTW89_FCC][81] = 76, + [1][0][RTW89_FCC][83] = 76, + [1][0][RTW89_FCC][85] = 76, + [1][0][RTW89_FCC][87] = 76, + [1][0][RTW89_FCC][89] = 76, + [1][0][RTW89_FCC][90] = 76, + [1][0][RTW89_FCC][92] = 76, + [1][0][RTW89_FCC][94] = 76, + [1][0][RTW89_FCC][96] = 76, + [1][0][RTW89_FCC][98] = 76, + [1][0][RTW89_FCC][100] = 76, + [1][0][RTW89_FCC][102] = 76, + 
[1][0][RTW89_FCC][104] = 76, + [1][0][RTW89_FCC][105] = 76, + [1][0][RTW89_FCC][107] = 76, + [1][0][RTW89_FCC][109] = 76, + [1][0][RTW89_FCC][111] = 127, + [1][0][RTW89_FCC][113] = 127, + [1][0][RTW89_FCC][115] = 127, + [1][0][RTW89_FCC][117] = 127, + [1][0][RTW89_FCC][119] = 127, + [1][1][RTW89_FCC][0] = 76, + [1][1][RTW89_FCC][2] = 76, + [1][1][RTW89_FCC][4] = 76, + [1][1][RTW89_FCC][6] = 76, + [1][1][RTW89_FCC][8] = 76, + [1][1][RTW89_FCC][10] = 76, + [1][1][RTW89_FCC][12] = 76, + [1][1][RTW89_FCC][14] = 76, + [1][1][RTW89_FCC][15] = 76, + [1][1][RTW89_FCC][17] = 76, + [1][1][RTW89_FCC][19] = 76, + [1][1][RTW89_FCC][21] = 76, + [1][1][RTW89_FCC][23] = 76, + [1][1][RTW89_FCC][25] = 76, + [1][1][RTW89_FCC][27] = 76, + [1][1][RTW89_FCC][29] = 76, + [1][1][RTW89_FCC][30] = 76, + [1][1][RTW89_FCC][32] = 76, + [1][1][RTW89_FCC][34] = 76, + [1][1][RTW89_FCC][36] = 76, + [1][1][RTW89_FCC][38] = 76, + [1][1][RTW89_FCC][40] = 76, + [1][1][RTW89_FCC][42] = 76, + [1][1][RTW89_FCC][44] = 76, + [1][1][RTW89_FCC][45] = 76, + [1][1][RTW89_FCC][47] = 76, + [1][1][RTW89_FCC][49] = 76, + [1][1][RTW89_FCC][51] = 76, + [1][1][RTW89_FCC][53] = 76, + [1][1][RTW89_FCC][55] = 76, + [1][1][RTW89_FCC][57] = 76, + [1][1][RTW89_FCC][59] = 76, + [1][1][RTW89_FCC][60] = 76, + [1][1][RTW89_FCC][62] = 76, + [1][1][RTW89_FCC][64] = 76, + [1][1][RTW89_FCC][66] = 76, + [1][1][RTW89_FCC][68] = 76, + [1][1][RTW89_FCC][70] = 76, + [1][1][RTW89_FCC][72] = 76, + [1][1][RTW89_FCC][74] = 76, + [1][1][RTW89_FCC][75] = 76, + [1][1][RTW89_FCC][77] = 76, + [1][1][RTW89_FCC][79] = 76, + [1][1][RTW89_FCC][81] = 76, + [1][1][RTW89_FCC][83] = 76, + [1][1][RTW89_FCC][85] = 76, + [1][1][RTW89_FCC][87] = 76, + [1][1][RTW89_FCC][89] = 76, + [1][1][RTW89_FCC][90] = 76, + [1][1][RTW89_FCC][92] = 76, + [1][1][RTW89_FCC][94] = 76, + [1][1][RTW89_FCC][96] = 76, + [1][1][RTW89_FCC][98] = 76, + [1][1][RTW89_FCC][100] = 76, + [1][1][RTW89_FCC][102] = 76, + [1][1][RTW89_FCC][104] = 76, + [1][1][RTW89_FCC][105] = 76, + [1][1][RTW89_FCC][107] = 76, + [1][1][RTW89_FCC][109] = 76, + [1][1][RTW89_FCC][111] = 127, + [1][1][RTW89_FCC][113] = 127, + [1][1][RTW89_FCC][115] = 127, + [1][1][RTW89_FCC][117] = 127, + [1][1][RTW89_FCC][119] = 127, + [2][0][RTW89_FCC][0] = 76, + [2][0][RTW89_FCC][2] = 76, + [2][0][RTW89_FCC][4] = 76, + [2][0][RTW89_FCC][6] = 76, + [2][0][RTW89_FCC][8] = 76, + [2][0][RTW89_FCC][10] = 76, + [2][0][RTW89_FCC][12] = 76, + [2][0][RTW89_FCC][14] = 76, + [2][0][RTW89_FCC][15] = 76, + [2][0][RTW89_FCC][17] = 76, + [2][0][RTW89_FCC][19] = 76, + [2][0][RTW89_FCC][21] = 76, + [2][0][RTW89_FCC][23] = 76, + [2][0][RTW89_FCC][25] = 76, + [2][0][RTW89_FCC][27] = 76, + [2][0][RTW89_FCC][29] = 76, + [2][0][RTW89_FCC][30] = 76, + [2][0][RTW89_FCC][32] = 76, + [2][0][RTW89_FCC][34] = 76, + [2][0][RTW89_FCC][36] = 76, + [2][0][RTW89_FCC][38] = 76, + [2][0][RTW89_FCC][40] = 76, + [2][0][RTW89_FCC][42] = 76, + [2][0][RTW89_FCC][44] = 76, + [2][0][RTW89_FCC][45] = 76, + [2][0][RTW89_FCC][47] = 76, + [2][0][RTW89_FCC][49] = 76, + [2][0][RTW89_FCC][51] = 76, + [2][0][RTW89_FCC][53] = 76, + [2][0][RTW89_FCC][55] = 76, + [2][0][RTW89_FCC][57] = 76, + [2][0][RTW89_FCC][59] = 76, + [2][0][RTW89_FCC][60] = 76, + [2][0][RTW89_FCC][62] = 76, + [2][0][RTW89_FCC][64] = 76, + [2][0][RTW89_FCC][66] = 76, + [2][0][RTW89_FCC][68] = 76, + [2][0][RTW89_FCC][70] = 76, + [2][0][RTW89_FCC][72] = 76, + [2][0][RTW89_FCC][74] = 76, + [2][0][RTW89_FCC][75] = 76, + [2][0][RTW89_FCC][77] = 76, + [2][0][RTW89_FCC][79] = 76, + [2][0][RTW89_FCC][81] = 76, + [2][0][RTW89_FCC][83] = 
76, + [2][0][RTW89_FCC][85] = 76, + [2][0][RTW89_FCC][87] = 76, + [2][0][RTW89_FCC][89] = 76, + [2][0][RTW89_FCC][90] = 76, + [2][0][RTW89_FCC][92] = 76, + [2][0][RTW89_FCC][94] = 76, + [2][0][RTW89_FCC][96] = 76, + [2][0][RTW89_FCC][98] = 76, + [2][0][RTW89_FCC][100] = 76, + [2][0][RTW89_FCC][102] = 76, + [2][0][RTW89_FCC][104] = 76, + [2][0][RTW89_FCC][105] = 76, + [2][0][RTW89_FCC][107] = 76, + [2][0][RTW89_FCC][109] = 76, + [2][0][RTW89_FCC][111] = 127, + [2][0][RTW89_FCC][113] = 127, + [2][0][RTW89_FCC][115] = 127, + [2][0][RTW89_FCC][117] = 127, + [2][0][RTW89_FCC][119] = 127, + [2][1][RTW89_FCC][0] = 76, + [2][1][RTW89_FCC][2] = 76, + [2][1][RTW89_FCC][4] = 76, + [2][1][RTW89_FCC][6] = 76, + [2][1][RTW89_FCC][8] = 76, + [2][1][RTW89_FCC][10] = 76, + [2][1][RTW89_FCC][12] = 76, + [2][1][RTW89_FCC][14] = 76, + [2][1][RTW89_FCC][15] = 76, + [2][1][RTW89_FCC][17] = 76, + [2][1][RTW89_FCC][19] = 76, + [2][1][RTW89_FCC][21] = 76, + [2][1][RTW89_FCC][23] = 76, + [2][1][RTW89_FCC][25] = 76, + [2][1][RTW89_FCC][27] = 76, + [2][1][RTW89_FCC][29] = 76, + [2][1][RTW89_FCC][30] = 76, + [2][1][RTW89_FCC][32] = 76, + [2][1][RTW89_FCC][34] = 76, + [2][1][RTW89_FCC][36] = 76, + [2][1][RTW89_FCC][38] = 76, + [2][1][RTW89_FCC][40] = 76, + [2][1][RTW89_FCC][42] = 76, + [2][1][RTW89_FCC][44] = 76, + [2][1][RTW89_FCC][45] = 76, + [2][1][RTW89_FCC][47] = 76, + [2][1][RTW89_FCC][49] = 76, + [2][1][RTW89_FCC][51] = 76, + [2][1][RTW89_FCC][53] = 76, + [2][1][RTW89_FCC][55] = 76, + [2][1][RTW89_FCC][57] = 76, + [2][1][RTW89_FCC][59] = 76, + [2][1][RTW89_FCC][60] = 76, + [2][1][RTW89_FCC][62] = 76, + [2][1][RTW89_FCC][64] = 76, + [2][1][RTW89_FCC][66] = 76, + [2][1][RTW89_FCC][68] = 76, + [2][1][RTW89_FCC][70] = 76, + [2][1][RTW89_FCC][72] = 76, + [2][1][RTW89_FCC][74] = 76, + [2][1][RTW89_FCC][75] = 76, + [2][1][RTW89_FCC][77] = 76, + [2][1][RTW89_FCC][79] = 76, + [2][1][RTW89_FCC][81] = 76, + [2][1][RTW89_FCC][83] = 76, + [2][1][RTW89_FCC][85] = 76, + [2][1][RTW89_FCC][87] = 76, + [2][1][RTW89_FCC][89] = 76, + [2][1][RTW89_FCC][90] = 76, + [2][1][RTW89_FCC][92] = 76, + [2][1][RTW89_FCC][94] = 76, + [2][1][RTW89_FCC][96] = 76, + [2][1][RTW89_FCC][98] = 76, + [2][1][RTW89_FCC][100] = 76, + [2][1][RTW89_FCC][102] = 76, + [2][1][RTW89_FCC][104] = 76, + [2][1][RTW89_FCC][105] = 76, + [2][1][RTW89_FCC][107] = 76, + [2][1][RTW89_FCC][109] = 76, + [2][1][RTW89_FCC][111] = 127, + [2][1][RTW89_FCC][113] = 127, + [2][1][RTW89_FCC][115] = 127, + [2][1][RTW89_FCC][117] = 127, + [2][1][RTW89_FCC][119] = 127, +}; + +const struct rtw89_phy_table rtw89_8852c_phy_bb_table = { + .regs = rtw89_8852c_phy_bb_regs, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_regs), + .rf_path = 0, /* don't care */ +}; + +const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table = { + .regs = rtw89_8852c_phy_bb_reg_gain, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_reg_gain), + .rf_path = 0, /* don't care */ +}; + +const struct rtw89_phy_table rtw89_8852c_phy_radioa_table = { + .regs = rtw89_8852c_phy_radioa_regs, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radioa_regs), + .rf_path = RF_PATH_A, + .config = rtw89_phy_config_rf_reg_v1, +}; + +const struct rtw89_phy_table rtw89_8852c_phy_radiob_table = { + .regs = rtw89_8852c_phy_radiob_regs, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radiob_regs), + .rf_path = RF_PATH_B, + .config = rtw89_phy_config_rf_reg_v1, +}; + +const struct rtw89_phy_table rtw89_8852c_phy_nctl_table = { + .regs = rtw89_8852c_phy_nctl_regs, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_nctl_regs), + .rf_path = 0, /* don't care */ +}; + +const 
struct rtw89_txpwr_table rtw89_8852c_byr_table = { + .data = rtw89_8852c_txpwr_byrate, + .size = ARRAY_SIZE(rtw89_8852c_txpwr_byrate), + .load = rtw89_phy_load_txpwr_byrate, +}; + +const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg = { + .delta_swingidx_6gb_n = _txpwr_track_delta_swingidx_6gb_n, + .delta_swingidx_6gb_p = _txpwr_track_delta_swingidx_6gb_p, + .delta_swingidx_6ga_n = _txpwr_track_delta_swingidx_6ga_n, + .delta_swingidx_6ga_p = _txpwr_track_delta_swingidx_6ga_p, + .delta_swingidx_5gb_n = _txpwr_track_delta_swingidx_5gb_n, + .delta_swingidx_5gb_p = _txpwr_track_delta_swingidx_5gb_p, + .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n, + .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p, + .delta_swingidx_2gb_n = _txpwr_track_delta_swingidx_2gb_n, + .delta_swingidx_2gb_p = _txpwr_track_delta_swingidx_2gb_p, + .delta_swingidx_2ga_n = _txpwr_track_delta_swingidx_2ga_n, + .delta_swingidx_2ga_p = _txpwr_track_delta_swingidx_2ga_p, + .delta_swingidx_2g_cck_b_n = _txpwr_track_delta_swingidx_2g_cck_b_n, + .delta_swingidx_2g_cck_b_p = _txpwr_track_delta_swingidx_2g_cck_b_p, + .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n, + .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p, +}; + +const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table = { + .data[RTW89_TSSI_BANDEDGE_FLAT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_LOW] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_MID] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_HIGH] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, +}; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h new file mode 100644 index 0000000000..7d71a92e2d --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#ifndef __RTW89_8852C_TABLE_H__ +#define __RTW89_8852C_TABLE_H__ + +#include "core.h" + +extern const struct rtw89_phy_table rtw89_8852c_phy_bb_table; +extern const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table; +extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table; +extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table; +extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table; +extern const struct rtw89_txpwr_table rtw89_8852c_byr_table; +extern const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table; +extern const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg; +extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_6G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_6G_CH_NUM]; + +#endif diff --git 
a/drivers/net/wireless/silabs/Kconfig b/drivers/net/wireless/silabs/Kconfig
new file mode 100644
index 0000000000..6262a799bf
--- /dev/null
+++ b/drivers/net/wireless/silabs/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config WLAN_VENDOR_SILABS
+        bool "Silicon Laboratories devices"
+        default y
+        help
+          If you have a wireless card belonging to this class, say Y.
+
+          Note that the answer to this question doesn't directly affect the
+          kernel: saying N will just cause the configurator to skip all the
+          questions about these cards. If you say Y, you will be asked for
+          your specific card in the following questions.
+
+if WLAN_VENDOR_SILABS
+
+source "drivers/net/wireless/silabs/wfx/Kconfig"
+
+endif # WLAN_VENDOR_SILABS
diff --git a/drivers/net/wireless/silabs/Makefile b/drivers/net/wireless/silabs/Makefile
new file mode 100644
index 0000000000..c2263ee210
--- /dev/null
+++ b/drivers/net/wireless/silabs/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/net/wireless/silabs/wfx/Kconfig b/drivers/net/wireless/silabs/wfx/Kconfig
new file mode 100644
index 0000000000..835a855409
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WFX
+        tristate "Silicon Labs wireless chips WF200 and further"
+        depends on MAC80211
+        depends on MMC || !MMC # do not allow WFX=y if MMC=m
+        depends on (SPI || MMC)
+        help
+          This is a driver for Silicon Labs WFxxx series (WF200 and further)
+          chipsets. This chip can be found on SPI or SDIO buses.
+
+          Silabs does not use a reliable SDIO vendor ID. So, to avoid conflicts,
+          the driver won't probe the device if it is not also declared in the
+          Device Tree.
diff --git a/drivers/net/wireless/silabs/wfx/Makefile b/drivers/net/wireless/silabs/wfx/Makefile
new file mode 100644
index 0000000000..c8b356f71c
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Necessary for CREATE_TRACE_POINTS
+CFLAGS_debug.o = -I$(src)
+
+wfx-y := \
+        bh.o \
+        hwio.o \
+        fwio.o \
+        hif_tx_mib.o \
+        hif_tx.o \
+        hif_rx.o \
+        queue.o \
+        data_tx.o \
+        data_rx.o \
+        scan.o \
+        sta.o \
+        key.o \
+        main.o \
+        debug.o
+wfx-$(CONFIG_SPI) += bus_spi.o
+# When CONFIG_MMC == m, append to 'wfx-y' (and not to 'wfx-m')
+wfx-$(subst m,y,$(CONFIG_MMC)) += bus_sdio.o
+
+obj-$(CONFIG_WFX) += wfx.o
diff --git a/drivers/net/wireless/silabs/wfx/bh.c b/drivers/net/wireless/silabs/wfx/bh.c
new file mode 100644
index 0000000000..21dfdcf9cc
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bh.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Interrupt bottom half (BH).
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/gpio/consumer.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "traces.h"
+#include "hif_rx.h"
+#include "hif_api_cmd.h"
+
+static void device_wakeup(struct wfx_dev *wdev)
+{
+        int max_retry = 3;
+
+        if (!wdev->pdata.gpio_wakeup)
+                return;
+        if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0)
+                return;
+
+        if (wfx_api_older_than(wdev, 1, 4)) {
+                gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
+                if (!completion_done(&wdev->hif.ctrl_ready))
+                        usleep_range(2000, 2500);
+                return;
+        }
+        for (;;) {
+                gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
+                /* completion.h does not provide any function to wait for a completion without
+                 * consuming it (a kind of wait_for_completion_done_timeout()). So we have to
+                 * emulate it.
+                 */
+                if (wait_for_completion_timeout(&wdev->hif.ctrl_ready, msecs_to_jiffies(2))) {
+                        complete(&wdev->hif.ctrl_ready);
+                        return;
+                } else if (max_retry-- > 0) {
+                        /* Older firmwares have a race in the sleep/wake-up process. Redoing the
+                         * process is sufficient to unfreeze the chip.
+                         */
+                        dev_err(wdev->dev, "timeout while waking up chip\n");
+                        gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
+                        usleep_range(2000, 2500);
+                } else {
+                        dev_err(wdev->dev, "max wake-up retries reached\n");
+                        return;
+                }
+        }
+}
+
+static void device_release(struct wfx_dev *wdev)
+{
+        if (!wdev->pdata.gpio_wakeup)
+                return;
+
+        gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
+}
+
+static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
+{
+        struct sk_buff *skb;
+        struct wfx_hif_msg *hif;
+        size_t alloc_len;
+        size_t computed_len;
+        int release_count;
+        int piggyback = 0;
+
+        WARN(read_len > round_down(0xFFF, 2) * sizeof(u16), "request exceeds the chip capability");
+
+        /* Add 2 to take into account piggyback size */
+        alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
+        skb = dev_alloc_skb(alloc_len);
+        if (!skb)
+                return -ENOMEM;
+
+        if (wfx_data_read(wdev, skb->data, alloc_len))
+                goto err;
+
+        piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2));
+        _trace_piggyback(piggyback, false);
+
+        hif = (struct wfx_hif_msg *)skb->data;
+        WARN(hif->encrypted & 0x3, "encryption is unsupported");
+        if (WARN(read_len < sizeof(struct wfx_hif_msg), "corrupted read"))
+                goto err;
+        computed_len = le16_to_cpu(hif->len);
+        computed_len = round_up(computed_len, 2);
+        if (computed_len != read_len) {
+                dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
+                        computed_len, read_len);
+                print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
+                               hif, read_len, true);
+                goto err;
+        }
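+        /* Confirmations (unlike indications) also act as Tx buffer credits: each one releases
+         * one buffer on the chip side, except the multi-transmit confirmation which carries an
+         * explicit count of released buffers.
+         */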
+        if (!(hif->id & HIF_ID_IS_INDICATION)) {
+                (*is_cnf)++;
+                if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
+                        release_count =
+                                ((struct wfx_hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
+                else
+                        release_count = 1;
+                WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
+                wdev->hif.tx_buffers_used -= release_count;
+        }
+        _trace_hif_recv(hif, wdev->hif.tx_buffers_used);
+
+        if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
+                if (hif->seqnum != wdev->hif.rx_seqnum)
+                        dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
+                                 hif->seqnum, wdev->hif.rx_seqnum);
+                wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
+        }
+
+        skb_put(skb, le16_to_cpu(hif->len));
+        /* wfx_handle_rx takes care of the SKB lifetime */
+        wfx_handle_rx(wdev, skb);
+        if (!wdev->hif.tx_buffers_used)
+                wake_up(&wdev->hif.tx_buffers_empty);
+
+        return piggyback;
+
+err:
+        if (skb)
+                dev_kfree_skb(skb);
+        return -EIO;
+}
+
+static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
+{
+        size_t len;
+        int i;
+        int ctrl_reg, piggyback;
+
+        piggyback = 0;
+        for (i = 0; i < max_msg; i++) {
+                if (piggyback & CTRL_NEXT_LEN_MASK)
+                        ctrl_reg = piggyback;
+                else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
+                        ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
+                else
+                        ctrl_reg = 0;
+                if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
+                        return i;
+                /* ctrl_reg unit is 16-bit words */
+                len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
+                piggyback = rx_helper(wdev, len, num_cnf);
+                if (piggyback < 0)
+                        return i;
+                if (!(piggyback & CTRL_WLAN_READY))
+                        dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
+                                piggyback);
+        }
+        if (piggyback & CTRL_NEXT_LEN_MASK) {
+                ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
+                complete(&wdev->hif.ctrl_ready);
+                if (ctrl_reg)
+                        dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
+                                ctrl_reg, piggyback);
+        }
+        return i;
+}
+
+static void tx_helper(struct wfx_dev *wdev, struct wfx_hif_msg *hif)
+{
+        int ret;
+        void *data;
+        bool is_encrypted = false;
+        size_t len = le16_to_cpu(hif->len);
+
+        WARN(len < sizeof(*hif), "attempt to send corrupted data");
+
+        hif->seqnum = wdev->hif.tx_seqnum;
+        wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
+
+        data = hif;
+        WARN(len > le16_to_cpu(wdev->hw_caps.size_inp_ch_buf),
+             "request exceeds the chip capability: %zu > %d\n",
+             len, le16_to_cpu(wdev->hw_caps.size_inp_ch_buf));
+        len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
+        ret = wfx_data_write(wdev, data, len);
+        if (ret)
+                goto end;
+
+        wdev->hif.tx_buffers_used++;
+        _trace_hif_send(hif, wdev->hif.tx_buffers_used);
+end:
+        if (is_encrypted)
+                kfree(data);
+}
+
+static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
+{
+        struct wfx_hif_msg *hif;
+        int i;
+
+        for (i = 0; i < max_msg; i++) {
+                hif = NULL;
+                if (wdev->hif.tx_buffers_used < le16_to_cpu(wdev->hw_caps.num_inp_ch_bufs)) {
+                        if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
+                                WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+                                hif = wdev->hif_cmd.buf_send;
+                        } else {
+                                hif = wfx_tx_queues_get(wdev);
+                        }
+                }
+                if (!hif)
+                        return i;
+                tx_helper(wdev, hif);
+        }
+        return i;
+}
+
+/* In SDIO mode, it is necessary to access a register to acknowledge the last received message.
+ * It could be possible to restrict this acknowledgment to SDIO mode and only to the case where
+ * the last operation was a receive.
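+ * (Currently, it is issued on both buses whenever the last operation was a receive; see
+ * bh_work() below.)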
+ */
+static void ack_sdio_data(struct wfx_dev *wdev)
+{
+        u32 cfg_reg;
+
+        wfx_config_reg_read(wdev, &cfg_reg);
+        if (cfg_reg & 0xFF) {
+                dev_warn(wdev->dev, "chip reports errors: %02x\n", cfg_reg & 0xFF);
+                wfx_config_reg_write_bits(wdev, 0xFF, 0x00);
+        }
+}
+
+static void bh_work(struct work_struct *work)
+{
+        struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
+        int stats_req = 0, stats_cnf = 0, stats_ind = 0;
+        bool release_chip = false, last_op_is_rx = false;
+        int num_tx, num_rx;
+
+        device_wakeup(wdev);
+        do {
+                num_tx = bh_work_tx(wdev, 32);
+                stats_req += num_tx;
+                if (num_tx)
+                        last_op_is_rx = false;
+                num_rx = bh_work_rx(wdev, 32, &stats_cnf);
+                stats_ind += num_rx;
+                if (num_rx)
+                        last_op_is_rx = true;
+        } while (num_rx || num_tx);
+        stats_ind -= stats_cnf;
+
+        if (last_op_is_rx)
+                ack_sdio_data(wdev);
+        if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
+                device_release(wdev);
+                release_chip = true;
+        }
+        _trace_bh_stats(stats_ind, stats_req, stats_cnf, wdev->hif.tx_buffers_used, release_chip);
+}
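+
+/* The IRQ path and the bottom half hand over data through a small protocol: reading the control
+ * register also de-asserts the IRQ line, so the IRQ path stores the value it read in
+ * hif.ctrl_reg and signals hif.ctrl_ready; bh_work_rx() later consumes the completion and
+ * atomically swaps the stored value out.
+ */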
+/* An IRQ from the chip did occur */
+void wfx_bh_request_rx(struct wfx_dev *wdev)
+{
+        u32 cur, prev;
+
+        wfx_control_reg_read(wdev, &cur);
+        prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
+        complete(&wdev->hif.ctrl_ready);
+        queue_work(wdev->bh_wq, &wdev->hif.bh);
+
+        if (!(cur & CTRL_NEXT_LEN_MASK))
+                dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
+                        cur);
+        if (prev != 0)
+                dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
+                        prev, cur);
+}
+
+/* The driver wants to send data */
+void wfx_bh_request_tx(struct wfx_dev *wdev)
+{
+        queue_work(wdev->bh_wq, &wdev->hif.bh);
+}
+
+/* If no IRQ is available, this function allows to manually poll the control register and to
+ * simulate an IRQ when an event happens.
+ *
+ * Note that the device has a bug: if an IRQ is raised while the host reads the control register,
+ * the IRQ is lost. So, use this function carefully (only during device initialisation).
+ */
+void wfx_bh_poll_irq(struct wfx_dev *wdev)
+{
+        ktime_t now, start;
+        u32 reg;
+
+        WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
+        flush_workqueue(wdev->bh_wq);
+        start = ktime_get();
+        for (;;) {
+                wfx_control_reg_read(wdev, &reg);
+                now = ktime_get();
+                if (reg & 0xFFF)
+                        break;
+                if (ktime_after(now, ktime_add_ms(start, 1000))) {
+                        dev_err(wdev->dev, "time out while polling control register\n");
+                        return;
+                }
+                udelay(200);
+        }
+        wfx_bh_request_rx(wdev);
+}
+
+void wfx_bh_register(struct wfx_dev *wdev)
+{
+        INIT_WORK(&wdev->hif.bh, bh_work);
+        init_completion(&wdev->hif.ctrl_ready);
+        init_waitqueue_head(&wdev->hif.tx_buffers_empty);
+}
+
+void wfx_bh_unregister(struct wfx_dev *wdev)
+{
+        flush_work(&wdev->hif.bh);
+}
diff --git a/drivers/net/wireless/silabs/wfx/bh.h b/drivers/net/wireless/silabs/wfx/bh.h
new file mode 100644
index 0000000000..a44c8b421b
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bh.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interrupt bottom half (BH).
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BH_H
+#define WFX_BH_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+struct wfx_dev;
+
+struct wfx_hif {
+        struct work_struct bh;
+        struct completion ctrl_ready;
+        wait_queue_head_t tx_buffers_empty;
+        atomic_t ctrl_reg;
+        int rx_seqnum;
+        int tx_seqnum;
+        int tx_buffers_used;
+};
+
+void wfx_bh_register(struct wfx_dev *wdev);
+void wfx_bh_unregister(struct wfx_dev *wdev);
+void wfx_bh_request_rx(struct wfx_dev *wdev);
+void wfx_bh_request_tx(struct wfx_dev *wdev);
+void wfx_bh_poll_irq(struct wfx_dev *wdev);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/bus.h b/drivers/net/wireless/silabs/wfx/bus.h
new file mode 100644
index 0000000000..ccadfdd687
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bus.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common bus abstraction layer.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BUS_H
+#define WFX_BUS_H
+
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+
+#define WFX_REG_CONFIG       0x0
+#define WFX_REG_CONTROL      0x1
+#define WFX_REG_IN_OUT_QUEUE 0x2
+#define WFX_REG_AHB_DPORT    0x3
+#define WFX_REG_BASE_ADDR    0x4
+#define WFX_REG_SRAM_DPORT   0x5
+#define WFX_REG_SET_GEN_R_W  0x6
+#define WFX_REG_FRAME_OUT    0x7
+
+struct wfx_hwbus_ops {
+        int (*copy_from_io)(void *bus_priv, unsigned int addr, void *dst, size_t count);
+        int (*copy_to_io)(void *bus_priv, unsigned int addr, const void *src, size_t count);
+        int (*irq_subscribe)(void *bus_priv);
+        int (*irq_unsubscribe)(void *bus_priv);
+        void (*lock)(void *bus_priv);
+        void (*unlock)(void *bus_priv);
+        size_t (*align_size)(void *bus_priv, size_t size);
+};
+
+extern struct sdio_driver wfx_sdio_driver;
+extern struct spi_driver wfx_spi_driver;
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c
new file mode 100644
index 0000000000..51a0d58a90
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SDIO interface.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static const struct wfx_platform_data pdata_wf200 = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/wf200.pds",
+};
+
+static const struct wfx_platform_data pdata_brd4001a = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/brd4001a.pds",
+};
+
+static const struct wfx_platform_data pdata_brd8022a = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/brd8022a.pds",
+};
+
+static const struct wfx_platform_data pdata_brd8023a = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/brd8023a.pds",
+};
+
+struct wfx_sdio_priv {
+        struct sdio_func *func;
+        struct wfx_dev *core;
+        u8 buf_id_tx;
+        u8 buf_id_rx;
+        int of_irq;
+};
+
+static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id, void *dst, size_t count)
+{
+        struct wfx_sdio_priv *bus = priv;
+        unsigned int sdio_addr = reg_id << 2;
+        int ret;
+
+        WARN(reg_id > 7, "chip only has registers 0..7");
+        WARN(!IS_ALIGNED((uintptr_t)dst, 4), "unaligned buffer address");
+        WARN(!IS_ALIGNED(count, 4), "unaligned buffer size");
+
+        /* Use queue mode buffers */
+        if (reg_id == WFX_REG_IN_OUT_QUEUE)
+                sdio_addr |= (bus->buf_id_rx + 1) << 7;
+        ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count);
+        if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+                bus->buf_id_rx = (bus->buf_id_rx + 1) % 4;
+
+        return ret;
+}
+
+static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id, const void *src, size_t count)
+{
+        struct wfx_sdio_priv *bus = priv;
+        unsigned int sdio_addr = reg_id << 2;
+        int ret;
+
+        WARN(reg_id > 7, "chip only has registers 0..7");
+        WARN(!IS_ALIGNED((uintptr_t)src, 4), "unaligned buffer address");
+        WARN(!IS_ALIGNED(count, 4), "unaligned buffer size");
+
+        /* Use queue mode buffers */
+        if (reg_id == WFX_REG_IN_OUT_QUEUE)
+                sdio_addr |= bus->buf_id_tx << 7;
+        /* FIXME: discards 'const' qualifier for src */
+        ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count);
+        if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+                bus->buf_id_tx = (bus->buf_id_tx + 1) % 32;
+
+        return ret;
+}
+
+static void wfx_sdio_lock(void *priv)
+{
+        struct wfx_sdio_priv *bus = priv;
+
+        sdio_claim_host(bus->func);
+}
+
+static void wfx_sdio_unlock(void *priv)
+{
+        struct wfx_sdio_priv *bus = priv;
+
+        sdio_release_host(bus->func);
+}
+
+static void wfx_sdio_irq_handler(struct sdio_func *func)
+{
+        struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+        wfx_bh_request_rx(bus->core);
+}
+
+static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
+{
+        struct wfx_sdio_priv *bus = priv;
+
+        sdio_claim_host(bus->func);
+        wfx_bh_request_rx(bus->core);
+        sdio_release_host(bus->func);
+        return IRQ_HANDLED;
+}
+
+static int wfx_sdio_irq_subscribe(void *priv)
+{
+        struct wfx_sdio_priv *bus = priv;
+        u32 flags;
+        int ret;
+        u8 cccr;
+
+        if (!bus->of_irq) {
+                sdio_claim_host(bus->func);
+                ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
+                sdio_release_host(bus->func);
+                return ret;
+        }
+
+        flags = irq_get_trigger_type(bus->of_irq);
+        if (!flags)
+                flags = IRQF_TRIGGER_HIGH;
+        flags |= IRQF_ONESHOT;
+        ret = devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
+                                        wfx_sdio_irq_handler_ext, flags, "wfx", bus);
+        if (ret)
+                return ret;
+        sdio_claim_host(bus->func);
+        cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL);
+        cccr |= BIT(0);
+        cccr |= BIT(bus->func->num);
+        sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL);
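+        /* With an out-of-band (GPIO) IRQ, the SDIO core never calls sdio_claim_irq(), so the
+         * interrupt-enable bits in the CCCR (the master enable in bit 0 and the bit of this
+         * function) have to be set by hand, as done just above.
+         */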
sdio_release_host(bus->func); + return 0; +} + +static int wfx_sdio_irq_unsubscribe(void *priv) +{ + struct wfx_sdio_priv *bus = priv; + int ret; + + if (bus->of_irq) + devm_free_irq(&bus->func->dev, bus->of_irq, bus); + sdio_claim_host(bus->func); + ret = sdio_release_irq(bus->func); + sdio_release_host(bus->func); + return ret; +} + +static size_t wfx_sdio_align_size(void *priv, size_t size) +{ + struct wfx_sdio_priv *bus = priv; + + return sdio_align_size(bus->func, size); +} + +static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = { + .copy_from_io = wfx_sdio_copy_from_io, + .copy_to_io = wfx_sdio_copy_to_io, + .irq_subscribe = wfx_sdio_irq_subscribe, + .irq_unsubscribe = wfx_sdio_irq_unsubscribe, + .lock = wfx_sdio_lock, + .unlock = wfx_sdio_unlock, + .align_size = wfx_sdio_align_size, +}; + +static const struct of_device_id wfx_sdio_of_match[] = { + { .compatible = "silabs,wf200", .data = &pdata_wf200 }, + { .compatible = "silabs,brd4001a", .data = &pdata_brd4001a }, + { .compatible = "silabs,brd8022a", .data = &pdata_brd8022a }, + { .compatible = "silabs,brd8023a", .data = &pdata_brd8023a }, + { }, +}; +MODULE_DEVICE_TABLE(of, wfx_sdio_of_match); + +static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) +{ + const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev); + struct device_node *np = func->dev.of_node; + struct wfx_sdio_priv *bus; + int ret; + + if (func->num != 1) { + dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", + func->num); + return -ENODEV; + } + + if (!pdata) { + dev_warn(&func->dev, "no compatible device found in DT\n"); + return -ENODEV; + } + + bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + bus->func = func; + bus->of_irq = irq_of_parse_and_map(np, 0); + sdio_set_drvdata(func, bus); + + sdio_claim_host(func); + ret = sdio_enable_func(func); + /* Block of 64 bytes is more efficient than 512B for frame sizes < 4k */ + sdio_set_block_size(func, 64); + sdio_release_host(func); + if (ret) + return ret; + + bus->core = wfx_init_common(&func->dev, pdata, &wfx_sdio_hwbus_ops, bus); + if (!bus->core) { + ret = -EIO; + goto sdio_release; + } + + ret = wfx_probe(bus->core); + if (ret) + goto sdio_release; + + return 0; + +sdio_release: + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + return ret; +} + +static void wfx_sdio_remove(struct sdio_func *func) +{ + struct wfx_sdio_priv *bus = sdio_get_drvdata(func); + + wfx_release(bus->core); + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); +} + +static const struct sdio_device_id wfx_sdio_ids[] = { + /* WF200 does not have official VID/PID */ + { SDIO_DEVICE(0x0000, 0x1000) }, + { }, +}; +MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids); + +struct sdio_driver wfx_sdio_driver = { + .name = "wfx-sdio", + .id_table = wfx_sdio_ids, + .probe = wfx_sdio_probe, + .remove = wfx_sdio_remove, + .drv = { + .owner = THIS_MODULE, + .of_match_table = wfx_sdio_of_match, + } +}; diff --git a/drivers/net/wireless/silabs/wfx/bus_spi.c b/drivers/net/wireless/silabs/wfx/bus_spi.c new file mode 100644 index 0000000000..7fb1afb8ed --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/bus_spi.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SPI interface. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2011, Sagrad Inc. 
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+#define SET_WRITE 0x7FFF /* usage: and operation */
+#define SET_READ 0x8000  /* usage: or operation */
+
+static const struct wfx_platform_data pdata_wf200 = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/wf200.pds",
+        .use_rising_clk = true,
+};
+
+static const struct wfx_platform_data pdata_brd4001a = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/brd4001a.pds",
+        .use_rising_clk = true,
+};
+
+static const struct wfx_platform_data pdata_brd8022a = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/brd8022a.pds",
+        .use_rising_clk = true,
+};
+
+static const struct wfx_platform_data pdata_brd8023a = {
+        .file_fw = "wfx/wfm_wf200",
+        .file_pds = "wfx/brd8023a.pds",
+        .use_rising_clk = true,
+};
+
+struct wfx_spi_priv {
+        struct spi_device *func;
+        struct wfx_dev *core;
+        struct gpio_desc *gpio_reset;
+        bool need_swab;
+};
+
+/* The chip reads 16 bits of data at a time and places them directly into a (little endian) CPU
+ * register. So, the chip expects the byte order to be "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3"
+ * and BE is "B3 B2 B1 B0"). For example, the 32-bit word 0xAABBCCDD, stored as "DD CC BB AA" on
+ * a little endian host, must reach the chip as "CC DD AA BB".
+ *
+ * A little endian host with bits_per_word == 16 should do the right job natively. The code below
+ * adds support for big endian hosts and for the commonly used 8-bit SPI mode.
+ */
+static int wfx_spi_copy_from_io(void *priv, unsigned int addr, void *dst, size_t count)
+{
+        struct wfx_spi_priv *bus = priv;
+        u16 regaddr = (addr << 12) | (count / 2) | SET_READ;
+        struct spi_message m;
+        struct spi_transfer t_addr = {
+                .tx_buf = &regaddr,
+                .len = sizeof(regaddr),
+        };
+        struct spi_transfer t_msg = {
+                .rx_buf = dst,
+                .len = count,
+        };
+        u16 *dst16 = dst;
+        int ret, i;
+
+        WARN(count % 2, "buffer size must be a multiple of 2");
+
+        cpu_to_le16s(&regaddr);
+        if (bus->need_swab)
+                swab16s(&regaddr);
+
+        spi_message_init(&m);
+        spi_message_add_tail(&t_addr, &m);
+        spi_message_add_tail(&t_msg, &m);
+        ret = spi_sync(bus->func, &m);
+
+        if (bus->need_swab && addr == WFX_REG_CONFIG)
+                for (i = 0; i < count / 2; i++)
+                        swab16s(&dst16[i]);
+        return ret;
+}
+
+static int wfx_spi_copy_to_io(void *priv, unsigned int addr, const void *src, size_t count)
+{
+        struct wfx_spi_priv *bus = priv;
+        u16 regaddr = (addr << 12) | (count / 2);
+        /* FIXME: use a bounce buffer */
+        u16 *src16 = (void *)src;
+        int ret, i;
+        struct spi_message m;
+        struct spi_transfer t_addr = {
+                .tx_buf = &regaddr,
+                .len = sizeof(regaddr),
+        };
+        struct spi_transfer t_msg = {
+                .tx_buf = src,
+                .len = count,
+        };
+
+        WARN(count % 2, "buffer size must be a multiple of 2");
+        WARN(regaddr & SET_READ, "bad addr or size overflow");
+
+        cpu_to_le16s(&regaddr);
+
+        /* Register address and CONFIG content always use 16-bit big endian
+         * ("BADC" order)
+         */
+        if (bus->need_swab)
+                swab16s(&regaddr);
+        if (bus->need_swab && addr == WFX_REG_CONFIG)
+                for (i = 0; i < count / 2; i++)
+                        swab16s(&src16[i]);
+
+        spi_message_init(&m);
+        spi_message_add_tail(&t_addr, &m);
+        spi_message_add_tail(&t_msg, &m);
+        ret = spi_sync(bus->func, &m);
+
+        if (bus->need_swab && addr == WFX_REG_CONFIG)
+                for (i = 0; i < count / 2; i++)
+                        swab16s(&src16[i]);
+        return ret;
+}
+
+static void wfx_spi_lock(void *priv)
+{
+}
+
+static void wfx_spi_unlock(void *priv)
+{
+}
+
+static irqreturn_t wfx_spi_irq_handler(int irq, void *priv)
+{
+        struct wfx_spi_priv *bus = priv;
+
+        wfx_bh_request_rx(bus->core);
+        return IRQ_HANDLED;
+}
+
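+/* If the DT does not specify a trigger type for the interrupt line, fall back to a level-high
+ * trigger; IRQF_ONESHOT keeps the line masked until the threaded handler below has completed.
+ */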
+static int wfx_spi_irq_subscribe(void *priv)
+{
+        struct wfx_spi_priv *bus = priv;
+        u32 flags;
+
+        flags = irq_get_trigger_type(bus->func->irq);
+        if (!flags)
+                flags = IRQF_TRIGGER_HIGH;
+        flags |= IRQF_ONESHOT;
+        return devm_request_threaded_irq(&bus->func->dev, bus->func->irq, NULL,
+                                         wfx_spi_irq_handler, flags, "wfx", bus);
+}
+
+static int wfx_spi_irq_unsubscribe(void *priv)
+{
+        struct wfx_spi_priv *bus = priv;
+
+        devm_free_irq(&bus->func->dev, bus->func->irq, bus);
+        return 0;
+}
+
+static size_t wfx_spi_align_size(void *priv, size_t size)
+{
+        /* Most SPI controllers avoid DMA if the buffer size is not 32-bit aligned */
+        return ALIGN(size, 4);
+}
+
+static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = {
+        .copy_from_io = wfx_spi_copy_from_io,
+        .copy_to_io = wfx_spi_copy_to_io,
+        .irq_subscribe = wfx_spi_irq_subscribe,
+        .irq_unsubscribe = wfx_spi_irq_unsubscribe,
+        .lock = wfx_spi_lock,
+        .unlock = wfx_spi_unlock,
+        .align_size = wfx_spi_align_size,
+};
+
+static int wfx_spi_probe(struct spi_device *func)
+{
+        struct wfx_platform_data *pdata;
+        struct wfx_spi_priv *bus;
+        int ret;
+
+        if (!func->bits_per_word)
+                func->bits_per_word = 16;
+        ret = spi_setup(func);
+        if (ret)
+                return ret;
+        pdata = (struct wfx_platform_data *)spi_get_device_id(func)->driver_data;
+        if (!pdata) {
+                dev_err(&func->dev, "unable to retrieve driver data (please report)\n");
+                return -ENODEV;
+        }
+
+        /* Trace below is also displayed by spi_setup() if compiled with DEBUG */
+        dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n",
+                func->chip_select, func->mode, func->bits_per_word, func->max_speed_hz);
+        if (func->bits_per_word != 16 && func->bits_per_word != 8)
+                dev_warn(&func->dev, "unusual bits/word value: %d\n", func->bits_per_word);
+        if (func->max_speed_hz > 50000000)
+                dev_warn(&func->dev, "%dHz is a very high speed\n", func->max_speed_hz);
+
+        bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+        if (!bus)
+                return -ENOMEM;
+        bus->func = func;
+        if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+                bus->need_swab = true;
+        spi_set_drvdata(func, bus);
+
+        bus->gpio_reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_LOW);
+        if (IS_ERR(bus->gpio_reset))
+                return PTR_ERR(bus->gpio_reset);
+        if (!bus->gpio_reset) {
+                dev_warn(&func->dev, "gpio reset is not defined, trying to load firmware anyway\n");
+        } else {
+                gpiod_set_consumer_name(bus->gpio_reset, "wfx reset");
+                gpiod_set_value_cansleep(bus->gpio_reset, 1);
+                usleep_range(100, 150);
+                gpiod_set_value_cansleep(bus->gpio_reset, 0);
+                usleep_range(2000, 2500);
+        }
+
+        bus->core = wfx_init_common(&func->dev, pdata, &wfx_spi_hwbus_ops, bus);
+        if (!bus->core)
+                return -EIO;
+
+        return wfx_probe(bus->core);
+}
+
+static void wfx_spi_remove(struct spi_device *func)
+{
+        struct wfx_spi_priv *bus = spi_get_drvdata(func);
+
+        wfx_release(bus->core);
+}
+
+/* For dynamic driver binding, the kernel does not use OF to match the driver. It only uses the
+ * modalias, which is a copy of the 'compatible' DT property with the vendor prefix stripped.
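+ * For example, compatible = "silabs,brd4001a" yields the modalias "brd4001a"; this is why the
+ * spi_device_id entries below repeat the compatible strings without the "silabs," prefix.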
+ */
+static const struct spi_device_id wfx_spi_id[] = {
+        { "wf200", (kernel_ulong_t)&pdata_wf200 },
+        { "brd4001a", (kernel_ulong_t)&pdata_brd4001a },
+        { "brd8022a", (kernel_ulong_t)&pdata_brd8022a },
+        { "brd8023a", (kernel_ulong_t)&pdata_brd8023a },
+        { },
+};
+MODULE_DEVICE_TABLE(spi, wfx_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_spi_of_match[] = {
+        { .compatible = "silabs,wf200" },
+        { .compatible = "silabs,brd4001a" },
+        { .compatible = "silabs,brd8022a" },
+        { .compatible = "silabs,brd8023a" },
+        { },
+};
+MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
+#endif
+
+struct spi_driver wfx_spi_driver = {
+        .driver = {
+                .name = "wfx-spi",
+                .of_match_table = of_match_ptr(wfx_spi_of_match),
+        },
+        .id_table = wfx_spi_id,
+        .probe = wfx_spi_probe,
+        .remove = wfx_spi_remove,
+};
diff --git a/drivers/net/wireless/silabs/wfx/data_rx.c b/drivers/net/wireless/silabs/wfx/data_rx.c
new file mode 100644
index 0000000000..e099a9e65b
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/data_rx.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Data receiving implementation.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "data_rx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+
+static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
+{
+        struct ieee80211_vif *vif = wvif_to_vif(wvif);
+        int params, tid;
+
+        if (wfx_api_older_than(wvif->wdev, 3, 6))
+                return;
+
+        switch (mgmt->u.action.u.addba_req.action_code) {
+        case WLAN_ACTION_ADDBA_REQ:
+                params = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+                tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+                ieee80211_start_rx_ba_session_offl(vif, mgmt->sa, tid);
+                break;
+        case WLAN_ACTION_DELBA:
+                params = le16_to_cpu(mgmt->u.action.u.delba.params);
+                tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
+                ieee80211_stop_rx_ba_session_offl(vif, mgmt->sa, tid);
+                break;
+        }
+}
+
+void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb)
+{
+        struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+        struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+
+        memset(hdr, 0, sizeof(*hdr));
+
+        if (arg->status == HIF_STATUS_RX_FAIL_MIC)
+                hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
+        else if (arg->status)
+                goto drop;
+
+        if (skb->len < sizeof(struct ieee80211_pspoll)) {
+                dev_warn(wvif->wdev->dev, "malformed SDU received\n");
+                goto drop;
+        }
+
+        hdr->band = NL80211_BAND_2GHZ;
+        hdr->freq = ieee80211_channel_to_frequency(arg->channel_number, hdr->band);
+
+        if (arg->rxed_rate >= 14) {
+                hdr->encoding = RX_ENC_HT;
+                hdr->rate_idx = arg->rxed_rate - 14;
+        } else if (arg->rxed_rate >= 4) {
+                hdr->rate_idx = arg->rxed_rate - 2;
+        } else {
+                hdr->rate_idx = arg->rxed_rate;
+        }
+
+        if (!arg->rcpi_rssi) {
+                hdr->flag |= RX_FLAG_NO_SIGNAL_VAL;
+                dev_info(wvif->wdev->dev, "received frame without RSSI data\n");
+        }
+        /* RCPI is defined as 2 * (dBm + 110) */
+        hdr->signal = arg->rcpi_rssi / 2 - 110;
+        hdr->antenna = 0;
+
+        if (arg->encryp)
+                hdr->flag |= RX_FLAG_DECRYPTED;
+
+        /* Block ack negotiation is offloaded by the firmware. However, the re-ordering must
+         * still be done by mac80211.
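+         * ADDBA and DELBA action frames are therefore consumed here (see wfx_rx_handle_ba()
+         * above) and dropped instead of being forwarded, while the session itself is reported
+         * through ieee80211_start/stop_rx_ba_session_offl().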
+         */
+        if (ieee80211_is_action(frame->frame_control) &&
+            mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+            skb->len > IEEE80211_MIN_ACTION_SIZE) {
+                wfx_rx_handle_ba(wvif, mgmt);
+                goto drop;
+        }
+
+        ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+        return;
+
+drop:
+        dev_kfree_skb(skb);
+}
diff --git a/drivers/net/wireless/silabs/wfx/data_rx.h b/drivers/net/wireless/silabs/wfx/data_rx.h
new file mode 100644
index 0000000000..cf708f16d6
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/data_rx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Data receiving implementation.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_RX_H
+#define WFX_DATA_RX_H
+
+struct wfx_vif;
+struct sk_buff;
+struct wfx_hif_ind_rx;
+
+void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
new file mode 100644
index 0000000000..6a5e52a96d
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/data_tx.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Data transmitting implementation.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "data_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+#include "queue.h"
+#include "debug.h"
+#include "traces.h"
+#include "hif_tx_mib.h"
+
+static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate)
+{
+        struct ieee80211_supported_band *band;
+
+        if (rate->idx < 0)
+                return -1;
+        if (rate->flags & IEEE80211_TX_RC_MCS) {
+                if (rate->idx > 7) {
+                        WARN(1, "wrong rate->idx value: %d", rate->idx);
+                        return -1;
+                }
+                return rate->idx + 14;
+        }
+        /* The device only supports the 2GHz band; otherwise, the band information should be
+         * retrieved from ieee80211_tx_info
+         */
+        band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+        if (rate->idx >= band->n_bitrates) {
+                WARN(1, "wrong rate->idx value: %d", rate->idx);
+                return -1;
+        }
+        return band->bitrates[rate->idx].hw_value;
+}
+
+/* TX policy cache implementation */
+
+static void wfx_tx_policy_build(struct wfx_vif *wvif, struct wfx_tx_policy *policy,
+                                struct ieee80211_tx_rate *rates)
+{
+        struct wfx_dev *wdev = wvif->wdev;
+        int i, rateid;
+        u8 count;
+
+        WARN(rates[0].idx < 0, "invalid rate policy");
+        memset(policy, 0, sizeof(*policy));
+        for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+                if (rates[i].idx < 0)
+                        break;
+                WARN_ON(rates[i].count > 15);
+                rateid = wfx_get_hw_rate(wdev, &rates[i]);
+                /* Pack two values in each byte of policy->rates */
+                count = rates[i].count;
+                if (rateid % 2)
+                        count <<= 4;
+                policy->rates[rateid / 2] |= count;
+        }
+}
+
+static bool wfx_tx_policy_is_equal(const struct wfx_tx_policy *a, const struct wfx_tx_policy *b)
+{
+        return !memcmp(a->rates, b->rates, sizeof(a->rates));
+}
+
+static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted)
+{
+        struct wfx_tx_policy *it;
+
+        list_for_each_entry(it, &cache->used, link)
+                if (wfx_tx_policy_is_equal(wanted, it))
+                        return it - cache->cache;
+        list_for_each_entry(it, &cache->free, link)
+                if (wfx_tx_policy_is_equal(wanted, it))
+                        return it - cache->cache;
+        return -1;
+}
+
+static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
+{
+        ++entry->usage_count;
+        list_move(&entry->link, &cache->used);
+}
+
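+/* Policies are reference-counted and move between the cache's 'used' and 'free' lists; when the
+ * free list empties, wfx_tx_policy_get() below stops the mac80211 queues until an entry is
+ * released again.
+ */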
+static int wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
+{
+        int ret = --entry->usage_count;
+
+        if (!ret)
+                list_move(&entry->link, &cache->free);
+        return ret;
+}
+
+static int wfx_tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates, bool *renew)
+{
+        int idx;
+        struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
+        struct wfx_tx_policy wanted;
+        struct wfx_tx_policy *entry;
+
+        wfx_tx_policy_build(wvif, &wanted, rates);
+
+        spin_lock_bh(&cache->lock);
+        if (list_empty(&cache->free)) {
+                WARN(1, "unable to get a valid Tx policy");
+                spin_unlock_bh(&cache->lock);
+                return HIF_TX_RETRY_POLICY_INVALID;
+        }
+        idx = wfx_tx_policy_find(cache, &wanted);
+        if (idx >= 0) {
+                *renew = false;
+        } else {
+                /* If the policy is not found, create a new one using the oldest entry in the
+                 * "free" list
+                 */
+                *renew = true;
+                entry = list_entry(cache->free.prev, struct wfx_tx_policy, link);
+                memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
+                entry->uploaded = false;
+                entry->usage_count = 0;
+                idx = entry - cache->cache;
+        }
+        wfx_tx_policy_use(cache, &cache->cache[idx]);
+        if (list_empty(&cache->free))
+                ieee80211_stop_queues(wvif->wdev->hw);
+        spin_unlock_bh(&cache->lock);
+        return idx;
+}
+
+static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
+{
+        int usage, locked;
+        struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
+
+        if (idx == HIF_TX_RETRY_POLICY_INVALID)
+                return;
+        spin_lock_bh(&cache->lock);
+        locked = list_empty(&cache->free);
+        usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
+        if (locked && !usage)
+                ieee80211_wake_queues(wvif->wdev->hw);
+        spin_unlock_bh(&cache->lock);
+}
+
+static int wfx_tx_policy_upload(struct wfx_vif *wvif)
+{
+        struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache;
+        u8 tmp_rates[12];
+        int i, is_used;
+
+        do {
+                spin_lock_bh(&wvif->tx_policy_cache.lock);
+                for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
+                        is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates));
+                        if (!policies[i].uploaded && is_used)
+                                break;
+                }
+                if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
+                        policies[i].uploaded = true;
+                        memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
+                        spin_unlock_bh(&wvif->tx_policy_cache.lock);
+                        wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
+                } else {
+                        spin_unlock_bh(&wvif->tx_policy_cache.lock);
+                }
+        } while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
+        return 0;
+}
+
+void wfx_tx_policy_upload_work(struct work_struct *work)
+{
+        struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work);
+
+        wfx_tx_policy_upload(wvif);
+        wfx_tx_unlock(wvif->wdev);
+}
+
+void wfx_tx_policy_init(struct wfx_vif *wvif)
+{
+        struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
+        int i;
+
+        memset(cache, 0, sizeof(*cache));
+
+        spin_lock_init(&cache->lock);
+        INIT_LIST_HEAD(&cache->used);
+        INIT_LIST_HEAD(&cache->free);
+
+        for (i = 0; i < ARRAY_SIZE(cache->cache); ++i)
+                list_add(&cache->cache[i].link, &cache->free);
+}
+
+/* Tx implementation */
+
+static bool wfx_is_action_back(struct ieee80211_hdr *hdr)
+{
+        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+
+        if (!ieee80211_is_action(mgmt->frame_control))
+                return false;
+        if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+                return false;
+        return true;
+}
+
+static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+                             struct ieee80211_hdr *hdr)
+{
+        struct wfx_sta_priv *sta_priv = sta ?
(struct wfx_sta_priv *)&sta->drv_priv : NULL; + struct ieee80211_vif *vif = wvif_to_vif(wvif); + const u8 *da = ieee80211_get_DA(hdr); + + if (sta_priv && sta_priv->link_id) + return sta_priv->link_id; + if (vif->type != NL80211_IFTYPE_AP) + return 0; + if (is_multicast_ether_addr(da)) + return 0; + return HIF_LINK_ID_NOT_ASSOCIATED; +} + +static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) +{ + int i; + bool finished; + + /* Firmware is not able to mix rates with different flags */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + rates[i].flags |= IEEE80211_TX_RC_SHORT_GI; + if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI)) + rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; + if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)) + rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; + } + + /* Sort rates and remove duplicates */ + do { + finished = true; + for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) { + if (rates[i + 1].idx == rates[i].idx && + rates[i].idx != -1) { + rates[i].count += rates[i + 1].count; + if (rates[i].count > 15) + rates[i].count = 15; + rates[i + 1].idx = -1; + rates[i + 1].count = 0; + + finished = false; + } + if (rates[i + 1].idx > rates[i].idx) { + swap(rates[i + 1], rates[i]); + finished = false; + } + } + } while (!finished); + /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if (rates[i].idx == 0) + break; + if (rates[i].idx == -1) { + rates[i].idx = 0; + rates[i].count = 8; /* == hw->max_rate_tries */ + rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS; + break; + } + } + /* All retries use long GI */ + for (i = 1; i < IEEE80211_TX_MAX_RATES; i++) + rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; +} + +static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info) +{ + bool tx_policy_renew = false; + u8 ret; + + ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew); + if (ret == HIF_TX_RETRY_POLICY_INVALID) + dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy"); + + if (tx_policy_renew) { + wfx_tx_lock(wvif->wdev); + if (!schedule_work(&wvif->tx_policy_upload_work)) + wfx_tx_unlock(wvif->wdev); + } + return ret; +} + +static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info) +{ + if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS)) + return HIF_FRAME_FORMAT_NON_HT; + else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)) + return HIF_FRAME_FORMAT_MIXED_FORMAT_HT; + else + return HIF_FRAME_FORMAT_GF_HT_11N; +} + +static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key) +{ + int mic_space; + + if (!hw_key) + return 0; + if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + return 0; + mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 
8 : 0;
+	return hw_key->icv_len + mic_space;
+}
+
+static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+	struct wfx_hif_msg *hif_msg;
+	struct wfx_hif_req_tx *req;
+	struct wfx_tx_priv *tx_priv;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int queue_id = skb_get_queue_mapping(skb);
+	size_t offset = (size_t)skb->data & 3;
+	int wmsg_len = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + offset;
+
+	WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
+	wfx_tx_fixup_rates(tx_info->driver_rates);
+
+	/* From now on, tx_info->control is unusable */
+	memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
+	/* Fill tx_priv */
+	tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
+	tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
+
+	/* Fill hif_msg */
+	WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+	WARN(offset & 1, "attempt to transmit an unaligned frame");
+	skb_put(skb, tx_priv->icv_size);
+	skb_push(skb, wmsg_len);
+	memset(skb->data, 0, wmsg_len);
+	hif_msg = (struct wfx_hif_msg *)skb->data;
+	hif_msg->len = cpu_to_le16(skb->len);
+	hif_msg->id = HIF_REQ_ID_TX;
+	hif_msg->interface = wvif->id;
+	if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) {
+		dev_warn(wvif->wdev->dev,
+			 "requested frame size (%d) is larger than maximum supported (%d)\n",
+			 skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf));
+		skb_pull(skb, wmsg_len);
+		return -EIO;
+	}
+
+	/* Fill tx request */
+	req = (struct wfx_hif_req_tx *)hif_msg->body;
+	/* The packet_id only needs to be unique on the device. 32 bits are more than necessary
+	 * for that task, so we take advantage of the spare bits to embed some extra debug data.
+	 */
+	req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
+	req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
+	req->packet_id |= queue_id << 28;
+
+	req->fc_offset = offset;
+	/* Queue indexes are inverted between the firmware and Linux */
+	req->queue_id = 3 - queue_id;
+	req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
+	req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info);
+	req->frame_format = wfx_tx_get_frame_format(tx_info);
+	if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+		req->short_gi = 1;
+	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+		req->after_dtim = 1;
+
+	/* Auxiliary operations */
+	wfx_tx_queues_put(wvif, skb);
+	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+		schedule_work(&wvif->update_tim_work);
+	wfx_bh_request_tx(wvif->wdev);
+	return 0;
+}
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb)
+{
+	struct wfx_dev *wdev = hw->priv;
+	struct wfx_vif *wvif;
+	struct ieee80211_sta *sta = control ? control->sta : NULL;
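+	/* The packet_id built in wfx_tx_inner() above packs debug data around the rolling
+	 * counter. A hypothetical decoder (not part of the driver) would read it back as:
+	 *
+	 *   static void wfx_unpack_packet_id(u32 packet_id, u16 *counter, u16 *seqno, u8 *queue_id)
+	 *   {
+	 *           *counter = packet_id & 0xFFFF;        // bits 0-15: rolling counter
+	 *           *seqno = (packet_id >> 16) & 0xFFF;   // bits 16-27: 802.11 sequence number
+	 *           *queue_id = packet_id >> 28;          // bits 28-31: Linux queue index
+	 *   }
+	 */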
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data);
+
+	BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room,
+			 "struct tx_priv is too large");
+	WARN(skb->next || skb->prev, "skb is already member of a list");
+	/* control.vif can be NULL for injected frames */
+	if (tx_info->control.vif)
+		wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
+	else
+		wvif = wvif_iterate(wdev, NULL);
+	if (WARN_ON(!wvif))
+		goto drop;
+	/* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session
+	 * management frame. The check below exists just in case.
+	 */
+	if (wfx_is_action_back(hdr)) {
+		dev_info(wdev->dev, "drop BA action\n");
+		goto drop;
+	}
+	if (wfx_tx_inner(wvif, sta, skb))
+		goto drop;
+
+	return;
+
+drop:
+	ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
+
+static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+	struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data;
+	struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body;
+	unsigned int offset = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) +
+			      req->fc_offset;
+
+	if (!wvif) {
+		pr_warn("vif associated with the skb does not exist anymore\n");
+		return;
+	}
+	wfx_tx_policy_put(wvif, req->retry_policy_index);
+	skb_pull(skb, offset);
+	ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
+}
+
+static void wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info,
+			      const struct wfx_hif_cnf_tx *arg)
+{
+	struct ieee80211_tx_rate *rate;
+	int tx_count;
+	int i;
+
+	tx_count = arg->ack_failures;
+	if (!arg->status || arg->ack_failures)
+		tx_count += 1; /* Also report success */
+	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+		rate = &tx_info->status.rates[i];
+		if (rate->idx < 0)
+			break;
+		if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
+		    arg->ack_failures)
+			dev_dbg(wdev->dev, "not all retries were consumed: %d != %d\n",
+				rate->count, tx_count);
+		if (tx_count <= rate->count && tx_count &&
+		    arg->txed_rate != wfx_get_hw_rate(wdev, rate))
+			dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
+				arg->txed_rate, wfx_get_hw_rate(wdev, rate));
+		if (tx_count > rate->count) {
+			tx_count -= rate->count;
+		} else if (!tx_count) {
+			rate->count = 0;
+			rate->idx = -1;
+		} else {
+			rate->count = tx_count;
+			tx_count = 0;
+		}
+	}
+	if (tx_count)
+		dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
+}
+
+void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg)
+{
+	const struct wfx_tx_priv *tx_priv;
+	struct ieee80211_tx_info *tx_info;
+	struct wfx_vif *wvif;
+	struct sk_buff *skb;
+
+	skb = wfx_pending_get(wdev, arg->packet_id);
+	if (!skb) {
+		dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
+			 arg->packet_id);
+		return;
+	}
+	tx_info = IEEE80211_SKB_CB(skb);
+	tx_priv = wfx_skb_tx_priv(skb);
+	wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface);
+	WARN_ON(!wvif);
+	if (!wvif)
+		return;
+
+	/* Note that wfx_pending_get_pkt_us_delay() gets its data from tx_info */
+	_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
+	wfx_tx_fill_rates(wdev, tx_info, arg);
+	skb_trim(skb, skb->len - tx_priv->icv_size);
+
+	/* From this point, tx_info->status can be used, but tx_priv must not be touched anymore */
+	/* FIXME: use ieee80211_tx_info_clear_status() */
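+	/* Example of the wfx_tx_fill_rates() accounting above (hypothetical numbers): with
+	 * status rates { {idx=7, count=3}, {idx=0, count=8}, {idx=-1} }, ack_failures == 4 and
+	 * a successful final transmission, tx_count starts at 5; the MCS7 entry keeps
+	 * count == 3, the fallback entry is trimmed to count == 2, and the remaining entries
+	 * stay terminated with idx == -1.
+	 */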
memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data)); + memset(tx_info->pad, 0, sizeof(tx_info->pad)); + + if (!arg->status) { + tx_info->status.tx_time = le32_to_cpu(arg->media_delay) - + le32_to_cpu(arg->tx_queue_delay); + if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) + tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + else + tx_info->flags |= IEEE80211_TX_STAT_ACK; + } else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) { + WARN(!arg->requeue, "incoherent status and result_flags"); + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */ + schedule_work(&wvif->update_tim_work); + } + tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + } + wfx_skb_dtor(wvif, skb); +} + +static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues, struct sk_buff_head *dropped) +{ + struct wfx_queue *queue; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (!(BIT(i) & queues)) + continue; + queue = &wvif->tx_queue[i]; + if (dropped) + wfx_tx_queue_drop(wvif, queue, dropped); + } + if (wvif->wdev->chip_frozen) + return; + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (!(BIT(i) & queues)) + continue; + queue = &wvif->tx_queue[i]; + if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue), + msecs_to_jiffies(1000)) <= 0) + dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?"); + } +} + +void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) +{ + struct wfx_dev *wdev = hw->priv; + struct sk_buff_head dropped; + struct wfx_vif *wvif; + struct wfx_hif_msg *hif; + struct sk_buff *skb; + + skb_queue_head_init(&dropped); + if (vif) { + wvif = (struct wfx_vif *)vif->drv_priv; + wfx_flush_vif(wvif, queues, drop ? &dropped : NULL); + } else { + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_flush_vif(wvif, queues, drop ? &dropped : NULL); + } + wfx_tx_flush(wdev); + if (wdev->chip_frozen) + wfx_pending_drop(wdev, &dropped); + while ((skb = skb_dequeue(&dropped)) != NULL) { + hif = (struct wfx_hif_msg *)skb->data; + wvif = wdev_to_wvif(wdev, hif->interface); + ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb)); + wfx_skb_dtor(wvif, skb); + } +} diff --git a/drivers/net/wireless/silabs/wfx/data_tx.h b/drivers/net/wireless/silabs/wfx/data_tx.h new file mode 100644 index 0000000000..983470705e --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/data_tx.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Data transmitting implementation. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_TX_H
+#define WFX_DATA_TX_H
+
+#include <linux/list.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+struct wfx_tx_priv;
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_tx_policy {
+	struct list_head link;
+	int usage_count;
+	u8 rates[12];
+	bool uploaded;
+};
+
+struct wfx_tx_policy_cache {
+	struct wfx_tx_policy cache[HIF_TX_RETRY_POLICY_MAX];
+	/* FIXME: use a tree and drop the hash from tx_policy */
+	struct list_head used;
+	struct list_head free;
+	spinlock_t lock;
+};
+
+struct wfx_tx_priv {
+	ktime_t xmit_timestamp;
+	unsigned char icv_size;
+};
+
+void wfx_tx_policy_init(struct wfx_vif *wvif);
+void wfx_tx_policy_upload_work(struct work_struct *work);
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb);
+void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg);
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop);
+
+static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *tx_info;
+
+	if (!skb)
+		return NULL;
+	tx_info = IEEE80211_SKB_CB(skb);
+	return (struct wfx_tx_priv *)tx_info->rate_driver_data;
+}
+
+static inline struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
+{
+	struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data;
+	struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body;
+
+	return req;
+}
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/debug.c b/drivers/net/wireless/silabs/wfx/debug.c
new file mode 100644
index 0000000000..e8265208f9
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/debug.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include +#include + +#include "debug.h" +#include "wfx.h" +#include "sta.h" +#include "main.h" +#include "hif_tx.h" +#include "hif_tx_mib.h" + +#define CREATE_TRACE_POINTS +#include "traces.h" + +static const struct trace_print_flags hif_msg_print_map[] = { + hif_msg_list, +}; + +static const struct trace_print_flags hif_mib_print_map[] = { + hif_mib_list, +}; + +static const struct trace_print_flags wfx_reg_print_map[] = { + wfx_reg_list, +}; + +static const char *get_symbol(unsigned long val, const struct trace_print_flags *symbol_array) +{ + int i; + + for (i = 0; symbol_array[i].mask != -1; i++) { + if (val == symbol_array[i].mask) + return symbol_array[i].name; + } + + return "unknown"; +} + +const char *wfx_get_hif_name(unsigned long id) +{ + return get_symbol(id, hif_msg_print_map); +} + +const char *wfx_get_mib_name(unsigned long id) +{ + return get_symbol(id, hif_mib_print_map); +} + +const char *wfx_get_reg_name(unsigned long id) +{ + return get_symbol(id, wfx_reg_print_map); +} + +static int wfx_counters_show(struct seq_file *seq, void *v) +{ + int ret, i; + struct wfx_dev *wdev = seq->private; + struct wfx_hif_mib_extended_count_table counters[3]; + + for (i = 0; i < ARRAY_SIZE(counters); i++) { + ret = wfx_hif_get_counters_table(wdev, i, counters + i); + if (ret < 0) + return ret; + if (ret > 0) + return -EIO; + } + + seq_printf(seq, "%-24s %12s %12s %12s\n", "", "global", "iface 0", "iface 1"); + +#define PUT_COUNTER(name) \ + seq_printf(seq, "%-24s %12d %12d %12d\n", #name, \ + le32_to_cpu(counters[2].count_##name), \ + le32_to_cpu(counters[0].count_##name), \ + le32_to_cpu(counters[1].count_##name)) + + PUT_COUNTER(tx_frames); + PUT_COUNTER(tx_frames_multicast); + PUT_COUNTER(tx_frames_success); + PUT_COUNTER(tx_frames_retried); + PUT_COUNTER(tx_frames_multi_retried); + PUT_COUNTER(tx_frames_failed); + + PUT_COUNTER(ack_failed); + PUT_COUNTER(rts_success); + PUT_COUNTER(rts_failed); + + PUT_COUNTER(rx_frames); + PUT_COUNTER(rx_frames_multicast); + PUT_COUNTER(rx_frames_success); + PUT_COUNTER(rx_frames_failed); + PUT_COUNTER(drop_plcp); + PUT_COUNTER(drop_fcs); + PUT_COUNTER(drop_no_key); + PUT_COUNTER(drop_decryption); + PUT_COUNTER(drop_tkip_mic); + PUT_COUNTER(drop_bip_mic); + PUT_COUNTER(drop_cmac_icv); + PUT_COUNTER(drop_cmac_replay); + PUT_COUNTER(drop_ccmp_replay); + PUT_COUNTER(drop_duplicate); + + PUT_COUNTER(rx_bcn_miss); + PUT_COUNTER(rx_bcn_success); + PUT_COUNTER(rx_bcn_dtim); + PUT_COUNTER(rx_bcn_dtim_aid0_clr); + PUT_COUNTER(rx_bcn_dtim_aid0_set); + +#undef PUT_COUNTER + + for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++) + seq_printf(seq, "reserved[%02d]%12s %12d %12d %12d\n", i, "", + le32_to_cpu(counters[2].reserved[i]), + le32_to_cpu(counters[0].reserved[i]), + le32_to_cpu(counters[1].reserved[i])); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wfx_counters); + +static const char * const channel_names[] = { + [0] = "1M", + [1] = "2M", + [2] = "5.5M", + [3] = "11M", + /* Entries 4 and 5 does not exist */ + [6] = "6M", + [7] = "9M", + [8] = "12M", + [9] = "18M", + [10] = "24M", + [11] = "36M", + [12] = "48M", + [13] = "54M", + [14] = "MCS0", + [15] = "MCS1", + [16] = "MCS2", + [17] = "MCS3", + [18] = "MCS4", + [19] = "MCS5", + [20] = "MCS6", + [21] = "MCS7", +}; + +static int wfx_rx_stats_show(struct seq_file *seq, void *v) +{ + struct wfx_dev *wdev = seq->private; + struct wfx_hif_rx_stats *st = &wdev->rx_stats; + int i; + + mutex_lock(&wdev->rx_stats_lock); + seq_printf(seq, "Timestamp: %dus\n", 
st->date); + seq_printf(seq, "Low power clock: frequency %uHz, external %s\n", + le32_to_cpu(st->pwr_clk_freq), st->is_ext_pwr_clk ? "yes" : "no"); + seq_printf(seq, "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n", + st->nb_rx_frame, st->per_total, st->throughput); + seq_puts(seq, " Num. of PER RSSI SNR CFO\n"); + seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n"); + for (i = 0; i < ARRAY_SIZE(channel_names); i++) { + if (channel_names[i]) + seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n", + channel_names[i], + le32_to_cpu(st->nb_rx_by_rate[i]), + le16_to_cpu(st->per[i]), + (s16)le16_to_cpu(st->rssi[i]) / 100, + (s16)le16_to_cpu(st->snr[i]) / 100, + (s16)le16_to_cpu(st->cfo[i])); + } + mutex_unlock(&wdev->rx_stats_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats); + +static int wfx_tx_power_loop_show(struct seq_file *seq, void *v) +{ + struct wfx_dev *wdev = seq->private; + struct wfx_hif_tx_power_loop_info *st = &wdev->tx_power_loop_info; + int tmp; + + mutex_lock(&wdev->tx_power_loop_info_lock); + tmp = le16_to_cpu(st->tx_gain_dig); + seq_printf(seq, "Tx gain digital: %d\n", tmp); + tmp = le16_to_cpu(st->tx_gain_pa); + seq_printf(seq, "Tx gain PA: %d\n", tmp); + tmp = (s16)le16_to_cpu(st->target_pout); + seq_printf(seq, "Target Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25); + tmp = (s16)le16_to_cpu(st->p_estimation); + seq_printf(seq, "FEM Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25); + tmp = le16_to_cpu(st->vpdet); + seq_printf(seq, "Vpdet: %d mV\n", tmp); + seq_printf(seq, "Measure index: %d\n", st->measurement_index); + mutex_unlock(&wdev->tx_power_loop_info_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wfx_tx_power_loop); + +static ssize_t wfx_send_pds_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wfx_dev *wdev = file->private_data; + char *buf; + int ret; + + if (*ppos != 0) { + dev_dbg(wdev->dev, "PDS data must be written in one transaction"); + return -EBUSY; + } + buf = memdup_user(user_buf, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + *ppos = *ppos + count; + ret = wfx_send_pds(wdev, buf, count); + kfree(buf); + if (ret < 0) + return ret; + return count; +} + +static const struct file_operations wfx_send_pds_fops = { + .open = simple_open, + .write = wfx_send_pds_write, +}; + +struct dbgfs_hif_msg { + struct wfx_dev *wdev; + struct completion complete; + u8 reply[1024]; + int ret; +}; + +static ssize_t wfx_send_hif_msg_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dbgfs_hif_msg *context = file->private_data; + struct wfx_dev *wdev = context->wdev; + struct wfx_hif_msg *request; + + if (completion_done(&context->complete)) { + dev_dbg(wdev->dev, "read previous result before start a new one\n"); + return -EBUSY; + } + if (count < sizeof(struct wfx_hif_msg)) + return -EINVAL; + + /* wfx_cmd_send() checks that reply buffer is wide enough, but does not return precise + * length read. User have to know how many bytes should be read. Filling reply buffer with a + * memory pattern may help user. 
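+ *
+ * Example round-trip from user space (hypothetical request): reading MIB 0x2000
+ * (HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE) on interface 0 means writing an 8-byte
+ * wfx_hif_msg (len=0x0008, id=0x05 == HIF_REQ_ID_READ_MIB, flags=0) followed by
+ * struct wfx_hif_req_read_mib, then reading the confirmation back:
+ *
+ *   printf '\x08\x00\x05\x00\x00\x20\x00\x00' > send_hif_msg
+ *   dd if=send_hif_msg bs=16 count=1 | xxd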
+ */ + memset(context->reply, 0xFF, sizeof(context->reply)); + request = memdup_user(user_buf, count); + if (IS_ERR(request)) + return PTR_ERR(request); + if (le16_to_cpu(request->len) != count) { + kfree(request); + return -EINVAL; + } + context->ret = wfx_cmd_send(wdev, request, context->reply, sizeof(context->reply), false); + + kfree(request); + complete(&context->complete); + return count; +} + +static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dbgfs_hif_msg *context = file->private_data; + int ret; + + if (count > sizeof(context->reply)) + return -EINVAL; + ret = wait_for_completion_interruptible(&context->complete); + if (ret) + return ret; + if (context->ret < 0) + return context->ret; + /* Be careful, write() is waiting for a full message while read() only returns a payload */ + if (copy_to_user(user_buf, context->reply, count)) + return -EFAULT; + + return count; +} + +static int wfx_send_hif_msg_open(struct inode *inode, struct file *file) +{ + struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL); + + if (!context) + return -ENOMEM; + context->wdev = inode->i_private; + init_completion(&context->complete); + file->private_data = context; + return 0; +} + +static int wfx_send_hif_msg_release(struct inode *inode, struct file *file) +{ + struct dbgfs_hif_msg *context = file->private_data; + + kfree(context); + return 0; +} + +static const struct file_operations wfx_send_hif_msg_fops = { + .open = wfx_send_hif_msg_open, + .release = wfx_send_hif_msg_release, + .write = wfx_send_hif_msg_write, + .read = wfx_send_hif_msg_read, +}; + +int wfx_debug_init(struct wfx_dev *wdev) +{ + struct dentry *d; + + d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir); + debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops); + debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops); + debugfs_create_file("tx_power_loop", 0444, d, wdev, &wfx_tx_power_loop_fops); + debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops); + debugfs_create_file("send_hif_msg", 0600, d, wdev, &wfx_send_hif_msg_fops); + + return 0; +} diff --git a/drivers/net/wireless/silabs/wfx/debug.h b/drivers/net/wireless/silabs/wfx/debug.h new file mode 100644 index 0000000000..3840575e5e --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/debug.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Debugfs interface. + * + * Copyright (c) 2017-2019, Silicon Laboratories, Inc. + * Copyright (c) 2011, ST-Ericsson + */ +#ifndef WFX_DEBUG_H +#define WFX_DEBUG_H + +struct wfx_dev; + +int wfx_debug_init(struct wfx_dev *wdev); + +const char *wfx_get_hif_name(unsigned long id); +const char *wfx_get_mib_name(unsigned long id); +const char *wfx_get_reg_name(unsigned long id); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/fwio.c b/drivers/net/wireless/silabs/wfx/fwio.c new file mode 100644 index 0000000000..52c7f560b0 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/fwio.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Firmware loading. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitfield.h>
+
+#include "fwio.h"
+#include "wfx.h"
+#include "hwio.h"
+
+/* Addresses below are in SRAM area */
+#define WFX_DNLD_FIFO             0x09004000
+#define     DNLD_BLOCK_SIZE           0x0400
+#define     DNLD_FIFO_SIZE            0x8000 /* (32 * DNLD_BLOCK_SIZE) */
+/* Download Control Area (DCA) */
+#define WFX_DCA_IMAGE_SIZE        0x0900C000
+#define WFX_DCA_PUT               0x0900C004
+#define WFX_DCA_GET               0x0900C008
+#define WFX_DCA_HOST_STATUS       0x0900C00C
+#define     HOST_READY                0x87654321
+#define     HOST_INFO_READ            0xA753BD99
+#define     HOST_UPLOAD_PENDING       0xABCDDCBA
+#define     HOST_UPLOAD_COMPLETE      0xD4C64A99
+#define     HOST_OK_TO_JUMP           0x174FC882
+#define WFX_DCA_NCP_STATUS        0x0900C010
+#define     NCP_NOT_READY             0x12345678
+#define     NCP_READY                 0x87654321
+#define     NCP_INFO_READY            0xBD53EF99
+#define     NCP_DOWNLOAD_PENDING      0xABCDDCBA
+#define     NCP_DOWNLOAD_COMPLETE     0xCAFEFECA
+#define     NCP_AUTH_OK               0xD4C64A99
+#define     NCP_AUTH_FAIL             0x174FC882
+#define     NCP_PUB_KEY_RDY           0x7AB41D19
+#define WFX_DCA_FW_SIGNATURE      0x0900C014
+#define     FW_SIGNATURE_SIZE         0x40
+#define WFX_DCA_FW_HASH           0x0900C054
+#define     FW_HASH_SIZE              0x08
+#define WFX_DCA_FW_VERSION        0x0900C05C
+#define     FW_VERSION_SIZE           0x04
+#define WFX_DCA_RESERVED          0x0900C060
+#define     DCA_RESERVED_SIZE         0x20
+#define WFX_STATUS_INFO           0x0900C080
+#define WFX_BOOTLOADER_LABEL      0x0900C084
+#define     BOOTLOADER_LABEL_SIZE     0x3C
+#define WFX_PTE_INFO              0x0900C0C0
+#define     PTE_INFO_KEYSET_IDX       0x0D
+#define     PTE_INFO_SIZE             0x10
+#define WFX_ERR_INFO              0x0900C0D0
+#define     ERR_INVALID_SEC_TYPE      0x05
+#define     ERR_SIG_VERIF_FAILED      0x0F
+#define     ERR_AES_CTRL_KEY          0x10
+#define     ERR_ECC_PUB_KEY           0x11
+#define     ERR_MAC_KEY               0x18
+
+#define DCA_TIMEOUT    50 /* milliseconds */
+#define WAKEUP_TIMEOUT 200 /* milliseconds */
+
+static const char * const fwio_errors[] = {
+	[ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
+	[ERR_SIG_VERIF_FAILED] = "Signature verification failed",
+	[ERR_AES_CTRL_KEY] = "AES control key not initialized",
+	[ERR_ECC_PUB_KEY] = "ECC public key not initialized",
+	[ERR_MAC_KEY] = "MAC key not initialized",
+};
+
+/* request_firmware() allocates data using vmalloc(), which is not compatible with devices that
+ * access the buffer with DMA. The function below detects this case and allocates a bounce
+ * buffer if necessary.
+ *
+ * Note that, when in doubt, you can enable CONFIG_DEBUG_SG to ask the kernel to detect this
+ * problem at runtime (otherwise, the kernel fails silently).
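+ *
+ * The typical trap is a vmalloc()ed buffer such as fw->data itself, or an on-stack buffer
+ * when CONFIG_VMAP_STACK is set: virt_addr_valid() is false for both, and the kmemdup()
+ * bounce buffer below is used instead.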
+ * + * NOTE: it may also be possible to use 'pages' from struct firmware and avoid bounce buffer + */ +static int wfx_sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf, size_t len) +{ + int ret; + const u8 *tmp; + + if (!virt_addr_valid(buf)) { + tmp = kmemdup(buf, len, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + } else { + tmp = buf; + } + ret = wfx_sram_buf_write(wdev, addr, tmp, len); + if (tmp != buf) + kfree(tmp); + return ret; +} + +static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip, + const struct firmware **fw, int *file_offset) +{ + int keyset_file; + char filename[256]; + const char *data; + int ret; + + snprintf(filename, sizeof(filename), "%s_%02X.sec", + wdev->pdata.file_fw, keyset_chip); + ret = firmware_request_nowarn(fw, filename, wdev->dev); + if (ret) { + dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n", + filename, wdev->pdata.file_fw); + snprintf(filename, sizeof(filename), "%s.sec", wdev->pdata.file_fw); + ret = request_firmware(fw, filename, wdev->dev); + if (ret) { + dev_err(wdev->dev, "can't load %s\n", filename); + *fw = NULL; + return ret; + } + } + + data = (*fw)->data; + if (memcmp(data, "KEYSET", 6) != 0) { + /* Legacy firmware format */ + *file_offset = 0; + keyset_file = 0x90; + } else { + *file_offset = 8; + keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]); + if (keyset_file < 0) { + dev_err(wdev->dev, "%s corrupted\n", filename); + release_firmware(*fw); + *fw = NULL; + return -EINVAL; + } + } + if (keyset_file != keyset_chip) { + dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n", + keyset_file, keyset_chip); + release_firmware(*fw); + *fw = NULL; + return -ENODEV; + } + wdev->keyset = keyset_file; + return 0; +} + +static int wait_ncp_status(struct wfx_dev *wdev, u32 status) +{ + ktime_t now, start; + u32 reg; + int ret; + + start = ktime_get(); + for (;;) { + ret = wfx_sram_reg_read(wdev, WFX_DCA_NCP_STATUS, ®); + if (ret < 0) + return -EIO; + now = ktime_get(); + if (reg == status) + break; + if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT))) + return -ETIMEDOUT; + } + if (ktime_compare(now, start)) + dev_dbg(wdev->dev, "chip answer after %lldus\n", ktime_us_delta(now, start)); + else + dev_dbg(wdev->dev, "chip answer immediately\n"); + return 0; +} + +static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len) +{ + int ret; + u32 offs, bytes_done = 0; + ktime_t now, start; + + if (len % DNLD_BLOCK_SIZE) { + dev_err(wdev->dev, "firmware size is not aligned. 
Buffer overrun will occur\n"); + return -EIO; + } + offs = 0; + while (offs < len) { + start = ktime_get(); + for (;;) { + now = ktime_get(); + if (offs + DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE) + break; + if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT))) + return -ETIMEDOUT; + ret = wfx_sram_reg_read(wdev, WFX_DCA_GET, &bytes_done); + if (ret < 0) + return ret; + } + if (ktime_compare(now, start)) + dev_dbg(wdev->dev, "answer after %lldus\n", ktime_us_delta(now, start)); + + ret = wfx_sram_write_dma_safe(wdev, WFX_DNLD_FIFO + (offs % DNLD_FIFO_SIZE), + data + offs, DNLD_BLOCK_SIZE); + if (ret < 0) + return ret; + + /* The device seems to not support writing 0 in this register during first loop */ + offs += DNLD_BLOCK_SIZE; + ret = wfx_sram_reg_write(wdev, WFX_DCA_PUT, offs); + if (ret < 0) + return ret; + } + return 0; +} + +static void print_boot_status(struct wfx_dev *wdev) +{ + u32 reg; + + wfx_sram_reg_read(wdev, WFX_STATUS_INFO, ®); + if (reg == 0x12345678) + return; + wfx_sram_reg_read(wdev, WFX_ERR_INFO, ®); + if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg]) + dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]); + else + dev_info(wdev->dev, "secure boot: Error %#02x\n", reg); +} + +static int load_firmware_secure(struct wfx_dev *wdev) +{ + const struct firmware *fw = NULL; + int header_size; + int fw_offset; + ktime_t start; + u8 *buf; + int ret; + + BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE); + buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY); + ret = wait_ncp_status(wdev, NCP_INFO_READY); + if (ret) + goto error; + + wfx_sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE); + buf[BOOTLOADER_LABEL_SIZE] = 0; + dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf); + + wfx_sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE); + ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset); + if (ret) + goto error; + header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE; + + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ); + ret = wait_ncp_status(wdev, NCP_READY); + if (ret) + goto error; + + wfx_sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); /* Fifo init */ + wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00", FW_VERSION_SIZE); + wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset, + FW_SIGNATURE_SIZE); + wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_HASH, fw->data + fw_offset + FW_SIGNATURE_SIZE, + FW_HASH_SIZE); + wfx_sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size); + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING); + ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING); + if (ret) + goto error; + + start = ktime_get(); + ret = upload_firmware(wdev, fw->data + header_size, fw->size - header_size); + if (ret) + goto error; + dev_dbg(wdev->dev, "firmware load after %lldus\n", + ktime_us_delta(ktime_get(), start)); + + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE); + ret = wait_ncp_status(wdev, NCP_AUTH_OK); + /* Legacy ROM support */ + if (ret < 0) + ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY); + if (ret < 0) + goto error; + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP); + +error: + kfree(buf); + release_firmware(fw); + if (ret) + print_boot_status(wdev); + return ret; +} + +static int init_gpr(struct wfx_dev *wdev) +{ + int ret, i; + static const struct { + int index; + u32 value; + } gpr_init[] = { + { 
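+		/* The index/value pairs below are undocumented initialization constants; the
+		 * driver applies them verbatim before waking up the chip. */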
0x07, 0x208775 }, + { 0x08, 0x2EC020 }, + { 0x09, 0x3C3C3C }, + { 0x0B, 0x322C44 }, + { 0x0C, 0xA06497 }, + }; + + for (i = 0; i < ARRAY_SIZE(gpr_init); i++) { + ret = wfx_igpr_reg_write(wdev, gpr_init[i].index, gpr_init[i].value); + if (ret < 0) + return ret; + dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index, gpr_init[i].value); + } + return 0; +} + +int wfx_init_device(struct wfx_dev *wdev) +{ + int ret; + int hw_revision, hw_type; + int wakeup_timeout = 50; /* ms */ + ktime_t now, start; + u32 reg; + + reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD; + if (wdev->pdata.use_rising_clk) + reg |= CFG_CLK_RISE_EDGE; + ret = wfx_config_reg_write(wdev, reg); + if (ret < 0) { + dev_err(wdev->dev, "bus returned an error during first write access. Host configuration error?\n"); + return -EIO; + } + + ret = wfx_config_reg_read(wdev, ®); + if (ret < 0) { + dev_err(wdev->dev, "bus returned an error during first read access. Bus configuration error?\n"); + return -EIO; + } + if (reg == 0 || reg == ~0) { + dev_err(wdev->dev, "chip mute. Bus configuration error or chip wasn't reset?\n"); + return -EIO; + } + dev_dbg(wdev->dev, "initial config register value: %08x\n", reg); + + hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg); + if (hw_revision == 0) { + dev_err(wdev->dev, "bad hardware revision number: %d\n", hw_revision); + return -ENODEV; + } + hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg); + if (hw_type == 1) { + dev_notice(wdev->dev, "development hardware detected\n"); + wakeup_timeout = 2000; + } + + ret = init_gpr(wdev); + if (ret < 0) + return ret; + + ret = wfx_control_reg_write(wdev, CTRL_WLAN_WAKEUP); + if (ret < 0) + return -EIO; + start = ktime_get(); + for (;;) { + ret = wfx_control_reg_read(wdev, ®); + now = ktime_get(); + if (reg & CTRL_WLAN_READY) + break; + if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) { + dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n"); + return -ETIMEDOUT; + } + } + dev_dbg(wdev->dev, "chip wake up after %lldus\n", ktime_us_delta(now, start)); + + ret = wfx_config_reg_write_bits(wdev, CFG_CPU_RESET, 0); + if (ret < 0) + return ret; + ret = load_firmware_secure(wdev); + if (ret < 0) + return ret; + return wfx_config_reg_write_bits(wdev, + CFG_DIRECT_ACCESS_MODE | + CFG_IRQ_ENABLE_DATA | + CFG_IRQ_ENABLE_WRDY, + CFG_IRQ_ENABLE_DATA); +} diff --git a/drivers/net/wireless/silabs/wfx/fwio.h b/drivers/net/wireless/silabs/wfx/fwio.h new file mode 100644 index 0000000000..eeea61210e --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/fwio.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Firmware loading. + * + * Copyright (c) 2017-2019, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_FWIO_H +#define WFX_FWIO_H + +struct wfx_dev; + +int wfx_init_device(struct wfx_dev *wdev); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_api_cmd.h b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h new file mode 100644 index 0000000000..8b91b1d4a4 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ +/* + * WF200 hardware interface definitions + * + * Copyright (c) 2018-2020, Silicon Laboratories Inc. 
+ */ + +#ifndef WFX_HIF_API_CMD_H +#define WFX_HIF_API_CMD_H + +#include "hif_api_general.h" + +enum wfx_hif_requests_ids { + HIF_REQ_ID_RESET = 0x0a, + HIF_REQ_ID_READ_MIB = 0x05, + HIF_REQ_ID_WRITE_MIB = 0x06, + HIF_REQ_ID_START_SCAN = 0x07, + HIF_REQ_ID_STOP_SCAN = 0x08, + HIF_REQ_ID_TX = 0x04, + HIF_REQ_ID_JOIN = 0x0b, + HIF_REQ_ID_SET_PM_MODE = 0x10, + HIF_REQ_ID_SET_BSS_PARAMS = 0x11, + HIF_REQ_ID_ADD_KEY = 0x0c, + HIF_REQ_ID_REMOVE_KEY = 0x0d, + HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13, + HIF_REQ_ID_START = 0x17, + HIF_REQ_ID_BEACON_TRANSMIT = 0x18, + HIF_REQ_ID_UPDATE_IE = 0x1b, + HIF_REQ_ID_MAP_LINK = 0x1c, +}; + +enum wfx_hif_confirmations_ids { + HIF_CNF_ID_RESET = 0x0a, + HIF_CNF_ID_READ_MIB = 0x05, + HIF_CNF_ID_WRITE_MIB = 0x06, + HIF_CNF_ID_START_SCAN = 0x07, + HIF_CNF_ID_STOP_SCAN = 0x08, + HIF_CNF_ID_TX = 0x04, + HIF_CNF_ID_MULTI_TRANSMIT = 0x1e, + HIF_CNF_ID_JOIN = 0x0b, + HIF_CNF_ID_SET_PM_MODE = 0x10, + HIF_CNF_ID_SET_BSS_PARAMS = 0x11, + HIF_CNF_ID_ADD_KEY = 0x0c, + HIF_CNF_ID_REMOVE_KEY = 0x0d, + HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13, + HIF_CNF_ID_START = 0x17, + HIF_CNF_ID_BEACON_TRANSMIT = 0x18, + HIF_CNF_ID_UPDATE_IE = 0x1b, + HIF_CNF_ID_MAP_LINK = 0x1c, +}; + +enum wfx_hif_indications_ids { + HIF_IND_ID_RX = 0x84, + HIF_IND_ID_SCAN_CMPL = 0x86, + HIF_IND_ID_JOIN_COMPLETE = 0x8f, + HIF_IND_ID_SET_PM_MODE_CMPL = 0x89, + HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c, + HIF_IND_ID_EVENT = 0x85 +}; + +struct wfx_hif_req_reset { + u8 reset_stat:1; + u8 reset_all_int:1; + u8 reserved1:6; + u8 reserved2[3]; +} __packed; + +struct wfx_hif_cnf_reset { + __le32 status; +} __packed; + +struct wfx_hif_req_read_mib { + __le16 mib_id; + __le16 reserved; +} __packed; + +struct wfx_hif_cnf_read_mib { + __le32 status; + __le16 mib_id; + __le16 length; + u8 mib_data[]; +} __packed; + +struct wfx_hif_req_write_mib { + __le16 mib_id; + __le16 length; + u8 mib_data[]; +} __packed; + +struct wfx_hif_cnf_write_mib { + __le32 status; +} __packed; + +struct wfx_hif_req_update_ie { + u8 beacon:1; + u8 probe_resp:1; + u8 probe_req:1; + u8 reserved1:5; + u8 reserved2; + __le16 num_ies; + u8 ie[]; +} __packed; + +struct wfx_hif_cnf_update_ie { + __le32 status; +} __packed; + +struct wfx_hif_ssid_def { + __le32 ssid_length; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +} __packed; + +#define HIF_API_MAX_NB_SSIDS 2 +#define HIF_API_MAX_NB_CHANNELS 14 + +struct wfx_hif_req_start_scan_alt { + u8 band; + u8 maintain_current_bss:1; + u8 periodic:1; + u8 reserved1:6; + u8 disallow_ps:1; + u8 reserved2:1; + u8 short_preamble:1; + u8 reserved3:5; + u8 max_transmit_rate; + __le16 periodic_interval; + u8 reserved4; + s8 periodic_rssi_thr; + u8 num_of_probe_requests; + u8 probe_delay; + u8 num_of_ssids; + u8 num_of_channels; + __le32 min_channel_time; + __le32 max_channel_time; + __le32 tx_power_level; /* signed value */ + struct wfx_hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS]; + u8 channel_list[]; +} __packed; + +struct wfx_hif_cnf_start_scan { + __le32 status; +} __packed; + +struct wfx_hif_cnf_stop_scan { + __le32 status; +} __packed; + +enum wfx_hif_pm_mode_status { + HIF_PM_MODE_ACTIVE = 0x0, + HIF_PM_MODE_PS = 0x1, + HIF_PM_MODE_UNDETERMINED = 0x2 +}; + +struct wfx_hif_ind_scan_cmpl { + __le32 status; + u8 pm_mode; + u8 num_channels_completed; + __le16 reserved; +} __packed; + +enum wfx_hif_queue_id { + HIF_QUEUE_ID_BACKGROUND = 0x0, + HIF_QUEUE_ID_BESTEFFORT = 0x1, + HIF_QUEUE_ID_VIDEO = 0x2, + HIF_QUEUE_ID_VOICE = 0x3 +}; + +enum wfx_hif_frame_format { + HIF_FRAME_FORMAT_NON_HT = 0x0, + HIF_FRAME_FORMAT_MIXED_FORMAT_HT = 
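+	/* These values mirror what wfx_tx_get_frame_format() derives from the rate flags in
+	 * data_tx.c: no IEEE80211_TX_RC_MCS means non-HT, MCS without
+	 * IEEE80211_TX_RC_GREEN_FIELD means mixed-mode HT, and MCS with GREEN_FIELD means
+	 * greenfield HT.
+	 */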
0x1, + HIF_FRAME_FORMAT_GF_HT_11N = 0x2 +}; + +struct wfx_hif_req_tx { + /* packet_id is not interpreted by the device, so it is not necessary to declare it little + * endian + */ + u32 packet_id; + u8 max_tx_rate; + u8 queue_id:2; + u8 peer_sta_id:4; + u8 reserved1:2; + u8 more:1; + u8 fc_offset:3; + u8 after_dtim:1; + u8 reserved2:3; + u8 start_exp:1; + u8 reserved3:3; + u8 retry_policy_index:4; + __le32 reserved4; + __le32 expire_time; + u8 frame_format:4; + u8 fec_coding:1; + u8 short_gi:1; + u8 reserved5:1; + u8 stbc:1; + u8 reserved6; + u8 aggregation:1; + u8 reserved7:7; + u8 reserved8; + u8 frame[]; +} __packed; + +enum wfx_hif_qos_ackplcy { + HIF_QOS_ACKPLCY_NORMAL = 0x0, + HIF_QOS_ACKPLCY_TXNOACK = 0x1, + HIF_QOS_ACKPLCY_NOEXPACK = 0x2, + HIF_QOS_ACKPLCY_BLCKACK = 0x3 +}; + +struct wfx_hif_cnf_tx { + __le32 status; + /* packet_id is copied from struct wfx_hif_req_tx without been interpreted by the device, so + * it is not necessary to declare it little endian + */ + u32 packet_id; + u8 txed_rate; + u8 ack_failures; + u8 aggr:1; + u8 requeue:1; + u8 ack_policy:2; + u8 txop_limit:1; + u8 reserved1:3; + u8 reserved2; + __le32 media_delay; + __le32 tx_queue_delay; +} __packed; + +struct wfx_hif_cnf_multi_transmit { + u8 num_tx_confs; + u8 reserved[3]; + struct wfx_hif_cnf_tx tx_conf_payload[]; +} __packed; + +enum wfx_hif_ri_flags_encrypt { + HIF_RI_FLAGS_UNENCRYPTED = 0x0, + HIF_RI_FLAGS_WEP_ENCRYPTED = 0x1, + HIF_RI_FLAGS_TKIP_ENCRYPTED = 0x2, + HIF_RI_FLAGS_AES_ENCRYPTED = 0x3, + HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4 +}; + +struct wfx_hif_ind_rx { + __le32 status; + u8 channel_number; + u8 reserved1; + u8 rxed_rate; + u8 rcpi_rssi; + u8 encryp:3; + u8 in_aggr:1; + u8 first_aggr:1; + u8 last_aggr:1; + u8 defrag:1; + u8 beacon:1; + u8 tim:1; + u8 bitmap:1; + u8 match_ssid:1; + u8 match_bssid:1; + u8 more:1; + u8 reserved2:1; + u8 ht:1; + u8 stbc:1; + u8 match_uc_addr:1; + u8 match_mc_addr:1; + u8 match_bc_addr:1; + u8 key_type:1; + u8 key_index:4; + u8 reserved3:1; + u8 peer_sta_id:4; + u8 reserved4:2; + u8 reserved5:1; + u8 frame[]; +} __packed; + +struct wfx_hif_req_edca_queue_params { + u8 queue_id; + u8 reserved1; + u8 aifsn; + u8 reserved2; + __le16 cw_min; + __le16 cw_max; + __le16 tx_op_limit; + __le16 allowed_medium_time; + __le32 reserved3; +} __packed; + +struct wfx_hif_cnf_edca_queue_params { + __le32 status; +} __packed; + +struct wfx_hif_req_join { + u8 infrastructure_bss_mode:1; + u8 reserved1:7; + u8 band; + u8 channel_number; + u8 reserved2; + u8 bssid[ETH_ALEN]; + __le16 atim_window; + u8 short_preamble:1; + u8 reserved3:7; + u8 probe_for_join; + u8 reserved4; + u8 reserved5:2; + u8 force_no_beacon:1; + u8 force_with_ind:1; + u8 reserved6:4; + __le32 ssid_length; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + __le32 beacon_interval; + __le32 basic_rate_set; +} __packed; + +struct wfx_hif_cnf_join { + __le32 status; +} __packed; + +struct wfx_hif_ind_join_complete { + __le32 status; +} __packed; + +struct wfx_hif_req_set_bss_params { + u8 lost_count_only:1; + u8 reserved:7; + u8 beacon_lost_count; + __le16 aid; + __le32 operational_rate_set; +} __packed; + +struct wfx_hif_cnf_set_bss_params { + __le32 status; +} __packed; + +struct wfx_hif_req_set_pm_mode { + u8 enter_psm:1; + u8 reserved:6; + u8 fast_psm:1; + u8 fast_psm_idle_period; + u8 ap_psm_change_period; + u8 min_auto_ps_poll_period; +} __packed; + +struct wfx_hif_cnf_set_pm_mode { + __le32 status; +} __packed; + +struct wfx_hif_ind_set_pm_mode_cmpl { + __le32 status; + u8 pm_mode; + u8 reserved[3]; +} __packed; + +struct 
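+/* The '3 - queue_id' conversion in data_tx.c maps the Linux AC order (VO=0, VI=1, BE=2,
+ * BK=3) onto the reversed firmware order of enum wfx_hif_queue_id above (BACKGROUND=0 ...
+ * VOICE=3), so IEEE80211_AC_VO becomes HIF_QUEUE_ID_VOICE.
+ */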
wfx_hif_req_start { + u8 mode; + u8 band; + u8 channel_number; + u8 reserved1; + __le32 reserved2; + __le32 beacon_interval; + u8 dtim_period; + u8 short_preamble:1; + u8 reserved3:7; + u8 reserved4; + u8 ssid_length; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + __le32 basic_rate_set; +} __packed; + +struct wfx_hif_cnf_start { + __le32 status; +} __packed; + +struct wfx_hif_req_beacon_transmit { + u8 enable_beaconing; + u8 reserved[3]; +} __packed; + +struct wfx_hif_cnf_beacon_transmit { + __le32 status; +} __packed; + +#define HIF_LINK_ID_MAX 14 +#define HIF_LINK_ID_NOT_ASSOCIATED (HIF_LINK_ID_MAX + 1) + +struct wfx_hif_req_map_link { + u8 mac_addr[ETH_ALEN]; + u8 unmap:1; + u8 mfpc:1; + u8 reserved:6; + u8 peer_sta_id; +} __packed; + +struct wfx_hif_cnf_map_link { + __le32 status; +} __packed; + +struct wfx_hif_ind_suspend_resume_tx { + u8 resume:1; + u8 reserved1:2; + u8 bc_mc_only:1; + u8 reserved2:4; + u8 reserved3; + __le16 peer_sta_set; +} __packed; + + +#define MAX_KEY_ENTRIES 24 +#define HIF_API_WEP_KEY_DATA_SIZE 16 +#define HIF_API_TKIP_KEY_DATA_SIZE 16 +#define HIF_API_RX_MIC_KEY_SIZE 8 +#define HIF_API_TX_MIC_KEY_SIZE 8 +#define HIF_API_AES_KEY_DATA_SIZE 16 +#define HIF_API_WAPI_KEY_DATA_SIZE 16 +#define HIF_API_MIC_KEY_DATA_SIZE 16 +#define HIF_API_IGTK_KEY_DATA_SIZE 16 +#define HIF_API_RX_SEQUENCE_COUNTER_SIZE 8 +#define HIF_API_IPN_SIZE 8 + +enum wfx_hif_key_type { + HIF_KEY_TYPE_WEP_DEFAULT = 0x0, + HIF_KEY_TYPE_WEP_PAIRWISE = 0x1, + HIF_KEY_TYPE_TKIP_GROUP = 0x2, + HIF_KEY_TYPE_TKIP_PAIRWISE = 0x3, + HIF_KEY_TYPE_AES_GROUP = 0x4, + HIF_KEY_TYPE_AES_PAIRWISE = 0x5, + HIF_KEY_TYPE_WAPI_GROUP = 0x6, + HIF_KEY_TYPE_WAPI_PAIRWISE = 0x7, + HIF_KEY_TYPE_IGTK_GROUP = 0x8, + HIF_KEY_TYPE_NONE = 0x9 +}; + +struct wfx_hif_wep_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 reserved; + u8 key_length; + u8 key_data[HIF_API_WEP_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_wep_group_key { + u8 key_id; + u8 key_length; + u8 reserved[2]; + u8 key_data[HIF_API_WEP_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_tkip_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 reserved[2]; + u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE]; + u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE]; + u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE]; +} __packed; + +struct wfx_hif_tkip_group_key { + u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE]; + u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE]; + u8 key_id; + u8 reserved[3]; + u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE]; +} __packed; + +struct wfx_hif_aes_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 reserved[2]; + u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_aes_group_key { + u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE]; + u8 key_id; + u8 reserved[3]; + u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE]; +} __packed; + +struct wfx_hif_wapi_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 key_id; + u8 reserved; + u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE]; + u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_wapi_group_key { + u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE]; + u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE]; + u8 key_id; + u8 reserved[3]; +} __packed; + +struct wfx_hif_igtk_group_key { + u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE]; + u8 key_id; + u8 reserved[3]; + u8 ipn[HIF_API_IPN_SIZE]; +} __packed; + +struct wfx_hif_req_add_key { + u8 type; + u8 entry_index; + u8 int_id:2; + u8 reserved1:6; + u8 reserved2; + union { + struct wfx_hif_wep_pairwise_key wep_pairwise_key; + struct wfx_hif_wep_group_key 
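+		/* The union member is selected by 'type' above: e.g. a hypothetical pairwise
+		 * CCMP key would be installed with type == HIF_KEY_TYPE_AES_PAIRWISE together
+		 * with key.aes_pairwise_key. */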
wep_group_key; + struct wfx_hif_tkip_pairwise_key tkip_pairwise_key; + struct wfx_hif_tkip_group_key tkip_group_key; + struct wfx_hif_aes_pairwise_key aes_pairwise_key; + struct wfx_hif_aes_group_key aes_group_key; + struct wfx_hif_wapi_pairwise_key wapi_pairwise_key; + struct wfx_hif_wapi_group_key wapi_group_key; + struct wfx_hif_igtk_group_key igtk_group_key; + } key; +} __packed; + +struct wfx_hif_cnf_add_key { + __le32 status; +} __packed; + +struct wfx_hif_req_remove_key { + u8 entry_index; + u8 reserved[3]; +} __packed; + +struct wfx_hif_cnf_remove_key { + __le32 status; +} __packed; + +enum wfx_hif_event_ind { + HIF_EVENT_IND_BSSLOST = 0x1, + HIF_EVENT_IND_BSSREGAINED = 0x2, + HIF_EVENT_IND_RCPI_RSSI = 0x3, + HIF_EVENT_IND_PS_MODE_ERROR = 0x4, + HIF_EVENT_IND_INACTIVITY = 0x5 +}; + +enum wfx_hif_ps_mode_error { + HIF_PS_ERROR_NO_ERROR = 0, + HIF_PS_ERROR_AP_NOT_RESP_TO_POLL = 1, + HIF_PS_ERROR_AP_NOT_RESP_TO_UAPSD_TRIGGER = 2, + HIF_PS_ERROR_AP_SENT_UNICAST_IN_DOZE = 3, + HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4 +}; + +struct wfx_hif_ind_event { + __le32 event_id; + union { + u8 rcpi_rssi; + __le32 ps_mode_error; + __le32 peer_sta_set; + } event_data; +} __packed; + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_api_general.h b/drivers/net/wireless/silabs/wfx/hif_api_general.h new file mode 100644 index 0000000000..4d400fdc22 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_api_general.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ +/* + * WF200 hardware interface definitions + * + * Copyright (c) 2018-2020, Silicon Laboratories Inc. + */ + +#ifndef WFX_HIF_API_GENERAL_H +#define WFX_HIF_API_GENERAL_H + +#include +#include + +#define HIF_ID_IS_INDICATION 0x80 +#define HIF_COUNTER_MAX 7 + +struct wfx_hif_msg { + __le16 len; + u8 id; + u8 reserved:1; + u8 interface:2; + u8 seqnum:3; + u8 encrypted:2; + u8 body[]; +} __packed; + +enum wfx_hif_general_requests_ids { + HIF_REQ_ID_CONFIGURATION = 0x09, + HIF_REQ_ID_CONTROL_GPIO = 0x26, + HIF_REQ_ID_SET_SL_MAC_KEY = 0x27, + HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28, + HIF_REQ_ID_SL_CONFIGURE = 0x29, + HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a, + HIF_REQ_ID_PTA_SETTINGS = 0x2b, + HIF_REQ_ID_PTA_PRIORITY = 0x2c, + HIF_REQ_ID_PTA_STATE = 0x2d, + HIF_REQ_ID_SHUT_DOWN = 0x32, +}; + +enum wfx_hif_general_confirmations_ids { + HIF_CNF_ID_CONFIGURATION = 0x09, + HIF_CNF_ID_CONTROL_GPIO = 0x26, + HIF_CNF_ID_SET_SL_MAC_KEY = 0x27, + HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28, + HIF_CNF_ID_SL_CONFIGURE = 0x29, + HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a, + HIF_CNF_ID_PTA_SETTINGS = 0x2b, + HIF_CNF_ID_PTA_PRIORITY = 0x2c, + HIF_CNF_ID_PTA_STATE = 0x2d, + HIF_CNF_ID_SHUT_DOWN = 0x32, +}; + +enum wfx_hif_general_indications_ids { + HIF_IND_ID_EXCEPTION = 0xe0, + HIF_IND_ID_STARTUP = 0xe1, + HIF_IND_ID_WAKEUP = 0xe2, + HIF_IND_ID_GENERIC = 0xe3, + HIF_IND_ID_ERROR = 0xe4, + HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5 +}; + +#define HIF_STATUS_SUCCESS (cpu_to_le32(0x0000)) +#define HIF_STATUS_FAIL (cpu_to_le32(0x0001)) +#define HIF_STATUS_INVALID_PARAMETER (cpu_to_le32(0x0002)) +#define HIF_STATUS_WARNING (cpu_to_le32(0x0003)) +#define HIF_STATUS_UNKNOWN_REQUEST (cpu_to_le32(0x0004)) +#define HIF_STATUS_RX_FAIL_DECRYPT (cpu_to_le32(0x0010)) +#define HIF_STATUS_RX_FAIL_MIC (cpu_to_le32(0x0011)) +#define HIF_STATUS_RX_FAIL_NO_KEY (cpu_to_le32(0x0012)) +#define HIF_STATUS_TX_FAIL_RETRIES (cpu_to_le32(0x0013)) +#define HIF_STATUS_TX_FAIL_TIMEOUT (cpu_to_le32(0x0014)) +#define HIF_STATUS_TX_FAIL_REQUEUE (cpu_to_le32(0x0015)) +#define 
HIF_STATUS_REFUSED (cpu_to_le32(0x0016)) +#define HIF_STATUS_BUSY (cpu_to_le32(0x0017)) +#define HIF_STATUS_SLK_SET_KEY_SUCCESS (cpu_to_le32(0x005A)) +#define HIF_STATUS_SLK_SET_KEY_ALREADY_BURNED (cpu_to_le32(0x006B)) +#define HIF_STATUS_SLK_SET_KEY_DISALLOWED_MODE (cpu_to_le32(0x007C)) +#define HIF_STATUS_SLK_SET_KEY_UNKNOWN_MODE (cpu_to_le32(0x008D)) +#define HIF_STATUS_SLK_NEGO_SUCCESS (cpu_to_le32(0x009E)) +#define HIF_STATUS_SLK_NEGO_FAILED (cpu_to_le32(0x00AF)) +#define HIF_STATUS_ROLLBACK_SUCCESS (cpu_to_le32(0x1234)) +#define HIF_STATUS_ROLLBACK_FAIL (cpu_to_le32(0x1256)) + +enum wfx_hif_api_rate_index { + API_RATE_INDEX_B_1MBPS = 0, + API_RATE_INDEX_B_2MBPS = 1, + API_RATE_INDEX_B_5P5MBPS = 2, + API_RATE_INDEX_B_11MBPS = 3, + API_RATE_INDEX_PBCC_22MBPS = 4, + API_RATE_INDEX_PBCC_33MBPS = 5, + API_RATE_INDEX_G_6MBPS = 6, + API_RATE_INDEX_G_9MBPS = 7, + API_RATE_INDEX_G_12MBPS = 8, + API_RATE_INDEX_G_18MBPS = 9, + API_RATE_INDEX_G_24MBPS = 10, + API_RATE_INDEX_G_36MBPS = 11, + API_RATE_INDEX_G_48MBPS = 12, + API_RATE_INDEX_G_54MBPS = 13, + API_RATE_INDEX_N_6P5MBPS = 14, + API_RATE_INDEX_N_13MBPS = 15, + API_RATE_INDEX_N_19P5MBPS = 16, + API_RATE_INDEX_N_26MBPS = 17, + API_RATE_INDEX_N_39MBPS = 18, + API_RATE_INDEX_N_52MBPS = 19, + API_RATE_INDEX_N_58P5MBPS = 20, + API_RATE_INDEX_N_65MBPS = 21, + API_RATE_NUM_ENTRIES = 22 +}; + +struct wfx_hif_ind_startup { + __le32 status; + __le16 hardware_id; + u8 opn[14]; + u8 uid[8]; + __le16 num_inp_ch_bufs; + __le16 size_inp_ch_buf; + u8 num_links_ap; + u8 num_interfaces; + u8 mac_addr[2][ETH_ALEN]; + u8 api_version_minor; + u8 api_version_major; + u8 link_mode:2; + u8 reserved1:6; + u8 reserved2; + u8 reserved3; + u8 reserved4; + u8 firmware_build; + u8 firmware_minor; + u8 firmware_major; + u8 firmware_type; + u8 disabled_channel_list[2]; + u8 region_sel_mode:4; + u8 reserved5:4; + u8 phy1_region:3; + u8 phy0_region:3; + u8 otp_phy_ver:2; + __le32 supported_rate_mask; + u8 firmware_label[128]; +} __packed; + +struct wfx_hif_ind_wakeup { +} __packed; + +struct wfx_hif_req_configuration { + __le16 length; + u8 pds_data[]; +} __packed; + +struct wfx_hif_cnf_configuration { + __le32 status; +} __packed; + +enum wfx_hif_gpio_mode { + HIF_GPIO_MODE_D0 = 0x0, + HIF_GPIO_MODE_D1 = 0x1, + HIF_GPIO_MODE_OD0 = 0x2, + HIF_GPIO_MODE_OD1 = 0x3, + HIF_GPIO_MODE_TRISTATE = 0x4, + HIF_GPIO_MODE_TOGGLE = 0x5, + HIF_GPIO_MODE_READ = 0x6 +}; + +struct wfx_hif_req_control_gpio { + u8 gpio_label; + u8 gpio_mode; +} __packed; + +struct wfx_hif_cnf_control_gpio { + __le32 status; + __le32 value; +} __packed; + +enum wfx_hif_generic_indication_type { + HIF_GENERIC_INDICATION_TYPE_RAW = 0x0, + HIF_GENERIC_INDICATION_TYPE_STRING = 0x1, + HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2, + HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO = 0x3, +}; + +struct wfx_hif_rx_stats { + __le32 nb_rx_frame; + __le32 nb_crc_frame; + __le32 per_total; + __le32 throughput; + __le32 nb_rx_by_rate[API_RATE_NUM_ENTRIES]; + __le16 per[API_RATE_NUM_ENTRIES]; + __le16 snr[API_RATE_NUM_ENTRIES]; /* signed value */ + __le16 rssi[API_RATE_NUM_ENTRIES]; /* signed value */ + __le16 cfo[API_RATE_NUM_ENTRIES]; /* signed value */ + __le32 date; + __le32 pwr_clk_freq; + u8 is_ext_pwr_clk; + s8 current_temp; +} __packed; + +struct wfx_hif_tx_power_loop_info { + __le16 tx_gain_dig; + __le16 tx_gain_pa; + __le16 target_pout; /* signed value */ + __le16 p_estimation; /* signed value */ + __le16 vpdet; + u8 measurement_index; + u8 reserved; +} __packed; + +struct wfx_hif_ind_generic { + __le32 type; + union 
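+/* The API_RATE_INDEX_* values above index the per-rate arrays of struct wfx_hif_rx_stats
+ * below, and match channel_names[] in debug.c (where entries 4 and 5, the unused PBCC
+ * rates, have no name).
+ */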
{ + struct wfx_hif_rx_stats rx_stats; + struct wfx_hif_tx_power_loop_info tx_power_loop_info; + } data; +} __packed; + +enum wfx_hif_error { + HIF_ERROR_FIRMWARE_ROLLBACK = 0x00, + HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x01, + HIF_ERROR_SLK_OUTDATED_SESSION_KEY = 0x02, + HIF_ERROR_SLK_SESSION_KEY = 0x03, + HIF_ERROR_OOR_VOLTAGE = 0x04, + HIF_ERROR_PDS_PAYLOAD = 0x05, + HIF_ERROR_OOR_TEMPERATURE = 0x06, + HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE = 0x07, + HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED = 0x08, + HIF_ERROR_SLK_OVERFLOW = 0x09, + HIF_ERROR_SLK_DECRYPTION = 0x0a, + HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE = 0x0b, + HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW = 0x0c, + HIF_ERROR_HIF_RX_DATA_TOO_LARGE = 0x0e, + HIF_ERROR_HIF_TX_QUEUE_FULL = 0x0d, + HIF_ERROR_HIF_BUS = 0x0f, + HIF_ERROR_PDS_TESTFEATURE = 0x10, + HIF_ERROR_SLK_UNCONFIGURED = 0x11, +}; + +struct wfx_hif_ind_error { + __le32 type; + u8 data[]; +} __packed; + +struct wfx_hif_ind_exception { + __le32 type; + u8 data[]; +} __packed; + +enum wfx_hif_secure_link_state { + SEC_LINK_UNAVAILABLE = 0x0, + SEC_LINK_RESERVED = 0x1, + SEC_LINK_EVAL = 0x2, + SEC_LINK_ENFORCED = 0x3 +}; + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_api_mib.h b/drivers/net/wireless/silabs/wfx/hif_api_mib.h new file mode 100644 index 0000000000..7b68b83866 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_api_mib.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ +/* + * WF200 hardware interface definitions + * + * Copyright (c) 2018-2020, Silicon Laboratories Inc. + */ + +#ifndef WFX_HIF_API_MIB_H +#define WFX_HIF_API_MIB_H + +#include "hif_api_general.h" + +#define HIF_API_IPV4_ADDRESS_SIZE 4 +#define HIF_API_IPV6_ADDRESS_SIZE 16 + +enum wfx_hif_mib_ids { + HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000, + HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001, + HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002, + HIF_MIB_ID_CCA_CONFIG = 0x2003, + HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010, + HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011, + HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012, + HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013, + HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014, + HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015, + HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016, + HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017, + HIF_MIB_ID_SET_DATA_FILTERING = 0x2018, + HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019, + HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A, + HIF_MIB_ID_RX_FILTER = 0x201B, + HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C, + HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D, + HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030, + HIF_MIB_ID_TSF_COUNTER = 0x2031, + HIF_MIB_ID_STATISTICS_TABLE = 0x2032, + HIF_MIB_ID_COUNTERS_TABLE = 0x2033, + HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034, + HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035, + HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040, + HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041, + HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042, + HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043, + HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044, + HIF_MIB_ID_SLOT_TIME = 0x2045, + HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046, + HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047, + HIF_MIB_ID_TEMPLATE_FRAME = 0x2048, + HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049, + HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A, + HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B, + HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C, + HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D, + HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E, + HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F, + HIF_MIB_ID_PROTECTED_MGMT_POLICY = 
0x2050, + HIF_MIB_ID_SET_HT_PROTECTION = 0x2051, + HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052, + HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053, + HIF_MIB_ID_INACTIVITY_TIMER = 0x2054, + HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055, + HIF_MIB_ID_BEACON_STATS = 0x2056, +}; + +enum wfx_hif_op_power_mode { + HIF_OP_POWER_MODE_ACTIVE = 0x0, + HIF_OP_POWER_MODE_DOZE = 0x1, + HIF_OP_POWER_MODE_QUIESCENT = 0x2 +}; + +struct wfx_hif_mib_gl_operational_power_mode { + u8 power_mode:4; + u8 reserved1:3; + u8 wup_ind_activation:1; + u8 reserved2[3]; +} __packed; + +struct wfx_hif_mib_gl_set_multi_msg { + u8 enable_multi_tx_conf:1; + u8 reserved1:7; + u8 reserved2[3]; +} __packed; + +enum wfx_hif_arp_ns_frame_treatment { + HIF_ARP_NS_FILTERING_DISABLE = 0x0, + HIF_ARP_NS_FILTERING_ENABLE = 0x1, + HIF_ARP_NS_REPLY_ENABLE = 0x2 +}; + +struct wfx_hif_mib_arp_ip_addr_table { + u8 condition_idx; + u8 arp_enable; + u8 reserved[2]; + u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE]; +} __packed; + +struct wfx_hif_mib_rx_filter { + u8 reserved1:1; + u8 bssid_filter:1; + u8 reserved2:1; + u8 fwd_probe_req:1; + u8 keep_alive_filter:1; + u8 reserved3:3; + u8 reserved4[3]; +} __packed; + +struct wfx_hif_ie_table_entry { + u8 ie_id; + u8 has_changed:1; + u8 no_longer:1; + u8 has_appeared:1; + u8 reserved:1; + u8 num_match_data:4; + u8 oui[3]; + u8 match_data[3]; +} __packed; + +struct wfx_hif_mib_bcn_filter_table { + __le32 num_of_info_elmts; + struct wfx_hif_ie_table_entry ie_table[]; +} __packed; + +enum wfx_hif_beacon_filter { + HIF_BEACON_FILTER_DISABLE = 0x0, + HIF_BEACON_FILTER_ENABLE = 0x1, + HIF_BEACON_FILTER_AUTO_ERP = 0x2 +}; + +struct wfx_hif_mib_bcn_filter_enable { + __le32 enable; + __le32 bcn_count; +} __packed; + +struct wfx_hif_mib_extended_count_table { + __le32 count_drop_plcp; + __le32 count_drop_fcs; + __le32 count_tx_frames; + __le32 count_rx_frames; + __le32 count_rx_frames_failed; + __le32 count_drop_decryption; + __le32 count_drop_tkip_mic; + __le32 count_drop_no_key; + __le32 count_tx_frames_multicast; + __le32 count_tx_frames_success; + __le32 count_tx_frames_failed; + __le32 count_tx_frames_retried; + __le32 count_tx_frames_multi_retried; + __le32 count_drop_duplicate; + __le32 count_rts_success; + __le32 count_rts_failed; + __le32 count_ack_failed; + __le32 count_rx_frames_multicast; + __le32 count_rx_frames_success; + __le32 count_drop_cmac_icv; + __le32 count_drop_cmac_replay; + __le32 count_drop_ccmp_replay; + __le32 count_drop_bip_mic; + __le32 count_rx_bcn_success; + __le32 count_rx_bcn_miss; + __le32 count_rx_bcn_dtim; + __le32 count_rx_bcn_dtim_aid0_clr; + __le32 count_rx_bcn_dtim_aid0_set; + __le32 reserved[12]; +} __packed; + +struct wfx_hif_mib_count_table { + __le32 count_drop_plcp; + __le32 count_drop_fcs; + __le32 count_tx_frames; + __le32 count_rx_frames; + __le32 count_rx_frames_failed; + __le32 count_drop_decryption; + __le32 count_drop_tkip_mic; + __le32 count_drop_no_key; + __le32 count_tx_frames_multicast; + __le32 count_tx_frames_success; + __le32 count_tx_frames_failed; + __le32 count_tx_frames_retried; + __le32 count_tx_frames_multi_retried; + __le32 count_drop_duplicate; + __le32 count_rts_success; + __le32 count_rts_failed; + __le32 count_ack_failed; + __le32 count_rx_frames_multicast; + __le32 count_rx_frames_success; + __le32 count_drop_cmac_icv; + __le32 count_drop_cmac_replay; + __le32 count_drop_ccmp_replay; + __le32 count_drop_bip_mic; +} __packed; + +struct wfx_hif_mib_mac_address { + u8 mac_addr[ETH_ALEN]; + __le16 reserved; +} __packed; + +struct 
wfx_hif_mib_wep_default_key_id { + u8 wep_default_key_id; + u8 reserved[3]; +} __packed; + +struct wfx_hif_mib_dot11_rts_threshold { + __le32 threshold; +} __packed; + +struct wfx_hif_mib_slot_time { + __le32 slot_time; +} __packed; + +struct wfx_hif_mib_current_tx_power_level { + __le32 power_level; /* signed value */ +} __packed; + +struct wfx_hif_mib_non_erp_protection { + u8 use_cts_to_self:1; + u8 reserved1:7; + u8 reserved2[3]; +} __packed; + +enum wfx_hif_tmplt { + HIF_TMPLT_PRBREQ = 0x0, + HIF_TMPLT_BCN = 0x1, + HIF_TMPLT_NULL = 0x2, + HIF_TMPLT_QOSNUL = 0x3, + HIF_TMPLT_PSPOLL = 0x4, + HIF_TMPLT_PRBRES = 0x5, + HIF_TMPLT_ARP = 0x6, + HIF_TMPLT_NA = 0x7 +}; + +#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700 + +struct wfx_hif_mib_template_frame { + u8 frame_type; + u8 init_rate:7; + u8 mode:1; + __le16 frame_length; + u8 frame[]; +} __packed; + +struct wfx_hif_mib_beacon_wake_up_period { + u8 wakeup_period_min; + u8 receive_dtim:1; + u8 reserved1:7; + u8 wakeup_period_max; + u8 reserved2; +} __packed; + +struct wfx_hif_mib_rcpi_rssi_threshold { + u8 detection:1; + u8 rcpi_rssi:1; + u8 upperthresh:1; + u8 lowerthresh:1; + u8 reserved:4; + u8 lower_threshold; + u8 upper_threshold; + u8 rolling_average_count; +} __packed; + +#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16 + +struct wfx_hif_mib_block_ack_policy { + u8 block_ack_tx_tid_policy; + u8 reserved1; + u8 block_ack_rx_tid_policy; + u8 block_ack_rx_max_buffer_size; +} __packed; + +enum wfx_hif_mpdu_start_spacing { + HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0, + HIF_MPDU_START_SPACING_QUARTER = 0x1, + HIF_MPDU_START_SPACING_HALF = 0x2, + HIF_MPDU_START_SPACING_ONE = 0x3, + HIF_MPDU_START_SPACING_TWO = 0x4, + HIF_MPDU_START_SPACING_FOUR = 0x5, + HIF_MPDU_START_SPACING_EIGHT = 0x6, + HIF_MPDU_START_SPACING_SIXTEEN = 0x7 +}; + +struct wfx_hif_mib_set_association_mode { + u8 preambtype_use:1; + u8 mode:1; + u8 rateset:1; + u8 spacing:1; + u8 reserved1:4; + u8 short_preamble:1; + u8 reserved2:7; + u8 greenfield:1; + u8 reserved3:7; + u8 mpdu_start_spacing; + __le32 basic_rate_set; +} __packed; + +struct wfx_hif_mib_set_uapsd_information { + u8 trig_bckgrnd:1; + u8 trig_be:1; + u8 trig_video:1; + u8 trig_voice:1; + u8 reserved1:4; + u8 deliv_bckgrnd:1; + u8 deliv_be:1; + u8 deliv_video:1; + u8 deliv_voice:1; + u8 reserved2:4; + __le16 min_auto_trigger_interval; + __le16 max_auto_trigger_interval; + __le16 auto_trigger_step; +} __packed; + +struct wfx_hif_tx_rate_retry_policy { + u8 policy_index; + u8 short_retry_count; + u8 long_retry_count; + u8 first_rate_sel:2; + u8 terminate:1; + u8 count_init:1; + u8 reserved1:4; + u8 rate_recovery_count; + u8 reserved2[3]; + u8 rates[12]; +} __packed; + +#define HIF_TX_RETRY_POLICY_MAX 15 +#define HIF_TX_RETRY_POLICY_INVALID HIF_TX_RETRY_POLICY_MAX + +struct wfx_hif_mib_set_tx_rate_retry_policy { + u8 num_tx_rate_policies; + u8 reserved[3]; + struct wfx_hif_tx_rate_retry_policy tx_rate_retry_policy[]; +} __packed; + +struct wfx_hif_mib_protected_mgmt_policy { + u8 pmf_enable:1; + u8 unpmf_allowed:1; + u8 host_enc_auth_frames:1; + u8 reserved1:5; + u8 reserved2[3]; +} __packed; + +struct wfx_hif_mib_keep_alive_period { + __le16 keep_alive_period; + u8 reserved[2]; +} __packed; + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_rx.c b/drivers/net/wireless/silabs/wfx/hif_rx.c new file mode 100644 index 0000000000..64ca8acb8e --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_rx.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Handling of the chip-to-host events (aka indications) 
of the hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "hif_rx.h" +#include "wfx.h" +#include "scan.h" +#include "bh.h" +#include "sta.h" +#include "data_rx.h" +#include "hif_api_cmd.h" + +static int wfx_hif_generic_confirm(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + /* All confirm messages start with status */ + int status = le32_to_cpup((__le32 *)buf); + int cmd = hif->id; + int len = le16_to_cpu(hif->len) - 4; /* drop header */ + + WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error"); + + if (!wdev->hif_cmd.buf_send) { + dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd); + return -EINVAL; + } + + if (cmd != wdev->hif_cmd.buf_send->id) { + dev_warn(wdev->dev, "chip response mismatch request: 0x%.2x vs 0x%.2x\n", + cmd, wdev->hif_cmd.buf_send->id); + return -EINVAL; + } + + if (wdev->hif_cmd.buf_recv) { + if (wdev->hif_cmd.len_recv >= len && len > 0) + memcpy(wdev->hif_cmd.buf_recv, buf, len); + else + status = -EIO; + } + wdev->hif_cmd.ret = status; + + complete(&wdev->hif_cmd.done); + return status; +} + +static int wfx_hif_tx_confirm(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_cnf_tx *body = buf; + + wfx_tx_confirm_cb(wdev, body); + return 0; +} + +static int wfx_hif_multi_tx_confirm(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_cnf_multi_transmit *body = buf; + int i; + + WARN(body->num_tx_confs <= 0, "corrupted message"); + for (i = 0; i < body->num_tx_confs; i++) + wfx_tx_confirm_cb(wdev, &body->tx_conf_payload[i]); + return 0; +} + +static int wfx_hif_startup_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_startup *body = buf; + + if (body->status || body->firmware_type > 4) { + dev_err(wdev->dev, "received invalid startup indication"); + return -EINVAL; + } + memcpy(&wdev->hw_caps, body, sizeof(struct wfx_hif_ind_startup)); + complete(&wdev->firmware_ready); + return 0; +} + +static int wfx_hif_wakeup_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + if (!wdev->pdata.gpio_wakeup || gpiod_get_value(wdev->pdata.gpio_wakeup) == 0) { + dev_warn(wdev->dev, "unexpected wake-up indication\n"); + return -EIO; + } + return 0; +} + +static int wfx_hif_receive_indication(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, + const void *buf, struct sk_buff *skb) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + const struct wfx_hif_ind_rx *body = buf; + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + skb_pull(skb, sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_ind_rx)); + wfx_rx_cb(wvif, body, skb); + + return 0; +} + +static int wfx_hif_event_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + const struct wfx_hif_ind_event *body = buf; + int type = le32_to_cpu(body->event_id); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + + switch (type) { + case HIF_EVENT_IND_RCPI_RSSI: + wfx_event_report_rssi(wvif, body->event_data.rcpi_rssi); + break; + case HIF_EVENT_IND_BSSLOST: + schedule_delayed_work(&wvif->beacon_loss_work, 0); + break; + case HIF_EVENT_IND_BSSREGAINED: + 
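+		/* Editor's illustrative note (not part of the original patch): the
+		 * BSSLOST case above arms beacon_loss_work with a zero delay and
+		 * this case merely cancels it, so the two indications pair up as:
+		 *
+		 *   schedule_delayed_work(&wvif->beacon_loss_work, 0);  (BSSLOST)
+		 *   cancel_delayed_work(&wvif->beacon_loss_work);       (BSSREGAINED)
+		 *
+		 * A BSSREGAINED that arrives while the work is still queued
+		 * therefore suppresses the beacon-loss recovery entirely.
+		 */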
cancel_delayed_work(&wvif->beacon_loss_work); + dev_dbg(wdev->dev, "ignore BSSREGAINED indication\n"); + break; + case HIF_EVENT_IND_PS_MODE_ERROR: + dev_warn(wdev->dev, "error while processing power save request: %d\n", + le32_to_cpu(body->event_data.ps_mode_error)); + break; + default: + dev_warn(wdev->dev, "unhandled event indication: %.2x\n", type); + break; + } + return 0; +} + +static int wfx_hif_pm_mode_complete_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + complete(&wvif->set_pm_mode_complete); + + return 0; +} + +static int wfx_hif_scan_complete_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + const struct wfx_hif_ind_scan_cmpl *body = buf; + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + + wfx_scan_complete(wvif, body->num_channels_completed); + + return 0; +} + +static int wfx_hif_join_complete_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + dev_warn(wdev->dev, "unattended JoinCompleteInd\n"); + + return 0; +} + +static int wfx_hif_suspend_resume_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_suspend_resume_tx *body = buf; + struct wfx_vif *wvif; + + if (body->bc_mc_only) { + wvif = wdev_to_wvif(wdev, hif->interface); + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + if (body->resume) + wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE); + else + wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP); + } else { + WARN(body->peer_sta_set, "misunderstood indication"); + WARN(hif->interface != 2, "misunderstood indication"); + if (body->resume) + wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE); + else + wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP); + } + + return 0; +} + +static int wfx_hif_generic_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_generic *body = buf; + int type = le32_to_cpu(body->type); + + switch (type) { + case HIF_GENERIC_INDICATION_TYPE_RAW: + return 0; + case HIF_GENERIC_INDICATION_TYPE_STRING: + dev_info(wdev->dev, "firmware says: %s\n", (char *)&body->data); + return 0; + case HIF_GENERIC_INDICATION_TYPE_RX_STATS: + mutex_lock(&wdev->rx_stats_lock); + /* Older firmware send a generic indication beside RxStats */ + if (!wfx_api_older_than(wdev, 1, 4)) + dev_info(wdev->dev, "Rx test ongoing. 
Temperature: %d degrees C\n", + body->data.rx_stats.current_temp); + memcpy(&wdev->rx_stats, &body->data.rx_stats, sizeof(wdev->rx_stats)); + mutex_unlock(&wdev->rx_stats_lock); + return 0; + case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO: + mutex_lock(&wdev->tx_power_loop_info_lock); + memcpy(&wdev->tx_power_loop_info, &body->data.tx_power_loop_info, + sizeof(wdev->tx_power_loop_info)); + mutex_unlock(&wdev->tx_power_loop_info_lock); + return 0; + default: + dev_err(wdev->dev, "generic_indication: unknown indication type: %#.8x\n", type); + return -EIO; + } +} + +static const struct { + int val; + const char *str; + bool has_param; +} hif_errors[] = { + { HIF_ERROR_FIRMWARE_ROLLBACK, + "rollback status" }, + { HIF_ERROR_FIRMWARE_DEBUG_ENABLED, + "debug feature enabled" }, + { HIF_ERROR_PDS_PAYLOAD, + "PDS version is not supported" }, + { HIF_ERROR_PDS_TESTFEATURE, + "PDS ask for an unknown test mode" }, + { HIF_ERROR_OOR_VOLTAGE, + "out-of-range power supply voltage", true }, + { HIF_ERROR_OOR_TEMPERATURE, + "out-of-range temperature", true }, + { HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE, + "secure link does not expect request during key exchange" }, + { HIF_ERROR_SLK_SESSION_KEY, + "secure link session key is invalid" }, + { HIF_ERROR_SLK_OVERFLOW, + "secure link overflow" }, + { HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE, + "secure link messages list does not match message encryption" }, + { HIF_ERROR_SLK_UNCONFIGURED, + "secure link not yet configured" }, + { HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW, + "bus clock is too slow (<1kHz)" }, + { HIF_ERROR_HIF_RX_DATA_TOO_LARGE, + "HIF message too large" }, + /* Following errors only exists in old firmware versions: */ + { HIF_ERROR_HIF_TX_QUEUE_FULL, + "HIF messages queue is full" }, + { HIF_ERROR_HIF_BUS, + "HIF bus" }, + { HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED, + "secure link does not support multi-tx confirmations" }, + { HIF_ERROR_SLK_OUTDATED_SESSION_KEY, + "secure link session key is outdated" }, + { HIF_ERROR_SLK_DECRYPTION, + "secure link params (nonce or tag) mismatch" }, +}; + +static int wfx_hif_error_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_error *body = buf; + int type = le32_to_cpu(body->type); + int param = (s8)body->data[0]; + int i; + + for (i = 0; i < ARRAY_SIZE(hif_errors); i++) + if (type == hif_errors[i].val) + break; + if (i < ARRAY_SIZE(hif_errors)) + if (hif_errors[i].has_param) + dev_err(wdev->dev, "asynchronous error: %s: %d\n", + hif_errors[i].str, param); + else + dev_err(wdev->dev, "asynchronous error: %s\n", hif_errors[i].str); + else + dev_err(wdev->dev, "asynchronous error: unknown: %08x\n", type); + print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, + 16, 1, hif, le16_to_cpu(hif->len), false); + wdev->chip_frozen = true; + + return 0; +}; + +static int wfx_hif_exception_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_exception *body = buf; + int type = le32_to_cpu(body->type); + + if (type == 4) + dev_err(wdev->dev, "firmware assert %d\n", le32_to_cpup((__le32 *)body->data)); + else + dev_err(wdev->dev, "firmware exception\n"); + print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, + 16, 1, hif, le16_to_cpu(hif->len), false); + wdev->chip_frozen = true; + + return -1; +} + +static const struct { + int msg_id; + int (*handler)(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, const void *buf); +} hif_handlers[] = { + /* Confirmations */ + { HIF_CNF_ID_TX, wfx_hif_tx_confirm }, 
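+	/* Editor's illustrative note (not part of the original patch): extending
+	 * this dispatch table takes a single entry pairing a message ID with its
+	 * handler; a hypothetical new indication would be wired up as:
+	 *
+	 *   { HIF_IND_ID_FOO, wfx_hif_foo_indication },
+	 *
+	 * wfx_handle_rx() below walks the table linearly, so the order of the
+	 * entries only matters for readability.
+	 */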
+ { HIF_CNF_ID_MULTI_TRANSMIT, wfx_hif_multi_tx_confirm }, + /* Indications */ + { HIF_IND_ID_STARTUP, wfx_hif_startup_indication }, + { HIF_IND_ID_WAKEUP, wfx_hif_wakeup_indication }, + { HIF_IND_ID_JOIN_COMPLETE, wfx_hif_join_complete_indication }, + { HIF_IND_ID_SET_PM_MODE_CMPL, wfx_hif_pm_mode_complete_indication }, + { HIF_IND_ID_SCAN_CMPL, wfx_hif_scan_complete_indication }, + { HIF_IND_ID_SUSPEND_RESUME_TX, wfx_hif_suspend_resume_indication }, + { HIF_IND_ID_EVENT, wfx_hif_event_indication }, + { HIF_IND_ID_GENERIC, wfx_hif_generic_indication }, + { HIF_IND_ID_ERROR, wfx_hif_error_indication }, + { HIF_IND_ID_EXCEPTION, wfx_hif_exception_indication }, + /* FIXME: allocate skb_p from wfx_hif_receive_indication and make it generic */ + //{ HIF_IND_ID_RX, wfx_hif_receive_indication }, +}; + +void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb) +{ + int i; + const struct wfx_hif_msg *hif = (const struct wfx_hif_msg *)skb->data; + int hif_id = hif->id; + + if (hif_id == HIF_IND_ID_RX) { + /* wfx_hif_receive_indication takes care of the skb lifetime */ + wfx_hif_receive_indication(wdev, hif, hif->body, skb); + return; + } + /* Note: mutex_is_locked() causes an implicit memory barrier that protects buf_send */ + if (mutex_is_locked(&wdev->hif_cmd.lock) && + wdev->hif_cmd.buf_send && wdev->hif_cmd.buf_send->id == hif_id) { + wfx_hif_generic_confirm(wdev, hif, hif->body); + goto free; + } + for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) { + if (hif_handlers[i].msg_id == hif_id) { + if (hif_handlers[i].handler) + hif_handlers[i].handler(wdev, hif, hif->body); + goto free; + } + } + if (hif_id & HIF_ID_IS_INDICATION) + dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n", hif_id); + else + dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n", hif_id); +free: + dev_kfree_skb(skb); +} diff --git a/drivers/net/wireless/silabs/wfx/hif_rx.h b/drivers/net/wireless/silabs/wfx/hif_rx.h new file mode 100644 index 0000000000..96543b81fa --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_rx.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Handling of the chip-to-host events (aka indications) of the hardware API. + * + * Copyright (c) 2017-2019, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ +#ifndef WFX_HIF_RX_H +#define WFX_HIF_RX_H + +struct wfx_dev; +struct sk_buff; + +void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c new file mode 100644 index 0000000000..9402503fbd --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx.c @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of the host-to-chip commands (aka request/confirmation) of the + * hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson + */ +#include <linux/etherdevice.h> + +#include "hif_tx.h" +#include "wfx.h" +#include "bh.h" +#include "hwio.h" +#include "debug.h" +#include "sta.h" + +void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd) +{ + init_completion(&hif_cmd->ready); + init_completion(&hif_cmd->done); + mutex_init(&hif_cmd->lock); +} + +static void wfx_fill_header(struct wfx_hif_msg *hif, int if_id, unsigned int cmd, size_t size) +{ + if (if_id == -1) + if_id = 2; + + WARN(cmd > 0x3f, "invalid hardware command %#.2x", cmd); + WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size); + WARN(if_id > 0x3, "invalid interface ID %d", if_id); + + hif->len = cpu_to_le16(size + 4); + hif->id = cmd; + hif->interface = if_id; +} + +static void *wfx_alloc_hif(size_t body_len, struct wfx_hif_msg **hif) +{ + *hif = kzalloc(sizeof(struct wfx_hif_msg) + body_len, GFP_KERNEL); + if (*hif) + return (*hif)->body; + else + return NULL; +} + +int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, + void *reply, size_t reply_len, bool no_reply) +{ + const char *mib_name = ""; + const char *mib_sep = ""; + int cmd = request->id; + int vif = request->interface; + int ret; + + /* Do not wait for any reply if chip is frozen */ + if (wdev->chip_frozen) + return -ETIMEDOUT; + + mutex_lock(&wdev->hif_cmd.lock); + WARN(wdev->hif_cmd.buf_send, "data locking error"); + + /* Note: call to complete() below has an implicit memory barrier that hopefully protects + * buf_send + */ + wdev->hif_cmd.buf_send = request; + wdev->hif_cmd.buf_recv = reply; + wdev->hif_cmd.len_recv = reply_len; + complete(&wdev->hif_cmd.ready); + + wfx_bh_request_tx(wdev); + + if (no_reply) { + /* Chip won't reply. Ensure the wq has sent the buffer before continuing. */ + flush_workqueue(wdev->bh_wq); + ret = 0; + goto end; + } + + if (wdev->poll_irq) + wfx_bh_poll_irq(wdev); + + ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ); + if (!ret) { + dev_err(wdev->dev, "chip is taking abnormally long to answer\n"); + reinit_completion(&wdev->hif_cmd.ready); + ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ); + } + if (!ret) { + dev_err(wdev->dev, "chip did not answer\n"); + wfx_pending_dump_old_frames(wdev, 3000); + wdev->chip_frozen = true; + reinit_completion(&wdev->hif_cmd.done); + ret = -ETIMEDOUT; + } else { + ret = wdev->hif_cmd.ret; + } + +end: + wdev->hif_cmd.buf_send = NULL; + mutex_unlock(&wdev->hif_cmd.lock); + + if (ret && + (cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) { + mib_name = wfx_get_mib_name(((u16 *)request)[2]); + mib_sep = "/"; + } + if (ret < 0) + dev_err(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned error %d\n", + wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret); + if (ret > 0) + dev_warn(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned status %d\n", + wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret); + + return ret; +} + +/* This function is special. After HIF_REQ_ID_SHUT_DOWN, the chip won't reply to any request anymore. + * Obviously, only call this function during device unregister.
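+ * For example (editor's illustration, not from the original patch), a typical
+ * teardown would end with:
+ *
+ *   wfx_hif_shutdown(wdev);  the request is sent with no_reply=true
+ *
+ * after which only a reset (or the wakeup GPIO) can revive the device.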
+ */ +int wfx_hif_shutdown(struct wfx_dev *wdev) +{ + int ret; + struct wfx_hif_msg *hif; + + wfx_alloc_hif(0, &hif); + if (!hif) + return -ENOMEM; + wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0); + ret = wfx_cmd_send(wdev, hif, NULL, 0, true); + if (wdev->pdata.gpio_wakeup) + gpiod_set_value(wdev->pdata.gpio_wakeup, 0); + else + wfx_control_reg_write(wdev, 0); + kfree(hif); + return ret; +} + +int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len) +{ + int ret; + size_t buf_len = sizeof(struct wfx_hif_req_configuration) + len; + struct wfx_hif_msg *hif; + struct wfx_hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->length = cpu_to_le16(len); + memcpy(body->pds_data, conf, len); + wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->reset_stat = reset_stat; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len) +{ + int ret; + struct wfx_hif_msg *hif; + int buf_len = sizeof(struct wfx_hif_cnf_read_mib) + val_len; + struct wfx_hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif); + struct wfx_hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL); + + if (!body || !reply) { + ret = -ENOMEM; + goto out; + } + body->mib_id = cpu_to_le16(mib_id); + wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body)); + ret = wfx_cmd_send(wdev, hif, reply, buf_len, false); + + if (!ret && mib_id != le16_to_cpu(reply->mib_id)) { + dev_warn(wdev->dev, "%s: confirmation mismatch request\n", __func__); + ret = -EIO; + } + if (ret == -ENOMEM) + dev_err(wdev->dev, "buffer is too small to receive %s (%zu < %d)\n", + wfx_get_mib_name(mib_id), val_len, le16_to_cpu(reply->length)); + if (!ret) + memcpy(val, &reply->mib_data, le16_to_cpu(reply->length)); + else + memset(val, 0xFF, val_len); +out: + kfree(hif); + kfree(reply); + return ret; +} + +int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len) +{ + int ret; + struct wfx_hif_msg *hif; + int buf_len = sizeof(struct wfx_hif_req_write_mib) + val_len; + struct wfx_hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->mib_id = cpu_to_le16(mib_id); + body->length = cpu_to_le16(val_len); + memcpy(&body->mib_data, val, val_len); + wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, + int chan_start_idx, int chan_num) +{ + int ret, i; + struct wfx_hif_msg *hif; + size_t buf_len = sizeof(struct wfx_hif_req_start_scan_alt) + chan_num * sizeof(u8); + struct wfx_hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif); + + WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params"); + WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params"); + + if (!hif) + return -ENOMEM; + for (i = 0; i < req->n_ssids; i++) { + memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid, IEEE80211_MAX_SSID_LEN); + body->ssid_def[i].ssid_length = 
cpu_to_le32(req->ssids[i].ssid_len); + } + body->num_of_ssids = HIF_API_MAX_NB_SSIDS; + body->maintain_current_bss = 1; + body->disallow_ps = 1; + body->tx_power_level = cpu_to_le32(req->channels[chan_start_idx]->max_power); + body->num_of_channels = chan_num; + for (i = 0; i < chan_num; i++) + body->channel_list[i] = req->channels[i + chan_start_idx]->hw_value; + if (req->no_cck) + body->max_transmit_rate = API_RATE_INDEX_G_6MBPS; + else + body->max_transmit_rate = API_RATE_INDEX_B_1MBPS; + if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) { + body->min_channel_time = cpu_to_le32(50); + body->max_channel_time = cpu_to_le32(150); + } else { + body->min_channel_time = cpu_to_le32(10); + body->max_channel_time = cpu_to_le32(50); + body->num_of_probe_requests = 2; + body->probe_delay = 100; + } + + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_stop_scan(struct wfx_vif *wvif) +{ + int ret; + struct wfx_hif_msg *hif; + /* body associated with HIF_REQ_ID_STOP_SCAN is empty */ + wfx_alloc_hif(0, &hif); + + if (!hif) + return -ENOMEM; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + struct ieee80211_channel *channel, const u8 *ssid, int ssid_len) +{ + struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif, + bss_conf); + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif); + + WARN_ON(!conf->beacon_int); + WARN_ON(!conf->basic_rates); + WARN_ON(sizeof(body->ssid) < ssid_len); + WARN(!vif->cfg.ibss_joined && !ssid_len, "joining an unknown BSS"); + if (!hif) + return -ENOMEM; + body->infrastructure_bss_mode = !vif->cfg.ibss_joined; + body->short_preamble = conf->use_short_preamble; + body->probe_for_join = !(channel->flags & IEEE80211_CHAN_NO_IR); + body->channel_number = channel->hw_value; + body->beacon_interval = cpu_to_le32(conf->beacon_int); + body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); + memcpy(body->bssid, conf->bssid, sizeof(body->bssid)); + if (ssid) { + body->ssid_length = cpu_to_le32(ssid_len); + memcpy(body->ssid, ssid, ssid_len); + } + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->aid = cpu_to_le16(aid); + body->beacon_lost_count = beacon_lost_count; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg) +{ + int ret; + struct wfx_hif_msg *hif; + /* FIXME: only send necessary bits */ + struct wfx_hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + /* FIXME: swap bytes as necessary in body */ + memcpy(body, arg, sizeof(*body)); + if (wfx_api_older_than(wdev, 1, 5)) + /* Legacy firmware expects add_key to be sent on the right interface. */ + wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY, sizeof(*body)); + else + wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body)); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_remove_key(struct wfx_dev *wdev, int idx) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->entry_index = idx; + wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body)); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue, + const struct ieee80211_tx_queue_params *arg) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + + WARN_ON(arg->aifs > 255); + body->aifsn = arg->aifs; + body->cw_min = cpu_to_le16(arg->cw_min); + body->cw_max = cpu_to_le16(arg->cw_max); + body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP); + body->queue_id = 3 - queue; + /* API 2.0 has changed queue IDs values */ + if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE) + body->queue_id = HIF_QUEUE_ID_BACKGROUND; + if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK) + body->queue_id = HIF_QUEUE_ID_BESTEFFORT; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + if (ps) { + body->enter_psm = 1; + /* Firmware does not support more than 128ms */ + body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255); + if (body->fast_psm_idle_period) + body->fast_psm = 1; + } + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + const struct ieee80211_channel *channel) +{ + struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif, + bss_conf); + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif); + + WARN_ON(!conf->beacon_int); + if (!hif) + return -ENOMEM; + body->dtim_period = conf->dtim_period; + body->short_preamble = conf->use_short_preamble; + body->channel_number = channel->hw_value; + body->beacon_interval = cpu_to_le32(conf->beacon_int); + body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); + body->ssid_length = vif->cfg.ssid_len; + memcpy(body->ssid, vif->cfg.ssid, vif->cfg.ssid_len); + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->enable_beaconing = enable ?
1 : 0; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + if (mac_addr) + ether_addr_copy(body->mac_addr, mac_addr); + body->mfpc = mfp ? 1 : 0; + body->unmap = unmap ? 1 : 0; + body->peer_sta_id = sta_id; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len) +{ + int ret; + struct wfx_hif_msg *hif; + int buf_len = sizeof(struct wfx_hif_req_update_ie) + ies_len; + struct wfx_hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->beacon = 1; + body->num_ies = cpu_to_le16(1); + memcpy(body->ie, ies, ies_len); + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.h b/drivers/net/wireless/silabs/wfx/hif_tx.h new file mode 100644 index 0000000000..71817a6571 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Implementation of the host-to-chip commands (aka request/confirmation) of the + * hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ +#ifndef WFX_HIF_TX_H +#define WFX_HIF_TX_H + +#include <linux/completion.h> +#include <linux/mutex.h> +#include <linux/types.h> + +struct ieee80211_channel; +struct ieee80211_bss_conf; +struct ieee80211_tx_queue_params; +struct cfg80211_scan_request; +struct wfx_hif_req_add_key; +struct wfx_dev; +struct wfx_vif; + +struct wfx_hif_cmd { + struct mutex lock; + struct completion ready; + struct completion done; + struct wfx_hif_msg *buf_send; + void *buf_recv; + size_t len_recv; + int ret; +}; + +void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd); +int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, + void *reply, size_t reply_len, bool no_reply); + +int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); +int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); +int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + const struct ieee80211_channel *channel); +int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat); +int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + struct ieee80211_channel *channel, const u8 *ssid, int ssidlen); +int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp); +int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg); +int wfx_hif_remove_key(struct wfx_dev *wdev, int idx); +int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout); +int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count); +int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue, + const struct ieee80211_tx_queue_params *arg); +int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable); +int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t
ies_len); +int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, + int chan_start, int chan_num); +int wfx_hif_stop_scan(struct wfx_vif *wvif); +int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len); +int wfx_hif_shutdown(struct wfx_dev *wdev); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_tx_mib.c b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c new file mode 100644 index 0000000000..df1bcb1e2c --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of the host-to-chip MIBs of the hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ + +#include + +#include "wfx.h" +#include "hif_tx.h" +#include "hif_tx_mib.h" +#include "hif_api_mib.h" + +int wfx_hif_set_output_power(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_current_tx_power_level arg = { + .power_level = cpu_to_le32(val * 10), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_CURRENT_TX_POWER_LEVEL, + &arg, sizeof(arg)); +} + +int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif, + unsigned int dtim_interval, unsigned int listen_interval) +{ + struct wfx_hif_mib_beacon_wake_up_period arg = { + .wakeup_period_min = dtim_interval, + .receive_dtim = 0, + .wakeup_period_max = listen_interval, + }; + + if (dtim_interval > 0xFF || listen_interval > 0xFFFF) + return -EINVAL; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_WAKEUP_PERIOD, + &arg, sizeof(arg)); +} + +int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst) +{ + struct wfx_hif_mib_rcpi_rssi_threshold arg = { + .rolling_average_count = 8, + .detection = 1, + }; + + if (!rssi_thold && !rssi_hyst) { + arg.upperthresh = 1; + arg.lowerthresh = 1; + } else { + arg.upper_threshold = rssi_thold + rssi_hyst; + arg.upper_threshold = (arg.upper_threshold + 110) * 2; + arg.lower_threshold = rssi_thold; + arg.lower_threshold = (arg.lower_threshold + 110) * 2; + } + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RCPI_RSSI_THRESHOLD, + &arg, sizeof(arg)); +} + +int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id, + struct wfx_hif_mib_extended_count_table *arg) +{ + if (wfx_api_older_than(wdev, 1, 3)) { + /* extended_count_table is wider than count_table */ + memset(arg, 0xFF, sizeof(*arg)); + return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE, + arg, sizeof(struct wfx_hif_mib_count_table)); + } else { + return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, + arg, sizeof(struct wfx_hif_mib_extended_count_table)); + } +} + +int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac) +{ + struct wfx_hif_mib_mac_address arg = { }; + + if (mac) + ether_addr_copy(arg.mac_addr, mac); + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS, + &arg, sizeof(arg)); +} + +int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool filter_prbreq) +{ + struct wfx_hif_mib_rx_filter arg = { }; + + if (filter_bssid) + arg.bssid_filter = 1; + if (!filter_prbreq) + arg.fwd_probe_req = 1; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER, &arg, sizeof(arg)); +} + +int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len, + const struct wfx_hif_ie_table_entry *tbl) +{ + int ret; + struct wfx_hif_mib_bcn_filter_table *arg; + int buf_len = struct_size(arg, ie_table, 
tbl_len); + + arg = kzalloc(buf_len, GFP_KERNEL); + if (!arg) + return -ENOMEM; + arg->num_of_info_elmts = cpu_to_le32(tbl_len); + memcpy(arg->ie_table, tbl, flex_array_size(arg, ie_table, tbl_len)); + ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_TABLE, + arg, buf_len); + kfree(arg); + return ret; +} + +int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count) +{ + struct wfx_hif_mib_bcn_filter_enable arg = { + .enable = cpu_to_le32(enable), + .bcn_count = cpu_to_le32(beacon_count), + }; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_ENABLE, + &arg, sizeof(arg)); +} + +int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode) +{ + struct wfx_hif_mib_gl_operational_power_mode arg = { + .power_mode = mode, + .wup_ind_activation = 1, + }; + + return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE, + &arg, sizeof(arg)); +} + +int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb, + u8 frame_type, int init_rate) +{ + struct wfx_hif_mib_template_frame *arg; + + WARN(skb->len > HIF_API_MAX_TEMPLATE_FRAME_SIZE, "frame is too big"); + skb_push(skb, 4); + arg = (struct wfx_hif_mib_template_frame *)skb->data; + skb_pull(skb, 4); + arg->init_rate = init_rate; + arg->frame_type = frame_type; + arg->frame_length = cpu_to_le16(skb->len); + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME, + arg, sizeof(*arg) + skb->len); +} + +int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required) +{ + struct wfx_hif_mib_protected_mgmt_policy arg = { }; + + WARN(required && !capable, "incoherent arguments"); + if (capable) { + arg.pmf_enable = 1; + arg.host_enc_auth_frames = 1; + } + if (!required) + arg.unpmf_allowed = 1; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_PROTECTED_MGMT_POLICY, + &arg, sizeof(arg)); +} + +int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy) +{ + struct wfx_hif_mib_block_ack_policy arg = { + .block_ack_tx_tid_policy = tx_tid_policy, + .block_ack_rx_tid_policy = rx_tid_policy, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY, + &arg, sizeof(arg)); +} + +int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density, + bool greenfield, bool short_preamble) +{ + struct wfx_hif_mib_set_association_mode arg = { + .preambtype_use = 1, + .mode = 1, + .spacing = 1, + .short_preamble = short_preamble, + .greenfield = greenfield, + .mpdu_start_spacing = ampdu_density, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_ASSOCIATION_MODE, + &arg, sizeof(arg)); +} + +int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates) +{ + struct wfx_hif_mib_set_tx_rate_retry_policy *arg; + size_t size = struct_size(arg, tx_rate_retry_policy, 1); + int ret; + + arg = kzalloc(size, GFP_KERNEL); + if (!arg) + return -ENOMEM; + arg->num_tx_rate_policies = 1; + arg->tx_rate_retry_policy[0].policy_index = policy_index; + arg->tx_rate_retry_policy[0].short_retry_count = 255; + arg->tx_rate_retry_policy[0].long_retry_count = 255; + arg->tx_rate_retry_policy[0].first_rate_sel = 1; + arg->tx_rate_retry_policy[0].terminate = 1; + arg->tx_rate_retry_policy[0].count_init = 1; + memcpy(&arg->tx_rate_retry_policy[0].rates, rates, + sizeof(arg->tx_rate_retry_policy[0].rates)); + ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, + arg, size); + kfree(arg); + return ret; +} 
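+ /* Editor's illustrative usage sketch (not part of the original patch): the
+  * 12-byte rates[] argument above is copied verbatim into the policy entry,
+  * and the 255 retry counts effectively mean "retry until the rate entries
+  * are exhausted". A hypothetical caller installing a lowest-rate-only
+  * policy, assuming index 0 encodes the lowest rate in the packed format:
+  *
+  *   u8 rates[12] = { 0 };
+  *   int err = wfx_hif_set_tx_rate_retry_policy(wvif, policy_index, rates);
+  *
+  *   if (err)
+  *           dev_warn(wvif->wdev->dev, "rate policy update failed: %d\n", err);
+  */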
+ +int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period) +{ + struct wfx_hif_mib_keep_alive_period arg = { + .keep_alive_period = cpu_to_le16(period), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD, + &arg, sizeof(arg)); +}; + +int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr) +{ + struct wfx_hif_mib_arp_ip_addr_table arg = { + .condition_idx = idx, + .arp_enable = HIF_ARP_NS_FILTERING_DISABLE, + }; + + if (addr) { + /* Caution: type of addr is __be32 */ + memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address)); + arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE; + } + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE, + &arg, sizeof(arg)); +} + +int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable) +{ + struct wfx_hif_mib_gl_set_multi_msg arg = { + .enable_multi_tx_conf = enable, + }; + + return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG, &arg, sizeof(arg)); +} + +int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val) +{ + struct wfx_hif_mib_set_uapsd_information arg = { }; + + if (val & BIT(IEEE80211_AC_VO)) + arg.trig_voice = 1; + if (val & BIT(IEEE80211_AC_VI)) + arg.trig_video = 1; + if (val & BIT(IEEE80211_AC_BE)) + arg.trig_be = 1; + if (val & BIT(IEEE80211_AC_BK)) + arg.trig_bckgrnd = 1; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_UAPSD_INFORMATION, + &arg, sizeof(arg)); +} + +int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable) +{ + struct wfx_hif_mib_non_erp_protection arg = { + .use_cts_to_self = enable, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_NON_ERP_PROTECTION, + &arg, sizeof(arg)); +} + +int wfx_hif_slot_time(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_slot_time arg = { + .slot_time = cpu_to_le32(val), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME, &arg, sizeof(arg)); +} + +int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_wep_default_key_id arg = { + .wep_default_key_id = val, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID, + &arg, sizeof(arg)); +} + +int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_dot11_rts_threshold arg = { + .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_RTS_THRESHOLD, + &arg, sizeof(arg)); +} diff --git a/drivers/net/wireless/silabs/wfx/hif_tx_mib.h b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h new file mode 100644 index 0000000000..bcd4ef6a84 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Implementation of the host-to-chip MIBs of the hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ +#ifndef WFX_HIF_TX_MIB_H +#define WFX_HIF_TX_MIB_H + +#include + +struct sk_buff; +struct wfx_vif; +struct wfx_dev; +struct wfx_hif_ie_table_entry; +struct wfx_hif_mib_extended_count_table; + +int wfx_hif_set_output_power(struct wfx_vif *wvif, int val); +int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif, + unsigned int dtim_interval, unsigned int listen_interval); +int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst); +int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id, + struct wfx_hif_mib_extended_count_table *arg); +int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac); +int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool fwd_probe_req); +int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len, + const struct wfx_hif_ie_table_entry *tbl); +int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count); +int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode); +int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb, + u8 frame_type, int init_rate); +int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required); +int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy); +int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density, + bool greenfield, bool short_preamble); +int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates); +int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period); +int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr); +int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable); +int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val); +int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable); +int wfx_hif_slot_time(struct wfx_vif *wvif, int val); +int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val); +int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hwio.c b/drivers/net/wireless/silabs/wfx/hwio.c new file mode 100644 index 0000000000..3f9750b470 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hwio.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Low-level I/O functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include +#include +#include + +#include "hwio.h" +#include "wfx.h" +#include "bus.h" +#include "traces.h" + +#define WFX_HIF_BUFFER_SIZE 0x2000 + +static int wfx_read32(struct wfx_dev *wdev, int reg, u32 *val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + *val = ~0; /* Never return undefined value */ + if (!tmp) + return -ENOMEM; + ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp, sizeof(u32)); + if (ret >= 0) + *val = le32_to_cpu(*tmp); + kfree(tmp); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +static int wfx_write32(struct wfx_dev *wdev, int reg, u32 val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + *tmp = cpu_to_le32(val); + ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp, sizeof(u32)); + kfree(tmp); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +static int wfx_read32_locked(struct wfx_dev *wdev, int reg, u32 *val) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_read32(wdev, reg, val); + _trace_io_read32(reg, *val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_write32_locked(struct wfx_dev *wdev, int reg, u32 val) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_write32(wdev, reg, val); + _trace_io_write32(reg, val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val) +{ + int ret; + u32 val_r, val_w; + + WARN_ON(~mask & val); + val &= mask; + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_read32(wdev, reg, &val_r); + _trace_io_read32(reg, val_r); + if (ret < 0) + goto err; + val_w = (val_r & ~mask) | val; + if (val_w != val_r) { + ret = wfx_write32(wdev, reg, val_w); + _trace_io_write32(reg, val_w); + } +err: + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf, size_t len) +{ + int ret; + int i; + u32 cfg; + u32 prefetch; + + WARN_ON(len >= WFX_HIF_BUFFER_SIZE); + WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT); + + if (reg == WFX_REG_AHB_DPORT) + prefetch = CFG_PREFETCH_AHB; + else if (reg == WFX_REG_SRAM_DPORT) + prefetch = CFG_PREFETCH_SRAM; + else + return -ENODEV; + + ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr); + if (ret < 0) + goto err; + + ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg); + if (ret < 0) + goto err; + + ret = wfx_write32(wdev, WFX_REG_CONFIG, cfg | prefetch); + if (ret < 0) + goto err; + + for (i = 0; i < 20; i++) { + ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg); + if (ret < 0) + goto err; + if (!(cfg & prefetch)) + break; + usleep_range(200, 250); + } + if (i == 20) { + ret = -ETIMEDOUT; + goto err; + } + + ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len); + +err: + if (ret < 0) + memset(buf, 0xFF, len); /* Never return undefined value */ + return ret; +} + +static int wfx_indirect_write(struct wfx_dev *wdev, int reg, u32 addr, + const void *buf, size_t len) +{ + int ret; + + WARN_ON(len >= WFX_HIF_BUFFER_SIZE); + WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT); + ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr); + if (ret < 0) + return ret; + + return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len); +} + +static int wfx_indirect_read_locked(struct wfx_dev 
*wdev, int reg, u32 addr, + void *buf, size_t len) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_read(wdev, reg, addr, buf, len); + _trace_io_ind_read(reg, addr, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr, + const void *buf, size_t len) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_write(wdev, reg, addr, buf, len); + _trace_io_ind_write(reg, addr, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 *val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_read(wdev, reg, addr, tmp, sizeof(u32)); + *val = le32_to_cpu(*tmp); + _trace_io_ind_read32(reg, addr, *val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + kfree(tmp); + return ret; +} + +static int wfx_indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + *tmp = cpu_to_le32(val); + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_write(wdev, reg, addr, tmp, sizeof(u32)); + _trace_io_ind_write32(reg, addr, val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + kfree(tmp); + return ret; +} + +int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len) +{ + int ret; + + WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer"); + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len); + _trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len) +{ + int ret; + + WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer"); + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len); + _trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len) +{ + return wfx_indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len); +} + +int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len) +{ + return wfx_indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len); +} + +int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len) +{ + return wfx_indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len); +} + +int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len) +{ + return wfx_indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len); +} + +int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val) +{ + return wfx_indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val); +} + +int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val) +{ + return wfx_indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val); +} + +int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val) +{ + return wfx_indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val); +} + +int 
wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val) +{ + return wfx_indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val); +} + +int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val) +{ + return wfx_read32_locked(wdev, WFX_REG_CONFIG, val); +} + +int wfx_config_reg_write(struct wfx_dev *wdev, u32 val) +{ + return wfx_write32_locked(wdev, WFX_REG_CONFIG, val); +} + +int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val) +{ + return wfx_write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val); +} + +int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val) +{ + return wfx_read32_locked(wdev, WFX_REG_CONTROL, val); +} + +int wfx_control_reg_write(struct wfx_dev *wdev, u32 val) +{ + return wfx_write32_locked(wdev, WFX_REG_CONTROL, val); +} + +int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val) +{ + return wfx_write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val); +} + +int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val) +{ + int ret; + + *val = ~0; /* Never return undefined value */ + ret = wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24); + if (ret) + return ret; + ret = wfx_read32_locked(wdev, WFX_REG_SET_GEN_R_W, val); + if (ret) + return ret; + *val &= IGPR_VALUE; + return ret; +} + +int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val) +{ + return wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val); +} diff --git a/drivers/net/wireless/silabs/wfx/hwio.h b/drivers/net/wireless/silabs/wfx/hwio.h new file mode 100644 index 0000000000..c6e7b065b7 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hwio.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Low-level I/O functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_HWIO_H +#define WFX_HWIO_H + +#include <linux/types.h> + +struct wfx_dev; + +/* Caution: in the functions below, 'buf' will be used for DMA, so it must be kmalloc'd (do not use + * stack-allocated buffers). If in doubt, enable CONFIG_DEBUG_SG to detect badly located buffers.
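+ * For example (editor's illustration, not from the original patch):
+ *
+ *   u8 *buf = kmalloc(len, GFP_KERNEL);   this allocation is DMA-safe
+ *   int ret = buf ? wfx_data_read(wdev, buf, len) : -ENOMEM;
+ *
+ * rather than passing a stack array such as "u8 buf[64]".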
+ */ +int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len); +int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t buf_len); + +int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len); +int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len); + +int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len); +int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len); + +int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val); +int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val); + +int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val); +int wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val); + +#define CFG_ERR_SPI_FRAME 0x00000001 /* only with SPI */ +#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 /* only with SDIO */ +#define CFG_ERR_BUF_UNDERRUN 0x00000002 +#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004 +#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008 +#define CFG_ERR_BUF_OVERRUN 0x00000010 +#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020 +#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040 +#define CFG_ERR_HOST_CRC_MISS 0x00000080 /* only with SDIO */ +#define CFG_SPI_IGNORE_CS 0x00000080 /* only with SPI */ +#define CFG_BYTE_ORDER_MASK 0x00000300 /* only writable with SPI */ +#define CFG_BYTE_ORDER_BADC 0x00000000 +#define CFG_BYTE_ORDER_DCBA 0x00000100 +#define CFG_BYTE_ORDER_ABCD 0x00000200 /* SDIO always use this value */ +#define CFG_DIRECT_ACCESS_MODE 0x00000400 +#define CFG_PREFETCH_AHB 0x00000800 +#define CFG_DISABLE_CPU_CLK 0x00001000 +#define CFG_PREFETCH_SRAM 0x00002000 +#define CFG_CPU_RESET 0x00004000 +#define CFG_SDIO_DISABLE_IRQ 0x00008000 /* only with SDIO */ +#define CFG_IRQ_ENABLE_DATA 0x00010000 +#define CFG_IRQ_ENABLE_WRDY 0x00020000 +#define CFG_CLK_RISE_EDGE 0x00040000 +#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 /* only with SDIO */ +#define CFG_RESERVED 0x00F00000 +#define CFG_DEVICE_ID_MAJOR 0x07000000 +#define CFG_DEVICE_ID_RESERVED 0x78000000 +#define CFG_DEVICE_ID_TYPE 0x80000000 +int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val); +int wfx_config_reg_write(struct wfx_dev *wdev, u32 val); +int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val); + +#define CTRL_NEXT_LEN_MASK 0x00000FFF +#define CTRL_WLAN_WAKEUP 0x00001000 +#define CTRL_WLAN_READY 0x00002000 +int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val); +int wfx_control_reg_write(struct wfx_dev *wdev, u32 val); +int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val); + +#define IGPR_RW 0x80000000 +#define IGPR_INDEX 0x7F000000 +#define IGPR_VALUE 0x00FFFFFF +int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val); +int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/key.c b/drivers/net/wireless/silabs/wfx/key.c new file mode 100644 index 0000000000..196d64ef68 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/key.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Key management related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "key.h" +#include "wfx.h" +#include "hif_tx_mib.h" + +static int wfx_alloc_key(struct wfx_dev *wdev) +{ + int idx; + + idx = ffs(~wdev->key_map) - 1; + if (idx < 0 || idx >= MAX_KEY_ENTRIES) + return -1; + + wdev->key_map |= BIT(idx); + return idx; +} + +static void wfx_free_key(struct wfx_dev *wdev, int idx) +{ + WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation"); + wdev->key_map &= ~BIT(idx); +} + +static u8 fill_wep_pair(struct wfx_hif_wep_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + WARN(key->keylen > sizeof(msg->key_data), "inconsistent data"); + msg->key_length = key->keylen; + memcpy(msg->key_data, key->key, key->keylen); + ether_addr_copy(msg->peer_address, peer_addr); + return HIF_KEY_TYPE_WEP_PAIRWISE; +} + +static u8 fill_wep_group(struct wfx_hif_wep_group_key *msg, + struct ieee80211_key_conf *key) +{ + WARN(key->keylen > sizeof(msg->key_data), "inconsistent data"); + msg->key_id = key->keyidx; + msg->key_length = key->keylen; + memcpy(msg->key_data, key->key, key->keylen); + return HIF_KEY_TYPE_WEP_DEFAULT; +} + +static u8 fill_tkip_pair(struct wfx_hif_tkip_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + u8 *keybuf = key->key; + + WARN(key->keylen != sizeof(msg->tkip_key_data) + sizeof(msg->tx_mic_key) + + sizeof(msg->rx_mic_key), "inconsistent data"); + memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data)); + keybuf += sizeof(msg->tkip_key_data); + memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key)); + keybuf += sizeof(msg->tx_mic_key); + memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key)); + ether_addr_copy(msg->peer_address, peer_addr); + return HIF_KEY_TYPE_TKIP_PAIRWISE; +} + +static u8 fill_tkip_group(struct wfx_hif_tkip_group_key *msg, struct ieee80211_key_conf *key, + struct ieee80211_key_seq *seq, enum nl80211_iftype iftype) +{ + u8 *keybuf = key->key; + + WARN(key->keylen != sizeof(msg->tkip_key_data) + 2 * sizeof(msg->rx_mic_key), + "inconsistent data"); + msg->key_id = key->keyidx; + memcpy(msg->rx_sequence_counter, &seq->tkip.iv16, sizeof(seq->tkip.iv16)); + memcpy(msg->rx_sequence_counter + sizeof(u16), &seq->tkip.iv32, sizeof(seq->tkip.iv32)); + memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data)); + keybuf += sizeof(msg->tkip_key_data); + if (iftype == NL80211_IFTYPE_AP) + /* Use Tx MIC Key */ + memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key)); + else + /* Use Rx MIC Key */ + memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key)); + return HIF_KEY_TYPE_TKIP_GROUP; +} + +static u8 fill_ccmp_pair(struct wfx_hif_aes_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data"); + ether_addr_copy(msg->peer_address, peer_addr); + memcpy(msg->aes_key_data, key->key, key->keylen); + return HIF_KEY_TYPE_AES_PAIRWISE; +} + +static u8 fill_ccmp_group(struct wfx_hif_aes_group_key *msg, + struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) +{ + WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data"); + memcpy(msg->aes_key_data, key->key, key->keylen); + memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn)); + memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_AES_GROUP; +} + +static u8 fill_sms4_pair(struct wfx_hif_wapi_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + u8 *keybuf = key->key; + 
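+	/* The key blob provided by mac80211 is the SMS4 key immediately followed by the MIC key;
+	 * split it into the two fields expected by the HIF message.
+	 */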
+ WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data), + "inconsistent data"); + ether_addr_copy(msg->peer_address, peer_addr); + memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data)); + keybuf += sizeof(msg->wapi_key_data); + memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_WAPI_PAIRWISE; +} + +static u8 fill_sms4_group(struct wfx_hif_wapi_group_key *msg, + struct ieee80211_key_conf *key) +{ + u8 *keybuf = key->key; + + WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data), + "inconsistent data"); + memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data)); + keybuf += sizeof(msg->wapi_key_data); + memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_WAPI_GROUP; +} + +static u8 fill_aes_cmac_group(struct wfx_hif_igtk_group_key *msg, + struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) +{ + WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data"); + memcpy(msg->igtk_key_data, key->key, key->keylen); + memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn)); + memreverse(msg->ipn, sizeof(seq->aes_cmac.pn)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_IGTK_GROUP; +} + +static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + int ret; + struct wfx_hif_req_add_key k = { }; + struct ieee80211_key_seq seq; + struct wfx_dev *wdev = wvif->wdev; + int idx = wfx_alloc_key(wvif->wdev); + bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE; + struct ieee80211_vif *vif = wvif_to_vif(wvif); + + WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data"); + ieee80211_get_key_rx_seq(key, 0, &seq); + if (idx < 0) + return -EINVAL; + k.int_id = wvif->id; + k.entry_index = idx; + if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) { + if (pairwise) + k.type = fill_wep_pair(&k.key.wep_pairwise_key, key, sta->addr); + else + k.type = fill_wep_group(&k.key.wep_group_key, key); + } else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { + if (pairwise) + k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key, sta->addr); + else + k.type = fill_tkip_group(&k.key.tkip_group_key, key, &seq, + vif->type); + } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) { + if (pairwise) + k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key, sta->addr); + else + k.type = fill_ccmp_group(&k.key.aes_group_key, key, &seq); + } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) { + if (pairwise) + k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key, sta->addr); + else + k.type = fill_sms4_group(&k.key.wapi_group_key, key); + } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key, &seq); + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + } else { + dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher); + wfx_free_key(wdev, idx); + return -EOPNOTSUPP; + } + ret = wfx_hif_add_key(wdev, &k); + if (ret) { + wfx_free_key(wdev, idx); + return -EOPNOTSUPP; + } + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM; + key->hw_key_idx = idx; + return 0; +} + +static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key) +{ + WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx"); + wfx_free_key(wvif->wdev, key->hw_key_idx); + return wfx_hif_remove_key(wvif->wdev, key->hw_key_idx); +} + +int 
wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct ieee80211_key_conf *key) +{ + int ret = -EOPNOTSUPP; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + mutex_lock(&wvif->wdev->conf_mutex); + if (cmd == SET_KEY) + ret = wfx_add_key(wvif, sta, key); + if (cmd == DISABLE_KEY) + ret = wfx_remove_key(wvif, key); + mutex_unlock(&wvif->wdev->conf_mutex); + return ret; +} diff --git a/drivers/net/wireless/silabs/wfx/key.h b/drivers/net/wireless/silabs/wfx/key.h new file mode 100644 index 0000000000..2234e36dbb --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/key.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Key management related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_KEY_H +#define WFX_KEY_H + +#include + +struct wfx_dev; +struct wfx_vif; + +int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct ieee80211_key_conf *key); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c new file mode 100644 index 0000000000..e015bfb8d2 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Device probe and register. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (c) 2008, Johannes Berg + * Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2004-2006 Jean-Baptiste Note , et al. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" +#include "wfx.h" +#include "fwio.h" +#include "hwio.h" +#include "bus.h" +#include "bh.h" +#include "sta.h" +#include "key.h" +#include "scan.h" +#include "debug.h" +#include "data_tx.h" +#include "hif_tx_mib.h" +#include "hif_api_cmd.h" + +#define WFX_PDS_TLV_TYPE 0x4450 // "PD" (Platform Data) in ascii little-endian +#define WFX_PDS_MAX_CHUNK_SIZE 1500 + +MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WF200"); +MODULE_AUTHOR("Jérôme Pouiller "); +MODULE_LICENSE("GPL"); + +#define RATETAB_ENT(_rate, _rateid, _flags) { \ + .bitrate = (_rate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ +} + +static struct ieee80211_rate wfx_rates[] = { + RATETAB_ENT(10, 0, 0), + RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(60, 6, 0), + RATETAB_ENT(90, 7, 0), + RATETAB_ENT(120, 8, 0), + RATETAB_ENT(180, 9, 0), + RATETAB_ENT(240, 10, 0), + RATETAB_ENT(360, 11, 0), + RATETAB_ENT(480, 12, 0), + RATETAB_ENT(540, 13, 0), +}; + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_channel wfx_2ghz_chantable[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static const struct 
ieee80211_supported_band wfx_band_2ghz = {
+	.channels = wfx_2ghz_chantable,
+	.n_channels = ARRAY_SIZE(wfx_2ghz_chantable),
+	.bitrates = wfx_rates,
+	.n_bitrates = ARRAY_SIZE(wfx_rates),
+	.ht_cap = {
+		/* Receive caps */
+		.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
+		       IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
+		.ht_supported = 1,
+		.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+		.mcs = {
+			.rx_mask = { 0xFF }, /* MCS0 to MCS7 */
+			.rx_highest = cpu_to_le16(72),
+			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+		},
+	},
+};
+
+static const struct ieee80211_iface_limit wdev_iface_limits[] = {
+	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
+	{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
+	{
+		.num_different_channels = 2,
+		.max_interfaces = 2,
+		.limits = wdev_iface_limits,
+		.n_limits = ARRAY_SIZE(wdev_iface_limits),
+	}
+};
+
+static const struct ieee80211_ops wfx_ops = {
+	.start = wfx_start,
+	.stop = wfx_stop,
+	.add_interface = wfx_add_interface,
+	.remove_interface = wfx_remove_interface,
+	.config = wfx_config,
+	.tx = wfx_tx,
+	.join_ibss = wfx_join_ibss,
+	.leave_ibss = wfx_leave_ibss,
+	.conf_tx = wfx_conf_tx,
+	.hw_scan = wfx_hw_scan,
+	.cancel_hw_scan = wfx_cancel_hw_scan,
+	.start_ap = wfx_start_ap,
+	.stop_ap = wfx_stop_ap,
+	.sta_add = wfx_sta_add,
+	.sta_remove = wfx_sta_remove,
+	.set_tim = wfx_set_tim,
+	.set_key = wfx_set_key,
+	.set_rts_threshold = wfx_set_rts_threshold,
+	.set_default_unicast_key = wfx_set_default_unicast_key,
+	.bss_info_changed = wfx_bss_info_changed,
+	.configure_filter = wfx_configure_filter,
+	.ampdu_action = wfx_ampdu_action,
+	.flush = wfx_flush,
+	.add_chanctx = wfx_add_chanctx,
+	.remove_chanctx = wfx_remove_chanctx,
+	.change_chanctx = wfx_change_chanctx,
+	.assign_vif_chanctx = wfx_assign_vif_chanctx,
+	.unassign_vif_chanctx = wfx_unassign_vif_chanctx,
+};
+
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
+{
+	if (wdev->hw_caps.api_version_major < major)
+		return true;
+	if (wdev->hw_caps.api_version_major > major)
+		return false;
+	if (wdev->hw_caps.api_version_minor < minor)
+		return true;
+	return false;
+}
+
+/* The device needs data about the antenna configuration. This information is provided by PDS
+ * (Platform Data Set, this is the wording used in the WF200 documentation) files. For hardware
+ * integrators, the full process to create PDS files is described here:
+ * https://github.com/SiliconLabs/wfx-firmware/blob/master/PDS/README.md
+ *
+ * The PDS file is an array of Type-Length-Value structs.
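+ *
+ * As a sketch of the chunk layout parsed by wfx_send_pds() below (values are little-endian,
+ * the length includes the 4 bytes of header):
+ *
+ *	offset 0: chunk type, 0x4450 ("PD")  (2 bytes)
+ *	offset 2: chunk length               (2 bytes)
+ *	offset 4: payload, from '{' to '}'   (chunk length - 4 bytes)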
+ */ +int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len) +{ + int ret, chunk_type, chunk_len, chunk_num = 0; + + if (*buf == '{') { + dev_err(wdev->dev, "PDS: malformed file (legacy format?)\n"); + return -EINVAL; + } + while (len > 0) { + chunk_type = get_unaligned_le16(buf + 0); + chunk_len = get_unaligned_le16(buf + 2); + if (chunk_len > len) { + dev_err(wdev->dev, "PDS:%d: corrupted file\n", chunk_num); + return -EINVAL; + } + if (chunk_type != WFX_PDS_TLV_TYPE) { + dev_info(wdev->dev, "PDS:%d: skip unknown data\n", chunk_num); + goto next; + } + if (chunk_len > WFX_PDS_MAX_CHUNK_SIZE) + dev_warn(wdev->dev, "PDS:%d: unexpectedly large chunk\n", chunk_num); + if (buf[4] != '{' || buf[chunk_len - 1] != '}') + dev_warn(wdev->dev, "PDS:%d: unexpected content\n", chunk_num); + + ret = wfx_hif_configuration(wdev, buf + 4, chunk_len - 4); + if (ret > 0) { + dev_err(wdev->dev, "PDS:%d: invalid data (unsupported options?)\n", chunk_num); + return -EINVAL; + } + if (ret == -ETIMEDOUT) { + dev_err(wdev->dev, "PDS:%d: chip didn't reply (corrupted file?)\n", chunk_num); + return ret; + } + if (ret) { + dev_err(wdev->dev, "PDS:%d: chip returned an unknown error\n", chunk_num); + return -EIO; + } +next: + chunk_num++; + len -= chunk_len; + buf += chunk_len; + } + return 0; +} + +static int wfx_send_pdata_pds(struct wfx_dev *wdev) +{ + int ret = 0; + const struct firmware *pds; + u8 *tmp_buf; + + ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev); + if (ret) { + dev_err(wdev->dev, "can't load antenna parameters (PDS file %s). The device may be unstable.\n", + wdev->pdata.file_pds); + return ret; + } + tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL); + if (!tmp_buf) { + ret = -ENOMEM; + goto release_fw; + } + ret = wfx_send_pds(wdev, tmp_buf, pds->size); + kfree(tmp_buf); +release_fw: + release_firmware(pds); + return ret; +} + +static void wfx_free_common(void *data) +{ + struct wfx_dev *wdev = data; + + mutex_destroy(&wdev->tx_power_loop_info_lock); + mutex_destroy(&wdev->rx_stats_lock); + mutex_destroy(&wdev->conf_mutex); + ieee80211_free_hw(wdev->hw); +} + +struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata, + const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv) +{ + struct ieee80211_hw *hw; + struct wfx_dev *wdev; + + hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops); + if (!hw) + return NULL; + + SET_IEEE80211_DEV(hw, dev); + + ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, CONNECTION_MONITOR); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, MFP_CAPABLE); + + hw->vif_data_size = sizeof(struct wfx_vif); + hw->sta_data_size = sizeof(struct wfx_sta_priv); + hw->queues = 4; + hw->max_rates = 8; + hw->max_rate_tries = 8; + hw->extra_tx_headroom = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + + 4 /* alignment */ + 8 /* TKIP IV */; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP); + hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U; + hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; + hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + 
hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX;
+	hw->wiphy->max_scan_ssids = 2;
+	hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+	hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
+	hw->wiphy->iface_combinations = wfx_iface_combinations;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL);
+	if (!hw->wiphy->bands[NL80211_BAND_2GHZ])
+		goto err;
+
+	/* FIXME: also copy wfx_rates and wfx_2ghz_chantable */
+	memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz, sizeof(wfx_band_2ghz));
+
+	wdev = hw->priv;
+	wdev->hw = hw;
+	wdev->dev = dev;
+	wdev->hwbus_ops = hwbus_ops;
+	wdev->hwbus_priv = hwbus_priv;
+	memcpy(&wdev->pdata, pdata, sizeof(*pdata));
+	of_property_read_string(dev->of_node, "silabs,antenna-config-file", &wdev->pdata.file_pds);
+	wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW);
+	if (IS_ERR(wdev->pdata.gpio_wakeup))
+		goto err;
+
+	if (wdev->pdata.gpio_wakeup)
+		gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup");
+
+	mutex_init(&wdev->conf_mutex);
+	mutex_init(&wdev->rx_stats_lock);
+	mutex_init(&wdev->tx_power_loop_info_lock);
+	init_completion(&wdev->firmware_ready);
+	INIT_DELAYED_WORK(&wdev->cooling_timeout_work, wfx_cooling_timeout_work);
+	skb_queue_head_init(&wdev->tx_pending);
+	init_waitqueue_head(&wdev->tx_dequeue);
+	wfx_init_hif_cmd(&wdev->hif_cmd);
+
+	if (devm_add_action_or_reset(dev, wfx_free_common, wdev))
+		return NULL;
+
+	return wdev;
+
+err:
+	ieee80211_free_hw(hw);
+	return NULL;
+}
+
+int wfx_probe(struct wfx_dev *wdev)
+{
+	int i;
+	int err;
+	struct gpio_desc *gpio_saved;
+
+	/* During the first part of the boot, gpio_wakeup cannot be used yet. So prevent bh()
+	 * from touching it.
+	 */
+	gpio_saved = wdev->pdata.gpio_wakeup;
+	wdev->pdata.gpio_wakeup = NULL;
+	wdev->poll_irq = true;
+
+	wdev->bh_wq = alloc_workqueue("wfx_bh_wq", WQ_HIGHPRI, 0);
+	if (!wdev->bh_wq)
+		return -ENOMEM;
+
+	wfx_bh_register(wdev);
+
+	err = wfx_init_device(wdev);
+	if (err)
+		goto bh_unregister;
+
+	wfx_bh_poll_irq(wdev);
+	err = wait_for_completion_interruptible_timeout(&wdev->firmware_ready, 1 * HZ);
+	if (err <= 0) {
+		if (err == 0) {
+			dev_err(wdev->dev, "timeout while waiting for startup indication\n");
+			err = -ETIMEDOUT;
+		} else if (err == -ERESTARTSYS) {
+			dev_info(wdev->dev, "probe interrupted by user\n");
+		}
+		goto bh_unregister;
+	}
+
+	/* FIXME: fill wiphy::hw_version */
+	dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
+		 wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
+		 wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
+		 wdev->hw_caps.api_version_major, wdev->hw_caps.api_version_minor,
+		 wdev->keyset, wdev->hw_caps.link_mode);
+	snprintf(wdev->hw->wiphy->fw_version,
+		 sizeof(wdev->hw->wiphy->fw_version),
+		 "%d.%d.%d",
+		 wdev->hw_caps.firmware_major,
+		 wdev->hw_caps.firmware_minor,
+		 wdev->hw_caps.firmware_build);
+
+	if (wfx_api_older_than(wdev, 1, 0)) {
+		dev_err(wdev->dev, "unsupported firmware API version (expect 1 while firmware returns %d)\n",
+			wdev->hw_caps.api_version_major);
+		err = -EOPNOTSUPP;
+		goto bh_unregister;
+	}
+
+	if (wdev->hw_caps.link_mode == SEC_LINK_ENFORCED) {
+		dev_err(wdev->dev, "chip requires secure_link, but can't negotiate it\n");
+		goto bh_unregister;
+	}
+
+	if (wdev->hw_caps.region_sel_mode) {
+		wdev->hw->wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS;
+		wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |=
+			IEEE80211_CHAN_NO_IR;
+		
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |= + IEEE80211_CHAN_NO_IR; + wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |= + IEEE80211_CHAN_DISABLED; + } + + dev_dbg(wdev->dev, "sending configuration file %s\n", wdev->pdata.file_pds); + err = wfx_send_pdata_pds(wdev); + if (err < 0 && err != -ENOENT) + goto bh_unregister; + + wdev->poll_irq = false; + err = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv); + if (err) + goto bh_unregister; + + err = wfx_hif_use_multi_tx_conf(wdev, true); + if (err) + dev_err(wdev->dev, "misconfigured IRQ?\n"); + + wdev->pdata.gpio_wakeup = gpio_saved; + if (wdev->pdata.gpio_wakeup) { + dev_dbg(wdev->dev, "enable 'quiescent' power mode with wakeup GPIO and PDS file %s\n", + wdev->pdata.file_pds); + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); + wfx_control_reg_write(wdev, 0); + wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT); + } else { + wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE); + } + + for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) { + eth_zero_addr(wdev->addresses[i].addr); + err = of_get_mac_address(wdev->dev->of_node, wdev->addresses[i].addr); + if (!err) + wdev->addresses[i].addr[ETH_ALEN - 1] += i; + else + ether_addr_copy(wdev->addresses[i].addr, wdev->hw_caps.mac_addr[i]); + if (!is_valid_ether_addr(wdev->addresses[i].addr)) { + dev_warn(wdev->dev, "using random MAC address\n"); + eth_random_addr(wdev->addresses[i].addr); + } + dev_info(wdev->dev, "MAC address %d: %pM\n", i, wdev->addresses[i].addr); + } + wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses); + wdev->hw->wiphy->addresses = wdev->addresses; + + if (!wfx_api_older_than(wdev, 3, 8)) + wdev->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + + err = ieee80211_register_hw(wdev->hw); + if (err) + goto irq_unsubscribe; + + err = wfx_debug_init(wdev); + if (err) + goto ieee80211_unregister; + + return 0; + +ieee80211_unregister: + ieee80211_unregister_hw(wdev->hw); +irq_unsubscribe: + wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); +bh_unregister: + wfx_bh_unregister(wdev); + destroy_workqueue(wdev->bh_wq); + return err; +} + +void wfx_release(struct wfx_dev *wdev) +{ + ieee80211_unregister_hw(wdev->hw); + wfx_hif_shutdown(wdev); + wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); + wfx_bh_unregister(wdev); + destroy_workqueue(wdev->bh_wq); +} + +static int __init wfx_core_init(void) +{ + int ret = 0; + + if (IS_ENABLED(CONFIG_SPI)) + ret = spi_register_driver(&wfx_spi_driver); + if (IS_ENABLED(CONFIG_MMC) && !ret) + ret = sdio_register_driver(&wfx_sdio_driver); + return ret; +} +module_init(wfx_core_init); + +static void __exit wfx_core_exit(void) +{ + if (IS_ENABLED(CONFIG_MMC)) + sdio_unregister_driver(&wfx_sdio_driver); + if (IS_ENABLED(CONFIG_SPI)) + spi_unregister_driver(&wfx_spi_driver); +} +module_exit(wfx_core_exit); diff --git a/drivers/net/wireless/silabs/wfx/main.h b/drivers/net/wireless/silabs/wfx/main.h new file mode 100644 index 0000000000..68c6653071 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/main.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Device probe and register. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (c) 2006, Michael Wu + * Copyright 2004-2006 Jean-Baptiste Note , et al. 
+ */
+#ifndef WFX_MAIN_H
+#define WFX_MAIN_H
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+
+#include "hif_api_general.h"
+
+struct wfx_dev;
+struct wfx_hwbus_ops;
+
+struct wfx_platform_data {
+	/* Keyset and ".sec" extension will be appended to this string */
+	const char *file_fw;
+	const char *file_pds;
+	struct gpio_desc *gpio_wakeup;
+	/* If true, HIF D_out is sampled on the rising edge of the clock (intended to be used
+	 * with 50MHz SDIO)
+	 */
+	bool use_rising_clk;
+};
+
+struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata,
+				const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv);
+
+int wfx_probe(struct wfx_dev *wdev);
+void wfx_release(struct wfx_dev *wdev);
+
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c
new file mode 100644
index 0000000000..37f492e5d3
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/queue.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Queue between the tx operation and the bh workqueue.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wfx.h"
+#include "sta.h"
+#include "data_tx.h"
+#include "traces.h"
+
+void wfx_tx_lock(struct wfx_dev *wdev)
+{
+	atomic_inc(&wdev->tx_lock);
+}
+
+void wfx_tx_unlock(struct wfx_dev *wdev)
+{
+	int tx_lock = atomic_dec_return(&wdev->tx_lock);
+
+	WARN(tx_lock < 0, "inconsistent tx_lock value");
+	if (!tx_lock)
+		wfx_bh_request_tx(wdev);
+}
+
+void wfx_tx_flush(struct wfx_dev *wdev)
+{
+	int ret;
+
+	/* Do not wait for any reply if chip is frozen */
+	if (wdev->chip_frozen)
+		return;
+
+	wfx_tx_lock(wdev);
+	mutex_lock(&wdev->hif_cmd.lock);
+	ret = wait_event_timeout(wdev->hif.tx_buffers_empty, !wdev->hif.tx_buffers_used,
+				 msecs_to_jiffies(3000));
+	if (!ret) {
+		dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
+			 wdev->hif.tx_buffers_used);
+		wfx_pending_dump_old_frames(wdev, 3000);
+		/* FIXME: drop pending frames here */
+		wdev->chip_frozen = true;
+	}
+	mutex_unlock(&wdev->hif_cmd.lock);
+	wfx_tx_unlock(wdev);
+}
+
+void wfx_tx_lock_flush(struct wfx_dev *wdev)
+{
+	wfx_tx_lock(wdev);
+	wfx_tx_flush(wdev);
+}
+
+void wfx_tx_queues_init(struct wfx_vif *wvif)
+{
+	/* The device is in charge of respecting the details of the QoS parameters. The driver
+	 * just ensures that it roughly respects the priorities to avoid any shortage.
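+	 *
+	 * The scheduler in wfx_tx_queues_get_skb() computes pending_frames * priority for each
+	 * queue and serves the lowest result first. As a worked example with the weights below:
+	 * queue 0 (priority 1) with 60 frames in flight weighs 60 and is still served before
+	 * queue 2 (priority 64) holding a single frame (weight 64).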
+ */ + const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 }; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; ++i) { + skb_queue_head_init(&wvif->tx_queue[i].normal); + skb_queue_head_init(&wvif->tx_queue[i].cab); + wvif->tx_queue[i].priority = priorities[i]; + } +} + +bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue) +{ + return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab); +} + +void wfx_tx_queues_check_empty(struct wfx_vif *wvif) +{ + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; ++i) { + WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames)); + WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i])); + } +} + +static void __wfx_tx_queue_drop(struct wfx_vif *wvif, + struct sk_buff_head *skb_queue, struct sk_buff_head *dropped) +{ + struct sk_buff *skb, *tmp; + + spin_lock_bh(&skb_queue->lock); + skb_queue_walk_safe(skb_queue, skb, tmp) { + __skb_unlink(skb, skb_queue); + skb_queue_head(dropped, skb); + } + spin_unlock_bh(&skb_queue->lock); +} + +void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, + struct sk_buff_head *dropped) +{ + __wfx_tx_queue_drop(wvif, &queue->cab, dropped); + __wfx_tx_queue_drop(wvif, &queue->normal, dropped); + wake_up(&wvif->wdev->tx_dequeue); +} + +void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb) +{ + struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + skb_queue_tail(&queue->cab, skb); + else + skb_queue_tail(&queue->normal, skb); +} + +void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped) +{ + struct wfx_queue *queue; + struct wfx_vif *wvif; + struct wfx_hif_msg *hif; + struct sk_buff *skb; + + WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__); + while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) { + hif = (struct wfx_hif_msg *)skb->data; + wvif = wdev_to_wvif(wdev, hif->interface); + if (wvif) { + queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; + WARN_ON(skb_get_queue_mapping(skb) > 3); + WARN_ON(!atomic_read(&queue->pending_frames)); + atomic_dec(&queue->pending_frames); + } + skb_queue_head(dropped, skb); + } +} + +struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id) +{ + struct wfx_queue *queue; + struct wfx_hif_req_tx *req; + struct wfx_vif *wvif; + struct wfx_hif_msg *hif; + struct sk_buff *skb; + + spin_lock_bh(&wdev->tx_pending.lock); + skb_queue_walk(&wdev->tx_pending, skb) { + hif = (struct wfx_hif_msg *)skb->data; + req = (struct wfx_hif_req_tx *)hif->body; + if (req->packet_id != packet_id) + continue; + spin_unlock_bh(&wdev->tx_pending.lock); + wvif = wdev_to_wvif(wdev, hif->interface); + if (wvif) { + queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; + WARN_ON(skb_get_queue_mapping(skb) > 3); + WARN_ON(!atomic_read(&queue->pending_frames)); + atomic_dec(&queue->pending_frames); + } + skb_unlink(skb, &wdev->tx_pending); + return skb; + } + spin_unlock_bh(&wdev->tx_pending.lock); + WARN(1, "cannot find packet in pending queue"); + return NULL; +} + +void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms) +{ + ktime_t now = ktime_get(); + struct wfx_tx_priv *tx_priv; + struct wfx_hif_req_tx *req; + struct sk_buff *skb; + bool first = true; + + spin_lock_bh(&wdev->tx_pending.lock); + skb_queue_walk(&wdev->tx_pending, skb) { + tx_priv = wfx_skb_tx_priv(skb); + req = wfx_skb_txreq(skb); + if (ktime_after(now, 
ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) {
+			if (first) {
+				dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
+					 limit_ms);
+				first = false;
+			}
+			dev_info(wdev->dev, " id %08x sent %lldms ago\n",
+				 req->packet_id, ktime_ms_delta(now, tx_priv->xmit_timestamp));
+		}
+	}
+	spin_unlock_bh(&wdev->tx_pending.lock);
+}
+
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+	ktime_t now = ktime_get();
+	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+	return ktime_us_delta(now, tx_priv->xmit_timestamp);
+}
+
+bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
+{
+	struct ieee80211_vif *vif = wvif_to_vif(wvif);
+	int i;
+
+	if (vif->type != NL80211_IFTYPE_AP)
+		return false;
+	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+		/* Note: since only AP can have mcast frames in queue and only one vif can be AP,
+		 * all queued frames have the same interface id
+		 */
+		if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
+			return true;
+	return false;
+}
+
+static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
+{
+	return atomic_read(&queue->pending_frames) * queue->priority;
+}
+
+static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
+{
+	struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
+	int i, j, num_queues = 0;
+	struct wfx_vif *wvif;
+	struct wfx_hif_msg *hif;
+	struct sk_buff *skb;
+
+	/* sort the queues */
+	wvif = NULL;
+	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+			WARN_ON(num_queues >= ARRAY_SIZE(queues));
+			queues[num_queues] = &wvif->tx_queue[i];
+			for (j = num_queues; j > 0; j--)
+				if (wfx_tx_queue_get_weight(queues[j]) <
+				    wfx_tx_queue_get_weight(queues[j - 1]))
+					swap(queues[j - 1], queues[j]);
+			num_queues++;
+		}
+	}
+
+	wvif = NULL;
+	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+		if (!wvif->after_dtim_tx_allowed)
+			continue;
+		for (i = 0; i < num_queues; i++) {
+			skb = skb_dequeue(&queues[i]->cab);
+			if (!skb)
+				continue;
+			/* Note: since only AP can have mcast frames in queue and only one vif can
+			 * be AP, all queued frames have the same interface id
+			 */
+			hif = (struct wfx_hif_msg *)skb->data;
+			WARN_ON(hif->interface != wvif->id);
+			WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]);
+			atomic_inc(&queues[i]->pending_frames);
+			trace_queues_stats(wdev, queues[i]);
+			return skb;
+		}
+		/* No more multicast to send */
+		wvif->after_dtim_tx_allowed = false;
+		schedule_work(&wvif->update_tim_work);
+	}
+
+	for (i = 0; i < num_queues; i++) {
+		skb = skb_dequeue(&queues[i]->normal);
+		if (skb) {
+			atomic_inc(&queues[i]->pending_frames);
+			trace_queues_stats(wdev, queues[i]);
+			return skb;
+		}
+	}
+	return NULL;
+}
+
+struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
+{
+	struct wfx_tx_priv *tx_priv;
+	struct sk_buff *skb;
+
+	if (atomic_read(&wdev->tx_lock))
+		return NULL;
+	skb = wfx_tx_queues_get_skb(wdev);
+	if (!skb)
+		return NULL;
+	skb_queue_tail(&wdev->tx_pending, skb);
+	wake_up(&wdev->tx_dequeue);
+	tx_priv = wfx_skb_tx_priv(skb);
+	tx_priv->xmit_timestamp = ktime_get();
+	return (struct wfx_hif_msg *)skb->data;
+}
diff --git a/drivers/net/wireless/silabs/wfx/queue.h b/drivers/net/wireless/silabs/wfx/queue.h
new file mode 100644
index 0000000000..4731debca9
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/queue.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Queue between the tx operation and the bh workqueue.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_QUEUE_H +#define WFX_QUEUE_H + +#include +#include + +struct wfx_dev; +struct wfx_vif; + +struct wfx_queue { + struct sk_buff_head normal; + struct sk_buff_head cab; /* Content After (DTIM) Beacon */ + atomic_t pending_frames; + int priority; +}; + +void wfx_tx_lock(struct wfx_dev *wdev); +void wfx_tx_unlock(struct wfx_dev *wdev); +void wfx_tx_flush(struct wfx_dev *wdev); +void wfx_tx_lock_flush(struct wfx_dev *wdev); + +void wfx_tx_queues_init(struct wfx_vif *wvif); +void wfx_tx_queues_check_empty(struct wfx_vif *wvif); +bool wfx_tx_queues_has_cab(struct wfx_vif *wvif); +void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb); +struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev); + +bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue); +void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, + struct sk_buff_head *dropped); + +struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id); +void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped); +unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb); +void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c new file mode 100644 index 0000000000..16f619ed22 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/scan.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Scan related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include + +#include "scan.h" +#include "wfx.h" +#include "sta.h" +#include "hif_tx_mib.h" + +static void wfx_ieee80211_scan_completed_compat(struct ieee80211_hw *hw, bool aborted) +{ + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + + ieee80211_scan_completed(hw, &info); +} + +static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request *req) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct sk_buff *skb; + + skb = ieee80211_probereq_get(wvif->wdev->hw, vif->addr, NULL, 0, + req->ie_len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, req->ie, req->ie_len); + wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0); + dev_kfree_skb(skb); + return 0; +} + +static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int start_idx) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct ieee80211_channel *ch_start, *ch_cur; + int i, ret; + + for (i = start_idx; i < req->n_channels; i++) { + ch_start = req->channels[start_idx]; + ch_cur = req->channels[i]; + WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported"); + if (ch_cur->max_power != ch_start->max_power) + break; + if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR) + break; + } + wfx_tx_lock_flush(wvif->wdev); + wvif->scan_abort = false; + reinit_completion(&wvif->scan_complete); + ret = wfx_hif_scan(wvif, req, start_idx, i - start_idx); + if (ret) { + wfx_tx_unlock(wvif->wdev); + return -EIO; + } + ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); + if (!ret) { + wfx_hif_stop_scan(wvif); + ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); + dev_dbg(wvif->wdev->dev, "scan timeout (%d channels done)\n", + wvif->scan_nb_chan_done); + } + if (!ret) { + dev_err(wvif->wdev->dev, "scan didn't stop\n"); + ret = -ETIMEDOUT; + } else if (wvif->scan_abort) { + dev_notice(wvif->wdev->dev, 
"scan abort\n"); + ret = -ECONNABORTED; + } else if (wvif->scan_nb_chan_done > i - start_idx) { + ret = -EIO; + } else { + ret = wvif->scan_nb_chan_done; + } + if (req->channels[start_idx]->max_power != vif->bss_conf.txpower) + wfx_hif_set_output_power(wvif, vif->bss_conf.txpower); + wfx_tx_unlock(wvif->wdev); + return ret; +} + +/* It is not really necessary to run scan request asynchronously. However, + * there is a bug in "iw scan" when ieee80211_scan_completed() is called before + * wfx_hw_scan() return + */ +void wfx_hw_scan_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work); + struct ieee80211_scan_request *hw_req = wvif->scan_req; + int chan_cur, ret, err; + + mutex_lock(&wvif->wdev->conf_mutex); + mutex_lock(&wvif->scan_lock); + if (wvif->join_in_progress) { + dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN"); + wfx_reset(wvif); + } + update_probe_tmpl(wvif, &hw_req->req); + chan_cur = 0; + err = 0; + do { + ret = send_scan_req(wvif, &hw_req->req, chan_cur); + if (ret > 0) { + chan_cur += ret; + err = 0; + } + if (!ret) + err++; + if (err > 2) { + dev_err(wvif->wdev->dev, "scan has not been able to start\n"); + ret = -ETIMEDOUT; + } + } while (ret >= 0 && chan_cur < hw_req->req.n_channels); + mutex_unlock(&wvif->scan_lock); + mutex_unlock(&wvif->wdev->conf_mutex); + wfx_ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0); +} + +int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS); + wvif->scan_req = hw_req; + schedule_work(&wvif->scan_work); + return 0; +} + +void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wvif->scan_abort = true; + wfx_hif_stop_scan(wvif); +} + +void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done) +{ + wvif->scan_nb_chan_done = nb_chan_done; + complete(&wvif->scan_complete); +} diff --git a/drivers/net/wireless/silabs/wfx/scan.h b/drivers/net/wireless/silabs/wfx/scan.h new file mode 100644 index 0000000000..78e3b984f3 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/scan.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Scan related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_SCAN_H +#define WFX_SCAN_H + +#include + +struct wfx_dev; +struct wfx_vif; + +void wfx_hw_scan_work(struct work_struct *work); +int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req); +void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c new file mode 100644 index 0000000000..626dfb4b7a --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -0,0 +1,817 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of mac80211 API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "sta.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "bh.h"
+#include "key.h"
+#include "scan.h"
+#include "debug.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
+
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+{
+	int i;
+	u32 ret = 0;
+	/* The device only supports 2GHz */
+	struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if (rates & BIT(i)) {
+			if (i >= sband->n_bitrates)
+				dev_warn(wdev->dev, "unsupported basic rate\n");
+			else
+				ret |= BIT(sband->bitrates[i].hw_value);
+		}
+	}
+	return ret;
+}
+
+void wfx_cooling_timeout_work(struct work_struct *work)
+{
+	struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev,
+					    cooling_timeout_work);
+
+	wdev->chip_frozen = true;
+	wfx_tx_unlock(wdev);
+}
+
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd)
+{
+	if (cmd == STA_NOTIFY_AWAKE) {
+		/* Device recovered its normal temperature */
+		if (cancel_delayed_work(&wdev->cooling_timeout_work))
+			wfx_tx_unlock(wdev);
+	} else {
+		/* Device is too hot */
+		schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ);
+		wfx_tx_lock(wdev);
+	}
+}
+
+static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
+{
+	static const struct wfx_hif_ie_table_entry filter_ies[] = {
+		{
+			.ie_id        = WLAN_EID_VENDOR_SPECIFIC,
+			.has_changed  = 1,
+			.no_longer    = 1,
+			.has_appeared = 1,
+			.oui          = { 0x50, 0x6F, 0x9A },
+		}, {
+			.ie_id        = WLAN_EID_HT_OPERATION,
+			.has_changed  = 1,
+			.no_longer    = 1,
+			.has_appeared = 1,
+		}, {
+			.ie_id        = WLAN_EID_ERP_INFO,
+			.has_changed  = 1,
+			.no_longer    = 1,
+			.has_appeared = 1,
+		}, {
+			.ie_id        = WLAN_EID_CHANNEL_SWITCH,
+			.has_changed  = 1,
+			.no_longer    = 1,
+			.has_appeared = 1,
+		}
+	};
+
+	if (!filter_beacon) {
+		wfx_hif_beacon_filter_control(wvif, 0, 1);
+	} else {
+		wfx_hif_set_beacon_filter_table(wvif, ARRAY_SIZE(filter_ies), filter_ies);
+		wfx_hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0);
+	}
+}
+
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			  unsigned int *total_flags, u64 unused)
+{
+	bool filter_bssid, filter_prbreq, filter_beacon;
+	struct ieee80211_vif *vif = NULL;
+	struct wfx_dev *wdev = hw->priv;
+	struct wfx_vif *wvif = NULL;
+
+	/* Notes:
+	 *   - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
+	 *   - PS-Poll (FIF_PSPOLL) are never filtered
+	 *   - RTS, CTS and Ack (FIF_CONTROL) are always filtered
+	 *   - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered
+	 *   - Firmware does not (yet) allow forwarding unicast traffic sent to other stations (aka.
+ * promiscuous mode) + */ + *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS | + FIF_PROBE_REQ | FIF_PSPOLL; + + mutex_lock(&wdev->conf_mutex); + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + mutex_lock(&wvif->scan_lock); + + /* Note: FIF_BCN_PRBRESP_PROMISC covers probe response and + * beacons from other BSS + */ + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + filter_beacon = false; + else + filter_beacon = true; + wfx_filter_beacon(wvif, filter_beacon); + + if (*total_flags & FIF_OTHER_BSS) + filter_bssid = false; + else + filter_bssid = true; + + vif = wvif_to_vif(wvif); + /* In AP mode, chip can reply to probe request itself */ + if (*total_flags & FIF_PROBE_REQ && vif->type == NL80211_IFTYPE_AP) { + dev_dbg(wdev->dev, "do not forward probe request in AP mode\n"); + *total_flags &= ~FIF_PROBE_REQ; + } + + if (*total_flags & FIF_PROBE_REQ) + filter_prbreq = false; + else + filter_prbreq = true; + wfx_hif_set_rx_filter(wvif, filter_bssid, filter_prbreq); + + mutex_unlock(&wvif->scan_lock); + } + mutex_unlock(&wdev->conf_mutex); +} + +static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps) +{ + struct ieee80211_channel *chan0 = NULL, *chan1 = NULL; + struct ieee80211_conf *conf = &wvif->wdev->hw->conf; + struct ieee80211_vif *vif = wvif_to_vif(wvif); + + WARN(!vif->cfg.assoc && enable_ps, + "enable_ps is reliable only if associated"); + if (wdev_to_wvif(wvif->wdev, 0)) { + struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0); + struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0); + + chan0 = vif_ch0->bss_conf.chandef.chan; + } + if (wdev_to_wvif(wvif->wdev, 1)) { + struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1); + struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1); + + chan1 = vif_ch1->bss_conf.chandef.chan; + } + if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) { + if (chan0->hw_value == chan1->hw_value) { + /* It is useless to enable PS if channels are the same. */ + if (enable_ps) + *enable_ps = false; + if (vif->cfg.assoc && vif->cfg.ps) + dev_info(wvif->wdev->dev, "ignoring requested PS mode"); + return -1; + } + /* It is necessary to enable PS if channels are different. 
*/ + if (enable_ps) + *enable_ps = true; + if (wfx_api_older_than(wvif->wdev, 3, 2)) + return 0; + else + return 30; + } + if (enable_ps) + *enable_ps = vif->cfg.ps; + if (vif->cfg.assoc && vif->cfg.ps) + return conf->dynamic_ps_timeout; + else + return -1; +} + +int wfx_update_pm(struct wfx_vif *wvif) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + int ps_timeout; + bool ps; + + if (!vif->cfg.assoc) + return 0; + ps_timeout = wfx_get_ps_timeout(wvif, &ps); + if (!ps) + ps_timeout = 0; + WARN_ON(ps_timeout < 0); + if (wvif->uapsd_mask) + ps_timeout = 0; + + if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete, TU_TO_JIFFIES(512))) + dev_warn(wvif->wdev->dev, "timeout while waiting of set_pm_mode_complete\n"); + return wfx_hif_set_pm(wvif, ps, ps_timeout); +} + +int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + unsigned int link_id, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + int old_uapsd = wvif->uapsd_mask; + + WARN_ON(queue >= hw->queues); + + mutex_lock(&wdev->conf_mutex); + assign_bit(queue, &wvif->uapsd_mask, params->uapsd); + wfx_hif_set_edca_queue_params(wvif, queue, params); + if (vif->type == NL80211_IFTYPE_STATION && + old_uapsd != wvif->uapsd_mask) { + wfx_hif_set_uapsd_info(wvif, wvif->uapsd_mask); + wfx_update_pm(wvif); + } + mutex_unlock(&wdev->conf_mutex); + return 0; +} + +int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = NULL; + + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_hif_rts_threshold(wvif, value); + return 0; +} + +void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi) +{ + /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 + * RSSI = RCPI / 2 - 110 + */ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + int rcpi_rssi; + int cqm_evt; + + rcpi_rssi = raw_rcpi_rssi / 2 - 110; + if (rcpi_rssi <= vif->bss_conf.cqm_rssi_thold) + cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; + else + cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; + ieee80211_cqm_rssi_notify(vif, cqm_evt, rcpi_rssi, GFP_KERNEL); +} + +static void wfx_beacon_loss_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif, + beacon_loss_work); + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + + ieee80211_beacon_loss(vif); + schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int)); +} + +void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_hif_wep_default_key_id(wvif, idx); +} + +void wfx_reset(struct wfx_vif *wvif) +{ + struct wfx_dev *wdev = wvif->wdev; + + wfx_tx_lock_flush(wdev); + wfx_hif_reset(wvif, false); + wfx_tx_policy_init(wvif); + if (wvif_count(wdev) <= 1) + wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); + wfx_tx_unlock(wdev); + wvif->join_in_progress = false; + cancel_delayed_work_sync(&wvif->beacon_loss_work); + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_update_pm(wvif); +} + +int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv; + + sta_priv->vif_id = wvif->id; + + if (vif->type == NL80211_IFTYPE_STATION) + 
wfx_hif_set_mfp(wvif, sta->mfp, sta->mfp); + + /* In station mode, the firmware interprets new link-id as a TDLS peer */ + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + return 0; + sta_priv->link_id = ffz(wvif->link_id_map); + wvif->link_id_map |= BIT(sta_priv->link_id); + WARN_ON(!sta_priv->link_id); + WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX); + wfx_hif_map_link(wvif, false, sta->addr, sta_priv->link_id, sta->mfp); + + return 0; +} + +int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv; + + /* See note in wfx_sta_add() */ + if (!sta_priv->link_id) + return 0; + /* FIXME add a mutex? */ + wfx_hif_map_link(wvif, true, sta->addr, sta_priv->link_id, false); + wvif->link_id_map &= ~BIT(sta_priv->link_id); + return 0; +} + +static int wfx_upload_ap_templates(struct wfx_vif *wvif) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct sk_buff *skb; + + skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0); + if (!skb) + return -ENOMEM; + wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS); + dev_kfree_skb(skb); + + skb = ieee80211_proberesp_get(wvif->wdev->hw, vif); + if (!skb) + return -ENOMEM; + wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES, API_RATE_INDEX_B_1MBPS); + dev_kfree_skb(skb); + return 0; +} + +static void wfx_set_mfp_ap(struct wfx_vif *wvif) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0); + const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); + const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, + skb->len - ieoffset); + const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16); + const int pairwise_cipher_suite_size = 4 / sizeof(u16); + const int akm_suite_size = 4 / sizeof(u16); + + if (ptr) { + ptr += pairwise_cipher_suite_count_offset; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return; + ptr += 1 + pairwise_cipher_suite_size * *ptr; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return; + ptr += 1 + akm_suite_size * *ptr; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return; + wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6)); + } +} + +int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_dev *wdev = wvif->wdev; + int ret; + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_update_pm(wvif); + wvif = (struct wfx_vif *)vif->drv_priv; + wfx_upload_ap_templates(wvif); + ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel); + if (ret > 0) + return -EIO; + wfx_set_mfp_ap(wvif); + return ret; +} + +void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_reset(wvif); +} + +static void wfx_join(struct wfx_vif *wvif) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct ieee80211_bss_conf *conf = &vif->bss_conf; + struct cfg80211_bss *bss = NULL; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + const u8 *ssid_ie = NULL; + int ssid_len = 0; + int ret; + + wfx_tx_lock_flush(wvif->wdev); + + bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel, conf->bssid, NULL, 0, + IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); + if (!bss && 
!vif->cfg.ibss_joined) { + wfx_tx_unlock(wvif->wdev); + return; + } + + rcu_read_lock(); /* protect ssid_ie */ + if (bss) + ssid_ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); + if (ssid_ie) { + ssid_len = ssid_ie[1]; + if (ssid_len > IEEE80211_MAX_SSID_LEN) + ssid_len = IEEE80211_MAX_SSID_LEN; + memcpy(ssid, &ssid_ie[2], ssid_len); + } + rcu_read_unlock(); + + cfg80211_put_bss(wvif->wdev->hw->wiphy, bss); + + wvif->join_in_progress = true; + ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssid_len); + if (ret) { + ieee80211_connection_loss(vif); + wfx_reset(wvif); + } else { + /* Due to beacon filtering it is possible that the AP's beacon is not known for the + * mac80211 stack. Disable filtering temporary to make sure the stack receives at + * least one + */ + wfx_filter_beacon(wvif, false); + } + wfx_tx_unlock(wvif->wdev); +} + +static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *info) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct ieee80211_sta *sta = NULL; + int ampdu_density = 0; + bool greenfield = false; + + rcu_read_lock(); /* protect sta */ + if (info->bssid && !vif->cfg.ibss_joined) + sta = ieee80211_find_sta(vif, info->bssid); + if (sta && sta->deflink.ht_cap.ht_supported) + ampdu_density = sta->deflink.ht_cap.ampdu_density; + if (sta && sta->deflink.ht_cap.ht_supported && + !(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)) + greenfield = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); + rcu_read_unlock(); + + wvif->join_in_progress = false; + wfx_hif_set_association_mode(wvif, ampdu_density, greenfield, info->use_short_preamble); + wfx_hif_keep_alive_period(wvif, 0); + /* beacon_loss_count is defined to 7 in net/mac80211/mlme.c. Let's use the same value. */ + wfx_hif_set_bss_params(wvif, vif->cfg.aid, 7); + wfx_hif_set_beacon_wakeup_period(wvif, 1, 1); + wfx_update_pm(wvif); +} + +int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_upload_ap_templates(wvif); + wfx_join(wvif); + return 0; +} + +void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_reset(wvif); +} + +static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable) +{ + /* Driver has Content After DTIM Beacon in queue. Driver is waiting for a signal from the + * firmware. Since we are going to stop to send beacons, this signal will never happens. 
See + * also wfx_suspend_resume_mc() + */ + if (!enable && wfx_tx_queues_has_cab(wvif)) { + wvif->after_dtim_tx_allowed = true; + wfx_bh_request_tx(wvif->wdev); + } + wfx_hif_beacon_transmit(wvif, enable); +} + +void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u64 changed) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + int i; + + mutex_lock(&wdev->conf_mutex); + + if (changed & BSS_CHANGED_BASIC_RATES || + changed & BSS_CHANGED_BEACON_INT || + changed & BSS_CHANGED_BSSID) { + if (vif->type == NL80211_IFTYPE_STATION) + wfx_join(wvif); + } + + if (changed & BSS_CHANGED_ASSOC) { + if (vif->cfg.assoc || vif->cfg.ibss_joined) + wfx_join_finalize(wvif, info); + else if (!vif->cfg.assoc && vif->type == NL80211_IFTYPE_STATION) + wfx_reset(wvif); + else + dev_warn(wdev->dev, "misunderstood change: ASSOC\n"); + } + + if (changed & BSS_CHANGED_BEACON_INFO) { + if (vif->type != NL80211_IFTYPE_STATION) + dev_warn(wdev->dev, "misunderstood change: BEACON_INFO\n"); + wfx_hif_set_beacon_wakeup_period(wvif, info->dtim_period, info->dtim_period); + /* We temporary forwarded beacon for join process. It is now no more necessary. */ + wfx_filter_beacon(wvif, true); + } + + if (changed & BSS_CHANGED_ARP_FILTER) { + for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) { + __be32 *arp_addr = &vif->cfg.arp_addr_list[i]; + + if (vif->cfg.arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES) + arp_addr = NULL; + if (i >= vif->cfg.arp_addr_cnt) + arp_addr = NULL; + wfx_hif_set_arp_ipv4_filter(wvif, i, arp_addr); + } + } + + if (changed & BSS_CHANGED_AP_PROBE_RESP || changed & BSS_CHANGED_BEACON) + wfx_upload_ap_templates(wvif); + + if (changed & BSS_CHANGED_BEACON_ENABLED) + wfx_enable_beacon(wvif, info->enable_beacon); + + if (changed & BSS_CHANGED_KEEP_ALIVE) + wfx_hif_keep_alive_period(wvif, + info->max_idle_period * USEC_PER_TU / USEC_PER_MSEC); + + if (changed & BSS_CHANGED_ERP_CTS_PROT) + wfx_hif_erp_use_protection(wvif, info->use_cts_prot); + + if (changed & BSS_CHANGED_ERP_SLOT) + wfx_hif_slot_time(wvif, info->use_short_slot ? 
9 : 20); + + if (changed & BSS_CHANGED_CQM) + wfx_hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold, info->cqm_rssi_hyst); + + if (changed & BSS_CHANGED_TXPOWER) + wfx_hif_set_output_power(wvif, info->txpower); + + if (changed & BSS_CHANGED_PS) + wfx_update_pm(wvif); + + mutex_unlock(&wdev->conf_mutex); +} + +static int wfx_update_tim(struct wfx_vif *wvif) +{ + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct sk_buff *skb; + u16 tim_offset, tim_length; + u8 *tim_ptr; + + skb = ieee80211_beacon_get_tim(wvif->wdev->hw, vif, &tim_offset, + &tim_length, 0); + if (!skb) + return -ENOENT; + tim_ptr = skb->data + tim_offset; + + if (tim_offset && tim_length >= 6) { + /* Firmware handles DTIM counter internally */ + tim_ptr[2] = 0; + + /* Set/reset aid0 bit */ + if (wfx_tx_queues_has_cab(wvif)) + tim_ptr[4] |= 1; + else + tim_ptr[4] &= ~1; + } + + wfx_hif_update_ie_beacon(wvif, tim_ptr, tim_length); + dev_kfree_skb(skb); + + return 0; +} + +static void wfx_update_tim_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work); + + wfx_update_tim(wvif); +} + +int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv; + struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + schedule_work(&wvif->update_tim_work); + return 0; +} + +void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd) +{ + struct wfx_vif *wvif_it; + + if (notify_cmd != STA_NOTIFY_AWAKE) + return; + + /* Device won't be able to honor CAB if a scan is in progress on any interface. Prefer to + * skip this DTIM and wait for the next one. 
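+ * (CAB: "Content After Beacon", i.e. multicast frames buffered until the DTIM beacon.)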
+ */ + wvif_it = NULL; + while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL) + if (mutex_is_locked(&wvif_it->scan_lock)) + return; + + if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed) + dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)", + wfx_tx_queues_has_cab(wvif)); + wvif->after_dtim_tx_allowed = true; + wfx_bh_request_tx(wvif->wdev); +} + +int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + /* Aggregation is implemented fully in firmware */ + switch (params->action) { + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + /* Just acknowledge it to enable frame re-ordering */ + return 0; + default: + /* Leave the firmware doing its business for tx aggregation */ + return -EOPNOTSUPP; + } +} + +int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) +{ + return 0; +} + +void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) +{ +} + +void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed) +{ +} + +int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *conf) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct ieee80211_channel *ch = conf->def.chan; + + WARN(wvif->channel, "channel overwrite"); + wvif->channel = ch; + + return 0; +} + +void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *conf) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct ieee80211_channel *ch = conf->def.chan; + + WARN(wvif->channel != ch, "channel mismatch"); + wvif->channel = NULL; +} + +int wfx_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} + +int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + int i; + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_UAPSD | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + + mutex_lock(&wdev->conf_mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + break; + default: + mutex_unlock(&wdev->conf_mutex); + return -EOPNOTSUPP; + } + + wvif->wdev = wdev; + + wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */ + INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work); + INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work); + + init_completion(&wvif->set_pm_mode_complete); + complete(&wvif->set_pm_mode_complete); + INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); + + mutex_init(&wvif->scan_lock); + init_completion(&wvif->scan_complete); + INIT_WORK(&wvif->scan_work, wfx_hw_scan_work); + + wfx_tx_queues_init(wvif); + wfx_tx_policy_init(wvif); + + for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { + if (!wdev->vif[i]) { + wdev->vif[i] = vif; + wvif->id = i; + break; + } + } + WARN(i == ARRAY_SIZE(wdev->vif), "try to instantiate more vif than supported"); + + wfx_hif_set_macaddr(wvif, vif->addr); + + mutex_unlock(&wdev->conf_mutex); + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + /* Combo mode does not support Block Acks. 
We can re-enable them */ + if (wvif_count(wdev) == 1) + wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); + else + wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00); + } + return 0; +} + +void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300)); + wfx_tx_queues_check_empty(wvif); + + mutex_lock(&wdev->conf_mutex); + WARN(wvif->link_id_map != 1, "corrupted state"); + + wfx_hif_reset(wvif, false); + wfx_hif_set_macaddr(wvif, NULL); + wfx_tx_policy_init(wvif); + + cancel_delayed_work_sync(&wvif->beacon_loss_work); + wdev->vif[wvif->id] = NULL; + + mutex_unlock(&wdev->conf_mutex); + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + /* Combo mode does not support Block Acks. We can re-enable them */ + if (wvif_count(wdev) == 1) + wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); + else + wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00); + } +} + +int wfx_start(struct ieee80211_hw *hw) +{ + return 0; +} + +void wfx_stop(struct ieee80211_hw *hw) +{ + struct wfx_dev *wdev = hw->priv; + + WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending)); +} diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h new file mode 100644 index 0000000000..888db5cd32 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/sta.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Implementation of mac80211 API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_STA_H +#define WFX_STA_H + +#include + +struct wfx_dev; +struct wfx_vif; + +struct wfx_sta_priv { + int link_id; + int vif_id; +}; + +/* mac80211 interface */ +int wfx_start(struct ieee80211_hw *hw); +void wfx_stop(struct ieee80211_hw *hw); +int wfx_config(struct ieee80211_hw *hw, u32 changed); +int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value); +void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); +void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 unused); + +int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + unsigned int link_id, u16 queue, + const struct ieee80211_tx_queue_params *params); +void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u64 changed); +int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta); +int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set); +int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + 
struct ieee80211_ampdu_params *params);
+int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf);
+void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf);
+void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed);
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+
+/* Hardware API Callbacks */
+void wfx_cooling_timeout_work(struct work_struct *work);
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd);
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
+void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi);
+int wfx_update_pm(struct wfx_vif *wvif);
+
+/* Other Helpers */
+void wfx_reset(struct wfx_vif *wvif);
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/traces.h b/drivers/net/wireless/silabs/wfx/traces.h
new file mode 100644
index 0000000000..e011e8a46b
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/traces.h
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tracepoints definitions.
+ *
+ * Copyright (c) 2018-2020, Silicon Laboratories, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wfx
+
+#if !defined(_WFX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _WFX_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <net/mac80211.h>
+
+#include "bus.h"
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+/* The hell below needs some explanation. For each symbolic number, we need to define it with
+ * TRACE_DEFINE_ENUM() and place it in a list for __print_symbolic().
+ *
+ * 1. Define a new macro that calls TRACE_DEFINE_ENUM():
+ *
+ * #define xxx_name(sym) TRACE_DEFINE_ENUM(sym);
+ *
+ * 2. Define the list of all symbols:
+ *
+ * #define list_names \
+ * ... \
+ * xxx_name(XXX) \
+ * ...
+ *
+ * 3. Instantiate list_names:
+ *
+ * list_names
+ *
+ * 4. Redefine xxx_name() as an array entry for __print_symbolic():
+ *
+ * #undef xxx_name
+ * #define xxx_name(msg) { msg, #msg },
+ *
+ * 5. list_names can now almost be used with __print_symbolic(), but __print_symbolic() dislikes
+ * the trailing comma of the list.
So we define a new list with a dummy element: + * + * #define list_for_print_symbolic list_names { -1, NULL } + */ + +#define _hif_msg_list \ + hif_cnf_name(ADD_KEY) \ + hif_cnf_name(BEACON_TRANSMIT) \ + hif_cnf_name(EDCA_QUEUE_PARAMS) \ + hif_cnf_name(JOIN) \ + hif_cnf_name(MAP_LINK) \ + hif_cnf_name(READ_MIB) \ + hif_cnf_name(REMOVE_KEY) \ + hif_cnf_name(RESET) \ + hif_cnf_name(SET_BSS_PARAMS) \ + hif_cnf_name(SET_PM_MODE) \ + hif_cnf_name(START) \ + hif_cnf_name(START_SCAN) \ + hif_cnf_name(STOP_SCAN) \ + hif_cnf_name(TX) \ + hif_cnf_name(MULTI_TRANSMIT) \ + hif_cnf_name(UPDATE_IE) \ + hif_cnf_name(WRITE_MIB) \ + hif_cnf_name(CONFIGURATION) \ + hif_cnf_name(CONTROL_GPIO) \ + hif_cnf_name(PREVENT_ROLLBACK) \ + hif_cnf_name(SET_SL_MAC_KEY) \ + hif_cnf_name(SL_CONFIGURE) \ + hif_cnf_name(SL_EXCHANGE_PUB_KEYS) \ + hif_cnf_name(SHUT_DOWN) \ + hif_ind_name(EVENT) \ + hif_ind_name(JOIN_COMPLETE) \ + hif_ind_name(RX) \ + hif_ind_name(SCAN_CMPL) \ + hif_ind_name(SET_PM_MODE_CMPL) \ + hif_ind_name(SUSPEND_RESUME_TX) \ + hif_ind_name(SL_EXCHANGE_PUB_KEYS) \ + hif_ind_name(ERROR) \ + hif_ind_name(EXCEPTION) \ + hif_ind_name(GENERIC) \ + hif_ind_name(WAKEUP) \ + hif_ind_name(STARTUP) + +#define hif_msg_list_enum _hif_msg_list + +#undef hif_cnf_name +#undef hif_ind_name +#define hif_cnf_name(msg) TRACE_DEFINE_ENUM(HIF_CNF_ID_##msg); +#define hif_ind_name(msg) TRACE_DEFINE_ENUM(HIF_IND_ID_##msg); +hif_msg_list_enum +#undef hif_cnf_name +#undef hif_ind_name +#define hif_cnf_name(msg) { HIF_CNF_ID_##msg, #msg }, +#define hif_ind_name(msg) { HIF_IND_ID_##msg, #msg }, +#define hif_msg_list hif_msg_list_enum { -1, NULL } + +#define _hif_mib_list \ + hif_mib_name(ARP_IP_ADDRESSES_TABLE) \ + hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \ + hif_mib_name(BEACON_FILTER_ENABLE) \ + hif_mib_name(BEACON_FILTER_TABLE) \ + hif_mib_name(BEACON_STATS) \ + hif_mib_name(BEACON_WAKEUP_PERIOD) \ + hif_mib_name(BLOCK_ACK_POLICY) \ + hif_mib_name(CCA_CONFIG) \ + hif_mib_name(CONFIG_DATA_FILTER) \ + hif_mib_name(COUNTERS_TABLE) \ + hif_mib_name(CURRENT_TX_POWER_LEVEL) \ + hif_mib_name(DOT11_MAC_ADDRESS) \ + hif_mib_name(DOT11_MAX_RECEIVE_LIFETIME) \ + hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \ + hif_mib_name(DOT11_RTS_THRESHOLD) \ + hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \ + hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \ + hif_mib_name(EXTENDED_COUNTERS_TABLE) \ + hif_mib_name(GL_BLOCK_ACK_INFO) \ + hif_mib_name(GL_OPERATIONAL_POWER_MODE) \ + hif_mib_name(GL_SET_MULTI_MSG) \ + hif_mib_name(GRP_SEQ_COUNTER) \ + hif_mib_name(INACTIVITY_TIMER) \ + hif_mib_name(INTERFACE_PROTECTION) \ + hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \ + hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \ + hif_mib_name(KEEP_ALIVE_PERIOD) \ + hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \ + hif_mib_name(MAGIC_DATAFRAME_CONDITION) \ + hif_mib_name(MAX_TX_POWER_LEVEL) \ + hif_mib_name(NON_ERP_PROTECTION) \ + hif_mib_name(NS_IP_ADDRESSES_TABLE) \ + hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \ + hif_mib_name(PORT_DATAFRAME_CONDITION) \ + hif_mib_name(PROTECTED_MGMT_POLICY) \ + hif_mib_name(RCPI_RSSI_THRESHOLD) \ + hif_mib_name(RX_FILTER) \ + hif_mib_name(SET_ASSOCIATION_MODE) \ + hif_mib_name(SET_DATA_FILTERING) \ + hif_mib_name(SET_HT_PROTECTION) \ + hif_mib_name(SET_TX_RATE_RETRY_POLICY) \ + hif_mib_name(SET_UAPSD_INFORMATION) \ + hif_mib_name(SLOT_TIME) \ + hif_mib_name(STATISTICS_TABLE) \ + hif_mib_name(TEMPLATE_FRAME) \ + hif_mib_name(TSF_COUNTER) \ + hif_mib_name(UC_MC_BC_DATAFRAME_CONDITION) + +#define hif_mib_list_enum _hif_mib_list + +#undef hif_mib_name 
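+/* For illustration only (not part of this driver): the same five-step dance applied to a
+ * hypothetical two-entry list would look like this:
+ *
+ *   #define demo_name(sym) TRACE_DEFINE_ENUM(sym);
+ *   #define demo_list demo_name(FOO) demo_name(BAR)
+ *   demo_list
+ *   #undef demo_name
+ *   #define demo_name(sym) { sym, #sym },
+ *   #define demo_list_symbolic demo_list { -1, NULL }
+ *
+ * after which __print_symbolic(val, demo_list_symbolic) renders FOO or BAR by name.
+ */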
+#define hif_mib_name(mib) TRACE_DEFINE_ENUM(HIF_MIB_ID_##mib); +hif_mib_list_enum +#undef hif_mib_name +#define hif_mib_name(mib) { HIF_MIB_ID_##mib, #mib }, +#define hif_mib_list hif_mib_list_enum { -1, NULL } + +DECLARE_EVENT_CLASS(hif_data, + TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), + TP_ARGS(hif, tx_fill_level, is_recv), + TP_STRUCT__entry( + __field(int, tx_fill_level) + __field(int, msg_id) + __field(const char *, msg_type) + __field(int, msg_len) + __field(int, buf_len) + __field(int, if_id) + __field(int, mib) + __array(u8, buf, 128) + ), + TP_fast_assign( + int header_len; + + __entry->tx_fill_level = tx_fill_level; + __entry->msg_len = le16_to_cpu(hif->len); + __entry->msg_id = hif->id; + __entry->if_id = hif->interface; + if (is_recv) + __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF"; + else + __entry->msg_type = "REQ"; + if (!is_recv && + (__entry->msg_id == HIF_REQ_ID_READ_MIB || + __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) { + __entry->mib = le16_to_cpup((__le16 *)hif->body); + header_len = 4; + } else { + __entry->mib = -1; + header_len = 0; + } + __entry->buf_len = min_t(int, __entry->msg_len, sizeof(__entry->buf)) + - sizeof(struct wfx_hif_msg) - header_len; + memcpy(__entry->buf, hif->body + header_len, __entry->buf_len); + ), + TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)", + __entry->tx_fill_level, + __entry->if_id, + __entry->msg_type, + __print_symbolic(__entry->msg_id, hif_msg_list), + __entry->mib != -1 ? "/" : "", + __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "", + __print_hex(__entry->buf, __entry->buf_len), + __entry->msg_len > sizeof(__entry->buf) ? " ..." : "", + __entry->msg_len + ) +); +DEFINE_EVENT(hif_data, hif_send, + TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), + TP_ARGS(hif, tx_fill_level, is_recv)); +#define _trace_hif_send(hif, tx_fill_level)\ + trace_hif_send(hif, tx_fill_level, false) +DEFINE_EVENT(hif_data, hif_recv, + TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), + TP_ARGS(hif, tx_fill_level, is_recv)); +#define _trace_hif_recv(hif, tx_fill_level)\ + trace_hif_recv(hif, tx_fill_level, true) + +#define wfx_reg_list_enum \ + wfx_reg_name(WFX_REG_CONFIG, "CONFIG") \ + wfx_reg_name(WFX_REG_CONTROL, "CONTROL") \ + wfx_reg_name(WFX_REG_IN_OUT_QUEUE, "QUEUE") \ + wfx_reg_name(WFX_REG_AHB_DPORT, "AHB") \ + wfx_reg_name(WFX_REG_BASE_ADDR, "BASE_ADDR") \ + wfx_reg_name(WFX_REG_SRAM_DPORT, "SRAM") \ + wfx_reg_name(WFX_REG_SET_GEN_R_W, "SET_GEN_R_W") \ + wfx_reg_name(WFX_REG_FRAME_OUT, "FRAME_OUT") + +#undef wfx_reg_name +#define wfx_reg_name(sym, name) TRACE_DEFINE_ENUM(sym); +wfx_reg_list_enum +#undef wfx_reg_name +#define wfx_reg_name(sym, name) { sym, name }, +#define wfx_reg_list wfx_reg_list_enum { -1, NULL } + +DECLARE_EVENT_CLASS(io_data, + TP_PROTO(int reg, int addr, const void *io_buf, size_t len), + TP_ARGS(reg, addr, io_buf, len), + TP_STRUCT__entry( + __field(int, reg) + __field(int, addr) + __field(int, msg_len) + __field(int, buf_len) + __array(u8, buf, 32) + __array(u8, addr_str, 10) + ), + TP_fast_assign( + __entry->reg = reg; + __entry->addr = addr; + __entry->msg_len = len; + __entry->buf_len = min_t(int, sizeof(__entry->buf), __entry->msg_len); + memcpy(__entry->buf, io_buf, __entry->buf_len); + if (addr >= 0) + snprintf(__entry->addr_str, 10, "/%08x", addr); + else + __entry->addr_str[0] = 0; + ), + TP_printk("%s%s: %s%s (%d bytes)", + __print_symbolic(__entry->reg, wfx_reg_list), + __entry->addr_str, + 
__print_hex(__entry->buf, __entry->buf_len), + __entry->msg_len > sizeof(__entry->buf) ? " ..." : "", + __entry->msg_len + ) +); +DEFINE_EVENT(io_data, io_write, + TP_PROTO(int reg, int addr, const void *io_buf, size_t len), + TP_ARGS(reg, addr, io_buf, len)); +#define _trace_io_ind_write(reg, addr, io_buf, len)\ + trace_io_write(reg, addr, io_buf, len) +#define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len) +DEFINE_EVENT(io_data, io_read, + TP_PROTO(int reg, int addr, const void *io_buf, size_t len), + TP_ARGS(reg, addr, io_buf, len)); +#define _trace_io_ind_read(reg, addr, io_buf, len)\ + trace_io_read(reg, addr, io_buf, len) +#define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len) + +DECLARE_EVENT_CLASS(io_data32, + TP_PROTO(int reg, int addr, u32 val), + TP_ARGS(reg, addr, val), + TP_STRUCT__entry( + __field(int, reg) + __field(int, addr) + __field(int, val) + __array(u8, addr_str, 10) + ), + TP_fast_assign( + __entry->reg = reg; + __entry->addr = addr; + __entry->val = val; + if (addr >= 0) + snprintf(__entry->addr_str, 10, "/%08x", addr); + else + __entry->addr_str[0] = 0; + ), + TP_printk("%s%s: %08x", + __print_symbolic(__entry->reg, wfx_reg_list), + __entry->addr_str, + __entry->val + ) +); +DEFINE_EVENT(io_data32, io_write32, + TP_PROTO(int reg, int addr, u32 val), + TP_ARGS(reg, addr, val)); +#define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val) +#define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val) +DEFINE_EVENT(io_data32, io_read32, + TP_PROTO(int reg, int addr, u32 val), + TP_ARGS(reg, addr, val)); +#define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val) +#define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val) + +DECLARE_EVENT_CLASS(piggyback, + TP_PROTO(u32 val, bool ignored), + TP_ARGS(val, ignored), + TP_STRUCT__entry( + __field(int, val) + __field(bool, ignored) + ), + TP_fast_assign( + __entry->val = val; + __entry->ignored = ignored; + ), + TP_printk("CONTROL: %08x%s", + __entry->val, + __entry->ignored ? " (ignored)" : "" + ) +); +DEFINE_EVENT(piggyback, piggyback, + TP_PROTO(u32 val, bool ignored), + TP_ARGS(val, ignored)); +#define _trace_piggyback(val, ignored) trace_piggyback(val, ignored) + +TRACE_EVENT(bh_stats, + TP_PROTO(int ind, int req, int cnf, int busy, bool release), + TP_ARGS(ind, req, cnf, busy, release), + TP_STRUCT__entry( + __field(int, ind) + __field(int, req) + __field(int, cnf) + __field(int, busy) + __field(bool, release) + ), + TP_fast_assign( + __entry->ind = ind; + __entry->req = req; + __entry->cnf = cnf; + __entry->busy = busy; + __entry->release = release; + ), + TP_printk("IND/REQ/CNF:%3d/%3d/%3d, REQ in progress:%3d, WUP: %s", + __entry->ind, + __entry->req, + __entry->cnf, + __entry->busy, + __entry->release ? 
"release" : "keep" + ) +); +#define _trace_bh_stats(ind, req, cnf, busy, release)\ + trace_bh_stats(ind, req, cnf, busy, release) + +TRACE_EVENT(tx_stats, + TP_PROTO(const struct wfx_hif_cnf_tx *tx_cnf, const struct sk_buff *skb, + int delay), + TP_ARGS(tx_cnf, skb, delay), + TP_STRUCT__entry( + __field(int, pkt_id) + __field(int, delay_media) + __field(int, delay_queue) + __field(int, delay_fw) + __field(int, ack_failures) + __field(int, flags) + __array(int, rate, 4) + __array(int, tx_count, 4) + ), + TP_fast_assign( + /* Keep sync with wfx_rates definition in main.c */ + static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13 }; + const struct ieee80211_tx_info *tx_info = + (const struct ieee80211_tx_info *)skb->cb; + const struct ieee80211_tx_rate *rates = tx_info->driver_rates; + int i; + + __entry->pkt_id = tx_cnf->packet_id; + __entry->delay_media = le32_to_cpu(tx_cnf->media_delay); + __entry->delay_queue = le32_to_cpu(tx_cnf->tx_queue_delay); + __entry->delay_fw = delay; + __entry->ack_failures = tx_cnf->ack_failures; + if (!tx_cnf->status || __entry->ack_failures) + __entry->ack_failures += 1; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (rates[0].flags & IEEE80211_TX_RC_MCS) + __entry->rate[i] = rates[i].idx; + else + __entry->rate[i] = hw_rate[rates[i].idx]; + __entry->tx_count[i] = rates[i].count; + } + __entry->flags = 0; + if (rates[0].flags & IEEE80211_TX_RC_MCS) + __entry->flags |= 0x01; + if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + __entry->flags |= 0x02; + if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD) + __entry->flags |= 0x04; + if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) + __entry->flags |= 0x08; + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + __entry->flags |= 0x10; + if (tx_cnf->status) + __entry->flags |= 0x20; + if (tx_cnf->status == HIF_STATUS_TX_FAIL_REQUEUE) + __entry->flags |= 0x40; + ), + TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus", + __entry->pkt_id, + __print_flags(__entry->flags, NULL, + { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" }, { 0x08, "R" }, + { 0x10, "D" }, { 0x20, "F" }, { 0x40, "Q" }), + __entry->rate[0], + __entry->tx_count[0], + __entry->rate[1], + __entry->tx_count[1], + __entry->rate[2], + __entry->tx_count[2], + __entry->rate[3], + __entry->tx_count[3], + __entry->ack_failures, + __entry->delay_media, + __entry->delay_queue, + __entry->delay_fw + ) +); +#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay) + +TRACE_EVENT(queues_stats, + TP_PROTO(struct wfx_dev *wdev, const struct wfx_queue *elected_queue), + TP_ARGS(wdev, elected_queue), + TP_STRUCT__entry( + __field(int, vif_id) + __field(int, queue_id) + __array(int, hw, IEEE80211_NUM_ACS * 2) + __array(int, drv, IEEE80211_NUM_ACS * 2) + __array(int, cab, IEEE80211_NUM_ACS * 2) + ), + TP_fast_assign( + const struct wfx_queue *queue; + struct wfx_vif *wvif; + int i, j; + + for (j = 0; j < IEEE80211_NUM_ACS * 2; j++) { + __entry->hw[j] = -1; + __entry->drv[j] = -1; + __entry->cab[j] = -1; + } + __entry->vif_id = -1; + __entry->queue_id = -1; + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + j = wvif->id * IEEE80211_NUM_ACS + i; + WARN_ON(j >= IEEE80211_NUM_ACS * 2); + queue = &wvif->tx_queue[i]; + __entry->hw[j] = atomic_read(&queue->pending_frames); + __entry->drv[j] = skb_queue_len(&queue->normal); + __entry->cab[j] = skb_queue_len(&queue->cab); + if (queue == elected_queue) { + 
__entry->vif_id = wvif->id; + __entry->queue_id = i; + } + } + } + ), + TP_printk("got skb from %d/%d, pend. hw/norm/cab: [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ] [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ]", + __entry->vif_id, __entry->queue_id, + __entry->hw[0], __entry->drv[0], __entry->cab[0], + __entry->hw[1], __entry->drv[1], __entry->cab[1], + __entry->hw[2], __entry->drv[2], __entry->cab[2], + __entry->hw[3], __entry->drv[3], __entry->cab[3], + __entry->hw[4], __entry->drv[4], __entry->cab[4], + __entry->hw[5], __entry->drv[5], __entry->cab[5], + __entry->hw[6], __entry->drv[6], __entry->cab[6], + __entry->hw[7], __entry->drv[7], __entry->cab[7] + ) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE traces + +#include diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h new file mode 100644 index 0000000000..13ba84b3b2 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/wfx.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Common private data. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (c) 2006, Michael Wu + * Copyright 2004-2006 Jean-Baptiste Note , et al. + */ +#ifndef WFX_H +#define WFX_H + +#include +#include +#include +#include +#include + +#include "bh.h" +#include "data_tx.h" +#include "main.h" +#include "queue.h" +#include "hif_tx.h" + +#define USEC_PER_TXOP 32 /* see struct ieee80211_tx_queue_params */ +#define USEC_PER_TU 1024 + +struct wfx_hwbus_ops; + +struct wfx_dev { + struct wfx_platform_data pdata; + struct device *dev; + struct ieee80211_hw *hw; + struct ieee80211_vif *vif[2]; + struct mac_address addresses[2]; + const struct wfx_hwbus_ops *hwbus_ops; + void *hwbus_priv; + + u8 keyset; + struct completion firmware_ready; + struct wfx_hif_ind_startup hw_caps; + struct wfx_hif hif; + struct delayed_work cooling_timeout_work; + bool poll_irq; + bool chip_frozen; + struct mutex conf_mutex; + + struct wfx_hif_cmd hif_cmd; + struct sk_buff_head tx_pending; + wait_queue_head_t tx_dequeue; + atomic_t tx_lock; + + atomic_t packet_id; + u32 key_map; + + struct wfx_hif_rx_stats rx_stats; + struct mutex rx_stats_lock; + struct wfx_hif_tx_power_loop_info tx_power_loop_info; + struct mutex tx_power_loop_info_lock; + struct workqueue_struct *bh_wq; +}; + +struct wfx_vif { + struct wfx_dev *wdev; + struct ieee80211_channel *channel; + int id; + + u32 link_id_map; + + bool after_dtim_tx_allowed; + bool join_in_progress; + + struct delayed_work beacon_loss_work; + + struct wfx_queue tx_queue[4]; + struct wfx_tx_policy_cache tx_policy_cache; + struct work_struct tx_policy_upload_work; + + struct work_struct update_tim_work; + + unsigned long uapsd_mask; + + /* avoid some operations in parallel with scan */ + struct mutex scan_lock; + struct work_struct scan_work; + struct completion scan_complete; + int scan_nb_chan_done; + bool scan_abort; + struct ieee80211_scan_request *scan_req; + + struct completion set_pm_mode_complete; +}; + +static inline struct ieee80211_vif *wvif_to_vif(struct wfx_vif *wvif) +{ + return container_of((void *)wvif, struct ieee80211_vif, drv_priv); +} + +static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id) +{ + if (vif_id >= ARRAY_SIZE(wdev->vif)) { + dev_dbg(wdev->dev, "requesting non-existent vif: %d\n", vif_id); + return NULL; + } + vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif)); + 
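/* clamp the index under speculation (Spectre-v1 hardening) */
+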
if (!wdev->vif[vif_id]) + return NULL; + return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv; +} + +static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev, struct wfx_vif *cur) +{ + int i; + int mark = 0; + struct wfx_vif *tmp; + + if (!cur) + mark = 1; + for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { + tmp = wdev_to_wvif(wdev, i); + if (mark && tmp) + return tmp; + if (tmp == cur) + mark = 1; + } + return NULL; +} + +static inline int wvif_count(struct wfx_dev *wdev) +{ + int i; + int ret = 0; + struct wfx_vif *wvif; + + for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { + wvif = wdev_to_wvif(wdev, i); + if (wvif) + ret++; + } + return ret; +} + +static inline void memreverse(u8 *src, u8 length) +{ + u8 *lo = src; + u8 *hi = src + length - 1; + u8 swap; + + while (lo < hi) { + swap = *lo; + *lo++ = *hi; + *hi-- = swap; + } +} + +static inline int memzcmp(void *src, unsigned int size) +{ + u8 *buf = src; + + if (!size) + return 0; + if (*buf) + return 1; + return memcmp(buf, buf + 1, size - 1); +} + +#endif diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile new file mode 100644 index 0000000000..dc6a7d682c --- /dev/null +++ b/drivers/net/wwan/t7xx/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +ccflags-y += -Werror + +obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o +mtk_t7xx-y:= t7xx_pci.o \ + t7xx_pcie_mac.o \ + t7xx_mhccif.o \ + t7xx_state_monitor.o \ + t7xx_modem_ops.o \ + t7xx_cldma.o \ + t7xx_hif_cldma.o \ + t7xx_port_proxy.o \ + t7xx_port_ctrl_msg.o \ + t7xx_port_wwan.o \ + t7xx_hif_dpmaif.o \ + t7xx_hif_dpmaif_tx.o \ + t7xx_hif_dpmaif_rx.o \ + t7xx_dpmaif.o \ + t7xx_netdev.o diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c new file mode 100644 index 0000000000..9f43f256db --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_cldma.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" + +#define ADDR_SIZE 8 + +void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info) +{ + u32 val; + + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); + val |= IP_BUSY_WAKEUP; + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); +} + +/** + * t7xx_cldma_hw_restore() - Restore CLDMA HW registers. + * @hw_info: Pointer to struct t7xx_cldma_hw. + * + * Restore HW after resume. Writes uplink configuration for CLDMA HW. + */ +void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info) +{ + u32 ul_cfg; + + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; + + if (hw_info->hw_mode == MODE_BIT_64) + ul_cfg |= UL_CFG_BIT_MODE_64; + else if (hw_info->hw_mode == MODE_BIT_40) + ul_cfg |= UL_CFG_BIT_MODE_40; + else if (hw_info->hw_mode == MODE_BIT_36) + ul_cfg |= UL_CFG_BIT_MODE_36; + + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + /* Disable TX and RX invalid address check */ + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); +} + +void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? 
hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD : + hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info) +{ + /* Enable the TX & RX interrupts */ + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); + /* Enable the empty queue interrupt */ + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); +} + +void t7xx_cldma_hw_reset(void __iomem *ao_base) +{ + u32 val; + + val = ioread32(ao_base + REG_INFRA_RST2_SET); + val |= RST2_PMIC_SW_RST_SET; + iowrite32(val, ao_base + REG_INFRA_RST2_SET); + val = ioread32(ao_base + REG_INFRA_RST4_SET); + val |= RST4_CLDMA1_SW_RST_SET; + iowrite32(val, ao_base + REG_INFRA_RST4_SET); + udelay(1); + + val = ioread32(ao_base + REG_INFRA_RST4_CLR); + val |= RST4_CLDMA1_SW_RST_CLR; + iowrite32(val, ao_base + REG_INFRA_RST4_CLR); + val = ioread32(ao_base + REG_INFRA_RST2_CLR); + val |= RST2_PMIC_SW_RST_CLR; + iowrite32(val, ao_base + REG_INFRA_RST2_CLR); +} + +bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno) +{ + u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE; + + return ioread64(hw_info->ap_pdn_base + offset); +} + +void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address, + enum mtk_txrx tx_rx) +{ + u32 offset = qno * ADDR_SIZE; + void __iomem *reg; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 : + hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0; + iowrite64(address, reg + offset); +} + +void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *base = hw_info->ap_pdn_base; + + if (tx_rx == MTK_RX) + iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD); + else + iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD); +} + +unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 mask, val; + + mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS : + hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS; + val = ioread32(reg); + + return val & mask; +} + +void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) +{ + unsigned int ch_id; + + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + ch_id &= bitmask; + /* Clear the ch IDs in the TX interrupt status register */ + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); +} + +void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) +{ + unsigned int ch_id; + + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + ch_id &= bitmask; + /* Clear the ch IDs in the RX interrupt status register */ + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); +} + +unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? 
hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0; + val = ioread32(reg); + return val & bitmask; +} + +void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val << EQ_STA_BIT_OFFSET, reg); +} + +void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val << EQ_STA_BIT_OFFSET, reg); +} + +/** + * t7xx_cldma_hw_init() - Initialize CLDMA HW. + * @hw_info: Pointer to struct t7xx_cldma_hw. + * + * Write uplink and downlink configuration to CLDMA HW. + */ +void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info) +{ + u32 ul_cfg, dl_cfg; + + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG); + /* Configure the DRAM address mode */ + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; + dl_cfg &= ~DL_CFG_BIT_MODE_MASK; + + if (hw_info->hw_mode == MODE_BIT_64) { + ul_cfg |= UL_CFG_BIT_MODE_64; + dl_cfg |= DL_CFG_BIT_MODE_64; + } else if (hw_info->hw_mode == MODE_BIT_40) { + ul_cfg |= UL_CFG_BIT_MODE_40; + dl_cfg |= DL_CFG_BIT_MODE_40; + } else if (hw_info->hw_mode == MODE_BIT_36) { + ul_cfg |= UL_CFG_BIT_MODE_36; + dl_cfg |= DL_CFG_BIT_MODE_36; + } + + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + dl_cfg |= DL_CFG_UP_HW_LAST; + iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG); + iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK); + iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK); + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); +} + +void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD : + hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD; + iowrite32(CLDMA_ALL_Q, reg); +} + +void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + + reg = tx_rx == MTK_RX ? 
hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + iowrite32(TXRX_STATUS_BITMASK, reg); + iowrite32(EMPTY_STATUS_BITMASK, reg); +} diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.h b/drivers/net/wwan/t7xx/t7xx_cldma.h new file mode 100644 index 0000000000..8949e8377f --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_cldma.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Sreehari Kancharla + */ + +#ifndef __T7XX_CLDMA_H__ +#define __T7XX_CLDMA_H__ + +#include +#include + +#define CLDMA_TXQ_NUM 8 +#define CLDMA_RXQ_NUM 8 +#define CLDMA_ALL_Q GENMASK(7, 0) + +/* Interrupt status bits */ +#define EMPTY_STATUS_BITMASK GENMASK(15, 8) +#define TXRX_STATUS_BITMASK GENMASK(7, 0) +#define EQ_STA_BIT_OFFSET 8 +#define L2_INT_BIT_COUNT 16 +#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK) + +#define TQ_ERR_INT_BITMASK GENMASK(23, 16) +#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) + +#define RQ_ERR_INT_BITMASK GENMASK(23, 16) +#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) + +#define CLDMA0_AO_BASE 0x10049000 +#define CLDMA0_PD_BASE 0x1021d000 +#define CLDMA1_AO_BASE 0x1004b000 +#define CLDMA1_PD_BASE 0x1021f000 + +#define CLDMA_R_AO_BASE 0x10023000 +#define CLDMA_R_PD_BASE 0x1023d000 + +/* CLDMA TX */ +#define REG_CLDMA_UL_START_ADDRL_0 0x0004 +#define REG_CLDMA_UL_START_ADDRH_0 0x0008 +#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044 +#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048 +#define REG_CLDMA_UL_STATUS 0x0084 +#define REG_CLDMA_UL_START_CMD 0x0088 +#define REG_CLDMA_UL_RESUME_CMD 0x008c +#define REG_CLDMA_UL_STOP_CMD 0x0090 +#define REG_CLDMA_UL_ERROR 0x0094 +#define REG_CLDMA_UL_CFG 0x0098 +#define UL_CFG_BIT_MODE_36 BIT(5) +#define UL_CFG_BIT_MODE_40 BIT(6) +#define UL_CFG_BIT_MODE_64 BIT(7) +#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5) + +#define REG_CLDMA_UL_MEM 0x009c +#define UL_MEM_CHECK_DIS BIT(0) + +/* CLDMA RX */ +#define REG_CLDMA_DL_START_CMD 0x05bc +#define REG_CLDMA_DL_RESUME_CMD 0x05c0 +#define REG_CLDMA_DL_STOP_CMD 0x05c4 +#define REG_CLDMA_DL_MEM 0x0508 +#define DL_MEM_CHECK_DIS BIT(0) + +#define REG_CLDMA_DL_CFG 0x0404 +#define DL_CFG_UP_HW_LAST BIT(2) +#define DL_CFG_BIT_MODE_36 BIT(10) +#define DL_CFG_BIT_MODE_40 BIT(11) +#define DL_CFG_BIT_MODE_64 BIT(12) +#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10) + +#define REG_CLDMA_DL_START_ADDRL_0 0x0478 +#define REG_CLDMA_DL_START_ADDRH_0 0x047c +#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8 +#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc +#define REG_CLDMA_DL_STATUS 0x04f8 + +/* CLDMA MISC */ +#define REG_CLDMA_L2TISAR0 0x0810 +#define REG_CLDMA_L2TISAR1 0x0814 +#define REG_CLDMA_L2TIMR0 0x0818 +#define REG_CLDMA_L2TIMR1 0x081c +#define REG_CLDMA_L2TIMCR0 0x0820 +#define REG_CLDMA_L2TIMCR1 0x0824 +#define REG_CLDMA_L2TIMSR0 0x0828 +#define REG_CLDMA_L2TIMSR1 0x082c +#define REG_CLDMA_L3TISAR0 0x0830 +#define REG_CLDMA_L3TISAR1 0x0834 +#define REG_CLDMA_L2RISAR0 0x0850 +#define REG_CLDMA_L2RISAR1 0x0854 +#define REG_CLDMA_L3RISAR0 0x0870 +#define REG_CLDMA_L3RISAR1 0x0874 +#define REG_CLDMA_IP_BUSY 0x08b4 +#define IP_BUSY_WAKEUP BIT(0) +#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0) +#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0) + +/* CLDMA MISC */ +#define REG_CLDMA_L2RIMR0 0x0858 +#define REG_CLDMA_L2RIMR1 0x085c 
+#define REG_CLDMA_L2RIMCR0 0x0860 +#define REG_CLDMA_L2RIMCR1 0x0864 +#define REG_CLDMA_L2RIMSR0 0x0868 +#define REG_CLDMA_L2RIMSR1 0x086c +#define REG_CLDMA_BUSY_MASK 0x0954 +#define BUSY_MASK_PCIE BIT(0) +#define BUSY_MASK_AP BIT(1) +#define BUSY_MASK_MD BIT(2) + +#define REG_CLDMA_INT_MASK 0x0960 + +/* CLDMA RESET */ +#define REG_INFRA_RST4_SET 0x0730 +#define RST4_CLDMA1_SW_RST_SET BIT(20) + +#define REG_INFRA_RST4_CLR 0x0734 +#define RST4_CLDMA1_SW_RST_CLR BIT(20) + +#define REG_INFRA_RST2_SET 0x0140 +#define RST2_PMIC_SW_RST_SET BIT(18) + +#define REG_INFRA_RST2_CLR 0x0144 +#define RST2_PMIC_SW_RST_CLR BIT(18) + +enum mtk_txrx { + MTK_TX, + MTK_RX, +}; + +enum t7xx_hw_mode { + MODE_BIT_32, + MODE_BIT_36, + MODE_BIT_40, + MODE_BIT_64, +}; + +struct t7xx_cldma_hw { + enum t7xx_hw_mode hw_mode; + void __iomem *ap_ao_base; + void __iomem *ap_pdn_base; + u32 phy_interrupt_id; +}; + +void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx); +unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); +void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); +void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); +void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, + unsigned int qno, u64 address, enum mtk_txrx tx_rx); +void t7xx_cldma_hw_reset(void __iomem *ao_base); +void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); +unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info); +bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno); +#endif diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_dpmaif.c new file mode 100644 index 0000000000..6d3edadecb --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c @@ -0,0 +1,1281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_reg.h" + +#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us) + +static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info) +{ + struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask; + u32 value, ul_intr_enable, dl_intr_enable; + int ret; + + ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; + iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + + /* Set interrupt enable mask */ + iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); + iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); + + /* Check mask status */ + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_intr_enable) != ul_intr_enable, 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + return ret; + + dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR; + isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable; + ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN | + DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN; + isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; + iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); + + /* Set DL ISR PD enable mask */ + iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0, + value, (value & ul_intr_enable) != ul_intr_enable, 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + return ret; + + isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY; + iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); + iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk, + hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK); + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); + value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); + iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK); + + return 0; +} + +static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + struct dpmaif_isr_en_mask *isr_en_msk; + u32 value, ul_int_que_done; + int ret; + + isr_en_msk = &hw_info->isr_en_mask; + ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done; + iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_int_que_done) == ul_int_que_done, 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, + "Could not mask the UL interrupt. 
DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); +} + +void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + struct dpmaif_isr_en_mask *isr_en_msk; + u32 value, ul_int_que_done; + int ret; + + isr_en_msk = &hw_info->isr_en_mask; + ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done; + iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_int_que_done) != ul_int_que_done, 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, + "Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); +} + +void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info) +{ + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR; + iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info) +{ + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR; + iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); + iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + return value; +} + +static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 value, q_done; + int ret; + + q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; + iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + + ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done, + 0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done); + if (ret) { + dev_err(hw_info->dev, + "Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); + return -ETIMEDOUT; + } + + hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done; + return 0; +} + +void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 mask; + + mask = qno == DPF_RX_QNO0 ? 
DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; + iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask; +} + +void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info) +{ + u32 ip_busy_sts; + + ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY); + iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); +} + +static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno) +{ + if (qno == DPF_RX_QNO0) + iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + else + iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); +} + +void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno) +{ + if (qno == DPF_RX_QNO0) + iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); + else + iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); +} + +void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); +} + +static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para, + enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue) +{ + para->intr_types[para->intr_cnt] = intr_type; + para->intr_queues[para->intr_cnt] = intr_queue; + para->intr_cnt++; +} + +/* The para->intr_cnt counter is set to zero before this function is called. + * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. + */ +static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info, + unsigned int intr_status, + struct dpmaif_hw_intr_st_para *para) +{ + unsigned long value; + + value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status); + if (value) { + unsigned int index; + + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value); + + for_each_set_bit(index, &value, DPMAIF_TXQ_NUM) + t7xx_dpmaif_mask_ulq_intr(hw_info, index); + } + + value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value); + + value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value); + + value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value); + + value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value); + + /* Clear interrupt status */ + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); +} + +/* The para->intr_cnt counter is set to zero before this function is called. + * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. 
+ */
+static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int intr_status,
+ struct dpmaif_hw_intr_st_para *para, int qno)
+{
+ if (qno == DPF_RX_QNO_DFT) {
+ if (intr_status & DP_DL_INT_SKB_LEN_ERR)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_BATCNT_LEN_ERR,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ }
+
+ if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_PITCNT_LEN_ERR,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ }
+
+ if (intr_status & DP_DL_INT_PKT_EMPTY_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_FRG_EMPTY_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_MTU_ERR_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno));
+ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
+ }
+
+ if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR,
+ DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_Q0_DONE) {
+ /* Mask the RX done interrupt immediately after it occurs; do not clear
+ * the interrupt if the mask operation fails.
+ */
+ if (!t7xx_mask_dlq_intr(hw_info, qno))
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno));
+ else
+ intr_status &= ~DP_DL_INT_Q0_DONE;
+ }
+ } else {
+ if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno));
+ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
+ }
+
+ if (intr_status & DP_DL_INT_Q1_DONE) {
+ if (!t7xx_mask_dlq_intr(hw_info, qno))
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno));
+ else
+ intr_status &= ~DP_DL_INT_Q1_DONE;
+ }
+ }
+
+ intr_status |= DP_DL_INT_BATCNT_LEN_ERR;
+ /* Clear interrupt status */
+ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+}
+
+/**
+ * t7xx_dpmaif_hw_get_intr_cnt() - Reads interrupt status and count from HW.
+ * @hw_info: Pointer to struct hw_info.
+ * @para: Pointer to struct dpmaif_hw_intr_st_para.
+ * @qno: Queue number.
+ *
+ * Reads RX/TX interrupt status from HW and clears UL/DL status as needed.
+ *
+ * Return: Interrupt count.
+ */
+int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info,
+ struct dpmaif_hw_intr_st_para *para, int qno)
+{
+ u32 rx_intr_status, tx_intr_status = 0;
+ u32 rx_intr_qdone, tx_intr_qdone = 0;
+
+ rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+ rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0);
+
+ /* TX interrupt status */
+ if (qno == DPF_RX_QNO_DFT) {
+ /* All ULQ and DLQ0 interrupts use the same source; no need to check ULQ interrupts
+ * when a DLQ1 interrupt has occurred.
+ */ + tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); + } + + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + + if (qno == DPF_RX_QNO_DFT) { + /* Do not schedule bottom half again or clear UL interrupt status when we + * have already masked it. + */ + tx_intr_status &= ~tx_intr_qdone; + if (tx_intr_status) + t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para); + } + + if (rx_intr_status) { + if (qno == DPF_RX_QNO0) { + rx_intr_status &= DP_DL_Q0_STATUS_MASK; + if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE) + /* Do not schedule bottom half again or clear DL + * queue done interrupt status when we have already masked it. + */ + rx_intr_status &= ~DP_DL_INT_Q0_DONE; + } else { + rx_intr_status &= DP_DL_Q1_STATUS_MASK; + if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE) + rx_intr_status &= ~DP_DL_INT_Q1_DONE; + } + + if (rx_intr_status) + t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno); + } + + return para->intr_cnt; +} + +static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR); + value |= DPMAIF_MEM_CLR; + iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR); + + return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR, + value, !(value & DPMAIF_MEM_CLR), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); +} + +static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT); + udelay(2); + iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT); + udelay(2); + iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT); + udelay(2); + iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT); + udelay(2); +} + +static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info) +{ + u32 ap_port_mode; + int ret; + + t7xx_dpmaif_hw_reset(hw_info); + + ret = t7xx_dpmaif_sram_init(hw_info); + if (ret) + return ret; + + ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + ap_port_mode |= DPMAIF_PORT_MODE_PCIE; + iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN); + return 0; +} + +static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW); +} + +static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info) +{ + u32 enable_bat_cache, enable_pit_burst; + + enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI; + iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN; + iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + + /* DPMAIF DL DLQ part HW setting */ + +static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2; + value |= DPMAIF_HASH_PRIME_DF << 4; + value |= DPMAIF_HPC_TOTAL_NUM << 8; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL); +} + +static void t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = DPMAIF_AGG_MAX_LEN_DF | 
DPMAIF_AGG_TBL_ENT_NUM_DF << 16; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG); +} + +static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF, + hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5); +} + +static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0); +} + +static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value, i; + + /* Each register holds two DLQ threshold timeout values */ + for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) { + value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF); + value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK, + DPMAIF_DLQ_TIMEOUT_THRES_DF); + iowrite32(value, + hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i); + } +} + +static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES); +} + +static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info) +{ + t7xx_dpmaif_hw_hpc_cntl_set(hw_info); + t7xx_dpmaif_hw_agg_cfg_set(hw_info); + t7xx_dpmaif_hw_hash_bit_choose_set(hw_info); + t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info); + t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info); + t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info); +} + +static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en) +{ + u32 value, dl_bat_init = 0; + int ret; + + if (frg_en) + dl_bat_init = DPMAIF_DL_BAT_FRG_INIT; + + dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET; + dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); + return ret; + } + + iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n"); + + return ret; +} + +static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info, + dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3); +} + +static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + value &= ~DPMAIF_BAT_SIZE_MSK; + value |= size & DPMAIF_BAT_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); +} + +static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + if (enable) + value |= DPMAIF_BAT_EN_MSK; + else + value &= ~DPMAIF_BAT_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); +} + +static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); + value &= ~DPMAIF_BAT_BID_MAXCNT_MSK; + value |= 
FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); +} + +static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1); +} + +static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_PIT_CHK_NUM_MSK; + value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); + value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK; + value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK, + DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); +} + +static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_BAT_BUF_SZ_MSK; + value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK, + DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_BAT_RSV_LEN_MSK; + value |= DPMAIF_HW_BAT_RSVLEN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + value &= ~DPMAIF_PKT_ALIGN_MSK; + value |= DPMAIF_PKT_ALIGN_EN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + value |= DPMAIF_DL_PKT_CHECKSUM_EN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + value &= ~DPMAIF_FRG_CHECK_THRES_MSK; + value |= DPMAIF_HW_CHK_FRG_NUM; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + value &= ~DPMAIF_FRG_BUF_SZ_MSK; + value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK, + DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + + if (enable) + value |= DPMAIF_FRG_EN_MSK; + else + value &= ~DPMAIF_FRG_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + 
DPMAIF_AO_DL_RDY_CHK_THRES); + value &= ~DPMAIF_BAT_CHECK_THRES_MSK; + value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); + value &= ~DPMAIF_DL_PIT_SEQ_MSK; + value |= DPMAIF_DL_PIT_SEQ_VALUE; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); +} + +static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info, + dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4); +} + +static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); + value &= ~DPMAIF_PIT_SIZE_MSK; + value |= size & DPMAIF_PIT_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6); +} + +static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); + value |= DPMAIF_DLQPIT_EN_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); +} + +static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info, + unsigned int pit_idx) +{ + unsigned int dl_pit_init; + int timeout; + u32 value; + + dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET; + dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS); + dl_pit_init |= DPMAIF_DL_PIT_INIT_EN; + + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, + value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), + DPMAIF_CHECK_DELAY_US, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) { + dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n"); + return; + } + + iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT); + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, + value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), + DPMAIF_CHECK_DELAY_US, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n"); +} + +static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num, + struct dpmaif_dl *dl_que) +{ + t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base); + t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt); + t7xx_dpmaif_dl_dlq_pit_en(hw_info); + t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num); +} + +static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info) +{ + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) + t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]); +} + +static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + u32 dl_bat_init, value; + int timeout; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + if (enable) + value |= DPMAIF_BAT_EN_MSK; + else + value &= ~DPMAIF_BAT_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + dl_bat_init = 
DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT; + dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; + + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n"); + + iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); +} + +static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info) +{ + struct dpmaif_dl *dl_que; + int ret; + + t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info); + + dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */ + if (!dl_que->que_started) + return -EBUSY; + + t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info); + t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info); + t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info); + t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info); + t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info); + t7xx_dpmaif_dl_set_pkt_alignment(hw_info); + t7xx_dpmaif_dl_set_pit_seqnum(hw_info); + t7xx_dpmaif_dl_set_ao_mtu(hw_info); + t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info); + t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info); + t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info); + t7xx_dpmaif_dl_frg_ao_en(hw_info, true); + + t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base); + t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt); + t7xx_dpmaif_dl_bat_en(hw_info, true); + + ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true); + if (ret) + return ret; + + t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base); + t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt); + t7xx_dpmaif_dl_bat_en(hw_info, false); + + ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false); + if (ret) + return ret; + + /* Init PIT (two PIT table) */ + t7xx_dpmaif_config_all_dlq_hw(hw_info); + t7xx_dpmaif_dl_all_q_en(hw_info, true); + t7xx_dpmaif_dl_set_pkt_checksum(hw_info); + return 0; +} + +static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info, + unsigned int q_num, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); + value &= ~DPMAIF_DRB_SIZE_MSK; + value |= size & DPMAIF_DRB_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); +} + +static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info, + unsigned int q_num, dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num)); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num)); +} + +static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info, + unsigned int q_num, bool ready) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (ready) + value |= BIT(q_num); + else + value &= ~BIT(q_num); + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info, + unsigned int q_num, bool enable) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (enable) + value |= BIT(q_num + 8); + else + value &= ~BIT(q_num + 8); + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info) +{ + struct 
dpmaif_ul *ul_que; + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + ul_que = &hw_info->ul_que[i]; + if (ul_que->que_started) { + t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt * + DPMAIF_UL_DRB_SIZE_WORD); + t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base); + t7xx_dpmaif_ul_rdy_en(hw_info, i, true); + t7xx_dpmaif_ul_arb_en(hw_info, i, true); + } else { + t7xx_dpmaif_ul_arb_en(hw_info, i, false); + } + } +} + +static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info) +{ + u32 ap_cfg; + int ret; + + ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); + ap_cfg |= DPMAIF_SRAM_SYNC; + iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG, + ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + return ret; + + iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET); + iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET); + return 0; +} + +static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info) +{ + u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY); + + return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS); +} + +static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (enable) + ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN; + else + ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN; + + iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info) +{ + u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY); + + return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS); +} + +void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, + unsigned int drb_entry_cnt) +{ + u32 ul_update, value; + int err; + + ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK; + ul_update |= DPMAIF_UL_ADD_UPDATE; + + err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), + value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (err) { + dev_err(hw_info->dev, "UL add is not ready\n"); + return; + } + + iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num)); + + err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), + value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (err) + dev_err(hw_info->dev, "Timeout updating UL add\n"); +} + +unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num)); + + return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / DPMAIF_UL_DRB_SIZE_WORD; +} + +int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, + unsigned int pit_remain_cnt) +{ + u32 dl_update, value; + int ret; + + dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK; + dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n"); + return ret; + } + + iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD); + + ret = 
ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem add dlq failed\n"); + return ret; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, + unsigned int dlq_pit_idx) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX + + dlq_pit_idx * DLQ_PIT_IDX_SIZE); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) +{ + u32 value; + + return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); +} + +int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt) +{ + unsigned int value; + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "DL add BAT not ready\n"); + return -EBUSY; + } + + value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; + value |= DPMAIF_DL_ADD_UPDATE; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "DL add BAT timeout\n"); + return -EBUSY; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt) +{ + unsigned int value; + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n"); + return -EBUSY; + } + + value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; + value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "Data plane modem add frag DLQ failed"); + return -EBUSY; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_params *init_para) +{ + struct dpmaif_dl *dl_que; + struct dpmaif_ul *ul_que; + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + dl_que = &hw_info->dl_que[i]; + dl_que->bat_base = init_para->pkt_bat_base_addr[i]; + dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i]; + dl_que->pit_base = init_para->pit_base_addr[i]; + dl_que->pit_size_cnt = init_para->pit_size_cnt[i]; + dl_que->frg_base = init_para->frg_bat_base_addr[i]; + dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i]; + dl_que->que_started = true; + } + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + ul_que = &hw_info->ul_que[i]; + ul_que->drb_base = init_para->drb_base_addr[i]; + ul_que->drb_size_cnt = init_para->drb_size_cnt[i]; + ul_que->que_started = true; + } +} + +/** + * t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues. + * @hw_info: Pointer to struct hw_info. + * + * Disable HW UL queues. 
Waits for busy UL queues to go idle, polling
+ * up to 1000000 times (DPMAIF_MAX_CHECK_COUNT).
+ *
+ * Return:
+ * * 0		- Success
+ * * -ETIMEDOUT	- Timed out waiting for busy queues to go idle
+ */
+int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info)
+{
+	int count = 0;
+
+	t7xx_dpmaif_ul_all_q_en(hw_info, false);
+	while (t7xx_dpmaif_ul_idle_check(hw_info)) {
+		if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+			dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n",
+				ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY));
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues.
+ * @hw_info: Pointer to struct hw_info.
+ *
+ * Disable HW DL queues. Waits for busy DL queues to go idle, polling
+ * up to 1000000 times (DPMAIF_MAX_CHECK_COUNT). Then waits, with the
+ * same attempt count, for the HW PIT write index to equal the read index.
+ *
+ * Return:
+ * * 0		- Success.
+ * * -ETIMEDOUT	- Timed out waiting for busy queues to go idle.
+ */
+int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info)
+{
+	unsigned int wr_idx, rd_idx;
+	int count = 0;
+
+	t7xx_dpmaif_dl_all_q_en(hw_info, false);
+	while (t7xx_dpmaif_dl_idle_check(hw_info)) {
+		if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+			dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n",
+				ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY));
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Check middle PIT sync done */
+	count = 0;
+	do {
+		wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX);
+		wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
+		rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX);
+		rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
+
+		if (wr_idx == rd_idx)
+			return 0;
+	} while (++count < DPMAIF_MAX_CHECK_COUNT);
+
+	dev_err(hw_info->dev, "Middle PIT sync check failed\n");
+	return -ETIMEDOUT;
+}
+
+void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info)
+{
+	t7xx_dpmaif_ul_all_q_en(hw_info, true);
+	t7xx_dpmaif_dl_all_q_en(hw_info, true);
+}
+
+/**
+ * t7xx_dpmaif_hw_init() - Initialize HW data path API.
+ * @hw_info: Pointer to struct hw_info.
+ * @init_param: Pointer to struct dpmaif_hw_params.
+ *
+ * Configures the port mode and clocks, initializes HW interrupts, and
+ * configures the HW queues.
+ *
+ * Return:
+ * * 0		- Success.
+ * * -ERROR	- Error code from a failed sub-initialization.
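+ *
+ * Illustrative call sequence (a sketch only; local names are examples,
+ * not part of this API):
+ *
+ *	struct dpmaif_hw_params params = { ... };
+ *	int ret = t7xx_dpmaif_hw_init(hw_info, &params);
+ *
+ * After t7xx_dpmaif_hw_stop_all_txq() and t7xx_dpmaif_hw_stop_all_rxq(),
+ * traffic can be re-enabled with t7xx_dpmaif_start_hw().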
+ */ +int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param) +{ + int ret; + + ret = t7xx_dpmaif_hw_config(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW config failed\n"); + return ret; + } + + ret = t7xx_dpmaif_init_intr(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n"); + return ret; + } + + t7xx_dpmaif_set_queue_property(hw_info, init_param); + t7xx_dpmaif_pcie_dpmaif_sign(hw_info); + t7xx_dpmaif_dl_performance(hw_info); + + ret = t7xx_dpmaif_config_dlq_hw(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n"); + return ret; + } + + t7xx_dpmaif_config_ulq_hw(hw_info); + + ret = t7xx_dpmaif_hw_init_done(hw_info); + if (ret) + dev_err(hw_info->dev, "DPMAIF HW queue init failed\n"); + + return ret; +} + +bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 intr_status; + + intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno); + if (intr_status) { + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + return true; + } + + return false; +} diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_dpmaif.h new file mode 100644 index 0000000000..ae292355a3 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_DPMAIF_H__ +#define __T7XX_DPMAIF_H__ + +#include +#include + +#define DPMAIF_DL_PIT_SEQ_VALUE 251 +#define DPMAIF_UL_DRB_SIZE_WORD 4 + +#define DPMAIF_MAX_CHECK_COUNT 1000000 +#define DPMAIF_CHECK_TIMEOUT_US 10000 +#define DPMAIF_CHECK_INIT_TIMEOUT_US 100000 +#define DPMAIF_CHECK_DELAY_US 10 + +#define DPMAIF_RXQ_NUM 2 +#define DPMAIF_TXQ_NUM 5 + +struct dpmaif_isr_en_mask { + unsigned int ap_ul_l2intr_en_msk; + unsigned int ap_dl_l2intr_en_msk; + unsigned int ap_udl_ip_busy_en_msk; + unsigned int ap_dl_l2intr_err_en_msk; +}; + +struct dpmaif_ul { + bool que_started; + unsigned char reserved[3]; + dma_addr_t drb_base; + unsigned int drb_size_cnt; +}; + +struct dpmaif_dl { + bool que_started; + unsigned char reserved[3]; + dma_addr_t pit_base; + unsigned int pit_size_cnt; + dma_addr_t bat_base; + unsigned int bat_size_cnt; + dma_addr_t frg_base; + unsigned int frg_size_cnt; + unsigned int pit_seq; +}; + +struct dpmaif_hw_info { + struct device *dev; + void __iomem *pcie_base; + struct dpmaif_dl dl_que[DPMAIF_RXQ_NUM]; + struct dpmaif_ul ul_que[DPMAIF_TXQ_NUM]; + struct dpmaif_isr_en_mask isr_en_mask; +}; + +/* DPMAIF HW Initialization parameter structure */ +struct dpmaif_hw_params { + /* UL part */ + dma_addr_t drb_base_addr[DPMAIF_TXQ_NUM]; + unsigned int drb_size_cnt[DPMAIF_TXQ_NUM]; + /* DL part */ + dma_addr_t pkt_bat_base_addr[DPMAIF_RXQ_NUM]; + unsigned int pkt_bat_size_cnt[DPMAIF_RXQ_NUM]; + dma_addr_t frg_bat_base_addr[DPMAIF_RXQ_NUM]; + unsigned int frg_bat_size_cnt[DPMAIF_RXQ_NUM]; + dma_addr_t pit_base_addr[DPMAIF_RXQ_NUM]; + unsigned int pit_size_cnt[DPMAIF_RXQ_NUM]; +}; + +enum dpmaif_hw_intr_type { + DPF_INTR_INVALID_MIN, + DPF_INTR_UL_DONE, + DPF_INTR_UL_DRB_EMPTY, + DPF_INTR_UL_MD_NOTREADY, + DPF_INTR_UL_MD_PWR_NOTREADY, + DPF_INTR_UL_LEN_ERR, + DPF_INTR_DL_DONE, + DPF_INTR_DL_SKB_LEN_ERR, + 
DPF_INTR_DL_BATCNT_LEN_ERR, + DPF_INTR_DL_PITCNT_LEN_ERR, + DPF_INTR_DL_PKT_EMPTY_SET, + DPF_INTR_DL_FRG_EMPTY_SET, + DPF_INTR_DL_MTU_ERR, + DPF_INTR_DL_FRGCNT_LEN_ERR, + DPF_INTR_DL_Q0_PITCNT_LEN_ERR, + DPF_INTR_DL_Q1_PITCNT_LEN_ERR, + DPF_INTR_DL_HPC_ENT_TYPE_ERR, + DPF_INTR_DL_Q0_DONE, + DPF_INTR_DL_Q1_DONE, + DPF_INTR_INVALID_MAX +}; + +#define DPF_RX_QNO0 0 +#define DPF_RX_QNO1 1 +#define DPF_RX_QNO_DFT DPF_RX_QNO0 + +struct dpmaif_hw_intr_st_para { + unsigned int intr_cnt; + enum dpmaif_hw_intr_type intr_types[DPF_INTR_INVALID_MAX - 1]; + unsigned int intr_queues[DPF_INTR_INVALID_MAX - 1]; +}; + +#define DPMAIF_HW_BAT_REMAIN 64 +#define DPMAIF_HW_BAT_PKTBUF (128 * 28) +#define DPMAIF_HW_FRG_PKTBUF 128 +#define DPMAIF_HW_BAT_RSVLEN 64 +#define DPMAIF_HW_PKT_BIDCNT 1 +#define DPMAIF_HW_MTU_SIZE (3 * 1024 + 8) +#define DPMAIF_HW_CHK_BAT_NUM 62 +#define DPMAIF_HW_CHK_FRG_NUM 3 +#define DPMAIF_HW_CHK_PIT_NUM (2 * DPMAIF_HW_CHK_BAT_NUM) + +#define DP_UL_INT_DONE_OFFSET 0 +#define DP_UL_INT_QDONE_MSK GENMASK(4, 0) +#define DP_UL_INT_EMPTY_MSK GENMASK(9, 5) +#define DP_UL_INT_MD_NOTREADY_MSK GENMASK(14, 10) +#define DP_UL_INT_MD_PWR_NOTREADY_MSK GENMASK(19, 15) +#define DP_UL_INT_ERR_MSK GENMASK(24, 20) + +#define DP_DL_INT_QDONE_MSK BIT(0) +#define DP_DL_INT_SKB_LEN_ERR BIT(1) +#define DP_DL_INT_BATCNT_LEN_ERR BIT(2) +#define DP_DL_INT_PITCNT_LEN_ERR BIT(3) +#define DP_DL_INT_PKT_EMPTY_MSK BIT(4) +#define DP_DL_INT_FRG_EMPTY_MSK BIT(5) +#define DP_DL_INT_MTU_ERR_MSK BIT(6) +#define DP_DL_INT_FRG_LEN_ERR_MSK BIT(7) +#define DP_DL_INT_Q0_PITCNT_LEN_ERR BIT(8) +#define DP_DL_INT_Q1_PITCNT_LEN_ERR BIT(9) +#define DP_DL_INT_HPC_ENT_TYPE_ERR BIT(10) +#define DP_DL_INT_Q0_DONE BIT(13) +#define DP_DL_INT_Q1_DONE BIT(14) + +#define DP_DL_Q0_STATUS_MASK (DP_DL_INT_Q0_PITCNT_LEN_ERR | DP_DL_INT_Q0_DONE) +#define DP_DL_Q1_STATUS_MASK (DP_DL_INT_Q1_PITCNT_LEN_ERR | DP_DL_INT_Q1_DONE) + +int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param); +int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info); +int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info); +int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_intr_st_para *para, int qno); +void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num); +void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, + unsigned int drb_entry_cnt); +int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt); +int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt); +int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, + unsigned int pit_remain_cnt); +void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno); +void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno); +bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno); +void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info); +unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned 
int q_num); +unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, + unsigned int dlq_pit_idx); + +#endif /* __T7XX_DPMAIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c new file mode 100644 index 0000000000..6ff30cb8eb --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -0,0 +1,1339 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_port_proxy.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define MAX_TX_BUDGET 16 +#define MAX_RX_BUDGET 16 + +#define CHECK_Q_STOP_TIMEOUT_US 1000000 +#define CHECK_Q_STOP_STEP_US 10000 + +#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header)) + +static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, + enum mtk_txrx tx_rx, unsigned int index) +{ + queue->dir = tx_rx; + queue->index = index; + queue->md_ctrl = md_ctrl; + queue->tr_ring = NULL; + queue->tr_done = NULL; + queue->tx_next = NULL; +} + +static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, + enum mtk_txrx tx_rx, unsigned int index) +{ + md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index); + init_waitqueue_head(&queue->req_wq); + spin_lock_init(&queue->ring_lock); +} + +static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr) +{ + gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr)); + gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr)); +} + +static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr) +{ + gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr)); + gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr)); +} + +static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, + size_t size, gfp_t gfp_mask) +{ + req->skb = __dev_alloc_skb(size, gfp_mask); + if (!req->skb) + return -ENOMEM; + + req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE); + if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { + dev_kfree_skb_any(req->skb); + req->skb = NULL; + req->mapped_buff = 0; + dev_err(md_ctrl->dev, "DMA mapping failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + unsigned int hwo_polling_count = 0; + struct t7xx_cldma_hw *hw_info; + bool rx_not_done = true; + unsigned long flags; + int count = 0; + + hw_info = &md_ctrl->hw_info; + + do { + struct 
cldma_request *req; + struct cldma_gpd *gpd; + struct sk_buff *skb; + int ret; + + req = queue->tr_done; + if (!req) + return -ENODATA; + + gpd = req->gpd; + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { + dma_addr_t gpd_addr; + + if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) { + dev_err(md_ctrl->dev, "PCIe Link disconnected\n"); + return -ENODEV; + } + + gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); + if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100) + return 0; + + udelay(1); + continue; + } + + hwo_polling_count = 0; + skb = req->skb; + + if (req->mapped_buff) { + dma_unmap_single(md_ctrl->dev, req->mapped_buff, + queue->tr_ring->pkt_size, DMA_FROM_DEVICE); + req->mapped_buff = 0; + } + + skb->len = 0; + skb_reset_tail_pointer(skb); + skb_put(skb, le16_to_cpu(gpd->data_buff_len)); + + ret = md_ctrl->recv_skb(queue, skb); + /* Break processing, will try again later */ + if (ret < 0) + return ret; + + req->skb = NULL; + t7xx_cldma_gpd_set_data_ptr(gpd, 0); + + spin_lock_irqsave(&queue->ring_lock, flags); + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + req = queue->rx_refill; + + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL); + if (ret) + return ret; + + gpd = req->gpd; + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); + gpd->data_buff_len = 0; + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + + spin_lock_irqsave(&queue->ring_lock, flags); + queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + rx_not_done = ++count < budget || !need_resched(); + } while (rx_not_done); + + *over_budget = true; + return 0; +} + +static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct t7xx_cldma_hw *hw_info; + unsigned int pending_rx_int; + bool over_budget = false; + unsigned long flags; + int ret; + + hw_info = &md_ctrl->hw_info; + + do { + ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget); + if (ret == -ENODATA) + return 0; + else if (ret) + return ret; + + pending_rx_int = 0; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->rxq_active & BIT(queue->index)) { + if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX)) + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX); + + pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index), + MTK_RX); + if (pending_rx_int) { + t7xx_cldma_hw_rx_done(hw_info, pending_rx_int); + + if (over_budget) { + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + return -EAGAIN; + } + } + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } while (pending_rx_int); + + return 0; +} + +static void t7xx_cldma_rx_done(struct work_struct *work) +{ + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + int value; + + value = t7xx_cldma_gpd_rx_collect(queue, queue->budget); + if (value && md_ctrl->rxq_active & BIT(queue->index)) { + queue_work(queue->worker, &queue->cldma_work); + return; + } + + t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); +} + +static 
int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + unsigned int dma_len, count = 0; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + dma_addr_t dma_free; + struct sk_buff *skb; + + while (!kthread_should_stop()) { + spin_lock_irqsave(&queue->ring_lock, flags); + req = queue->tr_done; + if (!req) { + spin_unlock_irqrestore(&queue->ring_lock, flags); + break; + } + gpd = req->gpd; + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { + spin_unlock_irqrestore(&queue->ring_lock, flags); + break; + } + queue->budget++; + dma_free = req->mapped_buff; + dma_len = le16_to_cpu(gpd->data_buff_len); + skb = req->skb; + req->skb = NULL; + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + count++; + dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } + + if (count) + wake_up_nr(&queue->req_wq, count); + + return count; +} + +static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct cldma_request *req; + dma_addr_t ul_curr_addr; + unsigned long flags; + bool pending_gpd; + + if (!(md_ctrl->txq_active & BIT(queue->index))) + return; + + spin_lock_irqsave(&queue->ring_lock, flags); + req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (pending_gpd) { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + + /* Check current processing TGPD, 64-bit address is in a table by Q index */ + ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); + if (req->gpd_addr != ul_curr_addr) { + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n", + md_ctrl->hif_id, queue->index); + return; + } + + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX); + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static void t7xx_cldma_tx_done(struct work_struct *work) +{ + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct t7xx_cldma_hw *hw_info; + unsigned int l2_tx_int; + unsigned long flags; + + hw_info = &md_ctrl->hw_info; + t7xx_cldma_gpd_tx_collect(queue); + l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index), + MTK_TX); + if (l2_tx_int & EQ_STA_BIT(queue->index)) { + t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index)); + t7xx_cldma_txq_empty_hndl(queue); + } + + if (l2_tx_int & BIT(queue->index)) { + t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index)); + queue_work(queue->worker, &queue->cldma_work); + return; + } + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->txq_active & BIT(queue->index)) { + t7xx_cldma_clear_ip_busy(hw_info); + t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX); + t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); +} + +static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, + struct cldma_ring *ring, enum dma_data_direction tx_rx) +{ + struct cldma_request *req_cur, *req_next; + + 
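+	/* Unmap any DMA buffer still attached to a request, free its skb and
+	 * its GPD, then unlink and free the request itself.
+	 */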
list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) { + if (req_cur->mapped_buff && req_cur->skb) { + dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, + ring->pkt_size, tx_rx); + req_cur->mapped_buff = 0; + } + + dev_kfree_skb_any(req_cur->skb); + + if (req_cur->gpd) + dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr); + + list_del(&req_cur->entry); + kfree(req_cur); + } +} + +static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size) +{ + struct cldma_request *req; + int val; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); + if (!req->gpd) + goto err_free_req; + + val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL); + if (val) + goto err_free_pool; + + return req; + +err_free_pool: + dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr); + +err_free_req: + kfree(req); + + return NULL; +} + +static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) +{ + struct cldma_request *req; + struct cldma_gpd *gpd; + int i; + + INIT_LIST_HEAD(&ring->gpd_ring); + ring->length = MAX_RX_BUDGET; + + for (i = 0; i < ring->length; i++) { + req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size); + if (!req) { + t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE); + return -ENOMEM; + } + + gpd = req->gpd; + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); + gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size); + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + INIT_LIST_HEAD(&req->entry); + list_add_tail(&req->entry, &ring->gpd_ring); + } + + /* Link previous GPD to next GPD, circular */ + list_for_each_entry(req, &ring->gpd_ring, entry) { + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); + gpd = req->gpd; + } + + return 0; +} + +static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl) +{ + struct cldma_request *req; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); + if (!req->gpd) { + kfree(req); + return NULL; + } + + return req; +} + +static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) +{ + struct cldma_request *req; + struct cldma_gpd *gpd; + int i; + + INIT_LIST_HEAD(&ring->gpd_ring); + ring->length = MAX_TX_BUDGET; + + for (i = 0; i < ring->length; i++) { + req = t7xx_alloc_tx_request(md_ctrl); + if (!req) { + t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE); + return -ENOMEM; + } + + gpd = req->gpd; + gpd->flags = GPD_FLAGS_IOC; + INIT_LIST_HEAD(&req->entry); + list_add_tail(&req->entry, &ring->gpd_ring); + } + + /* Link previous GPD to next GPD, circular */ + list_for_each_entry(req, &ring->gpd_ring, entry) { + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); + gpd = req->gpd; + } + + return 0; +} + +/** + * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values. + * @queue: Pointer to the queue structure. 
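+ *
+ * Rewinds tr_done and the tx_next (TX) or rx_refill (RX) cursor to the first
+ * request in the GPD ring and restores the queue budget to the ring length.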
+ * + * Called with ring_lock (unless called during initialization phase) + */ +static void t7xx_cldma_q_reset(struct cldma_queue *queue) +{ + struct cldma_request *req; + + req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry); + queue->tr_done = req; + queue->budget = queue->tr_ring->length; + + if (queue->dir == MTK_TX) + queue->tx_next = req; + else + queue->rx_refill = req; +} + +static void t7xx_cldma_rxq_init(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + + queue->dir = MTK_RX; + queue->tr_ring = &md_ctrl->rx_ring[queue->index]; + t7xx_cldma_q_reset(queue); +} + +static void t7xx_cldma_txq_init(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + + queue->dir = MTK_TX; + queue->tr_ring = &md_ctrl->tx_ring[queue->index]; + t7xx_cldma_q_reset(queue); +} + +static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl) +{ + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); +} + +static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl) +{ + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); +} + +static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) +{ + unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val; + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int i; + + /* L2 raw interrupt status */ + l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0); + l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0); + l2_tx_int &= ~l2_tx_int_msk; + l2_rx_int &= ~l2_rx_int_msk; + + if (l2_tx_int) { + if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) { + /* Read and clear L3 TX interrupt status */ + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); + } + + t7xx_cldma_hw_tx_done(hw_info, l2_tx_int); + if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { + if (i < CLDMA_TXQ_NUM) { + pm_runtime_get(md_ctrl->dev); + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); + queue_work(md_ctrl->txq[i].worker, + &md_ctrl->txq[i].cldma_work); + } else { + t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]); + } + } + } + } + + if (l2_rx_int) { + if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) { + /* Read and clear L3 RX interrupt status */ + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); + } + + t7xx_cldma_hw_rx_done(hw_info, l2_rx_int); + if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; + for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { + pm_runtime_get(md_ctrl->dev); + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); + queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); + } + } + } +} + +static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned int 
tx_active; + unsigned int rx_active; + + if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) + return false; + + tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX); + rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX); + + return tx_active || rx_active; +} + +/** + * t7xx_cldma_stop() - Stop CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Stop TX and RX queues. Disable L1 and L2 interrupts. + * Clear status registers. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from polling cldma_queues_active. + */ +int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + bool active; + int i, ret; + + md_ctrl->rxq_active = 0; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); + md_ctrl->txq_active = 0; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); + md_ctrl->txq_started = 0; + t7xx_cldma_disable_irq(md_ctrl); + t7xx_cldma_hw_stop(hw_info, MTK_RX); + t7xx_cldma_hw_stop(hw_info, MTK_TX); + t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK); + t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK); + + if (md_ctrl->is_late_init) { + for (i = 0; i < CLDMA_TXQ_NUM; i++) + flush_work(&md_ctrl->txq[i].cldma_work); + + for (i = 0; i < CLDMA_RXQ_NUM; i++) + flush_work(&md_ctrl->rxq[i].cldma_work); + } + + ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US, + CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl); + if (ret) + dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id); + + return ret; +} + +static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl) +{ + int i; + + if (!md_ctrl->is_late_init) + return; + + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); + + for (i = 0; i < CLDMA_RXQ_NUM; i++) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE); + + dma_pool_destroy(md_ctrl->gpd_dmapool); + md_ctrl->gpd_dmapool = NULL; + md_ctrl->is_late_init = false; +} + +void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_ctrl->txq_active = 0; + md_ctrl->rxq_active = 0; + t7xx_cldma_disable_irq(md_ctrl); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + cancel_work_sync(&md_ctrl->txq[i].cldma_work); + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + cancel_work_sync(&md_ctrl->rxq[i].cldma_work); + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + t7xx_cldma_late_release(md_ctrl); +} + +/** + * t7xx_cldma_start() - Start CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Set TX/RX start address. + * Start all RX queues and enable L2 interrupt. 
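+ * TX queues are not started here: the send path starts or resumes a TX
+ * queue on demand via t7xx_cldma_hw_start_send() once a request is queued.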
+ */ +void t7xx_cldma_start(struct cldma_ctrl *md_ctrl) +{ + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->is_late_init) { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int i; + + t7xx_cldma_enable_irq(md_ctrl); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + if (md_ctrl->txq[i].tr_done) + t7xx_cldma_hw_set_start_addr(hw_info, i, + md_ctrl->txq[i].tr_done->gpd_addr, + MTK_TX); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + if (md_ctrl->rxq[i].tr_done) + t7xx_cldma_hw_set_start_addr(hw_info, i, + md_ctrl->rxq[i].tr_done->gpd_addr, + MTK_RX); + } + + /* Enable L2 interrupt */ + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_start(hw_info); + md_ctrl->txq_started = 0; + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum) +{ + struct cldma_queue *txq = &md_ctrl->txq[qnum]; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + + spin_lock_irqsave(&txq->ring_lock, flags); + t7xx_cldma_q_reset(txq); + list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) { + gpd = req->gpd; + gpd->flags &= ~GPD_FLAGS_HWO; + t7xx_cldma_gpd_set_data_ptr(gpd, 0); + gpd->data_buff_len = 0; + dev_kfree_skb_any(req->skb); + req->skb = NULL; + } + spin_unlock_irqrestore(&txq->ring_lock, flags); +} + +static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum) +{ + struct cldma_queue *rxq = &md_ctrl->rxq[qnum]; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&rxq->ring_lock, flags); + t7xx_cldma_q_reset(rxq); + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { + gpd = req->gpd; + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + gpd->data_buff_len = 0; + + if (req->skb) { + req->skb->len = 0; + skb_reset_tail_pointer(req->skb); + } + } + + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { + if (req->skb) + continue; + + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC); + if (ret) + break; + + t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff); + } + spin_unlock_irqrestore(&rxq->ring_lock, flags); + + return ret; +} + +void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) +{ + int i; + + if (tx_rx == MTK_TX) { + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_clear_txq(md_ctrl, i); + } else { + for (i = 0; i < CLDMA_RXQ_NUM; i++) + t7xx_cldma_clear_rxq(md_ctrl, i); + } +} + +void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx); + if (tx_rx == MTK_RX) + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; + else + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req, + struct sk_buff *skb) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct cldma_gpd *gpd = tx_req->gpd; + unsigned long flags; + + /* Update GPD */ + tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE); + + if (dma_mapping_error(md_ctrl->dev, 
tx_req->mapped_buff)) { + dev_err(md_ctrl->dev, "DMA mapping failed\n"); + return -ENOMEM; + } + + t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff); + gpd->data_buff_len = cpu_to_le16(skb->len); + + /* This lock must cover TGPD setting, as even without a resume operation, + * CLDMA can send next HWO=1 if last TGPD just finished. + */ + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->txq_active & BIT(queue->index)) + gpd->flags |= GPD_FLAGS_HWO; + + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + tx_req->skb = skb; + return 0; +} + +/* Called with cldma_lock */ +static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, + struct cldma_request *prev_req) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + + /* Check whether the device was powered off (CLDMA start address is not set) */ + if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) { + t7xx_cldma_hw_init(hw_info); + t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX); + md_ctrl->txq_started &= ~BIT(qno); + } + + if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) { + if (md_ctrl->txq_started & BIT(qno)) + t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX); + else + t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX); + + md_ctrl->txq_started |= BIT(qno); + } +} + +/** + * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets. + * @md_ctrl: CLDMA context structure. + * @recv_skb: Receiving skb callback. + */ +void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)) +{ + md_ctrl->recv_skb = recv_skb; +} + +/** + * t7xx_cldma_send_skb() - Send control data to modem. + * @md_ctrl: CLDMA context structure. + * @qno: Queue number. + * @skb: Socket buffer. + * + * Return: + * * 0 - Success. + * * -ENOMEM - Allocation failure. + * * -EINVAL - Invalid queue request. + * * -EIO - Queue is not active. + * * -ETIMEDOUT - Timeout waiting for the device to wake up. + */ +int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb) +{ + struct cldma_request *tx_req; + struct cldma_queue *queue; + unsigned long flags; + int ret; + + if (qno >= CLDMA_TXQ_NUM) + return -EINVAL; + + ret = pm_runtime_resume_and_get(md_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return ret; + + t7xx_pci_disable_sleep(md_ctrl->t7xx_dev); + queue = &md_ctrl->txq[qno]; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (!(md_ctrl->txq_active & BIT(qno))) { + ret = -EIO; + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + goto allow_sleep; + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + do { + spin_lock_irqsave(&queue->ring_lock, flags); + tx_req = queue->tx_next; + if (queue->budget > 0 && !tx_req->skb) { + struct list_head *gpd_ring = &queue->tr_ring->gpd_ring; + + queue->budget--; + t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb); + queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { + ret = -ETIMEDOUT; + break; + } + + /* Protect the access to the modem for queues operations (resume/start) + * which access shared locations by all the queues. + * cldma_lock is independent of ring_lock which is per queue. 
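+			 * The two locks are never nested on this path: ring_lock is
+			 * dropped above before cldma_lock is taken.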
+ */ + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + break; + } + spin_unlock_irqrestore(&queue->ring_lock, flags); + + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { + ret = -ETIMEDOUT; + break; + } + + if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0); + } while (!ret); + +allow_sleep: + t7xx_pci_enable_sleep(md_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); + return ret; +} + +static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) +{ + char dma_pool_name[32]; + int i, j, ret; + + if (md_ctrl->is_late_init) { + dev_err(md_ctrl->dev, "CLDMA late init was already done\n"); + return -EALREADY; + } + + snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id); + + md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev, + sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0); + if (!md_ctrl->gpd_dmapool) { + dev_err(md_ctrl->dev, "DMA pool alloc fail\n"); + return -ENOMEM; + } + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]); + if (ret) { + dev_err(md_ctrl->dev, "control TX ring init fail\n"); + goto err_free_tx_ring; + } + } + + for (j = 0; j < CLDMA_RXQ_NUM; j++) { + md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU; + + if (j == CLDMA_RXQ_NUM - 1) + md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ; + + ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]); + if (ret) { + dev_err(md_ctrl->dev, "Control RX ring init fail\n"); + goto err_free_rx_ring; + } + } + + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_txq_init(&md_ctrl->txq[i]); + + for (j = 0; j < CLDMA_RXQ_NUM; j++) + t7xx_cldma_rxq_init(&md_ctrl->rxq[j]); + + md_ctrl->is_late_init = true; + return 0; + +err_free_rx_ring: + while (j--) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE); + +err_free_tx_ring: + while (i--) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); + + return ret; +} + +static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr) +{ + return addr + phy_addr - addr_trs1; +} + +static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr; + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + u32 phy_ao_base, phy_pd_base; + + if (md_ctrl->hif_id != CLDMA_ID_MD) + return; + + phy_ao_base = CLDMA1_AO_BASE; + phy_pd_base = CLDMA1_PD_BASE; + hw_info->phy_interrupt_id = CLDMA1_INT; + hw_info->hw_mode = MODE_BIT_64; + hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_ao_base); + hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_pd_base); +} + +static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); + return 0; +} + +int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct cldma_ctrl *md_ctrl; + + md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL); + if (!md_ctrl) + return -ENOMEM; + + 
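+	/* md_ctrl is devm-allocated: it is released automatically when the
+	 * PCI device is unbound, so t7xx_cldma_exit() needs no kfree().
+	 */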
md_ctrl->t7xx_dev = t7xx_dev; + md_ctrl->dev = dev; + md_ctrl->hif_id = hif_id; + md_ctrl->recv_skb = t7xx_cldma_default_recv_skb; + t7xx_hw_info_init(md_ctrl); + t7xx_dev->md->md_ctrl[hif_id] = md_ctrl; + return 0; +} + +static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + int qno_t; + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_restore(hw_info); + for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) { + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr, + MTK_TX); + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr, + MTK_RX); + } + t7xx_cldma_enable_irq(md_ctrl); + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; + t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK); + + return 0; +} + +static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); + t7xx_cldma_clear_ip_busy(hw_info); + t7xx_cldma_disable_irq(md_ctrl); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX); + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); + md_ctrl->txq_started = 0; + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + return 0; +} + +static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl) +{ + md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL); + if (!md_ctrl->pm_entity) + return -ENOMEM; + + md_ctrl->pm_entity->entity_param = md_ctrl; + + if (md_ctrl->hif_id == CLDMA_ID_MD) + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1; + else + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2; + + md_ctrl->pm_entity->suspend = t7xx_cldma_suspend; + md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late; + md_ctrl->pm_entity->resume = t7xx_cldma_resume; + md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early; + 
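+	/* The four callbacks pair up across a suspend/resume cycle: suspend
+	 * quiesces TX, suspend_late quiesces RX, resume_early restores the
+	 * queue start addresses and restarts RX, and resume re-enables TX.
+	 */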
+ return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity); +} + +static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl) +{ + if (!md_ctrl->pm_entity) + return -EINVAL; + + t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity); + kfree(md_ctrl->pm_entity); + md_ctrl->pm_entity = NULL; + return 0; +} + +void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_stop(hw_info, MTK_TX); + t7xx_cldma_hw_stop(hw_info, MTK_RX); + t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); + t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); + t7xx_cldma_hw_init(hw_info); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data) +{ + struct cldma_ctrl *md_ctrl = data; + u32 interrupt; + + interrupt = md_ctrl->hw_info.phy_interrupt_id; + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt); + t7xx_cldma_irq_work_cb(md_ctrl); + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt); + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt); + return IRQ_HANDLED; +} + +static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) +{ + int i; + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + if (md_ctrl->txq[i].worker) { + destroy_workqueue(md_ctrl->txq[i].worker); + md_ctrl->txq[i].worker = NULL; + } + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + if (md_ctrl->rxq[i].worker) { + destroy_workqueue(md_ctrl->rxq[i].worker); + md_ctrl->rxq[i].worker = NULL; + } + } +} + +/** + * t7xx_cldma_init() - Initialize CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Allocate and initialize device power management entity. + * Initialize HIF TX/RX queue structure. + * Register CLDMA callback ISR with PCIe driver. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. + */ +int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int ret, i; + + md_ctrl->txq_active = 0; + md_ctrl->rxq_active = 0; + md_ctrl->is_late_init = false; + + ret = t7xx_cldma_pm_init(md_ctrl); + if (ret) + return ret; + + spin_lock_init(&md_ctrl->cldma_lock); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + md_ctrl->txq[i].worker = + alloc_workqueue("md_hif%d_tx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 
0 : WQ_HIGHPRI), + 1, md_ctrl->hif_id, i); + if (!md_ctrl->txq[i].worker) + goto err_workqueue; + + INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); + + md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 1, md_ctrl->hif_id, i); + if (!md_ctrl->rxq[i].worker) + goto err_workqueue; + } + + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); + md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler; + md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL; + md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl; + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); + return 0; + +err_workqueue: + t7xx_cldma_destroy_wqs(md_ctrl); + t7xx_cldma_pm_uninit(md_ctrl); + return -ENOMEM; +} + +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl) +{ + t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_late_init(md_ctrl); +} + +void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl) +{ + t7xx_cldma_stop(md_ctrl); + t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_destroy_wqs(md_ctrl); + t7xx_cldma_pm_uninit(md_ctrl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h new file mode 100644 index 0000000000..47a35e552d --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Eliot Lee + */ + +#ifndef __T7XX_HIF_CLDMA_H__ +#define __T7XX_HIF_CLDMA_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_pci.h" + +/** + * enum cldma_id - Identifiers for CLDMA HW units. + * @CLDMA_ID_MD: Modem control channel. + * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). + * @CLDMA_NUM: Number of CLDMA HW units available. 
+ */ +enum cldma_id { + CLDMA_ID_MD, + CLDMA_ID_AP, + CLDMA_NUM +}; + +struct cldma_gpd { + u8 flags; + u8 not_used1; + __le16 rx_data_allow_len; + __le32 next_gpd_ptr_h; + __le32 next_gpd_ptr_l; + __le32 data_buff_bd_ptr_h; + __le32 data_buff_bd_ptr_l; + __le16 data_buff_len; + __le16 not_used2; +}; + +struct cldma_request { + struct cldma_gpd *gpd; /* Virtual address for CPU */ + dma_addr_t gpd_addr; /* Physical address for DMA */ + struct sk_buff *skb; + dma_addr_t mapped_buff; + struct list_head entry; +}; + +struct cldma_ring { + struct list_head gpd_ring; /* Ring of struct cldma_request */ + unsigned int length; /* Number of struct cldma_request */ + int pkt_size; +}; + +struct cldma_queue { + struct cldma_ctrl *md_ctrl; + enum mtk_txrx dir; + unsigned int index; + struct cldma_ring *tr_ring; + struct cldma_request *tr_done; + struct cldma_request *rx_refill; + struct cldma_request *tx_next; + int budget; /* Same as ring buffer size by default */ + spinlock_t ring_lock; + wait_queue_head_t req_wq; /* Only for TX */ + struct workqueue_struct *worker; + struct work_struct cldma_work; +}; + +struct cldma_ctrl { + enum cldma_id hif_id; + struct device *dev; + struct t7xx_pci_dev *t7xx_dev; + struct cldma_queue txq[CLDMA_TXQ_NUM]; + struct cldma_queue rxq[CLDMA_RXQ_NUM]; + unsigned short txq_active; + unsigned short rxq_active; + unsigned short txq_started; + spinlock_t cldma_lock; /* Protects CLDMA structure */ + /* Assumes T/R GPD/BD/SPD have the same size */ + struct dma_pool *gpd_dmapool; + struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; + struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; + struct md_pm_entity *pm_entity; + struct t7xx_cldma_hw hw_info; + bool is_late_init; + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); +}; + +#define GPD_FLAGS_HWO BIT(0) +#define GPD_FLAGS_IOC BIT(7) +#define GPD_DMAPOOL_ALIGN 16 + +#define CLDMA_MTU 3584 /* 3.5kB */ + +int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev); +void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl); +int t7xx_cldma_init(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_start(struct cldma_ctrl *md_ctrl); +int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)); +int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb); +void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); +void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); + +#endif /* __T7XX_HIF_CLDMA_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c new file mode 100644 index 0000000000..7eff3531b9 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ *
+ * Authors:
+ *  Amir Hanania
+ *  Haijun Liu
+ *  Moises Veleta
+ *  Ricardo Martinez
+ *
+ * Contributors:
+ *  Chiranjeevi Rapolu
+ *  Eliot Lee
+ *  Sreehari Kancharla
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_state_monitor.h"
+
+unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
+{
+	buf_idx++;
+
+	return buf_idx < buf_len ? buf_idx : 0;
+}
+
+/* For DPMAIF_READ, return the number of entries ready to be read; for
+ * DPMAIF_WRITE, return the number of free slots (one entry is kept unused
+ * to distinguish a full ring from an empty one).
+ */
+unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
+				       unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
+{
+	int pkt_cnt;
+
+	if (rd_wr == DPMAIF_READ)
+		pkt_cnt = wr_idx - rd_idx;
+	else
+		pkt_cnt = rd_idx - wr_idx - 1;
+
+	if (pkt_cnt < 0)
+		pkt_cnt += total_cnt;
+
+	return (unsigned int)pkt_cnt;
+}
+
+static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	struct dpmaif_isr_para *isr_para;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
+		isr_para = &dpmaif_ctrl->isr_para[i];
+		t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+	}
+}
+
+static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	struct dpmaif_isr_para *isr_para;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
+		isr_para = &dpmaif_ctrl->isr_para[i];
+		t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+	}
+}
+
+static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
+{
+	struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+	struct dpmaif_hw_intr_st_para intr_status;
+	struct device *dev = dpmaif_ctrl->dev;
+	struct dpmaif_hw_info *hw_info;
+	int i;
+
+	memset(&intr_status, 0, sizeof(intr_status));
+	hw_info = &dpmaif_ctrl->hw_info;
+
+	if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
+		dev_err(dev, "Failed to get HW interrupt count\n");
+		return;
+	}
+
+	t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+	for (i = 0; i < intr_status.intr_cnt; i++) {
+		switch (intr_status.intr_types[i]) {
+		case DPF_INTR_UL_DONE:
+			t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
+			break;
+
+		case DPF_INTR_UL_DRB_EMPTY:
+		case DPF_INTR_UL_MD_NOTREADY:
+		case DPF_INTR_UL_MD_PWR_NOTREADY:
+			/* No need to log an error for these */
+			break;
+
+		case DPF_INTR_DL_BATCNT_LEN_ERR:
+			dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
+			t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
+			break;
+
+		case DPF_INTR_DL_PITCNT_LEN_ERR:
+			dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
+			t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
+			break;
+
+		case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
+			dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
+			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
+			break;
+
+		case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
+			dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
+			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
+			break;
+
+		case DPF_INTR_DL_DONE:
+		case DPF_INTR_DL_Q0_DONE:
+		case DPF_INTR_DL_Q1_DONE:
+			t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
+			break;
+
+		default:
+			dev_err_ratelimited(dev, "DL interrupt error: unknown type: %d\n",
+					    intr_status.intr_types[i]);
+		}
+	}
+}
+
+static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
+{
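+	/* The PCIe interrupt source stays masked while the callback runs; its
+	 * status bit is cleared inside the callback once the HW interrupt
+	 * count has been read.
+	 */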
struct dpmaif_isr_para *isr_para = data; + struct dpmaif_ctrl *dpmaif_ctrl; + + dpmaif_ctrl = isr_para->dpmaif_ctrl; + if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { + dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n"); + return IRQ_HANDLED; + } + + t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + t7xx_dpmaif_irq_cb(isr_para); + t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + return IRQ_HANDLED; +} + +static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_isr_para *isr_para; + unsigned char i; + + dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT; + dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + isr_para->dpmaif_ctrl = dpmaif_ctrl; + isr_para->dlq_id = i; + isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i]; + } +} + +static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev; + struct dpmaif_isr_para *isr_para; + enum t7xx_int int_type; + int i; + + t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl); + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + int_type = isr_para->pcie_int; + t7xx_pcie_mac_clear_int(t7xx_dev, int_type); + + t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler; + t7xx_dev->intr_thread[int_type] = NULL; + t7xx_dev->callback_param[int_type] = isr_para; + + t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type); + t7xx_pcie_mac_set_int(t7xx_dev, int_type); + } +} + +static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rx_q; + struct dpmaif_tx_queue *tx_q; + int ret, rx_idx, tx_idx, i; + + ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret); + return ret; + } + + ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret); + goto err_free_normal_bat; + } + + for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) { + rx_q = &dpmaif_ctrl->rxq[rx_idx]; + rx_q->index = rx_idx; + rx_q->dpmaif_ctrl = dpmaif_ctrl; + ret = t7xx_dpmaif_rxq_init(rx_q); + if (ret) + goto err_free_rxq; + } + + for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) { + tx_q = &dpmaif_ctrl->txq[tx_idx]; + tx_q->index = tx_idx; + tx_q->dpmaif_ctrl = dpmaif_ctrl; + ret = t7xx_dpmaif_txq_init(tx_q); + if (ret) + goto err_free_txq; + } + + ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n"); + goto err_free_txq; + } + + ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl); + if (ret) + goto err_thread_rel; + + return 0; + +err_thread_rel: + t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); + +err_free_txq: + for (i = 0; i < tx_idx; i++) { + tx_q = &dpmaif_ctrl->txq[i]; + t7xx_dpmaif_txq_free(tx_q); + } + +err_free_rxq: + for (i = 0; i < rx_idx; i++) { + rx_q = &dpmaif_ctrl->rxq[i]; + t7xx_dpmaif_rxq_free(rx_q); + } + + t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag); + +err_free_normal_bat: + t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req); + + return ret; +} + +static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rx_q; + struct dpmaif_tx_queue *tx_q; + int i; + + t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); + 
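+	/* The BAT release workqueue must be gone before the per-queue
+	 * resources it refills are freed below.
+	 */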
t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl); + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + tx_q = &dpmaif_ctrl->txq[i]; + t7xx_dpmaif_txq_free(tx_q); + } + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + rx_q = &dpmaif_ctrl->rxq[i]; + t7xx_dpmaif_rxq_free(rx_q); + } +} + +static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; + struct dpmaif_hw_params hw_init_para; + struct dpmaif_rx_queue *rxq; + struct dpmaif_tx_queue *txq; + unsigned int buf_cnt; + int i, ret = 0; + + if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON) + return -EFAULT; + + memset(&hw_init_para, 0, sizeof(hw_init_para)); + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + rxq = &dpmaif_ctrl->rxq[i]; + rxq->que_started = true; + rxq->index = i; + rxq->budget = rxq->bat_req->bat_size_cnt - 1; + + hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr; + hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt; + hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr; + hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt; + hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr; + hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt; + } + + bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt); + buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1; + ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret); + return ret; + } + + buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1; + ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret); + goto err_free_normal_bat; + } + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + txq = &dpmaif_ctrl->txq[i]; + txq->que_started = true; + + hw_init_para.drb_base_addr[i] = txq->drb_bus_addr; + hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt; + } + + ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret); + goto err_free_frag_bat; + } + + ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1); + if (ret) + goto err_free_frag_bat; + + ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1); + if (ret) + goto err_free_frag_bat; + + t7xx_dpmaif_ul_clr_all_intr(hw_info); + t7xx_dpmaif_dl_clr_all_intr(hw_info); + dpmaif_ctrl->state = DPMAIF_STATE_PWRON; + t7xx_dpmaif_enable_irq(dpmaif_ctrl); + wake_up(&dpmaif_ctrl->tx_wq); + return 0; + +err_free_frag_bat: + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); + +err_free_normal_bat: + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); + + return ret; +} + +static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl) +{ + t7xx_dpmaif_tx_stop(dpmaif_ctrl); + t7xx_dpmaif_rx_stop(dpmaif_ctrl); +} + +static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl) +{ + t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); + t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); +} + +static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (!dpmaif_ctrl->dpmaif_sw_init_done) { + dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n"); + return -EFAULT; + } + + if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF) + return -EFAULT; + + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + dpmaif_ctrl->state = DPMAIF_STATE_PWROFF; + t7xx_dpmaif_stop_sw(dpmaif_ctrl); + t7xx_dpmaif_tx_clear(dpmaif_ctrl); + 
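+	/* IRQs are already disabled and the state is PWROFF, so the RX path
+	 * can be drained without racing new work.
+	 */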
t7xx_dpmaif_rx_clear(dpmaif_ctrl);
+	return 0;
+}
+
+static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+	struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+	t7xx_dpmaif_tx_stop(dpmaif_ctrl);
+	t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
+	t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
+	t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+	t7xx_dpmaif_rx_stop(dpmaif_ctrl);
+	return 0;
+}
+
+static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	int qno;
+
+	for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
+		t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
+}
+
+static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	struct dpmaif_rx_queue *rxq;
+	struct dpmaif_tx_queue *txq;
+	unsigned int que_cnt;
+
+	for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
+		txq = &dpmaif_ctrl->txq[que_cnt];
+		txq->que_started = true;
+	}
+
+	for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
+		rxq = &dpmaif_ctrl->rxq[que_cnt];
+		rxq->que_started = true;
+	}
+}
+
+static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+	struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+	if (!dpmaif_ctrl)
+		return 0;
+
+	t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
+	t7xx_dpmaif_enable_irq(dpmaif_ctrl);
+	t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
+	t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
+	wake_up(&dpmaif_ctrl->tx_wq);
+	return 0;
+}
+
+static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+	int ret;
+
+	INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
+	dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
+	dpmaif_pm_entity->suspend_late = NULL;
+	dpmaif_pm_entity->resume_early = NULL;
+	dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
+	dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
+	dpmaif_pm_entity->entity_param = dpmaif_ctrl;
+
+	ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+	if (ret)
+		dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
+
+	return ret;
+}
+
+static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+	int ret;
+
+	ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+	if (ret < 0)
+		dev_err(dpmaif_ctrl->dev, "dpmaif unregister pm_entity fail\n");
+
+	return ret;
+}
+
+int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
+{
+	int ret = 0;
+
+	switch (state) {
+	case MD_STATE_WAITING_FOR_HS1:
+		ret = t7xx_dpmaif_start(dpmaif_ctrl);
+		break;
+
+	case MD_STATE_EXCEPTION:
+	case MD_STATE_STOPPED:
+		ret = t7xx_dpmaif_stop(dpmaif_ctrl);
+		break;
+
+	case MD_STATE_WAITING_TO_STOP:
+		t7xx_dpmaif_stop_hw(dpmaif_ctrl);
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * t7xx_dpmaif_hif_init() - Initialize data path.
+ * @t7xx_dev: MTK context structure.
+ * @callbacks: Callbacks implemented by the network layer to handle RX skb and
+ *	       event notifications.
+ *
+ * Allocate and initialize datapath control block.
+ * Register datapath ISR, TX and RX resources.
+ *
+ * Return:
+ * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
+ * * NULL - In case of error.
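+ *
+ * Note: the returned context is devm-allocated against the PCI device, so
+ * it is released automatically when the device is unbound.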
+ */ +struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, + struct dpmaif_callbacks *callbacks) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct dpmaif_ctrl *dpmaif_ctrl; + int ret; + + if (!callbacks) + return NULL; + + dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL); + if (!dpmaif_ctrl) + return NULL; + + dpmaif_ctrl->t7xx_dev = t7xx_dev; + dpmaif_ctrl->callbacks = callbacks; + dpmaif_ctrl->dev = dev; + dpmaif_ctrl->dpmaif_sw_init_done = false; + dpmaif_ctrl->hw_info.dev = dev; + dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + + ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl); + if (ret) + return NULL; + + t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl); + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + + ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl); + if (ret) { + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); + dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret); + return NULL; + } + + dpmaif_ctrl->dpmaif_sw_init_done = true; + return dpmaif_ctrl; +} + +void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (dpmaif_ctrl->dpmaif_sw_init_done) { + t7xx_dpmaif_stop(dpmaif_ctrl); + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); + t7xx_dpmaif_sw_release(dpmaif_ctrl); + dpmaif_ctrl->dpmaif_sw_init_done = false; + } +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h new file mode 100644 index 0000000000..1225ca0ed5 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMAIF_H__ +#define __T7XX_HIF_DPMAIF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_pci.h" +#include "t7xx_state_monitor.h" + +/* SKB control buffer */ +struct t7xx_skb_cb { + u8 netif_idx; + u8 txq_number; + u8 rx_pkt_type; +}; + +#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb) + +enum dpmaif_rdwr { + DPMAIF_READ, + DPMAIF_WRITE, +}; + +/* Structure of DL BAT */ +struct dpmaif_cur_rx_skb_info { + bool msg_pit_received; + struct sk_buff *cur_skb; + unsigned int cur_chn_idx; + unsigned int check_sum; + unsigned int pit_dp; + unsigned int pkt_type; + int err_payload; +}; + +struct dpmaif_bat { + unsigned int p_buffer_addr; + unsigned int buffer_addr_ext; +}; + +struct dpmaif_bat_skb { + struct sk_buff *skb; + dma_addr_t data_bus_addr; + unsigned int data_len; +}; + +struct dpmaif_bat_page { + struct page *page; + dma_addr_t data_bus_addr; + unsigned int offset; + unsigned int data_len; +}; + +enum bat_type { + BAT_TYPE_NORMAL, + BAT_TYPE_FRAG, +}; + +struct dpmaif_bat_request { + void *bat_base; + dma_addr_t bat_bus_addr; + unsigned int bat_size_cnt; + unsigned int bat_wr_idx; + unsigned int bat_release_rd_idx; + void *bat_skb; + unsigned int pkt_buf_sz; + unsigned long *bat_bitmap; + atomic_t refcnt; + spinlock_t mask_lock; /* Protects BAT mask */ + enum bat_type type; +}; + +struct dpmaif_rx_queue { + unsigned int index; + bool que_started; + unsigned int budget; + + void *pit_base; + dma_addr_t pit_bus_addr; + unsigned int pit_size_cnt; + + unsigned int pit_rd_idx; + unsigned int pit_wr_idx; + unsigned int 
pit_release_rd_idx; + + struct dpmaif_bat_request *bat_req; + struct dpmaif_bat_request *bat_frag; + + wait_queue_head_t rx_wq; + struct task_struct *rx_thread; + struct sk_buff_head skb_list; + unsigned int skb_list_max_len; + + struct workqueue_struct *worker; + struct work_struct dpmaif_rxq_work; + + atomic_t rx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned int expect_pit_seq; + unsigned int pit_remain_release_cnt; + struct dpmaif_cur_rx_skb_info rx_data_info; +}; + +struct dpmaif_tx_queue { + unsigned int index; + bool que_started; + atomic_t tx_budget; + void *drb_base; + dma_addr_t drb_bus_addr; + unsigned int drb_size_cnt; + unsigned int drb_wr_idx; + unsigned int drb_rd_idx; + unsigned int drb_release_rd_idx; + void *drb_skb_base; + wait_queue_head_t req_wq; + struct workqueue_struct *worker; + struct work_struct dpmaif_tx_work; + spinlock_t tx_lock; /* Protects txq DRB */ + atomic_t tx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + struct sk_buff_head tx_skb_head; +}; + +struct dpmaif_isr_para { + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned char pcie_int; + unsigned char dlq_id; +}; + +enum dpmaif_state { + DPMAIF_STATE_MIN, + DPMAIF_STATE_PWROFF, + DPMAIF_STATE_PWRON, + DPMAIF_STATE_EXCEPTION, + DPMAIF_STATE_MAX +}; + +enum dpmaif_txq_state { + DMPAIF_TXQ_STATE_IRQ, + DMPAIF_TXQ_STATE_FULL, +}; + +struct dpmaif_callbacks { + void (*state_notify)(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int txq_number); + void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb); +}; + +struct dpmaif_ctrl { + struct device *dev; + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity dpmaif_pm_entity; + enum dpmaif_state state; + bool dpmaif_sw_init_done; + struct dpmaif_hw_info hw_info; + struct dpmaif_tx_queue txq[DPMAIF_TXQ_NUM]; + struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM]; + + unsigned char rxq_int_mapping[DPMAIF_RXQ_NUM]; + struct dpmaif_isr_para isr_para[DPMAIF_RXQ_NUM]; + + struct dpmaif_bat_request bat_req; + struct dpmaif_bat_request bat_frag; + struct workqueue_struct *bat_release_wq; + struct work_struct bat_release_work; + + wait_queue_head_t tx_wq; + struct task_struct *tx_thread; + + struct dpmaif_callbacks *callbacks; +}; + +struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, + struct dpmaif_callbacks *callbacks); +void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state); +unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx); +unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx, + unsigned int wr_idx, enum dpmaif_rdwr); + +#endif /* __T7XX_HIF_DPMAIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c new file mode 100644 index 0000000000..91a0eb19e0 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -0,0 +1,1243 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_pci.h" + +#define DPMAIF_BAT_COUNT 8192 +#define DPMAIF_FRG_COUNT 4814 +#define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2) + +#define DPMAIF_BAT_CNT_THRESHOLD 30 +#define DPMAIF_PIT_CNT_THRESHOLD 60 +#define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0) +#define DPMAIF_NOTIFY_RELEASE_COUNT 128 +#define DPMAIF_POLL_PIT_TIME_US 20 +#define DPMAIF_POLL_PIT_MAX_TIME_US 2000 +#define DPMAIF_WQ_TIME_LIMIT_MS 2 +#define DPMAIF_CS_RESULT_PASS 0 + +/* Packet type */ +#define DES_PT_PD 0 +#define DES_PT_MSG 1 +/* Buffer type */ +#define PKT_BUF_FRAG 1 + +static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info) +{ + u32 value; + + value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer)); + value <<= 13; + value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header)); + return value; +} + +static int t7xx_dpmaif_net_rx_push_thread(void *arg) +{ + struct dpmaif_rx_queue *q = arg; + struct dpmaif_ctrl *hif_ctrl; + struct dpmaif_callbacks *cb; + + hif_ctrl = q->dpmaif_ctrl; + cb = hif_ctrl->callbacks; + + while (!kthread_should_stop()) { + struct sk_buff *skb; + unsigned long flags; + + if (skb_queue_empty(&q->skb_list)) { + if (wait_event_interruptible(q->rx_wq, + !skb_queue_empty(&q->skb_list) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + } + + spin_lock_irqsave(&q->skb_list.lock, flags); + skb = __skb_dequeue(&q->skb_list); + spin_unlock_irqrestore(&q->skb_list.lock, flags); + + if (!skb) + continue; + + cb->recv_skb(hif_ctrl->t7xx_dev, skb); + cond_resched(); + } + + return 0; +} + +static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, const unsigned int bat_cnt) +{ + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; + struct dpmaif_bat_request *bat_req = rxq->bat_req; + unsigned int old_rl_idx, new_wr_idx, old_wr_idx; + + if (!rxq->que_started) { + dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index); + return -EINVAL; + } + + old_rl_idx = bat_req->bat_release_rd_idx; + old_wr_idx = bat_req->bat_wr_idx; + new_wr_idx = old_wr_idx + bat_cnt; + + if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx) + goto err_flow; + + if (new_wr_idx >= bat_req->bat_size_cnt) { + new_wr_idx -= bat_req->bat_size_cnt; + if (new_wr_idx >= old_rl_idx) + goto err_flow; + } + + bat_req->bat_wr_idx = new_wr_idx; + return 0; + +err_flow: + dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n"); + return -EINVAL; +} + +static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int size, struct dpmaif_bat_skb *cur_skb) +{ + dma_addr_t data_bus_addr; + struct sk_buff *skb; + + skb = __dev_alloc_skb(size, GFP_KERNEL); + if (!skb) + return false; + + data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) { + dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); + return false; + } + + 
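+	/* Record the skb and its mapping so t7xx_unmap_bat_skb() can undo
+	 * exactly this allocation later.
+	 */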
cur_skb->skb = skb;
+	cur_skb->data_bus_addr = data_bus_addr;
+	cur_skb->data_len = size;
+
+	return true;
+}
+
+static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
+			       unsigned int index)
+{
+	struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
+
+	if (bat_skb->skb) {
+		dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
+		dev_kfree_skb(bat_skb->skb);
+		bat_skb->skb = NULL;
+	}
+}
+
+/**
+ * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
+ * @dpmaif_ctrl: Pointer to DPMAIF context structure.
+ * @bat_req: Pointer to BAT request structure.
+ * @q_num: Queue number.
+ * @buf_cnt: Number of buffers to allocate.
+ * @initial: Indicates if the ring is being populated for the first time.
+ *
+ * Allocate skb and store the start address of the data buffer into the BAT ring.
+ * If this is not the initial call, notify the HW about the new entries.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code.
+ */
+int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
+			     const struct dpmaif_bat_request *bat_req,
+			     const unsigned int q_num, const unsigned int buf_cnt,
+			     const bool initial)
+{
+	unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
+	int ret;
+
+	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
+		return -EINVAL;
+
+	/* Check BAT buffer space */
+	bat_max_cnt = bat_req->bat_size_cnt;
+
+	bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
+					    bat_req->bat_wr_idx, DPMAIF_WRITE);
+	if (buf_cnt > bat_cnt)
+		return -ENOMEM;
+
+	bat_start_idx = bat_req->bat_wr_idx;
+
+	for (i = 0; i < buf_cnt; i++) {
+		unsigned int cur_bat_idx = bat_start_idx + i;
+		struct dpmaif_bat_skb *cur_skb;
+		struct dpmaif_bat *cur_bat;
+
+		if (cur_bat_idx >= bat_max_cnt)
+			cur_bat_idx -= bat_max_cnt;
+
+		cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
+		if (!cur_skb->skb &&
+		    !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
+			break;
+
+		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
+		cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
+		cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
+	}
+
+	if (!i)
+		return -ENOMEM;
+
+	ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
+	if (ret)
+		goto err_unmap_skbs;
+
+	if (!initial) {
+		unsigned int hw_wr_idx;
+
+		ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
+		if (ret)
+			goto err_unmap_skbs;
+
+		hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
+							  DPF_RX_QNO_DFT);
+		if (hw_wr_idx != bat_req->bat_wr_idx) {
+			ret = -EFAULT;
+			dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
+			goto err_unmap_skbs;
+		}
+	}
+
+	return 0;
+
+err_unmap_skbs:
+	/* Unmap all i filled entries, including index 0. Note: indexing
+	 * bat_skb from 0 assumes bat_start_idx == 0, i.e. the initial
+	 * population of the ring.
+	 */
+	while (i--)
+		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+
+	return ret;
+}
+
+static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
+					  const unsigned int rel_entry_num)
+{
+	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
+	unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
+	int ret;
+
+	if (!rxq->que_started)
+		return 0;
+
+	if (rel_entry_num >= rxq->pit_size_cnt) {
+		dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
+		return -EINVAL;
+	}
+
+	old_rel_idx = rxq->pit_release_rd_idx;
+	new_rel_idx = old_rel_idx + rel_entry_num;
+	hw_wr_idx = rxq->pit_wr_idx;
+	if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
+		new_rel_idx -= rxq->pit_size_cnt;
+
+	ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index,
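+						 /* entries handed back to the HW */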
rel_entry_num); + if (ret) { + dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret); + return ret; + } + + rxq->pit_release_rd_idx = new_rel_idx; + return 0; +} + +static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&bat_req->mask_lock, flags); + set_bit(idx, bat_req->bat_bitmap); + spin_unlock_irqrestore(&bat_req->mask_lock, flags); +} + +static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, + const unsigned int cur_bid) +{ + struct dpmaif_bat_request *bat_frag = rxq->bat_frag; + struct dpmaif_bat_page *bat_page; + + if (cur_bid >= DPMAIF_FRG_COUNT) + return -EINVAL; + + bat_page = bat_frag->bat_skb + cur_bid; + if (!bat_page->page) + return -EINVAL; + + return 0; +} + +static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base, + unsigned int index) +{ + struct dpmaif_bat_page *bat_page = bat_page_base + index; + + if (bat_page->page) { + dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE); + put_page(bat_page->page); + bat_page->page = NULL; + } +} + +/** + * t7xx_dpmaif_rx_frag_alloc() - Allocates buffers for the Fragment BAT ring. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @buf_cnt: Number of buffers to allocate. + * @initial: Indicates if the ring is being populated for the first time. + * + * Fragment BAT is used when the received packet does not fit in a normal BAT entry. + * This function allocates a page fragment and stores the start address of the page + * into the Fragment BAT ring. + * If this is not the initial call, notify the HW about the new entries. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. 
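+ *
+ * Note: ring slots whose page is still set are reused as-is; only empty
+ * slots receive a fresh page fragment.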
+ */
+int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+			      const unsigned int buf_cnt, const bool initial)
+{
+	unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
+	struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
+	int ret = 0, i;
+
+	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
+		return -EINVAL;
+
+	buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
+					      bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
+					      DPMAIF_WRITE);
+	if (buf_cnt > buf_space) {
+		dev_err(dpmaif_ctrl->dev,
+			"Requested more buffers than the space available in RX frag ring\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < buf_cnt; i++) {
+		struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
+		struct dpmaif_bat *cur_bat;
+		dma_addr_t data_base_addr;
+
+		if (!cur_page->page) {
+			unsigned long offset;
+			struct page *page;
+			void *data;
+
+			data = netdev_alloc_frag(bat_req->pkt_buf_sz);
+			if (!data)
+				break;
+
+			page = virt_to_head_page(data);
+			offset = data - page_address(page);
+
+			data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
+						      bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
+			if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
+				put_page(virt_to_head_page(data));
+				dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
+				break;
+			}
+
+			cur_page->page = page;
+			cur_page->data_bus_addr = data_base_addr;
+			cur_page->offset = offset;
+			cur_page->data_len = bat_req->pkt_buf_sz;
+		}
+
+		data_base_addr = cur_page->data_bus_addr;
+		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
+		cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
+		cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
+		cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
+	}
+
+	bat_req->bat_wr_idx = cur_bat_idx;
+
+	if (!initial)
+		t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
+
+	if (i < buf_cnt) {
+		ret = -ENOMEM;
+		if (initial) {
+			/* Unmap all i filled entries, including index 0 */
+			while (i--)
+				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+		}
+	}
+
+	return ret;
+}
+
+static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
+				       const struct dpmaif_pit *pkt_info,
+				       struct sk_buff *skb)
+{
+	unsigned long long data_bus_addr, data_base_addr;
+	struct device *dev = rxq->dpmaif_ctrl->dev;
+	struct dpmaif_bat_page *page_info;
+	unsigned int data_len;
+	int data_offset;
+
+	page_info = rxq->bat_frag->bat_skb;
+	page_info += t7xx_normal_pit_bid(pkt_info);
+
+	/* Check the page before touching its mapping: if it is gone, the
+	 * stored bus address and length are stale.
+	 */
+	if (!page_info->page)
+		return -EINVAL;
+
+	dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
+
+	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
+	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
+	data_base_addr = page_info->data_bus_addr;
+	data_offset = data_bus_addr - data_base_addr;
+	data_offset += page_info->offset;
+	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+			data_offset, data_len, page_info->data_len);
+
+	page_info->page = NULL;
+	page_info->offset = 0;
+	page_info->data_len = 0;
+	return 0;
+}
+
+static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
+				const struct dpmaif_pit *pkt_info,
+				const struct dpmaif_cur_rx_skb_info *skb_info)
+{
+	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
+	int ret;
+
+	ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
+	if (ret < 0)
+		return ret;
+
+	ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
+	if (ret < 0) {
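+		/* The PIT entry referenced a fragment we could not attach;
+		 * the caller flags err_payload and drops the whole packet.
+		 */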
dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret); + return ret; + } + + t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid); + return 0; +} + +static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid) +{ + struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb; + + bat_skb += cur_bid; + if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb) + return -EINVAL; + + return 0; +} + +static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit) +{ + return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer)); +} + +static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pit) +{ + unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq; + + if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq, + cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US, + DPMAIF_POLL_PIT_MAX_TIME_US, false, pit)) + return -EFAULT; + + rxq->expect_pit_seq++; + if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE) + rxq->expect_pit_seq = 0; + + return 0; +} + +static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req) +{ + unsigned int zero_index; + unsigned long flags; + + spin_lock_irqsave(&bat_req->mask_lock, flags); + + zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt, + bat_req->bat_release_rd_idx); + + if (zero_index < bat_req->bat_size_cnt) { + spin_unlock_irqrestore(&bat_req->mask_lock, flags); + return zero_index - bat_req->bat_release_rd_idx; + } + + /* limiting the search till bat_release_rd_idx */ + zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx); + spin_unlock_irqrestore(&bat_req->mask_lock, flags); + return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index; +} + +static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq, + const unsigned int rel_entry_num, + const enum bat_type buf_type) +{ + struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; + unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i; + struct dpmaif_bat_request *bat; + unsigned long flags; + + if (!rxq->que_started || !rel_entry_num) + return -EINVAL; + + if (buf_type == BAT_TYPE_FRAG) { + bat = rxq->bat_frag; + hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index); + } else { + bat = rxq->bat_req; + hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index); + } + + if (rel_entry_num >= bat->bat_size_cnt) + return -EINVAL; + + old_rel_idx = bat->bat_release_rd_idx; + new_rel_idx = old_rel_idx + rel_entry_num; + + /* Do not need to release if the queue is empty */ + if (bat->bat_wr_idx == old_rel_idx) + return 0; + + if (hw_rd_idx >= old_rel_idx) { + if (new_rel_idx > hw_rd_idx) + return -EINVAL; + } + + if (new_rel_idx >= bat->bat_size_cnt) { + new_rel_idx -= bat->bat_size_cnt; + if (new_rel_idx > hw_rd_idx) + return -EINVAL; + } + + spin_lock_irqsave(&bat->mask_lock, flags); + for (i = 0; i < rel_entry_num; i++) { + unsigned int index = bat->bat_release_rd_idx + i; + + if (index >= bat->bat_size_cnt) + index -= bat->bat_size_cnt; + + clear_bit(index, bat->bat_bitmap); + } + spin_unlock_irqrestore(&bat->mask_lock, flags); + + bat->bat_release_rd_idx = new_rel_idx; + return rel_entry_num; +} + +static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq) +{ + int ret; + + if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt); + if (ret) + return ret; + + rxq->pit_remain_release_cnt = 0; 
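+	/* The accumulated entries are now back under HW ownership, hence the
+	 * counter reset above.
+	 */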
+ return 0; +} + +static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq) +{ + unsigned int bid_cnt; + int ret; + + bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req); + if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL); + if (ret <= 0) { + dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret); + return ret; + } + + ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false); + if (ret < 0) + dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret); + + return ret; +} + +static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq) +{ + unsigned int bid_cnt; + int ret; + + bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag); + if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG); + if (ret <= 0) { + dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret); + return ret; + } + + return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false); +} + +static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *msg_pit, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + int header = le32_to_cpu(msg_pit->header); + + skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header); + skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header); + skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header); + skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3)); +} + +static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned long long data_bus_addr, data_base_addr; + struct device *dev = rxq->dpmaif_ctrl->dev; + struct dpmaif_bat_skb *bat_skb; + unsigned int data_len; + struct sk_buff *skb; + int data_offset; + + bat_skb = rxq->bat_req->bat_skb; + bat_skb += t7xx_normal_pit_bid(pkt_info); + dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); + + data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); + data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); + data_base_addr = bat_skb->data_bus_addr; + data_offset = data_bus_addr - data_base_addr; + data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); + skb = bat_skb->skb; + skb->len = 0; + skb_reset_tail_pointer(skb); + skb_reserve(skb, data_offset); + + if (skb->tail + data_len > skb->end) { + dev_err(dev, "No buffer space available\n"); + return -ENOBUFS; + } + + skb_put(skb, data_len); + skb_info->cur_skb = skb; + bat_skb->skb = NULL; + return 0; +} + +static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info); + int ret; + + ret = t7xx_bat_cur_bid_check(rxq, cur_bid); + if (ret < 0) + return ret; + + ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info); + if (ret < 0) { + dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret); + return ret; + } + + t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid); + return 0; +} + +static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq) +{ + struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + int ret; + + queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work); + + ret = t7xx_dpmaif_pit_release_and_add(rxq); + if (ret < 
0)
+		dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
+
+	return ret;
+}
+
+static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rxq->skb_list.lock, flags);
+	if (rxq->skb_list.qlen < rxq->skb_list_max_len)
+		__skb_queue_tail(&rxq->skb_list, skb);
+	else
+		dev_kfree_skb_any(skb);
+	spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
+}
+
+static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
+			       struct dpmaif_cur_rx_skb_info *skb_info)
+{
+	struct sk_buff *skb = skb_info->cur_skb;
+	struct t7xx_skb_cb *skb_cb;
+	u8 netif_id;
+
+	skb_info->cur_skb = NULL;
+
+	if (skb_info->pit_dp) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
+									CHECKSUM_NONE;
+	netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
+	skb_cb = T7XX_SKB_CB(skb);
+	skb_cb->netif_idx = netif_id;
+	skb_cb->rx_pkt_type = skb_info->pkt_type;
+	t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
+}
+
+static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
+				const unsigned long timeout)
+{
+	unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
+	struct device *dev = rxq->dpmaif_ctrl->dev;
+	struct dpmaif_cur_rx_skb_info *skb_info;
+	int ret = 0;
+
+	pit_len = rxq->pit_size_cnt;
+	skb_info = &rxq->rx_data_info;
+	cur_pit = rxq->pit_rd_idx;
+
+	for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
+		struct dpmaif_pit *pkt_info;
+		u32 val;
+
+		if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
+			break;
+
+		pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
+		if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
+			dev_err_ratelimited(dev, "RXQ%u PIT sequence check failed\n", rxq->index);
+			return -EAGAIN;
+		}
+
+		val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
+		if (val == DES_PT_MSG) {
+			if (skb_info->msg_pit_received)
+				dev_err(dev, "RXQ%u received a repeated PIT\n", rxq->index);
+
+			skb_info->msg_pit_received = true;
+			t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
+		} else { /* DES_PT_PD */
+			val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
+			if (val != PKT_BUF_FRAG)
+				ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
+			else if (!skb_info->cur_skb)
+				ret = -EINVAL;
+			else
+				ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
+
+			if (ret < 0) {
+				skb_info->err_payload = 1;
+				dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
+			}
+
+			val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
+			if (!val) {
+				if (!skb_info->err_payload) {
+					t7xx_dpmaif_rx_skb(rxq, skb_info);
+				} else if (skb_info->cur_skb) {
+					dev_kfree_skb_any(skb_info->cur_skb);
+					skb_info->cur_skb = NULL;
+				}
+
+				memset(skb_info, 0, sizeof(*skb_info));
+
+				recv_skb_cnt++;
+				if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
+					wake_up_all(&rxq->rx_wq);
+					recv_skb_cnt = 0;
+				}
+			}
+		}
+
+		cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
+		rxq->pit_rd_idx = cur_pit;
+		rxq->pit_remain_release_cnt++;
+
+		if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
+			ret = t7xx_dpmaifq_rx_notify_hw(rxq);
+			if (ret < 0)
+				break;
+		}
+	}
+
+	if (recv_skb_cnt)
+		wake_up_all(&rxq->rx_wq);
+
+	if (!ret)
+		ret = t7xx_dpmaifq_rx_notify_hw(rxq);
+
+	if (ret)
+		return ret;
+
+	return rx_cnt;
+}
+
+static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
+{
+	unsigned int hw_wr_idx, pit_cnt;
+
+	if (!rxq->que_started)
+		return 0;
+
+	hw_wr_idx =
t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index); + pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx, + DPMAIF_READ); + rxq->pit_wr_idx = hw_wr_idx; + return pit_cnt; +} + +static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, const unsigned int budget) +{ + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; + unsigned long time_limit; + unsigned int cnt; + + time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS); + + while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) { + unsigned int rd_cnt; + int real_cnt; + + rd_cnt = min(cnt, budget); + + real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit); + if (real_cnt < 0) + return real_cnt; + + if (real_cnt < cnt) + return -EAGAIN; + } + + return 0; +} + +static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq) +{ + struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; + int ret; + + ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget); + if (ret < 0) { + /* Try one more time */ + queue_work(rxq->worker, &rxq->dpmaif_rxq_work); + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + } else { + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index); + } +} + +static void t7xx_dpmaif_rxq_work(struct work_struct *work) +{ + struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); + struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + int ret; + + atomic_set(&rxq->rx_processing, 1); + /* Ensure rx_processing is changed to 1 before actually begin RX flow */ + smp_mb(); + + if (!rxq->que_started) { + atomic_set(&rxq->rx_processing, 0); + dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); + return; + } + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; + + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) + t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); + + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + atomic_set(&rxq->rx_processing, 0); +} + +void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) +{ + struct dpmaif_rx_queue *rxq; + int qno; + + qno = ffs(que_mask) - 1; + if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) { + dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno); + return; + } + + rxq = &dpmaif_ctrl->rxq[qno]; + queue_work(rxq->worker, &rxq->dpmaif_rxq_work); +} + +static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req) +{ + if (bat_req->bat_base) + dma_free_coherent(dpmaif_ctrl->dev, + bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), + bat_req->bat_base, bat_req->bat_bus_addr); +} + +/** + * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @buf_type: BAT ring type. + * + * This function allocates the BAT ring buffer shared with the HW device, also allocates + * a buffer used to store information about the BAT skbs for further release. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. 
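+ *
+ * Allocations made here are released by the counterpart t7xx_dpmaif_bat_free().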
+ */ +int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const enum bat_type buf_type) +{ + int sw_buf_size; + + if (buf_type == BAT_TYPE_FRAG) { + sw_buf_size = sizeof(struct dpmaif_bat_page); + bat_req->bat_size_cnt = DPMAIF_FRG_COUNT; + bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF; + } else { + sw_buf_size = sizeof(struct dpmaif_bat_skb); + bat_req->bat_size_cnt = DPMAIF_BAT_COUNT; + bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF; + } + + bat_req->type = buf_type; + bat_req->bat_wr_idx = 0; + bat_req->bat_release_rd_idx = 0; + + bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev, + bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), + &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!bat_req->bat_base) + return -ENOMEM; + + /* For AP SW to record skb information */ + bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size, + GFP_KERNEL); + if (!bat_req->bat_skb) + goto err_free_dma_mem; + + bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL); + if (!bat_req->bat_bitmap) + goto err_free_dma_mem; + + spin_lock_init(&bat_req->mask_lock); + atomic_set(&bat_req->refcnt, 0); + return 0; + +err_free_dma_mem: + t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); + + return -ENOMEM; +} + +void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req) +{ + if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt)) + return; + + bitmap_free(bat_req->bat_bitmap); + bat_req->bat_bitmap = NULL; + + if (bat_req->bat_skb) { + unsigned int i; + + for (i = 0; i < bat_req->bat_size_cnt; i++) { + if (bat_req->type == BAT_TYPE_FRAG) + t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); + else + t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); + } + } + + t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); +} + +static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq) +{ + rxq->pit_size_cnt = DPMAIF_PIT_COUNT; + rxq->pit_rd_idx = 0; + rxq->pit_wr_idx = 0; + rxq->pit_release_rd_idx = 0; + rxq->expect_pit_seq = 0; + rxq->pit_remain_release_cnt = 0; + memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); + + rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev, + rxq->pit_size_cnt * sizeof(struct dpmaif_pit), + &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!rxq->pit_base) + return -ENOMEM; + + rxq->bat_req = &rxq->dpmaif_ctrl->bat_req; + atomic_inc(&rxq->bat_req->refcnt); + + rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag; + atomic_inc(&rxq->bat_frag->refcnt); + return 0; +} + +static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq) +{ + if (!rxq->dpmaif_ctrl) + return; + + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); + + if (rxq->pit_base) + dma_free_coherent(rxq->dpmaif_ctrl->dev, + rxq->pit_size_cnt * sizeof(struct dpmaif_pit), + rxq->pit_base, rxq->pit_bus_addr); +} + +int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue) +{ + int ret; + + ret = t7xx_dpmaif_rx_alloc(queue); + if (ret < 0) { + dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret); + return ret; + } + + INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work); + + queue->worker = alloc_workqueue("dpmaif_rx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index); + if (!queue->worker) { + ret = -ENOMEM; + goto err_free_rx_buffer; + } + + init_waitqueue_head(&queue->rx_wq); + skb_queue_head_init(&queue->skb_list); + queue->skb_list_max_len = 
queue->bat_req->pkt_buf_sz; + queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread, + queue, "dpmaif_rx%d_push", queue->index); + + ret = PTR_ERR_OR_ZERO(queue->rx_thread); + if (ret) + goto err_free_workqueue; + + return 0; + +err_free_workqueue: + destroy_workqueue(queue->worker); + +err_free_rx_buffer: + t7xx_dpmaif_rx_buf_free(queue); + + return ret; +} + +void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue) +{ + if (queue->worker) + destroy_workqueue(queue->worker); + + if (queue->rx_thread) + kthread_stop(queue->rx_thread); + + skb_queue_purge(&queue->skb_list); + t7xx_dpmaif_rx_buf_free(queue); +} + +static void t7xx_dpmaif_bat_release_work(struct work_struct *work) +{ + struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work); + struct dpmaif_rx_queue *rxq; + int ret; + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; + + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + + /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ + rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { + t7xx_dpmaif_bat_release_and_add(rxq); + t7xx_dpmaif_frag_bat_release_and_add(rxq); + } + + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); +} + +int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl) +{ + dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue", + WQ_MEM_RECLAIM, 1); + if (!dpmaif_ctrl->bat_release_wq) + return -ENOMEM; + + INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work); + return 0; +} + +void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl) +{ + flush_work(&dpmaif_ctrl->bat_release_work); + + if (dpmaif_ctrl->bat_release_wq) { + destroy_workqueue(dpmaif_ctrl->bat_release_wq); + dpmaif_ctrl->bat_release_wq = NULL; + } +} + +/** + * t7xx_dpmaif_rx_stop() - Suspend RX flow. + * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl. + * + * Wait for all the RX work to finish executing and mark the RX queue as paused. 
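+ *
+ * Any t7xx_dpmaif_rxq_work() queued after this point exits early, since it
+ * re-checks que_started before touching the hardware.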
+ */ +void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + unsigned int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i]; + int timeout, value; + + flush_work(&rxq->dpmaif_rxq_work); + + timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value, + !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) + dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n"); + + /* Ensure RX processing has stopped before we set rxq->que_started to false */ + smp_mb(); + rxq->que_started = false; + } +} + +static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq) +{ + int cnt, j = 0; + + flush_work(&rxq->dpmaif_rxq_work); + rxq->que_started = false; + + do { + cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, + rxq->pit_wr_idx, DPMAIF_READ); + + if (++j >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt); + break; + } + } while (cnt); + + memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit)); + memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat)); + bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt); + memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); + + rxq->pit_rd_idx = 0; + rxq->pit_wr_idx = 0; + rxq->pit_release_rd_idx = 0; + rxq->expect_pit_seq = 0; + rxq->pit_remain_release_cnt = 0; + rxq->bat_req->bat_release_rd_idx = 0; + rxq->bat_req->bat_wr_idx = 0; + rxq->bat_frag->bat_release_rd_idx = 0; + rxq->bat_frag->bat_wr_idx = 0; +} + +void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) + t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h new file mode 100644 index 0000000000..182f62dfe3 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Moises Veleta + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMA_RX_H__ +#define __T7XX_HIF_DPMA_RX_H__ + +#include +#include + +#include "t7xx_hif_dpmaif.h" + +#define NETIF_MASK GENMASK(4, 0) + +#define PKT_TYPE_IP4 0 +#define PKT_TYPE_IP6 1 + +/* Structure of DL PIT */ +struct dpmaif_pit { + __le32 header; + union { + struct { + __le32 data_addr_l; + __le32 data_addr_h; + __le32 footer; + } pd; + struct { + __le32 params_1; + __le32 params_2; + __le32 params_3; + } msg; + }; +}; + +/* PIT header fields */ +#define PD_PIT_DATA_LEN GENMASK(31, 16) +#define PD_PIT_BUFFER_ID GENMASK(15, 3) +#define PD_PIT_BUFFER_TYPE BIT(2) +#define PD_PIT_CONT BIT(1) +#define PD_PIT_PACKET_TYPE BIT(0) +/* PIT footer fields */ +#define PD_PIT_DLQ_DONE GENMASK(31, 30) +#define PD_PIT_ULQ_DONE GENMASK(29, 24) +#define PD_PIT_HEADER_OFFSET GENMASK(23, 19) +#define PD_PIT_BI_F GENMASK(18, 17) +#define PD_PIT_IG BIT(16) +#define PD_PIT_RES GENMASK(15, 11) +#define PD_PIT_H_BID GENMASK(10, 8) +#define PD_PIT_PIT_SEQ GENMASK(7, 0) + +#define MSG_PIT_DP BIT(31) +#define MSG_PIT_RES GENMASK(30, 27) +#define MSG_PIT_NETWORK_TYPE GENMASK(26, 24) +#define MSG_PIT_CHANNEL_ID GENMASK(23, 16) +#define MSG_PIT_RES2 GENMASK(15, 12) +#define MSG_PIT_HPC_IDX GENMASK(11, 8) +#define MSG_PIT_SRC_QID GENMASK(7, 5) +#define MSG_PIT_ERROR_BIT BIT(4) +#define MSG_PIT_CHECKSUM GENMASK(3, 2) +#define MSG_PIT_CONT BIT(1) +#define MSG_PIT_PACKET_TYPE BIT(0) + +#define MSG_PIT_HP_IDX GENMASK(31, 27) +#define MSG_PIT_CMD GENMASK(26, 24) +#define MSG_PIT_RES3 GENMASK(23, 21) +#define MSG_PIT_FLOW GENMASK(20, 16) +#define MSG_PIT_COUNT GENMASK(15, 0) + +#define MSG_PIT_HASH GENMASK(31, 24) +#define MSG_PIT_RES4 GENMASK(23, 18) +#define MSG_PIT_PRO GENMASK(17, 16) +#define MSG_PIT_VBID GENMASK(15, 3) +#define MSG_PIT_RES5 GENMASK(2, 0) + +#define MSG_PIT_DLQ_DONE GENMASK(31, 30) +#define MSG_PIT_ULQ_DONE GENMASK(29, 24) +#define MSG_PIT_IP BIT(23) +#define MSG_PIT_RES6 BIT(22) +#define MSG_PIT_MR GENMASK(21, 20) +#define MSG_PIT_RES7 GENMASK(19, 17) +#define MSG_PIT_IG BIT(16) +#define MSG_PIT_RES8 GENMASK(15, 11) +#define MSG_PIT_H_BID GENMASK(10, 8) +#define MSG_PIT_PIT_SEQ GENMASK(7, 0) + +int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue); +void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req, + const unsigned int q_num, const unsigned int buf_cnt, + const bool initial); +int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const unsigned int buf_cnt, const bool first_time); +void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask); +void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue); +void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const enum bat_type buf_type); +void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, + struct dpmaif_bat_request *bat_req); + +#endif /* __T7XX_HIF_DPMA_RX_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c new file mode 100644 index 0000000000..46514208d4 --- /dev/null +++ 
b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c @@ -0,0 +1,683 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_pci.h" + +#define DPMAIF_SKB_TX_BURST_CNT 5 +#define DPMAIF_DRB_LIST_LEN 6144 + +/* DRB dtype */ +#define DES_DTYP_PD 0 +#define DES_DTYP_MSG 1 + +static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt; + unsigned long flags; + + if (!txq->que_started) + return 0; + + old_sw_rd_idx = txq->drb_rd_idx; + new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num); + if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) { + dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx); + return 0; + } + + if (old_sw_rd_idx <= new_hw_rd_idx) + drb_cnt = new_hw_rd_idx - old_sw_rd_idx; + else + drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx; + + spin_lock_irqsave(&txq->tx_lock, flags); + txq->drb_rd_idx = new_hw_rd_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + return drb_cnt; +} + +static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num, unsigned int release_cnt) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base; + struct dpmaif_drb *cur_drb, *drb_base; + unsigned int drb_cnt, i, cur_idx; + unsigned long flags; + + drb_skb_base = txq->drb_skb_base; + drb_base = txq->drb_base; + + spin_lock_irqsave(&txq->tx_lock, flags); + drb_cnt = txq->drb_size_cnt; + cur_idx = txq->drb_release_rd_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + for (i = 0; i < release_cnt; i++) { + cur_drb = drb_base + cur_idx; + if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) { + cur_drb_skb = drb_skb_base + cur_idx; + if (!cur_drb_skb->is_msg) + dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr, + cur_drb_skb->data_len, DMA_TO_DEVICE); + + if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) { + if (!cur_drb_skb->skb) { + dev_err(dpmaif_ctrl->dev, + "txq%u: DRB check fail, invalid skb\n", q_num); + continue; + } + + dev_kfree_skb_any(cur_drb_skb->skb); + } + + cur_drb_skb->skb = NULL; + } + + spin_lock_irqsave(&txq->tx_lock, flags); + cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx); + txq->drb_release_rd_idx = cur_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8) + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index); + } + + if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) + dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num); + + return i; +} + +static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num, unsigned int budget) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + 
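/* Refresh the DRB read index from HW, then free at most 'budget' entries */
+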
unsigned int rel_cnt, real_rel_cnt; + + /* Update read index from HW */ + t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num); + + rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, + txq->drb_rd_idx, DPMAIF_READ); + + real_rel_cnt = min_not_zero(budget, rel_cnt); + if (real_rel_cnt) + real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt); + + return real_rel_cnt < rel_cnt ? -EAGAIN : 0; +} + +static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq) +{ + return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index); +} + +static void t7xx_dpmaif_tx_done(struct work_struct *work) +{ + struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work); + struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl; + struct dpmaif_hw_info *hw_info; + int ret; + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; + + /* The device may be in low power state. Disable sleep if needed */ + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { + hw_info = &dpmaif_ctrl->hw_info; + ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); + if (ret == -EAGAIN || + (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && + t7xx_dpmaif_drb_ring_not_empty(txq))) { + queue_work(dpmaif_ctrl->txq[txq->index].worker, + &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); + /* Give the device time to enter the low power state */ + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + } else { + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); + } + } + + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); +} + +static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l, + unsigned int channel_id) +{ + struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; + struct dpmaif_drb *drb = drb_base + cur_idx; + + drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) | + FIELD_PREP(DRB_HDR_CONT, 1) | + FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len)); + + drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) | + FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) | + FIELD_PREP(DRB_MSG_L4_CHK, 1)); +} + +static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, dma_addr_t data_addr, + unsigned int pkt_size, bool last_one) +{ + struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; + struct dpmaif_drb *drb = drb_base + cur_idx; + u32 header; + + header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size); + if (!last_one) + header |= FIELD_PREP(DRB_HDR_CONT, 1); + + drb->header = cpu_to_le32(header); + drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr)); + drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr)); +} + +static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, struct sk_buff *skb, bool is_msg, + bool is_frag, bool is_last_one, dma_addr_t bus_addr, + unsigned int data_len) +{ + struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base; + struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx; + + drb_skb->skb = skb; + drb_skb->bus_addr = bus_addr; + drb_skb->data_len = data_len; + drb_skb->index = cur_idx; + drb_skb->is_msg = 
is_msg; + drb_skb->is_frag = is_frag; + drb_skb->is_last = is_last_one; +} + +static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb) +{ + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + unsigned int wr_cnt, send_cnt, payload_cnt; + unsigned int cur_idx, drb_wr_idx_backup; + struct skb_shared_info *shinfo; + struct dpmaif_tx_queue *txq; + struct t7xx_skb_cb *skb_cb; + unsigned long flags; + + skb_cb = T7XX_SKB_CB(skb); + txq = &dpmaif_ctrl->txq[skb_cb->txq_number]; + if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON) + return -ENODEV; + + atomic_set(&txq->tx_processing, 1); + /* Ensure tx_processing is changed to 1 before actually begin TX flow */ + smp_mb(); + + shinfo = skb_shinfo(skb); + if (shinfo->frag_list) + dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n"); + + payload_cnt = shinfo->nr_frags + 1; + /* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */ + send_cnt = payload_cnt + 1; + + spin_lock_irqsave(&txq->tx_lock, flags); + cur_idx = txq->drb_wr_idx; + drb_wr_idx_backup = cur_idx; + txq->drb_wr_idx += send_cnt; + if (txq->drb_wr_idx >= txq->drb_size_cnt) + txq->drb_wr_idx -= txq->drb_size_cnt; + t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx); + t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0); + spin_unlock_irqrestore(&txq->tx_lock, flags); + + for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) { + bool is_frag, is_last_one = wr_cnt == payload_cnt - 1; + unsigned int data_len; + dma_addr_t bus_addr; + void *data_addr; + + if (!wr_cnt) { + data_len = skb_headlen(skb); + data_addr = skb->data; + is_frag = false; + } else { + skb_frag_t *frag = shinfo->frags + wr_cnt - 1; + + data_len = skb_frag_size(frag); + data_addr = skb_frag_address(frag); + is_frag = true; + } + + bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr)) + goto unmap_buffers; + + cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx); + + spin_lock_irqsave(&txq->tx_lock, flags); + t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len, + is_last_one); + t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag, + is_last_one, bus_addr, data_len); + spin_unlock_irqrestore(&txq->tx_lock, flags); + } + + if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2)) + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index); + + atomic_set(&txq->tx_processing, 0); + + return 0; + +unmap_buffers: + while (wr_cnt--) { + struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base; + + cur_idx = cur_idx ? 
cur_idx - 1 : txq->drb_size_cnt - 1; + drb_skb += cur_idx; + dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr, + drb_skb->data_len, DMA_TO_DEVICE); + } + + txq->drb_wr_idx = drb_wr_idx_backup; + atomic_set(&txq->tx_processing, 0); + + return -ENOMEM; +} + +static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head)) + return false; + } + + return true; +} + +/* Currently, only the default TX queue is used */ +static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_tx_queue *txq; + + txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE]; + if (!txq->que_started) + return NULL; + + return txq; +} + +static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq) +{ + return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, + txq->drb_wr_idx, DPMAIF_WRITE); +} + +static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb) +{ + /* Normal DRB (frags data + skb linear data) + msg DRB */ + return skb_shinfo(skb)->nr_frags + 2; +} + +static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq) +{ + unsigned int drb_remain_cnt, i; + unsigned int send_drb_cnt; + int drb_cnt = 0; + int ret = 0; + + drb_remain_cnt = t7xx_txq_drb_wr_available(txq); + + for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) { + struct sk_buff *skb; + + skb = skb_peek(&txq->tx_skb_head); + if (!skb) + break; + + send_drb_cnt = t7xx_skb_drb_cnt(skb); + if (drb_remain_cnt < send_drb_cnt) { + drb_remain_cnt = t7xx_txq_drb_wr_available(txq); + continue; + } + + drb_remain_cnt -= send_drb_cnt; + + ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb); + if (ret < 0) { + dev_err(txq->dpmaif_ctrl->dev, + "Failed to add skb to device's ring: %d\n", ret); + break; + } + + drb_cnt += send_drb_cnt; + skb_unlink(skb, &txq->tx_skb_head); + } + + if (drb_cnt > 0) + return drb_cnt; + + return ret; +} + +static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) +{ + bool wait_disable_sleep = true; + + do { + struct dpmaif_tx_queue *txq; + int drb_send_cnt; + + txq = t7xx_select_tx_queue(dpmaif_ctrl); + if (!txq) + return; + + drb_send_cnt = t7xx_txq_burst_send_skb(txq); + if (drb_send_cnt <= 0) { + usleep_range(10, 20); + cond_resched(); + continue; + } + + /* Wait for the PCIe resource to unlock */ + if (wait_disable_sleep) { + if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) + return; + + wait_disable_sleep = false; + } + + t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index, + drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD); + + cond_resched(); + } while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() && + (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)); +} + +static int t7xx_dpmaif_tx_hw_push_thread(void *arg) +{ + struct dpmaif_ctrl *dpmaif_ctrl = arg; + int ret; + + while (!kthread_should_stop()) { + if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) || + dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { + if (wait_event_interruptible(dpmaif_ctrl->tx_wq, + (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && + dpmaif_ctrl->state == DPMAIF_STATE_PWRON) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + } + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return ret; + + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + t7xx_do_tx_hw_push(dpmaif_ctrl); + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + 
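/* Re-arm runtime PM autosuspend once this TX burst is done */
+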
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+		pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+	}
+
+	return 0;
+}
+
+int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	init_waitqueue_head(&dpmaif_ctrl->tx_wq);
+	dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
+					     dpmaif_ctrl, "dpmaif_tx_hw_push");
+	return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
+}
+
+void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+	if (dpmaif_ctrl->tx_thread)
+		kthread_stop(dpmaif_ctrl->tx_thread);
+}
+
+/**
+ * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
+ * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
+ * @txq_number: Queue number to xmit on.
+ * @skb: Pointer to the skb to transmit.
+ *
+ * Add the skb to the queue of skbs to be transmitted.
+ * Wake up the thread that pushes the skbs from the queue to the HW.
+ *
+ * Return:
+ * * 0		- Success.
+ * * -EBUSY	- Tx budget exhausted.
+ *		  In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
+ *		  state to prevent this error condition.
+ */
+int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
+			    struct sk_buff *skb)
+{
+	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
+	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
+	struct t7xx_skb_cb *skb_cb;
+
+	if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
+		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
+		return -EBUSY;
+	}
+
+	skb_cb = T7XX_SKB_CB(skb);
+	skb_cb->txq_number = txq_number;
+	skb_queue_tail(&txq->tx_skb_head, skb);
+	wake_up(&dpmaif_ctrl->tx_wq);
+
+	return 0;
+}
+
+void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
+{
+	int i;
+
+	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+		if (que_mask & BIT(i))
+			queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
+	}
+}
+
+static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
+{
+	size_t brb_skb_size, brb_pd_size;
+
+	brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
+	brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);
+
+	txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;
+
+	/* For HW && AP SW */
+	txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
+					   &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
+	if (!txq->drb_base)
+		return -ENOMEM;
+
+	/* For AP SW to record the skb information */
+	txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
+	if (!txq->drb_skb_base) {
+		dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
+				  txq->drb_base, txq->drb_bus_addr);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
+{
+	struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
+	unsigned int i;
+
+	if (!drb_skb_base)
+		return;
+
+	for (i = 0; i < txq->drb_size_cnt; i++) {
+		drb_skb = drb_skb_base + i;
+		if (!drb_skb->skb)
+			continue;
+
+		if (!drb_skb->is_msg)
+			dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
+					 drb_skb->data_len, DMA_TO_DEVICE);
+
+		if (drb_skb->is_last) {
+			dev_kfree_skb(drb_skb->skb);
+			drb_skb->skb = NULL;
+		}
+	}
+}
+
+static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
+{
+	if (txq->drb_base)
+		dma_free_coherent(txq->dpmaif_ctrl->dev,
+				  txq->drb_size_cnt * sizeof(struct dpmaif_drb),
+				  txq->drb_base, txq->drb_bus_addr);
+
+	t7xx_dpmaif_tx_free_drb_skb(txq);
+}
+
+/**
+ * t7xx_dpmaif_txq_init() - Initialize TX queue.
+ * @txq: Pointer to struct dpmaif_tx_queue. + * + * Initialize the TX queue data structure and allocate memory for it to use. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. + */ +int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq) +{ + int ret; + + skb_queue_head_init(&txq->tx_skb_head); + init_waitqueue_head(&txq->req_wq); + atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN); + + ret = t7xx_dpmaif_tx_drb_buf_init(txq); + if (ret) { + dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret); + return ret; + } + + txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | + (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); + if (!txq->worker) + return -ENOMEM; + + INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done); + spin_lock_init(&txq->tx_lock); + + return 0; +} + +void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq) +{ + if (txq->worker) + destroy_workqueue(txq->worker); + + skb_queue_purge(&txq->tx_skb_head); + t7xx_dpmaif_tx_drb_buf_rel(txq); +} + +void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + struct dpmaif_tx_queue *txq; + int count = 0; + + txq = &dpmaif_ctrl->txq[i]; + txq->que_started = false; + /* Make sure TXQ is disabled */ + smp_mb(); + + /* Wait for active Tx to be done */ + while (atomic_read(&txq->tx_processing)) { + if (++count >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n"); + break; + } + } + } +} + +static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq) +{ + txq->que_started = false; + + cancel_work_sync(&txq->dpmaif_tx_work); + flush_work(&txq->dpmaif_tx_work); + t7xx_dpmaif_tx_free_drb_skb(txq); + + txq->drb_rd_idx = 0; + txq->drb_wr_idx = 0; + txq->drb_release_rd_idx = 0; +} + +void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) + t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h new file mode 100644 index 0000000000..ca9b9ea2da --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Moises Veleta + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMA_TX_H__ +#define __T7XX_HIF_DPMA_TX_H__ + +#include +#include +#include + +#include "t7xx_hif_dpmaif.h" + +#define DPMAIF_TX_DEFAULT_QUEUE 0 + +struct dpmaif_drb { + __le32 header; + union { + struct { + __le32 data_addr_l; + __le32 data_addr_h; + } pd; + struct { + __le32 msg_hdr; + __le32 reserved1; + } msg; + }; + __le32 reserved2; +}; + +/* Header fields */ +#define DRB_HDR_DATA_LEN GENMASK(31, 16) +#define DRB_HDR_RESERVED GENMASK(15, 3) +#define DRB_HDR_CONT BIT(2) +#define DRB_HDR_DTYP GENMASK(1, 0) + +#define DRB_MSG_DW2_RES GENMASK(31, 30) +#define DRB_MSG_L4_CHK BIT(29) +#define DRB_MSG_IP_CHK BIT(28) +#define DRB_MSG_RESERVED BIT(27) +#define DRB_MSG_NETWORK_TYPE GENMASK(26, 24) +#define DRB_MSG_CHANNEL_ID GENMASK(23, 16) +#define DRB_MSG_COUNT_L GENMASK(15, 0) + +struct dpmaif_drb_skb { + struct sk_buff *skb; + dma_addr_t bus_addr; + unsigned int data_len; + u16 index:13; + u16 is_msg:1; + u16 is_frag:1; + u16 is_last:1; +}; + +int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number, + struct sk_buff *skb); +void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq); +void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask); +int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq); +void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl); + +#endif /* __T7XX_HIF_DPMA_TX_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c new file mode 100644 index 0000000000..3ee18d46f8 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Ricardo Martinez + */ + +#include +#include +#include +#include +#include + +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" + +#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \ + D2H_INT_RESUME_ACK | \ + D2H_INT_SUSPEND_ACK_AP | \ + D2H_INT_RESUME_ACK_AP) + +static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask) +{ + void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; + + /* Clear level 2 interrupt */ + iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK); + /* Ensure write is complete */ + t7xx_mhccif_read_sw_int_sts(t7xx_dev); + /* Clear level 1 interrupt */ + t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT); +} + +static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + u32 int_status, val; + + val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); + iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + + int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); + if (int_status & D2H_SW_INT_MASK) { + int ret = t7xx_pci_mhccif_isr(t7xx_dev); + + if (ret) + dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d", ret); + } + + t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); + + if (int_status & D2H_INT_DS_LOCK_ACK) + complete_all(&t7xx_dev->sleep_lock_acquire); + + if (int_status & D2H_INT_SR_ACK) + complete(&t7xx_dev->pm_sr_ack); + + iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + + int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); + if (!int_status) { + val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); + iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + } + + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); + return IRQ_HANDLED; +} + +u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev) +{ + return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS); +} + +void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val) +{ + iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET); +} + +void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val) +{ + iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR); +} + +u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev) +{ + return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK); +} + +static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base + + MHCCIF_RC_DEV_BASE - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + + t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler; + t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread; + t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev; +} + +void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel) +{ + void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; + + iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY); + iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM); +} diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h new file mode 100644 index 0000000000..209b386bc0 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. 
+ * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Ricardo Martinez + */ + +#ifndef __T7XX_MHCCIF_H__ +#define __T7XX_MHCCIF_H__ + +#include + +#include "t7xx_pci.h" +#include "t7xx_reg.h" + +#define D2H_SW_INT_MASK (D2H_INT_EXCEPTION_INIT | \ + D2H_INT_EXCEPTION_INIT_DONE | \ + D2H_INT_EXCEPTION_CLEARQ_DONE | \ + D2H_INT_EXCEPTION_ALLQ_RESET | \ + D2H_INT_PORT_ENUM | \ + D2H_INT_ASYNC_MD_HK) + +void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val); +void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val); +u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev); +void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev); +u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev); +void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel); + +#endif /*__T7XX_MHCCIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c new file mode 100644 index 0000000000..3458af31e8 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -0,0 +1,727 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_netdev.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define RT_ID_MD_PORT_ENUM 0 +/* Modem feature query identification code - "ICCC" */ +#define MD_FEATURE_QUERY_ID 0x49434343 + +#define FEATURE_VER GENMASK(7, 4) +#define FEATURE_MSK GENMASK(3, 0) + +#define RGU_RESET_DELAY_MS 10 +#define PORT_RESET_DELAY_MS 2000 +#define EX_HS_TIMEOUT_MS 5000 +#define EX_HS_POLL_DELAY_MS 10 + +enum mtk_feature_support_type { + MTK_FEATURE_DOES_NOT_EXIST, + MTK_FEATURE_NOT_SUPPORTED, + MTK_FEATURE_MUST_BE_SUPPORTED, +}; + +static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev) +{ + return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK; +} + +/** + * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts. + * @t7xx_dev: MTK device. + * + * Check the interrupt status and queue commands accordingly. + * + * Returns: + ** 0 - Success. + ** -EINVAL - Failure to get FSM control. 
+ */ +int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + struct t7xx_fsm_ctl *ctl; + unsigned int int_sta; + int ret = 0; + u32 mask; + + ctl = md->fsm_ctl; + if (!ctl) { + dev_err_ratelimited(&t7xx_dev->pdev->dev, + "MHCCIF interrupt received before initializing MD monitor\n"); + return -EINVAL; + } + + spin_lock_bh(&md->exp_lock); + int_sta = t7xx_get_interrupt_status(t7xx_dev); + md->exp_id |= int_sta; + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { + if (ctl->md_state == MD_STATE_INVALID || + ctl->md_state == MD_STATE_WAITING_FOR_HS1 || + ctl->md_state == MD_STATE_WAITING_FOR_HS2 || + ctl->md_state == MD_STATE_READY) { + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX); + } + } else if (md->exp_id & D2H_INT_PORT_ENUM) { + md->exp_id &= ~D2H_INT_PORT_ENUM; + + if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START || + ctl->curr_state == FSM_STATE_STOPPED) + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM); + } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) { + mask = t7xx_mhccif_mask_get(t7xx_dev); + if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + queue_work(md->handshake_wq, &md->handshake_work); + } + } + spin_unlock_bh(&md->exp_lock); + + return ret; +} + +static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr; + void __iomem *reset_pcie_reg; + u32 val; + + reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA - + pbase_addr->pcie_dev_reg_trsl_addr; + val = ioread32(reset_pcie_reg); + iowrite32(val, reset_pcie_reg); +} + +void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev) +{ + /* Clear L2 */ + t7xx_clr_device_irq_via_pcie(t7xx_dev); + /* Clear L1 */ + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); +} + +static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name) +{ +#ifdef CONFIG_ACPI + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct device *dev = &t7xx_dev->pdev->dev; + acpi_status acpi_ret; + acpi_handle handle; + + handle = ACPI_HANDLE(dev); + if (!handle) { + dev_err(dev, "ACPI handle not found\n"); + return -EFAULT; + } + + if (!acpi_has_method(handle, fn_name)) { + dev_err(dev, "%s method not found\n", fn_name); + return -EFAULT; + } + + acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer); + if (ACPI_FAILURE(acpi_ret)) { + dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret)); + return -EFAULT; + } + +#endif + return 0; +} + +int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev) +{ + return t7xx_acpi_reset(t7xx_dev, "_RST"); +} + +static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) +{ + u32 val; + + val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + if (val & MISC_RESET_TYPE_PLDR) + t7xx_acpi_reset(t7xx_dev, "MRST._RST"); + else if (val & MISC_RESET_TYPE_FLDR) + t7xx_acpi_fldr_func(t7xx_dev); +} + +static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + + msleep(RGU_RESET_DELAY_MS); + t7xx_reset_device_via_pmic(t7xx_dev); + return IRQ_HANDLED; +} + +static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + struct t7xx_modem *modem; + + t7xx_clear_rgu_irq(t7xx_dev); + if (!t7xx_dev->rgu_pci_irq_en) + return IRQ_HANDLED; + + modem = t7xx_dev->md; + modem->rgu_irq_asserted = true; + 
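/* Disable the RGU interrupt while the threaded handler resets the device */
+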
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+	return IRQ_WAKE_THREAD;
+}
+
+static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
+{
+	/* Registers RGU callback ISR with PCIe driver */
+	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+
+	t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
+	t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
+	t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
+	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+}
+
+/**
+ * t7xx_cldma_exception() - CLDMA exception handler.
+ * @md_ctrl: modem control struct.
+ * @stage: exception stage.
+ *
+ * Part of the modem exception recovery.
+ * Stages are one after the other as described below:
+ * HIF_EX_INIT:		Disable and clear TXQ.
+ * HIF_EX_CLEARQ_DONE:	Disable RX, flush TX/RX workqueues and clear RX.
+ * HIF_EX_ALLQ_RESET:	HW is back in safe mode for re-initialization and restart.
+ */
+
+/* Modem Exception Handshake Flow
+ *
+ * Modem HW Exception interrupt received
+ *        (MD_IRQ_CCIF_EX)
+ *                   |
+ *         +---------v--------+
+ *         |   HIF_EX_INIT    |  : Disable and clear TXQ
+ *         +------------------+
+ *                   |
+ *         +---------v--------+
+ *         | HIF_EX_INIT_DONE |  : Wait for the init to be done
+ *         +------------------+
+ *                   |
+ *         +---------v--------+
+ *         |HIF_EX_CLEARQ_DONE|  : Disable and clear RXQ
+ *         +------------------+  : Flush TX/RX workqueues
+ *                   |
+ *         +---------v--------+
+ *         |HIF_EX_ALLQ_RESET |  : Restart HW and CLDMA
+ *         +------------------+
+ */
+static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
+{
+	switch (stage) {
+	case HIF_EX_INIT:
+		t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
+		t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
+		break;
+
+	case HIF_EX_CLEARQ_DONE:
+		/* We do not want to get CLDMA IRQ when MD is
+		 * resetting CLDMA after it got clearq_ack.
+		 */
+		t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
+		t7xx_cldma_stop(md_ctrl);
+
+		if (md_ctrl->hif_id == CLDMA_ID_MD)
+			t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);
+
+		t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
+		break;
+
+	case HIF_EX_ALLQ_RESET:
+		t7xx_cldma_hw_init(&md_ctrl->hw_info);
+		t7xx_cldma_start(md_ctrl);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
+{
+	struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;
+
+	if (stage == HIF_EX_CLEARQ_DONE) {
+		/* Give DHL time to flush data */
+		msleep(PORT_RESET_DELAY_MS);
+		t7xx_port_proxy_reset(md->port_prox);
+	}
+
+	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
+
+	if (stage == HIF_EX_INIT)
+		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
+	else if (stage == HIF_EX_CLEARQ_DONE)
+		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
+}
+
+static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
+{
+	unsigned int waited_time_ms = 0;
+
+	do {
+		if (md->exp_id & event_id)
+			return 0;
+
+		waited_time_ms += EX_HS_POLL_DELAY_MS;
+		msleep(EX_HS_POLL_DELAY_MS);
+	} while (waited_time_ms < EX_HS_TIMEOUT_MS);
+
+	return -EFAULT;
+}
+
+static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
+{
+	/* Register the MHCCIF ISR for MD exception, port enum and
+	 * async handshake notifications.
+ */ + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); + t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM); + + /* Register RGU IRQ handler for sAP exception notification */ + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_register_rgu_isr(t7xx_dev); +} + +struct feature_query { + __le32 head_pattern; + u8 feature_set[FEATURE_COUNT]; + __le32 tail_pattern; +}; + +static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core) +{ + struct feature_query *ft_query; + struct sk_buff *skb; + + skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query)); + if (!skb) + return; + + ft_query = skb_put(skb, sizeof(*ft_query)); + ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); + memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT); + ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); + + /* Send HS1 message to device */ + t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0); +} + +static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev, + void *data) +{ + struct feature_query *md_feature = data; + struct mtk_runtime_feature *rt_feature; + unsigned int i, rt_data_len = 0; + struct sk_buff *skb; + + /* Parse MD runtime data query */ + if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID || + le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) { + dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n", + le32_to_cpu(md_feature->head_pattern), + le32_to_cpu(md_feature->tail_pattern)); + return -EINVAL; + } + + for (i = 0; i < FEATURE_COUNT; i++) { + if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) != + MTK_FEATURE_MUST_BE_SUPPORTED) + rt_data_len += sizeof(*rt_feature); + } + + skb = t7xx_ctrl_alloc_skb(rt_data_len); + if (!skb) + return -ENOMEM; + + rt_feature = skb_put(skb, rt_data_len); + memset(rt_feature, 0, rt_data_len); + + /* Fill runtime feature */ + for (i = 0; i < FEATURE_COUNT; i++) { + u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]); + + if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED) + continue; + + rt_feature->feature_id = i; + if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST) + rt_feature->support_info = md_feature->feature_set[i]; + + rt_feature++; + } + + /* Send HS3 message to device */ + t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0); + return 0; +} + +static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core, + struct device *dev, void *data, int data_length) +{ + enum mtk_feature_support_type ft_spt_st, ft_spt_cfg; + struct mtk_runtime_feature *rt_feature; + int i, offset; + + offset = sizeof(struct feature_query); + for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) { + rt_feature = data + offset; + offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len); + + ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]); + if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED) + continue; + + ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info); + if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) + return -EINVAL; + + if (i == RT_ID_MD_PORT_ENUM) + t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); + } + + return 0; +} + +static int t7xx_core_reset(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + md->core_md.ready = false; + + if (!ctl) { + dev_err(dev, "FSM is not initialized\n"); + return -EINVAL; + } + + if (md->core_md.handshake_ongoing) { + int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); + + if 
(ret) + return ret; + } + + md->core_md.handshake_ongoing = false; + return 0; +} + +static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, + enum t7xx_fsm_event_state event_id, + enum t7xx_fsm_event_state err_detect) +{ + struct t7xx_fsm_event *event = NULL, *event_next; + struct t7xx_sys_info *core_info = &md->core_md; + struct device *dev = &md->t7xx_dev->pdev->dev; + unsigned long flags; + int ret; + + t7xx_prepare_host_rt_data_query(core_info); + + while (!kthread_should_stop()) { + bool event_received = false; + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) { + if (event->event_id == err_detect) { + list_del(&event->entry); + spin_unlock_irqrestore(&ctl->event_lock, flags); + dev_err(dev, "Core handshake error event received\n"); + goto err_free_event; + } else if (event->event_id == event_id) { + list_del(&event->entry); + event_received = true; + break; + } + } + spin_unlock_irqrestore(&ctl->event_lock, flags); + + if (event_received) + break; + + wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) || + kthread_should_stop()); + if (kthread_should_stop()) + goto err_free_event; + } + + if (!event || ctl->exp_flg) + goto err_free_event; + + ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length); + if (ret) { + dev_err(dev, "Host failure parsing runtime data: %d\n", ret); + goto err_free_event; + } + + if (ctl->exp_flg) + goto err_free_event; + + ret = t7xx_prepare_device_rt_data(core_info, dev, event->data); + if (ret) { + dev_err(dev, "Device failure parsing runtime data: %d", ret); + goto err_free_event; + } + + core_info->ready = true; + core_info->handshake_ongoing = false; + wake_up(&ctl->async_hk_wq); +err_free_event: + kfree(event); +} + +static void t7xx_md_hk_wq(struct work_struct *work) +{ + struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + /* Clear the HS2 EXIT event appended in core_reset() */ + t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT); + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); + md->core_md.handshake_ongoing = true; + t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); +} + +void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + void __iomem *mhccif_base; + unsigned int int_sta; + unsigned long flags; + + switch (evt_id) { + case FSM_PRE_START: + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); + break; + + case FSM_START: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM); + + spin_lock_irqsave(&md->exp_lock, flags); + int_sta = t7xx_get_interrupt_status(md->t7xx_dev); + md->exp_id |= int_sta; + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { + ctl->exp_flg = true; + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + } else if (ctl->exp_flg) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { + queue_work(md->handshake_wq, &md->handshake_work); + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; + iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + } else { + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + } + spin_unlock_irqrestore(&md->exp_lock, flags); + 
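+		/* Unmask the exception interrupts so the device can report a
+		 * modem exception from this point on.
+		 */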
+ t7xx_mhccif_mask_clr(md->t7xx_dev, + D2H_INT_EXCEPTION_INIT | + D2H_INT_EXCEPTION_INIT_DONE | + D2H_INT_EXCEPTION_CLEARQ_DONE | + D2H_INT_EXCEPTION_ALLQ_RESET); + break; + + case FSM_READY: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + break; + + default: + break; + } +} + +void t7xx_md_exception_handshake(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + int ret; + + t7xx_md_exception(md, HIF_EX_INIT); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE); + + t7xx_md_exception(md, HIF_EX_INIT_DONE); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE); + + t7xx_md_exception(md, HIF_EX_CLEARQ_DONE); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET); + + t7xx_md_exception(md, HIF_EX_ALLQ_RESET); +} + +static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct t7xx_modem *md; + + md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL); + if (!md) + return NULL; + + md->t7xx_dev = t7xx_dev; + t7xx_dev->md = md; + spin_lock_init(&md->exp_lock); + md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, + 0, "md_hk_wq"); + if (!md->handshake_wq) + return NULL; + + INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= + FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); + return md; +} + +int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + + md->md_init_finish = false; + md->exp_id = 0; + t7xx_fsm_reset(md); + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); + t7xx_port_proxy_reset(md->port_prox); + md->md_init_finish = true; + return t7xx_core_reset(md); +} + +/** + * t7xx_md_init() - Initialize modem. + * @t7xx_dev: MTK device. + * + * Allocate and initialize MD control block, and initialize data path. + * Register MHCCIF ISR and RGU ISR, and start the state machine. + * + * Return: + ** 0 - Success. + ** -ENOMEM - Allocation failure. 
+ */ +int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md; + int ret; + + md = t7xx_md_alloc(t7xx_dev); + if (!md) + return -ENOMEM; + + ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev); + if (ret) + goto err_destroy_hswq; + + ret = t7xx_fsm_init(md); + if (ret) + goto err_destroy_hswq; + + ret = t7xx_ccmni_init(t7xx_dev); + if (ret) + goto err_uninit_fsm; + + ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); + if (ret) + goto err_uninit_ccmni; + + ret = t7xx_port_proxy_init(md); + if (ret) + goto err_uninit_md_cldma; + + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); + if (ret) /* fsm_uninit flushes cmd queue */ + goto err_uninit_proxy; + + t7xx_md_sys_sw_init(t7xx_dev); + md->md_init_finish = true; + return 0; + +err_uninit_proxy: + t7xx_port_proxy_uninit(md->port_prox); + +err_uninit_md_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + +err_uninit_ccmni: + t7xx_ccmni_exit(t7xx_dev); + +err_uninit_fsm: + t7xx_fsm_uninit(md); + +err_destroy_hswq: + destroy_workqueue(md->handshake_wq); + dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n"); + return ret; +} + +void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + + if (!md->md_init_finish) + return; + + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + t7xx_port_proxy_uninit(md->port_prox); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + t7xx_ccmni_exit(t7xx_dev); + t7xx_fsm_uninit(md); + destroy_workqueue(md->handshake_wq); +} diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h new file mode 100644 index 0000000000..7469ed636a --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#ifndef __T7XX_MODEM_OPS_H__ +#define __T7XX_MODEM_OPS_H__ + +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_pci.h" + +#define FEATURE_COUNT 64 + +/** + * enum hif_ex_stage - HIF exception handshake stages with the HW. + * @HIF_EX_INIT: Disable and clear TXQ. + * @HIF_EX_INIT_DONE: Polling for initialization to be done. + * @HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. + * @HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. 
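+ *
+ * The host drives each stage via t7xx_md_exception() and waits for the
+ * matching D2H_INT_EXCEPTION_* acknowledgment from the device before
+ * advancing, see t7xx_md_exception_handshake().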
+ */ +enum hif_ex_stage { + HIF_EX_INIT, + HIF_EX_INIT_DONE, + HIF_EX_CLEARQ_DONE, + HIF_EX_ALLQ_RESET, +}; + +struct mtk_runtime_feature { + u8 feature_id; + u8 support_info; + u8 reserved[2]; + __le32 data_len; + __le32 data[]; +}; + +enum md_event_id { + FSM_PRE_START, + FSM_START, + FSM_READY, +}; + +struct t7xx_sys_info { + bool ready; + bool handshake_ongoing; + u8 feature_set[FEATURE_COUNT]; + struct t7xx_port *ctl_port; +}; + +struct t7xx_modem { + struct cldma_ctrl *md_ctrl[CLDMA_NUM]; + struct t7xx_pci_dev *t7xx_dev; + struct t7xx_sys_info core_md; + bool md_init_finish; + bool rgu_irq_asserted; + struct workqueue_struct *handshake_wq; + struct work_struct handshake_work; + struct t7xx_fsm_ctl *fsm_ctl; + struct port_proxy *port_prox; + unsigned int exp_id; + spinlock_t exp_lock; /* Protects exception events */ +}; + +void t7xx_md_exception_handshake(struct t7xx_modem *md); +void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id); +int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev); +int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev); +void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev); +int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_MODEM_OPS_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c new file mode 100644 index 0000000000..c6b6547f2c --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_netdev.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Chandrashekar Devegowda + * Haijun Liu + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Moises Veleta + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_netdev.h" +#include "t7xx_pci.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define IP_MUX_SESSION_DEFAULT 0 + +static int t7xx_ccmni_open(struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + atomic_inc(&ccmni->usage); + return 0; +} + +static int t7xx_ccmni_close(struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + + atomic_dec(&ccmni->usage); + netif_carrier_off(dev); + netif_tx_disable(dev); + return 0; +} + +static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb, + unsigned int txq_number) +{ + struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb; + struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb); + + skb_cb->netif_idx = ccmni->index; + + if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb)) + return NETDEV_TX_BUSY; + + return 0; +} + +static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + int skb_len = skb->len; + + /* If MTU is changed or there is no headroom, drop the packet */ + if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) { + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE)) + return NETDEV_TX_BUSY; + + dev->stats.tx_packets++; + 
dev->stats.tx_bytes += skb_len; + + return NETDEV_TX_OK; +} + +static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue) +{ + struct t7xx_ccmni *ccmni = netdev_priv(dev); + + dev->stats.tx_errors++; + + if (atomic_read(&ccmni->usage) > 0) + netif_tx_wake_all_queues(dev); +} + +static const struct net_device_ops ccmni_netdev_ops = { + .ndo_open = t7xx_ccmni_open, + .ndo_stop = t7xx_ccmni_close, + .ndo_start_xmit = t7xx_ccmni_start_xmit, + .ndo_tx_timeout = t7xx_ccmni_tx_timeout, +}; + +static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) { + netif_tx_start_all_queues(ccmni->dev); + netif_carrier_on(ccmni->dev); + } + } +} + +static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) + netif_tx_disable(ccmni->dev); + } +} + +static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) + netif_carrier_off(ccmni->dev); + } +} + +static void t7xx_ccmni_wwan_setup(struct net_device *dev) +{ + dev->hard_header_len += sizeof(struct ccci_header); + + dev->mtu = ETH_DATA_LEN; + dev->max_mtu = CCMNI_MTU_MAX; + BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE); + + dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO; + + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + + dev->features = NETIF_F_VLAN_CHALLENGED; + + dev->features |= NETIF_F_SG; + dev->hw_features |= NETIF_F_SG; + + dev->features |= NETIF_F_HW_CSUM; + dev->hw_features |= NETIF_F_HW_CSUM; + + dev->features |= NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_RXCSUM; + + dev->needs_free_netdev = true; + + dev->type = ARPHRD_NONE; + + dev->netdev_ops = &ccmni_netdev_ops; +} + +static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id, + struct netlink_ext_ack *extack) +{ + struct t7xx_ccmni_ctrl *ctlb = ctxt; + struct t7xx_ccmni *ccmni; + int ret; + + if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) + return -EINVAL; + + ccmni = wwan_netdev_drvpriv(dev); + ccmni->index = if_id; + ccmni->ctlb = ctlb; + ccmni->dev = dev; + atomic_set(&ccmni->usage, 0); + ctlb->ccmni_inst[if_id] = ccmni; + + ret = register_netdevice(dev); + if (ret) + return ret; + + netif_device_attach(dev); + return 0; +} + +static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + struct t7xx_ccmni_ctrl *ctlb = ctxt; + u8 if_id = ccmni->index; + + if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) + return; + + if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni)) + return; + + unregister_netdevice(dev); +} + +static const struct wwan_ops ccmni_wwan_ops = { + .priv_size = sizeof(struct t7xx_ccmni), + .setup = t7xx_ccmni_wwan_setup, + .newlink = t7xx_ccmni_wwan_newlink, + .dellink = t7xx_ccmni_wwan_dellink, +}; + +static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb) +{ + struct device *dev = ctlb->hif_ctrl->dev; + int ret; + + if (ctlb->wwan_is_registered) + return 0; + + /* WWAN core will create a netdev for the default IP MUX channel */ + ret = 
wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT); + if (ret < 0) { + dev_err(dev, "Unable to register WWAN ops, %d\n", ret); + return ret; + } + + ctlb->wwan_is_registered = true; + return 0; +} + +static int t7xx_ccmni_md_state_callback(enum md_state state, void *para) +{ + struct t7xx_ccmni_ctrl *ctlb = para; + struct device *dev; + int ret = 0; + + dev = ctlb->hif_ctrl->dev; + ctlb->md_sta = state; + + switch (state) { + case MD_STATE_READY: + ret = t7xx_ccmni_register_wwan(ctlb); + if (!ret) + t7xx_ccmni_start(ctlb); + break; + + case MD_STATE_EXCEPTION: + case MD_STATE_STOPPED: + t7xx_ccmni_pre_stop(ctlb); + + ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); + if (ret < 0) + dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); + + t7xx_ccmni_post_stop(ctlb); + break; + + case MD_STATE_WAITING_FOR_HS1: + case MD_STATE_WAITING_TO_STOP: + ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); + if (ret < 0) + dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); + + break; + + default: + break; + } + + return ret; +} + +static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + struct t7xx_fsm_notifier *md_status_notifier; + + md_status_notifier = &ctlb->md_status_notify; + INIT_LIST_HEAD(&md_status_notifier->entry); + md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback; + md_status_notifier->data = ctlb; + + t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier); +} + +static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb) +{ + struct t7xx_skb_cb *skb_cb; + struct net_device *net_dev; + struct t7xx_ccmni *ccmni; + int pkt_type, skb_len; + u8 netif_id; + + skb_cb = T7XX_SKB_CB(skb); + netif_id = skb_cb->netif_idx; + ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id]; + if (!ccmni) { + dev_kfree_skb(skb); + return; + } + + net_dev = ccmni->dev; + skb->dev = net_dev; + + pkt_type = skb_cb->rx_pkt_type; + if (pkt_type == PKT_TYPE_IP6) + skb->protocol = htons(ETH_P_IPV6); + else + skb->protocol = htons(ETH_P_IP); + + skb_len = skb->len; + netif_rx(skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += skb_len; +} + +static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) +{ + struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct netdev_queue *net_queue; + + if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) { + net_queue = netdev_get_tx_queue(ccmni->dev, qno); + if (netif_tx_queue_stopped(net_queue)) + netif_tx_wake_queue(net_queue); + } +} + +static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) +{ + struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct netdev_queue *net_queue; + + if (atomic_read(&ccmni->usage) > 0) { + netdev_err(ccmni->dev, "TX queue %d is full\n", qno); + net_queue = netdev_get_tx_queue(ccmni->dev, qno); + netif_tx_stop_queue(net_queue); + } +} + +static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int qno) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + + if (ctlb->md_sta != MD_STATE_READY) + return; + + if (!ctlb->ccmni_inst[0]) { + dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n"); + return; + } + + if (state == DMPAIF_TXQ_STATE_IRQ) + t7xx_ccmni_queue_tx_irq_notify(ctlb, qno); + else if (state == DMPAIF_TXQ_STATE_FULL) + t7xx_ccmni_queue_tx_full_notify(ctlb, qno); +} + +int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct 
device *dev = &t7xx_dev->pdev->dev; + struct t7xx_ccmni_ctrl *ctlb; + + ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL); + if (!ctlb) + return -ENOMEM; + + t7xx_dev->ccmni_ctlb = ctlb; + ctlb->t7xx_dev = t7xx_dev; + ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify; + ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb; + ctlb->nic_dev_num = NIC_DEV_DEFAULT; + + ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks); + if (!ctlb->hif_ctrl) + return -ENOMEM; + + init_md_status_notifier(t7xx_dev); + return 0; +} + +void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + + t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify); + + if (ctlb->wwan_is_registered) { + wwan_unregister_ops(&t7xx_dev->pdev->dev); + ctlb->wwan_is_registered = false; + } + + t7xx_dpmaif_hif_exit(ctlb->hif_ctrl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h new file mode 100644 index 0000000000..f5ad49ca12 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_netdev.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Ricardo Martinez + */ + +#ifndef __T7XX_NETDEV_H__ +#define __T7XX_NETDEV_H__ + +#include +#include +#include + +#include "t7xx_hif_dpmaif.h" +#include "t7xx_pci.h" +#include "t7xx_state_monitor.h" + +#define RXQ_NUM DPMAIF_RXQ_NUM +#define NIC_DEV_MAX 21 +#define NIC_DEV_DEFAULT 2 + +#define CCMNI_NETDEV_WDT_TO (1 * HZ) +#define CCMNI_MTU_MAX 3000 + +struct t7xx_ccmni { + u8 index; + atomic_t usage; + struct net_device *dev; + struct t7xx_ccmni_ctrl *ctlb; +}; + +struct t7xx_ccmni_ctrl { + struct t7xx_pci_dev *t7xx_dev; + struct dpmaif_ctrl *hif_ctrl; + struct t7xx_ccmni *ccmni_inst[NIC_DEV_MAX]; + struct dpmaif_callbacks callbacks; + unsigned int nic_dev_num; + unsigned int md_sta; + struct t7xx_fsm_notifier md_status_notify; + bool wwan_is_registered; +}; + +int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_NETDEV_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c new file mode 100644 index 0000000000..871f2a27a3 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -0,0 +1,761 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Moises Veleta + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define T7XX_PCI_IREG_BASE 0 +#define T7XX_PCI_EREG_BASE 2 + +#define PM_SLEEP_DIS_TIMEOUT_MS 20 +#define PM_ACK_TIMEOUT_MS 1500 +#define PM_AUTOSUSPEND_MS 20000 +#define PM_RESOURCE_POLL_TIMEOUT_US 10000 +#define PM_RESOURCE_POLL_STEP_US 100 + +enum t7xx_pm_state { + MTK_PM_EXCEPTION, + MTK_PM_INIT, /* Device initialized, but handshake not completed */ + MTK_PM_SUSPENDED, + MTK_PM_RESUMED, +}; + +static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable) +{ + void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL; + u32 value; + + value = ioread32(ctrl_reg); + + if (enable) + value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS; + else + value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS; + + iowrite32(value, ctrl_reg); +} + +static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) +{ + int ret, val; + + ret = read_poll_timeout(ioread32, val, + (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK, + PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true, + IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); + if (ret == -ETIMEDOUT) + dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n"); + + return ret; +} + +static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct pci_dev *pdev = t7xx_dev->pdev; + + INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); + mutex_init(&t7xx_dev->md_pm_entity_mtx); + spin_lock_init(&t7xx_dev->md_pm_lock); + init_completion(&t7xx_dev->sleep_lock_acquire); + init_completion(&t7xx_dev->pm_sr_ack); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + + device_init_wakeup(&pdev->dev, true); + dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | + DPM_FLAG_NO_DIRECT_COMPLETE); + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); + pm_runtime_use_autosuspend(&pdev->dev); + + return t7xx_wait_pm_config(t7xx_dev); +} + +void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) +{ + /* Enable the PCIe resource lock only after MD deep sleep is done */ + t7xx_mhccif_mask_clr(t7xx_dev, + D2H_INT_DS_LOCK_ACK | + D2H_INT_SUSPEND_ACK | + D2H_INT_RESUME_ACK | + D2H_INT_SUSPEND_ACK_AP | + D2H_INT_RESUME_ACK_AP); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + + pm_runtime_put_noidle(&t7xx_dev->pdev->dev); +} + +static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) +{ + /* The device is kept in FSM re-init flow + * so just roll back PM setting to the init setting. 
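+ * That is: PM state back to MTK_PM_INIT, a runtime PM reference held,
+ * and ASPM kept disabled until t7xx_pci_pm_init_late() runs again.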
+ */ + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + + pm_runtime_get_noresume(&t7xx_dev->pdev->dev); + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + return t7xx_wait_pm_config(t7xx_dev); +} + +void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev) +{ + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + t7xx_wait_pm_config(t7xx_dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION); +} + +int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) +{ + struct md_pm_entity *entity; + + mutex_lock(&t7xx_dev->md_pm_entity_mtx); + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->id == pm_entity->id) { + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return -EEXIST; + } + } + + list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities); + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return 0; +} + +int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) +{ + struct md_pm_entity *entity, *tmp_entity; + + mutex_lock(&t7xx_dev->md_pm_entity_mtx); + list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->id == pm_entity->id) { + list_del(&pm_entity->entity); + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return 0; + } + } + + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + + return -ENXIO; +} + +int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + int ret; + + ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, + msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS)); + if (!ret) + dev_err_ratelimited(dev, "Resource wait complete timed out\n"); + + return ret; +} + +/** + * t7xx_pci_disable_sleep() - Disable deep sleep capability. + * @t7xx_dev: MTK device. + * + * Lock the deep sleep capability, note that the device can still go into deep sleep + * state while device is in D0 state, from the host's point-of-view. + * + * If device is in deep sleep state, wake up the device and disable deep sleep capability. + */ +void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); + t7xx_dev->sleep_disable_count++; + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) + goto unlock_and_complete; + + if (t7xx_dev->sleep_disable_count == 1) { + u32 status; + + reinit_completion(&t7xx_dev->sleep_lock_acquire); + t7xx_dev_set_sleep_capability(t7xx_dev, false); + + status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); + if (status & T7XX_PCIE_RESOURCE_STS_MSK) + goto unlock_and_complete; + + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK); + } + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); + return; + +unlock_and_complete: + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); + complete_all(&t7xx_dev->sleep_lock_acquire); +} + +/** + * t7xx_pci_enable_sleep() - Enable deep sleep capability. + * @t7xx_dev: MTK device. + * + * After enabling deep sleep, device can enter into deep sleep state. 
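+ *
+ * Disable/enable calls are reference counted: the capability is actually
+ * re-enabled only when sleep_disable_count drops back to zero, i.e. when
+ * every t7xx_pci_disable_sleep() caller has released its lock.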
+ */ +void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); + t7xx_dev->sleep_disable_count--; + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) + goto unlock; + + if (t7xx_dev->sleep_disable_count == 0) + t7xx_dev_set_sleep_capability(t7xx_dev, true); + +unlock: + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); +} + +static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request) +{ + unsigned long wait_ret; + + reinit_completion(&t7xx_dev->pm_sr_ack); + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request); + wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, + msecs_to_jiffies(PM_ACK_TIMEOUT_MS)); + if (!wait_ret) + return -ETIMEDOUT; + + return 0; +} + +static int __t7xx_pci_pm_suspend(struct pci_dev *pdev) +{ + enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID; + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity *entity; + int ret; + + t7xx_dev = pci_get_drvdata(pdev); + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { + dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); + return -EFAULT; + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + ret = t7xx_wait_pm_config(t7xx_dev); + if (ret) { + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return ret; + } + + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_dev->rgu_pci_irq_en = false; + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (!entity->suspend) + continue; + + ret = entity->suspend(t7xx_dev, entity->entity_param); + if (ret) { + entity_id = entity->id; + dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id); + goto abort_suspend; + } + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ); + if (ret) { + dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret); + goto abort_suspend; + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP); + if (ret) { + t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); + dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret); + goto abort_suspend; + } + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->suspend_late) + entity->suspend_late(t7xx_dev, entity->entity_param); + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return 0; + +abort_suspend: + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity_id == entity->id) + break; + + if (entity->resume) + entity->resume(t7xx_dev, entity->entity_param); + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + return ret; +} + +static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); + + /* Disable interrupt first and let the IPs enable them */ + iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0); + + /* Device disables PCIe interrupts during resume and + * following function will re-enable PCIe interrupts. 
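+ * Only the MHCCIF source is re-armed here; as noted above, the remaining
+ * sources are left for the owning IPs to re-enable once they are ready.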
+ */ + t7xx_pcie_mac_interrupts_en(t7xx_dev); + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); +} + +static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3) +{ + int ret; + + ret = pcim_enable_device(t7xx_dev->pdev); + if (ret) + return ret; + + t7xx_pcie_mac_atr_init(t7xx_dev); + t7xx_pcie_interrupt_reinit(t7xx_dev); + + if (is_d3) { + t7xx_mhccif_init(t7xx_dev); + return t7xx_pci_pm_reinit(t7xx_dev); + } + + return 0; +} + +static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event) +{ + struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl; + struct device *dev = &t7xx_dev->pdev->dev; + int ret = -EINVAL; + + switch (event) { + case FSM_CMD_STOP: + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + break; + + case FSM_CMD_START: + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0); + break; + + default: + break; + } + + if (ret) + dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret); + + return ret; +} + +static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) +{ + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity *entity; + u32 prev_state; + int ret = 0; + + t7xx_dev = pci_get_drvdata(pdev); + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return 0; + } + + t7xx_pcie_mac_interrupts_en(t7xx_dev); + prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE); + + if (state_check) { + /* For D3/L3 resume, the device could boot so quickly that the + * initial value of the dummy register might be overwritten. + * Identify new boots if the ATR source address register is not initialized. 
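+ * An uninitialized (reset) value there indicates a fresh device boot that
+ * needs the full FSM stop / PCIe re-init / FSM start cycle below.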
+ */ + u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) + + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR); + if (prev_state == PM_RESUME_REG_STATE_L3 || + (prev_state == PM_RESUME_REG_STATE_INIT && + atr_reg_val == ATR_SRC_ADDR_INVALID)) { + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); + if (ret) + return ret; + + ret = t7xx_pcie_reinit(t7xx_dev, true); + if (ret) + return ret; + + t7xx_clear_rgu_irq(t7xx_dev); + return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START); + } + + if (prev_state == PM_RESUME_REG_STATE_EXP || + prev_state == PM_RESUME_REG_STATE_L2_EXP) { + if (prev_state == PM_RESUME_REG_STATE_L2_EXP) { + ret = t7xx_pcie_reinit(t7xx_dev, false); + if (ret) + return ret; + } + + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + + t7xx_mhccif_mask_clr(t7xx_dev, + D2H_INT_EXCEPTION_INIT | + D2H_INT_EXCEPTION_INIT_DONE | + D2H_INT_EXCEPTION_CLEARQ_DONE | + D2H_INT_EXCEPTION_ALLQ_RESET | + D2H_INT_PORT_ENUM); + + return ret; + } + + if (prev_state == PM_RESUME_REG_STATE_L2) { + ret = t7xx_pcie_reinit(t7xx_dev, false); + if (ret) + return ret; + + } else if (prev_state != PM_RESUME_REG_STATE_L1 && + prev_state != PM_RESUME_REG_STATE_INIT) { + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); + if (ret) + return ret; + + t7xx_clear_rgu_irq(t7xx_dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + return 0; + } + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + t7xx_wait_pm_config(t7xx_dev); + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->resume_early) + entity->resume_early(t7xx_dev, entity->entity_param); + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); + if (ret) + dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret); + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP); + if (ret) + dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret); + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->resume) { + ret = entity->resume(t7xx_dev, entity->entity_param); + if (ret) + dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n", + entity->id, ret); + } + } + + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + pm_runtime_mark_last_busy(&pdev->dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + + return ret; +} + +static int t7xx_pci_pm_resume_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct t7xx_pci_dev *t7xx_dev; + + t7xx_dev = pci_get_drvdata(pdev); + t7xx_pcie_mac_interrupts_dis(t7xx_dev); + + return 0; +} + +static void t7xx_pci_shutdown(struct pci_dev *pdev) +{ + __t7xx_pci_pm_suspend(pdev); +} + +static int t7xx_pci_pm_suspend(struct device *dev) +{ + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); +} + +static int t7xx_pci_pm_resume(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); +} + +static int t7xx_pci_pm_thaw(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), false); +} + +static int t7xx_pci_pm_runtime_suspend(struct device *dev) +{ + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); +} + +static int t7xx_pci_pm_runtime_resume(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); +} + +static const struct dev_pm_ops t7xx_pci_pm_ops = { + .suspend = t7xx_pci_pm_suspend, + .resume = t7xx_pci_pm_resume, + .resume_noirq = t7xx_pci_pm_resume_noirq, + 
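/* Hibernation callbacks reuse the suspend/resume paths */
+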
.freeze = t7xx_pci_pm_suspend, + .thaw = t7xx_pci_pm_thaw, + .poweroff = t7xx_pci_pm_suspend, + .restore = t7xx_pci_pm_resume, + .restore_noirq = t7xx_pci_pm_resume_noirq, + .runtime_suspend = t7xx_pci_pm_runtime_suspend, + .runtime_resume = t7xx_pci_pm_runtime_resume +}; + +static int t7xx_request_irq(struct pci_dev *pdev) +{ + struct t7xx_pci_dev *t7xx_dev; + int ret = 0, i; + + t7xx_dev = pci_get_drvdata(pdev); + + for (i = 0; i < EXT_INT_NUM; i++) { + const char *irq_descr; + int irq_vec; + + if (!t7xx_dev->intr_handler[i]) + continue; + + irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", + dev_driver_string(&pdev->dev), i); + if (!irq_descr) { + ret = -ENOMEM; + break; + } + + irq_vec = pci_irq_vector(pdev, i); + ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i], + t7xx_dev->intr_thread[i], 0, irq_descr, + t7xx_dev->callback_param[i]); + if (ret) { + dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret); + break; + } + } + + if (ret) { + while (i--) { + if (!t7xx_dev->intr_handler[i]) + continue; + + free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); + } + } + + return ret; +} + +static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev) +{ + struct pci_dev *pdev = t7xx_dev->pdev; + int ret; + + /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */ + ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret); + return ret; + } + + ret = t7xx_request_irq(pdev); + if (ret) { + pci_free_irq_vectors(pdev); + return ret; + } + + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); + return 0; +} + +static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev) +{ + int ret, i; + + if (!t7xx_dev->pdev->msix_cap) + return -EINVAL; + + ret = t7xx_setup_msix(t7xx_dev); + if (ret) + return ret; + + /* IPs enable interrupts when ready */ + for (i = 0; i < EXT_INT_NUM; i++) + t7xx_pcie_mac_set_int(t7xx_dev, i); + + return 0; +} + +static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base + + INFRACFG_AO_DEV_CHIP - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; +} + +static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct t7xx_pci_dev *t7xx_dev; + int ret; + + t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); + if (!t7xx_dev) + return -ENOMEM; + + pci_set_drvdata(pdev, t7xx_dev); + t7xx_dev->pdev = pdev; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE), + pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "Could not request BARs: %d\n", ret); + return -ENOMEM; + } + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret); + return ret; + } + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret); + return ret; + } + + IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; + t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; + + ret = t7xx_pci_pm_init(t7xx_dev); + if (ret) + return ret; + + t7xx_pcie_mac_atr_init(t7xx_dev); + t7xx_pci_infracfg_ao_calc(t7xx_dev); + t7xx_mhccif_init(t7xx_dev); + + ret = t7xx_md_init(t7xx_dev); + if (ret) + return ret; + + 
t7xx_pcie_mac_interrupts_dis(t7xx_dev); + + ret = t7xx_interrupt_init(t7xx_dev); + if (ret) { + t7xx_md_exit(t7xx_dev); + return ret; + } + + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); + t7xx_pcie_mac_interrupts_en(t7xx_dev); + + return 0; +} + +static void t7xx_pci_remove(struct pci_dev *pdev) +{ + struct t7xx_pci_dev *t7xx_dev; + int i; + + t7xx_dev = pci_get_drvdata(pdev); + t7xx_md_exit(t7xx_dev); + + for (i = 0; i < EXT_INT_NUM; i++) { + if (!t7xx_dev->intr_handler[i]) + continue; + + free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); + } + + pci_free_irq_vectors(t7xx_dev->pdev); +} + +static const struct pci_device_id t7xx_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) }, + { } +}; +MODULE_DEVICE_TABLE(pci, t7xx_pci_table); + +static struct pci_driver t7xx_pci_driver = { + .name = "mtk_t7xx", + .id_table = t7xx_pci_table, + .probe = t7xx_pci_probe, + .remove = t7xx_pci_remove, + .driver.pm = &t7xx_pci_pm_ops, + .shutdown = t7xx_pci_shutdown, +}; + +module_pci_driver(t7xx_pci_driver); + +MODULE_AUTHOR("MediaTek Inc"); +MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h new file mode 100644 index 0000000000..50b37056ce --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Moises Veleta + */ + +#ifndef __T7XX_PCI_H__ +#define __T7XX_PCI_H__ + +#include +#include +#include +#include +#include +#include + +#include "t7xx_reg.h" + +/* struct t7xx_addr_base - holds base addresses + * @pcie_mac_ireg_base: PCIe MAC register base + * @pcie_ext_reg_base: used to calculate base addresses for CLDMA, DPMA and MHCCIF registers + * @pcie_dev_reg_trsl_addr: used to calculate the register base address + * @infracfg_ao_base: base address used in CLDMA reset operations + * @mhccif_rc_base: host view of MHCCIF rc base addr + */ +struct t7xx_addr_base { + void __iomem *pcie_mac_ireg_base; + void __iomem *pcie_ext_reg_base; + u32 pcie_dev_reg_trsl_addr; + void __iomem *infracfg_ao_base; + void __iomem *mhccif_rc_base; +}; + +typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); + +/* struct t7xx_pci_dev - MTK device context structure + * @intr_handler: array of handler function for request_threaded_irq + * @intr_thread: array of thread_fn for request_threaded_irq + * @callback_param: array of cookie passed back to interrupt functions + * @pdev: PCI device + * @base_addr: memory base addresses of HW components + * @md: modem interface + * @ccmni_ctlb: context structure used to control the network data path + * @rgu_pci_irq_en: RGU callback ISR registered and active + * @md_pm_entities: list of pm entities + * @md_pm_entity_mtx: protects md_pm_entities list + * @pm_sr_ack: ack from the device when went to sleep or woke up + * @md_pm_state: state for resume/suspend + * @md_pm_lock: protects PCIe sleep lock + * @sleep_disable_count: PCIe L1.2 lock counter + * @sleep_lock_acquire: indicates that sleep has been disabled + */ +struct t7xx_pci_dev { + t7xx_intr_callback intr_handler[EXT_INT_NUM]; + t7xx_intr_callback intr_thread[EXT_INT_NUM]; + void *callback_param[EXT_INT_NUM]; + struct pci_dev *pdev; + struct t7xx_addr_base base_addr; + struct t7xx_modem *md; + struct 
t7xx_ccmni_ctrl *ccmni_ctlb; + bool rgu_pci_irq_en; + + /* Low Power Items */ + struct list_head md_pm_entities; + struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ + struct completion pm_sr_ack; + atomic_t md_pm_state; + spinlock_t md_pm_lock; /* Protects PCI resource lock */ + unsigned int sleep_disable_count; + struct completion sleep_lock_acquire; +}; + +enum t7xx_pm_id { + PM_ENTITY_ID_CTRL1, + PM_ENTITY_ID_CTRL2, + PM_ENTITY_ID_DATA, + PM_ENTITY_ID_INVALID +}; + +/* struct md_pm_entity - device power management entity + * @entity: list of PM Entities + * @suspend: callback invoked before sending D3 request to device + * @suspend_late: callback invoked after getting D3 ACK from device + * @resume_early: callback invoked before sending the resume request to device + * @resume: callback invoked after getting resume ACK from device + * @id: unique PM entity identifier + * @entity_param: parameter passed to the registered callbacks + * + * This structure is used to indicate PM operations required by internal + * HW modules such as CLDMA and DPMA. + */ +struct md_pm_entity { + struct list_head entity; + int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + enum t7xx_pm_id id; + void *entity_param; +}; + +void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); +int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); +void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_PCI_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c new file mode 100644 index 0000000000..76da4c15e3 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Moises Veleta + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Ricardo Martinez + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" + +#define T7XX_PCIE_REG_BAR 2 +#define T7XX_PCIE_REG_PORT ATR_SRC_PCI_WIN0 +#define T7XX_PCIE_REG_TABLE_NUM 0 +#define T7XX_PCIE_REG_TRSL_PORT ATR_DST_AXIM_0 + +#define T7XX_PCIE_DEV_DMA_PORT_START ATR_SRC_AXIS_0 +#define T7XX_PCIE_DEV_DMA_PORT_END ATR_SRC_AXIS_2 +#define T7XX_PCIE_DEV_DMA_TABLE_NUM 0 +#define T7XX_PCIE_DEV_DMA_TRSL_ADDR 0 +#define T7XX_PCIE_DEV_DMA_SRC_ADDR 0 +#define T7XX_PCIE_DEV_DMA_TRANSPARENT 1 +#define T7XX_PCIE_DEV_DMA_SIZE 0 + +enum t7xx_atr_src_port { + ATR_SRC_PCI_WIN0, + ATR_SRC_PCI_WIN1, + ATR_SRC_AXIS_0, + ATR_SRC_AXIS_1, + ATR_SRC_AXIS_2, + ATR_SRC_AXIS_3, +}; + +enum t7xx_atr_dst_port { + ATR_DST_PCI_TRX, + ATR_DST_PCI_CONFIG, + ATR_DST_AXIM_0 = 4, + ATR_DST_AXIM_1, + ATR_DST_AXIM_2, + ATR_DST_AXIM_3, +}; + +struct t7xx_atr_config { + u64 src_addr; + u64 trsl_addr; + u64 size; + u32 port; + u32 table; + enum t7xx_atr_dst_port trsl_id; + u32 transparent; +}; + +static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port) +{ + void __iomem *reg; + int i, offset; + + for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) { + offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i; + reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; + iowrite64(0, reg); + } +} + +static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg) +{ + struct device *dev = &t7xx_dev->pdev->dev; + void __iomem *pbase = IREG_BASE(t7xx_dev); + int atr_size, pos, offset; + void __iomem *reg; + u64 value; + + if (cfg->transparent) { + /* No address conversion is performed */ + atr_size = ATR_TRANSPARENT_SIZE; + } else { + if (cfg->src_addr & (cfg->size - 1)) { + dev_err(dev, "Source address is not aligned to size\n"); + return -EINVAL; + } + + if (cfg->trsl_addr & (cfg->size - 1)) { + dev_err(dev, "Translation address %llx is not aligned to size %llx\n", + cfg->trsl_addr, cfg->size - 1); + return -EINVAL; + } + + pos = __ffs64(cfg->size); + + /* HW calculates the address translation space as 2^(atr_size + 1) */ + atr_size = pos - 1; + } + + offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table; + + reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset; + value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT; + iowrite64(value, reg); + + reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset; + iowrite32(cfg->trsl_id, reg); + + reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; + value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0); + iowrite64(value, reg); + + /* Ensure ATR is set */ + ioread64(reg); + return 0; +} + +/** + * t7xx_pcie_mac_atr_init() - Initialize address translation. + * @t7xx_dev: MTK device. + * + * Setup ATR for ports & device. 
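+ * One window lets the RC reach the device registers through BAR2, while
+ * the AXIS DMA ports are configured as transparent (no translation) so
+ * the device can address host memory directly.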
+ */
+void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev)
+{
+	struct t7xx_atr_config cfg;
+	u32 i;
+
+	/* Disable for all ports */
+	for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++)
+		t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i);
+
+	memset(&cfg, 0, sizeof(cfg));
+	/* Config ATR for RC to access device's register */
+	cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR);
+	cfg.size = T7XX_PCIE_REG_SIZE_CHIP;
+	cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
+	cfg.port = T7XX_PCIE_REG_PORT;
+	cfg.table = T7XX_PCIE_REG_TABLE_NUM;
+	cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT;
+	t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
+	t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
+
+	t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
+
+	/* Config ATR for EP to access RC's memory */
+	for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) {
+		cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR;
+		cfg.size = T7XX_PCIE_DEV_DMA_SIZE;
+		cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR;
+		cfg.port = i;
+		cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM;
+		cfg.trsl_id = ATR_DST_PCI_TRX;
+		cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT;
+		t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
+		t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
+	}
+}
+
+/**
+ * t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts.
+ * @t7xx_dev: MTK device.
+ * @enable: Enable/disable.
+ *
+ * Enable or disable device interrupts.
+ */
+static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable)
+{
+	u32 value;
+
+	value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
+
+	if (enable)
+		value &= ~ISTAT_HST_CTRL_DIS;
+	else
+		value |= ISTAT_HST_CTRL_DIS;
+
+	iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
+}
+
+void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev)
+{
+	t7xx_pcie_mac_enable_disable_int(t7xx_dev, true);
+}
+
+void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev)
+{
+	t7xx_pcie_mac_enable_disable_int(t7xx_dev, false);
+}
+
+/**
+ * t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type.
+ * @t7xx_dev: MTK device.
+ * @int_type: Interrupt type.
+ * @clear: Clear/set.
+ *
+ * Clear or set device interrupt by type.
+ */
+static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev,
+					enum t7xx_int int_type, bool clear)
+{
+	void __iomem *reg;
+	u32 val;
+
+	if (clear)
+		reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0;
+	else
+		reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0;
+
+	val = BIT(EXT_INT_START + int_type);
+	iowrite32(val, reg);
+}
+
+void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+	t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true);
+}
+
+void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+	t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false);
+}
+
+/**
+ * t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type.
+ * @t7xx_dev: MTK device.
+ * @int_type: Interrupt type.
+ *
+ * Clear the device interrupt status by type.
+ */
+void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+	void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0;
+	u32 val = BIT(EXT_INT_START + int_type);
+
+	iowrite32(val, reg);
+}
+
+/**
+ * t7xx_pcie_set_mac_msix_cfg() - Write MSIX control configuration.
+ * @t7xx_dev: MTK device.
+ * @irq_count: Number of MSIX IRQ vectors.
+ *
+ * Write IRQ count to device.
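+ *
+ * irq_count is expected to be a power of two (see t7xx_setup_msix());
+ * e.g. for 8 vectors the value written is ffs(8) * 2 - 1 = 7.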
+ */ +void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count) +{ + u32 val = ffs(irq_count) * 2 - 1; + + iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX); +} diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.h b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h new file mode 100644 index 0000000000..50336fa7e8 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Moises Veleta + * Ricardo Martinez + */ + +#ifndef __T7XX_PCIE_MAC_H__ +#define __T7XX_PCIE_MAC_H__ + +#include "t7xx_pci.h" +#include "t7xx_reg.h" + +#define IREG_BASE(t7xx_dev) ((t7xx_dev)->base_addr.pcie_mac_ireg_base) + +void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count); + +#endif /* __T7XX_PCIE_MAC_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h new file mode 100644 index 0000000000..dc4133eb43 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chandrashekar Devegowda + * Eliot Lee + */ + +#ifndef __T7XX_PORT_H__ +#define __T7XX_PORT_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_pci.h" + +#define PORT_CH_ID_MASK GENMASK(7, 0) + +/* Channel ID and Message ID definitions. 
+ * The channel number consists of peer_id(15:12) , channel_id(11:0) + * peer_id: + * 0:reserved, 1: to sAP, 2: to MD + */ +enum port_ch { + /* to MD */ + PORT_CH_CONTROL_RX = 0x2000, + PORT_CH_CONTROL_TX = 0x2001, + PORT_CH_UART1_RX = 0x2006, /* META */ + PORT_CH_UART1_TX = 0x2008, + PORT_CH_UART2_RX = 0x200a, /* AT */ + PORT_CH_UART2_TX = 0x200c, + PORT_CH_MD_LOG_RX = 0x202a, /* MD logging */ + PORT_CH_MD_LOG_TX = 0x202b, + PORT_CH_LB_IT_RX = 0x203e, /* Loop back test */ + PORT_CH_LB_IT_TX = 0x203f, + PORT_CH_STATUS_RX = 0x2043, /* Status events */ + PORT_CH_MIPC_RX = 0x20ce, /* MIPC */ + PORT_CH_MIPC_TX = 0x20cf, + PORT_CH_MBIM_RX = 0x20d0, + PORT_CH_MBIM_TX = 0x20d1, + PORT_CH_DSS0_RX = 0x20d2, + PORT_CH_DSS0_TX = 0x20d3, + PORT_CH_DSS1_RX = 0x20d4, + PORT_CH_DSS1_TX = 0x20d5, + PORT_CH_DSS2_RX = 0x20d6, + PORT_CH_DSS2_TX = 0x20d7, + PORT_CH_DSS3_RX = 0x20d8, + PORT_CH_DSS3_TX = 0x20d9, + PORT_CH_DSS4_RX = 0x20da, + PORT_CH_DSS4_TX = 0x20db, + PORT_CH_DSS5_RX = 0x20dc, + PORT_CH_DSS5_TX = 0x20dd, + PORT_CH_DSS6_RX = 0x20de, + PORT_CH_DSS6_TX = 0x20df, + PORT_CH_DSS7_RX = 0x20e0, + PORT_CH_DSS7_TX = 0x20e1, +}; + +struct t7xx_port; +struct port_ops { + int (*init)(struct t7xx_port *port); + int (*recv_skb)(struct t7xx_port *port, struct sk_buff *skb); + void (*md_state_notify)(struct t7xx_port *port, unsigned int md_state); + void (*uninit)(struct t7xx_port *port); + int (*enable_chl)(struct t7xx_port *port); + int (*disable_chl)(struct t7xx_port *port); +}; + +struct t7xx_port_conf { + enum port_ch tx_ch; + enum port_ch rx_ch; + unsigned char txq_index; + unsigned char rxq_index; + unsigned char txq_exp_index; + unsigned char rxq_exp_index; + enum cldma_id path_id; + struct port_ops *ops; + char *name; + enum wwan_port_type port_type; +}; + +struct t7xx_port { + /* Members not initialized in definition */ + const struct t7xx_port_conf *port_conf; + struct wwan_port *wwan_port; + struct t7xx_pci_dev *t7xx_dev; + struct device *dev; + u16 seq_nums[2]; /* TX/RX sequence numbers */ + atomic_t usage_cnt; + struct list_head entry; + struct list_head queue_entry; + /* TX and RX flows are asymmetric since ports are multiplexed on + * queues. + * + * TX: data blocks are sent directly to a queue. Each port + * does not maintain a TX list; instead, they only provide + * a wait_queue_head for blocking writes. + * + * RX: Each port uses a RX list to hold packets, + * allowing the modem to dispatch RX packet as quickly as possible. + */ + struct sk_buff_head rx_skb_list; + spinlock_t port_update_lock; /* Protects port configuration */ + wait_queue_head_t rx_wq; + int rx_length_th; + bool chan_enable; + struct task_struct *thread; +}; + +struct sk_buff *t7xx_port_alloc_skb(int payload); +struct sk_buff *t7xx_ctrl_alloc_skb(int payload); +int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb); +int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg); +int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, + unsigned int ex_msg); + +#endif /* __T7XX_PORT_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c new file mode 100644 index 0000000000..68430b130a --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Moises Veleta + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define PORT_MSG_VERSION GENMASK(31, 16) +#define PORT_MSG_PRT_CNT GENMASK(15, 0) + +struct port_msg { + __le32 head_pattern; + __le32 info; + __le32 tail_pattern; + __le32 data[]; +}; + +static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg) +{ + struct sk_buff *skb; + int ret; + + skb = t7xx_ctrl_alloc_skb(0); + if (!skb) + return -ENOMEM; + + ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg); + if (ret) + dev_kfree_skb_any(skb); + + return ret; +} + +static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl, + struct sk_buff *skb) +{ + struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data; + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + enum md_state md_state; + int ret = -EINVAL; + + md_state = t7xx_fsm_get_md_state(ctl); + if (md_state != MD_STATE_EXCEPTION) { + dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n", + ctrl_msg_h->ex_msg, md_state); + return -EINVAL; + } + + switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { + case CTL_ID_MD_EX: + if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) { + dev_err(dev, "Receive invalid MD_EX %x\n", ctrl_msg_h->ex_msg); + break; + } + + ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID); + if (ret) { + dev_err(dev, "Failed to send exception message to modem\n"); + break; + } + + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception event"); + + break; + + case CTL_ID_MD_EX_ACK: + if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) { + dev_err(dev, "Receive invalid MD_EX_ACK %x\n", ctrl_msg_h->ex_msg); + break; + } + + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception Received event"); + + break; + + case CTL_ID_MD_EX_PASS: + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception Passed event"); + + break; + + case CTL_ID_DRV_VER_ERROR: + dev_err(dev, "AP/MD driver version mismatch\n"); + } + + return ret; +} + +/** + * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes. + * @md: Modem context. + * @msg: Message. + * + * Used to control create/remove device node. + * + * Return: + * * 0 - Success. + * * -EFAULT - Message check failure. 
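+ *
+ * A message passes the check only when info[31:16] carries PORT_ENUM_VER
+ * and the head/tail patterns match; each data[] word then holds a channel
+ * ID plus an enable flag for the matching port.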
+ */ +int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + unsigned int version, port_count, i; + struct port_msg *port_msg = msg; + + version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info)); + if (version != PORT_ENUM_VER || + le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN || + le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) { + dev_err(dev, "Invalid port control message %x:%x:%x\n", + version, le32_to_cpu(port_msg->head_pattern), + le32_to_cpu(port_msg->tail_pattern)); + return -EFAULT; + } + + port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info)); + for (i = 0; i < port_count; i++) { + u32 port_info = le32_to_cpu(port_msg->data[i]); + unsigned int ch_id; + bool en_flag; + + ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info); + en_flag = port_info & PORT_INFO_ENFLG; + if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag)) + dev_dbg(dev, "Port:%x not found\n", ch_id); + } + + return 0; +} + +static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + struct ctrl_msg_header *ctrl_msg_h; + int ret = 0; + + ctrl_msg_h = (struct ctrl_msg_header *)skb->data; + switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { + case CTL_ID_HS2_MSG: + skb_pull(skb, sizeof(*ctrl_msg_h)); + + if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, + le32_to_cpu(ctrl_msg_h->data_length)); + if (ret) + dev_err(port->dev, "Failed to append Handshake 2 event"); + } + + dev_kfree_skb_any(skb); + break; + + case CTL_ID_MD_EX: + case CTL_ID_MD_EX_ACK: + case CTL_ID_MD_EX_PASS: + case CTL_ID_DRV_VER_ERROR: + ret = fsm_ee_message_handler(port, ctl, skb); + dev_kfree_skb_any(skb); + break; + + case CTL_ID_PORT_ENUM: + skb_pull(skb, sizeof(*ctrl_msg_h)); + ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data); + if (!ret) + ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0); + else + ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, + PORT_ENUM_VER_MISMATCH); + + break; + + default: + ret = -EINVAL; + dev_err(port->dev, "Unknown control message ID to FSM %x\n", + le32_to_cpu(ctrl_msg_h->ctrl_msg_id)); + break; + } + + if (ret) + dev_err(port->dev, "%s control message handle error: %d\n", port_conf->name, ret); + + return ret; +} + +static int port_ctl_rx_thread(void *arg) +{ + while (!kthread_should_stop()) { + struct t7xx_port *port = arg; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&port->rx_wq.lock, flags); + if (skb_queue_empty(&port->rx_skb_list) && + wait_event_interruptible_locked_irq(port->rx_wq, + !skb_queue_empty(&port->rx_skb_list) || + kthread_should_stop())) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + continue; + } + if (kthread_should_stop()) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + break; + } + skb = __skb_dequeue(&port->rx_skb_list); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + control_msg_handler(port, skb); + } + + return 0; +} + +static int port_ctl_init(struct t7xx_port *port) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + + port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name); + if (IS_ERR(port->thread)) { + dev_err(port->dev, "Failed to start port control thread\n"); + return PTR_ERR(port->thread); + } + + port->rx_length_th = CTRL_QUEUE_MAXLEN; + return 
0; +} + +static void port_ctl_uninit(struct t7xx_port *port) +{ + unsigned long flags; + struct sk_buff *skb; + + if (port->thread) + kthread_stop(port->thread); + + spin_lock_irqsave(&port->rx_wq.lock, flags); + port->rx_length_th = 0; + while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); +} + +struct port_ops ctl_port_ops = { + .init = port_ctl_init, + .recv_skb = t7xx_port_enqueue_skb, + .uninit = port_ctl_uninit, +}; diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c new file mode 100644 index 0000000000..d4de047ff0 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chandrashekar Devegowda + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_modem_ops.h" +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define Q_IDX_CTRL 0 +#define Q_IDX_MBIM 2 +#define Q_IDX_AT_CMD 5 + +#define INVALID_SEQ_NUM GENMASK(15, 0) + +#define for_each_proxy_port(i, p, proxy) \ + for (i = 0, (p) = &(proxy)->ports[i]; \ + i < (proxy)->port_count; \ + i++, (p) = &(proxy)->ports[i]) + +static const struct t7xx_port_conf t7xx_md_port_conf[] = { + { + .tx_ch = PORT_CH_UART2_TX, + .rx_ch = PORT_CH_UART2_RX, + .txq_index = Q_IDX_AT_CMD, + .rxq_index = Q_IDX_AT_CMD, + .txq_exp_index = 0xff, + .rxq_exp_index = 0xff, + .path_id = CLDMA_ID_MD, + .ops = &wwan_sub_port_ops, + .name = "AT", + .port_type = WWAN_PORT_AT, + }, { + .tx_ch = PORT_CH_MBIM_TX, + .rx_ch = PORT_CH_MBIM_RX, + .txq_index = Q_IDX_MBIM, + .rxq_index = Q_IDX_MBIM, + .path_id = CLDMA_ID_MD, + .ops = &wwan_sub_port_ops, + .name = "MBIM", + .port_type = WWAN_PORT_MBIM, + }, { + .tx_ch = PORT_CH_CONTROL_TX, + .rx_ch = PORT_CH_CONTROL_RX, + .txq_index = Q_IDX_CTRL, + .rxq_index = Q_IDX_CTRL, + .path_id = CLDMA_ID_MD, + .ops = &ctl_port_ops, + .name = "t7xx_ctrl", + }, +}; + +static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch) +{ + const struct t7xx_port_conf *port_conf; + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + port_conf = port->port_conf; + if (port_conf->rx_ch == ch || port_conf->tx_ch == ch) + return port; + } + + return NULL; +} + +static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h) +{ + u32 status = le32_to_cpu(ccci_h->status); + u16 seq_num, next_seq_num; + bool assert_bit; + + seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status); + next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD); + assert_bit = status & CCCI_H_AST_BIT; + if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM) + return next_seq_num; + + if (seq_num != port->seq_nums[MTK_RX]) + dev_warn_ratelimited(port->dev, + "seq num out-of-order %u != %u (header %X, len %X)\n", + seq_num, port->seq_nums[MTK_RX], + le32_to_cpu(ccci_h->packet_header), + le32_to_cpu(ccci_h->packet_len)); + + return next_seq_num; +} + +void t7xx_port_proxy_reset(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + int i; + + 
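+ /* Editor's note: RX restarts from INVALID_SEQ_NUM so the first packet received after a reset is accepted regardless of its sequence number. */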
for_each_proxy_port(i, port, port_prox) { + port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; + port->seq_nums[MTK_TX] = 0; + } +} + +static int t7xx_port_get_queue_no(struct t7xx_port *port) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + + return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ? + port_conf->txq_exp_index : port_conf->txq_index; +} + +static void t7xx_port_struct_init(struct t7xx_port *port) +{ + INIT_LIST_HEAD(&port->entry); + INIT_LIST_HEAD(&port->queue_entry); + skb_queue_head_init(&port->rx_skb_list); + init_waitqueue_head(&port->rx_wq); + port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; + port->seq_nums[MTK_TX] = 0; + atomic_set(&port->usage_cnt, 0); +} + +struct sk_buff *t7xx_port_alloc_skb(int payload) +{ + struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL); + + if (skb) + skb_reserve(skb, sizeof(struct ccci_header)); + + return skb; +} + +struct sk_buff *t7xx_ctrl_alloc_skb(int payload) +{ + struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header)); + + if (skb) + skb_reserve(skb, sizeof(struct ctrl_msg_header)); + + return skb; +} + +/** + * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list. + * @port: port context. + * @skb: received skb. + * + * Return: + * * 0 - Success. + * * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed. + */ +int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&port->rx_wq.lock, flags); + if (port->rx_skb_list.qlen >= port->rx_length_th) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + return -ENOBUFS; + } + __skb_queue_tail(&port->rx_skb_list, skb); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + wake_up_all(&port->rx_wq); + return 0; +} + +static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + enum cldma_id path_id = port->port_conf->path_id; + struct cldma_ctrl *md_ctrl; + int ret, tx_qno; + + md_ctrl = port->t7xx_dev->md->md_ctrl[path_id]; + tx_qno = t7xx_port_get_queue_no(port); + ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb); + if (ret) + dev_err(port->dev, "Failed to send skb: %d\n", ret); + + return ret; +} + +static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb, + unsigned int pkt_header, unsigned int ex_msg) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct ccci_header *ccci_h; + u32 status; + int ret; + + ccci_h = skb_push(skb, sizeof(*ccci_h)); + status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) | + FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT; + ccci_h->status = cpu_to_le32(status); + ccci_h->packet_header = cpu_to_le32(pkt_header); + ccci_h->packet_len = cpu_to_le32(skb->len); + ccci_h->ex_msg = cpu_to_le32(ex_msg); + + ret = t7xx_port_send_raw_skb(port, skb); + if (ret) + return ret; + + port->seq_nums[MTK_TX]++; + return 0; +} + +int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, + unsigned int ex_msg) +{ + struct ctrl_msg_header *ctrl_msg_h; + unsigned int msg_len = skb->len; + u32 pkt_header = 0; + + ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h)); + ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg); + ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg); + ctrl_msg_h->data_length = cpu_to_le32(msg_len); + + if (!msg_len) + pkt_header = CCCI_HEADER_NO_DATA; + + return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); +} + 
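+/* Editorial sketch, not part of the patch: the helpers above nest two headers, so a control message goes on the wire as [ccci_header][ctrl_msg_header][payload], with a zero-length payload flagged by CCCI_HEADER_NO_DATA. Assuming a valid control port is at hand, a minimal send looks like: + * + * struct sk_buff *skb = t7xx_ctrl_alloc_skb(0); + * + * if (skb) + * t7xx_port_send_ctl_skb(port, skb, CTL_ID_PORT_ENUM, 0); + */ +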
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg) +{ + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + unsigned int fsm_state; + + fsm_state = t7xx_fsm_get_ctl_state(ctl); + if (fsm_state != FSM_STATE_PRE_START) { + const struct t7xx_port_conf *port_conf = port->port_conf; + enum md_state md_state = t7xx_fsm_get_md_state(ctl); + + switch (md_state) { + case MD_STATE_EXCEPTION: + if (port_conf->tx_ch != PORT_CH_MD_LOG_TX) + return -EBUSY; + break; + + case MD_STATE_WAITING_FOR_HS1: + case MD_STATE_WAITING_FOR_HS2: + case MD_STATE_STOPPED: + case MD_STATE_WAITING_TO_STOP: + case MD_STATE_INVALID: + return -ENODEV; + + default: + break; + } + } + + return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); +} + +static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + int i, j; + + for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++) + INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]); + + for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) { + for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++) + INIT_LIST_HEAD(&port_prox->queue_ports[j][i]); + } + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + enum cldma_id path_id = port_conf->path_id; + u8 ch_id; + + ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch); + list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]); + list_add_tail(&port->queue_entry, + &port_prox->queue_ports[path_id][port_conf->rxq_index]); + } +} + +static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev, + struct cldma_queue *queue, u16 channel) +{ + struct port_proxy *port_prox = t7xx_dev->md->port_prox; + struct list_head *port_list; + struct t7xx_port *port; + u8 ch_id; + + ch_id = FIELD_GET(PORT_CH_ID_MASK, channel); + port_list = &port_prox->rx_ch_ports[ch_id]; + list_for_each_entry(port, port_list, entry) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (queue->md_ctrl->hif_id == port_conf->path_id && + channel == port_conf->rx_ch) + return port; + } + + return NULL; +} + +/** + * t7xx_port_proxy_recv_skb() - Dispatch received skb. + * @queue: CLDMA queue. + * @skb: Socket buffer. + * + * Return: + * * 0 - Packet consumed. + * * -ERROR - Failed to process skb. 
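+ * + * A non-zero return tells the CLDMA layer to retry later: the CCCI header has been pushed back onto the skb, so the buffer can be re-submitted unmodified (editor's note).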
+ */ +static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) +{ + struct ccci_header *ccci_h = (struct ccci_header *)skb->data; + struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev; + struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl; + struct device *dev = queue->md_ctrl->dev; + const struct t7xx_port_conf *port_conf; + struct t7xx_port *port; + u16 seq_num, channel; + int ret; + + channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status)); + if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) { + dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel); + goto drop_skb; + } + + port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel); + if (!port) { + dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel); + goto drop_skb; + } + + seq_num = t7xx_port_next_rx_seq_num(port, ccci_h); + port_conf = port->port_conf; + skb_pull(skb, sizeof(*ccci_h)); + + ret = port_conf->ops->recv_skb(port, skb); + /* Error indicates to try again later */ + if (ret) { + skb_push(skb, sizeof(*ccci_h)); + return ret; + } + + port->seq_nums[MTK_RX] = seq_num; + return 0; + +drop_skb: + dev_kfree_skb_any(skb); + return 0; +} + +/** + * t7xx_port_proxy_md_status_notify() - Notify all ports of state. + * @port_prox: The port_proxy pointer. + * @state: Modem state. + * + * Called by t7xx_fsm. Dispatches the modem state to all ports that + * need to know about MD state transitions. + */ +void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state) +{ + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (port_conf->ops->md_state_notify) + port_conf->ops->md_state_notify(port, state); + } +} + +static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) +{ + struct port_proxy *port_prox = md->port_prox; + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + t7xx_port_struct_init(port); + + if (port_conf->tx_ch == PORT_CH_CONTROL_TX) + md->core_md.ctl_port = port; + + port->t7xx_dev = md->t7xx_dev; + port->dev = &md->t7xx_dev->pdev->dev; + spin_lock_init(&port->port_update_lock); + port->chan_enable = false; + + if (port_conf->ops->init) + port_conf->ops->init(port); + } + + t7xx_proxy_setup_ch_mapping(port_prox); +} + +static int t7xx_proxy_alloc(struct t7xx_modem *md) +{ + unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf); + struct device *dev = &md->t7xx_dev->pdev->dev; + struct port_proxy *port_prox; + int i; + + port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count, + GFP_KERNEL); + if (!port_prox) + return -ENOMEM; + + md->port_prox = port_prox; + port_prox->dev = dev; + + for (i = 0; i < port_count; i++) + port_prox->ports[i].port_conf = &t7xx_md_port_conf[i]; + + port_prox->port_count = port_count; + t7xx_proxy_init_all_ports(md); + return 0; +} + +/** + * t7xx_port_proxy_init() - Initialize ports. + * @md: Modem. + * + * Create all port instances. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from a failed sub-initialization. 
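+ * + * The proxy and its embedded ports are devm-allocated, so t7xx_port_proxy_uninit() only tears down per-port state; the memory itself is released with the parent device (editor's note).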
+ */ +int t7xx_port_proxy_init(struct t7xx_modem *md) +{ + int ret; + + ret = t7xx_proxy_alloc(md); + if (ret) + return ret; + + t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); + return 0; +} + +void t7xx_port_proxy_uninit(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (port_conf->ops->uninit) + port_conf->ops->uninit(port); + } +} + +int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, + bool en_flag) +{ + struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id); + const struct t7xx_port_conf *port_conf; + + if (!port) + return -EINVAL; + + port_conf = port->port_conf; + + if (en_flag) { + if (port_conf->ops->enable_chl) + port_conf->ops->enable_chl(port); + } else { + if (port_conf->ops->disable_chl) + port_conf->ops->disable_chl(port); + } + + return 0; +} diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h new file mode 100644 index 0000000000..bc1ff5c6c7 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_PORT_PROXY_H__ +#define __T7XX_PORT_PROXY_H__ + +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_modem_ops.h" +#include "t7xx_port.h" + +#define MTK_QUEUES 16 +#define RX_QUEUE_MAXLEN 32 +#define CTRL_QUEUE_MAXLEN 16 + +struct port_proxy { + int port_count; + struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1]; + struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES]; + struct device *dev; + struct t7xx_port ports[]; +}; + +struct ccci_header { + __le32 packet_header; + __le32 packet_len; + __le32 status; + __le32 ex_msg; +}; + +/* Coupled with HW - indicates if there is data following the CCCI header or not */ +#define CCCI_HEADER_NO_DATA 0xffffffff + +#define CCCI_H_AST_BIT BIT(31) +#define CCCI_H_SEQ_FLD GENMASK(30, 16) +#define CCCI_H_CHN_FLD GENMASK(15, 0) + +struct ctrl_msg_header { + __le32 ctrl_msg_id; + __le32 ex_msg; + __le32 data_length; +}; + +/* Control identification numbers for AP<->MD messages */ +#define CTL_ID_HS1_MSG 0x0 +#define CTL_ID_HS2_MSG 0x1 +#define CTL_ID_HS3_MSG 0x2 +#define CTL_ID_MD_EX 0x4 +#define CTL_ID_DRV_VER_ERROR 0x5 +#define CTL_ID_MD_EX_ACK 0x6 +#define CTL_ID_MD_EX_PASS 0x8 +#define CTL_ID_PORT_ENUM 0x9 + +/* Modem exception check identification code - "EXCP" */ +#define MD_EX_CHK_ID 0x45584350 +/* Modem exception check acknowledge identification code - "EREC" */ +#define MD_EX_CHK_ACK_ID 0x45524543 + +#define PORT_INFO_RSRVD GENMASK(31, 16) +#define PORT_INFO_ENFLG BIT(15) +#define PORT_INFO_CH_ID GENMASK(14, 0) + +#define PORT_ENUM_VER 0 +#define PORT_ENUM_HEAD_PATTERN 0x5a5a5a5a +#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5 +#define PORT_ENUM_VER_MISMATCH 0x00657272 + +/* Port operations mapping */ +extern struct port_ops wwan_sub_port_ops; +extern struct port_ops ctl_port_ops; + +void t7xx_port_proxy_reset(struct port_proxy *port_prox); +void t7xx_port_proxy_uninit(struct port_proxy *port_prox); +int t7xx_port_proxy_init(struct t7xx_modem *md); +void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, 
unsigned int state); +int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg); +int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, + bool en_flag); + +#endif /* __T7XX_PORT_PROXY_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c new file mode 100644 index 0000000000..33931bfd78 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Chandrashekar Devegowda + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +static int t7xx_port_ctrl_start(struct wwan_port *port) +{ + struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); + + if (atomic_read(&port_mtk->usage_cnt)) + return -EBUSY; + + atomic_inc(&port_mtk->usage_cnt); + return 0; +} + +static void t7xx_port_ctrl_stop(struct wwan_port *port) +{ + struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); + + atomic_dec(&port_mtk->usage_cnt); +} + +static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) +{ + struct t7xx_port *port_private = wwan_port_get_drvdata(port); + size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU; + const struct t7xx_port_conf *port_conf; + struct t7xx_fsm_ctl *ctl; + enum md_state md_state; + + len = skb->len; + if (!len || !port_private->chan_enable) + return -EINVAL; + + port_conf = port_private->port_conf; + ctl = port_private->t7xx_dev->md->fsm_ctl; + md_state = t7xx_fsm_get_md_state(ctl); + if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) { + dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n", + port_conf->name, md_state); + return -ENODEV; + } + + for (offset = 0; offset < len; offset += chunk_len) { + struct sk_buff *skb_ccci; + int ret; + + chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header)); + skb_ccci = t7xx_port_alloc_skb(chunk_len); + if (!skb_ccci) + return -ENOMEM; + + skb_put_data(skb_ccci, skb->data + offset, chunk_len); + ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0); + if (ret) { + dev_kfree_skb_any(skb_ccci); + dev_err(port_private->dev, "Write error on %s port, %d\n", + port_conf->name, ret); + return ret; + } + } + + dev_kfree_skb(skb); + return 0; +} + +static const struct wwan_port_ops wwan_ops = { + .start = t7xx_port_ctrl_start, + .stop = t7xx_port_ctrl_stop, + .tx = t7xx_port_ctrl_tx, +}; + +static int t7xx_port_wwan_init(struct t7xx_port *port) +{ + port->rx_length_th = RX_QUEUE_MAXLEN; + return 0; +} + +static void t7xx_port_wwan_uninit(struct t7xx_port *port) +{ + if (!port->wwan_port) + return; + + port->rx_length_th = 0; + wwan_remove_port(port->wwan_port); + port->wwan_port = NULL; +} + +static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + if (!atomic_read(&port->usage_cnt) || !port->chan_enable) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + dev_kfree_skb_any(skb); + dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n", + port_conf->name); + /* Dropping skb, caller should not access skb.*/ + return 0; + } + + 
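+ /* Port is open and enabled: wwan_port_rx() consumes the skb and delivers it to the WWAN core (editor's note). */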
wwan_port_rx(port->wwan_port, skb); + return 0; +} + +static int t7xx_port_wwan_enable_chl(struct t7xx_port *port) +{ + spin_lock(&port->port_update_lock); + port->chan_enable = true; + spin_unlock(&port->port_update_lock); + + return 0; +} + +static int t7xx_port_wwan_disable_chl(struct t7xx_port *port) +{ + spin_lock(&port->port_update_lock); + port->chan_enable = false; + spin_unlock(&port->port_update_lock); + + return 0; +} + +static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (state != MD_STATE_READY) + return; + + if (!port->wwan_port) { + port->wwan_port = wwan_create_port(port->dev, port_conf->port_type, + &wwan_ops, port); + if (IS_ERR(port->wwan_port)) + dev_err(port->dev, "Unable to create WWAN port %s\n", port_conf->name); + } +} + +struct port_ops wwan_sub_port_ops = { + .init = t7xx_port_wwan_init, + .recv_skb = t7xx_port_wwan_recv_skb, + .uninit = t7xx_port_wwan_uninit, + .enable_chl = t7xx_port_wwan_enable_chl, + .disable_chl = t7xx_port_wwan_disable_chl, + .md_state_notify = t7xx_port_wwan_md_state_notify, +}; diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h new file mode 100644 index 0000000000..7c1b81091a --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_reg.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Chiranjeevi Rapolu + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + */ + +#ifndef __T7XX_REG_H__ +#define __T7XX_REG_H__ + +#include + +/* Device base address offset */ +#define MHCCIF_RC_DEV_BASE 0x10024000 + +#define REG_RC2EP_SW_BSY 0x04 +#define REG_RC2EP_SW_INT_START 0x08 + +#define REG_RC2EP_SW_TCHNUM 0x0c +#define H2D_CH_EXCEPTION_ACK 1 +#define H2D_CH_EXCEPTION_CLEARQ_ACK 2 +#define H2D_CH_DS_LOCK 3 +/* Channels 4-8 are reserved */ +#define H2D_CH_SUSPEND_REQ 9 +#define H2D_CH_RESUME_REQ 10 +#define H2D_CH_SUSPEND_REQ_AP 11 +#define H2D_CH_RESUME_REQ_AP 12 +#define H2D_CH_DEVICE_RESET 13 +#define H2D_CH_DRM_DISABLE_AP 14 + +#define REG_EP2RC_SW_INT_STS 0x10 +#define REG_EP2RC_SW_INT_ACK 0x14 +#define REG_EP2RC_SW_INT_EAP_MASK 0x20 +#define REG_EP2RC_SW_INT_EAP_MASK_SET 0x30 +#define REG_EP2RC_SW_INT_EAP_MASK_CLR 0x40 + +#define D2H_INT_DS_LOCK_ACK BIT(0) +#define D2H_INT_EXCEPTION_INIT BIT(1) +#define D2H_INT_EXCEPTION_INIT_DONE BIT(2) +#define D2H_INT_EXCEPTION_CLEARQ_DONE BIT(3) +#define D2H_INT_EXCEPTION_ALLQ_RESET BIT(4) +#define D2H_INT_PORT_ENUM BIT(5) +/* Bits 6-10 are reserved */ +#define D2H_INT_SUSPEND_ACK BIT(11) +#define D2H_INT_RESUME_ACK BIT(12) +#define D2H_INT_SUSPEND_ACK_AP BIT(13) +#define D2H_INT_RESUME_ACK_AP BIT(14) +#define D2H_INT_ASYNC_SAP_HK BIT(15) +#define D2H_INT_ASYNC_MD_HK BIT(16) + +/* Register base */ +#define INFRACFG_AO_DEV_CHIP 0x10001000 + +/* ATR setting */ +#define T7XX_PCIE_REG_TRSL_ADDR_CHIP 0x10000000 +#define T7XX_PCIE_REG_SIZE_CHIP 0x00400000 + +/* Reset Generic Unit (RGU) */ +#define TOPRGU_CH_PCIE_IRQ_STA 0x1000790c + +#define ATR_PORT_OFFSET 0x100 +#define ATR_TABLE_OFFSET 0x20 +#define ATR_TABLE_NUM_PER_ATR 8 +#define ATR_TRANSPARENT_SIZE 0x3f + +/* PCIE_MAC_IREG Register Definition */ + +#define ISTAT_HST_CTRL 0x01ac +#define ISTAT_HST_CTRL_DIS BIT(0) + +#define T7XX_PCIE_MISC_CTRL 0x0348 +#define T7XX_PCIE_MISC_MAC_SLEEP_DIS BIT(7) + +#define 
T7XX_PCIE_CFG_MSIX 0x03ec +#define ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR 0x0600 +#define ATR_PCIE_WIN0_T0_TRSL_ADDR 0x0608 +#define ATR_PCIE_WIN0_T0_TRSL_PARAM 0x0610 +#define ATR_PCIE_WIN0_ADDR_ALGMT GENMASK_ULL(63, 12) + +#define ATR_SRC_ADDR_INVALID 0x007f + +#define T7XX_PCIE_PM_RESUME_STATE 0x0d0c + +enum t7xx_pm_resume_state { + PM_RESUME_REG_STATE_L3, + PM_RESUME_REG_STATE_L1, + PM_RESUME_REG_STATE_INIT, + PM_RESUME_REG_STATE_EXP, + PM_RESUME_REG_STATE_L2, + PM_RESUME_REG_STATE_L2_EXP, +}; + +#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c +#define MISC_STAGE_MASK GENMASK(2, 0) +#define MISC_RESET_TYPE_PLDR BIT(26) +#define MISC_RESET_TYPE_FLDR BIT(27) +#define LINUX_STAGE 4 + +#define T7XX_PCIE_RESOURCE_STATUS 0x0d28 +#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0) + +#define DISABLE_ASPM_LOWPWR 0x0e50 +#define ENABLE_ASPM_LOWPWR 0x0e54 +#define T7XX_L1_BIT(i) BIT((i) * 4 + 1) +#define T7XX_L1_1_BIT(i) BIT((i) * 4 + 2) +#define T7XX_L1_2_BIT(i) BIT((i) * 4 + 3) + +#define MSIX_ISTAT_HST_GRP0_0 0x0f00 +#define IMASK_HOST_MSIX_SET_GRP0_0 0x3000 +#define IMASK_HOST_MSIX_CLR_GRP0_0 0x3080 +#define EXT_INT_START 24 +#define EXT_INT_NUM 8 +#define MSIX_MSK_SET_ALL GENMASK(31, 24) + +enum t7xx_int { + DPMAIF_INT, + CLDMA0_INT, + CLDMA1_INT, + CLDMA2_INT, + MHCCIF_INT, + DPMAIF2_INT, + SAP_RGU_INT, + CLDMA3_INT, +}; + +/* DPMA definitions */ + +#define DPMAIF_PD_BASE 0x1022d000 +#define BASE_DPMAIF_UL DPMAIF_PD_BASE +#define BASE_DPMAIF_DL (DPMAIF_PD_BASE + 0x100) +#define BASE_DPMAIF_AP_MISC (DPMAIF_PD_BASE + 0x400) +#define BASE_DPMAIF_MMW_HPC (DPMAIF_PD_BASE + 0x600) +#define BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX (DPMAIF_PD_BASE + 0x900) +#define BASE_DPMAIF_PD_SRAM_DL (DPMAIF_PD_BASE + 0xc00) +#define BASE_DPMAIF_PD_SRAM_UL (DPMAIF_PD_BASE + 0xd00) + +#define DPMAIF_AO_BASE 0x10014000 +#define BASE_DPMAIF_AO_UL DPMAIF_AO_BASE +#define BASE_DPMAIF_AO_DL (DPMAIF_AO_BASE + 0x400) + +#define DPMAIF_UL_ADD_DESC (BASE_DPMAIF_UL + 0x00) +#define DPMAIF_UL_CHK_BUSY (BASE_DPMAIF_UL + 0x88) +#define DPMAIF_UL_RESERVE_AO_RW (BASE_DPMAIF_UL + 0xac) +#define DPMAIF_UL_ADD_DESC_CH0 (BASE_DPMAIF_UL + 0xb0) + +#define DPMAIF_DL_BAT_INIT (BASE_DPMAIF_DL + 0x00) +#define DPMAIF_DL_BAT_ADD (BASE_DPMAIF_DL + 0x04) +#define DPMAIF_DL_BAT_INIT_CON0 (BASE_DPMAIF_DL + 0x08) +#define DPMAIF_DL_BAT_INIT_CON1 (BASE_DPMAIF_DL + 0x0c) +#define DPMAIF_DL_BAT_INIT_CON2 (BASE_DPMAIF_DL + 0x10) +#define DPMAIF_DL_BAT_INIT_CON3 (BASE_DPMAIF_DL + 0x50) +#define DPMAIF_DL_CHK_BUSY (BASE_DPMAIF_DL + 0xb4) + +#define DPMAIF_AP_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x00) +#define DPMAIF_AP_APDL_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x50) +#define DPMAIF_AP_IP_BUSY (BASE_DPMAIF_AP_MISC + 0x60) +#define DPMAIF_AP_CG_EN (BASE_DPMAIF_AP_MISC + 0x68) +#define DPMAIF_AP_OVERWRITE_CFG (BASE_DPMAIF_AP_MISC + 0x90) +#define DPMAIF_AP_MEM_CLR (BASE_DPMAIF_AP_MISC + 0x94) +#define DPMAIF_AP_ALL_L2TISAR0_MASK GENMASK(31, 0) +#define DPMAIF_AP_APDL_ALL_L2TISAR0_MASK GENMASK(31, 0) +#define DPMAIF_AP_IP_BUSY_MASK GENMASK(31, 0) + +#define DPMAIF_AO_UL_INIT_SET (BASE_DPMAIF_AO_UL + 0x0) +#define DPMAIF_AO_UL_CHNL_ARB0 (BASE_DPMAIF_AO_UL + 0x1c) +#define DPMAIF_AO_UL_AP_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x80) +#define DPMAIF_AO_UL_AP_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x84) +#define DPMAIF_AO_UL_AP_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x88) +#define DPMAIF_AO_UL_AP_L1TIMR0 (BASE_DPMAIF_AO_UL + 0x8c) +#define DPMAIF_AO_UL_APDL_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x90) +#define DPMAIF_AO_UL_APDL_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x94) +#define DPMAIF_AO_UL_APDL_L2TIMSR0 (BASE_DPMAIF_AO_UL + 
0x98) +#define DPMAIF_AO_AP_DLUL_IP_BUSY_MASK (BASE_DPMAIF_AO_UL + 0x9c) + +#define DPMAIF_AO_UL_CHNL0_CON0 (BASE_DPMAIF_PD_SRAM_UL + 0x10) +#define DPMAIF_AO_UL_CHNL0_CON1 (BASE_DPMAIF_PD_SRAM_UL + 0x14) +#define DPMAIF_AO_UL_CHNL0_CON2 (BASE_DPMAIF_PD_SRAM_UL + 0x18) +#define DPMAIF_AO_UL_CH0_STA (BASE_DPMAIF_PD_SRAM_UL + 0x70) + +#define DPMAIF_AO_DL_INIT_SET (BASE_DPMAIF_AO_DL + 0x00) +#define DPMAIF_AO_DL_IRQ_MASK (BASE_DPMAIF_AO_DL + 0x0c) +#define DPMAIF_AO_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_AO_DL + 0x28) +#define DPMAIF_AO_DL_DLQPIT_TRIG_THRES (BASE_DPMAIF_AO_DL + 0x34) + +#define DPMAIF_AO_DL_PKTINFO_CON0 (BASE_DPMAIF_PD_SRAM_DL + 0x00) +#define DPMAIF_AO_DL_PKTINFO_CON1 (BASE_DPMAIF_PD_SRAM_DL + 0x04) +#define DPMAIF_AO_DL_PKTINFO_CON2 (BASE_DPMAIF_PD_SRAM_DL + 0x08) +#define DPMAIF_AO_DL_RDY_CHK_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x0c) +#define DPMAIF_AO_DL_RDY_CHK_FRG_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x10) + +#define DPMAIF_AO_DL_DLQ_AGG_CFG (BASE_DPMAIF_PD_SRAM_DL + 0x20) +#define DPMAIF_AO_DL_DLQPIT_TIMEOUT0 (BASE_DPMAIF_PD_SRAM_DL + 0x24) +#define DPMAIF_AO_DL_DLQPIT_TIMEOUT1 (BASE_DPMAIF_PD_SRAM_DL + 0x28) +#define DPMAIF_AO_DL_HPC_CNTL (BASE_DPMAIF_PD_SRAM_DL + 0x38) +#define DPMAIF_AO_DL_PIT_SEQ_END (BASE_DPMAIF_PD_SRAM_DL + 0x40) + +#define DPMAIF_AO_DL_BAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xd8) +#define DPMAIF_AO_DL_BAT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xdc) +#define DPMAIF_AO_DL_PIT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xec) +#define DPMAIF_AO_DL_PIT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x60) +#define DPMAIF_AO_DL_FRGBAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x78) +#define DPMAIF_AO_DL_DLQ_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xa4) + +#define DPMAIF_HPC_INTR_MASK (BASE_DPMAIF_MMW_HPC + 0x0f4) +#define DPMA_HPC_ALL_INT_MASK GENMASK(15, 0) + +#define DPMAIF_HPC_DLQ_PATH_MODE 3 +#define DPMAIF_HPC_ADD_MODE_DF 0 +#define DPMAIF_HPC_TOTAL_NUM 8 +#define DPMAIF_HPC_MAX_TOTAL_NUM 8 + +#define DPMAIF_DL_DLQPIT_INIT (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x00) +#define DPMAIF_DL_DLQPIT_ADD (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x10) +#define DPMAIF_DL_DLQPIT_INIT_CON0 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x14) +#define DPMAIF_DL_DLQPIT_INIT_CON1 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x18) +#define DPMAIF_DL_DLQPIT_INIT_CON2 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x1c) +#define DPMAIF_DL_DLQPIT_INIT_CON3 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x20) +#define DPMAIF_DL_DLQPIT_INIT_CON4 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x24) +#define DPMAIF_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x28) +#define DPMAIF_DL_DLQPIT_INIT_CON6 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x2c) + +#define DPMAIF_ULQSAR_n(q) (DPMAIF_AO_UL_CHNL0_CON0 + 0x10 * (q)) +#define DPMAIF_UL_DRBSIZE_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON1 + 0x10 * (q)) +#define DPMAIF_UL_DRB_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON2 + 0x10 * (q)) +#define DPMAIF_ULQ_STA0_n(q) (DPMAIF_AO_UL_CH0_STA + 0x04 * (q)) +#define DPMAIF_ULQ_ADD_DESC_CH_n(q) (DPMAIF_UL_ADD_DESC_CH0 + 0x04 * (q)) + +#define DPMAIF_UL_DRB_RIDX_MSK GENMASK(31, 16) + +#define DPMAIF_AP_RGU_ASSERT 0x10001150 +#define DPMAIF_AP_RGU_DEASSERT 0x10001154 +#define DPMAIF_AP_RST_BIT BIT(2) + +#define DPMAIF_AP_AO_RGU_ASSERT 0x10001140 +#define DPMAIF_AP_AO_RGU_DEASSERT 0x10001144 +#define DPMAIF_AP_AO_RST_BIT BIT(6) + +/* DPMAIF init/restore */ +#define DPMAIF_UL_ADD_NOT_READY BIT(31) +#define DPMAIF_UL_ADD_UPDATE BIT(31) +#define DPMAIF_UL_ADD_COUNT_MASK GENMASK(15, 0) +#define DPMAIF_UL_ALL_QUE_ARB_EN GENMASK(11, 8) + +#define DPMAIF_DL_ADD_UPDATE BIT(31) +#define DPMAIF_DL_ADD_NOT_READY BIT(31) +#define 
DPMAIF_DL_FRG_ADD_UPDATE BIT(16) +#define DPMAIF_DL_ADD_COUNT_MASK GENMASK(15, 0) + +#define DPMAIF_DL_BAT_INIT_ALLSET BIT(0) +#define DPMAIF_DL_BAT_FRG_INIT BIT(16) +#define DPMAIF_DL_BAT_INIT_EN BIT(31) +#define DPMAIF_DL_BAT_INIT_NOT_READY BIT(31) +#define DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT 0 + +#define DPMAIF_DL_PIT_INIT_ALLSET BIT(0) +#define DPMAIF_DL_PIT_INIT_EN BIT(31) +#define DPMAIF_DL_PIT_INIT_NOT_READY BIT(31) + +#define DPMAIF_BAT_REMAIN_SZ_BASE 16 +#define DPMAIF_BAT_BUFFER_SZ_BASE 128 +#define DPMAIF_FRG_BUFFER_SZ_BASE 128 + +#define DLQ_PIT_IDX_SIZE 0x20 + +#define DPMAIF_PIT_SIZE_MSK GENMASK(17, 0) + +#define DPMAIF_PIT_REM_CNT_MSK GENMASK(17, 0) + +#define DPMAIF_BAT_EN_MSK BIT(16) +#define DPMAIF_FRG_EN_MSK BIT(28) +#define DPMAIF_BAT_SIZE_MSK GENMASK(15, 0) + +#define DPMAIF_BAT_BID_MAXCNT_MSK GENMASK(31, 16) +#define DPMAIF_BAT_REMAIN_MINSZ_MSK GENMASK(15, 8) +#define DPMAIF_PIT_CHK_NUM_MSK GENMASK(31, 24) +#define DPMAIF_BAT_BUF_SZ_MSK GENMASK(16, 8) +#define DPMAIF_FRG_BUF_SZ_MSK GENMASK(16, 8) +#define DPMAIF_BAT_RSV_LEN_MSK GENMASK(7, 0) +#define DPMAIF_PKT_ALIGN_MSK GENMASK(23, 22) + +#define DPMAIF_BAT_CHECK_THRES_MSK GENMASK(21, 16) +#define DPMAIF_FRG_CHECK_THRES_MSK GENMASK(7, 0) + +#define DPMAIF_PKT_ALIGN_EN BIT(23) + +#define DPMAIF_DRB_SIZE_MSK GENMASK(15, 0) + +#define DPMAIF_DL_RD_WR_IDX_MSK GENMASK(17, 0) + +/* DPMAIF_UL_CHK_BUSY */ +#define DPMAIF_UL_IDLE_STS BIT(11) +/* DPMAIF_DL_CHK_BUSY */ +#define DPMAIF_DL_IDLE_STS BIT(23) +/* DPMAIF_AO_DL_RDY_CHK_THRES */ +#define DPMAIF_DL_PKT_CHECKSUM_EN BIT(31) +#define DPMAIF_PORT_MODE_PCIE BIT(30) +#define DPMAIF_DL_BURST_PIT_EN BIT(13) +/* DPMAIF_DL_BAT_INIT_CON1 */ +#define DPMAIF_DL_BAT_CACHE_PRI BIT(22) +/* DPMAIF_AP_MEM_CLR */ +#define DPMAIF_MEM_CLR BIT(0) +/* DPMAIF_AP_OVERWRITE_CFG */ +#define DPMAIF_SRAM_SYNC BIT(0) +/* DPMAIF_AO_UL_INIT_SET */ +#define DPMAIF_UL_INIT_DONE BIT(0) +/* DPMAIF_AO_DL_INIT_SET */ +#define DPMAIF_DL_INIT_DONE BIT(0) +/* DPMAIF_AO_DL_PIT_SEQ_END */ +#define DPMAIF_DL_PIT_SEQ_MSK GENMASK(7, 0) +/* DPMAIF_UL_RESERVE_AO_RW */ +#define DPMAIF_PCIE_MODE_SET_VALUE 0x55 +/* DPMAIF_AP_CG_EN */ +#define DPMAIF_CG_EN 0x7f + +#define DPMAIF_UDL_IP_BUSY BIT(0) +#define DPMAIF_DL_INT_DLQ0_QDONE BIT(8) +#define DPMAIF_DL_INT_DLQ1_QDONE BIT(9) +#define DPMAIF_DL_INT_DLQ0_PITCNT_LEN BIT(10) +#define DPMAIF_DL_INT_DLQ1_PITCNT_LEN BIT(11) +#define DPMAIF_DL_INT_Q2TOQ1 BIT(24) +#define DPMAIF_DL_INT_Q2APTOP BIT(25) + +#define DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS GENMASK(15, 0) +#define DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK GENMASK(31, 16) + +/* DPMAIF DLQ HW configure */ +#define DPMAIF_AGG_MAX_LEN_DF 65535 +#define DPMAIF_AGG_TBL_ENT_NUM_DF 50 +#define DPMAIF_HASH_PRIME_DF 13 +#define DPMAIF_MID_TIMEOUT_THRES_DF 100 +#define DPMAIF_DLQ_TIMEOUT_THRES_DF 100 +#define DPMAIF_DLQ_PRS_THRES_DF 10 +#define DPMAIF_DLQ_HASH_BIT_CHOOSE_DF 0 + +#define DPMAIF_DLQPIT_EN_MSK BIT(20) +#define DPMAIF_DLQPIT_CHAN_OFS 16 +#define DPMAIF_ADD_DLQ_PIT_CHAN_OFS 20 + +#endif /* __T7XX_REG_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c new file mode 100644 index 0000000000..0bcca08ff2 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_port_proxy.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define FSM_DRM_DISABLE_DELAY_MS 200 +#define FSM_EVENT_POLL_INTERVAL_MS 20 +#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000 +#define FSM_MD_EX_PASS_TIMEOUT_MS 45000 +#define FSM_CMD_TIMEOUT_MS 2000 + +void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_add_tail(¬ifier->entry, &ctl->notifier_list); + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) +{ + struct t7xx_fsm_notifier *notifier_cur, *notifier_next; + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) { + if (notifier_cur == notifier) + list_del(¬ifier->entry); + } + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +static void fsm_state_notify(struct t7xx_modem *md, enum md_state state) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + struct t7xx_fsm_notifier *notifier; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_for_each_entry(notifier, &ctl->notifier_list, entry) { + spin_unlock_irqrestore(&ctl->notifier_lock, flags); + if (notifier->notifier_fn) + notifier->notifier_fn(state, notifier->data); + + spin_lock_irqsave(&ctl->notifier_lock, flags); + } + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) +{ + ctl->md_state = state; + + /* Update to port first, otherwise sending message on HS2 may fail */ + t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state); + fsm_state_notify(ctl->md, state); +} + +static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) +{ + if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + *cmd->ret = result; + complete_all(cmd->done); + } + + kfree(cmd); +} + +static void fsm_del_kf_event(struct t7xx_fsm_event *event) +{ + list_del(&event->entry); + kfree(event); +} + +static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + struct t7xx_fsm_event *event, *evt_next; + struct t7xx_fsm_command *cmd, *cmd_next; + unsigned long flags; + + spin_lock_irqsave(&ctl->command_lock, flags); + list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) { + dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id); + list_del(&cmd->entry); + fsm_finish_command(ctl, cmd, -EINVAL); + } + spin_unlock_irqrestore(&ctl->command_lock, flags); + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { + dev_warn(dev, "Unhandled event %d\n", event->event_id); + fsm_del_kf_event(event); + } + spin_unlock_irqrestore(&ctl->event_lock, flags); +} + +static void fsm_wait_for_event(struct 
t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected, + enum t7xx_fsm_event_state event_ignore, int retries) +{ + struct t7xx_fsm_event *event; + bool event_received = false; + unsigned long flags; + int cnt = 0; + + while (cnt++ < retries && !event_received) { + bool sleep_required = true; + + if (kthread_should_stop()) + return; + + spin_lock_irqsave(&ctl->event_lock, flags); + event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry); + if (event) { + event_received = event->event_id == event_expected; + if (event_received || event->event_id == event_ignore) { + fsm_del_kf_event(event); + sleep_required = false; + } + } + spin_unlock_irqrestore(&ctl->event_lock, flags); + + if (sleep_required) + msleep(FSM_EVENT_POLL_INTERVAL_MS); + } +} + +static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, + enum t7xx_ex_reason reason) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + + if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) { + if (cmd) + fsm_finish_command(ctl, cmd, -EINVAL); + + return; + } + + ctl->curr_state = FSM_STATE_EXCEPTION; + + switch (reason) { + case EXCEPTION_HS_TIMEOUT: + dev_err(dev, "Boot Handshake failure\n"); + break; + + case EXCEPTION_EVENT: + dev_err(dev, "Exception event\n"); + t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); + t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev); + t7xx_md_exception_handshake(ctl->md); + + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, + FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID, + FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); + break; + + default: + dev_err(dev, "Exception %d\n", reason); + break; + } + + if (cmd) + fsm_finish_command(ctl, cmd, 0); +} + +static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) +{ + ctl->curr_state = FSM_STATE_STOPPED; + + t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); + return t7xx_md_reset(ctl->md->t7xx_dev); +} + +static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + if (ctl->curr_state == FSM_STATE_STOPPED) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); +} + +static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + struct t7xx_pci_dev *t7xx_dev; + struct cldma_ctrl *md_ctrl; + int err; + + if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; + t7xx_dev = ctl->md->t7xx_dev; + + ctl->curr_state = FSM_STATE_STOPPING; + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); + t7xx_cldma_stop(md_ctrl); + + if (!ctl->md->rgu_irq_asserted) { + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); + /* Wait for the DRM disable to take effect */ + msleep(FSM_DRM_DISABLE_DELAY_MS); + + err = t7xx_acpi_fldr_func(t7xx_dev); + if (err) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); + } + + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); +} + +static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl->md_state != MD_STATE_WAITING_FOR_HS2) + return; + + ctl->md_state = MD_STATE_READY; + + fsm_state_notify(ctl->md, MD_STATE_READY); + t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY); +} + +static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) +{ 
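+ /* Editor's note: handshake phase 2 is complete; latch READY and fan the state out to the ports and to the registered FSM notifiers. */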
+ struct t7xx_modem *md = ctl->md; + + ctl->curr_state = FSM_STATE_READY; + t7xx_fsm_broadcast_ready_state(ctl); + t7xx_md_event_notify(md, FSM_READY); +} + +static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) +{ + struct t7xx_modem *md = ctl->md; + struct device *dev; + + ctl->curr_state = FSM_STATE_STARTING; + + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); + t7xx_md_event_notify(md, FSM_START); + + wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, + HZ * 60); + dev = &md->t7xx_dev->pdev->dev; + + if (ctl->exp_flg) + dev_err(dev, "MD exception is captured during handshake\n"); + + if (!md->core_md.ready) { + dev_err(dev, "MD handshake timeout\n"); + if (md->core_md.handshake_ongoing) + t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); + + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); + return -ETIMEDOUT; + } + + t7xx_pci_pm_init_late(md->t7xx_dev); + fsm_routine_ready(ctl); + return 0; +} + +static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + struct t7xx_modem *md = ctl->md; + u32 dev_status; + int ret; + + if (!md) + return; + + if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START && + ctl->curr_state != FSM_STATE_STOPPED) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + ctl->curr_state = FSM_STATE_PRE_START; + t7xx_md_event_notify(md, FSM_PRE_START); + + ret = read_poll_timeout(ioread32, dev_status, + (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000, + false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + if (ret) { + struct device *dev = &md->t7xx_dev->pdev->dev; + + fsm_finish_command(ctl, cmd, -ETIMEDOUT); + dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK); + return; + } + + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); + fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); +} + +static int fsm_main_thread(void *data) +{ + struct t7xx_fsm_ctl *ctl = data; + struct t7xx_fsm_command *cmd; + unsigned long flags; + + while (!kthread_should_stop()) { + if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&ctl->command_lock, flags); + cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry); + list_del(&cmd->entry); + spin_unlock_irqrestore(&ctl->command_lock, flags); + + switch (cmd->cmd_id) { + case FSM_CMD_START: + fsm_routine_start(ctl, cmd); + break; + + case FSM_CMD_EXCEPTION: + fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag)); + break; + + case FSM_CMD_PRE_STOP: + fsm_routine_stopping(ctl, cmd); + break; + + case FSM_CMD_STOP: + fsm_routine_stopped(ctl, cmd); + break; + + default: + fsm_finish_command(ctl, cmd, -EINVAL); + fsm_flush_event_cmd_qs(ctl); + break; + } + } + + return 0; +} + +int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) +{ + DECLARE_COMPLETION_ONSTACK(done); + struct t7xx_fsm_command *cmd; + unsigned long flags; + int ret; + + cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? 
GFP_ATOMIC : GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + INIT_LIST_HEAD(&cmd->entry); + cmd->cmd_id = cmd_id; + cmd->flag = flag; + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + cmd->done = &done; + cmd->ret = &ret; + } + + spin_lock_irqsave(&ctl->command_lock, flags); + list_add_tail(&cmd->entry, &ctl->command_queue); + spin_unlock_irqrestore(&ctl->command_lock, flags); + + wake_up(&ctl->command_wq); + + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + unsigned long wait_ret; + + wait_ret = wait_for_completion_timeout(&done, + msecs_to_jiffies(FSM_CMD_TIMEOUT_MS)); + if (!wait_ret) + return -ETIMEDOUT; + + return ret; + } + + return 0; +} + +int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, + unsigned char *data, unsigned int length) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + struct t7xx_fsm_event *event; + unsigned long flags; + + if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) { + dev_err(dev, "Invalid event %d\n", event_id); + return -EINVAL; + } + + event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); + if (!event) + return -ENOMEM; + + INIT_LIST_HEAD(&event->entry); + event->event_id = event_id; + event->length = length; + + if (data && length) + memcpy(event->data, data, length); + + spin_lock_irqsave(&ctl->event_lock, flags); + list_add_tail(&event->entry, &ctl->event_queue); + spin_unlock_irqrestore(&ctl->event_lock, flags); + + wake_up_all(&ctl->event_wq); + return 0; +} + +void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id) +{ + struct t7xx_fsm_event *event, *evt_next; + unsigned long flags; + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { + if (event->event_id == event_id) + fsm_del_kf_event(event); + } + spin_unlock_irqrestore(&ctl->event_lock, flags); +} + +enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl) + return ctl->md_state; + + return MD_STATE_INVALID; +} + +unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl) + return ctl->curr_state; + + return FSM_STATE_STOPPED; +} + +int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type) +{ + unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT; + + if (type == MD_IRQ_PORT_ENUM) { + return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags); + } else if (type == MD_IRQ_CCIF_EX) { + ctl->exp_flg = true; + wake_up(&ctl->async_hk_wq); + cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT); + return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags); + } + + return -EINVAL; +} + +void t7xx_fsm_reset(struct t7xx_modem *md) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + fsm_flush_event_cmd_qs(ctl); + ctl->curr_state = FSM_STATE_STOPPED; + ctl->exp_flg = false; +} + +int t7xx_fsm_init(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + struct t7xx_fsm_ctl *ctl; + + ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return -ENOMEM; + + md->fsm_ctl = ctl; + ctl->md = md; + ctl->curr_state = FSM_STATE_INIT; + INIT_LIST_HEAD(&ctl->command_queue); + INIT_LIST_HEAD(&ctl->event_queue); + init_waitqueue_head(&ctl->async_hk_wq); + init_waitqueue_head(&ctl->event_wq); + INIT_LIST_HEAD(&ctl->notifier_list); + init_waitqueue_head(&ctl->command_wq); + spin_lock_init(&ctl->event_lock); + spin_lock_init(&ctl->command_lock); + ctl->exp_flg = false; + spin_lock_init(&ctl->notifier_lock); + + ctl->fsm_thread = 
kthread_run(fsm_main_thread, ctl, "t7xx_fsm"); + return PTR_ERR_OR_ZERO(ctl->fsm_thread); +} + +void t7xx_fsm_uninit(struct t7xx_modem *md) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + if (!ctl) + return; + + if (ctl->fsm_thread) + kthread_stop(ctl->fsm_thread); + + fsm_flush_event_cmd_qs(ctl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h new file mode 100644 index 0000000000..b1af0259d4 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * + * Contributors: + * Eliot Lee + * Ricardo Martinez + * Sreehari Kancharla + */ + +#ifndef __T7XX_MONITOR_H__ +#define __T7XX_MONITOR_H__ + +#include +#include +#include +#include +#include + +#include "t7xx_modem_ops.h" + +enum t7xx_fsm_state { + FSM_STATE_INIT, + FSM_STATE_PRE_START, + FSM_STATE_STARTING, + FSM_STATE_READY, + FSM_STATE_EXCEPTION, + FSM_STATE_STOPPING, + FSM_STATE_STOPPED, +}; + +enum t7xx_fsm_event_state { + FSM_EVENT_INVALID, + FSM_EVENT_MD_HS2, + FSM_EVENT_MD_EX, + FSM_EVENT_MD_EX_REC_OK, + FSM_EVENT_MD_EX_PASS, + FSM_EVENT_MD_HS2_EXIT, + FSM_EVENT_MAX +}; + +enum t7xx_fsm_cmd_state { + FSM_CMD_INVALID, + FSM_CMD_START, + FSM_CMD_EXCEPTION, + FSM_CMD_PRE_STOP, + FSM_CMD_STOP, +}; + +enum t7xx_ex_reason { + EXCEPTION_HS_TIMEOUT, + EXCEPTION_EVENT, +}; + +enum t7xx_md_irq_type { + MD_IRQ_WDT, + MD_IRQ_CCIF_EX, + MD_IRQ_PORT_ENUM, +}; + +enum md_state { + MD_STATE_INVALID, + MD_STATE_WAITING_FOR_HS1, + MD_STATE_WAITING_FOR_HS2, + MD_STATE_READY, + MD_STATE_EXCEPTION, + MD_STATE_WAITING_TO_STOP, + MD_STATE_STOPPED, +}; + +#define FSM_CMD_FLAG_WAIT_FOR_COMPLETION BIT(0) +#define FSM_CMD_FLAG_FLIGHT_MODE BIT(1) +#define FSM_CMD_FLAG_IN_INTERRUPT BIT(2) +#define FSM_CMD_EX_REASON GENMASK(23, 16) + +struct t7xx_fsm_ctl { + struct t7xx_modem *md; + enum md_state md_state; + unsigned int curr_state; + struct list_head command_queue; + struct list_head event_queue; + wait_queue_head_t command_wq; + wait_queue_head_t event_wq; + wait_queue_head_t async_hk_wq; + spinlock_t event_lock; /* Protects event queue */ + spinlock_t command_lock; /* Protects command queue */ + struct task_struct *fsm_thread; + bool exp_flg; + spinlock_t notifier_lock; /* Protects notifier list */ + struct list_head notifier_list; +}; + +struct t7xx_fsm_event { + struct list_head entry; + enum t7xx_fsm_event_state event_id; + unsigned int length; + unsigned char data[]; +}; + +struct t7xx_fsm_command { + struct list_head entry; + enum t7xx_fsm_cmd_state cmd_id; + unsigned int flag; + struct completion *done; + int *ret; +}; + +struct t7xx_fsm_notifier { + struct list_head entry; + int (*notifier_fn)(enum md_state state, void *data); + void *data; +}; + +int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, + unsigned int flag); +int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, + unsigned char *data, unsigned int length); +void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id); +void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state); +void t7xx_fsm_reset(struct t7xx_modem *md); +int t7xx_fsm_init(struct t7xx_modem *md); +void t7xx_fsm_uninit(struct t7xx_modem *md); +int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type); +enum md_state 
t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl); +unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl); +void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); +void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); + +#endif /* __T7XX_MONITOR_H__ */ diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig new file mode 100644 index 0000000000..4514f44362 --- /dev/null +++ b/drivers/nvme/common/Kconfig @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config NVME_COMMON + tristate diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile new file mode 100644 index 0000000000..720c625b8a --- /dev/null +++ b/drivers/nvme/common/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +ccflags-y += -I$(src) + +obj-$(CONFIG_NVME_COMMON) += nvme-common.o + +nvme-common-y += auth.o diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c new file mode 100644 index 0000000000..04bd28f17d --- /dev/null +++ b/drivers/nvme/common/auth.c @@ -0,0 +1,483 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020 Hannes Reinecke, SUSE Linux + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static u32 nvme_dhchap_seqnum; +static DEFINE_MUTEX(nvme_dhchap_mutex); + +u32 nvme_auth_get_seqnum(void) +{ + u32 seqnum; + + mutex_lock(&nvme_dhchap_mutex); + if (!nvme_dhchap_seqnum) + nvme_dhchap_seqnum = prandom_u32(); + else { + nvme_dhchap_seqnum++; + if (!nvme_dhchap_seqnum) + nvme_dhchap_seqnum++; + } + seqnum = nvme_dhchap_seqnum; + mutex_unlock(&nvme_dhchap_mutex); + return seqnum; +} +EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum); + +static struct nvme_auth_dhgroup_map { + const char name[16]; + const char kpp[16]; +} dhgroup_map[] = { + [NVME_AUTH_DHGROUP_NULL] = { + .name = "null", .kpp = "null" }, + [NVME_AUTH_DHGROUP_2048] = { + .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" }, + [NVME_AUTH_DHGROUP_3072] = { + .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" }, + [NVME_AUTH_DHGROUP_4096] = { + .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" }, + [NVME_AUTH_DHGROUP_6144] = { + .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" }, + [NVME_AUTH_DHGROUP_8192] = { + .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" }, +}; + +const char *nvme_auth_dhgroup_name(u8 dhgroup_id) +{ + if (dhgroup_id >= ARRAY_SIZE(dhgroup_map)) + return NULL; + return dhgroup_map[dhgroup_id].name; +} +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name); + +const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id) +{ + if (dhgroup_id >= ARRAY_SIZE(dhgroup_map)) + return NULL; + return dhgroup_map[dhgroup_id].kpp; +} +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp); + +u8 nvme_auth_dhgroup_id(const char *dhgroup_name) +{ + int i; + + if (!dhgroup_name || !strlen(dhgroup_name)) + return NVME_AUTH_DHGROUP_INVALID; + for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) { + if (!strlen(dhgroup_map[i].name)) + continue; + if (!strncmp(dhgroup_map[i].name, dhgroup_name, + strlen(dhgroup_map[i].name))) + return i; + } + return NVME_AUTH_DHGROUP_INVALID; +} +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id); + +static struct nvme_dhchap_hash_map { + int len; + const char hmac[15]; + const char digest[8]; +} hash_map[] = { + [NVME_AUTH_HASH_SHA256] = { + .len = 32, + .hmac = "hmac(sha256)", + .digest = "sha256", + }, + [NVME_AUTH_HASH_SHA384] = { + .len = 48, + .hmac = "hmac(sha384)", + .digest = "sha384", + }, + [NVME_AUTH_HASH_SHA512] = { + .len = 64, + .hmac = "hmac(sha512)", + .digest = "sha512", + }, 
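+ /* Editor's note: indexed by the NVME_AUTH_HASH_* identifiers; unused slots stay zero-filled and are skipped by the lookup helpers below. */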
+}; + +const char *nvme_auth_hmac_name(u8 hmac_id) +{ + if (hmac_id >= ARRAY_SIZE(hash_map)) + return NULL; + return hash_map[hmac_id].hmac; +} +EXPORT_SYMBOL_GPL(nvme_auth_hmac_name); + +const char *nvme_auth_digest_name(u8 hmac_id) +{ + if (hmac_id >= ARRAY_SIZE(hash_map)) + return NULL; + return hash_map[hmac_id].digest; +} +EXPORT_SYMBOL_GPL(nvme_auth_digest_name); + +u8 nvme_auth_hmac_id(const char *hmac_name) +{ + int i; + + if (!hmac_name || !strlen(hmac_name)) + return NVME_AUTH_HASH_INVALID; + + for (i = 0; i < ARRAY_SIZE(hash_map); i++) { + if (!strlen(hash_map[i].hmac)) + continue; + if (!strncmp(hash_map[i].hmac, hmac_name, + strlen(hash_map[i].hmac))) + return i; + } + return NVME_AUTH_HASH_INVALID; +} +EXPORT_SYMBOL_GPL(nvme_auth_hmac_id); + +size_t nvme_auth_hmac_hash_len(u8 hmac_id) +{ + if (hmac_id >= ARRAY_SIZE(hash_map)) + return 0; + return hash_map[hmac_id].len; +} +EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len); + +struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, + u8 key_hash) +{ + struct nvme_dhchap_key *key; + unsigned char *p; + u32 crc; + int ret, key_len; + size_t allocated_len = strlen(secret); + + /* The secret might be suffixed with a ':' */ + p = strrchr(secret, ':'); + if (p) + allocated_len = p - secret; + key = kzalloc(sizeof(*key), GFP_KERNEL); + if (!key) + return ERR_PTR(-ENOMEM); + key->key = kzalloc(allocated_len, GFP_KERNEL); + if (!key->key) { + ret = -ENOMEM; + goto out_free_key; + } + + key_len = base64_decode(secret, allocated_len, key->key); + if (key_len < 0) { + pr_debug("base64 key decoding error %d\n", + key_len); + ret = key_len; + goto out_free_secret; + } + + if (key_len != 36 && key_len != 52 && + key_len != 68) { + pr_err("Invalid key len %d\n", key_len); + ret = -EINVAL; + goto out_free_secret; + } + + if (key_hash > 0 && + (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) { + pr_err("Mismatched key len %d for %s\n", key_len, + nvme_auth_hmac_name(key_hash)); + ret = -EINVAL; + goto out_free_secret; + } + + /* The last four bytes are the CRC in little-endian format */ + key_len -= 4; + /* + * The Linux crc32() implementation doesn't do the pre- and + * post-inversions, so do them manually here. + */ + crc = ~crc32(~0, key->key, key_len); + + if (get_unaligned_le32(key->key + key_len) != crc) { + pr_err("key crc mismatch (key %08x, crc %08x)\n", + get_unaligned_le32(key->key + key_len), crc); + ret = -EKEYREJECTED; + goto out_free_secret; + } + key->len = key_len; + key->hash = key_hash; + return key; +out_free_secret: + kfree_sensitive(key->key); +out_free_key: + kfree(key); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(nvme_auth_extract_key); + +void nvme_auth_free_key(struct nvme_dhchap_key *key) +{ + if (!key) + return; + kfree_sensitive(key->key); + kfree(key); +} +EXPORT_SYMBOL_GPL(nvme_auth_free_key); + +u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) +{ + const char *hmac_name; + struct crypto_shash *key_tfm; + struct shash_desc *shash; + u8 *transformed_key; + int ret; + + if (!key || !key->key) { + pr_warn("No key specified\n"); + return ERR_PTR(-ENOKEY); + } + if (key->hash == 0) { + transformed_key = kmemdup(key->key, key->len, GFP_KERNEL); + return transformed_key ? 
transformed_key : ERR_PTR(-ENOMEM); + } + hmac_name = nvme_auth_hmac_name(key->hash); + if (!hmac_name) { + pr_warn("Invalid key hash id %d\n", key->hash); + return ERR_PTR(-EINVAL); + } + + key_tfm = crypto_alloc_shash(hmac_name, 0, 0); + if (IS_ERR(key_tfm)) + return (u8 *)key_tfm; + + shash = kmalloc(sizeof(struct shash_desc) + + crypto_shash_descsize(key_tfm), + GFP_KERNEL); + if (!shash) { + ret = -ENOMEM; + goto out_free_key; + } + + transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL); + if (!transformed_key) { + ret = -ENOMEM; + goto out_free_shash; + } + + shash->tfm = key_tfm; + ret = crypto_shash_setkey(key_tfm, key->key, key->len); + if (ret < 0) + goto out_free_transformed_key; + ret = crypto_shash_init(shash); + if (ret < 0) + goto out_free_transformed_key; + ret = crypto_shash_update(shash, nqn, strlen(nqn)); + if (ret < 0) + goto out_free_transformed_key; + ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17); + if (ret < 0) + goto out_free_transformed_key; + ret = crypto_shash_final(shash, transformed_key); + if (ret < 0) + goto out_free_transformed_key; + + kfree(shash); + crypto_free_shash(key_tfm); + + return transformed_key; + +out_free_transformed_key: + kfree_sensitive(transformed_key); +out_free_shash: + kfree(shash); +out_free_key: + crypto_free_shash(key_tfm); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(nvme_auth_transform_key); + +static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey) +{ + const char *digest_name; + struct crypto_shash *tfm; + int ret; + + digest_name = nvme_auth_digest_name(hmac_id); + if (!digest_name) { + pr_debug("%s: failed to get digest for %d\n", __func__, + hmac_id); + return -EINVAL; + } + tfm = crypto_alloc_shash(digest_name, 0, 0); + if (IS_ERR(tfm)) + return -ENOMEM; + + ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey); + if (ret < 0) + pr_debug("%s: Failed to hash digest len %zu\n", __func__, + skey_len); + + crypto_free_shash(tfm); + return ret; +} + +int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, + u8 *challenge, u8 *aug, size_t hlen) +{ + struct crypto_shash *tfm; + struct shash_desc *desc; + u8 *hashed_key; + const char *hmac_name; + int ret; + + hashed_key = kmalloc(hlen, GFP_KERNEL); + if (!hashed_key) + return -ENOMEM; + + ret = nvme_auth_hash_skey(hmac_id, skey, + skey_len, hashed_key); + if (ret < 0) + goto out_free_key; + + hmac_name = nvme_auth_hmac_name(hmac_id); + if (!hmac_name) { + pr_warn("%s: invalid hash algorithm %d\n", + __func__, hmac_id); + ret = -EINVAL; + goto out_free_key; + } + + tfm = crypto_alloc_shash(hmac_name, 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + goto out_free_key; + } + + desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm), + GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto out_free_hash; + } + desc->tfm = tfm; + + ret = crypto_shash_setkey(tfm, hashed_key, hlen); + if (ret) + goto out_free_desc; + + ret = crypto_shash_init(desc); + if (ret) + goto out_free_desc; + + ret = crypto_shash_update(desc, challenge, hlen); + if (ret) + goto out_free_desc; + + ret = crypto_shash_final(desc, aug); +out_free_desc: + kfree_sensitive(desc); +out_free_hash: + crypto_free_shash(tfm); +out_free_key: + kfree_sensitive(hashed_key); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge); + +int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid) +{ + int ret; + + ret = crypto_kpp_set_secret(dh_tfm, NULL, 0); + if (ret) + pr_debug("failed to set private key, error %d\n", ret); + + 
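+	/*
+	 * Note: the NULL/zero-length secret above asks the ffdheXXXX(dh)
+	 * template to generate a random ephemeral private key; the matching
+	 * public key is then read back via nvme_auth_gen_pubkey().
+	 */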
return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey); + +int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, + u8 *host_key, size_t host_key_len) +{ + struct kpp_request *req; + struct crypto_wait wait; + struct scatterlist dst; + int ret; + + req = kpp_request_alloc(dh_tfm, GFP_KERNEL); + if (!req) + return -ENOMEM; + + crypto_init_wait(&wait); + kpp_request_set_input(req, NULL, 0); + sg_init_one(&dst, host_key, host_key_len); + kpp_request_set_output(req, &dst, host_key_len); + kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + + ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait); + kpp_request_free(req); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey); + +int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, + u8 *ctrl_key, size_t ctrl_key_len, + u8 *sess_key, size_t sess_key_len) +{ + struct kpp_request *req; + struct crypto_wait wait; + struct scatterlist src, dst; + int ret; + + req = kpp_request_alloc(dh_tfm, GFP_KERNEL); + if (!req) + return -ENOMEM; + + crypto_init_wait(&wait); + sg_init_one(&src, ctrl_key, ctrl_key_len); + kpp_request_set_input(req, &src, ctrl_key_len); + sg_init_one(&dst, sess_key, sess_key_len); + kpp_request_set_output(req, &dst, sess_key_len); + kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + + ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); + + kpp_request_free(req); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret); + +int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key) +{ + struct nvme_dhchap_key *key; + u8 key_hash; + + if (!secret) { + *ret_key = NULL; + return 0; + } + + if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1) + return -EINVAL; + + /* Pass in the secret without the 'DHHC-1:XX:' prefix */ + key = nvme_auth_extract_key(secret + 10, key_hash); + if (IS_ERR(key)) { + *ret_key = NULL; + return PTR_ERR(key); + } + + *ret_key = key; + return 0; +} +EXPORT_SYMBOL_GPL(nvme_auth_generate_key); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c new file mode 100644 index 0000000000..5fc5ea196b --- /dev/null +++ b/drivers/nvme/host/apple.c @@ -0,0 +1,1598 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Apple ANS NVM Express device driver + * Copyright The Asahi Linux Contributors + * + * Based on the pci.c NVM Express device driver + * Copyright (c) 2011-2014, Intel Corporation. + * and on the rdma.c NVMe over Fabrics RDMA host code. + * Copyright (c) 2015-2016 HGST, a Western Digital Company. 
+ */
+
+#include <linux/async.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/once.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/soc/apple/sart.h>
+#include <linux/time64.h>
+
+#include "nvme.h"
+
+#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
+#define APPLE_ANS_MAX_QUEUE_DEPTH 64
+
+#define APPLE_ANS_COPROC_CPU_CONTROL 0x44
+#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define APPLE_ANS_ACQ_DB 0x1004
+#define APPLE_ANS_IOCQ_DB 0x100c
+
+#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
+
+#define APPLE_ANS_BOOT_STATUS 0x1300
+#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
+
+#define APPLE_ANS_UNKNOWN_CTRL 0x24008
+#define APPLE_ANS_PRP_NULL_CHECK BIT(11)
+
+#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
+#define APPLE_ANS_LINEAR_SQ_EN BIT(0)
+
+#define APPLE_ANS_LINEAR_ASQ_DB 0x2490c
+#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
+
+#define APPLE_NVMMU_NUM_TCBS 0x28100
+#define APPLE_NVMMU_ASQ_TCB_BASE 0x28108
+#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
+#define APPLE_NVMMU_TCB_INVAL 0x28118
+#define APPLE_NVMMU_TCB_STAT 0x28120
+
+/*
+ * This controller is a bit weird in the way command tags work: both the
+ * admin and the IO queue share the same tag space. Additionally, tags
+ * cannot be higher than 0x40 which effectively limits the combined
+ * queue depth to 0x40. Instead of wasting half of that on the admin queue,
+ * which gets much less traffic, we instead reduce its size here.
+ * The controller also doesn't support async events, so no space needs to
+ * be reserved for NVME_NR_AEN_COMMANDS.
+ */
+#define APPLE_NVME_AQ_DEPTH 2
+#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS 127
+
+/*
+ * This controller comes with an embedded IOMMU known as NVMMU.
+ * The NVMMU is pointed to an array of TCBs indexed by the command tag.
+ * Each command must be configured inside this structure before it's allowed
+ * to execute, including commands that don't require DMA transfers.
+ *
+ * An exception to this is Apple's vendor-specific commands (opcode 0xD8 on
+ * the admin queue): those commands must still be added to the NVMMU but the
+ * DMA buffers cannot be represented as PRPs and must instead be allowed
+ * using SART.
+ *
+ * Programming the PRPs to the same values as those in the submission queue
+ * looks rather silly at first. This hardware is however designed for a kernel
+ * that runs the NVMMU code in a higher exception level than the NVMe driver.
+ * In that setting the NVMe driver first programs the submission queue entry
+ * and then executes a hypercall to the code that is allowed to program the
+ * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
+ * verifying that they don't point to kernel text, data, pagetables, or
+ * similar protected areas before programming the TCB to point to this shadow
+ * copy. Since Linux doesn't do any of that we may as well just point both the
+ * queue and the TCB PRP pointer to the same memory.
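+ *
+ * (The TCB layout in struct apple_nvmmu_tcb below comes from reverse
+ * engineering; fields whose purpose is unknown are named _unk* and are
+ * simply left zeroed.)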
+ */ +struct apple_nvmmu_tcb { + u8 opcode; + +#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0) +#define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1) + u8 dma_flags; + + u8 command_id; + u8 _unk0; + __le16 length; + u8 _unk1[18]; + __le64 prp1; + __le64 prp2; + u8 _unk2[16]; + u8 aes_iv[8]; + u8 _aes_unk[64]; +}; + +/* + * The Apple NVMe controller only supports a single admin and a single IO queue + * which are both limited to 64 entries and share a single interrupt. + * + * The completion queue works as usual. The submission "queue" instead is + * an array indexed by the command tag on this hardware. Commands must also be + * present in the NVMMU's tcb array. They are triggered by writing their tag to + * a MMIO register. + */ +struct apple_nvme_queue { + struct nvme_command *sqes; + struct nvme_completion *cqes; + struct apple_nvmmu_tcb *tcbs; + + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + dma_addr_t tcb_dma_addr; + + u32 __iomem *sq_db; + u32 __iomem *cq_db; + + u16 cq_head; + u8 cq_phase; + + bool is_adminq; + bool enabled; +}; + +/* + * The apple_nvme_iod describes the data in an I/O. + * + * The sg pointer contains the list of PRP chunk allocations in addition + * to the actual struct scatterlist. + */ +struct apple_nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + struct apple_nvme_queue *q; + int npages; /* In the PRP list. 0 means small pool in use */ + int nents; /* Used in scatterlist */ + dma_addr_t first_dma; + unsigned int dma_len; /* length of single DMA segment mapping */ + struct scatterlist *sg; +}; + +struct apple_nvme { + struct device *dev; + + void __iomem *mmio_coproc; + void __iomem *mmio_nvme; + + struct device **pd_dev; + struct device_link **pd_link; + int pd_count; + + struct apple_sart *sart; + struct apple_rtkit *rtk; + struct reset_control *reset; + + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + mempool_t *iod_mempool; + + struct nvme_ctrl ctrl; + struct work_struct remove_work; + + struct apple_nvme_queue adminq; + struct apple_nvme_queue ioq; + + struct blk_mq_tag_set admin_tagset; + struct blk_mq_tag_set tagset; + + int irq; + spinlock_t lock; +}; + +static_assert(sizeof(struct nvme_command) == 64); +static_assert(sizeof(struct apple_nvmmu_tcb) == 128); + +static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl) +{ + return container_of(ctrl, struct apple_nvme, ctrl); +} + +static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return container_of(q, struct apple_nvme, adminq); + else + return container_of(q, struct apple_nvme, ioq); +} + +static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return APPLE_NVME_AQ_DEPTH; + else + return APPLE_ANS_MAX_QUEUE_DEPTH; +} + +static void apple_nvme_rtkit_crashed(void *cookie) +{ + struct apple_nvme *anv = cookie; + + dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot"); + nvme_reset_ctrl(&anv->ctrl); +} + +static int apple_nvme_sart_dma_setup(void *cookie, + struct apple_rtkit_shmem *bfr) +{ + struct apple_nvme *anv = cookie; + int ret; + + if (bfr->iova) + return -EINVAL; + if (!bfr->size) + return -EINVAL; + + bfr->buffer = + dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL); + if (!bfr->buffer) + return -ENOMEM; + + ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size); + if (ret) { + dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova); + bfr->buffer = NULL; + return -ENOMEM; + } + + return 0; +} + +static 
void apple_nvme_sart_dma_destroy(void *cookie,
+				 struct apple_rtkit_shmem *bfr)
+{
+	struct apple_nvme *anv = cookie;
+
+	apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
+	dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+}
+
+static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
+	.crashed = apple_nvme_rtkit_crashed,
+	.shmem_setup = apple_nvme_sart_dma_setup,
+	.shmem_destroy = apple_nvme_sart_dma_destroy,
+};
+
+static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
+{
+	struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+	writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
+	if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
+		dev_warn_ratelimited(anv->dev,
+				     "NVMMU TCB invalidation failed\n");
+}
+
+static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+				  struct nvme_command *cmd)
+{
+	struct apple_nvme *anv = queue_to_apple_nvme(q);
+	u32 tag = nvme_tag_from_cid(cmd->common.command_id);
+	struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
+
+	tcb->opcode = cmd->common.opcode;
+	tcb->prp1 = cmd->common.dptr.prp1;
+	tcb->prp2 = cmd->common.dptr.prp2;
+	tcb->length = cmd->rw.length;
+	tcb->command_id = tag;
+
+	if (nvme_is_write(cmd))
+		tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
+	else
+		tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;
+
+	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
+
+	/*
+	 * This lock doesn't make much sense at first glance, but removing
+	 * it results in occasional missed completion interrupts even though
+	 * the commands still appear on the CQ. It's unclear why this happens
+	 * but our best guess is that there is a bug in the firmware triggered
+	 * when a new command is issued while we're inside the irq handler
+	 * between the NVMMU invalidation (and making the tag available again)
+	 * and the final CQ update.
+	 */
+	spin_lock_irq(&anv->lock);
+	writel(tag, q->sq_db);
+	spin_unlock_irq(&anv->lock);
+}
+
+/*
+ * From pci.c:
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
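+ *
+ * (The WARN_ON_ONCE() in apple_nvme_probe() checks that this worst-case
+ * estimate still fits within PAGE_SIZE, since the iod mempool is backed
+ * by kmalloc.)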
+ */ +static inline size_t apple_nvme_iod_alloc_size(void) +{ + const unsigned int nprps = DIV_ROUND_UP( + NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE); + const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); + const size_t alloc_size = sizeof(__le64 *) * npages + + sizeof(struct scatterlist) * NVME_MAX_SEGS; + + return alloc_size; +} + +static void **apple_nvme_iod_list(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + + return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); +} + +static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req) +{ + const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; + int i; + + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = apple_nvme_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); + + dma_pool_free(anv->prp_page_pool, prp_list, dma_addr); + dma_addr = next_dma_addr; + } +} + +static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if (iod->dma_len) { + dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len, + rq_dma_dir(req)); + return; + } + + WARN_ON_ONCE(!iod->nents); + + dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); + if (iod->npages == 0) + dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0], + iod->first_dma); + else + apple_nvme_free_prps(anv, req); + mempool_free(iod->sg, anv->iod_mempool); +} + +static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + dma_addr_t phys = sg_phys(sg); + + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n", + i, &phys, sg->offset, sg->length, &sg_dma_address(sg), + sg_dma_len(sg)); + } +} + +static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv, + struct request *req, + struct nvme_rw_command *cmnd) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct dma_pool *pool; + int length = blk_rq_payload_bytes(req); + struct scatterlist *sg = iod->sg; + int dma_len = sg_dma_len(sg); + u64 dma_addr = sg_dma_address(sg); + int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); + __le64 *prp_list; + void **list = apple_nvme_iod_list(req); + dma_addr_t prp_dma; + int nprps, i; + + length -= (NVME_CTRL_PAGE_SIZE - offset); + if (length <= 0) { + iod->first_dma = 0; + goto done; + } + + dma_len -= (NVME_CTRL_PAGE_SIZE - offset); + if (dma_len) { + dma_addr += (NVME_CTRL_PAGE_SIZE - offset); + } else { + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= NVME_CTRL_PAGE_SIZE) { + iod->first_dma = dma_addr; + goto done; + } + + nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); + if (nprps <= (256 / 8)) { + pool = anv->prp_small_pool; + iod->npages = 0; + } else { + pool = anv->prp_page_pool; + iod->npages = 1; + } + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + iod->first_dma = dma_addr; + iod->npages = -1; + return BLK_STS_RESOURCE; + } + list[0] = prp_list; + iod->first_dma = prp_dma; + i = 0; + for (;;) { + if (i == NVME_CTRL_PAGE_SIZE >> 3) { + __le64 *old_prp_list = prp_list; + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) + goto free_prps; + list[iod->npages++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 
1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= NVME_CTRL_PAGE_SIZE; + dma_addr += NVME_CTRL_PAGE_SIZE; + length -= NVME_CTRL_PAGE_SIZE; + if (length <= 0) + break; + if (dma_len > 0) + continue; + if (unlikely(dma_len < 0)) + goto bad_sgl; + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } +done: + cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); + return BLK_STS_OK; +free_prps: + apple_nvme_free_prps(anv, req); + return BLK_STS_RESOURCE; +bad_sgl: + WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents), + "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), + iod->nents); + return BLK_STS_IOERR; +} + +static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv, + struct request *req, + struct nvme_rw_command *cmnd, + struct bio_vec *bv) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); + unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; + + iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0); + if (dma_mapping_error(anv->dev, iod->first_dma)) + return BLK_STS_RESOURCE; + iod->dma_len = bv->bv_len; + + cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); + if (bv->bv_len > first_prp_len) + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); + return BLK_STS_OK; +} + +static blk_status_t apple_nvme_map_data(struct apple_nvme *anv, + struct request *req, + struct nvme_command *cmnd) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + blk_status_t ret = BLK_STS_RESOURCE; + int nr_mapped; + + if (blk_rq_nr_phys_segments(req) == 1) { + struct bio_vec bv = req_bvec(req); + + if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) + return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw, + &bv); + } + + iod->dma_len = 0; + iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC); + if (!iod->sg) + return BLK_STS_RESOURCE; + sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); + iod->nents = blk_rq_map_sg(req->q, req, iod->sg); + if (!iod->nents) + goto out_free_sg; + + nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents, + rq_dma_dir(req), DMA_ATTR_NO_WARN); + if (!nr_mapped) + goto out_free_sg; + + ret = apple_nvme_setup_prps(anv, req, &cmnd->rw); + if (ret != BLK_STS_OK) + goto out_unmap_sg; + return BLK_STS_OK; + +out_unmap_sg: + dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); +out_free_sg: + mempool_free(iod->sg, anv->iod_mempool); + return ret; +} + +static __always_inline void apple_nvme_unmap_rq(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct apple_nvme *anv = queue_to_apple_nvme(iod->q); + + if (blk_rq_nr_phys_segments(req)) + apple_nvme_unmap_data(anv, req); +} + +static void apple_nvme_complete_rq(struct request *req) +{ + apple_nvme_unmap_rq(req); + nvme_complete_rq(req); +} + +static void apple_nvme_complete_batch(struct io_comp_batch *iob) +{ + nvme_complete_batch(iob, apple_nvme_unmap_rq); +} + +static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q) +{ + struct nvme_completion *hcqe = &q->cqes[q->cq_head]; + + return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase; +} + +static inline struct blk_mq_tags * +apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return anv->admin_tagset.tags[0]; + else + return anv->tagset.tags[0]; +} + +static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, + struct 
io_comp_batch *iob, u16 idx) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct nvme_completion *cqe = &q->cqes[idx]; + __u16 command_id = READ_ONCE(cqe->command_id); + struct request *req; + + apple_nvmmu_inval(q, command_id); + + req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id); + if (unlikely(!req)) { + dev_warn(anv->dev, "invalid id %d completed", command_id); + return; + } + + if (!nvme_try_complete_req(req, cqe->status, cqe->result) && + !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + apple_nvme_complete_batch)) + apple_nvme_complete_rq(req); +} + +static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q) +{ + u32 tmp = q->cq_head + 1; + + if (tmp == apple_nvme_queue_depth(q)) { + q->cq_head = 0; + q->cq_phase ^= 1; + } else { + q->cq_head = tmp; + } +} + +static bool apple_nvme_poll_cq(struct apple_nvme_queue *q, + struct io_comp_batch *iob) +{ + bool found = false; + + while (apple_nvme_cqe_pending(q)) { + found = true; + + /* + * load-load control dependency between phase and the rest of + * the cqe requires a full read memory barrier + */ + dma_rmb(); + apple_nvme_handle_cqe(q, iob, q->cq_head); + apple_nvme_update_cq_head(q); + } + + if (found) + writel(q->cq_head, q->cq_db); + + return found; +} + +static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force) +{ + bool found; + DEFINE_IO_COMP_BATCH(iob); + + if (!READ_ONCE(q->enabled) && !force) + return false; + + found = apple_nvme_poll_cq(q, &iob); + + if (!rq_list_empty(iob.req_list)) + apple_nvme_complete_batch(&iob); + + return found; +} + +static irqreturn_t apple_nvme_irq(int irq, void *data) +{ + struct apple_nvme *anv = data; + bool handled = false; + unsigned long flags; + + spin_lock_irqsave(&anv->lock, flags); + if (apple_nvme_handle_cq(&anv->ioq, false)) + handled = true; + if (apple_nvme_handle_cq(&anv->adminq, false)) + handled = true; + spin_unlock_irqrestore(&anv->lock, flags); + + if (handled) + return IRQ_HANDLED; + return IRQ_NONE; +} + +static int apple_nvme_create_cq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. + */ + c.create_cq.opcode = nvme_admin_create_cq; + c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr); + c.create_cq.cqid = cpu_to_le16(1); + c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED); + c.create_cq.irq_vector = cpu_to_le16(0); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_remove_cq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + c.delete_queue.opcode = nvme_admin_delete_cq; + c.delete_queue.qid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_create_sq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. 
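+	 * (nvme_submit_sync_cmd() is called with a NULL data buffer here,
+	 * so the prp1 we set by hand reaches the controller unmodified.)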
+ */ + c.create_sq.opcode = nvme_admin_create_sq; + c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr); + c.create_sq.sqid = cpu_to_le16(1); + c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG); + c.create_sq.cqid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_remove_sq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + c.delete_queue.opcode = nvme_admin_delete_sq; + c.delete_queue.qid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_ns *ns = hctx->queue->queuedata; + struct apple_nvme_queue *q = hctx->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct request *req = bd->rq; + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_command *cmnd = &iod->cmd; + blk_status_t ret; + + iod->npages = -1; + iod->nents = 0; + + /* + * We should not need to do this, but we're still using this to + * ensure we can drain requests on a dying queue. + */ + if (unlikely(!READ_ONCE(q->enabled))) + return BLK_STS_IOERR; + + if (!nvme_check_ready(&anv->ctrl, req, true)) + return nvme_fail_nonready_command(&anv->ctrl, req); + + ret = nvme_setup_cmd(ns, req); + if (ret) + return ret; + + if (blk_rq_nr_phys_segments(req)) { + ret = apple_nvme_map_data(anv, req, cmnd); + if (ret) + goto out_free_cmd; + } + + blk_mq_start_request(req); + apple_nvme_submit_cmd(q, cmnd); + return BLK_STS_OK; + +out_free_cmd: + nvme_cleanup_cmd(req); + return ret; +} + +static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + hctx->driver_data = data; + return 0; +} + +static int apple_nvme_init_request(struct blk_mq_tag_set *set, + struct request *req, unsigned int hctx_idx, + unsigned int numa_node) +{ + struct apple_nvme_queue *q = set->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_request *nreq = nvme_req(req); + + iod->q = q; + nreq->ctrl = &anv->ctrl; + nreq->cmd = &iod->cmd; + + return 0; +} + +static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) +{ + u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); + bool dead = false, freeze = false; + unsigned long flags; + + if (apple_rtkit_is_crashed(anv->rtk)) + dead = true; + if (!(csts & NVME_CSTS_RDY)) + dead = true; + if (csts & NVME_CSTS_CFS) + dead = true; + + if (anv->ctrl.state == NVME_CTRL_LIVE || + anv->ctrl.state == NVME_CTRL_RESETTING) { + freeze = true; + nvme_start_freeze(&anv->ctrl); + } + + /* + * Give the controller a chance to complete all entered requests if + * doing a safe shutdown. 
+ */ + if (!dead && shutdown && freeze) + nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT); + + nvme_stop_queues(&anv->ctrl); + + if (!dead) { + if (READ_ONCE(anv->ioq.enabled)) { + apple_nvme_remove_sq(anv); + apple_nvme_remove_cq(anv); + } + + if (shutdown) + nvme_shutdown_ctrl(&anv->ctrl); + nvme_disable_ctrl(&anv->ctrl); + } + + WRITE_ONCE(anv->ioq.enabled, false); + WRITE_ONCE(anv->adminq.enabled, false); + mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */ + nvme_stop_admin_queue(&anv->ctrl); + + /* last chance to complete any requests before nvme_cancel_request */ + spin_lock_irqsave(&anv->lock, flags); + apple_nvme_handle_cq(&anv->ioq, true); + apple_nvme_handle_cq(&anv->adminq, true); + spin_unlock_irqrestore(&anv->lock, flags); + + nvme_cancel_tagset(&anv->ctrl); + nvme_cancel_admin_tagset(&anv->ctrl); + + /* + * The driver will not be starting up queues again if shutting down so + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. + */ + if (shutdown) { + nvme_start_queues(&anv->ctrl); + nvme_start_admin_queue(&anv->ctrl); + } +} + +static enum blk_eh_timer_return apple_nvme_timeout(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct apple_nvme_queue *q = iod->q; + struct apple_nvme *anv = queue_to_apple_nvme(q); + unsigned long flags; + u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); + + if (anv->ctrl.state != NVME_CTRL_LIVE) { + /* + * From rdma.c: + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. 
+ */ + dev_warn(anv->dev, + "I/O %d(aq:%d) timeout while not in live state\n", + req->tag, q->is_adminq); + if (blk_mq_request_started(req) && + !blk_mq_request_completed(req)) { + nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + blk_mq_complete_request(req); + } + return BLK_EH_DONE; + } + + /* check if we just missed an interrupt if we're still alive */ + if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) { + spin_lock_irqsave(&anv->lock, flags); + apple_nvme_handle_cq(q, false); + spin_unlock_irqrestore(&anv->lock, flags); + if (blk_mq_request_completed(req)) { + dev_warn(anv->dev, + "I/O %d(aq:%d) timeout: completion polled\n", + req->tag, q->is_adminq); + return BLK_EH_DONE; + } + } + + /* + * aborting commands isn't supported which leaves a full reset as our + * only option here + */ + dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n", + req->tag, q->is_adminq); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + apple_nvme_disable(anv, false); + nvme_reset_ctrl(&anv->ctrl); + return BLK_EH_DONE; +} + +static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx, + struct io_comp_batch *iob) +{ + struct apple_nvme_queue *q = hctx->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + bool found; + unsigned long flags; + + spin_lock_irqsave(&anv->lock, flags); + found = apple_nvme_poll_cq(q, iob); + spin_unlock_irqrestore(&anv->lock, flags); + + return found; +} + +static const struct blk_mq_ops apple_nvme_mq_admin_ops = { + .queue_rq = apple_nvme_queue_rq, + .complete = apple_nvme_complete_rq, + .init_hctx = apple_nvme_init_hctx, + .init_request = apple_nvme_init_request, + .timeout = apple_nvme_timeout, +}; + +static const struct blk_mq_ops apple_nvme_mq_ops = { + .queue_rq = apple_nvme_queue_rq, + .complete = apple_nvme_complete_rq, + .init_hctx = apple_nvme_init_hctx, + .init_request = apple_nvme_init_request, + .timeout = apple_nvme_timeout, + .poll = apple_nvme_poll, +}; + +static void apple_nvme_init_queue(struct apple_nvme_queue *q) +{ + unsigned int depth = apple_nvme_queue_depth(q); + + q->cq_head = 0; + q->cq_phase = 1; + memset(q->tcbs, 0, + APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb)); + memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); + WRITE_ONCE(q->enabled, true); + wmb(); /* ensure the first interrupt sees the initialization */ +} + +static void apple_nvme_reset_work(struct work_struct *work) +{ + unsigned int nr_io_queues = 1; + int ret; + u32 boot_status, aqa; + struct apple_nvme *anv = + container_of(work, struct apple_nvme, ctrl.reset_work); + + if (anv->ctrl.state != NVME_CTRL_RESETTING) { + dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", + anv->ctrl.state); + ret = -ENODEV; + goto out; + } + + /* there's unfortunately no known way to recover if RTKit crashed :( */ + if (apple_rtkit_is_crashed(anv->rtk)) { + dev_err(anv->dev, + "RTKit has crashed without any way to recover."); + ret = -EIO; + goto out; + } + + if (anv->ctrl.ctrl_config & NVME_CC_ENABLE) + apple_nvme_disable(anv, false); + + /* RTKit must be shut down cleanly for the (soft)-reset to work */ + if (apple_rtkit_is_running(anv->rtk)) { + dev_dbg(anv->dev, "Trying to shut down RTKit before reset."); + ret = apple_rtkit_shutdown(anv->rtk); + if (ret) + goto out; + } + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + + ret = reset_control_assert(anv->reset); + if (ret) + goto out; + + ret = apple_rtkit_reinit(anv->rtk); + if (ret) + goto out; + + ret = 
reset_control_deassert(anv->reset);
+	if (ret)
+		goto out;
+
+	writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+	       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+	ret = apple_rtkit_boot(anv->rtk);
+	if (ret) {
+		dev_err(anv->dev, "ANS did not boot");
+		goto out;
+	}
+
+	ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
+				 boot_status,
+				 boot_status == APPLE_ANS_BOOT_STATUS_OK,
+				 USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
+	if (ret) {
+		dev_err(anv->dev, "ANS did not initialize");
+		goto out;
+	}
+
+	dev_dbg(anv->dev, "ANS booted successfully.");
+
+	/*
+	 * Limit the max command size to prevent iod->sg allocations going
+	 * over a single page.
+	 */
+	anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
+					 dma_max_mapping_size(anv->dev) >> 9);
+	anv->ctrl.max_segments = NVME_MAX_SEGS;
+
+	/*
+	 * Enable NVMMU and linear submission queues.
+	 * While we could keep those disabled and pretend this is a slightly
+	 * more common NVMe controller we'd still need some quirks (e.g.
+	 * sq entries will be 128 bytes) and Apple might drop support for
+	 * that mode in the future.
+	 */
+	writel(APPLE_ANS_LINEAR_SQ_EN,
+	       anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+
+	/* Allow as many pending commands as possible for both queues */
+	writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
+	       anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+
+	/* Setup the NVMMU for the maximum admin and IO queue depth */
+	writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
+	       anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+
+	/*
+	 * This is probably a chicken bit: without it all commands where any
+	 * PRP is set to zero (including those that don't use that field)
+	 * fail and the co-processor complains about "completed with err
+	 * BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the syslog.
+	 */
+	writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+		       ~APPLE_ANS_PRP_NULL_CHECK,
+	       anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+
+	/* Setup the admin queue */
+	aqa = APPLE_NVME_AQ_DEPTH - 1;
+	aqa |= aqa << 16;
+	writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
+	writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
+	writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
+
+	/* Setup NVMMU for both queues */
+	writeq(anv->adminq.tcb_dma_addr,
+	       anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+	writeq(anv->ioq.tcb_dma_addr,
+	       anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+
+	anv->ctrl.sqsize =
+		APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+	anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
+
+	dev_dbg(anv->dev, "Enabling controller now");
+	ret = nvme_enable_ctrl(&anv->ctrl);
+	if (ret)
+		goto out;
+
+	dev_dbg(anv->dev, "Starting admin queue");
+	apple_nvme_init_queue(&anv->adminq);
+	nvme_start_admin_queue(&anv->ctrl);
+
+	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
+		dev_warn(anv->ctrl.device,
+			 "failed to mark controller CONNECTING\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = nvme_init_ctrl_finish(&anv->ctrl);
+	if (ret)
+		goto out;
+
+	dev_dbg(anv->dev, "Creating IOCQ");
+	ret = apple_nvme_create_cq(anv);
+	if (ret)
+		goto out;
+	dev_dbg(anv->dev, "Creating IOSQ");
+	ret = apple_nvme_create_sq(anv);
+	if (ret)
+		goto out_remove_cq;
+
+	apple_nvme_init_queue(&anv->ioq);
+	nr_io_queues = 1;
+	ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
+	if (ret)
+		goto out_remove_sq;
+	if (nr_io_queues != 1) {
+		ret = -ENXIO;
+		goto out_remove_sq;
+	}
+
+	anv->ctrl.queue_count = nr_io_queues + 1;
+
+	nvme_start_queues(&anv->ctrl);
+	nvme_wait_freeze(&anv->ctrl);
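+	/*
+	 * Only a single IO queue exists; the freeze above just makes sure
+	 * no requests are in flight while blk-mq is told the final hw
+	 * queue count.
+	 */
+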
blk_mq_update_nr_hw_queues(&anv->tagset, 1); + nvme_unfreeze(&anv->ctrl); + + if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) { + dev_warn(anv->ctrl.device, + "failed to mark controller live state\n"); + ret = -ENODEV; + goto out_remove_sq; + } + + nvme_start_ctrl(&anv->ctrl); + + dev_dbg(anv->dev, "ANS boot and NVMe init completed."); + return; + +out_remove_sq: + apple_nvme_remove_sq(anv); +out_remove_cq: + apple_nvme_remove_cq(anv); +out: + dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret); + nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); + nvme_get_ctrl(&anv->ctrl); + apple_nvme_disable(anv, false); + nvme_kill_queues(&anv->ctrl); + if (!queue_work(nvme_wq, &anv->remove_work)) + nvme_put_ctrl(&anv->ctrl); +} + +static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work) +{ + struct apple_nvme *anv = + container_of(work, struct apple_nvme, remove_work); + + nvme_put_ctrl(&anv->ctrl); + device_release_driver(anv->dev); +} + +static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) +{ + *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) +{ + writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) +{ + *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size) +{ + struct device *dev = ctrl_to_apple_nvme(ctrl)->dev; + + return snprintf(buf, size, "%s\n", dev_name(dev)); +} + +static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl) +{ + struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl); + + if (anv->ctrl.admin_q) + blk_put_queue(anv->ctrl.admin_q); + put_device(anv->dev); +} + +static const struct nvme_ctrl_ops nvme_ctrl_ops = { + .name = "apple-nvme", + .module = THIS_MODULE, + .flags = 0, + .reg_read32 = apple_nvme_reg_read32, + .reg_write32 = apple_nvme_reg_write32, + .reg_read64 = apple_nvme_reg_read64, + .free_ctrl = apple_nvme_free_ctrl, + .get_address = apple_nvme_get_address, +}; + +static void apple_nvme_async_probe(void *data, async_cookie_t cookie) +{ + struct apple_nvme *anv = data; + + flush_work(&anv->ctrl.reset_work); + flush_work(&anv->ctrl.scan_work); + nvme_put_ctrl(&anv->ctrl); +} + +static void devm_apple_nvme_put_tag_set(void *data) +{ + blk_mq_free_tag_set(data); +} + +static int apple_nvme_alloc_tagsets(struct apple_nvme *anv) +{ + int ret; + + anv->admin_tagset.ops = &apple_nvme_mq_admin_ops; + anv->admin_tagset.nr_hw_queues = 1; + anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH; + anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; + anv->admin_tagset.numa_node = NUMA_NO_NODE; + anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod); + anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED; + anv->admin_tagset.driver_data = &anv->adminq; + + ret = blk_mq_alloc_tag_set(&anv->admin_tagset); + if (ret) + return ret; + ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set, + &anv->admin_tagset); + if (ret) + return ret; + + anv->tagset.ops = &apple_nvme_mq_ops; + anv->tagset.nr_hw_queues = 1; + anv->tagset.nr_maps = 1; + /* + * Tags are used as an index to the NVMMU and must be unique across + * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which + * must be marked as reserved in the IO queue. 
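+	 * (With APPLE_NVME_AQ_DEPTH == 2, blk-mq hands the IO queue tags
+	 * starting at 2, so they can never collide with admin tags in the
+	 * NVMMU TCB array.)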
+ */ + anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; + anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1; + anv->tagset.timeout = NVME_IO_TIMEOUT; + anv->tagset.numa_node = NUMA_NO_NODE; + anv->tagset.cmd_size = sizeof(struct apple_nvme_iod); + anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE; + anv->tagset.driver_data = &anv->ioq; + + ret = blk_mq_alloc_tag_set(&anv->tagset); + if (ret) + return ret; + ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set, + &anv->tagset); + if (ret) + return ret; + + anv->ctrl.admin_tagset = &anv->admin_tagset; + anv->ctrl.tagset = &anv->tagset; + + return 0; +} + +static int apple_nvme_queue_alloc(struct apple_nvme *anv, + struct apple_nvme_queue *q) +{ + unsigned int depth = apple_nvme_queue_depth(q); + + q->cqes = dmam_alloc_coherent(anv->dev, + depth * sizeof(struct nvme_completion), + &q->cq_dma_addr, GFP_KERNEL); + if (!q->cqes) + return -ENOMEM; + + q->sqes = dmam_alloc_coherent(anv->dev, + depth * sizeof(struct nvme_command), + &q->sq_dma_addr, GFP_KERNEL); + if (!q->sqes) + return -ENOMEM; + + /* + * We need the maximum queue depth here because the NVMMU only has a + * single depth configuration shared between both queues. + */ + q->tcbs = dmam_alloc_coherent(anv->dev, + APPLE_ANS_MAX_QUEUE_DEPTH * + sizeof(struct apple_nvmmu_tcb), + &q->tcb_dma_addr, GFP_KERNEL); + if (!q->tcbs) + return -ENOMEM; + + /* + * initialize phase to make sure the allocated and empty memory + * doesn't look like a full cq already. + */ + q->cq_phase = 1; + return 0; +} + +static void apple_nvme_detach_genpd(struct apple_nvme *anv) +{ + int i; + + if (anv->pd_count <= 1) + return; + + for (i = anv->pd_count - 1; i >= 0; i--) { + if (anv->pd_link[i]) + device_link_del(anv->pd_link[i]); + if (!IS_ERR_OR_NULL(anv->pd_dev[i])) + dev_pm_domain_detach(anv->pd_dev[i], true); + } +} + +static int apple_nvme_attach_genpd(struct apple_nvme *anv) +{ + struct device *dev = anv->dev; + int i; + + anv->pd_count = of_count_phandle_with_args( + dev->of_node, "power-domains", "#power-domain-cells"); + if (anv->pd_count <= 1) + return 0; + + anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev), + GFP_KERNEL); + if (!anv->pd_dev) + return -ENOMEM; + + anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link), + GFP_KERNEL); + if (!anv->pd_link) + return -ENOMEM; + + for (i = 0; i < anv->pd_count; i++) { + anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); + if (IS_ERR(anv->pd_dev[i])) { + apple_nvme_detach_genpd(anv); + return PTR_ERR(anv->pd_dev[i]); + } + + anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i], + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!anv->pd_link[i]) { + apple_nvme_detach_genpd(anv); + return -EINVAL; + } + } + + return 0; +} + +static void devm_apple_nvme_mempool_destroy(void *data) +{ + mempool_destroy(data); +} + +static int apple_nvme_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_nvme *anv; + int ret; + + anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); + if (!anv) + return -ENOMEM; + + anv->dev = get_device(dev); + anv->adminq.is_adminq = true; + platform_set_drvdata(pdev, anv); + + ret = apple_nvme_attach_genpd(anv); + if (ret < 0) { + dev_err_probe(dev, ret, "Failed to attach power domains"); + goto put_dev; + } + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { + ret = -ENXIO; + goto put_dev; + } + + anv->irq = platform_get_irq(pdev, 0); + if (anv->irq < 0) { + ret = anv->irq; + goto put_dev; + } + if (!anv->irq) { + ret = -ENXIO; 
+ goto put_dev; + } + + anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans"); + if (IS_ERR(anv->mmio_coproc)) { + ret = PTR_ERR(anv->mmio_coproc); + goto put_dev; + } + anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme"); + if (IS_ERR(anv->mmio_nvme)) { + ret = PTR_ERR(anv->mmio_nvme); + goto put_dev; + } + + anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; + anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; + anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; + anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; + + anv->sart = devm_apple_sart_get(dev); + if (IS_ERR(anv->sart)) { + ret = dev_err_probe(dev, PTR_ERR(anv->sart), + "Failed to initialize SART"); + goto put_dev; + } + + anv->reset = devm_reset_control_array_get_exclusive(anv->dev); + if (IS_ERR(anv->reset)) { + ret = dev_err_probe(dev, PTR_ERR(anv->reset), + "Failed to get reset control"); + goto put_dev; + } + + INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work); + INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work); + spin_lock_init(&anv->lock); + + ret = apple_nvme_queue_alloc(anv, &anv->adminq); + if (ret) + goto put_dev; + ret = apple_nvme_queue_alloc(anv, &anv->ioq); + if (ret) + goto put_dev; + + anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev, + NVME_CTRL_PAGE_SIZE, + NVME_CTRL_PAGE_SIZE, 0); + if (!anv->prp_page_pool) { + ret = -ENOMEM; + goto put_dev; + } + + anv->prp_small_pool = + dmam_pool_create("prp list 256", anv->dev, 256, 256, 0); + if (!anv->prp_small_pool) { + ret = -ENOMEM; + goto put_dev; + } + + WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE); + anv->iod_mempool = + mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size()); + if (!anv->iod_mempool) { + ret = -ENOMEM; + goto put_dev; + } + ret = devm_add_action_or_reset(anv->dev, + devm_apple_nvme_mempool_destroy, anv->iod_mempool); + if (ret) + goto put_dev; + + ret = apple_nvme_alloc_tagsets(anv); + if (ret) + goto put_dev; + + ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0, + "nvme-apple", anv); + if (ret) { + dev_err_probe(dev, ret, "Failed to request IRQ"); + goto put_dev; + } + + anv->rtk = + devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops); + if (IS_ERR(anv->rtk)) { + ret = dev_err_probe(dev, PTR_ERR(anv->rtk), + "Failed to initialize RTKit"); + goto put_dev; + } + + ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops, + NVME_QUIRK_SKIP_CID_GEN); + if (ret) { + dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl"); + goto put_dev; + } + + anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset); + if (IS_ERR(anv->ctrl.admin_q)) { + ret = -ENOMEM; + goto put_dev; + } + + if (!blk_get_queue(anv->ctrl.admin_q)) { + nvme_start_admin_queue(&anv->ctrl); + blk_mq_destroy_queue(anv->ctrl.admin_q); + anv->ctrl.admin_q = NULL; + ret = -ENODEV; + goto put_dev; + } + + nvme_reset_ctrl(&anv->ctrl); + async_schedule(apple_nvme_async_probe, anv); + + return 0; + +put_dev: + put_device(anv->dev); + return ret; +} + +static int apple_nvme_remove(struct platform_device *pdev) +{ + struct apple_nvme *anv = platform_get_drvdata(pdev); + + nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); + flush_work(&anv->ctrl.reset_work); + nvme_stop_ctrl(&anv->ctrl); + nvme_remove_namespaces(&anv->ctrl); + apple_nvme_disable(anv, true); + nvme_uninit_ctrl(&anv->ctrl); + + if (apple_rtkit_is_running(anv->rtk)) + apple_rtkit_shutdown(anv->rtk); + + apple_nvme_detach_genpd(anv); + + return 0; +} + +static void apple_nvme_shutdown(struct 
platform_device *pdev)
+{
+	struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+	apple_nvme_disable(anv, true);
+	if (apple_rtkit_is_running(anv->rtk))
+		apple_rtkit_shutdown(anv->rtk);
+}
+
+static int apple_nvme_resume(struct device *dev)
+{
+	struct apple_nvme *anv = dev_get_drvdata(dev);
+
+	return nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_suspend(struct device *dev)
+{
+	struct apple_nvme *anv = dev_get_drvdata(dev);
+	int ret = 0;
+
+	apple_nvme_disable(anv, true);
+
+	if (apple_rtkit_is_running(anv->rtk))
+		ret = apple_rtkit_shutdown(anv->rtk);
+
+	writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+	return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
+				apple_nvme_resume);
+
+static const struct of_device_id apple_nvme_of_match[] = {
+	{ .compatible = "apple,nvme-ans2" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
+
+static struct platform_driver apple_nvme_driver = {
+	.driver = {
+		.name = "nvme-apple",
+		.of_match_table = apple_nvme_of_match,
+		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
+	},
+	.probe = apple_nvme_probe,
+	.remove = apple_nvme_remove,
+	.shutdown = apple_nvme_shutdown,
+};
+module_platform_driver(apple_nvme_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 0000000000..c8a6db7c44
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+struct nvme_dhchap_queue_context {
+	struct list_head entry;
+	struct work_struct auth_work;
+	struct nvme_ctrl *ctrl;
+	struct crypto_shash *shash_tfm;
+	struct crypto_kpp *dh_tfm;
+	void *buf;
+	size_t buf_size;
+	int qid;
+	int error;
+	u32 s1;
+	u32 s2;
+	u16 transaction;
+	u8 status;
+	u8 hash_id;
+	size_t hash_len;
+	u8 dhgroup_id;
+	u8 c1[64];
+	u8 c2[64];
+	u8 response[64];
+	u8 *host_response;
+	u8 *ctrl_key;
+	int ctrl_key_len;
+	u8 *host_key;
+	int host_key_len;
+	u8 *sess_key;
+	int sess_key_len;
+};
+
+#define nvme_auth_flags_from_qid(qid) \
+	(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
+#define nvme_auth_queue_from_qid(ctrl, qid) \
+	(qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
+
+static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
+			    void *data, size_t data_len, bool auth_send)
+{
+	struct nvme_command cmd = {};
+	blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
+	struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+	int ret;
+
+	cmd.auth_common.opcode = nvme_fabrics_command;
+	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+	cmd.auth_common.spsp0 = 0x01;
+	cmd.auth_common.spsp1 = 0x01;
+	if (auth_send) {
+		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+		cmd.auth_send.tl = cpu_to_le32(data_len);
+	} else {
+		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+		cmd.auth_receive.al = cpu_to_le32(data_len);
+	}
+
+	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
+				     qid == 0 ?
NVME_QID_ANY : qid, + 0, flags); + if (ret > 0) + dev_warn(ctrl->device, + "qid %d auth_send failed with status %d\n", qid, ret); + else if (ret < 0) + dev_err(ctrl->device, + "qid %d auth_send failed with error %d\n", qid, ret); + return ret; +} + +static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid, + struct nvmf_auth_dhchap_failure_data *data, + u16 transaction, u8 expected_msg) +{ + dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n", + __func__, qid, data->auth_type, data->auth_id); + + if (data->auth_type == NVME_AUTH_COMMON_MESSAGES && + data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { + return data->rescode_exp; + } + if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES || + data->auth_id != expected_msg) { + dev_warn(ctrl->device, + "qid %d invalid message %02x/%02x\n", + qid, data->auth_type, data->auth_id); + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; + } + if (le16_to_cpu(data->t_id) != transaction) { + dev_warn(ctrl->device, + "qid %d invalid transaction ID %d\n", + qid, le16_to_cpu(data->t_id)); + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; + } + return 0; +} + +static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_negotiate_data *data = chap->buf; + size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol); + + if (chap->buf_size < size) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return -EINVAL; + } + memset((u8 *)chap->buf, 0, size); + data->auth_type = NVME_AUTH_COMMON_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; + data->t_id = cpu_to_le16(chap->transaction); + data->sc_c = 0; /* No secure channel concatenation */ + data->napd = 1; + data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID; + data->auth_protocol[0].dhchap.halen = 3; + data->auth_protocol[0].dhchap.dhlen = 6; + data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256; + data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384; + data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512; + data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL; + data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048; + data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072; + data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096; + data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144; + data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192; + + return size; +} + +static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_challenge_data *data = chap->buf; + u16 dhvlen = le16_to_cpu(data->dhvlen); + size_t size = sizeof(*data) + data->hl + dhvlen; + const char *gid_name = nvme_auth_dhgroup_name(data->dhgid); + const char *hmac_name, *kpp_name; + + if (chap->buf_size < size) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return NVME_SC_INVALID_FIELD; + } + + hmac_name = nvme_auth_hmac_name(data->hashid); + if (!hmac_name) { + dev_warn(ctrl->device, + "qid %d: invalid HASH ID %d\n", + chap->qid, data->hashid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; + return NVME_SC_INVALID_FIELD; + } + + if (chap->hash_id == data->hashid && chap->shash_tfm && + !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) && + crypto_shash_digestsize(chap->shash_tfm) == data->hl) { + dev_dbg(ctrl->device, + "qid %d: reuse existing hash %s\n", + chap->qid, 
hmac_name); + goto select_kpp; + } + + /* Reset if hash cannot be reused */ + if (chap->shash_tfm) { + crypto_free_shash(chap->shash_tfm); + chap->hash_id = 0; + chap->hash_len = 0; + } + chap->shash_tfm = crypto_alloc_shash(hmac_name, 0, + CRYPTO_ALG_ALLOCATES_MEMORY); + if (IS_ERR(chap->shash_tfm)) { + dev_warn(ctrl->device, + "qid %d: failed to allocate hash %s, error %ld\n", + chap->qid, hmac_name, PTR_ERR(chap->shash_tfm)); + chap->shash_tfm = NULL; + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return NVME_SC_AUTH_REQUIRED; + } + + if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) { + dev_warn(ctrl->device, + "qid %d: invalid hash length %d\n", + chap->qid, data->hl); + crypto_free_shash(chap->shash_tfm); + chap->shash_tfm = NULL; + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; + return NVME_SC_AUTH_REQUIRED; + } + + /* Reset host response if the hash had been changed */ + if (chap->hash_id != data->hashid) { + kfree(chap->host_response); + chap->host_response = NULL; + } + + chap->hash_id = data->hashid; + chap->hash_len = data->hl; + dev_dbg(ctrl->device, "qid %d: selected hash %s\n", + chap->qid, hmac_name); + +select_kpp: + kpp_name = nvme_auth_dhgroup_kpp(data->dhgid); + if (!kpp_name) { + dev_warn(ctrl->device, + "qid %d: invalid DH group id %d\n", + chap->qid, data->dhgid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; + /* Leave previous dh_tfm intact */ + return NVME_SC_AUTH_REQUIRED; + } + + /* Clear host and controller key to avoid accidental reuse */ + kfree_sensitive(chap->host_key); + chap->host_key = NULL; + chap->host_key_len = 0; + kfree_sensitive(chap->ctrl_key); + chap->ctrl_key = NULL; + chap->ctrl_key_len = 0; + + if (chap->dhgroup_id == data->dhgid && + (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) { + dev_dbg(ctrl->device, + "qid %d: reuse existing DH group %s\n", + chap->qid, gid_name); + goto skip_kpp; + } + + /* Reset dh_tfm if it can't be reused */ + if (chap->dh_tfm) { + crypto_free_kpp(chap->dh_tfm); + chap->dh_tfm = NULL; + } + + if (data->dhgid != NVME_AUTH_DHGROUP_NULL) { + if (dhvlen == 0) { + dev_warn(ctrl->device, + "qid %d: empty DH value\n", + chap->qid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; + return NVME_SC_INVALID_FIELD; + } + + chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0); + if (IS_ERR(chap->dh_tfm)) { + int ret = PTR_ERR(chap->dh_tfm); + + dev_warn(ctrl->device, + "qid %d: error %d initializing DH group %s\n", + chap->qid, ret, gid_name); + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; + chap->dh_tfm = NULL; + return NVME_SC_AUTH_REQUIRED; + } + dev_dbg(ctrl->device, "qid %d: selected DH group %s\n", + chap->qid, gid_name); + } else if (dhvlen != 0) { + dev_warn(ctrl->device, + "qid %d: invalid DH value for NULL DH\n", + chap->qid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return NVME_SC_INVALID_FIELD; + } + chap->dhgroup_id = data->dhgid; + +skip_kpp: + chap->s1 = le32_to_cpu(data->seqnum); + memcpy(chap->c1, data->cval, chap->hash_len); + if (dhvlen) { + chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL); + if (!chap->ctrl_key) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return NVME_SC_AUTH_REQUIRED; + } + chap->ctrl_key_len = dhvlen; + memcpy(chap->ctrl_key, data->cval + chap->hash_len, + dhvlen); + dev_dbg(ctrl->device, "ctrl public key %*ph\n", + (int)chap->ctrl_key_len, chap->ctrl_key); + } + + return 0; +} + +static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct 
nvmf_auth_dhchap_reply_data *data = chap->buf; + size_t size = sizeof(*data); + + size += 2 * chap->hash_len; + + if (chap->host_key_len) + size += chap->host_key_len; + + if (chap->buf_size < size) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return -EINVAL; + } + + memset(chap->buf, 0, size); + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY; + data->t_id = cpu_to_le16(chap->transaction); + data->hl = chap->hash_len; + data->dhvlen = cpu_to_le16(chap->host_key_len); + memcpy(data->rval, chap->response, chap->hash_len); + if (ctrl->ctrl_key) { + get_random_bytes(chap->c2, chap->hash_len); + data->cvalid = 1; + chap->s2 = nvme_auth_get_seqnum(); + memcpy(data->rval + chap->hash_len, chap->c2, + chap->hash_len); + dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n", + __func__, chap->qid, (int)chap->hash_len, chap->c2); + } else { + memset(chap->c2, 0, chap->hash_len); + chap->s2 = 0; + } + data->seqnum = cpu_to_le32(chap->s2); + if (chap->host_key_len) { + dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n", + __func__, chap->qid, + chap->host_key_len, chap->host_key); + memcpy(data->rval + 2 * chap->hash_len, chap->host_key, + chap->host_key_len); + } + + return size; +} + +static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_success1_data *data = chap->buf; + size_t size = sizeof(*data); + + if (ctrl->ctrl_key) + size += chap->hash_len; + + if (chap->buf_size < size) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return NVME_SC_INVALID_FIELD; + } + + if (data->hl != chap->hash_len) { + dev_warn(ctrl->device, + "qid %d: invalid hash length %u\n", + chap->qid, data->hl); + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; + return NVME_SC_INVALID_FIELD; + } + + /* Just print out information for the admin queue */ + if (chap->qid == 0) + dev_info(ctrl->device, + "qid 0: authenticated with hash %s dhgroup %s\n", + nvme_auth_hmac_name(chap->hash_id), + nvme_auth_dhgroup_name(chap->dhgroup_id)); + + if (!data->rvalid) + return 0; + + /* Validate controller response */ + if (memcmp(chap->response, data->rval, data->hl)) { + dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n", + __func__, chap->qid, (int)chap->hash_len, data->rval); + dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n", + __func__, chap->qid, (int)chap->hash_len, + chap->response); + dev_warn(ctrl->device, + "qid %d: controller authentication failed\n", + chap->qid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return NVME_SC_AUTH_REQUIRED; + } + + /* Just print out information for the admin queue */ + if (chap->qid == 0) + dev_info(ctrl->device, + "qid 0: controller authenticated\n"); + return 0; +} + +static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_success2_data *data = chap->buf; + size_t size = sizeof(*data); + + memset(chap->buf, 0, size); + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; + data->t_id = cpu_to_le16(chap->transaction); + + return size; +} + +static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_failure_data *data = chap->buf; + size_t size = sizeof(*data); + + memset(chap->buf, 0, size); + data->auth_type = NVME_AUTH_COMMON_MESSAGES; + data->auth_id = 
NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; + data->t_id = cpu_to_le16(chap->transaction); + data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; + data->rescode_exp = chap->status; + + return size; +} + +static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + SHASH_DESC_ON_STACK(shash, chap->shash_tfm); + u8 buf[4], *challenge = chap->c1; + int ret; + + dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n", + __func__, chap->qid, chap->s1, chap->transaction); + + if (!chap->host_response) { + chap->host_response = nvme_auth_transform_key(ctrl->host_key, + ctrl->opts->host->nqn); + if (IS_ERR(chap->host_response)) { + ret = PTR_ERR(chap->host_response); + chap->host_response = NULL; + return ret; + } + } else { + dev_dbg(ctrl->device, "%s: qid %d re-using host response\n", + __func__, chap->qid); + } + + ret = crypto_shash_setkey(chap->shash_tfm, + chap->host_response, ctrl->host_key->len); + if (ret) { + dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", + chap->qid, ret); + goto out; + } + + if (chap->dh_tfm) { + challenge = kmalloc(chap->hash_len, GFP_KERNEL); + if (!challenge) { + ret = -ENOMEM; + goto out; + } + ret = nvme_auth_augmented_challenge(chap->hash_id, + chap->sess_key, + chap->sess_key_len, + chap->c1, challenge, + chap->hash_len); + if (ret) + goto out; + } + + shash->tfm = chap->shash_tfm; + ret = crypto_shash_init(shash); + if (ret) + goto out; + ret = crypto_shash_update(shash, challenge, chap->hash_len); + if (ret) + goto out; + put_unaligned_le32(chap->s1, buf); + ret = crypto_shash_update(shash, buf, 4); + if (ret) + goto out; + put_unaligned_le16(chap->transaction, buf); + ret = crypto_shash_update(shash, buf, 2); + if (ret) + goto out; + memset(buf, 0, sizeof(buf)); + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, "HostHost", 8); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->host->nqn, + strlen(ctrl->opts->host->nqn)); + if (ret) + goto out; + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, + strlen(ctrl->opts->subsysnqn)); + if (ret) + goto out; + ret = crypto_shash_final(shash, chap->response); +out: + if (challenge != chap->c1) + kfree(challenge); + return ret; +} + +static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + SHASH_DESC_ON_STACK(shash, chap->shash_tfm); + u8 *ctrl_response; + u8 buf[4], *challenge = chap->c2; + int ret; + + ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, + ctrl->opts->subsysnqn); + if (IS_ERR(ctrl_response)) { + ret = PTR_ERR(ctrl_response); + return ret; + } + ret = crypto_shash_setkey(chap->shash_tfm, + ctrl_response, ctrl->ctrl_key->len); + if (ret) { + dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", + chap->qid, ret); + goto out; + } + + if (chap->dh_tfm) { + challenge = kmalloc(chap->hash_len, GFP_KERNEL); + if (!challenge) { + ret = -ENOMEM; + goto out; + } + ret = nvme_auth_augmented_challenge(chap->hash_id, + chap->sess_key, + chap->sess_key_len, + chap->c2, challenge, + chap->hash_len); + if (ret) + goto out; + } + dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n", + __func__, chap->qid, chap->s2, chap->transaction); + dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n", + __func__, chap->qid, (int)chap->hash_len, challenge); + dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n", + 
__func__, chap->qid, ctrl->opts->subsysnqn); + dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n", + __func__, chap->qid, ctrl->opts->host->nqn); + shash->tfm = chap->shash_tfm; + ret = crypto_shash_init(shash); + if (ret) + goto out; + ret = crypto_shash_update(shash, challenge, chap->hash_len); + if (ret) + goto out; + put_unaligned_le32(chap->s2, buf); + ret = crypto_shash_update(shash, buf, 4); + if (ret) + goto out; + put_unaligned_le16(chap->transaction, buf); + ret = crypto_shash_update(shash, buf, 2); + if (ret) + goto out; + memset(buf, 0, 4); + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, "Controller", 10); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, + strlen(ctrl->opts->subsysnqn)); + if (ret) + goto out; + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->host->nqn, + strlen(ctrl->opts->host->nqn)); + if (ret) + goto out; + ret = crypto_shash_final(shash, chap->response); +out: + if (challenge != chap->c2) + kfree(challenge); + kfree(ctrl_response); + return ret; +} + +static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + int ret; + + if (chap->host_key && chap->host_key_len) { + dev_dbg(ctrl->device, + "qid %d: reusing host key\n", chap->qid); + goto gen_sesskey; + } + ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id); + if (ret < 0) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return ret; + } + + chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm); + + chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL); + if (!chap->host_key) { + chap->host_key_len = 0; + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return -ENOMEM; + } + ret = nvme_auth_gen_pubkey(chap->dh_tfm, + chap->host_key, chap->host_key_len); + if (ret) { + dev_dbg(ctrl->device, + "failed to generate public key, error %d\n", ret); + kfree(chap->host_key); + chap->host_key = NULL; + chap->host_key_len = 0; + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return ret; + } + +gen_sesskey: + chap->sess_key_len = chap->host_key_len; + chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL); + if (!chap->sess_key) { + chap->sess_key_len = 0; + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return -ENOMEM; + } + + ret = nvme_auth_gen_shared_secret(chap->dh_tfm, + chap->ctrl_key, chap->ctrl_key_len, + chap->sess_key, chap->sess_key_len); + if (ret) { + dev_dbg(ctrl->device, + "failed to generate shared secret, error %d\n", ret); + kfree_sensitive(chap->sess_key); + chap->sess_key = NULL; + chap->sess_key_len = 0; + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return ret; + } + dev_dbg(ctrl->device, "shared secret %*ph\n", + (int)chap->sess_key_len, chap->sess_key); + return 0; +} + +static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap) +{ + kfree_sensitive(chap->host_response); + chap->host_response = NULL; + kfree_sensitive(chap->host_key); + chap->host_key = NULL; + chap->host_key_len = 0; + kfree_sensitive(chap->ctrl_key); + chap->ctrl_key = NULL; + chap->ctrl_key_len = 0; + kfree_sensitive(chap->sess_key); + chap->sess_key = NULL; + chap->sess_key_len = 0; + chap->status = 0; + chap->error = 0; + chap->s1 = 0; + chap->s2 = 0; + chap->transaction = 0; + memset(chap->c1, 0, sizeof(chap->c1)); + memset(chap->c2, 0, sizeof(chap->c2)); +} + +static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap) +{ + 
__nvme_auth_reset(chap); + if (chap->shash_tfm) + crypto_free_shash(chap->shash_tfm); + if (chap->dh_tfm) + crypto_free_kpp(chap->dh_tfm); + kfree_sensitive(chap->ctrl_key); + kfree_sensitive(chap->host_key); + kfree_sensitive(chap->sess_key); + kfree_sensitive(chap->host_response); + kfree(chap->buf); + kfree(chap); +} + +static void __nvme_auth_work(struct work_struct *work) +{ + struct nvme_dhchap_queue_context *chap = + container_of(work, struct nvme_dhchap_queue_context, auth_work); + struct nvme_ctrl *ctrl = chap->ctrl; + size_t tl; + int ret = 0; + + chap->transaction = ctrl->transaction++; + + /* DH-HMAC-CHAP Step 1: send negotiate */ + dev_dbg(ctrl->device, "%s: qid %d send negotiate\n", + __func__, chap->qid); + ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap); + if (ret < 0) { + chap->error = ret; + return; + } + tl = ret; + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + if (ret) { + chap->error = ret; + return; + } + + /* DH-HMAC-CHAP Step 2: receive challenge */ + dev_dbg(ctrl->device, "%s: qid %d receive challenge\n", + __func__, chap->qid); + + memset(chap->buf, 0, chap->buf_size); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false); + if (ret) { + dev_warn(ctrl->device, + "qid %d failed to receive challenge, %s %d\n", + chap->qid, ret < 0 ? "error" : "nvme status", ret); + chap->error = ret; + return; + } + ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction, + NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE); + if (ret) { + chap->status = ret; + chap->error = NVME_SC_AUTH_REQUIRED; + return; + } + + ret = nvme_auth_process_dhchap_challenge(ctrl, chap); + if (ret) { + /* Invalid challenge parameters */ + chap->error = ret; + goto fail2; + } + + if (chap->ctrl_key_len) { + dev_dbg(ctrl->device, + "%s: qid %d DH exponential\n", + __func__, chap->qid); + ret = nvme_auth_dhchap_exponential(ctrl, chap); + if (ret) { + chap->error = ret; + goto fail2; + } + } + + dev_dbg(ctrl->device, "%s: qid %d host response\n", + __func__, chap->qid); + ret = nvme_auth_dhchap_setup_host_response(ctrl, chap); + if (ret) { + chap->error = ret; + goto fail2; + } + + /* DH-HMAC-CHAP Step 3: send reply */ + dev_dbg(ctrl->device, "%s: qid %d send reply\n", + __func__, chap->qid); + ret = nvme_auth_set_dhchap_reply_data(ctrl, chap); + if (ret < 0) { + chap->error = ret; + goto fail2; + } + + tl = ret; + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + if (ret) { + chap->error = ret; + goto fail2; + } + + /* DH-HMAC-CHAP Step 4: receive success1 */ + dev_dbg(ctrl->device, "%s: qid %d receive success1\n", + __func__, chap->qid); + + memset(chap->buf, 0, chap->buf_size); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false); + if (ret) { + dev_warn(ctrl->device, + "qid %d failed to receive success1, %s %d\n", + chap->qid, ret < 0 ? 
"error" : "nvme status", ret); + chap->error = ret; + return; + } + ret = nvme_auth_receive_validate(ctrl, chap->qid, + chap->buf, chap->transaction, + NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1); + if (ret) { + chap->status = ret; + chap->error = NVME_SC_AUTH_REQUIRED; + return; + } + + if (ctrl->ctrl_key) { + dev_dbg(ctrl->device, + "%s: qid %d controller response\n", + __func__, chap->qid); + ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap); + if (ret) { + chap->error = ret; + goto fail2; + } + } + + ret = nvme_auth_process_dhchap_success1(ctrl, chap); + if (ret) { + /* Controller authentication failed */ + chap->error = NVME_SC_AUTH_REQUIRED; + goto fail2; + } + + if (ctrl->ctrl_key) { + /* DH-HMAC-CHAP Step 5: send success2 */ + dev_dbg(ctrl->device, "%s: qid %d send success2\n", + __func__, chap->qid); + tl = nvme_auth_set_dhchap_success2_data(ctrl, chap); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + if (ret) + chap->error = ret; + } + if (!ret) { + chap->error = 0; + return; + } + +fail2: + dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", + __func__, chap->qid, chap->status); + tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + /* + * only update error if send failure2 failed and no other + * error had been set during authentication. + */ + if (ret && !chap->error) + chap->error = ret; +} + +int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid) +{ + struct nvme_dhchap_queue_context *chap; + + if (!ctrl->host_key) { + dev_warn(ctrl->device, "qid %d: no key\n", qid); + return -ENOKEY; + } + + if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) { + dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid); + return -ENOKEY; + } + + mutex_lock(&ctrl->dhchap_auth_mutex); + /* Check if the context is already queued */ + list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) { + WARN_ON(!chap->buf); + if (chap->qid == qid) { + dev_dbg(ctrl->device, "qid %d: re-using context\n", qid); + mutex_unlock(&ctrl->dhchap_auth_mutex); + flush_work(&chap->auth_work); + __nvme_auth_reset(chap); + queue_work(nvme_wq, &chap->auth_work); + return 0; + } + } + chap = kzalloc(sizeof(*chap), GFP_KERNEL); + if (!chap) { + mutex_unlock(&ctrl->dhchap_auth_mutex); + return -ENOMEM; + } + chap->qid = (qid == NVME_QID_ANY) ? 0 : qid; + chap->ctrl = ctrl; + + /* + * Allocate a large enough buffer for the entire negotiation: + * 4k should be enough to ffdhe8192. 
+ */ + chap->buf_size = 4096; + chap->buf = kzalloc(chap->buf_size, GFP_KERNEL); + if (!chap->buf) { + mutex_unlock(&ctrl->dhchap_auth_mutex); + kfree(chap); + return -ENOMEM; + } + + INIT_WORK(&chap->auth_work, __nvme_auth_work); + list_add(&chap->entry, &ctrl->dhchap_auth_list); + mutex_unlock(&ctrl->dhchap_auth_mutex); + queue_work(nvme_wq, &chap->auth_work); + return 0; +} +EXPORT_SYMBOL_GPL(nvme_auth_negotiate); + +int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) +{ + struct nvme_dhchap_queue_context *chap; + int ret; + + mutex_lock(&ctrl->dhchap_auth_mutex); + list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) { + if (chap->qid != qid) + continue; + mutex_unlock(&ctrl->dhchap_auth_mutex); + flush_work(&chap->auth_work); + ret = chap->error; + return ret; + } + mutex_unlock(&ctrl->dhchap_auth_mutex); + return -ENXIO; +} +EXPORT_SYMBOL_GPL(nvme_auth_wait); + +void nvme_auth_reset(struct nvme_ctrl *ctrl) +{ + struct nvme_dhchap_queue_context *chap; + + mutex_lock(&ctrl->dhchap_auth_mutex); + list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) { + mutex_unlock(&ctrl->dhchap_auth_mutex); + flush_work(&chap->auth_work); + __nvme_auth_reset(chap); + } + mutex_unlock(&ctrl->dhchap_auth_mutex); +} +EXPORT_SYMBOL_GPL(nvme_auth_reset); + +static void nvme_dhchap_auth_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, dhchap_auth_work); + int ret, q; + + /* Authenticate admin queue first */ + ret = nvme_auth_negotiate(ctrl, 0); + if (ret) { + dev_warn(ctrl->device, + "qid 0: error %d setting up authentication\n", ret); + return; + } + ret = nvme_auth_wait(ctrl, 0); + if (ret) { + dev_warn(ctrl->device, + "qid 0: authentication failed\n"); + return; + } + + for (q = 1; q < ctrl->queue_count; q++) { + ret = nvme_auth_negotiate(ctrl, q); + if (ret) { + dev_warn(ctrl->device, + "qid %d: error %d setting up authentication\n", + q, ret); + break; + } + } + + /* + * Failure is a soft-state; credentials remain valid until + * the controller terminates the connection. 
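+	 * A subsequent nvme_auth_negotiate() on the same qid simply re-uses
+	 * the queued context: it is flushed, reset and re-queued with the
+	 * stored credentials (see nvme_auth_negotiate() above).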
+ */ +} + +void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) +{ + INIT_LIST_HEAD(&ctrl->dhchap_auth_list); + INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work); + mutex_init(&ctrl->dhchap_auth_mutex); + if (!ctrl->opts) + return; + nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key); + nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key); +} +EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl); + +void nvme_auth_stop(struct nvme_ctrl *ctrl) +{ + struct nvme_dhchap_queue_context *chap = NULL, *tmp; + + cancel_work_sync(&ctrl->dhchap_auth_work); + mutex_lock(&ctrl->dhchap_auth_mutex); + list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) + cancel_work_sync(&chap->auth_work); + mutex_unlock(&ctrl->dhchap_auth_mutex); +} +EXPORT_SYMBOL_GPL(nvme_auth_stop); + +void nvme_auth_free(struct nvme_ctrl *ctrl) +{ + struct nvme_dhchap_queue_context *chap = NULL, *tmp; + + mutex_lock(&ctrl->dhchap_auth_mutex); + list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) { + list_del_init(&chap->entry); + flush_work(&chap->auth_work); + __nvme_auth_free(chap); + } + mutex_unlock(&ctrl->dhchap_auth_mutex); + if (ctrl->host_key) { + nvme_auth_free_key(ctrl->host_key); + ctrl->host_key = NULL; + } + if (ctrl->ctrl_key) { + nvme_auth_free_key(ctrl->ctrl_key); + ctrl->ctrl_key = NULL; + } +} +EXPORT_SYMBOL_GPL(nvme_auth_free); diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c new file mode 100644 index 0000000000..c4113b43db --- /dev/null +++ b/drivers/nvme/target/auth.c @@ -0,0 +1,526 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe over Fabrics DH-HMAC-CHAP authentication. + * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions. + * All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nvmet.h" + +int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, + bool set_ctrl) +{ + unsigned char key_hash; + char *dhchap_secret; + + if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1) + return -EINVAL; + if (key_hash > 3) { + pr_warn("Invalid DH-HMAC-CHAP hash id %d\n", + key_hash); + return -EINVAL; + } + if (key_hash > 0) { + /* Validate selected hash algorithm */ + const char *hmac = nvme_auth_hmac_name(key_hash); + + if (!crypto_has_shash(hmac, 0, 0)) { + pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac); + return -ENOTSUPP; + } + } + dhchap_secret = kstrdup(secret, GFP_KERNEL); + if (!dhchap_secret) + return -ENOMEM; + if (set_ctrl) { + host->dhchap_ctrl_secret = strim(dhchap_secret); + host->dhchap_ctrl_key_hash = key_hash; + } else { + host->dhchap_secret = strim(dhchap_secret); + host->dhchap_key_hash = key_hash; + } + return 0; +} + +int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id) +{ + const char *dhgroup_kpp; + int ret = 0; + + pr_debug("%s: ctrl %d selecting dhgroup %d\n", + __func__, ctrl->cntlid, dhgroup_id); + + if (ctrl->dh_tfm) { + if (ctrl->dh_gid == dhgroup_id) { + pr_debug("%s: ctrl %d reuse existing DH group %d\n", + __func__, ctrl->cntlid, dhgroup_id); + return 0; + } + crypto_free_kpp(ctrl->dh_tfm); + ctrl->dh_tfm = NULL; + ctrl->dh_gid = 0; + } + + if (dhgroup_id == NVME_AUTH_DHGROUP_NULL) + return 0; + + dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id); + if (!dhgroup_kpp) { + pr_debug("%s: ctrl %d invalid DH group %d\n", + __func__, ctrl->cntlid, dhgroup_id); + return -EINVAL; + } + ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0); + if 
(IS_ERR(ctrl->dh_tfm)) { + pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n", + __func__, ctrl->cntlid, dhgroup_id, + PTR_ERR(ctrl->dh_tfm)); + ret = PTR_ERR(ctrl->dh_tfm); + ctrl->dh_tfm = NULL; + ctrl->dh_gid = 0; + } else { + ctrl->dh_gid = dhgroup_id; + pr_debug("%s: ctrl %d setup DH group %d\n", + __func__, ctrl->cntlid, ctrl->dh_gid); + ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid); + if (ret < 0) { + pr_debug("%s: ctrl %d failed to generate private key, err %d\n", + __func__, ctrl->cntlid, ret); + kfree_sensitive(ctrl->dh_key); + return ret; + } + ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm); + kfree_sensitive(ctrl->dh_key); + ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL); + if (!ctrl->dh_key) { + pr_warn("ctrl %d failed to allocate public key\n", + ctrl->cntlid); + return -ENOMEM; + } + ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key, + ctrl->dh_keysize); + if (ret < 0) { + pr_warn("ctrl %d failed to generate public key\n", + ctrl->cntlid); + kfree(ctrl->dh_key); + ctrl->dh_key = NULL; + } + } + + return ret; +} + +int nvmet_setup_auth(struct nvmet_ctrl *ctrl) +{ + int ret = 0; + struct nvmet_host_link *p; + struct nvmet_host *host = NULL; + const char *hash_name; + + down_read(&nvmet_config_sem); + if (nvmet_is_disc_subsys(ctrl->subsys)) + goto out_unlock; + + if (ctrl->subsys->allow_any_host) + goto out_unlock; + + list_for_each_entry(p, &ctrl->subsys->hosts, entry) { + pr_debug("check %s\n", nvmet_host_name(p->host)); + if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn)) + continue; + host = p->host; + break; + } + if (!host) { + pr_debug("host %s not found\n", ctrl->hostnqn); + ret = -EPERM; + goto out_unlock; + } + + ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id); + if (ret < 0) + pr_warn("Failed to setup DH group"); + + if (!host->dhchap_secret) { + pr_debug("No authentication provided\n"); + goto out_unlock; + } + + if (host->dhchap_hash_id == ctrl->shash_id) { + pr_debug("Re-use existing hash ID %d\n", + ctrl->shash_id); + } else { + hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); + if (!hash_name) { + pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id); + ret = -EINVAL; + goto out_unlock; + } + ctrl->shash_id = host->dhchap_hash_id; + } + + /* Skip the 'DHHC-1:XX:' prefix */ + nvme_auth_free_key(ctrl->host_key); + ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10, + host->dhchap_key_hash); + if (IS_ERR(ctrl->host_key)) { + ret = PTR_ERR(ctrl->host_key); + ctrl->host_key = NULL; + goto out_free_hash; + } + pr_debug("%s: using hash %s key %*ph\n", __func__, + ctrl->host_key->hash > 0 ? + nvme_auth_hmac_name(ctrl->host_key->hash) : "none", + (int)ctrl->host_key->len, ctrl->host_key->key); + + nvme_auth_free_key(ctrl->ctrl_key); + if (!host->dhchap_ctrl_secret) { + ctrl->ctrl_key = NULL; + goto out_unlock; + } + + ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10, + host->dhchap_ctrl_key_hash); + if (IS_ERR(ctrl->ctrl_key)) { + ret = PTR_ERR(ctrl->ctrl_key); + ctrl->ctrl_key = NULL; + goto out_free_hash; + } + pr_debug("%s: using ctrl hash %s key %*ph\n", __func__, + ctrl->ctrl_key->hash > 0 ? 
+ nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none", + (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key); + +out_free_hash: + if (ret) { + if (ctrl->host_key) { + nvme_auth_free_key(ctrl->host_key); + ctrl->host_key = NULL; + } + ctrl->shash_id = 0; + } +out_unlock: + up_read(&nvmet_config_sem); + + return ret; +} + +void nvmet_auth_sq_free(struct nvmet_sq *sq) +{ + cancel_delayed_work(&sq->auth_expired_work); + kfree(sq->dhchap_c1); + sq->dhchap_c1 = NULL; + kfree(sq->dhchap_c2); + sq->dhchap_c2 = NULL; + kfree(sq->dhchap_skey); + sq->dhchap_skey = NULL; +} + +void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) +{ + ctrl->shash_id = 0; + + if (ctrl->dh_tfm) { + crypto_free_kpp(ctrl->dh_tfm); + ctrl->dh_tfm = NULL; + ctrl->dh_gid = 0; + } + kfree_sensitive(ctrl->dh_key); + ctrl->dh_key = NULL; + + if (ctrl->host_key) { + nvme_auth_free_key(ctrl->host_key); + ctrl->host_key = NULL; + } + if (ctrl->ctrl_key) { + nvme_auth_free_key(ctrl->ctrl_key); + ctrl->ctrl_key = NULL; + } +} + +bool nvmet_check_auth_status(struct nvmet_req *req) +{ + if (req->sq->ctrl->host_key && + !req->sq->authenticated) + return false; + return true; +} + +int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, + unsigned int shash_len) +{ + struct crypto_shash *shash_tfm; + struct shash_desc *shash; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + const char *hash_name; + u8 *challenge = req->sq->dhchap_c1, *host_response; + u8 buf[4]; + int ret; + + hash_name = nvme_auth_hmac_name(ctrl->shash_id); + if (!hash_name) { + pr_warn("Hash ID %d invalid\n", ctrl->shash_id); + return -EINVAL; + } + + shash_tfm = crypto_alloc_shash(hash_name, 0, 0); + if (IS_ERR(shash_tfm)) { + pr_err("failed to allocate shash %s\n", hash_name); + return PTR_ERR(shash_tfm); + } + + if (shash_len != crypto_shash_digestsize(shash_tfm)) { + pr_debug("%s: hash len mismatch (len %d digest %d)\n", + __func__, shash_len, + crypto_shash_digestsize(shash_tfm)); + ret = -EINVAL; + goto out_free_tfm; + } + + host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn); + if (IS_ERR(host_response)) { + ret = PTR_ERR(host_response); + goto out_free_tfm; + } + + ret = crypto_shash_setkey(shash_tfm, host_response, + ctrl->host_key->len); + if (ret) + goto out_free_response; + + if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) { + challenge = kmalloc(shash_len, GFP_KERNEL); + if (!challenge) { + ret = -ENOMEM; + goto out_free_response; + } + ret = nvme_auth_augmented_challenge(ctrl->shash_id, + req->sq->dhchap_skey, + req->sq->dhchap_skey_len, + req->sq->dhchap_c1, + challenge, shash_len); + if (ret) + goto out_free_response; + } + + pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", + ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, + req->sq->dhchap_tid); + + shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), + GFP_KERNEL); + if (!shash) { + ret = -ENOMEM; + goto out_free_response; + } + shash->tfm = shash_tfm; + ret = crypto_shash_init(shash); + if (ret) + goto out; + ret = crypto_shash_update(shash, challenge, shash_len); + if (ret) + goto out; + put_unaligned_le32(req->sq->dhchap_s1, buf); + ret = crypto_shash_update(shash, buf, 4); + if (ret) + goto out; + put_unaligned_le16(req->sq->dhchap_tid, buf); + ret = crypto_shash_update(shash, buf, 2); + if (ret) + goto out; + memset(buf, 0, 4); + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, "HostHost", 8); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); + if (ret) + goto 
out;
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->subsysnqn,
+				  strlen(ctrl->subsysnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_final(shash, response);
+out:
+	if (challenge != req->sq->dhchap_c1)
+		kfree(challenge);
+	kfree(shash);
+out_free_response:
+	kfree_sensitive(host_response);
+out_free_tfm:
+	crypto_free_shash(shash_tfm);
+	return ret;
+}
+
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+			 unsigned int shash_len)
+{
+	struct crypto_shash *shash_tfm;
+	struct shash_desc *shash;
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	const char *hash_name;
+	u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+	u8 buf[4];
+	int ret;
+
+	hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+	if (!hash_name) {
+		pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+		return -EINVAL;
+	}
+
+	shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+	if (IS_ERR(shash_tfm)) {
+		pr_err("failed to allocate shash %s\n", hash_name);
+		return PTR_ERR(shash_tfm);
+	}
+
+	if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+		pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+			 __func__, shash_len,
+			 crypto_shash_digestsize(shash_tfm));
+		ret = -EINVAL;
+		goto out_free_tfm;
+	}
+
+	ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+						ctrl->subsysnqn);
+	if (IS_ERR(ctrl_response)) {
+		ret = PTR_ERR(ctrl_response);
+		goto out_free_tfm;
+	}
+
+	ret = crypto_shash_setkey(shash_tfm, ctrl_response,
+				  ctrl->ctrl_key->len);
+	if (ret)
+		goto out_free_response;
+
+	if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+		challenge = kmalloc(shash_len, GFP_KERNEL);
+		if (!challenge) {
+			ret = -ENOMEM;
+			goto out_free_response;
+		}
+		ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+						    req->sq->dhchap_skey,
+						    req->sq->dhchap_skey_len,
+						    req->sq->dhchap_c2,
+						    challenge, shash_len);
+		if (ret)
+			goto out_free_response;
+	}
+
+	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+			GFP_KERNEL);
+	if (!shash) {
+		ret = -ENOMEM;
+		goto out_free_response;
+	}
+	shash->tfm = shash_tfm;
+
+	ret = crypto_shash_init(shash);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, challenge, shash_len);
+	if (ret)
+		goto out;
+	put_unaligned_le32(req->sq->dhchap_s2, buf);
+	ret = crypto_shash_update(shash, buf, 4);
+	if (ret)
+		goto out;
+	put_unaligned_le16(req->sq->dhchap_tid, buf);
+	ret = crypto_shash_update(shash, buf, 2);
+	if (ret)
+		goto out;
+	memset(buf, 0, 4);
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, "Controller", 10);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->subsysnqn,
+				  strlen(ctrl->subsysnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_final(shash, response);
+out:
+	if (challenge != req->sq->dhchap_c2)
+		kfree(challenge);
+	kfree(shash);
+out_free_response:
+	kfree_sensitive(ctrl_response);
+out_free_tfm:
+	crypto_free_shash(shash_tfm);
+	return ret;
+}
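+
+/*
+ * For reference, both helpers above derive the DH-HMAC-CHAP response the
+ * same way (sketch, following the code above; Kt denotes the secret after
+ * nvme_auth_transform_key()):
+ *
+ *   R = HMAC(Kt, C || le32(seqnum) || le16(transaction) || 0x00 ||
+ *                "HostHost" or "Controller" || NQN1 || 0x00 || NQN2)
+ *
+ * where C is the challenge (augmented first when a DH group is in use) and
+ * NQN1/NQN2 are the host and subsystem NQNs, the sender's NQN first.
+ */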
+
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+				u8 *buf, int buf_size)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	int ret = 0;
+
+	if (!ctrl->dh_key) {
+		pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
+		return -ENOKEY;
+	}
+	if (buf_size != ctrl->dh_keysize) {
+		pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
+			ctrl->cntlid, ctrl->dh_keysize, buf_size);
+		ret = -EINVAL;
+	} else {
+		memcpy(buf, ctrl->dh_key, buf_size);
+		pr_debug("%s: ctrl %d public key %*ph\n", __func__,
+			 ctrl->cntlid, (int)buf_size, buf);
+	}
+
+	return ret;
+}
+
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+			    u8 *pkey, int pkey_size)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	int ret;
+
+	req->sq->dhchap_skey_len = ctrl->dh_keysize;
+	req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
+	if (!req->sq->dhchap_skey)
+		return -ENOMEM;
+	ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
+					  pkey, pkey_size,
+					  req->sq->dhchap_skey,
+					  req->sq->dhchap_skey_len);
+	if (ret)
+		pr_debug("failed to compute shared secret, err %d\n", ret);
+	else
+		pr_debug("%s: shared secret %*ph\n", __func__,
+			 (int)req->sq->dhchap_skey_len,
+			 req->sq->dhchap_skey);
+
+	return ret;
+}
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 0000000000..ebdf9aa810
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include
+#include
+#include
+#include
+#include
+#include "nvmet.h"
+
+static void nvmet_auth_expired_work(struct work_struct *work)
+{
+	struct nvmet_sq *sq = container_of(to_delayed_work(work),
+			struct nvmet_sq, auth_expired_work);
+
+	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
+		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
+	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+	sq->dhchap_tid = -1;
+}
+
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+	u32 result = le32_to_cpu(req->cqe->result.u32);
+
+	/* Initialize in-band authentication */
+	INIT_DELAYED_WORK(&req->sq->auth_expired_work,
+			  nvmet_auth_expired_work);
+	req->sq->authenticated = false;
+	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+	result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
+	req->cqe->result.u32 = cpu_to_le32(result);
+}
+
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmf_auth_dhchap_negotiate_data *data = d;
+	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
+
+	pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
+		 __func__, ctrl->cntlid, req->sq->qid,
+		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+		 data->auth_protocol[0].dhchap.halen,
+		 data->auth_protocol[0].dhchap.dhlen);
+	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+	if (data->sc_c)
+		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+	if (data->napd != 1)
+		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+	if (data->auth_protocol[0].dhchap.authid !=
+	    NVME_AUTH_DHCHAP_AUTH_ID)
+		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
+
+		if (!fallback_hash_id &&
+		    crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
+			fallback_hash_id = host_hmac_id;
+		if (ctrl->shash_id != host_hmac_id)
+			continue;
+		hash_id = ctrl->shash_id;
+		break;
+	}
+	if (hash_id == 0) {
+		if (fallback_hash_id == 0) {
+			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+				 __func__, ctrl->cntlid, req->sq->qid);
+			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+		}
+		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
+			 __func__, ctrl->cntlid, req->sq->qid,
+			 nvme_auth_hmac_name(fallback_hash_id));
+		ctrl->shash_id = fallback_hash_id;
+	}
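+
+	/*
+	 * Note: the hash ids occupy the first entries of idlist while, in
+	 * the negotiate descriptor layout used here, the DH group ids start
+	 * at a fixed offset of 30 entries (hence the "i + 30" below).
+	 */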
+	dhgid = -1;
+	fallback_dhgid = -1;
+	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
+		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
+
+		if (tmp_dhgid == ctrl->dh_gid) {
+			dhgid = tmp_dhgid;
+			break;
+		}
+		if (fallback_dhgid < 0) {
+			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
+
+			if (crypto_has_kpp(kpp, 0, 0))
+				fallback_dhgid = tmp_dhgid;
+		}
+	}
+	if (dhgid < 0) {
+		if (fallback_dhgid < 0) {
+			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
+				 __func__, ctrl->cntlid, req->sq->qid);
+			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+		}
+		pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
+			 __func__, ctrl->cntlid, req->sq->qid,
+			 nvme_auth_dhgroup_name(fallback_dhgid));
+		ctrl->dh_gid = fallback_dhgid;
+	}
+	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
+		 __func__, ctrl->cntlid, req->sq->qid,
+		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+	return 0;
+}
+
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmf_auth_dhchap_reply_data *data = d;
+	u16 dhvlen = le16_to_cpu(data->dhvlen);
+	u8 *response;
+
+	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
+		 __func__, ctrl->cntlid, req->sq->qid,
+		 data->hl, data->cvalid, dhvlen);
+
+	if (dhvlen) {
+		if (!ctrl->dh_tfm)
+			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
+					    dhvlen) < 0)
+			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+	}
+
+	response = kmalloc(data->hl, GFP_KERNEL);
+	if (!response)
+		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+	if (!ctrl->host_key) {
+		pr_warn("ctrl %d qid %d no host key\n",
+			ctrl->cntlid, req->sq->qid);
+		kfree(response);
+		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+	}
+	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+		pr_debug("ctrl %d qid %d host hash failed\n",
+			 ctrl->cntlid, req->sq->qid);
+		kfree(response);
+		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+	}
+
+	if (memcmp(data->rval, response, data->hl)) {
+		pr_info("ctrl %d qid %d host response mismatch\n",
+			ctrl->cntlid, req->sq->qid);
+		kfree(response);
+		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+	}
+	kfree(response);
+	pr_debug("%s: ctrl %d qid %d host authenticated\n",
+		 __func__, ctrl->cntlid, req->sq->qid);
+	if (data->cvalid) {
+		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
+					     GFP_KERNEL);
+		if (!req->sq->dhchap_c2)
+			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
+			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
+			 req->sq->dhchap_c2);
+		req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+	} else {
+		req->sq->authenticated = true;
+		req->sq->dhchap_c2 = NULL;
+	}
+
+	return 0;
+}
+
+static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+{
+	struct nvmf_auth_dhchap_failure_data *data = d;
+
+	return data->rescode_exp;
+}
+
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmf_auth_dhchap_success2_data *data;
+	void *d;
+	u32 tl;
+	u16 status = 0;
+
+	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_send_command, secp);
+		goto done;
+	}
+	if (req->cmd->auth_send.spsp0 != 0x01) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		
req->error_loc = + offsetof(struct nvmf_auth_send_command, spsp0); + goto done; + } + if (req->cmd->auth_send.spsp1 != 0x01) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + req->error_loc = + offsetof(struct nvmf_auth_send_command, spsp1); + goto done; + } + tl = le32_to_cpu(req->cmd->auth_send.tl); + if (!tl) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + req->error_loc = + offsetof(struct nvmf_auth_send_command, tl); + goto done; + } + if (!nvmet_check_transfer_len(req, tl)) { + pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl); + return; + } + + d = kmalloc(tl, GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto done; + } + + status = nvmet_copy_from_sgl(req, 0, d, tl); + if (status) { + kfree(d); + goto done; + } + + data = d; + pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__, + ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id, + req->sq->dhchap_step); + if (data->auth_type != NVME_AUTH_COMMON_MESSAGES && + data->auth_type != NVME_AUTH_DHCHAP_MESSAGES) + goto done_failure1; + if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) { + if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) { + /* Restart negotiation */ + pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__, + ctrl->cntlid, req->sq->qid); + if (!req->sq->qid) { + if (nvmet_setup_auth(ctrl) < 0) { + status = NVME_SC_INTERNAL; + pr_err("ctrl %d qid 0 failed to setup" + "re-authentication", + ctrl->cntlid); + goto done_failure1; + } + } + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; + } else if (data->auth_id != req->sq->dhchap_step) + goto done_failure1; + /* Validate negotiation parameters */ + status = nvmet_auth_negotiate(req, d); + if (status == 0) + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; + else { + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; + req->sq->dhchap_status = status; + status = 0; + } + goto done_kfree; + } + if (data->auth_id != req->sq->dhchap_step) { + pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n", + __func__, ctrl->cntlid, req->sq->qid, + data->auth_id, req->sq->dhchap_step); + goto done_failure1; + } + if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) { + pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n", + __func__, ctrl->cntlid, req->sq->qid, + le16_to_cpu(data->t_id), + req->sq->dhchap_tid); + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; + req->sq->dhchap_status = + NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + goto done_kfree; + } + + switch (data->auth_id) { + case NVME_AUTH_DHCHAP_MESSAGE_REPLY: + status = nvmet_auth_reply(req, d); + if (status == 0) + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; + else { + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; + req->sq->dhchap_status = status; + status = 0; + } + goto done_kfree; + break; + case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2: + req->sq->authenticated = true; + pr_debug("%s: ctrl %d qid %d ctrl authenticated\n", + __func__, ctrl->cntlid, req->sq->qid); + goto done_kfree; + break; + case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2: + status = nvmet_auth_failure2(req, d); + if (status) { + pr_warn("ctrl %d qid %d: authentication failed (%d)\n", + ctrl->cntlid, req->sq->qid, status); + req->sq->dhchap_status = status; + req->sq->authenticated = false; + status = 0; + } + goto done_kfree; + break; + default: + req->sq->dhchap_status = + NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; + req->sq->authenticated = false; + goto 
done_kfree; + break; + } +done_failure1: + req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; + +done_kfree: + kfree(d); +done: + pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__, + ctrl->cntlid, req->sq->qid, + req->sq->dhchap_status, req->sq->dhchap_step); + if (status) + pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n", + __func__, ctrl->cntlid, req->sq->qid, + status, req->error_loc); + req->cqe->result.u64 = 0; + nvmet_req_complete(req, status); + if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && + req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { + unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120; + + mod_delayed_work(system_wq, &req->sq->auth_expired_work, + auth_expire_secs * HZ); + return; + } + /* Final states, clear up variables */ + nvmet_auth_sq_free(req->sq); + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) + nvmet_ctrl_fatal_error(ctrl); +} + +static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al) +{ + struct nvmf_auth_dhchap_challenge_data *data = d; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + int ret = 0; + int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); + int data_size = sizeof(*d) + hash_len; + + if (ctrl->dh_tfm) + data_size += ctrl->dh_keysize; + if (al < data_size) { + pr_debug("%s: buffer too small (al %d need %d)\n", __func__, + al, data_size); + return -EINVAL; + } + memset(data, 0, data_size); + req->sq->dhchap_s1 = nvme_auth_get_seqnum(); + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; + data->t_id = cpu_to_le16(req->sq->dhchap_tid); + data->hashid = ctrl->shash_id; + data->hl = hash_len; + data->seqnum = cpu_to_le32(req->sq->dhchap_s1); + req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL); + if (!req->sq->dhchap_c1) + return -ENOMEM; + get_random_bytes(req->sq->dhchap_c1, data->hl); + memcpy(data->cval, req->sq->dhchap_c1, data->hl); + if (ctrl->dh_tfm) { + data->dhgid = ctrl->dh_gid; + data->dhvlen = cpu_to_le16(ctrl->dh_keysize); + ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl, + ctrl->dh_keysize); + } + pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n", + __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, + req->sq->dhchap_tid, data->hl, ctrl->dh_keysize); + return ret; +} + +static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al) +{ + struct nvmf_auth_dhchap_success1_data *data = d; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); + + WARN_ON(al < sizeof(*data)); + memset(data, 0, sizeof(*data)); + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; + data->t_id = cpu_to_le16(req->sq->dhchap_tid); + data->hl = hash_len; + if (req->sq->dhchap_c2) { + if (!ctrl->ctrl_key) { + pr_warn("ctrl %d qid %d no ctrl key\n", + ctrl->cntlid, req->sq->qid); + return NVME_AUTH_DHCHAP_FAILURE_FAILED; + } + if (nvmet_auth_ctrl_hash(req, data->rval, data->hl)) + return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; + data->rvalid = 1; + pr_debug("ctrl %d qid %d response %*ph\n", + ctrl->cntlid, req->sq->qid, data->hl, data->rval); + } + return 0; +} + +static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al) +{ + struct nvmf_auth_dhchap_failure_data *data = d; + + WARN_ON(al < sizeof(*data)); + data->auth_type = NVME_AUTH_COMMON_MESSAGES; + data->auth_id = 
NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; + data->t_id = cpu_to_le16(req->sq->dhchap_tid); + data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; + data->rescode_exp = req->sq->dhchap_status; +} + +void nvmet_execute_auth_receive(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + void *d; + u32 al; + u16 status = 0; + + if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + req->error_loc = + offsetof(struct nvmf_auth_receive_command, secp); + goto done; + } + if (req->cmd->auth_receive.spsp0 != 0x01) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + req->error_loc = + offsetof(struct nvmf_auth_receive_command, spsp0); + goto done; + } + if (req->cmd->auth_receive.spsp1 != 0x01) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + req->error_loc = + offsetof(struct nvmf_auth_receive_command, spsp1); + goto done; + } + al = le32_to_cpu(req->cmd->auth_receive.al); + if (!al) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + req->error_loc = + offsetof(struct nvmf_auth_receive_command, al); + goto done; + } + if (!nvmet_check_transfer_len(req, al)) { + pr_debug("%s: transfer length mismatch (%u)\n", __func__, al); + return; + } + + d = kmalloc(al, GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto done; + } + pr_debug("%s: ctrl %d qid %d step %x\n", __func__, + ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); + switch (req->sq->dhchap_step) { + case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE: + if (nvmet_auth_challenge(req, d, al) < 0) { + pr_warn("ctrl %d qid %d: challenge error (%d)\n", + ctrl->cntlid, req->sq->qid, status); + status = NVME_SC_INTERNAL; + break; + } + if (status) { + req->sq->dhchap_status = status; + nvmet_auth_failure1(req, d, al); + pr_warn("ctrl %d qid %d: challenge status (%x)\n", + ctrl->cntlid, req->sq->qid, + req->sq->dhchap_status); + status = 0; + break; + } + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY; + break; + case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1: + status = nvmet_auth_success1(req, d, al); + if (status) { + req->sq->dhchap_status = status; + req->sq->authenticated = false; + nvmet_auth_failure1(req, d, al); + pr_warn("ctrl %d qid %d: success1 status (%x)\n", + ctrl->cntlid, req->sq->qid, + req->sq->dhchap_status); + break; + } + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; + break; + case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1: + req->sq->authenticated = false; + nvmet_auth_failure1(req, d, al); + pr_warn("ctrl %d qid %d failure1 (%x)\n", + ctrl->cntlid, req->sq->qid, req->sq->dhchap_status); + break; + default: + pr_warn("ctrl %d qid %d unhandled step (%d)\n", + ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; + req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + nvmet_auth_failure1(req, d, al); + status = 0; + break; + } + + status = nvmet_copy_to_sgl(req, 0, d, al); + kfree(d); +done: + req->cqe->result.u64 = 0; + nvmet_req_complete(req, status); + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) + nvmet_auth_sq_free(req->sq); + else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { + nvmet_auth_sq_free(req->sq); + nvmet_ctrl_fatal_error(ctrl); + } +} diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c new file mode 100644 index 0000000000..9b7c871021 --- /dev/null +++ b/drivers/nvmem/apple-efuses.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Apple SoC eFuse driver + * + * Copyright (C) The Asahi Linux 
Contributors + */ + +#include +#include +#include +#include +#include + +struct apple_efuses_priv { + void __iomem *fuses; +}; + +static int apple_efuses_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct apple_efuses_priv *priv = context; + u32 *dst = val; + + while (bytes >= sizeof(u32)) { + *dst++ = readl_relaxed(priv->fuses + offset); + bytes -= sizeof(u32); + offset += sizeof(u32); + } + + return 0; +} + +static int apple_efuses_probe(struct platform_device *pdev) +{ + struct apple_efuses_priv *priv; + struct resource *res; + struct nvmem_config config = { + .dev = &pdev->dev, + .read_only = true, + .reg_read = apple_efuses_read, + .stride = sizeof(u32), + .word_size = sizeof(u32), + .name = "apple_efuses_nvmem", + .id = NVMEM_DEVID_AUTO, + .root_only = true, + }; + + priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->fuses)) + return PTR_ERR(priv->fuses); + + config.priv = priv; + config.size = resource_size(res); + + return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config)); +} + +static const struct of_device_id apple_efuses_of_match[] = { + { .compatible = "apple,efuses", }, + {} +}; + +MODULE_DEVICE_TABLE(of, apple_efuses_of_match); + +static struct platform_driver apple_efuses_driver = { + .driver = { + .name = "apple_efuses", + .of_match_table = apple_efuses_of_match, + }, + .probe = apple_efuses_probe, +}; + +module_platform_driver(apple_efuses_driver); + +MODULE_AUTHOR("Sven Peter "); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/microchip-otpc.c b/drivers/nvmem/microchip-otpc.c new file mode 100644 index 0000000000..436e0dc4f3 --- /dev/null +++ b/drivers/nvmem/microchip-otpc.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OTP Memory controller + * + * Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries + * + * Author: Claudiu Beznea + */ + +#include +#include +#include +#include +#include +#include + +#define MCHP_OTPC_CR (0x0) +#define MCHP_OTPC_CR_READ BIT(6) +#define MCHP_OTPC_MR (0x4) +#define MCHP_OTPC_MR_ADDR GENMASK(31, 16) +#define MCHP_OTPC_AR (0x8) +#define MCHP_OTPC_SR (0xc) +#define MCHP_OTPC_SR_READ BIT(6) +#define MCHP_OTPC_HR (0x20) +#define MCHP_OTPC_HR_SIZE GENMASK(15, 8) +#define MCHP_OTPC_DR (0x24) + +#define MCHP_OTPC_NAME "mchp-otpc" +#define MCHP_OTPC_SIZE (11 * 1024) + +/** + * struct mchp_otpc - OTPC private data structure + * @base: base address + * @dev: struct device pointer + * @packets: list of packets in OTP memory + * @npackets: number of packets in OTP memory + */ +struct mchp_otpc { + void __iomem *base; + struct device *dev; + struct list_head packets; + u32 npackets; +}; + +/** + * struct mchp_otpc_packet - OTPC packet data structure + * @list: list head + * @id: packet ID + * @offset: packet offset (in words) in OTP memory + */ +struct mchp_otpc_packet { + struct list_head list; + u32 id; + u32 offset; +}; + +static struct mchp_otpc_packet *mchp_otpc_id_to_packet(struct mchp_otpc *otpc, + u32 id) +{ + struct mchp_otpc_packet *packet; + + if (id >= otpc->npackets) + return NULL; + + list_for_each_entry(packet, &otpc->packets, list) { + if (packet->id == id) + return packet; + } + + return NULL; +} + +static int mchp_otpc_prepare_read(struct mchp_otpc *otpc, + unsigned int offset) +{ + u32 tmp; + + /* Set address. 
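+	 * The controller latches this word offset in MCHP_OTPC_MR_ADDR and
+	 * transfers the whole packet containing it into its temporary
+	 * buffers, which are then read back through MCHP_OTPC_HR and
+	 * MCHP_OTPC_DR (see the layout comment below).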
*/
+	tmp = readl_relaxed(otpc->base + MCHP_OTPC_MR);
+	tmp &= ~MCHP_OTPC_MR_ADDR;
+	tmp |= FIELD_PREP(MCHP_OTPC_MR_ADDR, offset);
+	writel_relaxed(tmp, otpc->base + MCHP_OTPC_MR);
+
+	/* Set read. */
+	tmp = readl_relaxed(otpc->base + MCHP_OTPC_CR);
+	tmp |= MCHP_OTPC_CR_READ;
+	writel_relaxed(tmp, otpc->base + MCHP_OTPC_CR);
+
+	/* Wait for packet to be transferred into temporary buffers. */
+	return read_poll_timeout(readl_relaxed, tmp, !(tmp & MCHP_OTPC_SR_READ),
+				 10000, 2000, false, otpc->base + MCHP_OTPC_SR);
+}
+
+/*
+ * OTPC memory is organized into packets. Each packet contains a header and
+ * a payload. The header is 4 bytes long and contains the size of the payload.
+ * Payload size varies. The memory footprint is as follows:
+ *
+ * Memory offset      Memory footprint   Packet ID
+ * -------------      ----------------   ---------
+ *
+ * 0x0                +------------+     <-- packet 0
+ *                    |  header 0  |
+ * 0x4                +------------+
+ *                    | payload 0  |
+ *                    .            .
+ *                    .    ...     .
+ *                    .            .
+ * offset1            +------------+     <-- packet 1
+ *                    |  header 1  |
+ * offset1 + 0x4      +------------+
+ *                    | payload 1  |
+ *                    .            .
+ *                    .    ...     .
+ *                    .            .
+ * offset2            +------------+     <-- packet 2
+ *                    .            .
+ *                    .    ...     .
+ *                    .            .
+ * offsetN            +------------+     <-- packet N
+ *                    |  header N  |
+ * offsetN + 0x4      +------------+
+ *                    | payload N  |
+ *                    .            .
+ *                    .    ...     .
+ *                    .            .
+ *                    +------------+
+ *
+ * where offset1, offset2, offsetN depend on the sizes of payload 0,
+ * payload 1, ..., payload N-1.
+ *
+ * Access to the memory is done on a per-packet basis: the control registers
+ * need to be updated with an offset address (within a packet range) and the
+ * data registers will be updated by the controller with the information
+ * contained in that packet. E.g. if the control registers are updated with
+ * any address within the range [offset1, offset2), the data registers are
+ * updated by the controller with packet 1. Header data is accessible through
+ * the MCHP_OTPC_HR register. Payload data is accessible through the
+ * MCHP_OTPC_DR and MCHP_OTPC_AR registers. There is no direct mapping between
+ * the offset requested by software and the offset returned by hardware.
+ *
+ * For this reason, the read function returns the first requested bytes in the
+ * packet. The user has to be aware of the memory footprint before doing the
+ * read request.
+ */
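+
+/*
+ * As a concrete example of the mapping above (sketch, with hypothetical
+ * packet sizes): if packet 0 occupies three words of OTP memory, packet 1
+ * starts at word offset 3, yet a consumer reads it through the nvmem API
+ * at byte offset 1 * stride, i.e. 4 * packet id:
+ *
+ *	u32 hdr;
+ *
+ *	nvmem_device_read(nvmem, 1 * 4, sizeof(hdr), &hdr); // packet 1 header
+ *
+ * mchp_otpc_read() below translates that offset back into the packet list
+ * built at probe time by mchp_otpc_init_packets_list().
+ */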
+static int mchp_otpc_read(void *priv, unsigned int off, void *val,
+			  size_t bytes)
+{
+	struct mchp_otpc *otpc = priv;
+	struct mchp_otpc_packet *packet;
+	u32 *buf = val;
+	u32 offset;
+	size_t len = 0;
+	int ret, payload_size;
+
+	/*
+	 * We reach this point with off being a multiple of stride = 4, to
+	 * be able to cross the subsystem. Inside the driver we use contiguous
+	 * unsigned integer numbers for the packet id, thus divide off by 4
+	 * before passing it to mchp_otpc_id_to_packet().
+	 */
+	packet = mchp_otpc_id_to_packet(otpc, off / 4);
+	if (!packet)
+		return -EINVAL;
+	offset = packet->offset;
+
+	while (len < bytes) {
+		ret = mchp_otpc_prepare_read(otpc, offset);
+		if (ret)
+			return ret;
+
+		/* Read and save header content. */
+		*buf++ = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+		len += sizeof(*buf);
+		offset++;
+		if (len >= bytes)
+			break;
+
+		/* Read and save payload content. */
+		payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, *(buf - 1));
+		writel_relaxed(0UL, otpc->base + MCHP_OTPC_AR);
+		do {
+			*buf++ = readl_relaxed(otpc->base + MCHP_OTPC_DR);
+			len += sizeof(*buf);
+			offset++;
+			payload_size--;
+		} while (payload_size >= 0 && len < bytes);
+	}
+
+	return 0;
+}
+
+static int mchp_otpc_init_packets_list(struct mchp_otpc *otpc, u32 *size)
+{
+	struct mchp_otpc_packet *packet;
+	u32 word, word_pos = 0, id = 0, npackets = 0, payload_size;
+	int ret;
+
+	INIT_LIST_HEAD(&otpc->packets);
+	*size = 0;
+
+	while (*size < MCHP_OTPC_SIZE) {
+		ret = mchp_otpc_prepare_read(otpc, word_pos);
+		if (ret)
+			return ret;
+
+		word = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+		payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, word);
+		if (!payload_size)
+			break;
+
+		packet = devm_kzalloc(otpc->dev, sizeof(*packet), GFP_KERNEL);
+		if (!packet)
+			return -ENOMEM;
+
+		packet->id = id++;
+		packet->offset = word_pos;
+		INIT_LIST_HEAD(&packet->list);
+		list_add_tail(&packet->list, &otpc->packets);
+
+		/* Count size by adding header and payload sizes. */
+		*size += 4 * (payload_size + 1);
+		/* Next word: this packet (header, payload) position + 1. */
+		word_pos += payload_size + 2;
+
+		npackets++;
+	}
+
+	otpc->npackets = npackets;
+
+	return 0;
+}
+
+static struct nvmem_config mchp_nvmem_config = {
+	.name = MCHP_OTPC_NAME,
+	.type = NVMEM_TYPE_OTP,
+	.read_only = true,
+	.word_size = 4,
+	.stride = 4,
+	.reg_read = mchp_otpc_read,
+};
+
+static int mchp_otpc_probe(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem;
+	struct mchp_otpc *otpc;
+	u32 size;
+	int ret;
+
+	otpc = devm_kzalloc(&pdev->dev, sizeof(*otpc), GFP_KERNEL);
+	if (!otpc)
+		return -ENOMEM;
+
+	otpc->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(otpc->base))
+		return PTR_ERR(otpc->base);
+
+	otpc->dev = &pdev->dev;
+	ret = mchp_otpc_init_packets_list(otpc, &size);
+	if (ret)
+		return ret;
+
+	mchp_nvmem_config.dev = otpc->dev;
+	mchp_nvmem_config.size = size;
+	mchp_nvmem_config.priv = otpc;
+	nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config);
+
+	return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id __maybe_unused mchp_otpc_ids[] = {
+	{ .compatible = "microchip,sama7g5-otpc", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mchp_otpc_ids);
+
+static struct platform_driver mchp_otpc_driver = {
+	.probe = mchp_otpc_probe,
+	.driver = {
+		.name = MCHP_OTPC_NAME,
+		.of_match_table = of_match_ptr(mchp_otpc_ids),
+	},
+};
+module_platform_driver(mchp_otpc_driver);
+
+MODULE_AUTHOR("Claudiu Beznea ");
+MODULE_DESCRIPTION("Microchip SAMA7G5 OTPC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/unittest-data/overlay_16.dts b/drivers/of/unittest-data/overlay_16.dts
new file mode 100644
index 0000000000..80a46dc025
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_16.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+/* overlay_16 - notify test */
+
+&unittest_test_bus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	test-unittest16 {
+		compatible = "unittest";
+		reg = <16>;
+	};
+};
diff --git a/drivers/of/unittest-data/overlay_17.dts b/drivers/of/unittest-data/overlay_17.dts
new file mode 100644
index 0000000000..5b8a220917
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_17.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+/* overlay_17 - notify test */
+
+&unittest_test_bus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	test-unittest17 {
+		compatible = "unittest";
+		reg = <17>;
+	};
+};
diff --git 
a/drivers/of/unittest-data/overlay_18.dts b/drivers/of/unittest-data/overlay_18.dts new file mode 100644 index 0000000000..dcddd5f6d3 --- /dev/null +++ b/drivers/of/unittest-data/overlay_18.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_18 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest18 { + compatible = "unittest"; + reg = <18>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_19.dts b/drivers/of/unittest-data/overlay_19.dts new file mode 100644 index 0000000000..32b3ba0b66 --- /dev/null +++ b/drivers/of/unittest-data/overlay_19.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_19 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest19 { + compatible = "unittest"; + reg = <19>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_20.dts b/drivers/of/unittest-data/overlay_20.dts new file mode 100644 index 0000000000..d4a4f2f32e --- /dev/null +++ b/drivers/of/unittest-data/overlay_20.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_20 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest20 { + compatible = "unittest"; + reg = <20>; + }; +}; diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c new file mode 100644 index 0000000000..e402f05068 --- /dev/null +++ b/drivers/pci/doe.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Data Object Exchange + * PCIe r6.0, sec 6.30 DOE + * + * Copyright (C) 2021 Huawei + * Jonathan Cameron + * + * Copyright (C) 2022 Intel Corporation + * Ira Weiny + */ + +#define dev_fmt(fmt) "DOE: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define PCI_DOE_PROTOCOL_DISCOVERY 0 + +/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */ +#define PCI_DOE_TIMEOUT HZ +#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128) + +#define PCI_DOE_FLAG_CANCEL 0 +#define PCI_DOE_FLAG_DEAD 1 + +/** + * struct pci_doe_mb - State for a single DOE mailbox + * + * This state is used to manage a single DOE mailbox capability. 
All fields
+ * should be considered opaque to the consumers, and the structure must be
+ * passed to the helpers below after being created by pcim_doe_create_mb().
+ *
+ * @pdev: PCI device this mailbox belongs to
+ * @cap_offset: Capability offset
+ * @prots: Array of protocols supported (encoded as long values)
+ * @wq: Wait queue for work item
+ * @work_queue: Queue of pci_doe_work items
+ * @flags: Bit array of PCI_DOE_FLAG_* flags
+ */
+struct pci_doe_mb {
+	struct pci_dev *pdev;
+	u16 cap_offset;
+	struct xarray prots;
+
+	wait_queue_head_t wq;
+	struct workqueue_struct *work_queue;
+	unsigned long flags;
+};
+
+static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
+{
+	if (wait_event_timeout(doe_mb->wq,
+			       test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
+			       timeout))
+		return -EIO;
+	return 0;
+}
+
+static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
+{
+	struct pci_dev *pdev = doe_mb->pdev;
+	int offset = doe_mb->cap_offset;
+
+	pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
+}
+
+static int pci_doe_abort(struct pci_doe_mb *doe_mb)
+{
+	struct pci_dev *pdev = doe_mb->pdev;
+	int offset = doe_mb->cap_offset;
+	unsigned long timeout_jiffies;
+
+	pci_dbg(pdev, "[%x] Issuing Abort\n", offset);
+
+	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);
+
+	do {
+		int rc;
+		u32 val;
+
+		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+		if (rc)
+			return rc;
+		pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+
+		/* Abort success! */
+		if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
+		    !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+			return 0;
+
+	} while (!time_after(jiffies, timeout_jiffies));
+
+	/* Abort has timed out and the MB is dead */
+	pci_err(pdev, "[%x] ABORT timed out\n", offset);
+	return -EIO;
+}
+
+static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+			    struct pci_doe_task *task)
+{
+	struct pci_dev *pdev = doe_mb->pdev;
+	int offset = doe_mb->cap_offset;
+	u32 val;
+	int i;
+
+	/*
+	 * Check that the DOE busy bit is not set. If it is set, this could
+	 * indicate someone other than Linux (e.g. firmware) is using the
+	 * mailbox. Note that firmware and the OS are expected to negotiate
+	 * access rights via some as-yet-undefined method.
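+	 *
+	 * (The request written below is framed by the two standard DOE data
+	 * object header dwords. A worked example: a discovery request - VID
+	 * 0x0001, type 0, one payload dword - goes out as header1 =
+	 * 0x00000001 followed by a length field of 2 + 1 = 3 dwords.)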
+ */ + pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + if (FIELD_GET(PCI_DOE_STATUS_BUSY, val)) + return -EBUSY; + + if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) + return -EIO; + + /* Write DOE Header */ + val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) | + FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type); + pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val); + /* Length is 2 DW of header + length of payload in DW */ + pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, + FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, + 2 + task->request_pl_sz / + sizeof(u32))); + for (i = 0; i < task->request_pl_sz / sizeof(u32); i++) + pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, + task->request_pl[i]); + + pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO); + + return 0; +} + +static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb) +{ + struct pci_dev *pdev = doe_mb->pdev; + int offset = doe_mb->cap_offset; + u32 val; + + pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) + return true; + return false; +} + +static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task) +{ + struct pci_dev *pdev = doe_mb->pdev; + int offset = doe_mb->cap_offset; + size_t length, payload_length; + u32 val; + int i; + + /* Read the first dword to get the protocol */ + pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); + if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) || + (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) { + dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n", + doe_mb->cap_offset, task->prot.vid, task->prot.type, + FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val), + FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val)); + return -EIO; + } + + pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); + /* Read the second dword to get the length */ + pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); + pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); + + length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val); + if (length > SZ_1M || length < 2) + return -EIO; + + /* First 2 dwords have already been read */ + length -= 2; + payload_length = min(length, task->response_pl_sz / sizeof(u32)); + /* Read the rest of the response payload */ + for (i = 0; i < payload_length; i++) { + pci_read_config_dword(pdev, offset + PCI_DOE_READ, + &task->response_pl[i]); + /* Prior to the last ack, ensure Data Object Ready */ + if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb)) + return -EIO; + pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); + } + + /* Flush excess length */ + for (; i < length; i++) { + pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); + pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); + } + + /* Final error check to pick up on any since Data Object Ready */ + pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) + return -EIO; + + return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32); +} + +static void signal_task_complete(struct pci_doe_task *task, int rv) +{ + task->rv = rv; + task->complete(task); +} + +static void signal_task_abort(struct pci_doe_task *task, int rv) +{ + struct pci_doe_mb *doe_mb = task->doe_mb; + struct pci_dev *pdev = doe_mb->pdev; + + if (pci_doe_abort(doe_mb)) { + /* + * If the device can't process an abort; set the mailbox 
dead + * - no more submissions + */ + pci_err(pdev, "[%x] Abort failed marking mailbox dead\n", + doe_mb->cap_offset); + set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags); + } + signal_task_complete(task, rv); +} + +static void doe_statemachine_work(struct work_struct *work) +{ + struct pci_doe_task *task = container_of(work, struct pci_doe_task, + work); + struct pci_doe_mb *doe_mb = task->doe_mb; + struct pci_dev *pdev = doe_mb->pdev; + int offset = doe_mb->cap_offset; + unsigned long timeout_jiffies; + u32 val; + int rc; + + if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) { + signal_task_complete(task, -EIO); + return; + } + + /* Send request */ + rc = pci_doe_send_req(doe_mb, task); + if (rc) { + /* + * The specification does not provide any guidance on how to + * resolve conflicting requests from other entities. + * Furthermore, it is likely that busy will not be detected + * most of the time. Flag any detection of status busy with an + * error. + */ + if (rc == -EBUSY) + dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n", + offset); + signal_task_abort(task, rc); + return; + } + + timeout_jiffies = jiffies + PCI_DOE_TIMEOUT; + /* Poll for response */ +retry_resp: + pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) { + signal_task_abort(task, -EIO); + return; + } + + if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) { + if (time_after(jiffies, timeout_jiffies)) { + signal_task_abort(task, -EIO); + return; + } + rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL); + if (rc) { + signal_task_abort(task, rc); + return; + } + goto retry_resp; + } + + rc = pci_doe_recv_resp(doe_mb, task); + if (rc < 0) { + signal_task_abort(task, rc); + return; + } + + signal_task_complete(task, rc); +} + +static void pci_doe_task_complete(struct pci_doe_task *task) +{ + complete(task->private); +} + +static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid, + u8 *protocol) +{ + u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX, + *index); + u32 response_pl; + DECLARE_COMPLETION_ONSTACK(c); + struct pci_doe_task task = { + .prot.vid = PCI_VENDOR_ID_PCI_SIG, + .prot.type = PCI_DOE_PROTOCOL_DISCOVERY, + .request_pl = &request_pl, + .request_pl_sz = sizeof(request_pl), + .response_pl = &response_pl, + .response_pl_sz = sizeof(response_pl), + .complete = pci_doe_task_complete, + .private = &c, + }; + int rc; + + rc = pci_doe_submit_task(doe_mb, &task); + if (rc < 0) + return rc; + + wait_for_completion(&c); + + if (task.rv != sizeof(response_pl)) + return -EIO; + + *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl); + *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL, + response_pl); + *index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX, + response_pl); + + return 0; +} + +static void *pci_doe_xa_prot_entry(u16 vid, u8 prot) +{ + return xa_mk_value((vid << 8) | prot); +} + +static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb) +{ + u8 index = 0; + u8 xa_idx = 0; + + do { + int rc; + u16 vid; + u8 prot; + + rc = pci_doe_discovery(doe_mb, &index, &vid, &prot); + if (rc) + return rc; + + pci_dbg(doe_mb->pdev, + "[%x] Found protocol %d vid: %x prot: %x\n", + doe_mb->cap_offset, xa_idx, vid, prot); + + rc = xa_insert(&doe_mb->prots, xa_idx++, + pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL); + if (rc) + return rc; + } while (index); + + return 0; +} + +static void pci_doe_xa_destroy(void *mb) +{ + struct pci_doe_mb *doe_mb = mb; + + 
xa_destroy(&doe_mb->prots); +} + +static void pci_doe_destroy_workqueue(void *mb) +{ + struct pci_doe_mb *doe_mb = mb; + + destroy_workqueue(doe_mb->work_queue); +} + +static void pci_doe_flush_mb(void *mb) +{ + struct pci_doe_mb *doe_mb = mb; + + /* Stop all pending work items from starting */ + set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags); + + /* Cancel an in progress work item, if necessary */ + set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags); + wake_up(&doe_mb->wq); + + /* Flush all work items */ + flush_workqueue(doe_mb->work_queue); +} + +/** + * pcim_doe_create_mb() - Create a DOE mailbox object + * + * @pdev: PCI device to create the DOE mailbox for + * @cap_offset: Offset of the DOE mailbox + * + * Create a single mailbox object to manage the mailbox protocol at the + * cap_offset specified. + * + * RETURNS: created mailbox object on success + * ERR_PTR(-errno) on failure + */ +struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset) +{ + struct pci_doe_mb *doe_mb; + struct device *dev = &pdev->dev; + int rc; + + doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL); + if (!doe_mb) + return ERR_PTR(-ENOMEM); + + doe_mb->pdev = pdev; + doe_mb->cap_offset = cap_offset; + init_waitqueue_head(&doe_mb->wq); + + xa_init(&doe_mb->prots); + rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb); + if (rc) + return ERR_PTR(rc); + + doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0, + dev_driver_string(&pdev->dev), + pci_name(pdev), + doe_mb->cap_offset); + if (!doe_mb->work_queue) { + pci_err(pdev, "[%x] failed to allocate work queue\n", + doe_mb->cap_offset); + return ERR_PTR(-ENOMEM); + } + rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb); + if (rc) + return ERR_PTR(rc); + + /* Reset the mailbox by issuing an abort */ + rc = pci_doe_abort(doe_mb); + if (rc) { + pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n", + doe_mb->cap_offset, rc); + return ERR_PTR(rc); + } + + /* + * The state machine and the mailbox should be in sync now; + * Set up mailbox flush prior to using the mailbox to query protocols. + */ + rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb); + if (rc) + return ERR_PTR(rc); + + rc = pci_doe_cache_protocols(doe_mb); + if (rc) { + pci_err(pdev, "[%x] failed to cache protocols : %d\n", + doe_mb->cap_offset, rc); + return ERR_PTR(rc); + } + + return doe_mb; +} +EXPORT_SYMBOL_GPL(pcim_doe_create_mb); + +/** + * pci_doe_supports_prot() - Return if the DOE instance supports the given + * protocol + * @doe_mb: DOE mailbox capability to query + * @vid: Protocol Vendor ID + * @type: Protocol type + * + * RETURNS: True if the DOE mailbox supports the protocol specified + */ +bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type) +{ + unsigned long index; + void *entry; + + /* The discovery protocol must always be supported */ + if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY) + return true; + + xa_for_each(&doe_mb->prots, index, entry) + if (entry == pci_doe_xa_prot_entry(vid, type)) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(pci_doe_supports_prot); + +/** + * pci_doe_submit_task() - Submit a task to be processed by the state machine + * + * @doe_mb: DOE mailbox capability to submit to + * @task: task to be queued + * + * Submit a DOE task (request/response) to the DOE mailbox to be processed. + * Returns upon queueing the task object. If the queue is full this function + * will sleep until there is room in the queue. 
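+ *
+ * A minimal caller (see pci_doe_discovery() above) fills in @task with the
+ * protocol, request/response payload buffers and a completion callback,
+ * then blocks on a completion signalled from that callback.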
+ * + * task->complete will be called when the state machine is done processing this + * task. + * + * Excess data will be discarded. + * + * RETURNS: 0 when task has been successfully queued, -ERRNO on error + */ +int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task) +{ + if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type)) + return -EINVAL; + + /* + * DOE requests must be a whole number of DW and the response needs to + * be big enough for at least 1 DW + */ + if (task->request_pl_sz % sizeof(u32) || + task->response_pl_sz < sizeof(u32)) + return -EINVAL; + + if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) + return -EIO; + + task->doe_mb = doe_mb; + INIT_WORK(&task->work, doe_statemachine_work); + queue_work(doe_mb->work_queue, &task->work); + return 0; +} +EXPORT_SYMBOL_GPL(pci_doe_submit_task); diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c new file mode 100644 index 0000000000..0ea85e1d29 --- /dev/null +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -0,0 +1,1442 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Endpoint Function Driver to implement Non-Transparent Bridge functionality + * Between PCI RC and EP + * + * Copyright (C) 2020 Texas Instruments + * Copyright (C) 2022 NXP + * + * Based on pci-epf-ntb.c + * Author: Frank Li + * Author: Kishon Vijay Abraham I + */ + +/** + * +------------+ +---------------------------------------+ + * | | | | + * +------------+ | +--------------+ + * | NTB | | | NTB | + * | NetDev | | | NetDev | + * +------------+ | +--------------+ + * | NTB | | | NTB | + * | Transfer | | | Transfer | + * +------------+ | +--------------+ + * | | | | | + * | PCI NTB | | | | + * | EPF | | | | + * | Driver | | | PCI Virtual | + * | | +---------------+ | NTB Driver | + * | | | PCI EP NTB |<------>| | + * | | | FN Driver | | | + * +------------+ +---------------+ +--------------+ + * | | | | | | + * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI | + * | | PCI | | | Bus | + * +------------+ +---------------+--------+--------------+ + * PCIe Root Port PCI EP + */ + +#include +#include +#include +#include + +#include +#include +#include + +static struct workqueue_struct *kpcintb_workqueue; + +#define COMMAND_CONFIGURE_DOORBELL 1 +#define COMMAND_TEARDOWN_DOORBELL 2 +#define COMMAND_CONFIGURE_MW 3 +#define COMMAND_TEARDOWN_MW 4 +#define COMMAND_LINK_UP 5 +#define COMMAND_LINK_DOWN 6 + +#define COMMAND_STATUS_OK 1 +#define COMMAND_STATUS_ERROR 2 + +#define LINK_STATUS_UP BIT(0) + +#define SPAD_COUNT 64 +#define DB_COUNT 4 +#define NTB_MW_OFFSET 2 +#define DB_COUNT_MASK GENMASK(15, 0) +#define MSIX_ENABLE BIT(16) +#define MAX_DB_COUNT 32 +#define MAX_MW 4 + +enum epf_ntb_bar { + BAR_CONFIG, + BAR_DB, + BAR_MW0, + BAR_MW1, + BAR_MW2, +}; + +/* + * +--------------------------------------------------+ Base + * | | + * | | + * | | + * | Common Control Register | + * | | + * | | + * | | + * +-----------------------+--------------------------+ Base+span_offset + * | | | + * | Peer Span Space | Span Space | + * | | | + * | | | + * +-----------------------+--------------------------+ Base+span_offset + * | | | +span_count * 4 + * | | | + * | Span Space | Peer Span Space | + * | | | + * +-----------------------+--------------------------+ + * Virtual PCI PCIe Endpoint + * NTB Driver NTB Driver + */ +struct epf_ntb_ctrl { + u32 command; + u32 argument; + u16 command_status; + u16 link_status; + u32 topology; + u64 addr; + u64 size; + u32 num_mws; + u32 reserved; + u32 
spad_offset; + u32 spad_count; + u32 db_entry_size; + u32 db_data[MAX_DB_COUNT]; + u32 db_offset[MAX_DB_COUNT]; +} __packed; + +struct epf_ntb { + struct ntb_dev ntb; + struct pci_epf *epf; + struct config_group group; + + u32 num_mws; + u32 db_count; + u32 spad_count; + u64 mws_size[MAX_MW]; + u64 db; + u32 vbus_number; + u16 vntb_pid; + u16 vntb_vid; + + bool linkup; + u32 spad_size; + + enum pci_barno epf_ntb_bar[6]; + + struct epf_ntb_ctrl *reg; + + phys_addr_t epf_db_phy; + void __iomem *epf_db; + + phys_addr_t vpci_mw_phy[MAX_MW]; + void __iomem *vpci_mw_addr[MAX_MW]; + + struct delayed_work cmd_handler; +}; + +#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group) +#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb) + +static struct pci_epf_header epf_ntb_header = { + .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .baseclass_code = PCI_BASE_CLASS_MEMORY, + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +/** + * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @link_up: true or false indicating Link is UP or Down + * + * Once NTB function in HOST invoke ntb_link_enable(), + * this NTB function driver will trigger a link event to vhost. + */ +static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) +{ + if (link_up) + ntb->reg->link_status |= LINK_STATUS_UP; + else + ntb->reg->link_status &= ~LINK_STATUS_UP; + + ntb_link_event(&ntb->ntb); + return 0; +} + +/** + * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost + * to access the memory window of host + * @ntb: NTB device that facilitates communication between host and vhost + * @mw: Index of the memory window (either 0, 1, 2 or 3) + * + * EP Outbound Window + * +--------+ +-----------+ + * | | | | + * | | | | + * | | | | + * | | | | + * | | +-----------+ + * | Virtual| | Memory Win| + * | NTB | -----------> | | + * | Driver | | | + * | | +-----------+ + * | | | | + * | | | | + * +--------+ +-----------+ + * VHost PCI EP + */ +static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw) +{ + phys_addr_t phys_addr; + u8 func_no, vfunc_no; + u64 addr, size; + int ret = 0; + + phys_addr = ntb->vpci_mw_phy[mw]; + addr = ntb->reg->addr; + size = ntb->reg->size; + + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + + ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size); + if (ret) + dev_err(&ntb->epf->epc->dev, + "Failed to map memory window %d address\n", mw); + return ret; +} + +/** + * epf_ntb_teardown_mw() - Teardown the configured OB ATU + * @ntb: NTB device that facilitates communication between HOST and vHOST + * @mw: Index of the memory window (either 0, 1, 2 or 3) + * + * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using + * pci_epc_unmap_addr() + */ +static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw) +{ + pci_epc_unmap_addr(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + ntb->vpci_mw_phy[mw]); +} + +/** + * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host + * @work: work_struct for the epf_ntb_epc + * + * Workqueue function that gets invoked for the two epf_ntb_epc + * periodically (once every 5ms) to see if it has received any commands + * from NTB host. The host can send commands to configure doorbell or + * configure memory window or to update link status. 
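+ *
+ * The handshake is a simple polled mailbox: the host writes ctrl->argument
+ * and then ctrl->command; this handler clears both and reports the result
+ * through ctrl->command_status (COMMAND_STATUS_OK or COMMAND_STATUS_ERROR).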
+ */ +static void epf_ntb_cmd_handler(struct work_struct *work) +{ + struct epf_ntb_ctrl *ctrl; + u32 command, argument; + struct epf_ntb *ntb; + struct device *dev; + int ret; + int i; + + ntb = container_of(work, struct epf_ntb, cmd_handler.work); + + for (i = 1; i < ntb->db_count; i++) { + if (readl(ntb->epf_db + i * 4)) { + if (readl(ntb->epf_db + i * 4)) + ntb->db |= 1 << (i - 1); + + ntb_db_event(&ntb->ntb, i); + writel(0, ntb->epf_db + i * 4); + } + } + + ctrl = ntb->reg; + command = ctrl->command; + if (!command) + goto reset_handler; + argument = ctrl->argument; + + ctrl->command = 0; + ctrl->argument = 0; + + ctrl = ntb->reg; + dev = &ntb->epf->dev; + + switch (command) { + case COMMAND_CONFIGURE_DOORBELL: + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_DOORBELL: + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_CONFIGURE_MW: + ret = epf_ntb_configure_mw(ntb, argument); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_MW: + epf_ntb_teardown_mw(ntb, argument); + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_LINK_UP: + ntb->linkup = true; + ret = epf_ntb_link_up(ntb, true); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + goto reset_handler; + case COMMAND_LINK_DOWN: + ntb->linkup = false; + ret = epf_ntb_link_up(ntb, false); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + default: + dev_err(dev, "UNKNOWN command: %d\n", command); + break; + } + +reset_handler: + queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler, + msecs_to_jiffies(5)); +} + +/** + * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. + * + * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * self scratchpad region (removes inbound ATU configuration). While BAR0 is + * the default self scratchpad BAR, an NTB could have other BARs for self + * scratchpad (because of reserved BARs). This function can get the exact BAR + * used for self scratchpad from epf_ntb_bar[BAR_CONFIG]. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. Also note HOST2's peer + * scratchpad is HOST1's self scratchpad. + */ +static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb) +{ + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + epf_bar = &ntb->epf->bar[barno]; + + pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar); +} + +/** + * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR + * @ntb: NTB device that facilitates communication between HOST and vHOST + * + * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * self scratchpad region. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. 
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+{
+	struct pci_epf_bar *epf_bar;
+	enum pci_barno barno;
+	u8 func_no, vfunc_no;
+	struct device *dev;
+	int ret;
+
+	dev = &ntb->epf->dev;
+	func_no = ntb->epf->func_no;
+	vfunc_no = ntb->epf->vfunc_no;
+	barno = ntb->epf_ntb_bar[BAR_CONFIG];
+	epf_bar = &ntb->epf->bar[barno];
+
+	ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
+	if (ret) {
+		dev_err(dev, "Config/Status/SPAD BAR set failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ *   config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+	enum pci_barno barno;
+
+	barno = ntb->epf_ntb_bar[BAR_CONFIG];
+	pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ *   region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Allocate the local memory shown in the diagram above. The size of the
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and the size of the
+ * SCRATCHPAD REGION is obtained from the "spad-count" configfs entry.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+{
+	size_t align;
+	enum pci_barno barno;
+	struct epf_ntb_ctrl *ctrl;
+	u32 spad_size, ctrl_size;
+	u64 size;
+	struct pci_epf *epf = ntb->epf;
+	struct device *dev = &epf->dev;
+	u32 spad_count;
+	void *base;
+	int i;
+	const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
+									   epf->func_no,
+									   epf->vfunc_no);
+	barno = ntb->epf_ntb_bar[BAR_CONFIG];
+	size = epc_features->bar_fixed_size[barno];
+	align = epc_features->align;
+
+	if (!IS_ALIGNED(size, align))
+		return -EINVAL;
+
+	spad_count = ntb->spad_count;
+
+	ctrl_size = sizeof(struct epf_ntb_ctrl);
+	spad_size = 2 * spad_count * 4;
+
+	if (!align) {
+		ctrl_size = roundup_pow_of_two(ctrl_size);
+		spad_size = roundup_pow_of_two(spad_size);
+	} else {
+		ctrl_size = ALIGN(ctrl_size, align);
+		spad_size = ALIGN(spad_size, align);
+	}
+
+	if (!size)
+		size = ctrl_size + spad_size;
+	else if (size < ctrl_size + spad_size)
+		return -EINVAL;
+
+	base = pci_epf_alloc_space(epf, size, barno, align, 0);
+	if (!base) {
+		dev_err(dev, "Config/Status/SPAD region alloc failed\n");
+		return -ENOMEM;
+	}
+
+	ntb->reg = base;
+
+	ctrl = ntb->reg;
+	ctrl->spad_offset = ctrl_size;
+
+	ctrl->spad_count = spad_count;
+	ctrl->num_mws = ntb->num_mws;
+	ntb->spad_size = spad_size;
+
+	ctrl->db_entry_size = 4;
+
+	for (i = 0; i < ntb->db_count; i++) {
+		ntb->reg->db_data[i] = 1 + i;
+		ntb->reg->db_offset[i] = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Configure the MSI/MSI-X capability for each interface with the number of
+ * interrupts equal to the "db_count" configfs entry.
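+ *
+ * Note that only the MSI capability is actually programmed below (with 16
+ * vectors); MSI-X capable controllers are accepted but left unconfigured.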
+ */ +static int epf_ntb_configure_interrupt(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + struct device *dev; + u32 db_count; + int ret; + + dev = &ntb->epf->dev; + + epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); + + if (!(epc_features->msix_capable || epc_features->msi_capable)) { + dev_err(dev, "MSI or MSI-X is required for doorbell\n"); + return -EINVAL; + } + + db_count = ntb->db_count; + if (db_count > MAX_DB_COUNT) { + dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT); + return -EINVAL; + } + + ntb->db_count = db_count; + + if (epc_features->msi_capable) { + ret = pci_epc_set_msi(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + 16); + if (ret) { + dev_err(dev, "MSI configuration failed\n"); + return ret; + } + } + + return 0; +} + +/** + * epf_ntb_db_bar_init() - Configure Doorbell window BARs + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static int epf_ntb_db_bar_init(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + u32 align; + struct device *dev = &ntb->epf->dev; + int ret; + struct pci_epf_bar *epf_bar; + void __iomem *mw_addr; + enum pci_barno barno; + size_t size = 4 * ntb->db_count; + + epc_features = pci_epc_get_features(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no); + align = epc_features->align; + + if (size < 128) + size = 128; + + if (align) + size = ALIGN(size, align); + else + size = roundup_pow_of_two(size); + + barno = ntb->epf_ntb_bar[BAR_DB]; + + mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0); + if (!mw_addr) { + dev_err(dev, "Failed to allocate OB address\n"); + return -ENOMEM; + } + + ntb->epf_db = mw_addr; + + epf_bar = &ntb->epf->bar[barno]; + + ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "Doorbell BAR set failed\n"); + goto err_alloc_peer_mem; + } + return ret; + +err_alloc_peer_mem: + pci_epc_mem_free_addr(ntb->epf->epc, epf_bar->phys_addr, mw_addr, epf_bar->size); + return -1; +} + +static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws); + +/** + * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory + * allocated in peer's outbound address space + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static void epf_ntb_db_bar_clear(struct epf_ntb *ntb) +{ + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_DB]; + pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0); + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); +} + +/** + * epf_ntb_mw_bar_init() - Configure Memory window BARs + * @ntb: NTB device that facilitates communication between HOST and vHOST + * + */ +static int epf_ntb_mw_bar_init(struct epf_ntb *ntb) +{ + int ret = 0; + int i; + u64 size; + enum pci_barno barno; + struct device *dev = &ntb->epf->dev; + + for (i = 0; i < ntb->num_mws; i++) { + size = ntb->mws_size[i]; + barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + + ntb->epf->bar[barno].barno = barno; + ntb->epf->bar[barno].size = size; + ntb->epf->bar[barno].addr = NULL; + ntb->epf->bar[barno].phys_addr = 0; + ntb->epf->bar[barno].flags |= upper_32_bits(size) ? 
+ PCI_BASE_ADDRESS_MEM_TYPE_64 : + PCI_BASE_ADDRESS_MEM_TYPE_32; + + ret = pci_epc_set_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); + if (ret) { + dev_err(dev, "MW set failed\n"); + goto err_alloc_mem; + } + + /* Allocate EPC outbound memory windows to vpci vntb device */ + ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc, + &ntb->vpci_mw_phy[i], + size); + if (!ntb->vpci_mw_addr[i]) { + ret = -ENOMEM; + dev_err(dev, "Failed to allocate source address\n"); + goto err_set_bar; + } + } + + return ret; + +err_set_bar: + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); +err_alloc_mem: + epf_ntb_mw_bar_clear(ntb, i); + return ret; +} + +/** + * epf_ntb_mw_bar_clear() - Clear Memory window BARs + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws) +{ + enum pci_barno barno; + int i; + + for (i = 0; i < num_mws; i++) { + barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); + + pci_epc_mem_free_addr(ntb->epf->epc, + ntb->vpci_mw_phy[i], + ntb->vpci_mw_addr[i], + ntb->mws_size[i]); + } +} + +/** + * epf_ntb_epc_destroy() - Cleanup NTB EPC interface + * @ntb: NTB device that facilitates communication between HOST and vHOST + * + * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces + */ +static void epf_ntb_epc_destroy(struct epf_ntb *ntb) +{ + pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0); + pci_epc_put(ntb->epf->epc); +} + +/** + * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB + * constructs (scratchpad region, doorbell, memorywindow) + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + enum pci_barno barno; + enum epf_ntb_bar bar; + struct device *dev; + u32 num_mws; + int i; + + barno = BAR_0; + num_mws = ntb->num_mws; + dev = &ntb->epf->dev; + epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); + + /* These are required BARs which are mandatory for NTB functionality */ + for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + dev_err(dev, "Fail to get NTB function BAR\n"); + return barno; + } + ntb->epf_ntb_bar[bar] = barno; + } + + /* These are optional BARs which don't impact NTB functionality */ + for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + ntb->num_mws = i; + dev_dbg(dev, "BAR not available for > MW%d\n", i + 1); + } + ntb->epf_ntb_bar[bar] = barno; + } + + return 0; +} + +/** + * epf_ntb_epc_init() - Initialize NTB interface + * @ntb: NTB device that facilitates communication between HOST and vHOST2 + * + * Wrapper to initialize a particular EPC interface and start the workqueue + * to check for commands from host. This function will write to the + * EP controller HW for configuring it. 
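+ *
+ * The setup order is: config/SPAD BAR, interrupts, doorbell BAR, memory
+ * window BARs and finally the configuration-space header; the error path
+ * unwinds in the reverse order.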
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+	u8 func_no, vfunc_no;
+	struct pci_epc *epc;
+	struct pci_epf *epf;
+	struct device *dev;
+	int ret;
+
+	epf = ntb->epf;
+	dev = &epf->dev;
+	epc = epf->epc;
+	func_no = ntb->epf->func_no;
+	vfunc_no = ntb->epf->vfunc_no;
+
+	ret = epf_ntb_config_sspad_bar_set(ntb);
+	if (ret) {
+		dev_err(dev, "Config/self SPAD BAR init failed\n");
+		return ret;
+	}
+
+	ret = epf_ntb_configure_interrupt(ntb);
+	if (ret) {
+		dev_err(dev, "Interrupt configuration failed\n");
+		goto err_config_interrupt;
+	}
+
+	ret = epf_ntb_db_bar_init(ntb);
+	if (ret) {
+		dev_err(dev, "DB BAR init failed\n");
+		goto err_db_bar_init;
+	}
+
+	ret = epf_ntb_mw_bar_init(ntb);
+	if (ret) {
+		dev_err(dev, "MW BAR init failed\n");
+		goto err_mw_bar_init;
+	}
+
+	if (vfunc_no <= 1) {
+		ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+		if (ret) {
+			dev_err(dev, "Configuration header write failed\n");
+			goto err_write_header;
+		}
+	}
+
+	INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
+	queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
+
+	return 0;
+
+err_write_header:
+	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+err_mw_bar_init:
+	epf_ntb_db_bar_clear(ntb);
+err_db_bar_init:
+err_config_interrupt:
+	epf_ntb_config_sspad_bar_clear(ntb);
+
+	return ret;
+}
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to cleanup all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+	epf_ntb_db_bar_clear(ntb);
+	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+}
+
+#define EPF_NTB_R(_name)						\
+static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
+				      char *page)			\
+{									\
+	struct config_group *group = to_config_group(item);		\
+	struct epf_ntb *ntb = to_epf_ntb(group);			\
+									\
+	return sprintf(page, "%d\n", ntb->_name);			\
+}
+
+#define EPF_NTB_W(_name)						\
+static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
+				       const char *page, size_t len)	\
+{									\
+	struct config_group *group = to_config_group(item);		\
+	struct epf_ntb *ntb = to_epf_ntb(group);			\
+	u32 val;							\
+	int ret;							\
+									\
+	ret = kstrtou32(page, 0, &val);					\
+	if (ret)							\
+		return ret;						\
+									\
+	ntb->_name = val;						\
+									\
+	return len;							\
+}
+
+#define EPF_NTB_MW_R(_name)						\
+static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
+				      char *page)			\
+{									\
+	struct config_group *group = to_config_group(item);		\
+	struct epf_ntb *ntb = to_epf_ntb(group);			\
+	struct device *dev = &ntb->epf->dev;				\
+	int win_no;							\
+									\
+	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
+		return -EINVAL;						\
+									\
+	if (win_no <= 0 || win_no > ntb->num_mws) {			\
+		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
+		return -EINVAL;						\
+	}								\
+									\
+	return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]);	\
+}
+
+#define EPF_NTB_MW_W(_name)						\
+static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
+				       const char *page, size_t len)	\
+{									\
+	struct config_group *group = to_config_group(item);		\
+	struct epf_ntb *ntb = to_epf_ntb(group);			\
+	struct device *dev = &ntb->epf->dev;				\
+	int win_no;							\
+	u64 val;							\
+	int ret;							\
+									\
+	ret = kstrtou64(page, 0, &val);					\
+	if (ret)							\
+		return ret;						\
+									\
+	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
+		return -EINVAL;						\
+									\
+	if (win_no <= 0 || win_no > ntb->num_mws) {			\
+		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
+		return -EINVAL;						\
+	}								\
+									\
+	ntb->mws_size[win_no - 1] = val;				\
+									\
+	return len;							\
+}
+
+static ssize_t
epf_ntb_num_mws_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct epf_ntb *ntb = to_epf_ntb(group); + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret) + return ret; + + if (val > MAX_MW) + return -EINVAL; + + ntb->num_mws = val; + + return len; +} + +EPF_NTB_R(spad_count) +EPF_NTB_W(spad_count) +EPF_NTB_R(db_count) +EPF_NTB_W(db_count) +EPF_NTB_R(num_mws) +EPF_NTB_R(vbus_number) +EPF_NTB_W(vbus_number) +EPF_NTB_R(vntb_pid) +EPF_NTB_W(vntb_pid) +EPF_NTB_R(vntb_vid) +EPF_NTB_W(vntb_vid) +EPF_NTB_MW_R(mw1) +EPF_NTB_MW_W(mw1) +EPF_NTB_MW_R(mw2) +EPF_NTB_MW_W(mw2) +EPF_NTB_MW_R(mw3) +EPF_NTB_MW_W(mw3) +EPF_NTB_MW_R(mw4) +EPF_NTB_MW_W(mw4) + +CONFIGFS_ATTR(epf_ntb_, spad_count); +CONFIGFS_ATTR(epf_ntb_, db_count); +CONFIGFS_ATTR(epf_ntb_, num_mws); +CONFIGFS_ATTR(epf_ntb_, mw1); +CONFIGFS_ATTR(epf_ntb_, mw2); +CONFIGFS_ATTR(epf_ntb_, mw3); +CONFIGFS_ATTR(epf_ntb_, mw4); +CONFIGFS_ATTR(epf_ntb_, vbus_number); +CONFIGFS_ATTR(epf_ntb_, vntb_pid); +CONFIGFS_ATTR(epf_ntb_, vntb_vid); + +static struct configfs_attribute *epf_ntb_attrs[] = { + &epf_ntb_attr_spad_count, + &epf_ntb_attr_db_count, + &epf_ntb_attr_num_mws, + &epf_ntb_attr_mw1, + &epf_ntb_attr_mw2, + &epf_ntb_attr_mw3, + &epf_ntb_attr_mw4, + &epf_ntb_attr_vbus_number, + &epf_ntb_attr_vntb_pid, + &epf_ntb_attr_vntb_vid, + NULL, +}; + +static const struct config_item_type ntb_group_type = { + .ct_attrs = epf_ntb_attrs, + .ct_owner = THIS_MODULE, +}; + +/** + * epf_ntb_add_cfs() - Add configfs directory specific to NTB + * @epf: NTB endpoint function device + * @group: A pointer to the config_group structure referencing a group of + * config_items of a specific type that belong to a specific sub-system. + * + * Add configfs directory specific to NTB. This directory will hold + * NTB specific properties like db_count, spad_count, num_mws etc., + */ +static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf, + struct config_group *group) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + struct config_group *ntb_group = &ntb->group; + struct device *dev = &epf->dev; + + config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type); + + return ntb_group; +} + +/*==== virtual PCI bus driver, which only load virtual NTB PCI driver ====*/ + +static u32 pci_space[] = { + 0xffffffff, /*DeviceID, Vendor ID*/ + 0, /*Status, Command*/ + 0xffffffff, /*Class code, subclass, prog if, revision id*/ + 0x40, /*bist, header type, latency Timer, cache line size*/ + 0, /*BAR 0*/ + 0, /*BAR 1*/ + 0, /*BAR 2*/ + 0, /*BAR 3*/ + 0, /*BAR 4*/ + 0, /*BAR 5*/ + 0, /*Cardbus cis point*/ + 0, /*Subsystem ID Subystem vendor id*/ + 0, /*ROM Base Address*/ + 0, /*Reserved, Cap. 
Point*/
+	0,		/*Reserved,*/
+	0,		/*Max Lat, Min Gnt, interrupt pin, interrupt line*/
+};
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+	if (devfn == 0) {
+		memcpy(val, ((u8 *)pci_space) + where, size);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+	return 0;
+}
+
+static struct pci_ops vpci_ops = {
+	.read = pci_read,
+	.write = pci_write,
+};
+
+static int vpci_scan_bus(void *sysdata)
+{
+	struct pci_bus *vpci_bus;
+	struct epf_ntb *ndev = sysdata;
+
+	vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+	if (!vpci_bus) {
+		pr_err("create pci bus failed\n");
+		return -EINVAL;
+	}
+
+	pci_bus_add_devices(vpci_bus);
+
+	return 0;
+}
+
+/*==================== Virtual PCIe NTB driver ==========================*/
+
+static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+	struct epf_ntb *ndev = ntb_ndev(ntb);
+
+	return ndev->num_mws;
+}
+
+static int vntb_epf_spad_count(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->spad_count;
+}
+
+static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->num_mws;
+}
+
+static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+	return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
+}
+
+static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	return 0;
+}
+
+static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
+				 dma_addr_t addr, resource_size_t size)
+{
+	struct epf_ntb *ntb = ntb_ndev(ndev);
+	struct pci_epf_bar *epf_bar;
+	enum pci_barno barno;
+	int ret;
+	struct device *dev;
+
+	dev = &ntb->ntb.dev;
+	barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+	epf_bar = &ntb->epf->bar[barno];
+	epf_bar->phys_addr = addr;
+	epf_bar->barno = barno;
+	epf_bar->size = size;
+
+	ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
+	if (ret) {
+		dev_err(dev, "failed to set MW translation\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+	return 0;
+}
+
+static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
+				     phys_addr_t *base, resource_size_t *size)
+{
+	struct epf_ntb *ntb = ntb_ndev(ndev);
+
+	if (base)
+		*base = ntb->vpci_mw_phy[idx];
+
+	if (size)
+		*size = ntb->mws_size[idx];
+
+	return 0;
+}
+
+static int vntb_epf_link_enable(struct ntb_dev *ntb,
+				enum ntb_speed max_speed,
+				enum ntb_width max_width)
+{
+	return 0;
+}
+
+static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
+{
+	struct epf_ntb *ntb = ntb_ndev(ndev);
+	int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4;
+	u32 val;
+	void __iomem *base = ntb->reg;
+
+	val = readl(base + off + ct + idx * 4);
+	return val;
+}
+
+static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
+{
+	struct epf_ntb *ntb = ntb_ndev(ndev);
+	struct epf_ntb_ctrl *ctrl = ntb->reg;
+	int off = ctrl->spad_offset, ct = ctrl->spad_count * 4;
+	void __iomem *base = ntb->reg;
+
+	writel(val, base + off + ct + idx * 4);
+	return 0;
+}
+
+static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
+{
+	struct epf_ntb *ntb = ntb_ndev(ndev);
+	struct epf_ntb_ctrl *ctrl = ntb->reg;
+	int off = ctrl->spad_offset;
+	void __iomem *base = ntb->reg;
+	u32 val;
+
+	val = readl(base + off + idx * 4);
+	return val;
+}
+
+static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
+{
+	struct epf_ntb *ntb = ntb_ndev(ndev);
+	struct epf_ntb_ctrl *ctrl = ntb->reg;
+	int off = ctrl->spad_offset;
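+	/*
+	 * Layout: the peer scratchpads start at spad_offset; the local ones
+	 * follow at spad_offset + spad_count * 4 (cf. vntb_epf_spad_write()).
+	 */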
void __iomem *base = ntb->reg; + + writel(val, base + off + idx * 4); + return 0; +} + +static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits) +{ + u32 interrupt_num = ffs(db_bits) + 1; + struct epf_ntb *ntb = ntb_ndev(ndev); + u8 func_no, vfunc_no; + int ret; + + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + + ret = pci_epc_raise_irq(ntb->epf->epc, + func_no, + vfunc_no, + PCI_EPC_IRQ_MSI, + interrupt_num + 1); + if (ret) + dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n"); + + return ret; +} + +static u64 vntb_epf_db_read(struct ntb_dev *ndev) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + return ntb->db; +} + +static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + if (addr_align) + *addr_align = SZ_4K; + + if (size_align) + *size_align = 1; + + if (size_max) + *size_max = ntb->mws_size[idx]; + + return 0; +} + +static u64 vntb_epf_link_is_up(struct ntb_dev *ndev, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + return ntb->reg->link_status; +} + +static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits) +{ + return 0; +} + +static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + ntb->db &= ~db_bits; + return 0; +} + +static int vntb_epf_link_disable(struct ntb_dev *ntb) +{ + return 0; +} + +static const struct ntb_dev_ops vntb_epf_ops = { + .mw_count = vntb_epf_mw_count, + .spad_count = vntb_epf_spad_count, + .peer_mw_count = vntb_epf_peer_mw_count, + .db_valid_mask = vntb_epf_db_valid_mask, + .db_set_mask = vntb_epf_db_set_mask, + .mw_set_trans = vntb_epf_mw_set_trans, + .mw_clear_trans = vntb_epf_mw_clear_trans, + .peer_mw_get_addr = vntb_epf_peer_mw_get_addr, + .link_enable = vntb_epf_link_enable, + .spad_read = vntb_epf_spad_read, + .spad_write = vntb_epf_spad_write, + .peer_spad_read = vntb_epf_peer_spad_read, + .peer_spad_write = vntb_epf_peer_spad_write, + .peer_db_set = vntb_epf_peer_db_set, + .db_read = vntb_epf_db_read, + .mw_get_align = vntb_epf_mw_get_align, + .link_is_up = vntb_epf_link_is_up, + .db_clear_mask = vntb_epf_db_clear_mask, + .db_clear = vntb_epf_db_clear, + .link_disable = vntb_epf_link_disable, +}; + +static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata; + struct device *dev = &pdev->dev; + + ndev->ntb.pdev = pdev; + ndev->ntb.topo = NTB_TOPO_NONE; + ndev->ntb.ops = &vntb_epf_ops; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "Cannot set DMA mask\n"); + return -EINVAL; + } + + ret = ntb_register_device(&ndev->ntb); + if (ret) { + dev_err(dev, "Failed to register NTB device\n"); + goto err_register_dev; + } + + dev_dbg(dev, "PCI Virtual NTB driver loaded\n"); + return 0; + +err_register_dev: + return -EINVAL; +} + +static struct pci_device_id pci_vntb_table[] = { + { + PCI_DEVICE(0xffff, 0xffff), + }, + {}, +}; + +static struct pci_driver vntb_pci_driver = { + .name = "pci-vntb", + .id_table = pci_vntb_table, + .probe = pci_vntb_probe, +}; + +/* ============ PCIe EPF Driver Bind ====================*/ + +/** + * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality + * @epf: NTB endpoint function device + * + * Initialize both the endpoint controllers associated with NTB function device. 
+ * Invoked when a primary interface or secondary interface is bound to EPC + * device. This function will succeed only when EPC is bound to both the + * interfaces. + */ +static int epf_ntb_bind(struct pci_epf *epf) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + struct device *dev = &epf->dev; + int ret; + + if (!epf->epc) { + dev_dbg(dev, "PRIMARY EPC interface not yet bound\n"); + return 0; + } + + ret = epf_ntb_init_epc_bar(ntb); + if (ret) { + dev_err(dev, "Failed to create NTB EPC\n"); + goto err_bar_init; + } + + ret = epf_ntb_config_spad_bar_alloc(ntb); + if (ret) { + dev_err(dev, "Failed to allocate BAR memory\n"); + goto err_bar_alloc; + } + + ret = epf_ntb_epc_init(ntb); + if (ret) { + dev_err(dev, "Failed to initialize EPC\n"); + goto err_bar_alloc; + } + + epf_set_drvdata(epf, ntb); + + pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid; + pci_vntb_table[0].vendor = ntb->vntb_vid; + pci_vntb_table[0].device = ntb->vntb_pid; + + ret = pci_register_driver(&vntb_pci_driver); + if (ret) { + dev_err(dev, "failure register vntb pci driver\n"); + goto err_bar_alloc; + } + + vpci_scan_bus(ntb); + + return 0; + +err_bar_alloc: + epf_ntb_config_spad_bar_free(ntb); + +err_bar_init: + epf_ntb_epc_destroy(ntb); + + return ret; +} + +/** + * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind() + * @epf: NTB endpoint function device + * + * Cleanup the initialization from epf_ntb_bind() + */ +static void epf_ntb_unbind(struct pci_epf *epf) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + + epf_ntb_epc_cleanup(ntb); + epf_ntb_config_spad_bar_free(ntb); + epf_ntb_epc_destroy(ntb); + + pci_unregister_driver(&vntb_pci_driver); +} + +// EPF driver probe +static struct pci_epf_ops epf_ntb_ops = { + .bind = epf_ntb_bind, + .unbind = epf_ntb_unbind, + .add_cfs = epf_ntb_add_cfs, +}; + +/** + * epf_ntb_probe() - Probe NTB function driver + * @epf: NTB endpoint function device + * + * Probe NTB function driver when endpoint function bus detects a NTB + * endpoint function. 
+ */ +static int epf_ntb_probe(struct pci_epf *epf) +{ + struct epf_ntb *ntb; + struct device *dev; + + dev = &epf->dev; + + ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL); + if (!ntb) + return -ENOMEM; + + epf->header = &epf_ntb_header; + ntb->epf = epf; + ntb->vbus_number = 0xff; + epf_set_drvdata(epf, ntb); + + dev_info(dev, "pci-ep epf driver loaded\n"); + return 0; +} + +static const struct pci_epf_device_id epf_ntb_ids[] = { + { + .name = "pci_epf_vntb", + }, + {}, +}; + +static struct pci_epf_driver epf_ntb_driver = { + .driver.name = "pci_epf_vntb", + .probe = epf_ntb_probe, + .id_table = epf_ntb_ids, + .ops = &epf_ntb_ops, + .owner = THIS_MODULE, +}; + +static int __init epf_ntb_init(void) +{ + int ret; + + kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM | + WQ_HIGHPRI, 0); + ret = pci_epf_register_driver(&epf_ntb_driver); + if (ret) { + destroy_workqueue(kpcintb_workqueue); + pr_err("Failed to register pci epf ntb driver --> %d\n", ret); + return ret; + } + + return 0; +} +module_init(epf_ntb_init); + +static void __exit epf_ntb_exit(void) +{ + pci_epf_unregister_driver(&epf_ntb_driver); + destroy_workqueue(kpcintb_workqueue); +} +module_exit(epf_ntb_exit); + +MODULE_DESCRIPTION("PCI EPF NTB DRIVER"); +MODULE_AUTHOR("Frank Li "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c new file mode 100644 index 0000000000..a9bb73f76b --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC CPA(Coherency Protocol Agent) hardware event counters support + * + * Copyright (C) 2022 HiSilicon Limited + * Author: Qi Liu + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ + +#define pr_fmt(fmt) "cpa pmu: " fmt +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* CPA register definition */ +#define CPA_PERF_CTRL 0x1c00 +#define CPA_EVENT_CTRL 0x1c04 +#define CPA_INT_MASK 0x1c70 +#define CPA_INT_STATUS 0x1c78 +#define CPA_INT_CLEAR 0x1c7c +#define CPA_EVENT_TYPE0 0x1c80 +#define CPA_VERSION 0x1cf0 +#define CPA_CNT0_LOWER 0x1d00 +#define CPA_CFG_REG 0x0534 + +/* CPA operation command */ +#define CPA_PERF_CTRL_EN BIT_ULL(0) +#define CPA_EVTYPE_MASK 0xffUL +#define CPA_PM_CTRL BIT_ULL(9) + +/* CPA has 8-counters */ +#define CPA_NR_COUNTERS 0x8 +#define CPA_COUNTER_BITS 64 +#define CPA_NR_EVENTS 0xff +#define CPA_REG_OFFSET 0x8 + +static u32 hisi_cpa_pmu_get_counter_offset(int idx) +{ + return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET); +} + +static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(CPA_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * CPA_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * CPA_EVENT_TYPE1 is chosen. 
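+	 * For example, for idx = 5 this gives reg = CPA_EVENT_TYPE0 + 4
+	 * (i.e. CPA_EVENT_TYPE1) and shift = 8, so the event code occupies
+	 * bits [15:8] of CPA_EVENT_TYPE1.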
+ */ + reg = CPA_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = CPA_REG_OFFSET * reg_idx; + + /* Write event code to CPA_EVENT_TYPEx Register */ + val = readl(cpa_pmu->base + reg); + val &= ~(CPA_EVTYPE_MASK << shift); + val |= type << shift; + writel(val, cpa_pmu->base + reg); +} + +static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_PERF_CTRL); + val |= CPA_PERF_CTRL_EN; + writel(val, cpa_pmu->base + CPA_PERF_CTRL); +} + +static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_PERF_CTRL); + val &= ~(CPA_PERF_CTRL_EN); + writel(val, cpa_pmu->base + CPA_PERF_CTRL); +} + +static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_CFG_REG); + val |= CPA_PM_CTRL; + writel(val, cpa_pmu->base + CPA_CFG_REG); +} + +static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_CFG_REG); + val &= ~(CPA_PM_CTRL); + writel(val, cpa_pmu->base + CPA_CFG_REG); +} + +static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in CPA_EVENT_CTRL register */ + val = readl(cpa_pmu->base + CPA_EVENT_CTRL); + val |= 1 << hwc->idx; + writel(val, cpa_pmu->base + CPA_EVENT_CTRL); +} + +static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in CPA_EVENT_CTRL register */ + val = readl(cpa_pmu->base + CPA_EVENT_CTRL); + val &= ~(1UL << hwc->idx); + writel(val, cpa_pmu->base + CPA_EVENT_CTRL); +} + +static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(cpa_pmu->base + CPA_INT_MASK); + val &= ~(1UL << hwc->idx); + writel(val, cpa_pmu->base + CPA_INT_MASK); +} + +static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(cpa_pmu->base + CPA_INT_MASK); + val |= 1 << hwc->idx; + writel(val, cpa_pmu->base + CPA_INT_MASK); +} + +static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu) +{ + return readl(cpa_pmu->base + CPA_INT_STATUS); +} + +static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx) +{ + writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR); +} + +static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = { + { "HISI0281", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match); + +static int hisi_cpa_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *cpa_pmu) +{ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &cpa_pmu->sicl_id)) { + dev_err(&pdev->dev, "Can not read sicl-id\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id", + &cpa_pmu->index_id)) { + dev_err(&pdev->dev, "Cannot read idx-id\n"); + return -EINVAL; + } + + cpa_pmu->ccl_id = -1; + cpa_pmu->sccl_id = -1; + cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cpa_pmu->base)) + return PTR_ERR(cpa_pmu->base); + + cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION); + + return 0; +} + +static struct attribute *hisi_cpa_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-15"), + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_format_group = { + .name = "format", + .attrs = hisi_cpa_pmu_format_attr, +}; 
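+/*
+ * The event codes below are programmed through the "event" format field
+ * (config:0-15) above; e.g. "perf stat -e hisi_sicl0_cpa0/cpa_cycles/"
+ * (with the sicl/index numbers of a probed instance) counts CPA cycles.
+ */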
+ +static struct attribute *hisi_cpa_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(cpa_cycles, 0x00), + HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat, 0x61), + HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat, 0x62), + HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat, 0xE1), + HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat, 0xE2), + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_events_group = { + .name = "events", + .attrs = hisi_cpa_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = { + .attrs = hisi_cpa_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_cpa_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_cpa_pmu_identifier_attrs[] = { + &hisi_cpa_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_identifier_group = { + .attrs = hisi_cpa_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = { + &hisi_cpa_pmu_format_group, + &hisi_cpa_pmu_events_group, + &hisi_cpa_pmu_cpumask_attr_group, + &hisi_cpa_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = { + .write_evtype = hisi_cpa_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_cpa_pmu_start_counters, + .stop_counters = hisi_cpa_pmu_stop_counters, + .enable_counter = hisi_cpa_pmu_enable_counter, + .disable_counter = hisi_cpa_pmu_disable_counter, + .enable_counter_int = hisi_cpa_pmu_enable_counter_int, + .disable_counter_int = hisi_cpa_pmu_disable_counter_int, + .write_counter = hisi_cpa_pmu_write_counter, + .read_counter = hisi_cpa_pmu_read_counter, + .get_int_status = hisi_cpa_pmu_get_int_status, + .clear_int_status = hisi_cpa_pmu_clear_int_status, +}; + +static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *cpa_pmu) +{ + int ret; + + ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev); + if (ret) + return ret; + + cpa_pmu->counter_bits = CPA_COUNTER_BITS; + cpa_pmu->check_event = CPA_NR_EVENTS; + cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups; + cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops; + cpa_pmu->num_counters = CPA_NR_COUNTERS; + cpa_pmu->dev = &pdev->dev; + cpa_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_cpa_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *cpa_pmu; + char *name; + int ret; + + cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL); + if (!cpa_pmu) + return -ENOMEM; + + ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u", + cpa_pmu->sicl_id, cpa_pmu->index_id); + if (!name) + return -ENOMEM; + + cpa_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = cpa_pmu->pmu_events.attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + /* Power Management should be disabled before using CPA PMU. 
*/ + hisi_cpa_pmu_disable_pm(cpa_pmu); + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + &cpa_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return ret; + } + + ret = perf_pmu_register(&cpa_pmu->pmu, name, -1); + if (ret) { + dev_err(cpa_pmu->dev, "PMU register failed\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return ret; + } + + platform_set_drvdata(pdev, cpa_pmu); + return ret; +} + +static int hisi_cpa_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&cpa_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + &cpa_pmu->node); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return 0; +} + +static struct platform_driver hisi_cpa_pmu_driver = { + .driver = { + .name = "hisi_cpa_pmu", + .acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_cpa_pmu_probe, + .remove = hisi_cpa_pmu_remove, +}; + +static int __init hisi_cpa_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + "AP_PERF_ARM_HISI_CPA_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("setup hotplug failed: %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_cpa_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE); + + return ret; +} +module_init(hisi_cpa_pmu_module_init); + +static void __exit hisi_cpa_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_cpa_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE); +} +module_exit(hisi_cpa_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Qi Liu "); diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c new file mode 100644 index 0000000000..e0457d84af --- /dev/null +++ b/drivers/perf/hisilicon/hns3_pmu.c @@ -0,0 +1,1671 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This driver adds support for HNS3 PMU iEP device. Related perf events are + * bandwidth, latency, packet rate, interrupt rate etc. 
+ *
+ * Copyright (C) 2022 HiSilicon Limited
+ */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-epf.h>
+#include <linux/perf_event.h>
+#include <linux/smp.h>
+
+/* registers offset address */
+#define HNS3_PMU_REG_GLOBAL_CTRL		0x0000
+#define HNS3_PMU_REG_CLOCK_FREQ			0x0020
+#define HNS3_PMU_REG_BDF			0x0fe0
+#define HNS3_PMU_REG_VERSION			0x0fe4
+#define HNS3_PMU_REG_DEVICE_ID			0x0fe8
+
+#define HNS3_PMU_REG_EVENT_OFFSET		0x1000
+#define HNS3_PMU_REG_EVENT_SIZE			0x1000
+#define HNS3_PMU_REG_EVENT_CTRL_LOW		0x00
+#define HNS3_PMU_REG_EVENT_CTRL_HIGH		0x04
+#define HNS3_PMU_REG_EVENT_INTR_STATUS		0x08
+#define HNS3_PMU_REG_EVENT_INTR_MASK		0x0c
+#define HNS3_PMU_REG_EVENT_COUNTER		0x10
+#define HNS3_PMU_REG_EVENT_EXT_COUNTER		0x18
+#define HNS3_PMU_REG_EVENT_QID_CTRL		0x28
+#define HNS3_PMU_REG_EVENT_QID_PARA		0x2c
+
+#define HNS3_PMU_FILTER_SUPPORT_GLOBAL		BIT(0)
+#define HNS3_PMU_FILTER_SUPPORT_PORT		BIT(1)
+#define HNS3_PMU_FILTER_SUPPORT_PORT_TC		BIT(2)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC		BIT(3)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE	BIT(4)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR	BIT(5)
+
+#define HNS3_PMU_FILTER_ALL_TC			0xf
+#define HNS3_PMU_FILTER_ALL_QUEUE		0xffff
+
+#define HNS3_PMU_CTRL_SUBEVENT_S		4
+#define HNS3_PMU_CTRL_FILTER_MODE_S		24
+
+#define HNS3_PMU_GLOBAL_START			BIT(0)
+
+#define HNS3_PMU_EVENT_STATUS_RESET		BIT(11)
+#define HNS3_PMU_EVENT_EN			BIT(12)
+#define HNS3_PMU_EVENT_OVERFLOW_RESTART		BIT(15)
+
+#define HNS3_PMU_QID_PARA_FUNC_S		0
+#define HNS3_PMU_QID_PARA_QUEUE_S		16
+
+#define HNS3_PMU_QID_CTRL_REQ_ENABLE		BIT(0)
+#define HNS3_PMU_QID_CTRL_DONE			BIT(1)
+#define HNS3_PMU_QID_CTRL_MISS			BIT(2)
+
+#define HNS3_PMU_INTR_MASK_OVERFLOW		BIT(1)
+
+#define HNS3_PMU_MAX_HW_EVENTS			8
+
+/*
+ * Each hardware event contains two registers (counter and ext_counter) for
+ * bandwidth, packet rate, latency and interrupt rate. Both registers are
+ * started together when a hardware event is enabled. The meaning of counter
+ * and ext_counter differs by event type, as shown below:
+ *
+ * +----------------+------------------+---------------+
+ * | event type     | counter          | ext_counter   |
+ * +----------------+------------------+---------------+
+ * | bandwidth      | byte number      | cycle number  |
+ * +----------------+------------------+---------------+
+ * | packet rate    | packet number    | cycle number  |
+ * +----------------+------------------+---------------+
+ * | latency        | cycle number     | packet number |
+ * +----------------+------------------+---------------+
+ * | interrupt rate | interrupt number | cycle number  |
+ * +----------------+------------------+---------------+
+ *
+ * The cycle number is the increment of the hardware timer; the timer
+ * frequency can be read from the hw_clk_freq sysfs file.
+ *
+ * Performance of each hardware event is calculated by: counter / ext_counter.
+ *
+ * Since processing of data is preferred to be done in userspace, we expose
+ * ext_counter as a separate event for userspace and use bit 16 to indicate
+ * it. For example, events 0x00001 and 0x10001 are one hardware event because
+ * bits 0-15 are the same; bit 16 of the config value selects whether the
+ * counter register (0) or the ext_counter register (1) is read.
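+ *
+ * As a worked example with made-up numbers: a bandwidth event pair that
+ * reads counter = 1000000 bytes and ext_counter = 200000000 cycles at
+ * hw_clk_freq = 1000000000 Hz covers a 0.2 s window, giving
+ * 1000000 / 0.2 = 5000000 bytes/second, i.e.
+ * counter * hw_clk_freq / ext_counter. This assumes both events of the
+ * pair are opened in the same perf event group, so they share one
+ * hardware event and cover the same time window.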
+ */ +/* bandwidth events */ +#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001 +#define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001 +#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002 +#define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002 +#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003 +#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003 +#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004 +#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004 +#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005 +#define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008 +#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009 +#define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009 +#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a +#define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a +#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b +#define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b +#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c +#define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c +#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d +#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d +#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e +#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e +#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f +#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f +#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010 +#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010 + +/* packet rate events */ +#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100 +#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100 +#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101 +#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101 +#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102 +#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104 +#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105 +#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 0x10106 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108 +#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109 +#define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109 +#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a +#define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a +#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b +#define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b +#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c +#define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110 +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110 +#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111 +#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112 + +/* latency events */ +#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202 +#define 
HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202 +#define HNS3_PMU_EVT_DLY_TX_TIME 0x00204 +#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209 +#define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e +#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e +#define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f +#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f +#define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210 +#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210 +#define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211 +#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212 +#define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213 +#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213 +#define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214 +#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214 +#define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215 +#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215 +#define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216 +#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c + +/* interrupt rate events */ +#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300 +#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300 + +/* filter mode supported by each bandwidth event */ +#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f +#define HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_BW_WR_FBD 0x1b +#define HNS3_PMU_FILTER_BW_WR_EBD 0x11 +#define HNS3_PMU_FILTER_BW_RD_FBD 0x01 +#define HNS3_PMU_FILTER_BW_RD_EBD 0x1b +#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01 + +/* filter mode supported by each packet rate event */ +#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f +#define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f +#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b +#define HNS3_PMU_FILTER_PPS_WR_EBD 0x11 +#define HNS3_PMU_FILTER_PPS_RD_FBD 0x01 +#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b +#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01 +#define 
HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01 +#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01 + +/* filter mode supported by each latency event */ +#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01 +#define HNS3_PMU_FILTER_DLY_TX 0x01 +#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_RPU 0x11 +#define HNS3_PMU_FILTER_DLY_TPU 0x1f +#define HNS3_PMU_FILTER_DLY_RPE 0x01 +#define HNS3_PMU_FILTER_DLY_TPE 0x0b +#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b +#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b +#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11 +#define HNS3_PMU_FILTER_DLY_RD_FBD 0x01 +#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b +#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01 + +/* filter mode supported by each interrupt rate event */ +#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01 + +enum hns3_pmu_hw_filter_mode { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +struct hns3_pmu_event_attr { + u32 event; + u16 filter_support; +}; + +struct hns3_pmu { + struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS]; + struct hlist_node node; + struct pci_dev *pdev; + struct pmu pmu; + void __iomem *base; + int irq; + int on_cpu; + u32 identifier; + u32 hw_clk_freq; /* hardware clock frequency of PMU */ + /* maximum and minimum bdf allowed by PMU */ + u16 bdf_min; + u16 bdf_max; +}; + +#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu)) + +#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff) + +#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff) +#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07)) +#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func)) + +#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \ + static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK_ULL(_end, _start), \ + event->attr._config); \ + } + +HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7); +HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15); +HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16); +HNS3_PMU_FILTER_ATTR(port, config1, 0, 3); +HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7); +HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23); +HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39); +HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51); +HNS3_PMU_FILTER_ATTR(global, config1, 52, 52); + +#define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_TIME, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_TIME, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define 
HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \ + HNS3_PMU_FILTER_INTR_##_name}) +#define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_INTR_##_name}) + +static ssize_t hns3_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t hns3_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + return sysfs_emit(buf, "config=0x%x\n", event->event); +} + +static ssize_t hns3_pmu_filter_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + int len; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + len = sysfs_emit_at(buf, 0, "filter mode supported: "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL) + len += sysfs_emit_at(buf, len, "global "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT) + len += sysfs_emit_at(buf, len, "port "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC) + len += sysfs_emit_at(buf, len, "port-tc "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC) + len += sysfs_emit_at(buf, len, "func "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE) + len += sysfs_emit_at(buf, len, "func-queue "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR) + len += sysfs_emit_at(buf, len, "func-intr "); + + len += sysfs_emit_at(buf, len, "\n"); + + return len; +} + +#define HNS3_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ + })[0].attr.attr) + +#define HNS3_PMU_FORMAT_ATTR(_name, _format) \ + HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format) +#define HNS3_PMU_EVENT_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event) +#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event) + +#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, 
HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +static u8 hns3_pmu_hw_filter_modes[] = { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \ + ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)]) + +static ssize_t identifier_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier); +} +static DEVICE_ATTR_RO(identifier); + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu); +} +static DEVICE_ATTR_RO(cpumask); + +static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_min; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_min); + +static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_max; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_max); + +static ssize_t hw_clk_freq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq); +} +static DEVICE_ATTR_RO(hw_clk_freq); + +static struct attribute *hns3_pmu_events_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD), + 
HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute *hns3_pmu_filter_mode_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH), + 
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute_group hns3_pmu_events_group = { + .name = "events", + .attrs = hns3_pmu_events_attr, +}; + +static struct attribute_group hns3_pmu_filter_mode_group = { + .name = "filtermode", + .attrs = hns3_pmu_filter_mode_attr, +}; + +static struct attribute *hns3_pmu_format_attr[] = { + HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"), + HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"), + HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"), + HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"), + HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"), + HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"), + HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"), + HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"), + HNS3_PMU_FORMAT_ATTR(global, "config1:52"), + NULL +}; + +static struct attribute_group hns3_pmu_format_group = { + .name = "format", + .attrs = hns3_pmu_format_attr, +}; + +static struct attribute *hns3_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group hns3_pmu_cpumask_attr_group = { + .attrs = hns3_pmu_cpumask_attrs, +}; + +static struct attribute *hns3_pmu_identifier_attrs[] = { + &dev_attr_identifier.attr, + NULL +}; + +static struct attribute_group hns3_pmu_identifier_attr_group = { + .attrs = hns3_pmu_identifier_attrs, +}; + +static struct attribute *hns3_pmu_bdf_range_attrs[] = { + &dev_attr_bdf_min.attr, + &dev_attr_bdf_max.attr, + NULL +}; + +static struct attribute_group hns3_pmu_bdf_range_attr_group = { + .attrs = hns3_pmu_bdf_range_attrs, +}; + +static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = { + &dev_attr_hw_clk_freq.attr, + NULL +}; + +static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = { + .attrs = hns3_pmu_hw_clk_freq_attrs, +}; + +static const struct attribute_group *hns3_pmu_attr_groups[] = { + &hns3_pmu_events_group, + &hns3_pmu_filter_mode_group, + &hns3_pmu_format_group, + &hns3_pmu_cpumask_attr_group, + &hns3_pmu_identifier_attr_group, + &hns3_pmu_bdf_range_attr_group, + &hns3_pmu_hw_clk_freq_attr_group, + NULL +}; + +static u32 hns3_pmu_get_event(struct perf_event *event) +{ + return hns3_pmu_get_ext_counter_used(event) << 16 | + hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_real_event(struct perf_event *event) +{ + return hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_offset(u32 offset, u32 idx) +{ + return offset + 
HNS3_PMU_REG_EVENT_OFFSET + + HNS3_PMU_REG_EVENT_SIZE * idx; +} + +static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readl(hns3_pmu->base + offset); +} + +static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u32 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writel(val, hns3_pmu->base + offset); +} + +static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readq(hns3_pmu->base + offset); +} + +static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u64 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writeq(val, hns3_pmu->base + offset); +} + +static bool hns3_pmu_cmp_event(struct perf_event *target, + struct perf_event *event) +{ + return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event); +} + +static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu, + struct perf_event *event) +{ + struct perf_event *sibling; + int hw_event_used = 0; + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + sibling = hns3_pmu->hw_events[idx]; + if (!sibling) + continue; + + hw_event_used++; + + if (!hns3_pmu_cmp_event(sibling, event)) + continue; + + /* Related events is used in group */ + if (sibling->group_leader == event->group_leader) + return idx; + } + + /* No related event and all hardware events are used up */ + if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS) + return -EBUSY; + + /* No related event and there is extra hardware events can be use */ + return -ENOENT; +} + +static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu) +{ + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + if (!hns3_pmu->hw_events[idx]) + return idx; + } + + return -EBUSY; +} + +static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf) +{ + struct pci_dev *pdev; + + if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) { + pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf); + return false; + } + + pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus), + PCI_BUS_NUM(bdf), + GET_PCI_DEVFN(bdf)); + if (!pdev) { + pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf); + return false; + } + + pci_dev_put(pdev); + return true; +} + +static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + u32 val; + + val = GET_PCI_DEVFN(bdf); + val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val); +} + +static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx) +{ + bool queue_id_valid = false; + u32 reg_qid_ctrl, val; + int err; + + /* enable queue id request */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, + HNS3_PMU_QID_CTRL_REQ_ENABLE); + + reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx); + err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val, + val & HNS3_PMU_QID_CTRL_DONE, 1, 1000); + if (err == -ETIMEDOUT) { + pci_err(hns3_pmu->pdev, "QID request timeout!\n"); + goto out; + } + + queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS); + +out: + /* disable qid request and clear status */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0); + + return queue_id_valid; +} + +static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, 
queue); + + return hns3_pmu_qid_req_start(hns3_pmu, idx); +} + +static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event) +{ + struct hns3_pmu_event_attr *pmu_event; + struct dev_ext_attribute *eattr; + struct device_attribute *dattr; + struct attribute *attr; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) { + attr = hns3_pmu_events_attr[i]; + dattr = container_of(attr, struct device_attribute, attr); + eattr = container_of(dattr, struct dev_ext_attribute, attr); + pmu_event = eattr->var; + + if (event == pmu_event->event) + return pmu_event; + } + + return NULL; +} + +static int hns3_pmu_set_func_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC); + + return 0; +} + +static int hns3_pmu_set_func_queue_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) { + pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id); + return -ENOENT; + } + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE); + + return 0; +} + +static bool +hns3_pmu_is_enabled_global_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 global = hns3_pmu_get_global(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)) + return false; + + return global; +} + +static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)) + return false; + else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool +hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)) + return false; + else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)) + return false; + + return tc_id == HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)) + return false; + + return tc_id != HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)) + return false; + + return hns3_pmu_valid_bdf(hns3_pmu, bdf); +} + +static int hns3_pmu_select_filter_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u32 event_id = hns3_pmu_get_event(event); + struct hw_perf_event *hwc = &event->hw; + struct 
hns3_pmu_event_attr *pmu_event; + + pmu_event = hns3_pmu_get_pmu_event(event_id); + if (!pmu_event) { + pci_err(hns3_pmu->pdev, "Invalid pmu event\n"); + return -ENOENT; + } + + if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL); + return 0; + } + + if (hns3_pmu_is_enabled_func_mode(event, pmu_event)) + return hns3_pmu_set_func_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event)) + return hns3_pmu_set_func_queue_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT); + return 0; + } + + if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC); + return 0; + } + + if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR); + return 0; + } + + return -ENOENT; +} + +static bool hns3_pmu_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS]; + int counters = 1; + int num; + + event_group[0] = leader; + if (!is_software_event(leader)) { + if (leader->pmu != event->pmu) + return false; + + if (leader != event && !hns3_pmu_cmp_event(leader, event)) + event_group[counters++] = event; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (sibling->pmu != event->pmu) + return false; + + for (num = 0; num < counters; num++) { + if (hns3_pmu_cmp_event(event_group[num], sibling)) + break; + } + + if (num == counters) + event_group[counters++] = sibling; + } + + return counters <= HNS3_PMU_MAX_HW_EVENTS; +} + +static u32 hns3_pmu_get_filter_condition(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u16 intr_id = hns3_pmu_get_intr(event); + u8 port_id = hns3_pmu_get_port(event); + u16 bdf = hns3_pmu_get_bdf(event); + u8 tc_id = hns3_pmu_get_tc(event); + u8 filter_mode; + + filter_mode = *(u8 *)hwc->addr_filters; + switch (filter_mode) { + case HNS3_PMU_HW_FILTER_PORT: + return FILTER_CONDITION_PORT(port_id); + case HNS3_PMU_HW_FILTER_PORT_TC: + return FILTER_CONDITION_PORT_TC(port_id, tc_id); + case HNS3_PMU_HW_FILTER_FUNC: + case HNS3_PMU_HW_FILTER_FUNC_QUEUE: + return GET_PCI_DEVFN(bdf); + case HNS3_PMU_HW_FILTER_FUNC_INTR: + return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id); + default: + break; + } + + return 0; +} + +static void hns3_pmu_config_filter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u8 event_type = hns3_pmu_get_event_type(event); + u8 subevent_id = hns3_pmu_get_subevent(event); + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u8 filter_mode = *(u8 *)hwc->addr_filters; + u16 bdf = hns3_pmu_get_bdf(event); + u32 idx = hwc->idx; + u32 val; + + val = event_type; + val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S; + val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S; + val |= HNS3_PMU_EVENT_OVERFLOW_RESTART; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = hns3_pmu_get_filter_condition(event); + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val); + + if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE) + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id); +} + +static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx 
= hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val &= ~HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val |= HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx) +{ + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static u64 hns3_pmu_read_counter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + + return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx); +} + +static void hns3_pmu_write_counter(struct perf_event *event, u64 value) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u32 idx = event->hw.idx; + + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value); + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value); +} + +static void hns3_pmu_init_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + local64_set(&hwc->prev_count, 0); + hns3_pmu_write_counter(event, 0); +} + +static int hns3_pmu_event_init(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling is not supported */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + event->cpu = hns3_pmu->on_cpu; + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) { + pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n", + HNS3_PMU_MAX_HW_EVENTS); + return -EBUSY; + } + + hwc->idx = idx; + + ret = hns3_pmu_select_filter_mode(event, hns3_pmu); + if (ret) { + pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret); + return ret; + } + + if (!hns3_pmu_validate_event_group(event)) { + pci_err(hns3_pmu->pdev, "Invalid event group.\n"); + return -EINVAL; + } + + if (hns3_pmu_get_ext_counter_used(event)) + hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER; + else + hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER; + + return 0; +} + +static void hns3_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 new_cnt, prev_cnt, delta; + + do { + prev_cnt = local64_read(&hwc->prev_count); + new_cnt = hns3_pmu_read_counter(event); + } while 
(local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != + prev_cnt); + + delta = new_cnt - prev_cnt; + local64_add(delta, &event->count); +} + +static void hns3_pmu_start(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + + hns3_pmu_config_filter(event); + hns3_pmu_init_counter(event); + hns3_pmu_enable_intr(hns3_pmu, hwc); + hns3_pmu_enable_counter(hns3_pmu, hwc); + + perf_event_update_userpage(event); +} + +static void hns3_pmu_stop(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_disable_counter(hns3_pmu, hwc); + hns3_pmu_disable_intr(hns3_pmu, hwc); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + /* Read hardware counter and update the perf counter statistics */ + hns3_pmu_read(event); + hwc->state |= PERF_HES_UPTODATE; +} + +static int hns3_pmu_add(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Check all working events to find a related event. */ + idx = hns3_pmu_find_related_event_idx(hns3_pmu, event); + if (idx < 0 && idx != -ENOENT) + return idx; + + /* Current event shares an enabled hardware event with related event */ + if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) { + hwc->idx = idx; + goto start_count; + } + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) + return idx; + + hwc->idx = idx; + hns3_pmu->hw_events[idx] = event; + +start_count: + if (flags & PERF_EF_START) + hns3_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void hns3_pmu_del(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_stop(event, PERF_EF_UPDATE); + hns3_pmu->hw_events[hwc->idx] = NULL; + perf_event_update_userpage(event); +} + +static void hns3_pmu_enable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val |= HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static void hns3_pmu_disable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val &= ~HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + u16 device_id; + char *name; + u32 val; + + hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2]; + if (!hns3_pmu->base) { + pci_err(pdev, "ioremap failed\n"); + return -ENOMEM; + } + + hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ); + + val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF); + hns3_pmu->bdf_min = val & 0xffff; + hns3_pmu->bdf_max = val >> 16; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID); + device_id = val & 0xffff; + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id); + if (!name) + return -ENOMEM; + + hns3_pmu->pdev = pdev; + hns3_pmu->on_cpu = -1; + hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION); + 
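+	/*
+	 * This is a system-wide (uncore) PMU: hns3_pmu_event_init() rejects
+	 * sampling and per-task attachment, and all events run on the single
+	 * CPU tracked in on_cpu and published via the cpumask sysfs attribute.
+	 */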
hns3_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .event_init = hns3_pmu_event_init, + .pmu_enable = hns3_pmu_enable, + .pmu_disable = hns3_pmu_disable, + .add = hns3_pmu_add, + .del = hns3_pmu_del, + .start = hns3_pmu_start, + .stop = hns3_pmu_stop, + .read = hns3_pmu_read, + .task_ctx_nr = perf_invalid_context, + .attr_groups = hns3_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + return 0; +} + +static irqreturn_t hns3_pmu_irq(int irq, void *data) +{ + struct hns3_pmu *hns3_pmu = data; + u32 intr_status, idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + intr_status = hns3_pmu_readl(hns3_pmu, + HNS3_PMU_REG_EVENT_INTR_STATUS, + idx); + + /* + * As each counter will restart from 0 when it is overflowed, + * extra processing is no need, just clear interrupt status. + */ + if (intr_status) + hns3_pmu_clear_intr_status(hns3_pmu, idx); + } + + return IRQ_HANDLED; +} + +static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + if (hns3_pmu->on_cpu == -1) { + hns3_pmu->on_cpu = cpu; + irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu)); + } + + return 0; +} + +static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + unsigned int target; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (hns3_pmu->on_cpu != cpu) + return 0; + + /* Choose a new CPU from all online cpus */ + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target); + hns3_pmu->on_cpu = target; + irq_set_affinity(hns3_pmu->irq, cpumask_of(target)); + + return 0; +} + +static void hns3_pmu_free_irq(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + +static int hns3_pmu_irq_register(struct pci_dev *pdev, + struct hns3_pmu *hns3_pmu) +{ + int irq, ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) { + pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret); + return ret; + } + + ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev); + if (ret) { + pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret); + return ret; + } + + irq = pci_irq_vector(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0, + hns3_pmu->pmu.name, hns3_pmu); + if (ret) { + pci_err(pdev, "failed to register irq, ret = %d.\n", ret); + return ret; + } + + hns3_pmu->irq = irq; + + return 0; +} + +static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + int ret; + + ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu); + if (ret) + return ret; + + ret = hns3_pmu_irq_register(pdev, hns3_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + if (ret) { + pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret); + return ret; + } + + ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1); + if (ret) { + pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + } + + return ret; +} + +static void hns3_pmu_uninit_pmu(struct pci_dev *pdev) +{ + struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev); + + perf_pmu_unregister(&hns3_pmu->pmu); + 
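+	/*
+	 * Tear down in reverse order of hns3_pmu_init_pmu(): the hotplug
+	 * instance is removed only after the PMU is unregistered, so no
+	 * CPU migration callback can run against an unregistered PMU.
+	 */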
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); +} + +static int hns3_pmu_init_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu"); + if (ret < 0) { + pci_err(pdev, "failed to request pci region, ret = %d.\n", ret); + return ret; + } + + pci_set_master(pdev); + + return 0; +} + +static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hns3_pmu *hns3_pmu; + int ret; + + hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL); + if (!hns3_pmu) + return -ENOMEM; + + ret = hns3_pmu_init_dev(pdev); + if (ret) + return ret; + + ret = hns3_pmu_init_pmu(pdev, hns3_pmu); + if (ret) { + pci_clear_master(pdev); + return ret; + } + + pci_set_drvdata(pdev, hns3_pmu); + + return ret; +} + +static void hns3_pmu_remove(struct pci_dev *pdev) +{ + hns3_pmu_uninit_pmu(pdev); + pci_clear_master(pdev); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id hns3_pmu_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pmu_ids); + +static struct pci_driver hns3_pmu_driver = { + .name = "hns3_pmu", + .id_table = hns3_pmu_ids, + .probe = hns3_pmu_probe, + .remove = hns3_pmu_remove, +}; + +static int __init hns3_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + "AP_PERF_ARM_HNS3_PMU_ONLINE", + hns3_pmu_online_cpu, + hns3_pmu_offline_cpu); + if (ret) { + pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret); + return ret; + } + + ret = pci_register_driver(&hns3_pmu_driver); + if (ret) { + pr_err("failed to register pci driver, ret = %d.\n", ret); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); + } + + return ret; +} +module_init(hns3_pmu_module_init); + +static void __exit hns3_pmu_module_exit(void) +{ + pci_unregister_driver(&hns3_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); +} +module_exit(hns3_pmu_module_exit); + +MODULE_DESCRIPTION("HNS3 PMU driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c new file mode 100644 index 0000000000..c14089fa7d --- /dev/null +++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Meson G12A MIPI DSI Analog PHY + * + * Copyright (C) 2018 Amlogic, Inc. 
All rights reserved + * Copyright (C) 2022 BayLibre, SAS + * Author: Neil Armstrong + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HHI_MIPI_CNTL0 0x00 +#define HHI_MIPI_CNTL0_DIF_REF_CTL1 GENMASK(31, 16) +#define HHI_MIPI_CNTL0_DIF_REF_CTL0 GENMASK(15, 0) + +#define HHI_MIPI_CNTL1 0x04 +#define HHI_MIPI_CNTL1_BANDGAP BIT(16) +#define HHI_MIPI_CNTL2_DIF_REF_CTL2 GENMASK(15, 0) + +#define HHI_MIPI_CNTL2 0x08 +#define HHI_MIPI_CNTL2_DIF_TX_CTL1 GENMASK(31, 16) +#define HHI_MIPI_CNTL2_CH_EN GENMASK(15, 11) +#define HHI_MIPI_CNTL2_DIF_TX_CTL0 GENMASK(10, 0) + +#define DSI_LANE_0 BIT(4) +#define DSI_LANE_1 BIT(3) +#define DSI_LANE_CLK BIT(2) +#define DSI_LANE_2 BIT(1) +#define DSI_LANE_3 BIT(0) + +struct phy_g12a_mipi_dphy_analog_priv { + struct phy *phy; + struct regmap *regmap; + struct phy_configure_opts_mipi_dphy config; +}; + +static int phy_g12a_mipi_dphy_analog_configure(struct phy *phy, + union phy_configure_opts *opts) +{ + struct phy_g12a_mipi_dphy_analog_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy); + if (ret) + return ret; + + memcpy(&priv->config, opts, sizeof(priv->config)); + + return 0; +} + +static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy) +{ + struct phy_g12a_mipi_dphy_analog_priv *priv = phy_get_drvdata(phy); + unsigned int reg; + + regmap_write(priv->regmap, HHI_MIPI_CNTL0, + FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL0, 0x8) | + FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL1, 0xa487)); + + regmap_write(priv->regmap, HHI_MIPI_CNTL1, + FIELD_PREP(HHI_MIPI_CNTL2_DIF_REF_CTL2, 0x2e) | + HHI_MIPI_CNTL1_BANDGAP); + + regmap_write(priv->regmap, HHI_MIPI_CNTL2, + FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) | + FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680)); + + reg = DSI_LANE_CLK; + switch (priv->config.lanes) { + case 4: + reg |= DSI_LANE_3; + fallthrough; + case 3: + reg |= DSI_LANE_2; + fallthrough; + case 2: + reg |= DSI_LANE_1; + fallthrough; + case 1: + reg |= DSI_LANE_0; + break; + default: + reg = 0; + } + + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL2, + HHI_MIPI_CNTL2_CH_EN, + FIELD_PREP(HHI_MIPI_CNTL2_CH_EN, reg)); + + return 0; +} + +static int phy_g12a_mipi_dphy_analog_power_off(struct phy *phy) +{ + struct phy_g12a_mipi_dphy_analog_priv *priv = phy_get_drvdata(phy); + + regmap_write(priv->regmap, HHI_MIPI_CNTL0, 0); + regmap_write(priv->regmap, HHI_MIPI_CNTL1, 0); + regmap_write(priv->regmap, HHI_MIPI_CNTL2, 0); + + return 0; +} + +static const struct phy_ops phy_g12a_mipi_dphy_analog_ops = { + .configure = phy_g12a_mipi_dphy_analog_configure, + .power_on = phy_g12a_mipi_dphy_analog_power_on, + .power_off = phy_g12a_mipi_dphy_analog_power_off, + .owner = THIS_MODULE, +}; + +static int phy_g12a_mipi_dphy_analog_probe(struct platform_device *pdev) +{ + struct phy_provider *phy; + struct device *dev = &pdev->dev; + struct phy_g12a_mipi_dphy_analog_priv *priv; + struct device_node *np = dev->of_node, *parent_np; + struct regmap *map; + + priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* Get the hhi system controller node */ + parent_np = of_get_parent(np); + map = syscon_node_to_regmap(parent_np); + of_node_put(parent_np); + if (IS_ERR(map)) + return dev_err_probe(dev, PTR_ERR(map), "failed to get HHI regmap\n"); + + priv->regmap = map; + + priv->phy = devm_phy_create(dev, np, &phy_g12a_mipi_dphy_analog_ops); + if (IS_ERR(priv->phy)) + return dev_err_probe(dev, PTR_ERR(priv->phy), "failed to create PHY\n"); + + 
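/* one PHY per node here, so the generic of_phy_simple_xlate() lookup below is sufficient */ +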
phy_set_drvdata(priv->phy, priv); + dev_set_drvdata(dev, priv); + + phy = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy); +} + +static const struct of_device_id phy_g12a_mipi_dphy_analog_of_match[] = { + { + .compatible = "amlogic,g12a-mipi-dphy-analog", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, phy_g12a_mipi_dphy_analog_of_match); + +static struct platform_driver phy_g12a_mipi_dphy_analog_driver = { + .probe = phy_g12a_mipi_dphy_analog_probe, + .driver = { + .name = "phy-meson-g12a-mipi-dphy-analog", + .of_match_table = phy_g12a_mipi_dphy_analog_of_match, + }, +}; +module_platform_driver(phy_g12a_mipi_dphy_analog_driver); + +MODULE_AUTHOR("Neil Armstrong "); +MODULE_DESCRIPTION("Meson G12A MIPI Analog D-PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c new file mode 100644 index 0000000000..e514b64bfd --- /dev/null +++ b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2017-2020,2022 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define REG_SET 0x4 +#define REG_CLR 0x8 + +#define PHY_CTRL 0x0 +#define M_MASK GENMASK(18, 17) +#define M(n) FIELD_PREP(M_MASK, (n)) +#define CCM_MASK GENMASK(16, 14) +#define CCM(n) FIELD_PREP(CCM_MASK, (n)) +#define CA_MASK GENMASK(13, 11) +#define CA(n) FIELD_PREP(CA_MASK, (n)) +#define TST_MASK GENMASK(10, 5) +#define TST(n) FIELD_PREP(TST_MASK, (n)) +#define CH_EN(id) BIT(3 + (id)) +#define NB BIT(2) +#define RFB BIT(1) +#define PD BIT(0) + +/* Power On Reset(POR) value */ +#define CTRL_RESET_VAL (M(0x0) | CCM(0x4) | CA(0x4) | TST(0x25)) + +/* PHY initialization value and mask */ +#define CTRL_INIT_MASK (M_MASK | CCM_MASK | CA_MASK | TST_MASK | NB | RFB) +#define CTRL_INIT_VAL (M(0x0) | CCM(0x5) | CA(0x4) | TST(0x25) | RFB) + +#define PHY_STATUS 0x10 +#define LOCK BIT(0) + +#define PHY_NUM 2 + +#define MIN_CLKIN_FREQ (25 * MEGA) +#define MAX_CLKIN_FREQ (165 * MEGA) + +#define PLL_LOCK_SLEEP 10 +#define PLL_LOCK_TIMEOUT 1000 + +struct mixel_lvds_phy { + struct phy *phy; + struct phy_configure_opts_lvds cfg; + unsigned int id; +}; + +struct mixel_lvds_phy_priv { + struct regmap *regmap; + struct mutex lock; /* protect remap access and cfg of our own */ + struct clk *phy_ref_clk; + struct mixel_lvds_phy *phys[PHY_NUM]; +}; + +static int mixel_lvds_phy_init(struct phy *phy) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent); + + mutex_lock(&priv->lock); + regmap_update_bits(priv->regmap, + PHY_CTRL, CTRL_INIT_MASK, CTRL_INIT_VAL); + mutex_unlock(&priv->lock); + + return 0; +} + +static int mixel_lvds_phy_power_on(struct phy *phy) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent); + struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy); + struct mixel_lvds_phy *companion = priv->phys[lvds_phy->id ^ 1]; + struct phy_configure_opts_lvds *cfg = &lvds_phy->cfg; + u32 val = 0; + u32 locked; + int ret; + + /* The master PHY would power on the slave PHY. 
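Both channels are enabled together from the master's power-on path below.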
*/ + if (cfg->is_slave) + return 0; + + ret = clk_prepare_enable(priv->phy_ref_clk); + if (ret < 0) { + dev_err(&phy->dev, + "failed to enable PHY reference clock: %d\n", ret); + return ret; + } + + mutex_lock(&priv->lock); + if (cfg->bits_per_lane_and_dclk_cycle == 7) { + if (cfg->differential_clk_rate < 44000000) + val |= M(0x2); + else if (cfg->differential_clk_rate < 90000000) + val |= M(0x1); + else + val |= M(0x0); + } else { + val = NB; + + if (cfg->differential_clk_rate < 32000000) + val |= M(0x2); + else if (cfg->differential_clk_rate < 63000000) + val |= M(0x1); + else + val |= M(0x0); + } + regmap_update_bits(priv->regmap, PHY_CTRL, M_MASK | NB, val); + + /* + * Enable two channels synchronously, + * if the companion PHY is a slave PHY. + */ + if (companion->cfg.is_slave) + val = CH_EN(0) | CH_EN(1); + else + val = CH_EN(lvds_phy->id); + regmap_write(priv->regmap, PHY_CTRL + REG_SET, val); + + ret = regmap_read_poll_timeout(priv->regmap, PHY_STATUS, locked, + locked, PLL_LOCK_SLEEP, + PLL_LOCK_TIMEOUT); + if (ret < 0) { + dev_err(&phy->dev, "failed to get PHY lock: %d\n", ret); + clk_disable_unprepare(priv->phy_ref_clk); + } + mutex_unlock(&priv->lock); + + return ret; +} + +static int mixel_lvds_phy_power_off(struct phy *phy) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent); + struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy); + struct mixel_lvds_phy *companion = priv->phys[lvds_phy->id ^ 1]; + struct phy_configure_opts_lvds *cfg = &lvds_phy->cfg; + + /* The master PHY would power off the slave PHY. */ + if (cfg->is_slave) + return 0; + + mutex_lock(&priv->lock); + if (companion->cfg.is_slave) + regmap_write(priv->regmap, PHY_CTRL + REG_CLR, + CH_EN(0) | CH_EN(1)); + else + regmap_write(priv->regmap, PHY_CTRL + REG_CLR, + CH_EN(lvds_phy->id)); + mutex_unlock(&priv->lock); + + clk_disable_unprepare(priv->phy_ref_clk); + + return 0; +} + +static int mixel_lvds_phy_configure(struct phy *phy, + union phy_configure_opts *opts) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent); + struct phy_configure_opts_lvds *cfg = &opts->lvds; + int ret; + + ret = clk_set_rate(priv->phy_ref_clk, cfg->differential_clk_rate); + if (ret) + dev_err(&phy->dev, "failed to set PHY reference clock rate(%lu): %d\n", + cfg->differential_clk_rate, ret); + + return ret; +} + +/* Assume the master PHY's configuration set is cached first. 
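mixel_lvds_phy_validate() caches each PHY's cfg under priv->lock, so the master's values are already in place when the slave is checked.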
*/ +static int mixel_lvds_phy_check_slave(struct phy *slave_phy) +{ + struct device *dev = &slave_phy->dev; + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev->parent); + struct mixel_lvds_phy *slv = phy_get_drvdata(slave_phy); + struct mixel_lvds_phy *mst = priv->phys[slv->id ^ 1]; + struct phy_configure_opts_lvds *mst_cfg = &mst->cfg; + struct phy_configure_opts_lvds *slv_cfg = &slv->cfg; + + if (mst_cfg->bits_per_lane_and_dclk_cycle != + slv_cfg->bits_per_lane_and_dclk_cycle) { + dev_err(dev, "number bits mismatch(mst: %u vs slv: %u)\n", + mst_cfg->bits_per_lane_and_dclk_cycle, + slv_cfg->bits_per_lane_and_dclk_cycle); + return -EINVAL; + } + + if (mst_cfg->differential_clk_rate != + slv_cfg->differential_clk_rate) { + dev_err(dev, "dclk rate mismatch(mst: %lu vs slv: %lu)\n", + mst_cfg->differential_clk_rate, + slv_cfg->differential_clk_rate); + return -EINVAL; + } + + if (mst_cfg->lanes != slv_cfg->lanes) { + dev_err(dev, "lanes mismatch(mst: %u vs slv: %u)\n", + mst_cfg->lanes, slv_cfg->lanes); + return -EINVAL; + } + + if (mst_cfg->is_slave == slv_cfg->is_slave) { + dev_err(dev, "master PHY is not found\n"); + return -EINVAL; + } + + return 0; +} + +static int mixel_lvds_phy_validate(struct phy *phy, enum phy_mode mode, + int submode, union phy_configure_opts *opts) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent); + struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy); + struct phy_configure_opts_lvds *cfg = &opts->lvds; + int ret = 0; + + if (mode != PHY_MODE_LVDS) { + dev_err(&phy->dev, "invalid PHY mode(%d)\n", mode); + return -EINVAL; + } + + if (cfg->bits_per_lane_and_dclk_cycle != 7 && + cfg->bits_per_lane_and_dclk_cycle != 10) { + dev_err(&phy->dev, "invalid bits per data lane(%u)\n", + cfg->bits_per_lane_and_dclk_cycle); + return -EINVAL; + } + + if (cfg->lanes != 4 && cfg->lanes != 3) { + dev_err(&phy->dev, "invalid data lanes(%u)\n", cfg->lanes); + return -EINVAL; + } + + if (cfg->differential_clk_rate < MIN_CLKIN_FREQ || + cfg->differential_clk_rate > MAX_CLKIN_FREQ) { + dev_err(&phy->dev, "invalid differential clock rate(%lu)\n", + cfg->differential_clk_rate); + return -EINVAL; + } + + mutex_lock(&priv->lock); + /* cache configuration set of our own for check */ + memcpy(&lvds_phy->cfg, cfg, sizeof(*cfg)); + + if (cfg->is_slave) { + ret = mixel_lvds_phy_check_slave(phy); + if (ret) + dev_err(&phy->dev, "failed to check slave PHY: %d\n", ret); + } + mutex_unlock(&priv->lock); + + return ret; +} + +static const struct phy_ops mixel_lvds_phy_ops = { + .init = mixel_lvds_phy_init, + .power_on = mixel_lvds_phy_power_on, + .power_off = mixel_lvds_phy_power_off, + .configure = mixel_lvds_phy_configure, + .validate = mixel_lvds_phy_validate, + .owner = THIS_MODULE, +}; + +static int mixel_lvds_phy_reset(struct device *dev) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) { + dev_err(dev, "failed to get PM runtime: %d\n", ret); + return ret; + } + + regmap_write(priv->regmap, PHY_CTRL, CTRL_RESET_VAL); + + ret = pm_runtime_put(dev); + if (ret < 0) + dev_err(dev, "failed to put PM runtime: %d\n", ret); + + return ret; +} + +static struct phy *mixel_lvds_phy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev); + unsigned int phy_id; + + if (args->args_count != 1) { + dev_err(dev, + "invalid argument number(%d) for 'phys' property\n", + args->args_count); + return ERR_PTR(-EINVAL); + } + + phy_id = 
args->args[0]; + + if (phy_id >= PHY_NUM) { + dev_err(dev, "invalid PHY index(%d)\n", phy_id); + return ERR_PTR(-ENODEV); + } + + return priv->phys[phy_id]->phy; +} + +static int mixel_lvds_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + struct mixel_lvds_phy_priv *priv; + struct mixel_lvds_phy *lvds_phy; + struct phy *phy; + int i; + int ret; + + if (!dev->of_node) + return -ENODEV; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regmap = syscon_node_to_regmap(dev->of_node->parent); + if (IS_ERR(priv->regmap)) + return dev_err_probe(dev, PTR_ERR(priv->regmap), + "failed to get regmap\n"); + + priv->phy_ref_clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->phy_ref_clk)) + return dev_err_probe(dev, PTR_ERR(priv->phy_ref_clk), + "failed to get PHY reference clock\n"); + + mutex_init(&priv->lock); + + dev_set_drvdata(dev, priv); + + pm_runtime_enable(dev); + + ret = mixel_lvds_phy_reset(dev); + if (ret) { + dev_err(dev, "failed to do POR reset: %d\n", ret); + return ret; + } + + for (i = 0; i < PHY_NUM; i++) { + lvds_phy = devm_kzalloc(dev, sizeof(*lvds_phy), GFP_KERNEL); + if (!lvds_phy) { + ret = -ENOMEM; + goto err; + } + + phy = devm_phy_create(dev, NULL, &mixel_lvds_phy_ops); + if (IS_ERR(phy)) { + ret = PTR_ERR(phy); + dev_err(dev, "failed to create PHY for channel%d: %d\n", + i, ret); + goto err; + } + + lvds_phy->phy = phy; + lvds_phy->id = i; + priv->phys[i] = lvds_phy; + + phy_set_drvdata(phy, lvds_phy); + } + + phy_provider = devm_of_phy_provider_register(dev, mixel_lvds_phy_xlate); + if (IS_ERR(phy_provider)) { + ret = PTR_ERR(phy_provider); + dev_err(dev, "failed to register PHY provider: %d\n", ret); + goto err; + } + + return 0; +err: + pm_runtime_disable(dev); + + return ret; +} + +static int mixel_lvds_phy_remove(struct platform_device *pdev) +{ + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static int __maybe_unused mixel_lvds_phy_runtime_suspend(struct device *dev) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev); + + /* power down */ + mutex_lock(&priv->lock); + regmap_write(priv->regmap, PHY_CTRL + REG_SET, PD); + mutex_unlock(&priv->lock); + + return 0; +} + +static int __maybe_unused mixel_lvds_phy_runtime_resume(struct device *dev) +{ + struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev); + + /* power up + control initialization */ + mutex_lock(&priv->lock); + regmap_update_bits(priv->regmap, PHY_CTRL, + CTRL_INIT_MASK | PD, CTRL_INIT_VAL); + mutex_unlock(&priv->lock); + + return 0; +} + +static const struct dev_pm_ops mixel_lvds_phy_pm_ops = { + SET_RUNTIME_PM_OPS(mixel_lvds_phy_runtime_suspend, + mixel_lvds_phy_runtime_resume, NULL) +}; + +static const struct of_device_id mixel_lvds_phy_of_match[] = { + { .compatible = "fsl,imx8qm-lvds-phy" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mixel_lvds_phy_of_match); + +static struct platform_driver mixel_lvds_phy_driver = { + .probe = mixel_lvds_phy_probe, + .remove = mixel_lvds_phy_remove, + .driver = { + .pm = &mixel_lvds_phy_pm_ops, + .name = "mixel-lvds-phy", + .of_match_table = mixel_lvds_phy_of_match, + } +}; +module_platform_driver(mixel_lvds_phy_driver); + +MODULE_DESCRIPTION("Mixel LVDS PHY driver"); +MODULE_AUTHOR("Liu Ying "); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/mediatek/phy-mtk-dp.c b/drivers/phy/mediatek/phy-mtk-dp.c new file mode 100644 index 0000000000..31266e7ca3 --- /dev/null +++ b/drivers/phy/mediatek/phy-mtk-dp.c @@ -0,0 +1,202 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek DisplayPort PHY driver + * + * Copyright (c) 2022, BayLibre Inc. + * Copyright (c) 2022, MediaTek Inc. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define PHY_OFFSET 0x1000 + +#define MTK_DP_PHY_DIG_PLL_CTL_1 (PHY_OFFSET + 0x14) +#define TPLL_SSC_EN BIT(3) + +#define MTK_DP_PHY_DIG_BIT_RATE (PHY_OFFSET + 0x3C) +#define BIT_RATE_RBR 0 +#define BIT_RATE_HBR 1 +#define BIT_RATE_HBR2 2 +#define BIT_RATE_HBR3 3 + +#define MTK_DP_PHY_DIG_SW_RST (PHY_OFFSET + 0x38) +#define DP_GLB_SW_RST_PHYD BIT(0) + +#define MTK_DP_LANE0_DRIVING_PARAM_3 (PHY_OFFSET + 0x138) +#define MTK_DP_LANE1_DRIVING_PARAM_3 (PHY_OFFSET + 0x238) +#define MTK_DP_LANE2_DRIVING_PARAM_3 (PHY_OFFSET + 0x338) +#define MTK_DP_LANE3_DRIVING_PARAM_3 (PHY_OFFSET + 0x438) +#define XTP_LN_TX_LCTXC0_SW0_PRE0_DEFAULT BIT(4) +#define XTP_LN_TX_LCTXC0_SW0_PRE1_DEFAULT (BIT(10) | BIT(12)) +#define XTP_LN_TX_LCTXC0_SW0_PRE2_DEFAULT GENMASK(20, 19) +#define XTP_LN_TX_LCTXC0_SW0_PRE3_DEFAULT GENMASK(29, 29) +#define DRIVING_PARAM_3_DEFAULT (XTP_LN_TX_LCTXC0_SW0_PRE0_DEFAULT | \ + XTP_LN_TX_LCTXC0_SW0_PRE1_DEFAULT | \ + XTP_LN_TX_LCTXC0_SW0_PRE2_DEFAULT | \ + XTP_LN_TX_LCTXC0_SW0_PRE3_DEFAULT) + +#define XTP_LN_TX_LCTXC0_SW1_PRE0_DEFAULT GENMASK(4, 3) +#define XTP_LN_TX_LCTXC0_SW1_PRE1_DEFAULT GENMASK(12, 9) +#define XTP_LN_TX_LCTXC0_SW1_PRE2_DEFAULT (BIT(18) | BIT(21)) +#define XTP_LN_TX_LCTXC0_SW2_PRE0_DEFAULT GENMASK(29, 29) +#define DRIVING_PARAM_4_DEFAULT (XTP_LN_TX_LCTXC0_SW1_PRE0_DEFAULT | \ + XTP_LN_TX_LCTXC0_SW1_PRE1_DEFAULT | \ + XTP_LN_TX_LCTXC0_SW1_PRE2_DEFAULT | \ + XTP_LN_TX_LCTXC0_SW2_PRE0_DEFAULT) + +#define XTP_LN_TX_LCTXC0_SW2_PRE1_DEFAULT (BIT(3) | BIT(5)) +#define XTP_LN_TX_LCTXC0_SW3_PRE0_DEFAULT GENMASK(13, 12) +#define DRIVING_PARAM_5_DEFAULT (XTP_LN_TX_LCTXC0_SW2_PRE1_DEFAULT | \ + XTP_LN_TX_LCTXC0_SW3_PRE0_DEFAULT) + +#define XTP_LN_TX_LCTXCP1_SW0_PRE0_DEFAULT 0 +#define XTP_LN_TX_LCTXCP1_SW0_PRE1_DEFAULT GENMASK(10, 10) +#define XTP_LN_TX_LCTXCP1_SW0_PRE2_DEFAULT GENMASK(19, 19) +#define XTP_LN_TX_LCTXCP1_SW0_PRE3_DEFAULT GENMASK(28, 28) +#define DRIVING_PARAM_6_DEFAULT (XTP_LN_TX_LCTXCP1_SW0_PRE0_DEFAULT | \ + XTP_LN_TX_LCTXCP1_SW0_PRE1_DEFAULT | \ + XTP_LN_TX_LCTXCP1_SW0_PRE2_DEFAULT | \ + XTP_LN_TX_LCTXCP1_SW0_PRE3_DEFAULT) + +#define XTP_LN_TX_LCTXCP1_SW1_PRE0_DEFAULT 0 +#define XTP_LN_TX_LCTXCP1_SW1_PRE1_DEFAULT GENMASK(10, 9) +#define XTP_LN_TX_LCTXCP1_SW1_PRE2_DEFAULT GENMASK(19, 18) +#define XTP_LN_TX_LCTXCP1_SW2_PRE0_DEFAULT 0 +#define DRIVING_PARAM_7_DEFAULT (XTP_LN_TX_LCTXCP1_SW1_PRE0_DEFAULT | \ + XTP_LN_TX_LCTXCP1_SW1_PRE1_DEFAULT | \ + XTP_LN_TX_LCTXCP1_SW1_PRE2_DEFAULT | \ + XTP_LN_TX_LCTXCP1_SW2_PRE0_DEFAULT) + +#define XTP_LN_TX_LCTXCP1_SW2_PRE1_DEFAULT GENMASK(3, 3) +#define XTP_LN_TX_LCTXCP1_SW3_PRE0_DEFAULT 0 +#define DRIVING_PARAM_8_DEFAULT (XTP_LN_TX_LCTXCP1_SW2_PRE1_DEFAULT | \ + XTP_LN_TX_LCTXCP1_SW3_PRE0_DEFAULT) + +struct mtk_dp_phy { + struct regmap *regs; +}; + +static int mtk_dp_phy_init(struct phy *phy) +{ + struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy); + u32 driving_params[] = { + DRIVING_PARAM_3_DEFAULT, + DRIVING_PARAM_4_DEFAULT, + DRIVING_PARAM_5_DEFAULT, + DRIVING_PARAM_6_DEFAULT, + DRIVING_PARAM_7_DEFAULT, + DRIVING_PARAM_8_DEFAULT + }; + + regmap_bulk_write(dp_phy->regs, MTK_DP_LANE0_DRIVING_PARAM_3, + driving_params, ARRAY_SIZE(driving_params)); + regmap_bulk_write(dp_phy->regs, MTK_DP_LANE1_DRIVING_PARAM_3, + driving_params, ARRAY_SIZE(driving_params)); + regmap_bulk_write(dp_phy->regs, 
MTK_DP_LANE2_DRIVING_PARAM_3, + driving_params, ARRAY_SIZE(driving_params)); + regmap_bulk_write(dp_phy->regs, MTK_DP_LANE3_DRIVING_PARAM_3, + driving_params, ARRAY_SIZE(driving_params)); + + return 0; +} + +static int mtk_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy); + u32 val; + + if (opts->dp.set_rate) { + switch (opts->dp.link_rate) { + default: + dev_err(&phy->dev, + "Implementation error, unknown link rate %u\n", + opts->dp.link_rate); + return -EINVAL; + case 1620: + val = BIT_RATE_RBR; + break; + case 2700: + val = BIT_RATE_HBR; + break; + case 5400: + val = BIT_RATE_HBR2; + break; + case 8100: + val = BIT_RATE_HBR3; + break; + } + regmap_write(dp_phy->regs, MTK_DP_PHY_DIG_BIT_RATE, val); + } + + regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_PLL_CTL_1, + TPLL_SSC_EN, opts->dp.ssc ? TPLL_SSC_EN : 0); + + return 0; +} + +static int mtk_dp_phy_reset(struct phy *phy) +{ + struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy); + + regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_SW_RST, + DP_GLB_SW_RST_PHYD, 0); + usleep_range(50, 200); + regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_SW_RST, + DP_GLB_SW_RST_PHYD, 1); + + return 0; +} + +static const struct phy_ops mtk_dp_phy_dev_ops = { + .init = mtk_dp_phy_init, + .configure = mtk_dp_phy_configure, + .reset = mtk_dp_phy_reset, + .owner = THIS_MODULE, +}; + +static int mtk_dp_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_dp_phy *dp_phy; + struct phy *phy; + struct regmap *regs; + + regs = *(struct regmap **)dev->platform_data; + if (!regs) + return dev_err_probe(dev, -EINVAL, + "No data passed, requires struct regmap**\n"); + + dp_phy = devm_kzalloc(dev, sizeof(*dp_phy), GFP_KERNEL); + if (!dp_phy) + return -ENOMEM; + + dp_phy->regs = regs; + phy = devm_phy_create(dev, NULL, &mtk_dp_phy_dev_ops); + if (IS_ERR(phy)) + return dev_err_probe(dev, PTR_ERR(phy), + "Failed to create DP PHY\n"); + + phy_set_drvdata(phy, dp_phy); + if (!dev->of_node) + phy_create_lookup(phy, "dp", dev_name(dev)); + + return 0; +} + +static struct platform_driver mtk_dp_phy_driver = { + .probe = mtk_dp_phy_probe, + .driver = { + .name = "mediatek-dp-phy", + }, +}; +module_platform_driver(mtk_dp_phy_driver); + +MODULE_AUTHOR("Markus Schneider-Pargmann "); +MODULE_DESCRIPTION("MediaTek DP PHY Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/mediatek/phy-mtk-pcie.c b/drivers/phy/mediatek/phy-mtk-pcie.c new file mode 100644 index 0000000000..7f29d43442 --- /dev/null +++ b/drivers/phy/mediatek/phy-mtk-pcie.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jianjun Wang + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "phy-mtk-io.h" + +#define PEXTP_ANA_GLB_00_REG 0x9000 +/* Internal Resistor Selection of TX Bias Current */ +#define EFUSE_GLB_INTR_SEL GENMASK(28, 24) + +#define PEXTP_ANA_LN0_TRX_REG 0xa000 + +#define PEXTP_ANA_TX_REG 0x04 +/* TX PMOS impedance selection */ +#define EFUSE_LN_TX_PMOS_SEL GENMASK(5, 2) +/* TX NMOS impedance selection */ +#define EFUSE_LN_TX_NMOS_SEL GENMASK(11, 8) + +#define PEXTP_ANA_RX_REG 0x3c +/* RX impedance selection */ +#define EFUSE_LN_RX_SEL GENMASK(3, 0) + +#define PEXTP_ANA_LANE_OFFSET 0x100 + +/** + * struct mtk_pcie_lane_efuse - eFuse data for each lane + * @tx_pmos: TX PMOS impedance selection data + * @tx_nmos: TX NMOS impedance selection data + * @rx_data: RX impedance selection data + * @lane_efuse_supported: software eFuse data is supported for this lane + */ +struct mtk_pcie_lane_efuse { + u32 tx_pmos; + u32 tx_nmos; + u32 rx_data; + bool lane_efuse_supported; +}; + +/** + * struct mtk_pcie_phy_data - phy data for each SoC + * @num_lanes: supported lane numbers + * @sw_efuse_supported: support software to load eFuse data + */ +struct mtk_pcie_phy_data { + int num_lanes; + bool sw_efuse_supported; +}; + +/** + * struct mtk_pcie_phy - PCIe phy driver main structure + * @dev: pointer to device + * @phy: pointer to generic phy + * @sif_base: IO mapped register base address of system interface + * @data: pointer to SoC dependent data + * @sw_efuse_en: software eFuse enable status + * @efuse_glb_intr: internal resistor selection of TX bias current data + * @efuse: pointer to eFuse data for each lane + */ +struct mtk_pcie_phy { + struct device *dev; + struct phy *phy; + void __iomem *sif_base; + const struct mtk_pcie_phy_data *data; + + bool sw_efuse_en; + u32 efuse_glb_intr; + struct mtk_pcie_lane_efuse *efuse; +}; + +static void mtk_pcie_efuse_set_lane(struct mtk_pcie_phy *pcie_phy, + unsigned int lane) +{ + struct mtk_pcie_lane_efuse *data = &pcie_phy->efuse[lane]; + void __iomem *addr; + + if (!data->lane_efuse_supported) + return; + + addr = pcie_phy->sif_base + PEXTP_ANA_LN0_TRX_REG + + lane * PEXTP_ANA_LANE_OFFSET; + + mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_PMOS_SEL, + FIELD_PREP(EFUSE_LN_TX_PMOS_SEL, data->tx_pmos)); + + mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_NMOS_SEL, + FIELD_PREP(EFUSE_LN_TX_NMOS_SEL, data->tx_nmos)); + + mtk_phy_update_bits(addr + PEXTP_ANA_RX_REG, EFUSE_LN_RX_SEL, + FIELD_PREP(EFUSE_LN_RX_SEL, data->rx_data)); +} + +/** + * mtk_pcie_phy_init() - Initialize the phy + * @phy: the phy to be initialized + * + * Initialize the phy by setting the efuse data. + * The hardware settings will be reset during suspend, it should be + * reinitialized when the consumer calls phy_init() again on resume. 
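+ * + * Return: always 0; when no software eFuse data was loaded the register + * writes are simply skipped.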
+ */ +static int mtk_pcie_phy_init(struct phy *phy) +{ + struct mtk_pcie_phy *pcie_phy = phy_get_drvdata(phy); + int i; + + if (!pcie_phy->sw_efuse_en) + return 0; + + /* Set global data */ + mtk_phy_update_bits(pcie_phy->sif_base + PEXTP_ANA_GLB_00_REG, + EFUSE_GLB_INTR_SEL, + FIELD_PREP(EFUSE_GLB_INTR_SEL, pcie_phy->efuse_glb_intr)); + + for (i = 0; i < pcie_phy->data->num_lanes; i++) + mtk_pcie_efuse_set_lane(pcie_phy, i); + + return 0; +} + +static const struct phy_ops mtk_pcie_phy_ops = { + .init = mtk_pcie_phy_init, + .owner = THIS_MODULE, +}; + +static int mtk_pcie_efuse_read_for_lane(struct mtk_pcie_phy *pcie_phy, + unsigned int lane) +{ + struct mtk_pcie_lane_efuse *efuse = &pcie_phy->efuse[lane]; + struct device *dev = pcie_phy->dev; + char efuse_id[16]; + int ret; + + snprintf(efuse_id, sizeof(efuse_id), "tx_ln%d_pmos", lane); + ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->tx_pmos); + if (ret) + return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id); + + snprintf(efuse_id, sizeof(efuse_id), "tx_ln%d_nmos", lane); + ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->tx_nmos); + if (ret) + return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id); + + snprintf(efuse_id, sizeof(efuse_id), "rx_ln%d", lane); + ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->rx_data); + if (ret) + return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id); + + if (!(efuse->tx_pmos || efuse->tx_nmos || efuse->rx_data)) + return dev_err_probe(dev, -EINVAL, + "No eFuse data found for lane%d, but DT enables it\n", + lane); + + efuse->lane_efuse_supported = true; + + return 0; +} + +static int mtk_pcie_read_efuse(struct mtk_pcie_phy *pcie_phy) +{ + struct device *dev = pcie_phy->dev; + bool nvmem_enabled; + int ret, i; + + /* nvmem data is optional */ + nvmem_enabled = device_property_present(dev, "nvmem-cells"); + if (!nvmem_enabled) + return 0; + + ret = nvmem_cell_read_variable_le_u32(dev, "glb_intr", + &pcie_phy->efuse_glb_intr); + if (ret) + return dev_err_probe(dev, ret, "Failed to read glb_intr\n"); + + pcie_phy->sw_efuse_en = true; + + pcie_phy->efuse = devm_kzalloc(dev, pcie_phy->data->num_lanes * + sizeof(*pcie_phy->efuse), GFP_KERNEL); + if (!pcie_phy->efuse) + return -ENOMEM; + + for (i = 0; i < pcie_phy->data->num_lanes; i++) { + ret = mtk_pcie_efuse_read_for_lane(pcie_phy, i); + if (ret) + return ret; + } + + return 0; +} + +static int mtk_pcie_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *provider; + struct mtk_pcie_phy *pcie_phy; + int ret; + + pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL); + if (!pcie_phy) + return -ENOMEM; + + pcie_phy->sif_base = devm_platform_ioremap_resource_byname(pdev, "sif"); + if (IS_ERR(pcie_phy->sif_base)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->sif_base), + "Failed to map phy-sif base\n"); + + pcie_phy->phy = devm_phy_create(dev, dev->of_node, &mtk_pcie_phy_ops); + if (IS_ERR(pcie_phy->phy)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->phy), + "Failed to create PCIe phy\n"); + + pcie_phy->dev = dev; + pcie_phy->data = of_device_get_match_data(dev); + if (!pcie_phy->data) + return dev_err_probe(dev, -EINVAL, "Failed to get phy data\n"); + + if (pcie_phy->data->sw_efuse_supported) { + /* + * Failing to read the eFuse data is not fatal; + * ignore the failure and keep going.
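+ * Resource errors (-EPROBE_DEFER, -ENOMEM) are still treated as fatal below.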
+ */ + ret = mtk_pcie_read_efuse(pcie_phy); + if (ret == -EPROBE_DEFER || ret == -ENOMEM) + return ret; + } + + phy_set_drvdata(pcie_phy->phy, pcie_phy); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return dev_err_probe(dev, PTR_ERR(provider), + "PCIe phy probe failed\n"); + + return 0; +} + +static const struct mtk_pcie_phy_data mt8195_data = { + .num_lanes = 2, + .sw_efuse_supported = true, +}; + +static const struct of_device_id mtk_pcie_phy_of_match[] = { + { .compatible = "mediatek,mt8195-pcie-phy", .data = &mt8195_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, mtk_pcie_phy_of_match); + +static struct platform_driver mtk_pcie_phy_driver = { + .probe = mtk_pcie_phy_probe, + .driver = { + .name = "mtk-pcie-phy", + .of_match_table = mtk_pcie_phy_of_match, + }, +}; +module_platform_driver(mtk_pcie_phy_driver); + +MODULE_DESCRIPTION("MediaTek PCIe PHY driver"); +MODULE_AUTHOR("Jianjun Wang "); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c new file mode 100644 index 0000000000..4b18289761 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -0,0 +1,2621 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "phy-qcom-qmp.h" + +/* QPHY_SW_RESET bit */ +#define SW_RESET BIT(0) +/* QPHY_POWER_DOWN_CONTROL */ +#define SW_PWRDN BIT(0) +#define REFCLK_DRV_DSBL BIT(1) +/* QPHY_START_CONTROL bits */ +#define SERDES_START BIT(0) +#define PCS_START BIT(1) +#define PLL_READY_GATE_EN BIT(3) +/* QPHY_PCS_STATUS bit */ +#define PHYSTATUS BIT(6) +#define PHYSTATUS_4_20 BIT(7) +/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */ +#define PCS_READY BIT(0) + +/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */ +/* DP PHY soft reset */ +#define SW_DPPHY_RESET BIT(0) +/* mux to select DP PHY reset control, 0:HW control, 1: software reset */ +#define SW_DPPHY_RESET_MUX BIT(1) +/* USB3 PHY soft reset */ +#define SW_USB3PHY_RESET BIT(2) +/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */ +#define SW_USB3PHY_RESET_MUX BIT(3) + +/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */ +#define USB3_MODE BIT(0) /* enables USB3 mode */ +#define DP_MODE BIT(1) /* enables DP mode */ + +/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */ +#define ARCVR_DTCT_EN BIT(0) +#define ALFPS_DTCT_EN BIT(1) +#define ARCVR_DTCT_EVENT_SEL BIT(4) + +/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */ +#define IRQ_CLEAR BIT(0) + +/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */ +#define RCVR_DETECT BIT(0) + +/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */ +#define CLAMP_EN BIT(0) /* enables i/o clamp_n */ + +#define PHY_INIT_COMPLETE_TIMEOUT 10000 +#define POWER_DOWN_DELAY_US_MIN 10 +#define POWER_DOWN_DELAY_US_MAX 11 + +#define MAX_PROP_NAME 32 + +/* Define the assumed distance between lanes for underspecified device trees. */ +#define QMP_PHY_LEGACY_LANE_STRIDE 0x400 + +struct qmp_phy_init_tbl { + unsigned int offset; + unsigned int val; + /* + * register part of layout ? 
+ * if yes, then offset gives index in the reg-layout + */ + bool in_layout; + /* + * mask of lanes for which this register is written + * for cases when second lane needs different values + */ + u8 lane_mask; +}; + +#define QMP_PHY_INIT_CFG(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_L(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .in_layout = true, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_LANE(o, v, l) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = l, \ + } + +/* set of registers with offsets different per-PHY */ +enum qphy_reg_layout { + /* Common block control registers */ + QPHY_COM_SW_RESET, + QPHY_COM_POWER_DOWN_CONTROL, + QPHY_COM_START_CONTROL, + QPHY_COM_PCS_READY_STATUS, + /* PCS registers */ + QPHY_SW_RESET, + QPHY_START_CTRL, + QPHY_PCS_READY_STATUS, + QPHY_PCS_STATUS, + QPHY_PCS_AUTONOMOUS_MODE_CTRL, + QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR, + QPHY_PCS_LFPS_RXTERM_IRQ_STATUS, + QPHY_PCS_POWER_DOWN_CONTROL, + /* PCS_MISC registers */ + QPHY_PCS_MISC_TYPEC_CTRL, + /* Keep last to ensure regs_layout arrays are properly initialized */ + QPHY_LAYOUT_SIZE +}; + +static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x174, + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc, + [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170, +}; + +static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x44, + [QPHY_PCS_STATUS] = 0x14, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40, + + /* In PCS_USB */ + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x008, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x014, +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a), + 
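/* spread-spectrum clocking (SSC) parameters */ +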
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x37), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_rbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x6f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr2[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x8c), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr3[] = { + 
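/* overrides for the 8.1 Gbps (HBR3) link rate */ +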
QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x2a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x08), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRANSCEIVER_BIAS_EN, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_VMODE_CTRL1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_INTERFACE_SELECT, 0x3d), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_CLKBUF_ENABLE, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RESET_TSYNC_EN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRAN_DRVR_EMP_EN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_INTERFACE_MODE, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_BAND, 0x4), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_POL_INV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_DRV_LVL, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_EMP_POST1_LVL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = { + /* FLL settings */ + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + + /* Lock Det settings */ + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d), + 
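/* -6 dB and -3.5 dB de-emphasis for the low-speed (LS) margin level */ +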
QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + 
QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = { + /* Lock Det settings */ + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5), + 
QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), +}; + +static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl[] = { + 
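/* common V4 DP serdes setup; the per-rate tables below set HSCLK_SEL, DEC_START and the lock-compare values */ +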
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f), +}; + +static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_rbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x6f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04), +}; + +static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08), +}; + +static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr2[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x8c), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08), +}; + +static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr3[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x2a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08), +}; + +static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_VMODE_CTRL1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_INTERFACE_SELECT, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_CLKBUF_ENABLE, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RESET_TSYNC_EN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00), 
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_INTERFACE_MODE, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_BAND, 0x4), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_POL_INV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_DRV_LVL, 0x2a), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20), +}; + + +/* list of regulators */ +struct qmp_regulator_data { + const char *name; + unsigned int enable_load; +}; + +static struct qmp_regulator_data qmp_phy_vreg_l[] = { + { .name = "vdda-phy", .enable_load = 21800 }, + { .name = "vdda-pll", .enable_load = 36000 }, +}; + +struct qmp_phy; + +/* struct qmp_phy_cfg - per-PHY initialization config */ +struct qmp_phy_cfg { + /* phy-type - PCIE/UFS/USB */ + unsigned int type; + /* number of lanes provided by phy */ + int nlanes; + + /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ + const struct qmp_phy_init_tbl *serdes_tbl; + int serdes_tbl_num; + const struct qmp_phy_init_tbl *tx_tbl; + int tx_tbl_num; + const struct qmp_phy_init_tbl *rx_tbl; + int rx_tbl_num; + const struct qmp_phy_init_tbl *pcs_tbl; + int pcs_tbl_num; + const struct qmp_phy_init_tbl *pcs_usb_tbl; + int pcs_usb_tbl_num; + + /* Init sequence for DP PHY block link rates */ + const struct qmp_phy_init_tbl *serdes_tbl_rbr; + int serdes_tbl_rbr_num; + const struct qmp_phy_init_tbl *serdes_tbl_hbr; + int serdes_tbl_hbr_num; + const struct qmp_phy_init_tbl *serdes_tbl_hbr2; + int serdes_tbl_hbr2_num; + const struct qmp_phy_init_tbl *serdes_tbl_hbr3; + int serdes_tbl_hbr3_num; + + /* DP PHY callbacks */ + int (*configure_dp_phy)(struct qmp_phy *qphy); + void (*configure_dp_tx)(struct qmp_phy *qphy); + int (*calibrate_dp_phy)(struct qmp_phy *qphy); + void (*dp_aux_init)(struct qmp_phy *qphy); + + /* clock ids to be requested */ + const char * const *clk_list; + int num_clks; + /* resets to be requested */ + const char * const *reset_list; + int num_resets; + /* regulators to be requested */ + const struct qmp_regulator_data *vreg_list; + int num_vregs; + + /* array of registers with different offsets */ + const unsigned int *regs; + + unsigned int start_ctrl; + unsigned int pwrdn_ctrl; + /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ + unsigned int phy_status; + + /* true, if PHY needs delay after POWER_DOWN */ + bool has_pwrdn_delay; + /* power_down delay in usec */ + int pwrdn_delay_min; + int pwrdn_delay_max; + + /* true, if PHY has a separate DP_COM control block */ + bool has_phy_dp_com_ctrl; + /* true, if PHY has secondary tx/rx lanes to be configured */ + bool is_dual_lane_phy; + + /* Offset from PCS to PCS_USB region */ + unsigned int pcs_usb_offset; + +}; + +struct qmp_phy_combo_cfg { + const struct qmp_phy_cfg *usb_cfg; + const struct qmp_phy_cfg *dp_cfg; +}; + +/** + * struct qmp_phy - per-lane phy descriptor + * + * @phy: generic phy + * @cfg: phy specific configuration + * @serdes: iomapped memory space for phy's serdes (i.e. 
PLL) + * @tx: iomapped memory space for lane's tx + * @rx: iomapped memory space for lane's rx + * @pcs: iomapped memory space for lane's pcs + * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs) + * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs) + * @pcs_misc: iomapped memory space for lane's pcs_misc + * @pcs_usb: iomapped memory space for lane's pcs_usb + * @pipe_clk: pipe clock + * @index: lane index + * @qmp: QMP phy to which this lane belongs + * @lane_rst: lane's reset controller + * @mode: current PHY mode + * @dp_aux_cfg: Display port aux config + * @dp_opts: Display port optional config + * @dp_clks: Display port clocks + */ +struct qmp_phy { + struct phy *phy; + const struct qmp_phy_cfg *cfg; + void __iomem *serdes; + void __iomem *tx; + void __iomem *rx; + void __iomem *pcs; + void __iomem *tx2; + void __iomem *rx2; + void __iomem *pcs_misc; + void __iomem *pcs_usb; + struct clk *pipe_clk; + unsigned int index; + struct qcom_qmp *qmp; + struct reset_control *lane_rst; + enum phy_mode mode; + unsigned int dp_aux_cfg; + struct phy_configure_opts_dp dp_opts; + struct qmp_phy_dp_clks *dp_clks; +}; + +struct qmp_phy_dp_clks { + struct qmp_phy *qphy; + struct clk_hw dp_link_hw; + struct clk_hw dp_pixel_hw; +}; + +/** + * struct qcom_qmp - structure holding QMP phy block attributes + * + * @dev: device + * @dp_com: iomapped memory space for phy's dp_com control block + * + * @clks: array of clocks required by phy + * @resets: array of resets required by phy + * @vregs: regulator supplies bulk data + * + * @phys: array of per-lane phy descriptors + * @phy_mutex: mutex lock for PHY common block initialization + * @init_count: phy common block initialization count + * @ufs_reset: optional UFS PHY reset handle + */ +struct qcom_qmp { + struct device *dev; + void __iomem *dp_com; + + struct clk_bulk_data *clks; + struct reset_control_bulk_data *resets; + struct regulator_bulk_data *vregs; + + struct qmp_phy **phys; + + struct mutex phy_mutex; + int init_count; + + struct reset_control *ufs_reset; +}; + +static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy); +static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy); +static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy); +static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy); + +static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy); +static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy); +static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy); +static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy); + +static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg |= val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg &= ~val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +/* list of clocks required by phy */ +static const char * const qmp_v3_phy_clk_l[] = { + "aux", "cfg_ahb", "ref", "com_aux", +}; + +static const char * const qmp_v4_phy_clk_l[] = { + "aux", "ref_clk_src", "ref", "com_aux", +}; + +/* the primary usb3 phy on sm8250 doesn't have a ref clock */ +static const char * const qmp_v4_sm8250_usbphy_clk_l[] = { + "aux", "ref_clk_src", "com_aux" +}; + +/* list of resets */ +static const char * const 
msm8996_usb3phy_reset_l[] = { + "phy", "common", +}; + +static const char * const sc7180_usb3phy_reset_l[] = { + "phy", +}; + +static const struct qmp_phy_cfg sc7180_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = qmp_v3_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl), + .tx_tbl = qmp_v3_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl), + .rx_tbl = qmp_v3_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl), + .pcs_tbl = qmp_v3_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl), + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = sc7180_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sc7180_dpphy_cfg = { + .type = PHY_TYPE_DP, + .nlanes = 1, + + .serdes_tbl = qmp_v3_dp_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl), + .tx_tbl = qmp_v3_dp_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3), + + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = sc7180_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, + + .dp_aux_init = qcom_qmp_v3_phy_dp_aux_init, + .configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx, + .configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy, + .calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate, +}; + +static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = { + .usb_cfg = &sc7180_usb3phy_cfg, + .dp_cfg = &sc7180_dpphy_cfg, +}; + +static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl), + .tx_tbl = sm8150_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl), + .rx_tbl = sm8150_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl), + .pcs_tbl = sm8150_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl), + .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl), + .clk_list = qmp_v4_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x300, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + 
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sc8180x_dpphy_cfg = { + .type = PHY_TYPE_DP, + .nlanes = 1, + + .serdes_tbl = qmp_v4_dp_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl), + .tx_tbl = qmp_v4_dp_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3), + + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = sc7180_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, + + .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init, + .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx, + .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy, + .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate, +}; + +static const struct qmp_phy_combo_cfg sc8180x_usb3dpphy_cfg = { + .usb_cfg = &sm8150_usb3phy_cfg, + .dp_cfg = &sc8180x_dpphy_cfg, +}; + +static const struct qmp_phy_cfg sm8250_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl), + .tx_tbl = sm8250_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl), + .rx_tbl = sm8250_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl), + .pcs_tbl = sm8250_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl), + .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl), + .clk_list = qmp_v4_sm8250_usbphy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x300, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sm8250_dpphy_cfg = { + .type = PHY_TYPE_DP, + .nlanes = 1, + + .serdes_tbl = qmp_v4_dp_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl), + .tx_tbl = qmp_v4_dp_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3), + + .clk_list = qmp_v4_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = 
qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, + + .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init, + .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx, + .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy, + .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate, +}; + +static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = { + .usb_cfg = &sm8250_usb3phy_cfg, + .dp_cfg = &sm8250_dpphy_cfg, +}; + +static void qcom_qmp_phy_combo_configure_lane(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num, + u8 lane_mask) +{ + int i; + const struct qmp_phy_init_tbl *t = tbl; + + if (!t) + return; + + for (i = 0; i < num; i++, t++) { + if (!(t->lane_mask & lane_mask)) + continue; + + if (t->in_layout) + writel(t->val, base + regs[t->offset]); + else + writel(t->val, base + t->offset); + } +} + +static void qcom_qmp_phy_combo_configure(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num) +{ + qcom_qmp_phy_combo_configure_lane(base, regs, tbl, num, 0xff); +} + +static int qcom_qmp_phy_combo_serdes_init(struct qmp_phy *qphy) +{ + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; + int serdes_tbl_num = cfg->serdes_tbl_num; + + qcom_qmp_phy_combo_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + + if (cfg->type == PHY_TYPE_DP) { + switch (dp_opts->link_rate) { + case 1620: + qcom_qmp_phy_combo_configure(serdes, cfg->regs, + cfg->serdes_tbl_rbr, + cfg->serdes_tbl_rbr_num); + break; + case 2700: + qcom_qmp_phy_combo_configure(serdes, cfg->regs, + cfg->serdes_tbl_hbr, + cfg->serdes_tbl_hbr_num); + break; + case 5400: + qcom_qmp_phy_combo_configure(serdes, cfg->regs, + cfg->serdes_tbl_hbr2, + cfg->serdes_tbl_hbr2_num); + break; + case 8100: + qcom_qmp_phy_combo_configure(serdes, cfg->regs, + cfg->serdes_tbl_hbr3, + cfg->serdes_tbl_hbr3_num); + break; + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + } + + return 0; +} + +static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy) +{ + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, + qphy->pcs + QSERDES_DP_PHY_PD_CTL); + + /* Turn on BIAS current for PHY/PLL */ + writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX | + QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL, + qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); + + writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL); + + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_LANE_0_1_PWRDN | + DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN | + DP_PHY_PD_CTL_DP_CLAMP_EN, + qphy->pcs + QSERDES_DP_PHY_PD_CTL); + + writel(QSERDES_V3_COM_BIAS_EN | + QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN | + QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL | + QSERDES_V3_COM_CLKBUF_RX_DRIVE_L, + qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); + + writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0); + writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); + writel(0x24, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); + writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3); + writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4); + writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5); + writel(0x0a, qphy->pcs + 
QSERDES_DP_PHY_AUX_CFG6); + writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7); + writel(0xbb, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8); + writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9); + qphy->dp_aux_cfg = 0; + + writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | + PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | + PHY_AUX_REQ_ERR_MASK, + qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK); +} + +static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = { + { 0x00, 0x0c, 0x15, 0x1a }, + { 0x02, 0x0e, 0x16, 0xff }, + { 0x02, 0x11, 0xff, 0xff }, + { 0x04, 0xff, 0xff, 0xff } +}; + +static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = { + { 0x02, 0x12, 0x16, 0x1a }, + { 0x09, 0x19, 0x1f, 0xff }, + { 0x10, 0x1f, 0xff, 0xff }, + { 0x1f, 0xff, 0xff, 0xff } +}; + +static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = { + { 0x00, 0x0c, 0x14, 0x19 }, + { 0x00, 0x0b, 0x12, 0xff }, + { 0x00, 0x0b, 0xff, 0xff }, + { 0x04, 0xff, 0xff, 0xff } +}; + +static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = { + { 0x08, 0x0f, 0x16, 0x1f }, + { 0x11, 0x1e, 0x1f, 0xff }, + { 0x19, 0x1f, 0xff, 0xff }, + { 0x1f, 0xff, 0xff, 0xff } +}; + +static int qcom_qmp_phy_combo_configure_dp_swing(struct qmp_phy *qphy, + unsigned int drv_lvl_reg, unsigned int emp_post_reg) +{ + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + unsigned int v_level = 0, p_level = 0; + u8 voltage_swing_cfg, pre_emphasis_cfg; + int i; + + for (i = 0; i < dp_opts->lanes; i++) { + v_level = max(v_level, dp_opts->voltage[i]); + p_level = max(p_level, dp_opts->pre[i]); + } + + if (dp_opts->link_rate <= 2700) { + voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level]; + pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level]; + } else { + voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr3_hbr2[v_level][p_level]; + pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr3_hbr2[v_level][p_level]; + } + + /* TODO: Move check to config check */ + if (voltage_swing_cfg == 0xFF && pre_emphasis_cfg == 0xFF) + return -EINVAL; + + /* Enable MUX to use Cursor values from these registers */ + voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN; + pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN; + + writel(voltage_swing_cfg, qphy->tx + drv_lvl_reg); + writel(pre_emphasis_cfg, qphy->tx + emp_post_reg); + writel(voltage_swing_cfg, qphy->tx2 + drv_lvl_reg); + writel(pre_emphasis_cfg, qphy->tx2 + emp_post_reg); + + return 0; +} + +static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy) +{ + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + u32 bias_en, drvr_en; + + if (qcom_qmp_phy_combo_configure_dp_swing(qphy, + QSERDES_V3_TX_TX_DRV_LVL, + QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0) + return; + + if (dp_opts->lanes == 1) { + bias_en = 0x3e; + drvr_en = 0x13; + } else { + bias_en = 0x3f; + drvr_en = 0x10; + } + + writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN); + writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); + writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN); + writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); +} + +static bool qcom_qmp_phy_combo_configure_dp_mode(struct qmp_phy *qphy) +{ + u32 val; + bool reverse = false; + + val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN; + + /* + * TODO: Assume orientation is CC1 for now and two lanes, need to + * use type-c connector to understand orientation and lanes. 
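+ * (CC1/CC2 are the two type-c plug orientations; in a two-lane DP
+ * configuration the orientation decides which pair of the four
+ * physical lanes must be powered, as the sketch below shows.)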
+ * + * Otherwise val changes to be like below if this code understood + * the orientation of the type-c cable. + * + * if (lane_cnt == 4 || orientation == ORIENTATION_CC2) + * val |= DP_PHY_PD_CTL_LANE_0_1_PWRDN; + * if (lane_cnt == 4 || orientation == ORIENTATION_CC1) + * val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN; + * if (orientation == ORIENTATION_CC2) + * writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE); + */ + val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN; + writel(val, qphy->pcs + QSERDES_DP_PHY_PD_CTL); + + writel(0x5c, qphy->pcs + QSERDES_DP_PHY_MODE); + + return reverse; +} + +static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy) +{ + const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks; + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + u32 phy_vco_div, status; + unsigned long pixel_freq; + + qcom_qmp_phy_combo_configure_dp_mode(qphy); + + writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL); + writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL); + + switch (dp_opts->link_rate) { + case 1620: + phy_vco_div = 0x1; + pixel_freq = 1620000000UL / 2; + break; + case 2700: + phy_vco_div = 0x1; + pixel_freq = 2700000000UL / 2; + break; + case 5400: + phy_vco_div = 0x2; + pixel_freq = 5400000000UL / 4; + break; + case 8100: + phy_vco_div = 0x0; + pixel_freq = 8100000000UL / 6; + break; + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + writel(phy_vco_div, qphy->pcs + QSERDES_V3_DP_PHY_VCO_DIV); + + clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000); + clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq); + + writel(0x04, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); + writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG); + + writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL); + + if (readl_poll_timeout(qphy->serdes + QSERDES_V3_COM_C_READY_STATUS, + status, + ((status & BIT(0)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + + if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS, + status, + ((status & BIT(1)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG); + udelay(2000); + writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + + return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS, + status, + ((status & BIT(1)) > 0), + 500, + 10000); +} + +/* + * We need to calibrate the aux setting here as many times + * as the caller tries + */ +static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy) +{ + static const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d }; + u8 val; + + qphy->dp_aux_cfg++; + qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings); + val = cfg1_settings[qphy->dp_aux_cfg]; + + writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); + + return 0; +} + +static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy) +{ + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, + qphy->pcs + QSERDES_DP_PHY_PD_CTL); + + /* Turn on BIAS current for PHY/PLL */ + writel(0x17, qphy->serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN); + + writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0); + writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); + writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); + writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3); + writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4); + writel(0x26, qphy->pcs + 
QSERDES_DP_PHY_AUX_CFG5); + writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6); + writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7); + writel(0xb7, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8); + writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9); + qphy->dp_aux_cfg = 0; + + writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | + PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | + PHY_AUX_REQ_ERR_MASK, + qphy->pcs + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK); +} + +static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy) +{ + /* Program default values before writing proper values */ + writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL); + writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL); + + writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL); + writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL); + + qcom_qmp_phy_combo_configure_dp_swing(qphy, + QSERDES_V4_TX_TX_DRV_LVL, + QSERDES_V4_TX_TX_EMP_POST1_LVL); +} + +static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy) +{ + const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks; + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + u32 phy_vco_div, status; + unsigned long pixel_freq; + u32 bias0_en, drvr0_en, bias1_en, drvr1_en; + bool reverse; + + writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1); + + reverse = qcom_qmp_phy_combo_configure_dp_mode(qphy); + + writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); + writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); + + writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL); + writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL); + + switch (dp_opts->link_rate) { + case 1620: + phy_vco_div = 0x1; + pixel_freq = 1620000000UL / 2; + break; + case 2700: + phy_vco_div = 0x1; + pixel_freq = 2700000000UL / 2; + break; + case 5400: + phy_vco_div = 0x2; + pixel_freq = 5400000000UL / 4; + break; + case 8100: + phy_vco_div = 0x0; + pixel_freq = 8100000000UL / 6; + break; + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + writel(phy_vco_div, qphy->pcs + QSERDES_V4_DP_PHY_VCO_DIV); + + clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000); + clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq); + + writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG); + + writel(0x20, qphy->serdes + QSERDES_V4_COM_RESETSM_CNTRL); + + if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_C_READY_STATUS, + status, + ((status & BIT(0)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS, + status, + ((status & BIT(0)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS, + status, + ((status & BIT(1)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + + if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS, + status, + ((status & BIT(0)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS, + status, + ((status & BIT(1)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + /* + * At least for 7nm DP PHY this has to be done after enabling link + * clock. + */ + + if (dp_opts->lanes == 1) { + bias0_en = reverse ? 0x3e : 0x15; + bias1_en = reverse ? 0x15 : 0x3e; + drvr0_en = reverse ? 0x13 : 0x10; + drvr1_en = reverse ? 0x10 : 0x13; + } else if (dp_opts->lanes == 2) { + bias0_en = reverse ? 
0x3f : 0x15; + bias1_en = reverse ? 0x15 : 0x3f; + drvr0_en = 0x10; + drvr1_en = 0x10; + } else { + bias0_en = 0x3f; + bias1_en = 0x3f; + drvr0_en = 0x10; + drvr1_en = 0x10; + } + + writel(drvr0_en, qphy->tx + QSERDES_V4_TX_HIGHZ_DRVR_EN); + writel(bias0_en, qphy->tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN); + writel(drvr1_en, qphy->tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN); + writel(bias1_en, qphy->tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN); + + writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG); + udelay(2000); + writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + + if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS, + status, + ((status & BIT(1)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + writel(0x0a, qphy->tx + QSERDES_V4_TX_TX_POL_INV); + writel(0x0a, qphy->tx2 + QSERDES_V4_TX_TX_POL_INV); + + writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL); + writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL); + + writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL); + writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL); + + return 0; +} + +/* + * We need to calibrate the aux setting here as many times + * as the caller tries + */ +static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy) +{ + static const u8 cfg1_settings[] = { 0x20, 0x13, 0x23, 0x1d }; + u8 val; + + qphy->dp_aux_cfg++; + qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings); + val = cfg1_settings[qphy->dp_aux_cfg]; + + writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); + + return 0; +} + +static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + const struct phy_configure_opts_dp *dp_opts = &opts->dp; + struct qmp_phy *qphy = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qphy->cfg; + + memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts)); + if (qphy->dp_opts.set_voltages) { + cfg->configure_dp_tx(qphy); + qphy->dp_opts.set_voltages = 0; + } + + return 0; +} + +static int qcom_qmp_dp_phy_calibrate(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qphy->cfg; + + if (cfg->calibrate_dp_phy) + return cfg->calibrate_dp_phy(qphy); + + return 0; +} + +static int qcom_qmp_phy_combo_com_init(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *pcs = qphy->pcs; + void __iomem *dp_com = qmp->dp_com; + int ret; + + mutex_lock(&qmp->phy_mutex); + if (qmp->init_count++) { + mutex_unlock(&qmp->phy_mutex); + return 0; + } + + /* turn on regulator supplies */ + ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); + if (ret) { + dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); + goto err_unlock; + } + + ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); + if (ret) { + dev_err(qmp->dev, "reset assert failed\n"); + goto err_disable_regulators; + } + + ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets); + if (ret) { + dev_err(qmp->dev, "reset deassert failed\n"); + goto err_disable_regulators; + } + + ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks); + if (ret) + goto err_assert_reset; + + if (cfg->has_phy_dp_com_ctrl) { + qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL, + SW_PWRDN); + /* override hardware control for reset of qmp phy */ + qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, + SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | + SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); + + /* Default type-c orientation, i.e CC1 */ + qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02); + + qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL, + USB3_MODE | 
DP_MODE); + + /* bring both QMP USB and QMP DP PHYs PCS block out of reset */ + qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, + SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | + SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); + + qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03); + qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET); + } + + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) + qphy_setbits(pcs, + cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + else + qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + + mutex_unlock(&qmp->phy_mutex); + + return 0; + +err_assert_reset: + reset_control_bulk_assert(cfg->num_resets, qmp->resets); +err_disable_regulators: + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); +err_unlock: + mutex_unlock(&qmp->phy_mutex); + + return ret; +} + +static int qcom_qmp_phy_combo_com_exit(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + + mutex_lock(&qmp->phy_mutex); + if (--qmp->init_count) { + mutex_unlock(&qmp->phy_mutex); + return 0; + } + + reset_control_assert(qmp->ufs_reset); + + reset_control_bulk_assert(cfg->num_resets, qmp->resets); + + clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); + + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); + + mutex_unlock(&qmp->phy_mutex); + + return 0; +} + +static int qcom_qmp_phy_combo_init(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + int ret; + dev_vdbg(qmp->dev, "Initializing QMP phy\n"); + + ret = qcom_qmp_phy_combo_com_init(qphy); + if (ret) + return ret; + + if (cfg->type == PHY_TYPE_DP) + cfg->dp_aux_init(qphy); + + return 0; +} + +static int qcom_qmp_phy_combo_power_on(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *tx = qphy->tx; + void __iomem *rx = qphy->rx; + void __iomem *pcs = qphy->pcs; + void __iomem *status; + unsigned int mask, val, ready; + int ret; + + qcom_qmp_phy_combo_serdes_init(qphy); + + ret = clk_prepare_enable(qphy->pipe_clk); + if (ret) { + dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret); + return ret; + } + + /* Tx, Rx, and PCS configurations */ + qcom_qmp_phy_combo_configure_lane(tx, cfg->regs, + cfg->tx_tbl, cfg->tx_tbl_num, 1); + + /* Configuration for other LANE for USB-DP combo PHY */ + if (cfg->is_dual_lane_phy) { + qcom_qmp_phy_combo_configure_lane(qphy->tx2, cfg->regs, + cfg->tx_tbl, cfg->tx_tbl_num, 2); + } + + /* Configure special DP tx tunings */ + if (cfg->type == PHY_TYPE_DP) + cfg->configure_dp_tx(qphy); + + qcom_qmp_phy_combo_configure_lane(rx, cfg->regs, + cfg->rx_tbl, cfg->rx_tbl_num, 1); + + if (cfg->is_dual_lane_phy) { + qcom_qmp_phy_combo_configure_lane(qphy->rx2, cfg->regs, + cfg->rx_tbl, cfg->rx_tbl_num, 2); + } + + /* Configure link rate, swing, etc. 
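+	 * (DP only: configure_dp_phy() programs the rate-dependent VCO
+	 * divider and brings up the PLL; the USB3 half programs its PCS
+	 * table instead)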
*/ + if (cfg->type == PHY_TYPE_DP) { + cfg->configure_dp_phy(qphy); + } else { + qcom_qmp_phy_combo_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + } + + ret = reset_control_deassert(qmp->ufs_reset); + if (ret) + goto err_disable_pipe_clk; + + if (cfg->has_pwrdn_delay) + usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + + if (cfg->type != PHY_TYPE_DP) { + /* Pull PHY out of reset state */ + qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + /* start SerDes and Phy-Coding-Sublayer */ + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + status = pcs + cfg->regs[QPHY_PCS_STATUS]; + mask = cfg->phy_status; + ready = 0; + + ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, "phy initialization timed-out\n"); + goto err_disable_pipe_clk; + } + } + return 0; + +err_disable_pipe_clk: + clk_disable_unprepare(qphy->pipe_clk); + + return ret; +} + +static int qcom_qmp_phy_combo_power_off(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qphy->cfg; + + clk_disable_unprepare(qphy->pipe_clk); + + if (cfg->type == PHY_TYPE_DP) { + /* Assert DP PHY power down */ + writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL); + } else { + /* PHY reset */ + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + + /* stop SerDes and Phy-Coding-Sublayer */ + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + /* Put PHY into POWER DOWN state: active low */ + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + } else { + qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + } + } + + return 0; +} + +static int qcom_qmp_phy_combo_exit(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + qcom_qmp_phy_combo_com_exit(qphy); + + return 0; +} + +static int qcom_qmp_phy_combo_enable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_combo_init(phy); + if (ret) + return ret; + + ret = qcom_qmp_phy_combo_power_on(phy); + if (ret) + qcom_qmp_phy_combo_exit(phy); + + return ret; +} + +static int qcom_qmp_phy_combo_disable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_combo_power_off(phy); + if (ret) + return ret; + return qcom_qmp_phy_combo_exit(phy); +} + +static int qcom_qmp_phy_combo_set_mode(struct phy *phy, + enum phy_mode mode, int submode) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + qphy->mode = mode; + + return 0; +} + +static void qcom_qmp_phy_combo_enable_autonomous_mode(struct qmp_phy *qphy) +{ + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; + void __iomem *pcs_misc = qphy->pcs_misc; + u32 intr_mask; + + if (qphy->mode == PHY_MODE_USB_HOST_SS || + qphy->mode == PHY_MODE_USB_DEVICE_SS) + intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN; + else + intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL; + + /* Clear any pending interrupts status */ + qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR); + /* Writing 1 followed by 0 clears the interrupt */ + qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR); + + qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], + ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL); + + /* Enable required PHY autonomous mode interrupts */ + qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask); + + /* Enable i/o 
clamp_n for autonomous mode */
+	if (pcs_misc)
+		qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qcom_qmp_phy_combo_disable_autonomous_mode(struct qmp_phy *qphy)
+{
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+	void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
+	void __iomem *pcs_misc = qphy->pcs_misc;
+
+	/* Disable i/o clamp_n on resume for normal mode */
+	if (pcs_misc)
+		qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+	qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+		     ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+	qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+	/* Writing 1 followed by 0 clears the interrupt */
+	qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
+static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
+{
+	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+	struct qmp_phy *qphy = qmp->phys[0];
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+	dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
+
+	/* Supported only for USB3 PHY and luckily USB3 is the first phy */
+	if (cfg->type != PHY_TYPE_USB3)
+		return 0;
+
+	if (!qmp->init_count) {
+		dev_vdbg(dev, "PHY not initialized, bailing out\n");
+		return 0;
+	}
+
+	qcom_qmp_phy_combo_enable_autonomous_mode(qphy);
+
+	clk_disable_unprepare(qphy->pipe_clk);
+	clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+	return 0;
+}
+
+static int __maybe_unused qcom_qmp_phy_combo_runtime_resume(struct device *dev)
+{
+	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+	struct qmp_phy *qphy = qmp->phys[0];
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+	int ret = 0;
+
+	dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
+
+	/* Supported only for USB3 PHY and luckily USB3 is the first phy */
+	if (cfg->type != PHY_TYPE_USB3)
+		return 0;
+
+	if (!qmp->init_count) {
+		dev_vdbg(dev, "PHY not initialized, bailing out\n");
+		return 0;
+	}
+
+	ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(qphy->pipe_clk);
+	if (ret) {
+		dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+		clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+		return ret;
+	}
+
+	qcom_qmp_phy_combo_disable_autonomous_mode(qphy);
+
+	return 0;
+}
+
+static int qcom_qmp_phy_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+	int num = cfg->num_vregs;
+	int ret, i;
+
+	qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+	if (!qmp->vregs)
+		return -ENOMEM;
+
+	for (i = 0; i < num; i++)
+		qmp->vregs[i].supply = cfg->vreg_list[i].name;
+
+	ret = devm_regulator_bulk_get(dev, num, qmp->vregs);
+	if (ret) {
+		dev_err(dev, "failed at devm_regulator_bulk_get\n");
+		return ret;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = regulator_set_load(qmp->vregs[i].consumer,
+					 cfg->vreg_list[i].enable_load);
+		if (ret) {
+			dev_err(dev, "failed to set load at %s\n",
+				qmp->vregs[i].supply);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
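+/*
+ * Illustration only (editor's sketch, not part of the original patch): for
+ * a single vreg_list entry such as { .name = "vdda-phy", .enable_load = 21800 },
+ * the helper above behaves roughly like the open-coded sequence
+ *
+ *	struct regulator *reg = devm_regulator_get(dev, "vdda-phy");
+ *
+ *	if (!IS_ERR(reg))
+ *		regulator_set_load(reg, 21800);	(load in microamps)
+ *
+ * Keeping the supplies in cfg->vreg_list lets each platform declare its
+ * own supply names and load requirements in a single table.
+ */
+static int qcom_qmp_phy_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+	int i;
+	int ret;
+
+	qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+				   sizeof(*qmp->resets), GFP_KERNEL);
+	if (!qmp->resets)
+		return -ENOMEM;
+
+	for (i = 0; i < cfg->num_resets; i++)
+		qmp->resets[i].id = cfg->reset_list[i];
+
+	ret = devm_reset_control_bulk_get_exclusive(dev,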
cfg->num_resets, qmp->resets); + if (ret) + return dev_err_probe(dev, ret, "failed to get resets\n"); + + return 0; +} + +static int qcom_qmp_phy_combo_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_clks; + int i; + + qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL); + if (!qmp->clks) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->clks[i].id = cfg->clk_list[i]; + + return devm_clk_bulk_get(dev, num, qmp->clks); +} + +static void phy_clk_release_provider(void *res) +{ + of_clk_del_provider(res); +} + +/* + * Register a fixed rate pipe clock. + * + * The _pipe_clksrc generated by PHY goes to the GCC that gate + * controls it. The _pipe_clk coming out of the GCC is requested + * by the PHY driver for its operations. + * We register the _pipe_clksrc here. The gcc driver takes care + * of assigning this _pipe_clksrc as parent to _pipe_clk. + * Below picture shows this relationship. + * + * +---------------+ + * | PHY block |<<---------------------------------------+ + * | | | + * | +-------+ | +-----+ | + * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+ + * clk | +-------+ | +-----+ + * +---------------+ + */ +static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) +{ + struct clk_fixed_rate *fixed; + struct clk_init_data init = { }; + int ret; + + ret = of_property_read_string(np, "clock-output-names", &init.name); + if (ret) { + dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np); + return ret; + } + + fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL); + if (!fixed) + return -ENOMEM; + + init.ops = &clk_fixed_rate_ops; + + /* controllers using QMP phys use 125MHz pipe clock interface */ + fixed->fixed_rate = 125000000; + fixed->hw.init = &init; + + ret = devm_clk_hw_register(qmp->dev, &fixed->hw); + if (ret) + return ret; + + ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw); + if (ret) + return ret; + + /* + * Roll a devm action because the clock provider is the child node, but + * the child node is not actually a device. 
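+ * Registering the action on qmp->dev ensures phy_clk_release_provider()
+ * (and thus of_clk_del_provider() on np) runs when the parent device
+ * unbinds, so the provider cannot outlive the driver.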
+ */ + return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np); +} + +/* + * Display Port PLL driver block diagram for branch clocks + * + * +------------------------------+ + * | DP_VCO_CLK | + * | | + * | +-------------------+ | + * | | (DP PLL/VCO) | | + * | +---------+---------+ | + * | v | + * | +----------+-----------+ | + * | | hsclk_divsel_clk_src | | + * | +----------+-----------+ | + * +------------------------------+ + * | + * +---------<---------v------------>----------+ + * | | + * +--------v----------------+ | + * | dp_phy_pll_link_clk | | + * | link_clk | | + * +--------+----------------+ | + * | | + * | | + * v v + * Input to DISPCC block | + * for link clk, crypto clk | + * and interface clock | + * | + * | + * +--------<------------+-----------------+---<---+ + * | | | + * +----v---------+ +--------v-----+ +--------v------+ + * | vco_divided | | vco_divided | | vco_divided | + * | _clk_src | | _clk_src | | _clk_src | + * | | | | | | + * |divsel_six | | divsel_two | | divsel_four | + * +-------+------+ +-----+--------+ +--------+------+ + * | | | + * v---->----------v-------------<------v + * | + * +----------+-----------------+ + * | dp_phy_pll_vco_div_clk | + * +---------+------------------+ + * | + * v + * Input to DISPCC block + * for DP pixel clock + * + */ +static int qcom_qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + switch (req->rate) { + case 1620000000UL / 2: + case 2700000000UL / 2: + /* 5.4 and 8.1 GHz are same link rate as 2.7GHz, i.e. div 4 and div 6 */ + return 0; + default: + return -EINVAL; + } +} + +static unsigned long +qcom_qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + const struct qmp_phy_dp_clks *dp_clks; + const struct qmp_phy *qphy; + const struct phy_configure_opts_dp *dp_opts; + + dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_pixel_hw); + qphy = dp_clks->qphy; + dp_opts = &qphy->dp_opts; + + switch (dp_opts->link_rate) { + case 1620: + return 1620000000UL / 2; + case 2700: + return 2700000000UL / 2; + case 5400: + return 5400000000UL / 4; + case 8100: + return 8100000000UL / 6; + default: + return 0; + } +} + +static const struct clk_ops qcom_qmp_dp_pixel_clk_ops = { + .determine_rate = qcom_qmp_dp_pixel_clk_determine_rate, + .recalc_rate = qcom_qmp_dp_pixel_clk_recalc_rate, +}; + +static int qcom_qmp_dp_link_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + switch (req->rate) { + case 162000000: + case 270000000: + case 540000000: + case 810000000: + return 0; + default: + return -EINVAL; + } +} + +static unsigned long +qcom_qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + const struct qmp_phy_dp_clks *dp_clks; + const struct qmp_phy *qphy; + const struct phy_configure_opts_dp *dp_opts; + + dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_link_hw); + qphy = dp_clks->qphy; + dp_opts = &qphy->dp_opts; + + switch (dp_opts->link_rate) { + case 1620: + case 2700: + case 5400: + case 8100: + return dp_opts->link_rate * 100000; + default: + return 0; + } +} + +static const struct clk_ops qcom_qmp_dp_link_clk_ops = { + .determine_rate = qcom_qmp_dp_link_clk_determine_rate, + .recalc_rate = qcom_qmp_dp_link_clk_recalc_rate, +}; + +static struct clk_hw * +qcom_qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data) +{ + struct qmp_phy_dp_clks *dp_clks = data; + unsigned int idx = clkspec->args[0]; + + if (idx >= 2) { + pr_err("%s: invalid index %u\n", __func__, idx); + return 
ERR_PTR(-EINVAL); + } + + if (idx == 0) + return &dp_clks->dp_link_hw; + + return &dp_clks->dp_pixel_hw; +} + +static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy, + struct device_node *np) +{ + struct clk_init_data init = { }; + struct qmp_phy_dp_clks *dp_clks; + char name[64]; + int ret; + + dp_clks = devm_kzalloc(qmp->dev, sizeof(*dp_clks), GFP_KERNEL); + if (!dp_clks) + return -ENOMEM; + + dp_clks->qphy = qphy; + qphy->dp_clks = dp_clks; + + snprintf(name, sizeof(name), "%s::link_clk", dev_name(qmp->dev)); + init.ops = &qcom_qmp_dp_link_clk_ops; + init.name = name; + dp_clks->dp_link_hw.init = &init; + ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_link_hw); + if (ret) + return ret; + + snprintf(name, sizeof(name), "%s::vco_div_clk", dev_name(qmp->dev)); + init.ops = &qcom_qmp_dp_pixel_clk_ops; + init.name = name; + dp_clks->dp_pixel_hw.init = &init; + ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_pixel_hw); + if (ret) + return ret; + + ret = of_clk_add_hw_provider(np, qcom_qmp_dp_clks_hw_get, dp_clks); + if (ret) + return ret; + + /* + * Roll a devm action because the clock provider is the child node, but + * the child node is not actually a device. + */ + return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np); +} + +static const struct phy_ops qcom_qmp_phy_combo_usb_ops = { + .init = qcom_qmp_phy_combo_enable, + .exit = qcom_qmp_phy_combo_disable, + .set_mode = qcom_qmp_phy_combo_set_mode, + .owner = THIS_MODULE, +}; + +static const struct phy_ops qcom_qmp_phy_combo_dp_ops = { + .init = qcom_qmp_phy_combo_init, + .configure = qcom_qmp_dp_phy_configure, + .power_on = qcom_qmp_phy_combo_power_on, + .calibrate = qcom_qmp_dp_phy_calibrate, + .power_off = qcom_qmp_phy_combo_power_off, + .exit = qcom_qmp_phy_combo_exit, + .set_mode = qcom_qmp_phy_combo_set_mode, + .owner = THIS_MODULE, +}; + +static +int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id, + void __iomem *serdes, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + struct phy *generic_phy; + struct qmp_phy *qphy; + const struct phy_ops *ops; + char prop_name[MAX_PROP_NAME]; + int ret; + + qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); + if (!qphy) + return -ENOMEM; + + qphy->cfg = cfg; + qphy->serdes = serdes; + /* + * Get memory resources for each phy lane: + * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. + * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5 + * For single lane PHYs: pcs_misc (optional) -> 3. + */ + qphy->tx = of_iomap(np, 0); + if (!qphy->tx) + return -ENOMEM; + + qphy->rx = of_iomap(np, 1); + if (!qphy->rx) + return -ENOMEM; + + qphy->pcs = of_iomap(np, 2); + if (!qphy->pcs) + return -ENOMEM; + + if (cfg->pcs_usb_offset) + qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset; + + /* + * If this is a dual-lane PHY, then there should be registers for the + * second lane. Some old device trees did not specify this, so fall + * back to old legacy behavior of assuming they can be reached at an + * offset from the first lane. + */ + if (cfg->is_dual_lane_phy) { + qphy->tx2 = of_iomap(np, 3); + qphy->rx2 = of_iomap(np, 4); + if (!qphy->tx2 || !qphy->rx2) { + dev_warn(dev, + "Underspecified device tree, falling back to legacy register regions\n"); + + /* In the old version, pcs_misc is at index 3. 
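+ * The region just mapped as tx2 above therefore really holds pcs_misc,
+ * while the second lane's tx2/rx2 are assumed to sit one
+ * QMP_PHY_LEGACY_LANE_STRIDE past the first lane's registers.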
*/ + qphy->pcs_misc = qphy->tx2; + qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE; + qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE; + + } else { + qphy->pcs_misc = of_iomap(np, 5); + } + + } else { + qphy->pcs_misc = of_iomap(np, 3); + } + + if (!qphy->pcs_misc) + dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); + + /* + * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3 + * based phys, so they essentially have pipe clock. So, + * we return error in case phy is USB3 or PIPE type. + * Otherwise, we initialize pipe clock to NULL for + * all phys that don't need this. + */ + snprintf(prop_name, sizeof(prop_name), "pipe%d", id); + qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name); + if (IS_ERR(qphy->pipe_clk)) { + if (cfg->type == PHY_TYPE_USB3) { + ret = PTR_ERR(qphy->pipe_clk); + if (ret != -EPROBE_DEFER) + dev_err(dev, + "failed to get lane%d pipe_clk, %d\n", + id, ret); + return ret; + } + qphy->pipe_clk = NULL; + } + + if (cfg->type == PHY_TYPE_DP) + ops = &qcom_qmp_phy_combo_dp_ops; + else + ops = &qcom_qmp_phy_combo_usb_ops; + + generic_phy = devm_phy_create(dev, np, ops); + if (IS_ERR(generic_phy)) { + ret = PTR_ERR(generic_phy); + dev_err(dev, "failed to create qphy %d\n", ret); + return ret; + } + + qphy->phy = generic_phy; + qphy->index = id; + qphy->qmp = qmp; + qmp->phys[id] = qphy; + phy_set_drvdata(generic_phy, qphy); + + return 0; +} + +static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = { + { + .compatible = "qcom,sc7180-qmp-usb3-dp-phy", + .data = &sc7180_usb3dpphy_cfg, + }, + { + .compatible = "qcom,sm8250-qmp-usb3-dp-phy", + .data = &sm8250_usb3dpphy_cfg, + }, + { + .compatible = "qcom,sc8180x-qmp-usb3-dp-phy", + .data = &sc8180x_usb3dpphy_cfg, + }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_qmp_combo_phy_of_match_table); + +static const struct dev_pm_ops qcom_qmp_phy_combo_pm_ops = { + SET_RUNTIME_PM_OPS(qcom_qmp_phy_combo_runtime_suspend, + qcom_qmp_phy_combo_runtime_resume, NULL) +}; + +static int qcom_qmp_phy_combo_probe(struct platform_device *pdev) +{ + struct qcom_qmp *qmp; + struct device *dev = &pdev->dev; + struct device_node *child; + struct phy_provider *phy_provider; + void __iomem *serdes; + void __iomem *usb_serdes; + void __iomem *dp_serdes = NULL; + const struct qmp_phy_combo_cfg *combo_cfg = NULL; + const struct qmp_phy_cfg *cfg = NULL; + const struct qmp_phy_cfg *usb_cfg = NULL; + const struct qmp_phy_cfg *dp_cfg = NULL; + int num, id, expected_phys; + int ret; + + qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = dev; + dev_set_drvdata(dev, qmp); + + /* Get the specific init parameters of QMP phy */ + combo_cfg = of_device_get_match_data(dev); + if (!combo_cfg) + return -EINVAL; + + usb_cfg = combo_cfg->usb_cfg; + cfg = usb_cfg; /* Setup clks and regulators */ + + /* per PHY serdes; usually located at base address */ + usb_serdes = serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(serdes)) + return PTR_ERR(serdes); + + /* per PHY dp_com; if PHY has dp_com control block */ + if (cfg->has_phy_dp_com_ctrl) { + qmp->dp_com = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(qmp->dp_com)) + return PTR_ERR(qmp->dp_com); + } + + /* Only two serdes for combo PHY */ + dp_serdes = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(dp_serdes)) + return PTR_ERR(dp_serdes); + + dp_cfg = combo_cfg->dp_cfg; + expected_phys = 2; + + mutex_init(&qmp->phy_mutex); + + ret = qcom_qmp_phy_combo_clk_init(dev, cfg); + if (ret) + return ret; + + ret = 
qcom_qmp_phy_combo_reset_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_combo_vreg_init(dev, cfg); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get regulator supplies: %d\n", + ret); + return ret; + } + + num = of_get_available_child_count(dev->of_node); + /* do we have a rogue child node ? */ + if (num > expected_phys) + return -EINVAL; + + qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); + if (!qmp->phys) + return -ENOMEM; + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + /* + * Prevent runtime pm from being ON by default. Users can enable + * it using power/control in sysfs. + */ + pm_runtime_forbid(dev); + + id = 0; + for_each_available_child_of_node(dev->of_node, child) { + if (of_node_name_eq(child, "dp-phy")) { + cfg = dp_cfg; + serdes = dp_serdes; + + /* Create per-lane phy */ + ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg); + if (ret) { + dev_err(dev, "failed to create lane%d phy, %d\n", + id, ret); + goto err_node_put; + } + + ret = phy_dp_clks_register(qmp, qmp->phys[id], child); + if (ret) { + dev_err(qmp->dev, + "failed to register DP clock source\n"); + goto err_node_put; + } + } else if (of_node_name_eq(child, "usb3-phy")) { + cfg = usb_cfg; + serdes = usb_serdes; + + /* Create per-lane phy */ + ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg); + if (ret) { + dev_err(dev, "failed to create lane%d phy, %d\n", + id, ret); + goto err_node_put; + } + + /* + * Register the pipe clock provided by phy. + * See function description to see details of this pipe clock. + */ + ret = phy_pipe_clk_register(qmp, child); + if (ret) { + dev_err(qmp->dev, + "failed to register pipe clock source\n"); + goto err_node_put; + } + } + + id++; + } + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (!IS_ERR(phy_provider)) + dev_info(dev, "Registered Qcom-QMP phy\n"); + else + pm_runtime_disable(dev); + + return PTR_ERR_OR_ZERO(phy_provider); + +err_node_put: + pm_runtime_disable(dev); + of_node_put(child); + return ret; +} + +static struct platform_driver qcom_qmp_phy_combo_driver = { + .probe = qcom_qmp_phy_combo_probe, + .driver = { + .name = "qcom-qmp-combo-phy", + .pm = &qcom_qmp_phy_combo_pm_ops, + .of_match_table = qcom_qmp_combo_phy_of_match_table, + }, +}; + +module_platform_driver(qcom_qmp_phy_combo_driver); + +MODULE_AUTHOR("Vivek Gautam "); +MODULE_DESCRIPTION("Qualcomm QMP USB+DP combo PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c new file mode 100644 index 0000000000..be6a94439b --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c @@ -0,0 +1,1054 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "phy-qcom-qmp.h" + +/* QPHY_SW_RESET bit */ +#define SW_RESET BIT(0) +/* QPHY_POWER_DOWN_CONTROL */ +#define SW_PWRDN BIT(0) +#define REFCLK_DRV_DSBL BIT(1) +/* QPHY_START_CONTROL bits */ +#define SERDES_START BIT(0) +#define PCS_START BIT(1) +#define PLL_READY_GATE_EN BIT(3) +/* QPHY_PCS_STATUS bit */ +#define PHYSTATUS BIT(6) +#define PHYSTATUS_4_20 BIT(7) +/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */ +#define PCS_READY BIT(0) + +/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */ +/* DP PHY soft reset */ +#define SW_DPPHY_RESET BIT(0) +/* mux to select DP PHY reset control, 0:HW control, 1: software reset */ +#define SW_DPPHY_RESET_MUX BIT(1) +/* USB3 PHY soft reset */ +#define SW_USB3PHY_RESET BIT(2) +/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */ +#define SW_USB3PHY_RESET_MUX BIT(3) + +/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */ +#define USB3_MODE BIT(0) /* enables USB3 mode */ +#define DP_MODE BIT(1) /* enables DP mode */ + +/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */ +#define ARCVR_DTCT_EN BIT(0) +#define ALFPS_DTCT_EN BIT(1) +#define ARCVR_DTCT_EVENT_SEL BIT(4) + +/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */ +#define IRQ_CLEAR BIT(0) + +/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */ +#define RCVR_DETECT BIT(0) + +/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */ +#define CLAMP_EN BIT(0) /* enables i/o clamp_n */ + +#define PHY_INIT_COMPLETE_TIMEOUT 10000 +#define POWER_DOWN_DELAY_US_MIN 10 +#define POWER_DOWN_DELAY_US_MAX 11 + +#define MAX_PROP_NAME 32 + +/* Define the assumed distance between lanes for underspecified device trees. */ +#define QMP_PHY_LEGACY_LANE_STRIDE 0x400 + +struct qmp_phy_init_tbl { + unsigned int offset; + unsigned int val; + /* + * register part of layout ? 
+ * if yes, then offset gives index in the reg-layout + */ + bool in_layout; + /* + * mask of lanes for which this register is written + * for cases when second lane needs different values + */ + u8 lane_mask; +}; + +#define QMP_PHY_INIT_CFG(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_L(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .in_layout = true, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_LANE(o, v, l) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = l, \ + } + +/* set of registers with offsets different per-PHY */ +enum qphy_reg_layout { + /* Common block control registers */ + QPHY_COM_SW_RESET, + QPHY_COM_POWER_DOWN_CONTROL, + QPHY_COM_START_CONTROL, + QPHY_COM_PCS_READY_STATUS, + /* PCS registers */ + QPHY_SW_RESET, + QPHY_START_CTRL, + QPHY_PCS_READY_STATUS, + QPHY_PCS_STATUS, + QPHY_PCS_AUTONOMOUS_MODE_CTRL, + QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR, + QPHY_PCS_LFPS_RXTERM_IRQ_STATUS, + QPHY_PCS_POWER_DOWN_CONTROL, + /* PCS_MISC registers */ + QPHY_PCS_MISC_TYPEC_CTRL, + /* Keep last to ensure regs_layout arrays are properly initialized */ + QPHY_LAYOUT_SIZE +}; + +static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_COM_SW_RESET] = 0x400, + [QPHY_COM_POWER_DOWN_CONTROL] = 0x404, + [QPHY_COM_START_CONTROL] = 0x408, + [QPHY_COM_PCS_READY_STATUS] = 0x448, + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x174, +}; + +static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19), + QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15), + 
QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40), +}; + +static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45), + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06), +}; + +static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19), +}; + +static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_IDLE_DTCT_CNTRL, 0x4c), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01), + + QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x05), + + QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x05), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_DOWN_CONTROL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG4, 0x00), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG1, 0xa3), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0x0e), +}; + +struct qmp_phy; + +/* struct qmp_phy_cfg - per-PHY initialization config */ +struct qmp_phy_cfg { + /* phy-type - PCIE/UFS/USB */ + unsigned int type; + /* number of lanes provided by phy */ + int nlanes; + + /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ + const struct qmp_phy_init_tbl *serdes_tbl; + int serdes_tbl_num; + const struct qmp_phy_init_tbl *serdes_tbl_sec; + int serdes_tbl_num_sec; + const struct qmp_phy_init_tbl *tx_tbl; + int tx_tbl_num; + const struct qmp_phy_init_tbl *tx_tbl_sec; + int tx_tbl_num_sec; + const struct qmp_phy_init_tbl *rx_tbl; + int rx_tbl_num; + const struct qmp_phy_init_tbl *rx_tbl_sec; + int rx_tbl_num_sec; + const struct qmp_phy_init_tbl *pcs_tbl; + int pcs_tbl_num; + const struct qmp_phy_init_tbl *pcs_tbl_sec; + int pcs_tbl_num_sec; + const struct qmp_phy_init_tbl *pcs_misc_tbl; + int pcs_misc_tbl_num; + const struct qmp_phy_init_tbl *pcs_misc_tbl_sec; + int pcs_misc_tbl_num_sec; + + /* clock ids to be requested */ + const char * const *clk_list; + int num_clks; + /* resets to be requested */ + const char * const *reset_list; + int num_resets; + /* regulators to be requested */ + const char * const *vreg_list; + int num_vregs; + + /* array of registers with different offsets */ + const unsigned int *regs; + + unsigned int start_ctrl; + unsigned int pwrdn_ctrl; + unsigned int mask_com_pcs_ready; + /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ + unsigned int phy_status; + + /* true, if PHY needs delay after POWER_DOWN */ + bool has_pwrdn_delay; + /* power_down delay in usec */ + int pwrdn_delay_min; + int pwrdn_delay_max; +}; + +/** + * struct qmp_phy - per-lane phy descriptor + * + * @phy: generic phy + * @cfg: phy specific configuration + * @serdes: iomapped memory space for phy's serdes (i.e. 
PLL) + * @tx: iomapped memory space for lane's tx + * @rx: iomapped memory space for lane's rx + * @pcs: iomapped memory space for lane's pcs + * @pcs_misc: iomapped memory space for lane's pcs_misc + * @pipe_clk: pipe clock + * @index: lane index + * @qmp: QMP phy to which this lane belongs + * @lane_rst: lane's reset controller + * @mode: current PHY mode + */ +struct qmp_phy { + struct phy *phy; + const struct qmp_phy_cfg *cfg; + void __iomem *serdes; + void __iomem *tx; + void __iomem *rx; + void __iomem *pcs; + void __iomem *pcs_misc; + struct clk *pipe_clk; + unsigned int index; + struct qcom_qmp *qmp; + struct reset_control *lane_rst; + enum phy_mode mode; +}; + +/** + * struct qcom_qmp - structure holding QMP phy block attributes + * + * @dev: device + * + * @clks: array of clocks required by phy + * @resets: array of resets required by phy + * @vregs: regulator supplies bulk data + * + * @phys: array of per-lane phy descriptors + * @phy_mutex: mutex lock for PHY common block initialization + * @init_count: phy common block initialization count + */ +struct qcom_qmp { + struct device *dev; + + struct clk_bulk_data *clks; + struct reset_control_bulk_data *resets; + struct regulator_bulk_data *vregs; + + struct qmp_phy **phys; + + struct mutex phy_mutex; + int init_count; +}; + +static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg |= val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg &= ~val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +/* list of clocks required by phy */ +static const char * const msm8996_phy_clk_l[] = { + "aux", "cfg_ahb", "ref", +}; + +/* list of resets */ +static const char * const msm8996_pciephy_reset_l[] = { + "phy", "common", "cfg", +}; + +/* list of regulators */ +static const char * const qmp_phy_vreg_l[] = { + "vdda-phy", "vdda-pll", +}; + +static const struct qmp_phy_cfg msm8996_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 3, + + .serdes_tbl = msm8996_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl), + .tx_tbl = msm8996_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl), + .rx_tbl = msm8996_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl), + .pcs_tbl = msm8996_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl), + .clk_list = msm8996_phy_clk_l, + .num_clks = ARRAY_SIZE(msm8996_phy_clk_l), + .reset_list = msm8996_pciephy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = pciephy_regs_layout, + + .start_ctrl = PCS_START | PLL_READY_GATE_EN, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .mask_com_pcs_ready = PCS_READY, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, +}; + +static void qcom_qmp_phy_pcie_msm8996_configure_lane(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num, + u8 lane_mask) +{ + int i; + const struct qmp_phy_init_tbl *t = tbl; + + if (!t) + return; + + for (i = 0; i < num; i++, t++) { + if (!(t->lane_mask & lane_mask)) + continue; + + if (t->in_layout) + writel(t->val, base + regs[t->offset]); + else + writel(t->val, base + 
t->offset); + } +} + +static void qcom_qmp_phy_pcie_msm8996_configure(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num) +{ + qcom_qmp_phy_pcie_msm8996_configure_lane(base, regs, tbl, num, 0xff); +} + +static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; + const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; + int serdes_tbl_num = cfg->serdes_tbl_num; + void __iomem *status; + unsigned int mask, val; + int ret; + + qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + if (cfg->serdes_tbl_sec) + qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, cfg->serdes_tbl_sec, + cfg->serdes_tbl_num_sec); + + + qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET); + qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL], + SERDES_START | PCS_START); + + status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS]; + mask = cfg->mask_com_pcs_ready; + + ret = readl_poll_timeout(status, val, (val & mask), 10, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, + "phy common block init timed-out\n"); + return ret; + } + + return 0; +} + +static int qcom_qmp_phy_pcie_msm8996_com_init(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; + int ret; + + mutex_lock(&qmp->phy_mutex); + if (qmp->init_count++) { + mutex_unlock(&qmp->phy_mutex); + return 0; + } + + /* turn on regulator supplies */ + ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); + if (ret) { + dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); + goto err_unlock; + } + + ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); + if (ret) { + dev_err(qmp->dev, "reset assert failed\n"); + goto err_disable_regulators; + } + + ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets); + if (ret) { + dev_err(qmp->dev, "reset deassert failed\n"); + goto err_disable_regulators; + } + + ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks); + if (ret) + goto err_assert_reset; + + qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL], + SW_PWRDN); + + mutex_unlock(&qmp->phy_mutex); + + return 0; + +err_assert_reset: + reset_control_bulk_assert(cfg->num_resets, qmp->resets); +err_disable_regulators: + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); +err_unlock: + mutex_unlock(&qmp->phy_mutex); + + return ret; +} + +static int qcom_qmp_phy_pcie_msm8996_com_exit(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; + + mutex_lock(&qmp->phy_mutex); + if (--qmp->init_count) { + mutex_unlock(&qmp->phy_mutex); + return 0; + } + + qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL], + SERDES_START | PCS_START); + qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], + SW_RESET); + qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL], + SW_PWRDN); + + reset_control_bulk_assert(cfg->num_resets, qmp->resets); + + clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); + + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); + + mutex_unlock(&qmp->phy_mutex); + + return 0; +} + +static int qcom_qmp_phy_pcie_msm8996_init(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + int ret; + dev_vdbg(qmp->dev, "Initializing QMP phy\n"); + + ret = 
qcom_qmp_phy_pcie_msm8996_com_init(qphy); + if (ret) + return ret; + + return 0; +} + +static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *tx = qphy->tx; + void __iomem *rx = qphy->rx; + void __iomem *pcs = qphy->pcs; + void __iomem *pcs_misc = qphy->pcs_misc; + void __iomem *status; + unsigned int mask, val, ready; + int ret; + + qcom_qmp_phy_pcie_msm8996_serdes_init(qphy); + + ret = reset_control_deassert(qphy->lane_rst); + if (ret) { + dev_err(qmp->dev, "lane%d reset deassert failed\n", + qphy->index); + return ret; + } + + ret = clk_prepare_enable(qphy->pipe_clk); + if (ret) { + dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret); + goto err_reset_lane; + } + + /* Tx, Rx, and PCS configurations */ + qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs, + cfg->tx_tbl, cfg->tx_tbl_num, 1); + if (cfg->tx_tbl_sec) + qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec, + cfg->tx_tbl_num_sec, 1); + + qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs, + cfg->rx_tbl, cfg->rx_tbl_num, 1); + if (cfg->rx_tbl_sec) + qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs, + cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1); + + qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + if (cfg->pcs_tbl_sec) + qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl_sec, + cfg->pcs_tbl_num_sec); + + qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, + cfg->pcs_misc_tbl_num); + if (cfg->pcs_misc_tbl_sec) + qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec, + cfg->pcs_misc_tbl_num_sec); + + /* + * Pull out PHY from POWER DOWN state. + * This is active low enable signal to power-down PHY. 
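+	 *
+	 * Illustrative expansion, not additional code: with the
+	 * msm8996_pciephy_cfg above, pwrdn_ctrl is SW_PWRDN |
+	 * REFCLK_DRV_DSBL, so the qphy_setbits() call below is
+	 * equivalent to
+	 *   qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+	 *                SW_PWRDN | REFCLK_DRV_DSBL);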
+ */ + qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl); + + if (cfg->has_pwrdn_delay) + usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + + /* Pull PHY out of reset state */ + qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + + /* start SerDes and Phy-Coding-Sublayer */ + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + status = pcs + cfg->regs[QPHY_PCS_STATUS]; + mask = cfg->phy_status; + ready = 0; + + ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, "phy initialization timed-out\n"); + goto err_disable_pipe_clk; + } + + return 0; + +err_disable_pipe_clk: + clk_disable_unprepare(qphy->pipe_clk); +err_reset_lane: + reset_control_assert(qphy->lane_rst); + + return ret; +} + +static int qcom_qmp_phy_pcie_msm8996_power_off(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qphy->cfg; + + clk_disable_unprepare(qphy->pipe_clk); + + /* PHY reset */ + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + + /* stop SerDes and Phy-Coding-Sublayer */ + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + /* Put PHY into POWER DOWN state: active low */ + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + } else { + qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + } + + return 0; +} + +static int qcom_qmp_phy_pcie_msm8996_exit(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + reset_control_assert(qphy->lane_rst); + + qcom_qmp_phy_pcie_msm8996_com_exit(qphy); + + return 0; +} + +static int qcom_qmp_phy_pcie_msm8996_enable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_pcie_msm8996_init(phy); + if (ret) + return ret; + + ret = qcom_qmp_phy_pcie_msm8996_power_on(phy); + if (ret) + qcom_qmp_phy_pcie_msm8996_exit(phy); + + return ret; +} + +static int qcom_qmp_phy_pcie_msm8996_disable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_pcie_msm8996_power_off(phy); + if (ret) + return ret; + return qcom_qmp_phy_pcie_msm8996_exit(phy); +} + +static int qcom_qmp_phy_pcie_msm8996_set_mode(struct phy *phy, + enum phy_mode mode, int submode) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + qphy->mode = mode; + + return 0; +} + +static int qcom_qmp_phy_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_vregs; + int i; + + qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL); + if (!qmp->vregs) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->vregs[i].supply = cfg->vreg_list[i]; + + return devm_regulator_bulk_get(dev, num, qmp->vregs); +} + +static int qcom_qmp_phy_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int i; + int ret; + + qmp->resets = devm_kcalloc(dev, cfg->num_resets, + sizeof(*qmp->resets), GFP_KERNEL); + if (!qmp->resets) + return -ENOMEM; + + for (i = 0; i < cfg->num_resets; i++) + qmp->resets[i].id = cfg->reset_list[i]; + + ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets); + if (ret) + return dev_err_probe(dev, ret, "failed to get resets\n"); + + return 0; +} + +static int qcom_qmp_phy_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = 
cfg->num_clks;
+	int i;
+
+	qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+	if (!qmp->clks)
+		return -ENOMEM;
+
+	for (i = 0; i < num; i++)
+		qmp->clks[i].id = cfg->clk_list[i];
+
+	return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+	of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The _pipe_clksrc generated by PHY goes to the GCC that gate
+ * controls it. The _pipe_clk coming out of the GCC is requested
+ * by the PHY driver for its operations.
+ * We register the _pipe_clksrc here. The gcc driver takes care
+ * of assigning this _pipe_clksrc as parent to _pipe_clk.
+ * Below picture shows this relationship.
+ *
+ *          +---------------+
+ *          |   PHY block   |<<---------------------------------------+
+ *          |               |                                          |
+ *          |   +-------+   |                   +-----+                |
+ *    I/P---^-->|  PLL  |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ *     clk  |   +-------+   |                   +-----+
+ *          +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+	struct clk_fixed_rate *fixed;
+	struct clk_init_data init = { };
+	int ret;
+
+	ret = of_property_read_string(np, "clock-output-names", &init.name);
+	if (ret) {
+		dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+		return ret;
+	}
+
+	fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+	if (!fixed)
+		return -ENOMEM;
+
+	init.ops = &clk_fixed_rate_ops;
+
+	/* controllers using QMP phys use 125MHz pipe clock interface */
+	fixed->fixed_rate = 125000000;
+	fixed->hw.init = &init;
+
+	ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+	if (ret)
+		return ret;
+
+	ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+	if (ret)
+		return ret;
+
+	/*
+	 * Roll a devm action because the clock provider is the child node, but
+	 * the child node is not actually a device.
+	 */
+	return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+static const struct phy_ops qcom_qmp_phy_pcie_msm8996_ops = {
+	.power_on = qcom_qmp_phy_pcie_msm8996_enable,
+	.power_off = qcom_qmp_phy_pcie_msm8996_disable,
+	.set_mode = qcom_qmp_phy_pcie_msm8996_set_mode,
+	.owner = THIS_MODULE,
+};
+
+static void qcom_qmp_reset_control_put(void *data)
+{
+	reset_control_put(data);
+}
+
+static
+int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
+			void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+{
+	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+	struct phy *generic_phy;
+	struct qmp_phy *qphy;
+	char prop_name[MAX_PROP_NAME];
+	int ret;
+
+	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	qphy->cfg = cfg;
+	qphy->serdes = serdes;
+	/*
+	 * Get memory resources for each phy lane:
+	 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+	 * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
+	 * For single lane PHYs: pcs_misc (optional) -> 3.
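+	 *
+	 * A hypothetical single-lane child node (offsets and sizes are
+	 * illustrative only, not copied from a real dtsi) would therefore
+	 * carry three reg entries:
+	 *
+	 *   pciephy_0: phy@35000 {
+	 *           reg = <0x035000 0x130>,   // tx,  index 0
+	 *                 <0x035200 0x200>,   // rx,  index 1
+	 *                 <0x035400 0x1dc>;   // pcs, index 2
+	 *   };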
+ */ + qphy->tx = of_iomap(np, 0); + if (!qphy->tx) + return -ENOMEM; + + qphy->rx = of_iomap(np, 1); + if (!qphy->rx) + return -ENOMEM; + + qphy->pcs = of_iomap(np, 2); + if (!qphy->pcs) + return -ENOMEM; + + qphy->pcs_misc = of_iomap(np, 3); + + if (!qphy->pcs_misc) + dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); + + snprintf(prop_name, sizeof(prop_name), "pipe%d", id); + qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name); + if (IS_ERR(qphy->pipe_clk)) { + return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk), + "failed to get lane%d pipe clock\n", id); + } + + /* Get lane reset, if any */ + snprintf(prop_name, sizeof(prop_name), "lane%d", id); + qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name); + if (IS_ERR(qphy->lane_rst)) { + dev_err(dev, "failed to get lane%d reset\n", id); + return PTR_ERR(qphy->lane_rst); + } + ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put, + qphy->lane_rst); + if (ret) + return ret; + + generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_msm8996_ops); + if (IS_ERR(generic_phy)) { + ret = PTR_ERR(generic_phy); + dev_err(dev, "failed to create qphy %d\n", ret); + return ret; + } + + qphy->phy = generic_phy; + qphy->index = id; + qphy->qmp = qmp; + qmp->phys[id] = qphy; + phy_set_drvdata(generic_phy, qphy); + + return 0; +} + +static const struct of_device_id qcom_qmp_phy_pcie_msm8996_of_match_table[] = { + { + .compatible = "qcom,msm8996-qmp-pcie-phy", + .data = &msm8996_pciephy_cfg, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_msm8996_of_match_table); + +static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev) +{ + struct qcom_qmp *qmp; + struct device *dev = &pdev->dev; + struct device_node *child; + struct phy_provider *phy_provider; + void __iomem *serdes; + const struct qmp_phy_cfg *cfg = NULL; + int num, id, expected_phys; + int ret; + + qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = dev; + dev_set_drvdata(dev, qmp); + + /* Get the specific init parameters of QMP phy */ + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + + /* per PHY serdes; usually located at base address */ + serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(serdes)) + return PTR_ERR(serdes); + + expected_phys = cfg->nlanes; + + mutex_init(&qmp->phy_mutex); + + ret = qcom_qmp_phy_pcie_msm8996_clk_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_pcie_msm8996_reset_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_pcie_msm8996_vreg_init(dev, cfg); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get regulator supplies: %d\n", + ret); + return ret; + } + + num = of_get_available_child_count(dev->of_node); + /* do we have a rogue child node ? */ + if (num > expected_phys) + return -EINVAL; + + qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); + if (!qmp->phys) + return -ENOMEM; + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + /* + * Prevent runtime pm from being ON by default. Users can enable + * it using power/control in sysfs. + */ + pm_runtime_forbid(dev); + + id = 0; + for_each_available_child_of_node(dev->of_node, child) { + /* Create per-lane phy */ + ret = qcom_qmp_phy_pcie_msm8996_create(dev, child, id, serdes, cfg); + if (ret) { + dev_err(dev, "failed to create lane%d phy, %d\n", + id, ret); + goto err_node_put; + } + + /* + * Register the pipe clock provided by phy. + * See function description to see details of this pipe clock. 
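+		 *
+		 * Note: phy_pipe_clk_register() registers a fixed 125 MHz
+		 * clock named after the child's "clock-output-names"
+		 * property; since it uses clk_fixed_rate_ops, consumers
+		 * (here the GCC driver) can only reparent to it, never
+		 * change its rate.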
+ */
+		ret = phy_pipe_clk_register(qmp, child);
+		if (ret) {
+			dev_err(qmp->dev,
+				"failed to register pipe clock source\n");
+			goto err_node_put;
+		}
+
+		id++;
+	}
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (!IS_ERR(phy_provider))
+		dev_info(dev, "Registered Qcom-QMP phy\n");
+	else
+		pm_runtime_disable(dev);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+	pm_runtime_disable(dev);
+	of_node_put(child);
+	return ret;
+}
+
+static struct platform_driver qcom_qmp_phy_pcie_msm8996_driver = {
+	.probe = qcom_qmp_phy_pcie_msm8996_probe,
+	.driver = {
+		.name = "qcom-qmp-msm8996-pcie-phy",
+		.of_match_table = qcom_qmp_phy_pcie_msm8996_of_match_table,
+	},
+};
+
+module_platform_driver(qcom_qmp_phy_pcie_msm8996_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP MSM8996 PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-qhp.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-qhp.h
new file mode 100644
index 0000000000..e4a4d2cd85
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-qhp.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCIE_QHP_H_
+#define QCOM_PHY_QMP_PCIE_QHP_H_
+
+/* PCIE GEN3 COM registers */
+#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14
+#define PCIE_GEN3_QHP_COM_SSC_PER1 0x20
+#define PCIE_GEN3_QHP_COM_SSC_PER2 0x24
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1 0x28
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2 0x2c
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1 0x34
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1 0x38
+#define PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN 0x54
+#define PCIE_GEN3_QHP_COM_CLK_ENABLE1 0x58
+#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0 0x6c
+#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0 0x70
+#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1 0x78
+#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1 0x7c
+#define PCIE_GEN3_QHP_COM_BGV_TRIM 0x98
+#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE0 0xb4
+#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE1 0xb8
+#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0 0xc0
+#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1 0xc4
+#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0 0xcc
+#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1 0xd0
+#define PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL 0xdc
+#define PCIE_GEN3_QHP_COM_RESTRIM_CTRL2 0xf0
+#define PCIE_GEN3_QHP_COM_LOCK_CMP_EN 0xf8
+#define PCIE_GEN3_QHP_COM_DEC_START_MODE0 0x100
+#define PCIE_GEN3_QHP_COM_DEC_START_MODE1 0x108
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0 0x11c
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0 0x120
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0 0x124
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1 0x128
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1 0x12c
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1 0x130
+#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0 0x150
+#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1 0x158
+#define PCIE_GEN3_QHP_COM_VCO_TUNE_MAP 0x178
+#define PCIE_GEN3_QHP_COM_BG_CTRL 0x1c8
+#define PCIE_GEN3_QHP_COM_CLK_SELECT 0x1cc
+#define PCIE_GEN3_QHP_COM_HSCLK_SEL1 0x1d0
+#define PCIE_GEN3_QHP_COM_CORECLK_DIV 0x1e0
+#define PCIE_GEN3_QHP_COM_CORE_CLK_EN 0x1e8
+#define PCIE_GEN3_QHP_COM_CMN_CONFIG 0x1f0
+#define PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL 0x1fc
+#define PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1 0x21c
+#define PCIE_GEN3_QHP_COM_CMN_MODE 0x224
+#define PCIE_GEN3_QHP_COM_VREGCLK_DIV1 0x228
+#define PCIE_GEN3_QHP_COM_VREGCLK_DIV2 0x22c
+
+/* PCIE GEN3 QHP Lane registers */
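+/*
+ * In this QHP generation the lane block mixes TX and RX registers in a
+ * single region (L0_DRVR_* sit alongside L0_RX_* and L0_SIGDET_* below),
+ * which is presumably why sdm845_qhp_pcie_rx_tbl in phy-qcom-qmp-pcie.c
+ * is left empty and all lane programming lives in sdm845_qhp_pcie_tx_tbl.
+ */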
+#define PCIE_GEN3_QHP_L0_DRVR_CTRL0 0xc +#define PCIE_GEN3_QHP_L0_DRVR_CTRL1 0x10 +#define PCIE_GEN3_QHP_L0_DRVR_CTRL2 0x14 +#define PCIE_GEN3_QHP_L0_DRVR_TAP_EN 0x18 +#define PCIE_GEN3_QHP_L0_TX_BAND_MODE 0x60 +#define PCIE_GEN3_QHP_L0_LANE_MODE 0x64 +#define PCIE_GEN3_QHP_L0_PARALLEL_RATE 0x7c +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE0 0xc0 +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE1 0xc4 +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE2 0xc8 +#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1 0xd0 +#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2 0xd4 +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0 0xd8 +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1 0xdc +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2 0xe0 +#define PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE 0xfc +#define PCIE_GEN3_QHP_L0_CGA_THRESH_DFE 0x100 +#define PCIE_GEN3_QHP_L0_RXENGINE_EN0 0x108 +#define PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME 0x114 +#define PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME 0x118 +#define PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME 0x11c +#define PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME 0x120 +#define PCIE_GEN3_QHP_L0_VGA_GAIN 0x124 +#define PCIE_GEN3_QHP_L0_DFE_GAIN 0x128 +#define PCIE_GEN3_QHP_L0_EQ_GAIN 0x130 +#define PCIE_GEN3_QHP_L0_OFFSET_GAIN 0x134 +#define PCIE_GEN3_QHP_L0_PRE_GAIN 0x138 +#define PCIE_GEN3_QHP_L0_VGA_INITVAL 0x13c +#define PCIE_GEN3_QHP_L0_EQ_INTVAL 0x154 +#define PCIE_GEN3_QHP_L0_EDAC_INITVAL 0x160 +#define PCIE_GEN3_QHP_L0_RXEQ_INITB0 0x168 +#define PCIE_GEN3_QHP_L0_RXEQ_INITB1 0x16c +#define PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1 0x178 +#define PCIE_GEN3_QHP_L0_RXEQ_CTRL 0x180 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0 0x184 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1 0x188 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2 0x18c +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0 0x190 +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1 0x194 +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2 0x198 +#define PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG 0x19c +#define PCIE_GEN3_QHP_L0_RX_BAND 0x1a4 +#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0 0x1c0 +#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1 0x1c4 +#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2 0x1c8 +#define PCIE_GEN3_QHP_L0_SIGDET_ENABLES 0x230 +#define PCIE_GEN3_QHP_L0_SIGDET_CNTRL 0x234 +#define PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL 0x238 +#define PCIE_GEN3_QHP_L0_DCC_GAIN 0x2a4 +#define PCIE_GEN3_QHP_L0_RSM_START 0x2a8 +#define PCIE_GEN3_QHP_L0_RX_EN_SIGNAL 0x2ac +#define PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL 0x2b0 +#define PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0 0x2b8 +#define PCIE_GEN3_QHP_L0_TS0_TIMER 0x2c0 +#define PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE 0x2c4 +#define PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET 0x2cc + +/* PCIE GEN3 PCS registers */ +#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB 0x2c +#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB 0x40 +#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB 0x54 +#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB 0x68 +#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG 0x15c +#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c +#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c new file mode 100644 index 0000000000..2d65e1f56b --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -0,0 +1,2556 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+/* Define the assumed distance between lanes for underspecified device trees. */
+#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
+
+struct qmp_phy_init_tbl {
+	unsigned int offset;
+	unsigned int val;
+	/*
+	 * register part of layout ?
+ * if yes, then offset gives index in the reg-layout + */ + bool in_layout; + /* + * mask of lanes for which this register is written + * for cases when second lane needs different values + */ + u8 lane_mask; +}; + +#define QMP_PHY_INIT_CFG(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_L(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .in_layout = true, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_LANE(o, v, l) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = l, \ + } + +/* set of registers with offsets different per-PHY */ +enum qphy_reg_layout { + /* Common block control registers */ + QPHY_COM_SW_RESET, + QPHY_COM_POWER_DOWN_CONTROL, + QPHY_COM_START_CONTROL, + QPHY_COM_PCS_READY_STATUS, + /* PCS registers */ + QPHY_SW_RESET, + QPHY_START_CTRL, + QPHY_PCS_READY_STATUS, + QPHY_PCS_STATUS, + QPHY_PCS_AUTONOMOUS_MODE_CTRL, + QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR, + QPHY_PCS_LFPS_RXTERM_IRQ_STATUS, + QPHY_PCS_POWER_DOWN_CONTROL, + /* PCS_MISC registers */ + QPHY_PCS_MISC_TYPEC_CTRL, + /* Keep last to ensure regs_layout arrays are properly initialized */ + QPHY_LAYOUT_SIZE +}; + +static const unsigned int ipq_pciephy_gen3_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x44, + [QPHY_PCS_STATUS] = 0x14, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40, +}; + +static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_COM_SW_RESET] = 0x400, + [QPHY_COM_POWER_DOWN_CONTROL] = 0x404, + [QPHY_COM_START_CONTROL] = 0x408, + [QPHY_COM_PCS_READY_STATUS] = 0x448, + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x174, +}; + +static const unsigned int sdm845_qmp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x174, +}; + +static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x2ac, +}; + +static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x44, + [QPHY_PCS_STATUS] = 0x14, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40, +}; + +static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d), + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15), +}; + +static const struct qmp_phy_init_tbl msm8998_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06), +}; + +static const struct qmp_phy_init_tbl msm8998_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40), +}; + +static const struct qmp_phy_init_tbl msm8998_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x99), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03), +}; + +static const struct qmp_phy_init_tbl ipq6018_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18), + 
QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29), + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08), +}; + +static const struct qmp_phy_init_tbl ipq6018_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), +}; + +static const struct qmp_phy_init_tbl ipq6018_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x61), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x73), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3), + 
QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), +}; + +static const struct qmp_phy_init_tbl ipq6018_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL1, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x01), +}; + +static const struct qmp_phy_init_tbl ipq6018_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x11), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa), + QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa), + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xa), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xD), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xD04), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2), + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0xb), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + 
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45), + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6), + QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2), + QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_TX_TX_EMP_POST1_LVL, 0x36), + QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x4), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_OSC_DTCT_ACTIONS, 0x0), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SIGDET_LVL, 0x99), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M6DB_V0, 0x15), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0xe), + QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0), + QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18), + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0x355), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0x35555), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0x1a0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0xb), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16), + 
QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x0), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0x40), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x20), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0xa), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0xa), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x1), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x2), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x2aa), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x2aaab), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0x3414), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x0), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x0), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x06), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0xe), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x73), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 
0xf0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x2), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNT_VAL_L, 0x9), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNT_VAL_H_TOL, 0x42), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL1, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x1), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x11), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0xb), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x6), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x007), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_01, 0x59), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xbb), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x0d), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG4, 0x00), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2, 0x52), + 
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2, 0x52),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2, 0x10),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5, 0x06),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL, 0x27),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER1, 0x31),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1, 0xde),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2, 0x07),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN, 0x18),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_ENABLE1, 0xb0),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0, 0x8c),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0, 0x20),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1, 0x14),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1, 0x34),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE0, 0x06),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0, 0x36),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1, 0x36),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_RESTRIM_CTRL2, 0x05),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP_EN, 0x42),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE1, 0x68),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1, 0xab),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1, 0xaa),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1, 0x02),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VCO_TUNE_MAP, 0x10),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_SELECT, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_HSCLK_SEL1, 0x30),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORE_CLK_EN, 0x73),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_CONFIG, 0x0c),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL, 0x15),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_MODE, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV1, 0x22),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV2, 0x00),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BGV_TRIM, 0x20),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BG_CTRL, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL0, 0x00),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_TAP_EN, 0x0d),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TX_BAND_MODE, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_LANE_MODE, 0x1a),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PARALLEL_RATE, 0x2f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE0, 0x09),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE1, 0x09),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE2, 0x1b),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2, 0x07),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0, 0x31),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1, 0x31),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2, 0x03),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE, 0x02),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CGA_THRESH_DFE, 0x00),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXENGINE_EN0, 0x12),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME, 0x25),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME, 0x00),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME, 0x05),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_GAIN, 0x26),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_GAIN, 0x12),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_GAIN, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_OFFSET_GAIN, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PRE_GAIN, 0x09),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_INTVAL, 0x15),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EDAC_INITVAL, 0x28),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB0, 0x7f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB1, 0x07),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_CTRL, 0x70),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0, 0x8b),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2, 0x0a),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG, 0x0c),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_BAND, 0x02),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0, 0x5c),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1, 0x3e),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2, 0x3f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_ENABLES, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_CNTRL, 0xa0),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL, 0x08),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DCC_GAIN, 0x01),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_EN_SIGNAL, 0xc3),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL, 0x00),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0, 0xbc),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TS0_TIMER, 0x7f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE, 0x15),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL1, 0x0c),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL2, 0x0f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET, 0x04),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_INITVAL, 0x20),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RSM_START, 0x01),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_rx_tbl[] = {
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG, 0x3f),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG, 0x50),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB, 0x19),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB, 0x07),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB, 0x17),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB, 0x09),
+	QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5, 0x9f),
+};
+
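+/*
+ * The _MODE0/_MODE1 (and, on the QHP PHY above, _MODE2) register variants
+ * appear to hold per-link-rate tuning values, one set per supported PCIe
+ * data rate; this reading is inferred from the register naming only.
+ */
+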
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x5),
+};
+
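+/*
+ * In the RX tables, the RX_MODE_00/_01/_10 groups tune the receiver per
+ * link rate; the two-bit suffix presumably indexes 2.5/5.0/8.0 GT/s, by
+ * analogy with the RATE0/1/2 naming the V4_20 and V5_20 PHYs use below.
+ */
+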
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x6e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x6e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x4a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x37),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x39),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x39),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xdb),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x75),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0xc0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x01),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
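+/*
+ * The sm8250 tables below are split into a base set shared by the gen3x1
+ * and gen3x2 variants plus small per-variant tables; the cfg structs pair
+ * them through the *_tbl_sec/*_tbl_num_sec fields so the shared values are
+ * written first and the variant-specific overrides afterwards.
+ */
+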
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x35),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0x30),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x30),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0x77),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x12),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE, 0x33),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x15),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x05),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG2, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+};
+
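+/*
+ * sdx55 switches to the QSERDES_V4_20 TX/RX register set; note that its
+ * cfg further down also polls PHYSTATUS_4_20 instead of PHYSTATUS when
+ * waiting for the PHY to come up.
+ */
+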
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x19),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x19),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x46),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_CFG, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x4b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x50),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MISC1, 0x88),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MISC2, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MODE, 0x17),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_DC_LEVEL_CTRL, 0x0b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x56),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1d),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x4b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x22),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_1, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_2, 0xf6),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_3, 0x13),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_VMODE_CTRL1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_PI_QEC_CTRL, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_FO_GAIN_RATE2, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_UCDR_PI_CONTROLS, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_3, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE1, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE2, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_VGA_CAL_CNTRL2, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x27),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2, 0x5a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4, 0x37),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B0, 0xbd),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B1, 0xf9),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B2, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B3, 0xce),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B4, 0x62),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B0, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B1, 0x7d),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B2, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B3, 0xcf),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B4, 0xd6),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_PHPRE_CTRL, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+	QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_MARG_COARSE_CTRL2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_RX_SIGDET_LVL, 0x77),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG2, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG4, 0x16),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG5, 0x02),
+};
+
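+/*
+ * The G3/G4_RXEQEVAL_TIME writes below presumably set how long the
+ * receiver evaluates equalization settings at 8.0/16.0 GT/s during link
+ * equalization; this is inferred from the register names.
+ */
+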
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_EQ_CONFIG1, 0x17),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME, 0x13),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME, 0x13),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xcc),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xcc),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x4a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0xc5),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xad),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xb6),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xc0),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xfb),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xc7),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xef),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x81),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xde),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x05),
+
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+};
+
+struct qmp_phy;
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+	/* phy-type - PCIE/UFS/USB */
+	unsigned int type;
+	/* number of lanes provided by phy */
+	int nlanes;
+
+	/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+	const struct qmp_phy_init_tbl *serdes_tbl;
+	int serdes_tbl_num;
+	const struct qmp_phy_init_tbl *serdes_tbl_sec;
+	int serdes_tbl_num_sec;
+	const struct qmp_phy_init_tbl *tx_tbl;
+	int tx_tbl_num;
+	const struct qmp_phy_init_tbl *tx_tbl_sec;
+	int tx_tbl_num_sec;
+	const struct qmp_phy_init_tbl *rx_tbl;
+	int rx_tbl_num;
+	const struct qmp_phy_init_tbl *rx_tbl_sec;
+	int rx_tbl_num_sec;
+	const struct qmp_phy_init_tbl *pcs_tbl;
+	int pcs_tbl_num;
+	const struct qmp_phy_init_tbl *pcs_tbl_sec;
+	int pcs_tbl_num_sec;
+	const struct qmp_phy_init_tbl *pcs_misc_tbl;
+	int pcs_misc_tbl_num;
+	const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
+	int pcs_misc_tbl_num_sec;
+
+	/* clock ids to be requested */
+	const char * const *clk_list;
+	int num_clks;
+	/* resets to be requested */
+	const char * const *reset_list;
+	int num_resets;
+	/* regulators to be requested */
+	const char * const *vreg_list;
+	int num_vregs;
+
+	/* array of registers with different offsets */
+	const unsigned int *regs;
+
+	unsigned int start_ctrl;
+	unsigned int pwrdn_ctrl;
+	unsigned int mask_com_pcs_ready;
+	/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+	unsigned int phy_status;
+
+	/* true if PHY needs a delay after POWER_DOWN */
+	bool has_pwrdn_delay;
+	/* power_down delay in usec */
+	int pwrdn_delay_min;
+	int pwrdn_delay_max;
+
+	/* true if PHY has secondary tx/rx lanes to be configured */
+	bool is_dual_lane_phy;
+
+	/* QMP PHY pipe clock interface rate */
+	unsigned long pipe_clock_rate;
+};
+
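+/*
+ * phy_status picks the ready bit to poll: qcom_qmp_phy_pcie_power_on()
+ * waits for the selected bit in QPHY_PCS_STATUS to clear, and the mask
+ * differs per generation (PHYSTATUS vs PHYSTATUS_4_20 in the cfgs below).
+ */
+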
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
+ * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @mode: current PHY mode
+ */
+struct qmp_phy {
+	struct phy *phy;
+	const struct qmp_phy_cfg *cfg;
+	void __iomem *serdes;
+	void __iomem *tx;
+	void __iomem *rx;
+	void __iomem *pcs;
+	void __iomem *tx2;
+	void __iomem *rx2;
+	void __iomem *pcs_misc;
+	struct clk *pipe_clk;
+	unsigned int index;
+	struct qcom_qmp *qmp;
+	enum phy_mode mode;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @phys: array of per-lane phy descriptors
+ */
+struct qcom_qmp {
+	struct device *dev;
+
+	struct clk_bulk_data *clks;
+	struct reset_control_bulk_data *resets;
+	struct regulator_bulk_data *vregs;
+
+	struct qmp_phy **phys;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+	u32 reg;
+
+	reg = readl(base + offset);
+	reg |= val;
+	writel(reg, base + offset);
+
+	/* ensure that the above write goes through */
+	readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+	u32 reg;
+
+	reg = readl(base + offset);
+	reg &= ~val;
+	writel(reg, base + offset);
+
+	/* ensure that the above write goes through */
+	readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+	"aux", "cfg_ahb", "ref",
+};
+
+static const char * const sdm845_pciephy_clk_l[] = {
+	"aux", "cfg_ahb", "ref", "refgen",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+	"vdda-phy", "vdda-pll",
+};
+
+static const char * const ipq8074_pciephy_clk_l[] = {
+	"aux", "cfg_ahb",
+};
+
+/* list of resets */
+static const char * const ipq8074_pciephy_reset_l[] = {
+	"phy", "common",
+};
+
+static const char * const sdm845_pciephy_reset_l[] = {
+	"phy",
+};
+
+static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = ipq8074_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+	.tx_tbl = ipq8074_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+	.rx_tbl = ipq8074_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+	.pcs_tbl = ipq8074_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
+	.clk_list = ipq8074_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+	.reset_list = ipq8074_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+	.vreg_list = NULL,
+	.num_vregs = 0,
+	.regs = pciephy_regs_layout,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
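+/*
+ * has_pwrdn_delay and pwrdn_delay_min/max feed the usleep_range() call in
+ * qcom_qmp_phy_pcie_power_on(); the ~1 ms window (995-1005 us) presumably
+ * gives the PHY time to settle after it leaves the POWER DOWN state.
+ */
+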
+static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = ipq8074_pcie_gen3_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
+	.tx_tbl = ipq8074_pcie_gen3_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
+	.rx_tbl = ipq8074_pcie_gen3_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl),
+	.pcs_tbl = ipq8074_pcie_gen3_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl),
+	.clk_list = ipq8074_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+	.reset_list = ipq8074_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+	.vreg_list = NULL,
+	.num_vregs = 0,
+	.regs = ipq_pciephy_gen3_regs_layout,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+
+	.pipe_clock_rate = 250000000,
+};
+
+static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = ipq6018_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
+	.tx_tbl = ipq6018_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl),
+	.rx_tbl = ipq6018_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl),
+	.pcs_tbl = ipq6018_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
+	.pcs_misc_tbl = ipq6018_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
+	.clk_list = ipq8074_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+	.reset_list = ipq8074_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+	.vreg_list = NULL,
+	.num_vregs = 0,
+	.regs = ipq_pciephy_gen3_regs_layout,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = sdm845_qmp_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
+	.tx_tbl = sdm845_qmp_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
+	.rx_tbl = sdm845_qmp_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
+	.pcs_tbl = sdm845_qmp_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
+	.pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sdm845_qmp_pciephy_regs_layout,
+
+	.start_ctrl = PCS_START | SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = sdm845_qhp_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
+	.tx_tbl = sdm845_qhp_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
+	.rx_tbl = sdm845_qhp_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
+	.pcs_tbl = sdm845_qhp_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sdm845_qhp_pciephy_regs_layout,
+
+	.start_ctrl = PCS_START | SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+	.serdes_tbl_sec = sm8250_qmp_gen3x1_pcie_serdes_tbl,
+	.serdes_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
+	.tx_tbl = sm8250_qmp_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+	.rx_tbl = sm8250_qmp_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+	.rx_tbl_sec = sm8250_qmp_gen3x1_pcie_rx_tbl,
+	.rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
+	.pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+	.pcs_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_tbl,
+	.pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
+	.pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+	.pcs_misc_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sm8250_pcie_regs_layout,
+
+	.start_ctrl = PCS_START | SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 2,
+
+	.serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+	.tx_tbl = sm8250_qmp_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+	.tx_tbl_sec = sm8250_qmp_gen3x2_pcie_tx_tbl,
+	.tx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
+	.rx_tbl = sm8250_qmp_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+	.rx_tbl_sec = sm8250_qmp_gen3x2_pcie_rx_tbl,
+	.rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
+	.pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+	.pcs_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_tbl,
+	.pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
+	.pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+	.pcs_misc_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sm8250_pcie_regs_layout,
+
+	.start_ctrl = PCS_START | SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+	.is_dual_lane_phy = true,
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = msm8998_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
+	.tx_tbl = msm8998_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(msm8998_pcie_tx_tbl),
+	.rx_tbl = msm8998_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(msm8998_pcie_rx_tbl),
+	.pcs_tbl = msm8998_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
+	.clk_list = msm8996_phy_clk_l,
+	.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+	.reset_list = ipq8074_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = pciephy_regs_layout,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+};
+
+static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = sc8180x_qmp_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
+	.tx_tbl = sc8180x_qmp_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl),
+	.rx_tbl = sc8180x_qmp_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl),
+	.pcs_tbl = sc8180x_qmp_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl),
+	.pcs_misc_tbl = sc8180x_qmp_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sm8250_pcie_regs_layout,
+
+	.start_ctrl = PCS_START | SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 2,
+
+	.serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
+	.tx_tbl = sdx55_qmp_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
+	.rx_tbl = sdx55_qmp_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
+	.pcs_tbl = sdx55_qmp_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
+	.pcs_misc_tbl = sdx55_qmp_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sm8250_pcie_regs_layout,
+
+	.start_ctrl = PCS_START | SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN,
+	.phy_status = PHYSTATUS_4_20,
+
+	.is_dual_lane_phy = true,
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+	.tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
+	.rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
+	.pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+	.pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sm8250_pcie_regs_layout,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 2,
+
+	.serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
+	.tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
+	.rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
+	.pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
+	.pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
+	.clk_list = sdm845_pciephy_clk_l,
+	.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list = sdm845_pciephy_reset_l,
+	.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = sm8250_pcie_regs_layout,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS_4_20,
+
+	.is_dual_lane_phy = true,
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = 995,		/* us */
+	.pwrdn_delay_max = 1005,	/* us */
+};
+
+static void qcom_qmp_phy_pcie_configure_lane(void __iomem *base,
+					     const unsigned int *regs,
+					     const struct qmp_phy_init_tbl tbl[],
+					     int num,
+					     u8 lane_mask)
+{
+	int i;
+	const struct qmp_phy_init_tbl *t = tbl;
+
+	if (!t)
+		return;
+
+	for (i = 0; i < num; i++, t++) {
+		if (!(t->lane_mask & lane_mask))
+			continue;
+
+		if (t->in_layout)
+			writel(t->val, base + regs[t->offset]);
+		else
+			writel(t->val, base + t->offset);
+	}
+}
+
+static void qcom_qmp_phy_pcie_configure(void __iomem *base,
+					const unsigned int *regs,
+					const struct qmp_phy_init_tbl tbl[],
+					int num)
+{
+	qcom_qmp_phy_pcie_configure_lane(base, regs, tbl, num, 0xff);
+}
+
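+/*
+ * Note the in_layout flag handled above: entries that set it store an
+ * index into cfg->regs (the per-generation register layout) rather than a
+ * raw offset, so one table can address registers that moved between PHY
+ * revisions.
+ */
+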
+static int qcom_qmp_phy_pcie_serdes_init(struct qmp_phy *qphy)
+{
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+	void __iomem *serdes = qphy->serdes;
+	const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+	int serdes_tbl_num = cfg->serdes_tbl_num;
+
+	qcom_qmp_phy_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+	if (cfg->serdes_tbl_sec)
+		qcom_qmp_phy_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
+					    cfg->serdes_tbl_num_sec);
+
+	return 0;
+}
+
+static int qcom_qmp_phy_pcie_com_init(struct qmp_phy *qphy)
+{
+	struct qcom_qmp *qmp = qphy->qmp;
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+	void __iomem *pcs = qphy->pcs;
+	int ret;
+
+	/* turn on regulator supplies */
+	ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+	if (ret) {
+		dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+		return ret;
+	}
+
+	ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+	if (ret) {
+		dev_err(qmp->dev, "reset assert failed\n");
+		goto err_disable_regulators;
+	}
+
+	ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+	if (ret) {
+		dev_err(qmp->dev, "reset deassert failed\n");
+		goto err_disable_regulators;
+	}
+
+	ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+	if (ret)
+		goto err_assert_reset;
+
+	if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+		qphy_setbits(pcs,
+			     cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+			     cfg->pwrdn_ctrl);
+	else
+		qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+			     cfg->pwrdn_ctrl);
+
+	return 0;
+
+err_assert_reset:
+	reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+err_disable_regulators:
+	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+	return ret;
+}
+
+static int qcom_qmp_phy_pcie_com_exit(struct qmp_phy *qphy)
+{
+	struct qcom_qmp *qmp = qphy->qmp;
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+	reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+
+	clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+	return 0;
+}
+
+static int qcom_qmp_phy_pcie_init(struct phy *phy)
+{
+	struct qmp_phy *qphy = phy_get_drvdata(phy);
+	struct qcom_qmp *qmp = qphy->qmp;
+	int ret;
+
+	dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+	ret = qcom_qmp_phy_pcie_com_init(qphy);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
+{
+	struct qmp_phy *qphy = phy_get_drvdata(phy);
+	struct qcom_qmp *qmp = qphy->qmp;
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+	void __iomem *tx = qphy->tx;
+	void __iomem *rx = qphy->rx;
+	void __iomem *pcs = qphy->pcs;
+	void __iomem *pcs_misc = qphy->pcs_misc;
+	void __iomem *status;
+	unsigned int mask, val, ready;
+	int ret;
+
+	qcom_qmp_phy_pcie_serdes_init(qphy);
+
+	ret = clk_prepare_enable(qphy->pipe_clk);
+	if (ret) {
+		dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+		return ret;
+	}
+
+	/* Tx, Rx, and PCS configurations */
+	qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs,
+					 cfg->tx_tbl, cfg->tx_tbl_num, 1);
+	if (cfg->tx_tbl_sec)
+		qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
+						 cfg->tx_tbl_num_sec, 1);
+
+	/* Configure the second lane of a dual-lane PHY */
+	if (cfg->is_dual_lane_phy) {
+		qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
+						 cfg->tx_tbl, cfg->tx_tbl_num, 2);
+		if (cfg->tx_tbl_sec)
+			qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
+							 cfg->tx_tbl_sec,
+							 cfg->tx_tbl_num_sec, 2);
+	}
+
+	qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
+					 cfg->rx_tbl, cfg->rx_tbl_num, 1);
+	if (cfg->rx_tbl_sec)
+		qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
+						 cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
+
+	if (cfg->is_dual_lane_phy) {
+		qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
+						 cfg->rx_tbl, cfg->rx_tbl_num, 2);
+		if (cfg->rx_tbl_sec)
+			qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
+							 cfg->rx_tbl_sec,
+							 cfg->rx_tbl_num_sec, 2);
+	}
+
+	qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+	if (cfg->pcs_tbl_sec)
+		qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
+					    cfg->pcs_tbl_num_sec);
+
+	qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
+				    cfg->pcs_misc_tbl_num);
+	if (cfg->pcs_misc_tbl_sec)
+		qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
+					    cfg->pcs_misc_tbl_num_sec);
+
+	/*
+	 * Pull the PHY out of the POWER DOWN state; the power-down control
+	 * signal is active low.
+	 */
+ */ + qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl); + + if (cfg->has_pwrdn_delay) + usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + + /* Pull PHY out of reset state */ + qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + + /* start SerDes and Phy-Coding-Sublayer */ + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + status = pcs + cfg->regs[QPHY_PCS_STATUS]; + mask = cfg->phy_status; + ready = 0; + + ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, "phy initialization timed-out\n"); + goto err_disable_pipe_clk; + } + + return 0; + +err_disable_pipe_clk: + clk_disable_unprepare(qphy->pipe_clk); + + return ret; +} + +static int qcom_qmp_phy_pcie_power_off(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qphy->cfg; + + clk_disable_unprepare(qphy->pipe_clk); + + /* PHY reset */ + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + + /* stop SerDes and Phy-Coding-Sublayer */ + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + /* Put PHY into POWER DOWN state: active low */ + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + } else { + qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + } + + return 0; +} + +static int qcom_qmp_phy_pcie_exit(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + qcom_qmp_phy_pcie_com_exit(qphy); + + return 0; +} + +static int qcom_qmp_phy_pcie_enable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_pcie_init(phy); + if (ret) + return ret; + + ret = qcom_qmp_phy_pcie_power_on(phy); + if (ret) + qcom_qmp_phy_pcie_exit(phy); + + return ret; +} + +static int qcom_qmp_phy_pcie_disable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_pcie_power_off(phy); + if (ret) + return ret; + return qcom_qmp_phy_pcie_exit(phy); +} + +static int qcom_qmp_phy_pcie_set_mode(struct phy *phy, + enum phy_mode mode, int submode) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + qphy->mode = mode; + + return 0; +} + +static int qcom_qmp_phy_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_vregs; + int i; + + qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL); + if (!qmp->vregs) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->vregs[i].supply = cfg->vreg_list[i]; + + return devm_regulator_bulk_get(dev, num, qmp->vregs); +} + +static int qcom_qmp_phy_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int i; + int ret; + + qmp->resets = devm_kcalloc(dev, cfg->num_resets, + sizeof(*qmp->resets), GFP_KERNEL); + if (!qmp->resets) + return -ENOMEM; + + for (i = 0; i < cfg->num_resets; i++) + qmp->resets[i].id = cfg->reset_list[i]; + + ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets); + if (ret) + return dev_err_probe(dev, ret, "failed to get resets\n"); + + return 0; +} + +static int qcom_qmp_phy_pcie_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_clks; + int i; + + qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL); + if (!qmp->clks) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->clks[i].id = cfg->clk_list[i]; + + return 
devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The _pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The _pipe_clk coming out of the GCC is requested by the PHY
+ * driver for its operations.
+ * We register the _pipe_clksrc here. The gcc driver takes care of
+ * assigning this _pipe_clksrc as parent to _pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /*
+ * Controllers using QMP PHYs use a 125MHz pipe clock interface
+ * unless another frequency is specified in the PHY config.
+ */
+ if (qmp->phys[0]->cfg->pipe_clock_rate)
+ fixed->fixed_rate = qmp->phys[0]->cfg->pipe_clock_rate;
+ else
+ fixed->fixed_rate = 125000000;
+
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+static const struct phy_ops qcom_qmp_phy_pcie_ops = {
+ .power_on = qcom_qmp_phy_pcie_enable,
+ .power_off = qcom_qmp_phy_pcie_disable,
+ .set_mode = qcom_qmp_phy_pcie_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static
+int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5.
+ * For single lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ /*
+ * If this is a dual-lane PHY, then there should be registers for the
+ * second lane. Some old device trees did not specify this, so fall
+ * back to the legacy behavior of assuming they can be reached at an
+ * offset from the first lane.
+ */
+ if (cfg->is_dual_lane_phy) {
+ qphy->tx2 = of_iomap(np, 3);
+ qphy->rx2 = of_iomap(np, 4);
+ if (!qphy->tx2 || !qphy->rx2) {
+ dev_warn(dev,
+ "Underspecified device tree, falling back to legacy register regions\n");
+
+ /*
+ * In the old binding, pcs_misc is at index 3,
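+ * so the region just mapped from index 3 as tx2 is really pcs_misc;
+ * reuse it and derive tx2/rx2 at a fixed stride from the first lane.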
*/ + qphy->pcs_misc = qphy->tx2; + qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE; + qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE; + + } else { + qphy->pcs_misc = of_iomap(np, 5); + } + + } else { + qphy->pcs_misc = of_iomap(np, 3); + } + + if (!qphy->pcs_misc && + of_device_is_compatible(dev->of_node, "qcom,ipq6018-qmp-pcie-phy")) + qphy->pcs_misc = qphy->pcs + 0x400; + + if (!qphy->pcs_misc) + dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); + + snprintf(prop_name, sizeof(prop_name), "pipe%d", id); + qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name); + if (IS_ERR(qphy->pipe_clk)) { + return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk), + "failed to get lane%d pipe clock\n", id); + } + + generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_ops); + if (IS_ERR(generic_phy)) { + ret = PTR_ERR(generic_phy); + dev_err(dev, "failed to create qphy %d\n", ret); + return ret; + } + + qphy->phy = generic_phy; + qphy->index = id; + qphy->qmp = qmp; + qmp->phys[id] = qphy; + phy_set_drvdata(generic_phy, qphy); + + return 0; +} + +static const struct of_device_id qcom_qmp_phy_pcie_of_match_table[] = { + { + .compatible = "qcom,msm8998-qmp-pcie-phy", + .data = &msm8998_pciephy_cfg, + }, { + .compatible = "qcom,ipq8074-qmp-pcie-phy", + .data = &ipq8074_pciephy_cfg, + }, { + .compatible = "qcom,ipq8074-qmp-gen3-pcie-phy", + .data = &ipq8074_pciephy_gen3_cfg, + }, { + .compatible = "qcom,ipq6018-qmp-pcie-phy", + .data = &ipq6018_pciephy_cfg, + }, { + .compatible = "qcom,sc8180x-qmp-pcie-phy", + .data = &sc8180x_pciephy_cfg, + }, { + .compatible = "qcom,sdm845-qhp-pcie-phy", + .data = &sdm845_qhp_pciephy_cfg, + }, { + .compatible = "qcom,sdm845-qmp-pcie-phy", + .data = &sdm845_qmp_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy", + .data = &sm8250_qmp_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy", + .data = &sm8250_qmp_gen3x2_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-modem-pcie-phy", + .data = &sm8250_qmp_gen3x2_pciephy_cfg, + }, { + .compatible = "qcom,sdx55-qmp-pcie-phy", + .data = &sdx55_qmp_pciephy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy", + .data = &sm8450_qmp_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy", + .data = &sm8450_qmp_gen4x2_pciephy_cfg, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_of_match_table); + +static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev) +{ + struct qcom_qmp *qmp; + struct device *dev = &pdev->dev; + struct device_node *child; + struct phy_provider *phy_provider; + void __iomem *serdes; + const struct qmp_phy_cfg *cfg = NULL; + int num, id; + int ret; + + qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = dev; + dev_set_drvdata(dev, qmp); + + /* Get the specific init parameters of QMP phy */ + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + + /* per PHY serdes; usually located at base address */ + serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(serdes)) + return PTR_ERR(serdes); + + ret = qcom_qmp_phy_pcie_clk_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_pcie_reset_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_pcie_vreg_init(dev, cfg); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get regulator supplies: %d\n", + ret); + return ret; + } + + num = of_get_available_child_count(dev->of_node); + /* do we have a rogue child node ? 
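+ * (a single available child node is expected here; anything more is
+ * rejected below)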
*/ + if (num > 1) + return -EINVAL; + + qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); + if (!qmp->phys) + return -ENOMEM; + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + /* + * Prevent runtime pm from being ON by default. Users can enable + * it using power/control in sysfs. + */ + pm_runtime_forbid(dev); + + id = 0; + for_each_available_child_of_node(dev->of_node, child) { + /* Create per-lane phy */ + ret = qcom_qmp_phy_pcie_create(dev, child, id, serdes, cfg); + if (ret) { + dev_err(dev, "failed to create lane%d phy, %d\n", + id, ret); + goto err_node_put; + } + + /* + * Register the pipe clock provided by phy. + * See function description to see details of this pipe clock. + */ + ret = phy_pipe_clk_register(qmp, child); + if (ret) { + dev_err(qmp->dev, + "failed to register pipe clock source\n"); + goto err_node_put; + } + + id++; + } + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (!IS_ERR(phy_provider)) + dev_info(dev, "Registered Qcom-QMP phy\n"); + else + pm_runtime_disable(dev); + + return PTR_ERR_OR_ZERO(phy_provider); + +err_node_put: + pm_runtime_disable(dev); + of_node_put(child); + return ret; +} + +static struct platform_driver qcom_qmp_phy_pcie_driver = { + .probe = qcom_qmp_phy_pcie_probe, + .driver = { + .name = "qcom-qmp-pcie-phy", + .of_match_table = qcom_qmp_phy_pcie_of_match_table, + }, +}; + +module_platform_driver(qcom_qmp_phy_pcie_driver); + +MODULE_AUTHOR("Vivek Gautam "); +MODULE_DESCRIPTION("Qualcomm QMP PCIe PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v3.h new file mode 100644 index 0000000000..a45bd301bc --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v3.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_PCS_MISC_V3_H_ +#define QCOM_PHY_QMP_PCS_MISC_V3_H_ + +/* Only for QMP V3 PHY - PCS_MISC registers */ +#define QPHY_V3_PCS_MISC_CLAMP_ENABLE 0x0c +#define QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2 0x2c +#define QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1 0x44 +#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2 0x54 +#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c +#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4.h new file mode 100644 index 0000000000..4cc02288d4 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_PCS_PCIE_V4_H_ +#define QCOM_PHY_QMP_PCS_PCIE_V4_H_ + +/* Only for QMP V4 PHY - PCS_PCIE registers (same as PCS_MISC?) 
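+ * (presumably the V4 counterpart of the V3 PCS_MISC block; compare the
+ * phy-qcom-qmp-pcs-misc-v3.h header added earlier in this patch)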
*/ +#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_STATUS 0x00 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_STATUS 0x04 +#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG1 0x08 +#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2 0x0c +#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG3 0x10 +#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4 0x14 +#define QPHY_V4_PCS_PCIE_PCS_TX_RX_CONFIG 0x18 +#define QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x1c +#define QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_CNTRL 0x20 +#define QPHY_V4_PCS_PCIE_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK 0x24 +#define QPHY_V4_PCS_PCIE_EPCLK_DLY_COUNT_VAL_L 0x28 +#define QPHY_V4_PCS_PCIE_EPCLK_DLY_COUNT_VAL_H 0x2c +#define QPHY_V4_PCS_PCIE_RX_IDLE_DTCT_CNTRL1 0x30 +#define QPHY_V4_PCS_PCIE_RX_IDLE_DTCT_CNTRL2 0x34 +#define QPHY_V4_PCS_PCIE_SIGDET_CNTRL 0x38 +#define QPHY_V4_PCS_PCIE_SIGDET_LOW_2_IDLE_TIME 0x3c +#define QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x40 +#define QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x44 +#define QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x48 +#define QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x4c +#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x50 +#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG2 0x54 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG1 0x58 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2 0x5c +#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG3 0x60 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG4 0x64 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG5 0x68 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG6 0x6c +#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG7 0x70 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG1 0x74 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x78 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG3 0x7c +#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x80 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x84 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6 0x88 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG7 0x8c +#define QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS 0x90 +#define QPHY_V4_PCS_PCIE_LOCAL_FS 0x94 +#define QPHY_V4_PCS_PCIE_LOCAL_LF 0x98 +#define QPHY_V4_PCS_PCIE_LOCAL_FS_RS 0x9c +#define QPHY_V4_PCS_PCIE_EQ_CONFIG1 0xa0 +#define QPHY_V4_PCS_PCIE_EQ_CONFIG2 0xa4 +#define QPHY_V4_PCS_PCIE_PRESET_P0_P1_PRE 0xa8 +#define QPHY_V4_PCS_PCIE_PRESET_P2_P3_PRE 0xac +#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_PRE 0xb0 +#define QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE 0xb4 +#define QPHY_V4_PCS_PCIE_PRESET_P8_P9_PRE 0xb8 +#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE 0xbc +#define QPHY_V4_PCS_PCIE_PRESET_P1_P3_PRE_RS 0xc0 +#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_PRE_RS 0xc4 +#define QPHY_V4_PCS_PCIE_PRESET_P6_P9_PRE_RS 0xc8 +#define QPHY_V4_PCS_PCIE_PRESET_P0_P1_POST 0xcc +#define QPHY_V4_PCS_PCIE_PRESET_P2_P3_POST 0xd0 +#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_POST 0xd4 +#define QPHY_V4_PCS_PCIE_PRESET_P6_P7_POST 0xd8 +#define QPHY_V4_PCS_PCIE_PRESET_P8_P9_POST 0xdc +#define QPHY_V4_PCS_PCIE_PRESET_P10_POST 0xe0 +#define QPHY_V4_PCS_PCIE_PRESET_P1_P3_POST_RS 0xe4 +#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_POST_RS 0xe8 +#define QPHY_V4_PCS_PCIE_PRESET_P6_P9_POST_RS 0xec +#define QPHY_V4_PCS_PCIE_RXEQEVAL_TIME 0xf0 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h new file mode 100644 index 0000000000..af27360299 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V4_20_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V4_20_H_
+
+#define QPHY_V4_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME 0x0f0
+#define QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME 0x0f4
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x824
+#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
new file mode 100644
index 0000000000..2e19fb3f05
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V5_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V5_H_
+
+/* Only for QMP V5 PHY - PCS_PCIE registers */
+#define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
+#define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+#define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
new file mode 100644
index 0000000000..1eedf50cf9
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V5_20_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V5_20_H_
+
+/* Only for QMP V5_20 PHY - PCIe PCS registers */
+#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
+#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
+#define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
+#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h
new file mode 100644
index 0000000000..ba1ea29d28
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V3_H_
+#define QCOM_PHY_QMP_PCS_UFS_V3_H_
+
+#define QPHY_V3_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x02c
+#define QPHY_V3_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x034
+#define QPHY_V3_PCS_UFS_RX_SYM_RESYNC_CTRL 0x134
+#define QPHY_V3_PCS_UFS_RX_MIN_HIBERN8_TIME 0x138
+#define QPHY_V3_PCS_UFS_RX_SIGDET_CTRL1 0x13c
+#define QPHY_V3_PCS_UFS_RX_SIGDET_CTRL2 0x140
+#define QPHY_V3_PCS_UFS_TX_MID_TERM_CTRL1 0x1bc
+#define QPHY_V3_PCS_UFS_MULTI_LANE_CTRL1 0x1c4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v4.h
new file mode 100644
index 0000000000..a1c7d3d171
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v4.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V4_H_
+#define QCOM_PHY_QMP_PCS_UFS_V4_H_
+
+/* Only for QMP V4 PHY - UFS PCS registers */
+#define QPHY_V4_PCS_UFS_PHY_START 0x000
+#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_PCS_UFS_SW_RESET 0x008
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_PCS_UFS_LINECFG_DISABLE 0x148
+#define QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_PCS_UFS_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_UFS_READY_STATUS 0x180
+#define QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h
new file mode 100644
index 0000000000..bcca23493b
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V5_H_
+#define QCOM_PHY_QMP_PCS_UFS_V5_H_
+
+/* Only for QMP V5 PHY - UFS PCS registers */
+#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V5_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1 0x154
+#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2 0x158
+#define QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v4.h
new file mode 100644
index 0000000000..d7fd4ac0fc
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v4.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V4_H_
+#define QCOM_PHY_QMP_PCS_USB_V4_H_
+
+/* Only for QMP V4 PHY - USB3 PCS registers */
+#define QPHY_V4_PCS_USB3_POWER_STATE_CONFIG1 0x000
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x004
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x008
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x00c
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x010
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x014
+#define QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x018
+#define QPHY_V4_PCS_USB3_LFPS_TX_ECSTART 0x01c
+#define QPHY_V4_PCS_USB3_LFPS_PER_TIMER_VAL 0x020
+#define QPHY_V4_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x024
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x028
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x02c
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x030
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x034
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x038
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x03c
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x040
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x044
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_CM_DLY 0x048
+#define QPHY_V4_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x04c
+#define QPHY_V4_PCS_USB3_ALFPS_DEGLITCH_VAL 0x050
+#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x054
+#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x058
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v5.h
new file mode 100644
index 0000000000..73de626223
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V5_H_
+#define QCOM_PHY_QMP_PCS_USB_V5_H_
+
+/* Only for QMP V5 PHY - USB3 registers have different offsets than V4 */
+#define QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1 0x000
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x004
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x008
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x00c
+#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x010
+#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x014
+#define QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x018
+#define QPHY_V5_PCS_USB3_LFPS_TX_ECSTART 0x01c
+#define QPHY_V5_PCS_USB3_LFPS_PER_TIMER_VAL 0x020
+#define QPHY_V5_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x024
+#define QPHY_V5_PCS_USB3_LFPS_CONFIG1 0x028
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x02c
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x030
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x034
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x038
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x03c
+#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x040
+#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x044
+#define QPHY_V5_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x048
+#define QPHY_V5_PCS_USB3_ARCVR_DTCT_CM_DLY 0x04c
+#define QPHY_V5_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x050
+#define QPHY_V5_PCS_USB3_ALFPS_DEGLITCH_VAL 0x054
+#define QPHY_V5_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x058
+#define QPHY_V5_PCS_USB3_TEST_CONTROL 0x05c
+#define QPHY_V5_PCS_USB3_RXTERMINATION_DLY_SEL 0x060
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
new file mode 100644
index 0000000000..c8515f5068
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V2_H_
+#define QCOM_PHY_QMP_PCS_V2_H_
+
+/* Only for QMP V2 PHY - PCS registers */
+#define QPHY_V2_PCS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V2_PCS_TXDEEMPH_M6DB_V0 0x024
+#define QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0 0x028
+#define QPHY_V2_PCS_TX_LARGE_AMP_DRV_LVL 0x034
+#define QPHY_V2_PCS_TX_LARGE_AMP_POST_EMP_LVL 0x038
+#define QPHY_V2_PCS_TX_SMALL_AMP_DRV_LVL 0x03c
+#define QPHY_V2_PCS_TX_SMALL_AMP_POST_EMP_LVL 0x040
+#define QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE 0x054
+#define QPHY_V2_PCS_RX_IDLE_DTCT_CNTRL 0x058
+#define QPHY_V2_PCS_POWER_STATE_CONFIG1 0x060
+#define QPHY_V2_PCS_POWER_STATE_CONFIG2 0x064
+#define QPHY_V2_PCS_POWER_STATE_CONFIG4 0x06c
+#define QPHY_V2_PCS_LOCK_DETECT_CONFIG1 0x080
+#define QPHY_V2_PCS_LOCK_DETECT_CONFIG2 0x084
+#define QPHY_V2_PCS_LOCK_DETECT_CONFIG3 0x088
+#define QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x0a0
+#define QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK 0x0a4
+#define QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME 0x0a8
+#define QPHY_V2_PCS_FLL_CNTRL1 0x0c0
+#define QPHY_V2_PCS_FLL_CNTRL2 0x0c4
+#define QPHY_V2_PCS_FLL_CNT_VAL_L 0x0c8
+#define QPHY_V2_PCS_FLL_CNT_VAL_H_TOL 0x0cc
+#define QPHY_V2_PCS_FLL_MAN_CODE 0x0d0
+
+/* UFS only? */
+#define QPHY_V2_PCS_RX_MIN_STALL_NOCONFIG_TIME_CAP 0x0cc
+#define QPHY_V2_PCS_RX_SYM_RESYNC_CTRL 0x13c
+#define QPHY_V2_PCS_RX_MIN_HIBERN8_TIME 0x140
+#define QPHY_V2_PCS_RX_SIGDET_CTRL2 0x148
+#define QPHY_V2_PCS_RX_PWM_GEAR_BAND 0x154
+#define QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB 0x1a8
+#define QPHY_V2_PCS_OSC_DTCT_ACTIONS 0x1ac
+#define QPHY_V2_PCS_RX_SIGDET_LVL 0x1d8
+#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
+#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v3.h
new file mode 100644
index 0000000000..10dbbb0062
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v3.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */ + +#ifndef QCOM_PHY_QMP_PCS_V3_H_ +#define QCOM_PHY_QMP_PCS_V3_H_ + +/* Only for QMP V3 PHY - PCS registers */ +#define QPHY_V3_PCS_SW_RESET 0x000 +#define QPHY_V3_PCS_POWER_DOWN_CONTROL 0x004 +#define QPHY_V3_PCS_START_CONTROL 0x008 +#define QPHY_V3_PCS_TXMGN_V0 0x00c +#define QPHY_V3_PCS_TXMGN_V1 0x010 +#define QPHY_V3_PCS_TXMGN_V2 0x014 +#define QPHY_V3_PCS_TXMGN_V3 0x018 +#define QPHY_V3_PCS_TXMGN_V4 0x01c +#define QPHY_V3_PCS_TXMGN_LS 0x020 +#define QPHY_V3_PCS_TXDEEMPH_M6DB_V0 0x024 +#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0 0x028 +#define QPHY_V3_PCS_TXDEEMPH_M6DB_V1 0x02c +#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1 0x030 +#define QPHY_V3_PCS_TXDEEMPH_M6DB_V2 0x034 +#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2 0x038 +#define QPHY_V3_PCS_TXDEEMPH_M6DB_V3 0x03c +#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3 0x040 +#define QPHY_V3_PCS_TXDEEMPH_M6DB_V4 0x044 +#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4 0x048 +#define QPHY_V3_PCS_TXDEEMPH_M6DB_LS 0x04c +#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS 0x050 +#define QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE 0x054 +#define QPHY_V3_PCS_RX_IDLE_DTCT_CNTRL 0x058 +#define QPHY_V3_PCS_RATE_SLEW_CNTRL 0x05c +#define QPHY_V3_PCS_POWER_STATE_CONFIG1 0x060 +#define QPHY_V3_PCS_POWER_STATE_CONFIG2 0x064 +#define QPHY_V3_PCS_POWER_STATE_CONFIG3 0x068 +#define QPHY_V3_PCS_POWER_STATE_CONFIG4 0x06c +#define QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L 0x070 +#define QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H 0x074 +#define QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L 0x078 +#define QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H 0x07c +#define QPHY_V3_PCS_LOCK_DETECT_CONFIG1 0x080 +#define QPHY_V3_PCS_LOCK_DETECT_CONFIG2 0x084 +#define QPHY_V3_PCS_LOCK_DETECT_CONFIG3 0x088 +#define QPHY_V3_PCS_TSYNC_RSYNC_TIME 0x08c +#define QPHY_V3_PCS_SIGDET_LOW_2_IDLE_TIME 0x090 +#define QPHY_V3_PCS_BEACON_2_IDLE_TIME_L 0x094 +#define QPHY_V3_PCS_BEACON_2_IDLE_TIME_H 0x098 +#define QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_SYSCLK 0x09c +#define QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x0a0 +#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK 0x0a4 +#define QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME 0x0a8 +#define QPHY_V3_PCS_LFPS_DET_HIGH_COUNT_VAL 0x0ac +#define QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK 0x0b0 +#define QPHY_V3_PCS_LFPS_TX_END_CNT_P2U3_START 0x0b4 +#define QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME 0x0b8 +#define QPHY_V3_PCS_RXEQTRAINING_RUN_TIME 0x0bc +#define QPHY_V3_PCS_TXONESZEROS_RUN_LENGTH 0x0c0 +#define QPHY_V3_PCS_FLL_CNTRL1 0x0c4 +#define QPHY_V3_PCS_FLL_CNTRL2 0x0c8 +#define QPHY_V3_PCS_FLL_CNT_VAL_L 0x0cc +#define QPHY_V3_PCS_FLL_CNT_VAL_H_TOL 0x0d0 +#define QPHY_V3_PCS_FLL_MAN_CODE 0x0d4 +#define QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL 0x0d8 +#define QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR 0x0dc +#define QPHY_V3_PCS_ARCVR_DTCT_EN_PERIOD 0x0e0 +#define QPHY_V3_PCS_ARCVR_DTCT_CM_DLY 0x0e4 +#define QPHY_V3_PCS_ALFPS_DEGLITCH_VAL 0x0e8 +#define QPHY_V3_PCS_INSIG_SW_CTRL1 0x0ec +#define QPHY_V3_PCS_INSIG_SW_CTRL2 0x0f0 +#define QPHY_V3_PCS_INSIG_SW_CTRL3 0x0f4 +#define QPHY_V3_PCS_INSIG_MX_CTRL1 0x0f8 +#define QPHY_V3_PCS_INSIG_MX_CTRL2 0x0fc +#define QPHY_V3_PCS_INSIG_MX_CTRL3 0x100 +#define QPHY_V3_PCS_OUTSIG_SW_CTRL1 0x104 +#define QPHY_V3_PCS_OUTSIG_MX_CTRL1 0x108 +#define QPHY_V3_PCS_CLK_DEBUG_BYPASS_CTRL 0x10c +#define QPHY_V3_PCS_TEST_CONTROL 0x110 +#define QPHY_V3_PCS_TEST_CONTROL2 0x114 +#define QPHY_V3_PCS_TEST_CONTROL3 0x118 +#define QPHY_V3_PCS_TEST_CONTROL4 0x11c +#define QPHY_V3_PCS_TEST_CONTROL5 0x120 +#define QPHY_V3_PCS_TEST_CONTROL6 0x124 +#define QPHY_V3_PCS_TEST_CONTROL7 0x128 +#define QPHY_V3_PCS_COM_RESET_CONTROL 0x12c +#define QPHY_V3_PCS_BIST_CTRL 0x130 
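+/* PRBS/fixed-pattern BIST generator and checker (grouping inferred from the names) */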
+#define QPHY_V3_PCS_PRBS_POLY0 0x134 +#define QPHY_V3_PCS_PRBS_POLY1 0x138 +#define QPHY_V3_PCS_PRBS_SEED0 0x13c +#define QPHY_V3_PCS_PRBS_SEED1 0x140 +#define QPHY_V3_PCS_FIXED_PAT_CTRL 0x144 +#define QPHY_V3_PCS_FIXED_PAT0 0x148 +#define QPHY_V3_PCS_FIXED_PAT1 0x14c +#define QPHY_V3_PCS_FIXED_PAT2 0x150 +#define QPHY_V3_PCS_FIXED_PAT3 0x154 +#define QPHY_V3_PCS_COM_CLK_SWITCH_CTRL 0x158 +#define QPHY_V3_PCS_ELECIDLE_DLY_SEL 0x15c +#define QPHY_V3_PCS_SPARE1 0x160 +#define QPHY_V3_PCS_BIST_CHK_ERR_CNT_L_STATUS 0x164 +#define QPHY_V3_PCS_BIST_CHK_ERR_CNT_H_STATUS 0x168 +#define QPHY_V3_PCS_BIST_CHK_STATUS 0x16c +#define QPHY_V3_PCS_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x170 +#define QPHY_V3_PCS_PCS_STATUS 0x174 +#define QPHY_V3_PCS_PCS_STATUS2 0x178 +#define QPHY_V3_PCS_PCS_STATUS3 0x17c +#define QPHY_V3_PCS_COM_RESET_STATUS 0x180 +#define QPHY_V3_PCS_OSC_DTCT_STATUS 0x184 +#define QPHY_V3_PCS_REVISION_ID0 0x188 +#define QPHY_V3_PCS_REVISION_ID1 0x18c +#define QPHY_V3_PCS_REVISION_ID2 0x190 +#define QPHY_V3_PCS_REVISION_ID3 0x194 +#define QPHY_V3_PCS_DEBUG_BUS_0_STATUS 0x198 +#define QPHY_V3_PCS_DEBUG_BUS_1_STATUS 0x19c +#define QPHY_V3_PCS_DEBUG_BUS_2_STATUS 0x1a0 +#define QPHY_V3_PCS_DEBUG_BUS_3_STATUS 0x1a4 +#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1a8 +#define QPHY_V3_PCS_OSC_DTCT_ACTIONS 0x1ac +#define QPHY_V3_PCS_SIGDET_CNTRL 0x1b0 +#define QPHY_V3_PCS_IDAC_CAL_CNTRL 0x1b4 +#define QPHY_V3_PCS_CMN_ACK_OUT_SEL 0x1b8 +#define QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME_SYSCLK 0x1bc +#define QPHY_V3_PCS_AUTONOMOUS_MODE_STATUS 0x1c0 +#define QPHY_V3_PCS_ENDPOINT_REFCLK_CNTRL 0x1c4 +#define QPHY_V3_PCS_EPCLK_PRE_PLL_LOCK_DLY_SYSCLK 0x1c8 +#define QPHY_V3_PCS_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK 0x1cc +#define QPHY_V3_PCS_EPCLK_DLY_COUNT_VAL_L 0x1d0 +#define QPHY_V3_PCS_EPCLK_DLY_COUNT_VAL_H 0x1d4 +#define QPHY_V3_PCS_RX_SIGDET_LVL 0x1d8 +#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc +#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0 +#define QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL2 0x1e4 +#define QPHY_V3_PCS_RXTERMINATION_DLY_SEL 0x1e8 +#define QPHY_V3_PCS_LFPS_PER_TIMER_VAL 0x1ec +#define QPHY_V3_PCS_SIGDET_STARTUP_TIMER_VAL 0x1f0 +#define QPHY_V3_PCS_LOCK_DETECT_CONFIG4 0x1f4 +#define QPHY_V3_PCS_RX_SIGDET_DTCT_CNTRL 0x1f8 +#define QPHY_V3_PCS_PCS_STATUS4 0x1fc +#define QPHY_V3_PCS_PCS_STATUS4_CLEAR 0x200 +#define QPHY_V3_PCS_DEC_ERROR_COUNT_STATUS 0x204 +#define QPHY_V3_PCS_COMMA_POS_STATUS 0x208 +#define QPHY_V3_PCS_REFGEN_REQ_CONFIG1 0x20c +#define QPHY_V3_PCS_REFGEN_REQ_CONFIG2 0x210 +#define QPHY_V3_PCS_REFGEN_REQ_CONFIG3 0x214 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4.h new file mode 100644 index 0000000000..a2c1eba2b6 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef QCOM_PHY_QMP_PCS_V4_H_ +#define QCOM_PHY_QMP_PCS_V4_H_ + +/* Only for QMP V4 PHY - USB/PCIe PCS registers */ +#define QPHY_V4_PCS_SW_RESET 0x000 +#define QPHY_V4_PCS_REVISION_ID0 0x004 +#define QPHY_V4_PCS_REVISION_ID1 0x008 +#define QPHY_V4_PCS_REVISION_ID2 0x00c +#define QPHY_V4_PCS_REVISION_ID3 0x010 +#define QPHY_V4_PCS_PCS_STATUS1 0x014 +#define QPHY_V4_PCS_PCS_STATUS2 0x018 +#define QPHY_V4_PCS_PCS_STATUS3 0x01c +#define QPHY_V4_PCS_PCS_STATUS4 0x020 +#define QPHY_V4_PCS_PCS_STATUS5 0x024 +#define QPHY_V4_PCS_PCS_STATUS6 0x028 +#define QPHY_V4_PCS_PCS_STATUS7 0x02c +#define QPHY_V4_PCS_DEBUG_BUS_0_STATUS 0x030 +#define QPHY_V4_PCS_DEBUG_BUS_1_STATUS 0x034 +#define QPHY_V4_PCS_DEBUG_BUS_2_STATUS 0x038 +#define QPHY_V4_PCS_DEBUG_BUS_3_STATUS 0x03c +#define QPHY_V4_PCS_POWER_DOWN_CONTROL 0x040 +#define QPHY_V4_PCS_START_CONTROL 0x044 +#define QPHY_V4_PCS_INSIG_SW_CTRL1 0x048 +#define QPHY_V4_PCS_INSIG_SW_CTRL2 0x04c +#define QPHY_V4_PCS_INSIG_SW_CTRL3 0x050 +#define QPHY_V4_PCS_INSIG_SW_CTRL4 0x054 +#define QPHY_V4_PCS_INSIG_SW_CTRL5 0x058 +#define QPHY_V4_PCS_INSIG_SW_CTRL6 0x05c +#define QPHY_V4_PCS_INSIG_SW_CTRL7 0x060 +#define QPHY_V4_PCS_INSIG_SW_CTRL8 0x064 +#define QPHY_V4_PCS_INSIG_MX_CTRL1 0x068 +#define QPHY_V4_PCS_INSIG_MX_CTRL2 0x06c +#define QPHY_V4_PCS_INSIG_MX_CTRL3 0x070 +#define QPHY_V4_PCS_INSIG_MX_CTRL4 0x074 +#define QPHY_V4_PCS_INSIG_MX_CTRL5 0x078 +#define QPHY_V4_PCS_INSIG_MX_CTRL7 0x07c +#define QPHY_V4_PCS_INSIG_MX_CTRL8 0x080 +#define QPHY_V4_PCS_OUTSIG_SW_CTRL1 0x084 +#define QPHY_V4_PCS_OUTSIG_MX_CTRL1 0x088 +#define QPHY_V4_PCS_CLAMP_ENABLE 0x08c +#define QPHY_V4_PCS_POWER_STATE_CONFIG1 0x090 +#define QPHY_V4_PCS_POWER_STATE_CONFIG2 0x094 +#define QPHY_V4_PCS_FLL_CNTRL1 0x098 +#define QPHY_V4_PCS_FLL_CNTRL2 0x09c +#define QPHY_V4_PCS_FLL_CNT_VAL_L 0x0a0 +#define QPHY_V4_PCS_FLL_CNT_VAL_H_TOL 0x0a4 +#define QPHY_V4_PCS_FLL_MAN_CODE 0x0a8 +#define QPHY_V4_PCS_TEST_CONTROL1 0x0ac +#define QPHY_V4_PCS_TEST_CONTROL2 0x0b0 +#define QPHY_V4_PCS_TEST_CONTROL3 0x0b4 +#define QPHY_V4_PCS_TEST_CONTROL4 0x0b8 +#define QPHY_V4_PCS_TEST_CONTROL5 0x0bc +#define QPHY_V4_PCS_TEST_CONTROL6 0x0c0 +#define QPHY_V4_PCS_LOCK_DETECT_CONFIG1 0x0c4 +#define QPHY_V4_PCS_LOCK_DETECT_CONFIG2 0x0c8 +#define QPHY_V4_PCS_LOCK_DETECT_CONFIG3 0x0cc +#define QPHY_V4_PCS_LOCK_DETECT_CONFIG4 0x0d0 +#define QPHY_V4_PCS_LOCK_DETECT_CONFIG5 0x0d4 +#define QPHY_V4_PCS_LOCK_DETECT_CONFIG6 0x0d8 +#define QPHY_V4_PCS_REFGEN_REQ_CONFIG1 0x0dc +#define QPHY_V4_PCS_REFGEN_REQ_CONFIG2 0x0e0 +#define QPHY_V4_PCS_REFGEN_REQ_CONFIG3 0x0e4 +#define QPHY_V4_PCS_BIST_CTRL 0x0e8 +#define QPHY_V4_PCS_PRBS_POLY0 0x0ec +#define QPHY_V4_PCS_PRBS_POLY1 0x0f0 +#define QPHY_V4_PCS_FIXED_PAT0 0x0f4 +#define QPHY_V4_PCS_FIXED_PAT1 0x0f8 +#define QPHY_V4_PCS_FIXED_PAT2 0x0fc +#define QPHY_V4_PCS_FIXED_PAT3 0x100 +#define QPHY_V4_PCS_FIXED_PAT4 0x104 +#define QPHY_V4_PCS_FIXED_PAT5 0x108 +#define QPHY_V4_PCS_FIXED_PAT6 0x10c +#define QPHY_V4_PCS_FIXED_PAT7 0x110 +#define QPHY_V4_PCS_FIXED_PAT8 0x114 +#define QPHY_V4_PCS_FIXED_PAT9 0x118 +#define QPHY_V4_PCS_FIXED_PAT10 0x11c +#define QPHY_V4_PCS_FIXED_PAT11 0x120 +#define QPHY_V4_PCS_FIXED_PAT12 0x124 +#define QPHY_V4_PCS_FIXED_PAT13 0x128 +#define QPHY_V4_PCS_FIXED_PAT14 0x12c +#define QPHY_V4_PCS_FIXED_PAT15 0x130 +#define QPHY_V4_PCS_TXMGN_CONFIG 0x134 +#define QPHY_V4_PCS_G12S1_TXMGN_V0 0x138 +#define QPHY_V4_PCS_G12S1_TXMGN_V1 0x13c +#define QPHY_V4_PCS_G12S1_TXMGN_V2 0x140 +#define QPHY_V4_PCS_G12S1_TXMGN_V3 0x144 +#define QPHY_V4_PCS_G12S1_TXMGN_V4 
0x148 +#define QPHY_V4_PCS_G12S1_TXMGN_V0_RS 0x14c +#define QPHY_V4_PCS_G12S1_TXMGN_V1_RS 0x150 +#define QPHY_V4_PCS_G12S1_TXMGN_V2_RS 0x154 +#define QPHY_V4_PCS_G12S1_TXMGN_V3_RS 0x158 +#define QPHY_V4_PCS_G12S1_TXMGN_V4_RS 0x15c +#define QPHY_V4_PCS_G3S2_TXMGN_MAIN 0x160 +#define QPHY_V4_PCS_G3S2_TXMGN_MAIN_RS 0x164 +#define QPHY_V4_PCS_G12S1_TXDEEMPH_M6DB 0x168 +#define QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB 0x16c +#define QPHY_V4_PCS_G3S2_PRE_GAIN 0x170 +#define QPHY_V4_PCS_G3S2_POST_GAIN 0x174 +#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET 0x178 +#define QPHY_V4_PCS_G3S2_PRE_GAIN_RS 0x17c +#define QPHY_V4_PCS_G3S2_POST_GAIN_RS 0x180 +#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET_RS 0x184 +#define QPHY_V4_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V4_PCS_RX_SIGDET_DTCT_CNTRL 0x18c +#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190 +#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194 +#define QPHY_V4_PCS_RATE_SLEW_CNTRL1 0x198 +#define QPHY_V4_PCS_RATE_SLEW_CNTRL2 0x19c +#define QPHY_V4_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x1a0 +#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1a4 +#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1a8 +#define QPHY_V4_PCS_TSYNC_RSYNC_TIME 0x1ac +#define QPHY_V4_PCS_CDR_RESET_TIME 0x1b0 +#define QPHY_V4_PCS_TSYNC_DLY_TIME 0x1b4 +#define QPHY_V4_PCS_ELECIDLE_DLY_SEL 0x1b8 +#define QPHY_V4_PCS_CMN_ACK_OUT_SEL 0x1bc +#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG1 0x1c0 +#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG2 0x1c4 +#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG3 0x1c8 +#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG4 0x1cc +#define QPHY_V4_PCS_PCS_TX_RX_CONFIG 0x1d0 +#define QPHY_V4_PCS_RX_IDLE_DTCT_CNTRL 0x1d4 +#define QPHY_V4_PCS_RX_DCC_CAL_CONFIG 0x1d8 +#define QPHY_V4_PCS_EQ_CONFIG1 0x1dc +#define QPHY_V4_PCS_EQ_CONFIG2 0x1e0 +#define QPHY_V4_PCS_EQ_CONFIG3 0x1e4 +#define QPHY_V4_PCS_EQ_CONFIG4 0x1e8 +#define QPHY_V4_PCS_EQ_CONFIG5 0x1ec + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4_20.h new file mode 100644 index 0000000000..08c3dd1154 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4_20.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_PCS_V4_20_H_ +#define QCOM_PHY_QMP_PCS_V4_20_H_ + +/* Only for QMP V4_20 PHY - USB/PCIe PCS registers */ +#define QPHY_V4_20_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V4_20_PCS_EQ_CONFIG2 0x1d8 +#define QPHY_V4_20_PCS_EQ_CONFIG4 0x1e0 +#define QPHY_V4_20_PCS_EQ_CONFIG5 0x1e4 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h new file mode 100644 index 0000000000..61a44519f9 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V5_H_
+#define QCOM_PHY_QMP_PCS_V5_H_
+
+/* Only for QMP V5 PHY - USB/PCIe PCS registers */
+#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170
+#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v3.h
new file mode 100644
index 0000000000..c0bd54e0e7
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v3.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V3_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V3_H_
+
+/* Only for QMP V3 PHY - QSERDES COM registers */
+#define QSERDES_V3_COM_ATB_SEL1 0x000
+#define QSERDES_V3_COM_ATB_SEL2 0x004
+#define QSERDES_V3_COM_FREQ_UPDATE 0x008
+#define QSERDES_V3_COM_BG_TIMER 0x00c
+#define QSERDES_V3_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V3_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_V3_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_V3_COM_SSC_PER1 0x01c
+#define QSERDES_V3_COM_SSC_PER2 0x020
+#define QSERDES_V3_COM_SSC_STEP_SIZE1 0x024
+#define QSERDES_V3_COM_SSC_STEP_SIZE2 0x028
+#define QSERDES_V3_COM_POST_DIV 0x02c
+#define QSERDES_V3_COM_POST_DIV_MUX 0x030
+#define QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN 0x034
+#define QSERDES_V3_COM_CLK_ENABLE1 0x038
+#define QSERDES_V3_COM_SYS_CLK_CTRL 0x03c
+#define QSERDES_V3_COM_SYSCLK_BUF_ENABLE 0x040
+#define QSERDES_V3_COM_PLL_EN 0x044
+#define QSERDES_V3_COM_PLL_IVCO 0x048
+#define QSERDES_V3_COM_CMN_IETRIM 0x04c
+#define QSERDES_V3_COM_CMN_IPTRIM 0x050
+#define QSERDES_V3_COM_EP_CLOCK_DETECT_CTR 0x054
+#define QSERDES_V3_COM_SYSCLK_DET_COMP_STATUS 0x058
+#define QSERDES_V3_COM_CLK_EP_DIV 0x05c
+#define QSERDES_V3_COM_CP_CTRL_MODE0 0x060
+#define QSERDES_V3_COM_CP_CTRL_MODE1 0x064
+#define QSERDES_V3_COM_PLL_RCTRL_MODE0 0x068
+#define QSERDES_V3_COM_PLL_RCTRL_MODE1 0x06c
+#define QSERDES_V3_COM_PLL_CCTRL_MODE0 0x070
+#define QSERDES_V3_COM_PLL_CCTRL_MODE1 0x074
+#define QSERDES_V3_COM_PLL_CNTRL 0x078
+#define QSERDES_V3_COM_BIAS_EN_CTRL_BY_PSM 0x07c
+#define QSERDES_V3_COM_SYSCLK_EN_SEL 0x080
+#define QSERDES_V3_COM_CML_SYSCLK_SEL 0x084
+#define QSERDES_V3_COM_RESETSM_CNTRL 0x088
+#define QSERDES_V3_COM_RESETSM_CNTRL2 0x08c
+#define QSERDES_V3_COM_LOCK_CMP_EN 0x090
+#define QSERDES_V3_COM_LOCK_CMP_CFG 0x094
+#define QSERDES_V3_COM_LOCK_CMP1_MODE0 0x098
+#define QSERDES_V3_COM_LOCK_CMP2_MODE0 0x09c
+#define QSERDES_V3_COM_LOCK_CMP3_MODE0 0x0a0
+#define QSERDES_V3_COM_LOCK_CMP1_MODE1 0x0a4
+#define QSERDES_V3_COM_LOCK_CMP2_MODE1 0x0a8
+#define QSERDES_V3_COM_LOCK_CMP3_MODE1 0x0ac
+#define QSERDES_V3_COM_DEC_START_MODE0 0x0b0
+#define QSERDES_V3_COM_DEC_START_MODE1 0x0b4
+#define QSERDES_V3_COM_DIV_FRAC_START1_MODE0 0x0b8
+#define QSERDES_V3_COM_DIV_FRAC_START2_MODE0 0x0bc
+#define QSERDES_V3_COM_DIV_FRAC_START3_MODE0 0x0c0
+#define QSERDES_V3_COM_DIV_FRAC_START1_MODE1 0x0c4
+#define QSERDES_V3_COM_DIV_FRAC_START2_MODE1 0x0c8
+#define QSERDES_V3_COM_DIV_FRAC_START3_MODE1 0x0cc
+#define QSERDES_V3_COM_INTEGLOOP_INITVAL 0x0d0
+#define QSERDES_V3_COM_INTEGLOOP_EN 0x0d4
+#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0 0x0d8
+#define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0 0x0dc
+#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1 0x0e0
+#define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1 0x0e4
+#define 
QSERDES_V3_COM_VCOCAL_DEADMAN_CTRL 0x0e8 +#define QSERDES_V3_COM_VCO_TUNE_CTRL 0x0ec +#define QSERDES_V3_COM_VCO_TUNE_MAP 0x0f0 +#define QSERDES_V3_COM_VCO_TUNE1_MODE0 0x0f4 +#define QSERDES_V3_COM_VCO_TUNE2_MODE0 0x0f8 +#define QSERDES_V3_COM_VCO_TUNE1_MODE1 0x0fc +#define QSERDES_V3_COM_VCO_TUNE2_MODE1 0x100 +#define QSERDES_V3_COM_VCO_TUNE_INITVAL1 0x104 +#define QSERDES_V3_COM_VCO_TUNE_INITVAL2 0x108 +#define QSERDES_V3_COM_VCO_TUNE_MINVAL1 0x10c +#define QSERDES_V3_COM_VCO_TUNE_MINVAL2 0x110 +#define QSERDES_V3_COM_VCO_TUNE_MAXVAL1 0x114 +#define QSERDES_V3_COM_VCO_TUNE_MAXVAL2 0x118 +#define QSERDES_V3_COM_VCO_TUNE_TIMER1 0x11c +#define QSERDES_V3_COM_VCO_TUNE_TIMER2 0x120 +#define QSERDES_V3_COM_CMN_STATUS 0x124 +#define QSERDES_V3_COM_RESET_SM_STATUS 0x128 +#define QSERDES_V3_COM_RESTRIM_CODE_STATUS 0x12c +#define QSERDES_V3_COM_PLLCAL_CODE1_STATUS 0x130 +#define QSERDES_V3_COM_PLLCAL_CODE2_STATUS 0x134 +#define QSERDES_V3_COM_CLK_SELECT 0x138 +#define QSERDES_V3_COM_HSCLK_SEL 0x13c +#define QSERDES_V3_COM_INTEGLOOP_BINCODE_STATUS 0x140 +#define QSERDES_V3_COM_PLL_ANALOG 0x144 +#define QSERDES_V3_COM_CORECLK_DIV_MODE0 0x148 +#define QSERDES_V3_COM_CORECLK_DIV_MODE1 0x14c +#define QSERDES_V3_COM_SW_RESET 0x150 +#define QSERDES_V3_COM_CORE_CLK_EN 0x154 +#define QSERDES_V3_COM_C_READY_STATUS 0x158 +#define QSERDES_V3_COM_CMN_CONFIG 0x15c +#define QSERDES_V3_COM_CMN_RATE_OVERRIDE 0x160 +#define QSERDES_V3_COM_SVS_MODE_CLK_SEL 0x164 +#define QSERDES_V3_COM_DEBUG_BUS0 0x168 +#define QSERDES_V3_COM_DEBUG_BUS1 0x16c +#define QSERDES_V3_COM_DEBUG_BUS2 0x170 +#define QSERDES_V3_COM_DEBUG_BUS3 0x174 +#define QSERDES_V3_COM_DEBUG_BUS_SEL 0x178 +#define QSERDES_V3_COM_CMN_MISC1 0x17c +#define QSERDES_V3_COM_CMN_MISC2 0x180 +#define QSERDES_V3_COM_CMN_MODE 0x184 +#define QSERDES_V3_COM_CMN_VREG_SEL 0x188 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v4.h new file mode 100644 index 0000000000..b0e3298d99 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v4.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef QCOM_PHY_QMP_QSERDES_COM_V4_H_ +#define QCOM_PHY_QMP_QSERDES_COM_V4_H_ + +/* Only for QMP V4 PHY - QSERDES COM registers */ +#define QSERDES_V4_COM_ATB_SEL1 0x000 +#define QSERDES_V4_COM_ATB_SEL2 0x004 +#define QSERDES_V4_COM_FREQ_UPDATE 0x008 +#define QSERDES_V4_COM_BG_TIMER 0x00c +#define QSERDES_V4_COM_SSC_EN_CENTER 0x010 +#define QSERDES_V4_COM_SSC_ADJ_PER1 0x014 +#define QSERDES_V4_COM_SSC_ADJ_PER2 0x018 +#define QSERDES_V4_COM_SSC_PER1 0x01c +#define QSERDES_V4_COM_SSC_PER2 0x020 +#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024 +#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028 +#define QSERDES_V4_COM_SSC_STEP_SIZE3_MODE0 0x02c +#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030 +#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034 +#define QSERDES_V4_COM_SSC_STEP_SIZE3_MODE1 0x038 +#define QSERDES_V4_COM_POST_DIV 0x03c +#define QSERDES_V4_COM_POST_DIV_MUX 0x040 +#define QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN 0x044 +#define QSERDES_V4_COM_CLK_ENABLE1 0x048 +#define QSERDES_V4_COM_SYS_CLK_CTRL 0x04c +#define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050 +#define QSERDES_V4_COM_PLL_EN 0x054 +#define QSERDES_V4_COM_PLL_IVCO 0x058 +#define QSERDES_V4_COM_CMN_IETRIM 0x05c +#define QSERDES_V4_COM_CMN_IPTRIM 0x060 +#define QSERDES_V4_COM_EP_CLOCK_DETECT_CTRL 0x064 +#define QSERDES_V4_COM_SYSCLK_DET_COMP_STATUS 0x068 +#define QSERDES_V4_COM_CLK_EP_DIV_MODE0 0x06c +#define QSERDES_V4_COM_CLK_EP_DIV_MODE1 0x070 +#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074 +#define QSERDES_V4_COM_CP_CTRL_MODE1 0x078 +#define QSERDES_V4_COM_PLL_RCTRL_MODE0 0x07c +#define QSERDES_V4_COM_PLL_RCTRL_MODE1 0x080 +#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084 +#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088 +#define QSERDES_V4_COM_PLL_CNTRL 0x08c +#define QSERDES_V4_COM_BIAS_EN_CTRL_BY_PSM 0x090 +#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094 +#define QSERDES_V4_COM_CML_SYSCLK_SEL 0x098 +#define QSERDES_V4_COM_RESETSM_CNTRL 0x09c +#define QSERDES_V4_COM_RESETSM_CNTRL2 0x0a0 +#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4 +#define QSERDES_V4_COM_LOCK_CMP_CFG 0x0a8 +#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac +#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0 +#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4 +#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8 +#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc +#define QSERDES_V4_COM_DEC_START_MSB_MODE0 0x0c0 +#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4 +#define QSERDES_V4_COM_DEC_START_MSB_MODE1 0x0c8 +#define QSERDES_V4_COM_DIV_FRAC_START1_MODE0 0x0cc +#define QSERDES_V4_COM_DIV_FRAC_START2_MODE0 0x0d0 +#define QSERDES_V4_COM_DIV_FRAC_START3_MODE0 0x0d4 +#define QSERDES_V4_COM_DIV_FRAC_START1_MODE1 0x0d8 +#define QSERDES_V4_COM_DIV_FRAC_START2_MODE1 0x0dc +#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0 +#define QSERDES_V4_COM_INTEGLOOP_INITVAL 0x0e4 +#define QSERDES_V4_COM_INTEGLOOP_EN 0x0e8 +#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0 0x0ec +#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0 0x0f0 +#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1 0x0f4 +#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1 0x0f8 +#define QSERDES_V4_COM_INTEGLOOP_P_PATH_GAIN0 0x0fc +#define QSERDES_V4_COM_INTEGLOOP_P_PATH_GAIN1 0x100 +#define QSERDES_V4_COM_VCOCAL_DEADMAN_CTRL 0x104 +#define QSERDES_V4_COM_VCO_TUNE_CTRL 0x108 +#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c +#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110 +#define QSERDES_V4_COM_VCO_TUNE2_MODE0 0x114 +#define QSERDES_V4_COM_VCO_TUNE1_MODE1 0x118 +#define QSERDES_V4_COM_VCO_TUNE2_MODE1 0x11c +#define QSERDES_V4_COM_VCO_TUNE_INITVAL1 0x120 +#define 
QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124 +#define QSERDES_V4_COM_VCO_TUNE_MINVAL1 0x128 +#define QSERDES_V4_COM_VCO_TUNE_MINVAL2 0x12c +#define QSERDES_V4_COM_VCO_TUNE_MAXVAL1 0x130 +#define QSERDES_V4_COM_VCO_TUNE_MAXVAL2 0x134 +#define QSERDES_V4_COM_VCO_TUNE_TIMER1 0x138 +#define QSERDES_V4_COM_VCO_TUNE_TIMER2 0x13c +#define QSERDES_V4_COM_CMN_STATUS 0x140 +#define QSERDES_V4_COM_RESET_SM_STATUS 0x144 +#define QSERDES_V4_COM_RESTRIM_CODE_STATUS 0x148 +#define QSERDES_V4_COM_PLLCAL_CODE1_STATUS 0x14c +#define QSERDES_V4_COM_PLLCAL_CODE2_STATUS 0x150 +#define QSERDES_V4_COM_CLK_SELECT 0x154 +#define QSERDES_V4_COM_HSCLK_SEL 0x158 +#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c +#define QSERDES_V4_COM_INTEGLOOP_BINCODE_STATUS 0x160 +#define QSERDES_V4_COM_PLL_ANALOG 0x164 +#define QSERDES_V4_COM_CORECLK_DIV_MODE0 0x168 +#define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c +#define QSERDES_V4_COM_SW_RESET 0x170 +#define QSERDES_V4_COM_CORE_CLK_EN 0x174 +#define QSERDES_V4_COM_C_READY_STATUS 0x178 +#define QSERDES_V4_COM_CMN_CONFIG 0x17c +#define QSERDES_V4_COM_CMN_RATE_OVERRIDE 0x180 +#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184 +#define QSERDES_V4_COM_DEBUG_BUS0 0x188 +#define QSERDES_V4_COM_DEBUG_BUS1 0x18c +#define QSERDES_V4_COM_DEBUG_BUS2 0x190 +#define QSERDES_V4_COM_DEBUG_BUS3 0x194 +#define QSERDES_V4_COM_DEBUG_BUS_SEL 0x198 +#define QSERDES_V4_COM_CMN_MISC1 0x19c +#define QSERDES_V4_COM_CMN_MISC2 0x1a0 +#define QSERDES_V4_COM_CMN_MODE 0x1a4 +#define QSERDES_V4_COM_VCO_DC_LEVEL_CTRL 0x1a8 +#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac +#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0 +#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4 +#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8 +#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v5.h new file mode 100644 index 0000000000..c8afdf7bc1 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v5.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef QCOM_PHY_QMP_QSERDES_COM_V5_H_ +#define QCOM_PHY_QMP_QSERDES_COM_V5_H_ + +/* Only for QMP V5 PHY - QSERDES COM registers */ +#define QSERDES_V5_COM_ATB_SEL1 0x000 +#define QSERDES_V5_COM_ATB_SEL2 0x004 +#define QSERDES_V5_COM_FREQ_UPDATE 0x008 +#define QSERDES_V5_COM_BG_TIMER 0x00c +#define QSERDES_V5_COM_SSC_EN_CENTER 0x010 +#define QSERDES_V5_COM_SSC_ADJ_PER1 0x014 +#define QSERDES_V5_COM_SSC_ADJ_PER2 0x018 +#define QSERDES_V5_COM_SSC_PER1 0x01c +#define QSERDES_V5_COM_SSC_PER2 0x020 +#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0 0x024 +#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0 0x028 +#define QSERDES_V5_COM_SSC_STEP_SIZE3_MODE0 0x02c +#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1 0x030 +#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1 0x034 +#define QSERDES_V5_COM_SSC_STEP_SIZE3_MODE1 0x038 +#define QSERDES_V5_COM_POST_DIV 0x03c +#define QSERDES_V5_COM_POST_DIV_MUX 0x040 +#define QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN 0x044 +#define QSERDES_V5_COM_CLK_ENABLE1 0x048 +#define QSERDES_V5_COM_SYS_CLK_CTRL 0x04c +#define QSERDES_V5_COM_SYSCLK_BUF_ENABLE 0x050 +#define QSERDES_V5_COM_PLL_EN 0x054 +#define QSERDES_V5_COM_PLL_IVCO 0x058 +#define QSERDES_V5_COM_CMN_IETRIM 0x05c +#define QSERDES_V5_COM_CMN_IPTRIM 0x060 +#define QSERDES_V5_COM_EP_CLOCK_DETECT_CTRL 0x064 +#define QSERDES_V5_COM_SYSCLK_DET_COMP_STATUS 0x068 +#define QSERDES_V5_COM_CLK_EP_DIV_MODE0 0x06c +#define QSERDES_V5_COM_CLK_EP_DIV_MODE1 0x070 +#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074 +#define QSERDES_V5_COM_CP_CTRL_MODE1 0x078 +#define QSERDES_V5_COM_PLL_RCTRL_MODE0 0x07c +#define QSERDES_V5_COM_PLL_RCTRL_MODE1 0x080 +#define QSERDES_V5_COM_PLL_CCTRL_MODE0 0x084 +#define QSERDES_V5_COM_PLL_CCTRL_MODE1 0x088 +#define QSERDES_V5_COM_PLL_CNTRL 0x08c +#define QSERDES_V5_COM_BIAS_EN_CTRL_BY_PSM 0x090 +#define QSERDES_V5_COM_SYSCLK_EN_SEL 0x094 +#define QSERDES_V5_COM_CML_SYSCLK_SEL 0x098 +#define QSERDES_V5_COM_RESETSM_CNTRL 0x09c +#define QSERDES_V5_COM_RESETSM_CNTRL2 0x0a0 +#define QSERDES_V5_COM_LOCK_CMP_EN 0x0a4 +#define QSERDES_V5_COM_LOCK_CMP_CFG 0x0a8 +#define QSERDES_V5_COM_LOCK_CMP1_MODE0 0x0ac +#define QSERDES_V5_COM_LOCK_CMP2_MODE0 0x0b0 +#define QSERDES_V5_COM_LOCK_CMP1_MODE1 0x0b4 +#define QSERDES_V5_COM_LOCK_CMP2_MODE1 0x0b8 +#define QSERDES_V5_COM_DEC_START_MODE0 0x0bc +#define QSERDES_V5_COM_DEC_START_MSB_MODE0 0x0c0 +#define QSERDES_V5_COM_DEC_START_MODE1 0x0c4 +#define QSERDES_V5_COM_DEC_START_MSB_MODE1 0x0c8 +#define QSERDES_V5_COM_DIV_FRAC_START1_MODE0 0x0cc +#define QSERDES_V5_COM_DIV_FRAC_START2_MODE0 0x0d0 +#define QSERDES_V5_COM_DIV_FRAC_START3_MODE0 0x0d4 +#define QSERDES_V5_COM_DIV_FRAC_START1_MODE1 0x0d8 +#define QSERDES_V5_COM_DIV_FRAC_START2_MODE1 0x0dc +#define QSERDES_V5_COM_DIV_FRAC_START3_MODE1 0x0e0 +#define QSERDES_V5_COM_INTEGLOOP_INITVAL 0x0e4 +#define QSERDES_V5_COM_INTEGLOOP_EN 0x0e8 +#define QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0 0x0ec +#define QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE0 0x0f0 +#define QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1 0x0f4 +#define QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE1 0x0f8 +#define QSERDES_V5_COM_INTEGLOOP_P_PATH_GAIN0 0x0fc +#define QSERDES_V5_COM_INTEGLOOP_P_PATH_GAIN1 0x100 +#define QSERDES_V5_COM_VCOCAL_DEADMAN_CTRL 0x104 +#define QSERDES_V5_COM_VCO_TUNE_CTRL 0x108 +#define QSERDES_V5_COM_VCO_TUNE_MAP 0x10c +#define QSERDES_V5_COM_VCO_TUNE1_MODE0 0x110 +#define QSERDES_V5_COM_VCO_TUNE2_MODE0 0x114 +#define QSERDES_V5_COM_VCO_TUNE1_MODE1 0x118 +#define QSERDES_V5_COM_VCO_TUNE2_MODE1 0x11c +#define QSERDES_V5_COM_VCO_TUNE_INITVAL1 0x120 +#define 
QSERDES_V5_COM_VCO_TUNE_INITVAL2 0x124 +#define QSERDES_V5_COM_VCO_TUNE_MINVAL1 0x128 +#define QSERDES_V5_COM_VCO_TUNE_MINVAL2 0x12c +#define QSERDES_V5_COM_VCO_TUNE_MAXVAL1 0x130 +#define QSERDES_V5_COM_VCO_TUNE_MAXVAL2 0x134 +#define QSERDES_V5_COM_VCO_TUNE_TIMER1 0x138 +#define QSERDES_V5_COM_VCO_TUNE_TIMER2 0x13c +#define QSERDES_V5_COM_CMN_STATUS 0x140 +#define QSERDES_V5_COM_RESET_SM_STATUS 0x144 +#define QSERDES_V5_COM_RESTRIM_CODE_STATUS 0x148 +#define QSERDES_V5_COM_PLLCAL_CODE1_STATUS 0x14c +#define QSERDES_V5_COM_PLLCAL_CODE2_STATUS 0x150 +#define QSERDES_V5_COM_CLK_SELECT 0x154 +#define QSERDES_V5_COM_HSCLK_SEL 0x158 +#define QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL 0x15c +#define QSERDES_V5_COM_INTEGLOOP_BINCODE_STATUS 0x160 +#define QSERDES_V5_COM_PLL_ANALOG 0x164 +#define QSERDES_V5_COM_CORECLK_DIV_MODE0 0x168 +#define QSERDES_V5_COM_CORECLK_DIV_MODE1 0x16c +#define QSERDES_V5_COM_SW_RESET 0x170 +#define QSERDES_V5_COM_CORE_CLK_EN 0x174 +#define QSERDES_V5_COM_C_READY_STATUS 0x178 +#define QSERDES_V5_COM_CMN_CONFIG 0x17c +#define QSERDES_V5_COM_CMN_RATE_OVERRIDE 0x180 +#define QSERDES_V5_COM_SVS_MODE_CLK_SEL 0x184 +#define QSERDES_V5_COM_DEBUG_BUS0 0x188 +#define QSERDES_V5_COM_DEBUG_BUS1 0x18c +#define QSERDES_V5_COM_DEBUG_BUS2 0x190 +#define QSERDES_V5_COM_DEBUG_BUS3 0x194 +#define QSERDES_V5_COM_DEBUG_BUS_SEL 0x198 +#define QSERDES_V5_COM_CMN_MISC1 0x19c +#define QSERDES_V5_COM_CMN_MODE 0x1a0 +#define QSERDES_V5_COM_CMN_MODE_CONTD 0x1a4 +#define QSERDES_V5_COM_VCO_DC_LEVEL_CTRL 0x1a8 +#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac +#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0 +#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4 +#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8 +#define QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc +#define QSERDES_V5_COM_RESERVED_1 0x1c0 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h new file mode 100644 index 0000000000..fbaf6ef467 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_H_
+#define QCOM_PHY_QMP_QSERDES_COM_H_
+
+/* Only for QMP V2 PHY - QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1 0x000
+#define QSERDES_COM_ATB_SEL2 0x004
+#define QSERDES_COM_FREQ_UPDATE 0x008
+#define QSERDES_COM_BG_TIMER 0x00c
+#define QSERDES_COM_SSC_EN_CENTER 0x010
+#define QSERDES_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_COM_SSC_PER1 0x01c
+#define QSERDES_COM_SSC_PER2 0x020
+#define QSERDES_COM_SSC_STEP_SIZE1 0x024
+#define QSERDES_COM_SSC_STEP_SIZE2 0x028
+#define QSERDES_COM_POST_DIV 0x02c
+#define QSERDES_COM_POST_DIV_MUX 0x030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
+#define QSERDES_COM_CLK_ENABLE1 0x038
+#define QSERDES_COM_SYS_CLK_CTRL 0x03c
+#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
+#define QSERDES_COM_PLL_EN 0x044
+#define QSERDES_COM_PLL_IVCO 0x048
+#define QSERDES_COM_LOCK_CMP1_MODE0 0x04c
+#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
+#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
+#define QSERDES_COM_LOCK_CMP1_MODE1 0x058
+#define QSERDES_COM_LOCK_CMP2_MODE1 0x05c
+#define QSERDES_COM_LOCK_CMP3_MODE1 0x060
+#define QSERDES_COM_LOCK_CMP1_MODE2 0x064
+#define QSERDES_COM_CMN_RSVD0 0x064
+#define QSERDES_COM_LOCK_CMP2_MODE2 0x068
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x068
+#define QSERDES_COM_LOCK_CMP3_MODE2 0x06c
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x06c
+#define QSERDES_COM_BG_TRIM 0x070
+#define QSERDES_COM_CLK_EP_DIV 0x074
+#define QSERDES_COM_CP_CTRL_MODE0 0x078
+#define QSERDES_COM_CP_CTRL_MODE1 0x07c
+#define QSERDES_COM_CP_CTRL_MODE2 0x080
+#define QSERDES_COM_CMN_RSVD1 0x080
+#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
+#define QSERDES_COM_PLL_RCTRL_MODE1 0x088
+#define QSERDES_COM_PLL_RCTRL_MODE2 0x08c
+#define QSERDES_COM_CMN_RSVD2 0x08c
+#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
+#define QSERDES_COM_PLL_CCTRL_MODE1 0x094
+#define QSERDES_COM_PLL_CCTRL_MODE2 0x098
+#define QSERDES_COM_CMN_RSVD3 0x098
+#define QSERDES_COM_PLL_CNTRL 0x09c
+#define QSERDES_COM_PHASE_SEL_CTRL 0x0a0
+#define QSERDES_COM_PHASE_SEL_DC 0x0a4
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x0a8
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x0a8
+#define QSERDES_COM_SYSCLK_EN_SEL 0x0ac
+#define QSERDES_COM_CML_SYSCLK_SEL 0x0b0
+#define QSERDES_COM_RESETSM_CNTRL 0x0b4
+#define QSERDES_COM_RESETSM_CNTRL2 0x0b8
+#define QSERDES_COM_RESTRIM_CTRL 0x0bc
+#define QSERDES_COM_RESTRIM_CTRL2 0x0c0
+#define QSERDES_COM_RESCODE_DIV_NUM 0x0c4
+#define QSERDES_COM_LOCK_CMP_EN 0x0c8
+#define QSERDES_COM_LOCK_CMP_CFG 0x0cc
+#define QSERDES_COM_DEC_START_MODE0 0x0d0
+#define QSERDES_COM_DEC_START_MODE1 0x0d4
+#define QSERDES_COM_DEC_START_MODE2 0x0d8
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x0d8
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0dc
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0e0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0e4
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x0e8
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x0ec
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x0f0
+#define QSERDES_COM_DIV_FRAC_START1_MODE2 0x0f4
+#define QSERDES_COM_VCO_TUNE_MINVAL1 0x0f4
+#define QSERDES_COM_DIV_FRAC_START2_MODE2 0x0f8
+#define QSERDES_COM_VCO_TUNE_MINVAL2 0x0f8
+#define QSERDES_COM_DIV_FRAC_START3_MODE2 0x0fc
+#define QSERDES_COM_CMN_RSVD4 0x0fc
+#define QSERDES_COM_INTEGLOOP_INITVAL 0x100
+#define QSERDES_COM_INTEGLOOP_EN 0x104
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10c
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x110
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x114
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x118
+#define QSERDES_COM_VCO_TUNE_MAXVAL1 0x118
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x11c
+#define QSERDES_COM_VCO_TUNE_MAXVAL2 0x11c
+#define QSERDES_COM_RES_TRIM_CONTROL2 0x120
+#define QSERDES_COM_VCO_TUNE_CTRL 0x124
+#define QSERDES_COM_VCO_TUNE_MAP 0x128
+#define QSERDES_COM_VCO_TUNE1_MODE0 0x12c
+#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
+#define QSERDES_COM_VCO_TUNE1_MODE1 0x134
+#define QSERDES_COM_VCO_TUNE2_MODE1 0x138
+#define QSERDES_COM_VCO_TUNE1_MODE2 0x13c
+#define QSERDES_COM_VCO_TUNE_INITVAL1 0x13c
+#define QSERDES_COM_VCO_TUNE2_MODE2 0x140
+#define QSERDES_COM_VCO_TUNE_INITVAL2 0x140
+#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
+#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
+#define QSERDES_COM_SAR 0x14c
+#define QSERDES_COM_SAR_CLK 0x150
+#define QSERDES_COM_SAR_CODE_OUT_STATUS 0x154
+#define QSERDES_COM_SAR_CODE_READY_STATUS 0x158
+#define QSERDES_COM_CMN_STATUS 0x15c
+#define QSERDES_COM_RESET_SM_STATUS 0x160
+#define QSERDES_COM_RESTRIM_CODE_STATUS 0x164
+#define QSERDES_COM_PLLCAL_CODE1_STATUS 0x168
+#define QSERDES_COM_PLLCAL_CODE2_STATUS 0x16c
+#define QSERDES_COM_BG_CTRL 0x170
+#define QSERDES_COM_CLK_SELECT 0x174
+#define QSERDES_COM_HSCLK_SEL 0x178
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x17c
+#define QSERDES_COM_PLL_ANALOG 0x180
+#define QSERDES_COM_CORECLK_DIV 0x184
+#define QSERDES_COM_SW_RESET 0x188
+#define QSERDES_COM_CORE_CLK_EN 0x18c
+#define QSERDES_COM_C_READY_STATUS 0x190
+#define QSERDES_COM_CMN_CONFIG 0x194
+#define QSERDES_COM_CMN_RATE_OVERRIDE 0x198
+#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19c
+#define QSERDES_COM_DEBUG_BUS0 0x1a0
+#define QSERDES_COM_DEBUG_BUS1 0x1a4
+#define QSERDES_COM_DEBUG_BUS2 0x1a8
+#define QSERDES_COM_DEBUG_BUS3 0x1ac
+#define QSERDES_COM_DEBUG_BUS_SEL 0x1b0
+#define QSERDES_COM_CMN_MISC1 0x1b4
+#define QSERDES_COM_CMN_MISC2 0x1b8
+#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
+#define QSERDES_COM_CORECLK_DIV_MODE2 0x1c0
+#define QSERDES_COM_CMN_RSVD5 0x1c0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
new file mode 100644
index 0000000000..ad326e301a
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_PLL_H_
+#define QCOM_PHY_QMP_QSERDES_PLL_H_
+
+/* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */
+#define QSERDES_PLL_BG_TIMER 0x00c
+#define QSERDES_PLL_SSC_PER1 0x01c
+#define QSERDES_PLL_SSC_PER2 0x020
+#define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_PLL_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_PLL_SSC_STEP_SIZE1_MODE1 0x02c
+#define QSERDES_PLL_SSC_STEP_SIZE2_MODE1 0x030
+#define QSERDES_PLL_BIAS_EN_CLKBUFLR_EN 0x03c
+#define QSERDES_PLL_CLK_ENABLE1 0x040
+#define QSERDES_PLL_SYS_CLK_CTRL 0x044
+#define QSERDES_PLL_SYSCLK_BUF_ENABLE 0x048
+#define QSERDES_PLL_PLL_IVCO 0x050
+#define QSERDES_PLL_LOCK_CMP1_MODE0 0x054
+#define QSERDES_PLL_LOCK_CMP2_MODE0 0x058
+#define QSERDES_PLL_LOCK_CMP1_MODE1 0x060
+#define QSERDES_PLL_LOCK_CMP2_MODE1 0x064
+#define QSERDES_PLL_BG_TRIM 0x074
+#define QSERDES_PLL_CLK_EP_DIV_MODE0 0x078
+#define QSERDES_PLL_CLK_EP_DIV_MODE1 0x07c
+#define QSERDES_PLL_CP_CTRL_MODE0 0x080
+#define QSERDES_PLL_CP_CTRL_MODE1 0x084
+#define QSERDES_PLL_PLL_RCTRL_MODE0 0x088
+#define QSERDES_PLL_PLL_RCTRL_MODE1 0x08c
+#define QSERDES_PLL_PLL_CCTRL_MODE0 0x090
+#define QSERDES_PLL_PLL_CCTRL_MODE1 0x094
+#define QSERDES_PLL_BIAS_EN_CTRL_BY_PSM 0x0a4
+#define QSERDES_PLL_SYSCLK_EN_SEL 0x0a8
+#define QSERDES_PLL_RESETSM_CNTRL 0x0b0
+#define QSERDES_PLL_LOCK_CMP_EN 0x0c4
+#define QSERDES_PLL_DEC_START_MODE0 0x0cc
+#define QSERDES_PLL_DEC_START_MODE1 0x0d0
+#define QSERDES_PLL_DIV_FRAC_START1_MODE0 0x0d8
+#define QSERDES_PLL_DIV_FRAC_START2_MODE0 0x0dc
+#define QSERDES_PLL_DIV_FRAC_START3_MODE0 0x0e0
+#define QSERDES_PLL_DIV_FRAC_START1_MODE1 0x0e4
+#define QSERDES_PLL_DIV_FRAC_START2_MODE1 0x0e8
+#define QSERDES_PLL_DIV_FRAC_START3_MODE1 0x0ec
+#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE0 0x100
+#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE0 0x104
+#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE1 0x108
+#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE1 0x10c
+#define QSERDES_PLL_VCO_TUNE_MAP 0x120
+#define QSERDES_PLL_VCO_TUNE1_MODE0 0x124
+#define QSERDES_PLL_VCO_TUNE2_MODE0 0x128
+#define QSERDES_PLL_VCO_TUNE1_MODE1 0x12c
+#define QSERDES_PLL_VCO_TUNE2_MODE1 0x130
+#define QSERDES_PLL_VCO_TUNE_TIMER1 0x13c
+#define QSERDES_PLL_VCO_TUNE_TIMER2 0x140
+#define QSERDES_PLL_CLK_SELECT 0x16c
+#define QSERDES_PLL_HSCLK_SEL 0x170
+#define QSERDES_PLL_CORECLK_DIV 0x17c
+#define QSERDES_PLL_CORE_CLK_EN 0x184
+#define QSERDES_PLL_CMN_CONFIG 0x18c
+#define QSERDES_PLL_SVS_MODE_CLK_SEL 0x194
+#define QSERDES_PLL_CORECLK_DIV_MODE1 0x1b4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v3.h
new file mode 100644
index 0000000000..161e6df30e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v3.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V3_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V3_H_
+
+/* Only for QMP V3 PHY - TX registers */
+#define QSERDES_V3_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_V3_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_V3_TX_TX_EMP_POST1_LVL 0x00c
+#define QSERDES_V3_TX_TX_DRV_LVL 0x01c
+#define QSERDES_V3_TX_RESET_TSYNC_EN 0x024
+#define QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN 0x028
+#define QSERDES_V3_TX_TX_BAND 0x02c
+#define QSERDES_V3_TX_SLEW_CNTL 0x030
+#define QSERDES_V3_TX_INTERFACE_SELECT 0x034
+#define QSERDES_V3_TX_RES_CODE_LANE_TX 0x03c
+#define QSERDES_V3_TX_RES_CODE_LANE_RX 0x040
+#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX 0x044
+#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX 0x048
+#define QSERDES_V3_TX_DEBUG_BUS_SEL 0x058
+#define QSERDES_V3_TX_TRANSCEIVER_BIAS_EN 0x05c
+#define QSERDES_V3_TX_HIGHZ_DRVR_EN 0x060
+#define QSERDES_V3_TX_TX_POL_INV 0x064
+#define QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN 0x068
+#define QSERDES_V3_TX_LANE_MODE_1 0x08c
+#define QSERDES_V3_TX_LANE_MODE_2 0x090
+#define QSERDES_V3_TX_LANE_MODE_3 0x094
+#define QSERDES_V3_TX_RCV_DETECT_LVL_2 0x0a4
+#define QSERDES_V3_TX_TRAN_DRVR_EMP_EN 0x0c0
+#define QSERDES_V3_TX_TX_INTERFACE_MODE 0x0c4
+#define QSERDES_V3_TX_VMODE_CTRL1 0x0f0
+
+/* Only for QMP V3 PHY - RX registers */
+#define QSERDES_V3_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c
+#define QSERDES_V3_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN 0x02c
+#define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V3_RX_RX_TERM_BW 0x07c
+#define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc
+#define QSERDES_V3_RX_VGA_CAL_CNTRL2 0x0c0
+#define QSERDES_V3_RX_RX_EQ_GAIN2_LSB 0x0c8
+#define QSERDES_V3_RX_RX_EQ_GAIN2_MSB 0x0cc
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL1 0x0d0
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2 0x0d4
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3 0x0d8
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4 0x0dc
+#define QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x0f8
+#define QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x0fc
+#define QSERDES_V3_RX_SIGDET_ENABLES 0x100
+#define QSERDES_V3_RX_SIGDET_CNTRL 0x104
+#define QSERDES_V3_RX_SIGDET_LVL 0x108
+#define QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL 0x10c
+#define QSERDES_V3_RX_RX_BAND 0x110
+#define QSERDES_V3_RX_RX_INTERFACE_MODE 0x11c
+#define QSERDES_V3_RX_RX_MODE_00 0x164
+#define QSERDES_V3_RX_RX_MODE_01 0x168
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4.h
new file mode 100644
index 0000000000..6ee3bec9ac
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V4_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V4_H_
+
+/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_V4_TX_BIST_INVERT 0x004
+#define QSERDES_V4_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_V4_TX_TX_EMP_POST1_LVL 0x00c
+#define QSERDES_V4_TX_TX_IDLE_LVL_LARGE_AMP 0x010
+#define QSERDES_V4_TX_TX_DRV_LVL 0x014
+#define QSERDES_V4_TX_TX_DRV_LVL_OFFSET 0x018
+#define QSERDES_V4_TX_RESET_TSYNC_EN 0x01c
+#define QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN 0x020
+#define QSERDES_V4_TX_TX_BAND 0x024
+#define QSERDES_V4_TX_SLEW_CNTL 0x028
+#define QSERDES_V4_TX_INTERFACE_SELECT 0x02c
+#define QSERDES_V4_TX_LPB_EN 0x030
+#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x034
+#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x038
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX 0x03c
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX 0x040
+#define QSERDES_V4_TX_PERL_LENGTH1 0x044
+#define QSERDES_V4_TX_PERL_LENGTH2 0x048
+#define QSERDES_V4_TX_SERDES_BYP_EN_OUT 0x04c
+#define QSERDES_V4_TX_DEBUG_BUS_SEL 0x050
+#define QSERDES_V4_TX_TRANSCEIVER_BIAS_EN 0x054
+#define QSERDES_V4_TX_HIGHZ_DRVR_EN 0x058
+#define QSERDES_V4_TX_TX_POL_INV 0x05c
+#define QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN 0x060
+#define QSERDES_V4_TX_BIST_PATTERN1 0x064
+#define QSERDES_V4_TX_BIST_PATTERN2 0x068
+#define QSERDES_V4_TX_BIST_PATTERN3 0x06c
+#define QSERDES_V4_TX_BIST_PATTERN4 0x070
+#define QSERDES_V4_TX_BIST_PATTERN5 0x074
+#define QSERDES_V4_TX_BIST_PATTERN6 0x078
+#define QSERDES_V4_TX_BIST_PATTERN7 0x07c
+#define QSERDES_V4_TX_BIST_PATTERN8 0x080
+#define QSERDES_V4_TX_LANE_MODE_1 0x084
+#define QSERDES_V4_TX_LANE_MODE_2 0x088
+#define QSERDES_V4_TX_LANE_MODE_3 0x08c
+#define QSERDES_V4_TX_ATB_SEL1 0x090
+#define QSERDES_V4_TX_ATB_SEL2 0x094
+#define QSERDES_V4_TX_RCV_DETECT_LVL 0x098
+#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x09c
+#define QSERDES_V4_TX_PRBS_SEED1 0x0a0
+#define QSERDES_V4_TX_PRBS_SEED2 0x0a4
+#define QSERDES_V4_TX_PRBS_SEED3 0x0a8
+#define QSERDES_V4_TX_PRBS_SEED4 0x0ac
+#define QSERDES_V4_TX_RESET_GEN 0x0b0
+#define QSERDES_V4_TX_RESET_GEN_MUXES 0x0b4
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0x0b8
+#define QSERDES_V4_TX_TX_INTERFACE_MODE 0x0bc
+#define QSERDES_V4_TX_PWM_CTRL 0x0c0
+#define QSERDES_V4_TX_PWM_ENCODED_OR_DATA 0x0c4
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND2 0x0c8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND2 0x0cc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND2 0x0d0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND2 0x0d4
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x0d8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x0dc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x0e0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x0e4
+#define QSERDES_V4_TX_VMODE_CTRL1 0x0e8
+#define QSERDES_V4_TX_ALOG_OBSV_BUS_CTRL_1 0x0ec
+#define QSERDES_V4_TX_BIST_STATUS 0x0f0
+#define QSERDES_V4_TX_BIST_ERROR_COUNT1 0x0f4
+#define QSERDES_V4_TX_BIST_ERROR_COUNT2 0x0f8
+#define QSERDES_V4_TX_ALOG_OBSV_BUS_STATUS_1 0x0fc
+#define QSERDES_V4_TX_LANE_DIG_CONFIG 0x100
+#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
+#define QSERDES_V4_TX_PRE_EMPH 0x108
+#define QSERDES_V4_TX_SW_RESET 0x10c
+#define QSERDES_V4_TX_DCC_OFFSET 0x110
+#define QSERDES_V4_TX_DIG_BKUP_CTRL 0x114
+#define QSERDES_V4_TX_DEBUG_BUS0 0x118
+#define QSERDES_V4_TX_DEBUG_BUS1 0x11c
+#define QSERDES_V4_TX_DEBUG_BUS2 0x120
+#define QSERDES_V4_TX_DEBUG_BUS3 0x124
+#define QSERDES_V4_TX_READ_EQCODE 0x128
+#define QSERDES_V4_TX_READ_OFFSETCODE 0x12c
+#define QSERDES_V4_TX_IA_ERROR_COUNTER_LOW 0x130
+#define QSERDES_V4_TX_IA_ERROR_COUNTER_HIGH 0x134
+#define QSERDES_V4_TX_VGA_READ_CODE 0x138
+#define QSERDES_V4_TX_VTH_READ_CODE 0x13c
+#define QSERDES_V4_TX_DFE_TAP1_READ_CODE 0x140
+#define QSERDES_V4_TX_DFE_TAP2_READ_CODE 0x144
+#define QSERDES_V4_TX_IDAC_STATUS_I 0x148
+#define QSERDES_V4_TX_IDAC_STATUS_IBAR 0x14c
+#define QSERDES_V4_TX_IDAC_STATUS_Q 0x150
+#define QSERDES_V4_TX_IDAC_STATUS_QBAR 0x154
+#define QSERDES_V4_TX_IDAC_STATUS_A 0x158
+#define QSERDES_V4_TX_IDAC_STATUS_ABAR 0x15c
+#define QSERDES_V4_TX_IDAC_STATUS_SM_ON 0x160
+#define QSERDES_V4_TX_IDAC_STATUS_CAL_DONE 0x164
+#define QSERDES_V4_TX_IDAC_STATUS_SIGNERROR 0x168
+#define QSERDES_V4_TX_DCC_CAL_STATUS 0x16c
+
+/* Only for QMP V4 PHY - RX registers */
+#define QSERDES_V4_RX_UCDR_FO_GAIN_HALF 0x000
+#define QSERDES_V4_RX_UCDR_FO_GAIN_QUARTER 0x004
+#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V4_RX_UCDR_SO_GAIN_HALF 0x00c
+#define QSERDES_V4_RX_UCDR_SO_GAIN_QUARTER 0x010
+#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V4_RX_UCDR_SVS_FO_GAIN_HALF 0x018
+#define QSERDES_V4_RX_UCDR_SVS_FO_GAIN_QUARTER 0x01c
+#define QSERDES_V4_RX_UCDR_SVS_FO_GAIN 0x020
+#define QSERDES_V4_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V4_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V4_RX_UCDR_SVS_SO_GAIN 0x02c
+#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V4_RX_UCDR_FO_TO_SO_DELAY 0x038
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V4_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V4_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V4_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V4_RX_AUX_CONTROL 0x05c
+#define QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE 0x060
+#define QSERDES_V4_RX_RCLK_AUXDATA_SEL 0x064
+#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V4_RX_AC_JTAG_INITP 0x06c
+#define QSERDES_V4_RX_AC_JTAG_INITN 0x070
+#define QSERDES_V4_RX_AC_JTAG_LVL 0x074
+#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V4_RX_AC_JTAG_RESET 0x07c
+#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_RX_RCVR_IQ_EN 0x084
+#define QSERDES_V4_RX_RX_IDAC_I_DC_OFFSETS 0x088
+#define QSERDES_V4_RX_RX_IDAC_IBAR_DC_OFFSETS 0x08c
+#define QSERDES_V4_RX_RX_IDAC_Q_DC_OFFSETS 0x090
+#define QSERDES_V4_RX_RX_IDAC_QBAR_DC_OFFSETS 0x094
+#define QSERDES_V4_RX_RX_IDAC_A_DC_OFFSETS 0x098
+#define QSERDES_V4_RX_RX_IDAC_ABAR_DC_OFFSETS 0x09c
+#define QSERDES_V4_RX_RX_IDAC_EN 0x0a0
+#define QSERDES_V4_RX_RX_IDAC_ENABLES 0x0a4
+#define QSERDES_V4_RX_RX_IDAC_SIGN 0x0a8
+#define QSERDES_V4_RX_RX_HIGHZ_HIGHRATE 0x0ac
+#define QSERDES_V4_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x0b0
+#define QSERDES_V4_RX_DFE_1 0x0b4
+#define QSERDES_V4_RX_DFE_2 0x0b8
+#define QSERDES_V4_RX_DFE_3 0x0bc
+#define QSERDES_V4_RX_DFE_4 0x0c0
+#define QSERDES_V4_RX_TX_ADAPT_PRE_THRESH1 0x0c4
+#define QSERDES_V4_RX_TX_ADAPT_PRE_THRESH2 0x0c8
+#define QSERDES_V4_RX_TX_ADAPT_POST_THRESH 0x0cc
+#define QSERDES_V4_RX_TX_ADAPT_MAIN_THRESH 0x0d0
+#define QSERDES_V4_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V4_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V4_RX_GM_CAL 0x0dc
+#define QSERDES_V4_RX_RX_VGA_GAIN2_LSB 0x0e0
+#define QSERDES_V4_RX_RX_VGA_GAIN2_MSB 0x0e4
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_IDAC_ACCUMULATOR 0x104
+#define QSERDES_V4_RX_RX_EQ_OFFSET_LSB 0x108
+#define QSERDES_V4_RX_RX_EQ_OFFSET_MSB 0x10c
+#define QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V4_RX_SIGDET_ENABLES 0x118
+#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V4_RX_SIGDET_LVL 0x120
+#define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V4_RX_RX_BAND 0x128
+#define QSERDES_V4_RX_CDR_FREEZE_UP_DN 0x12c
+#define QSERDES_V4_RX_CDR_RESET_OVERRIDE 0x130
+#define QSERDES_V4_RX_RX_INTERFACE_MODE 0x134
+#define QSERDES_V4_RX_JITTER_GEN_MODE 0x138
+#define QSERDES_V4_RX_SJ_AMP1 0x13c
+#define QSERDES_V4_RX_SJ_AMP2 0x140
+#define QSERDES_V4_RX_SJ_PER1 0x144
+#define QSERDES_V4_RX_SJ_PER2 0x148
+#define QSERDES_V4_RX_PPM_OFFSET1 0x14c
+#define QSERDES_V4_RX_PPM_OFFSET2 0x150
+#define QSERDES_V4_RX_SIGN_PPM_PERIOD1 0x154
+#define QSERDES_V4_RX_SIGN_PPM_PERIOD2 0x158
+#define QSERDES_V4_RX_RX_PWM_ENABLE_AND_DATA 0x15c
+#define QSERDES_V4_RX_RX_PWM_GEAR1_TIMEOUT_COUNT 0x160
+#define QSERDES_V4_RX_RX_PWM_GEAR2_TIMEOUT_COUNT 0x164
+#define QSERDES_V4_RX_RX_PWM_GEAR3_TIMEOUT_COUNT 0x168
+#define QSERDES_V4_RX_RX_PWM_GEAR4_TIMEOUT_COUNT 0x16c
+#define QSERDES_V4_RX_RX_MODE_00_LOW 0x170
+#define QSERDES_V4_RX_RX_MODE_00_HIGH 0x174
+#define QSERDES_V4_RX_RX_MODE_00_HIGH2 0x178
+#define QSERDES_V4_RX_RX_MODE_00_HIGH3 0x17c
+#define QSERDES_V4_RX_RX_MODE_00_HIGH4 0x180
+#define QSERDES_V4_RX_RX_MODE_01_LOW 0x184
+#define QSERDES_V4_RX_RX_MODE_01_HIGH 0x188
+#define QSERDES_V4_RX_RX_MODE_01_HIGH2 0x18c
+#define QSERDES_V4_RX_RX_MODE_01_HIGH3 0x190
+#define QSERDES_V4_RX_RX_MODE_01_HIGH4 0x194
+#define QSERDES_V4_RX_RX_MODE_10_LOW 0x198
+#define QSERDES_V4_RX_RX_MODE_10_HIGH 0x19c
+#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
+#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
+#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_PHPRE_CTRL 0x1ac
+#define QSERDES_V4_RX_PHPRE_INITVAL 0x1b0
+#define QSERDES_V4_RX_DFE_EN_TIMER 0x1b4
+#define QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET 0x1b8
+#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+#define QSERDES_V4_RX_DCC_CTRL2 0x1c0
+#define QSERDES_V4_RX_VTH_CODE 0x1c4
+#define QSERDES_V4_RX_VTH_MIN_THRESH 0x1c8
+#define QSERDES_V4_RX_VTH_MAX_THRESH 0x1cc
+#define QSERDES_V4_RX_ALOG_OBSV_BUS_CTRL_1 0x1d0
+#define QSERDES_V4_RX_PI_CTRL1 0x1d4
+#define QSERDES_V4_RX_PI_CTRL2 0x1d8
+#define QSERDES_V4_RX_PI_QUAD 0x1dc
+#define QSERDES_V4_RX_IDATA1 0x1e0
+#define QSERDES_V4_RX_IDATA2 0x1e4
+#define QSERDES_V4_RX_AUX_DATA1 0x1e8
+#define QSERDES_V4_RX_AUX_DATA2 0x1ec
+#define QSERDES_V4_RX_AC_JTAG_OUTP 0x1f0
+#define QSERDES_V4_RX_AC_JTAG_OUTN 0x1f4
+#define QSERDES_V4_RX_RX_SIGDET 0x1f8
+#define QSERDES_V4_RX_ALOG_OBSV_BUS_STATUS_1 0x1fc
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4_20.h
new file mode 100644
index 0000000000..114570f301
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4_20.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V4_20_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V4_20_H_
+
+/* Only for QMP V4_20 PHY - TX registers */
+#define QSERDES_V4_20_TX_LANE_MODE_1 0x88
+#define QSERDES_V4_20_TX_LANE_MODE_2 0x8c
+#define QSERDES_V4_20_TX_LANE_MODE_3 0x90
+#define QSERDES_V4_20_TX_VMODE_CTRL1 0xc4
+#define QSERDES_V4_20_TX_PI_QEC_CTRL 0xe0
+
+/* Only for QMP V4_20 PHY - RX registers */
+#define QSERDES_V4_20_RX_FO_GAIN_RATE2 0x008
+#define QSERDES_V4_20_RX_UCDR_PI_CONTROLS 0x058
+#define QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE 0x0ac
+#define QSERDES_V4_20_RX_DFE_3 0x110
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE1 0x134
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE2 0x138
+#define QSERDES_V4_20_RX_VGA_CAL_CNTRL2 0x150
+#define QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x178
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1 0x1c8
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2 0x1cc
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3 0x1d0
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4 0x1d4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B0 0x1d8
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B1 0x1dc
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B2 0x1e0
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B3 0x1e4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B4 0x1e8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B0 0x1ec
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B1 0x1f0
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B2 0x1f4
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B3 0x1f8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B4 0x1fc
+#define QSERDES_V4_20_RX_PHPRE_CTRL 0x200
+#define QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x20c
+#define QSERDES_V4_20_RX_MARG_COARSE_CTRL2 0x23c
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5.h
new file mode 100644
index 0000000000..fe8f3e330d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5.h
@@ -0,0 +1,231 @@
+
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V5_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V5_H_
+
+/* Only for QMP V5 PHY - TX registers */
+#define QSERDES_V5_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_V5_TX_BIST_INVERT 0x004
+#define QSERDES_V5_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_V5_TX_TX_EMP_POST1_LVL 0x00c
+#define QSERDES_V5_TX_TX_IDLE_LVL_LARGE_AMP 0x010
+#define QSERDES_V5_TX_TX_DRV_LVL 0x014
+#define QSERDES_V5_TX_TX_DRV_LVL_OFFSET 0x018
+#define QSERDES_V5_TX_RESET_TSYNC_EN 0x01c
+#define QSERDES_V5_TX_PRE_STALL_LDO_BOOST_EN 0x020
+#define QSERDES_V5_TX_TX_BAND 0x024
+#define QSERDES_V5_TX_SLEW_CNTL 0x028
+#define QSERDES_V5_TX_INTERFACE_SELECT 0x02c
+#define QSERDES_V5_TX_LPB_EN 0x030
+#define QSERDES_V5_TX_RES_CODE_LANE_TX 0x034
+#define QSERDES_V5_TX_RES_CODE_LANE_RX 0x038
+#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX 0x03c
+#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX 0x040
+#define QSERDES_V5_TX_PERL_LENGTH1 0x044
+#define QSERDES_V5_TX_PERL_LENGTH2 0x048
+#define QSERDES_V5_TX_SERDES_BYP_EN_OUT 0x04c
+#define QSERDES_V5_TX_DEBUG_BUS_SEL 0x050
+#define QSERDES_V5_TX_TRANSCEIVER_BIAS_EN 0x054
+#define QSERDES_V5_TX_HIGHZ_DRVR_EN 0x058
+#define QSERDES_V5_TX_TX_POL_INV 0x05c
+#define QSERDES_V5_TX_PARRATE_REC_DETECT_IDLE_EN 0x060
+#define QSERDES_V5_TX_BIST_PATTERN1 0x064
+#define QSERDES_V5_TX_BIST_PATTERN2 0x068
+#define QSERDES_V5_TX_BIST_PATTERN3 0x06c
+#define QSERDES_V5_TX_BIST_PATTERN4 0x070
+#define QSERDES_V5_TX_BIST_PATTERN5 0x074
+#define QSERDES_V5_TX_BIST_PATTERN6 0x078
+#define QSERDES_V5_TX_BIST_PATTERN7 0x07c
+#define QSERDES_V5_TX_BIST_PATTERN8 0x080
+#define QSERDES_V5_TX_LANE_MODE_1 0x084
+#define QSERDES_V5_TX_LANE_MODE_2 0x088
+#define QSERDES_V5_TX_LANE_MODE_3 0x08c
+#define QSERDES_V5_TX_LANE_MODE_4 0x090
+#define QSERDES_V5_TX_LANE_MODE_5 0x094
+#define QSERDES_V5_TX_ATB_SEL1 0x098
+#define QSERDES_V5_TX_ATB_SEL2 0x09c
+#define QSERDES_V5_TX_RCV_DETECT_LVL 0x0a0
+#define QSERDES_V5_TX_RCV_DETECT_LVL_2 0x0a4
+#define QSERDES_V5_TX_PRBS_SEED1 0x0a8
+#define QSERDES_V5_TX_PRBS_SEED2 0x0ac
+#define QSERDES_V5_TX_PRBS_SEED3 0x0b0
+#define QSERDES_V5_TX_PRBS_SEED4 0x0b4
+#define QSERDES_V5_TX_RESET_GEN 0x0b8
+#define QSERDES_V5_TX_RESET_GEN_MUXES 0x0bc
+#define QSERDES_V5_TX_TRAN_DRVR_EMP_EN 0x0c0
+#define QSERDES_V5_TX_TX_INTERFACE_MODE 0x0c4
+#define QSERDES_V5_TX_VMODE_CTRL1 0x0c8
+#define QSERDES_V5_TX_ALOG_OBSV_BUS_CTRL_1 0x0cc
+#define QSERDES_V5_TX_BIST_STATUS 0x0d0
+#define QSERDES_V5_TX_BIST_ERROR_COUNT1 0x0d4
+#define QSERDES_V5_TX_BIST_ERROR_COUNT2 0x0d8
+#define QSERDES_V5_TX_ALOG_OBSV_BUS_STATUS_1 0x0dc
+#define QSERDES_V5_TX_LANE_DIG_CONFIG 0x0e0
+#define QSERDES_V5_TX_PI_QEC_CTRL 0x0e4
+#define QSERDES_V5_TX_PRE_EMPH 0x0e8
+#define QSERDES_V5_TX_SW_RESET 0x0ec
+#define QSERDES_V5_TX_DCC_OFFSET 0x0f0
+#define QSERDES_V5_TX_DCC_CMUX_POSTCAL_OFFSET 0x0f4
+#define QSERDES_V5_TX_DCC_CMUX_CAL_CTRL1 0x0f8
+#define QSERDES_V5_TX_DCC_CMUX_CAL_CTRL2 0x0fc
+#define QSERDES_V5_TX_DIG_BKUP_CTRL 0x100
+#define QSERDES_V5_TX_DEBUG_BUS0 0x104
+#define QSERDES_V5_TX_DEBUG_BUS1 0x108
+#define QSERDES_V5_TX_DEBUG_BUS2 0x10c
+#define QSERDES_V5_TX_DEBUG_BUS3 0x110
+#define QSERDES_V5_TX_READ_EQCODE 0x114
+#define QSERDES_V5_TX_READ_OFFSETCODE 0x118
+#define QSERDES_V5_TX_IA_ERROR_COUNTER_LOW 0x11c
+#define QSERDES_V5_TX_IA_ERROR_COUNTER_HIGH 0x120
+#define QSERDES_V5_TX_VGA_READ_CODE 0x124
+#define QSERDES_V5_TX_VTH_READ_CODE 0x128
+#define QSERDES_V5_TX_DFE_TAP1_READ_CODE 0x12c
+#define QSERDES_V5_TX_DFE_TAP2_READ_CODE 0x130
+#define QSERDES_V5_TX_IDAC_STATUS_I 0x134
+#define QSERDES_V5_TX_IDAC_STATUS_IBAR 0x138
+#define QSERDES_V5_TX_IDAC_STATUS_Q 0x13c
+#define QSERDES_V5_TX_IDAC_STATUS_QBAR 0x140
+#define QSERDES_V5_TX_IDAC_STATUS_A 0x144
+#define QSERDES_V5_TX_IDAC_STATUS_ABAR 0x148
+#define QSERDES_V5_TX_IDAC_STATUS_SM_ON 0x14c
+#define QSERDES_V5_TX_IDAC_STATUS_CAL_DONE 0x150
+#define QSERDES_V5_TX_IDAC_STATUS_SIGNERROR 0x154
+#define QSERDES_V5_TX_DCC_CAL_STATUS 0x158
+#define QSERDES_V5_TX_DCC_READ_CODE_STATUS 0x15c
+
+/* Only for QMP V5 PHY - RX registers */
+#define QSERDES_V5_RX_UCDR_FO_GAIN_HALF 0x000
+#define QSERDES_V5_RX_UCDR_FO_GAIN_QUARTER 0x004
+#define QSERDES_V5_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V5_RX_UCDR_SO_GAIN_HALF 0x00c
+#define QSERDES_V5_RX_UCDR_SO_GAIN_QUARTER 0x010
+#define QSERDES_V5_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V5_RX_UCDR_SVS_FO_GAIN_HALF 0x018
+#define QSERDES_V5_RX_UCDR_SVS_FO_GAIN_QUARTER 0x01c
+#define QSERDES_V5_RX_UCDR_SVS_FO_GAIN 0x020
+#define QSERDES_V5_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V5_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V5_RX_UCDR_SVS_SO_GAIN 0x02c
+#define QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V5_RX_UCDR_FO_TO_SO_DELAY 0x038
+#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V5_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V5_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V5_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V5_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V5_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V5_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V5_RX_AUX_CONTROL 0x05c
+#define QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE 0x060
+#define QSERDES_V5_RX_RCLK_AUXDATA_SEL 0x064
+#define QSERDES_V5_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V5_RX_AC_JTAG_INITP 0x06c
+#define QSERDES_V5_RX_AC_JTAG_INITN 0x070
+#define QSERDES_V5_RX_AC_JTAG_LVL 0x074
+#define QSERDES_V5_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V5_RX_AC_JTAG_RESET 0x07c
+#define QSERDES_V5_RX_RX_TERM_BW 0x080
+#define QSERDES_V5_RX_RX_RCVR_IQ_EN 0x084
+#define QSERDES_V5_RX_RX_IDAC_I_DC_OFFSETS 0x088
+#define QSERDES_V5_RX_RX_IDAC_IBAR_DC_OFFSETS 0x08c
+#define QSERDES_V5_RX_RX_IDAC_Q_DC_OFFSETS 0x090
+#define QSERDES_V5_RX_RX_IDAC_QBAR_DC_OFFSETS 0x094
+#define QSERDES_V5_RX_RX_IDAC_A_DC_OFFSETS 0x098
+#define QSERDES_V5_RX_RX_IDAC_ABAR_DC_OFFSETS 0x09c
+#define QSERDES_V5_RX_RX_IDAC_EN 0x0a0
+#define QSERDES_V5_RX_RX_IDAC_ENABLES 0x0a4
+#define QSERDES_V5_RX_RX_IDAC_SIGN 0x0a8
+#define QSERDES_V5_RX_RX_HIGHZ_HIGHRATE 0x0ac
+#define QSERDES_V5_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x0b0
+#define QSERDES_V5_RX_DFE_1 0x0b4
+#define QSERDES_V5_RX_DFE_2 0x0b8
+#define QSERDES_V5_RX_DFE_3 0x0bc
+#define QSERDES_V5_RX_DFE_4 0x0c0
+#define QSERDES_V5_RX_TX_ADAPT_PRE_THRESH1 0x0c4
+#define QSERDES_V5_RX_TX_ADAPT_PRE_THRESH2 0x0c8
+#define QSERDES_V5_RX_TX_ADAPT_POST_THRESH 0x0cc
+#define QSERDES_V5_RX_TX_ADAPT_MAIN_THRESH 0x0d0
+#define QSERDES_V5_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V5_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V5_RX_GM_CAL 0x0dc
+#define QSERDES_V5_RX_RX_VGA_GAIN2_LSB 0x0e0
+#define QSERDES_V5_RX_RX_VGA_GAIN2_MSB 0x0e4
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V5_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V5_RX_RX_IDAC_ACCUMULATOR 0x104
+#define QSERDES_V5_RX_RX_EQ_OFFSET_LSB 0x108
+#define QSERDES_V5_RX_RX_EQ_OFFSET_MSB 0x10c
+#define QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V5_RX_SIGDET_ENABLES 0x118
+#define QSERDES_V5_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V5_RX_SIGDET_LVL 0x120
+#define QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V5_RX_RX_BAND 0x128
+#define QSERDES_V5_RX_CDR_FREEZE_UP_DN 0x12c
+#define QSERDES_V5_RX_CDR_RESET_OVERRIDE 0x130
+#define QSERDES_V5_RX_RX_INTERFACE_MODE 0x134
+#define QSERDES_V5_RX_JITTER_GEN_MODE 0x138
+#define QSERDES_V5_RX_SJ_AMP1 0x13c
+#define QSERDES_V5_RX_SJ_AMP2 0x140
+#define QSERDES_V5_RX_SJ_PER1 0x144
+#define QSERDES_V5_RX_SJ_PER2 0x148
+#define QSERDES_V5_RX_PPM_OFFSET1 0x14c
+#define QSERDES_V5_RX_PPM_OFFSET2 0x150
+#define QSERDES_V5_RX_SIGN_PPM_PERIOD1 0x154
+#define QSERDES_V5_RX_SIGN_PPM_PERIOD2 0x158
+#define QSERDES_V5_RX_RX_MODE_00_LOW 0x15c
+#define QSERDES_V5_RX_RX_MODE_00_HIGH 0x160
+#define QSERDES_V5_RX_RX_MODE_00_HIGH2 0x164
+#define QSERDES_V5_RX_RX_MODE_00_HIGH3 0x168
+#define QSERDES_V5_RX_RX_MODE_00_HIGH4 0x16c
+#define QSERDES_V5_RX_RX_MODE_01_LOW 0x170
+#define QSERDES_V5_RX_RX_MODE_01_HIGH 0x174
+#define QSERDES_V5_RX_RX_MODE_01_HIGH2 0x178
+#define QSERDES_V5_RX_RX_MODE_01_HIGH3 0x17c
+#define QSERDES_V5_RX_RX_MODE_01_HIGH4 0x180
+#define QSERDES_V5_RX_RX_MODE_10_LOW 0x184
+#define QSERDES_V5_RX_RX_MODE_10_HIGH 0x188
+#define QSERDES_V5_RX_RX_MODE_10_HIGH2 0x18c
+#define QSERDES_V5_RX_RX_MODE_10_HIGH3 0x190
+#define QSERDES_V5_RX_RX_MODE_10_HIGH4 0x194
+#define QSERDES_V5_RX_PHPRE_CTRL 0x198
+#define QSERDES_V5_RX_PHPRE_INITVAL 0x19c
+#define QSERDES_V5_RX_DFE_EN_TIMER 0x1a0
+#define QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4
+#define QSERDES_V5_RX_DCC_CTRL1 0x1a8
+#define QSERDES_V5_RX_DCC_CTRL2 0x1ac
+#define QSERDES_V5_RX_VTH_CODE 0x1b0
+#define QSERDES_V5_RX_VTH_MIN_THRESH 0x1b4
+#define QSERDES_V5_RX_VTH_MAX_THRESH 0x1b8
+#define QSERDES_V5_RX_ALOG_OBSV_BUS_CTRL_1 0x1bc
+#define QSERDES_V5_RX_PI_CTRL1 0x1c0
+#define QSERDES_V5_RX_PI_CTRL2 0x1c4
+#define QSERDES_V5_RX_PI_QUAD 0x1c8
+#define QSERDES_V5_RX_IDATA1 0x1cc
+#define QSERDES_V5_RX_IDATA2 0x1d0
+#define QSERDES_V5_RX_AUX_DATA1 0x1d4
+#define QSERDES_V5_RX_AUX_DATA2 0x1d8
+#define QSERDES_V5_RX_AC_JTAG_OUTP 0x1dc
+#define QSERDES_V5_RX_AC_JTAG_OUTN 0x1e0
+#define QSERDES_V5_RX_RX_SIGDET 0x1e4
+#define QSERDES_V5_RX_ALOG_OBSV_BUS_STATUS_1 0x1e8
+
+/* Only for QMP V5 UFS ? */
+#define QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x178
+#define QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x17c
+#define QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x180
+#define QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x184
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
new file mode 100644
index 0000000000..86c0110479
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V5_20_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V5_20_H_
+
+/* Only for QMP V5_20 PHY - TX registers */
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_20_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_20_TX_LANE_MODE_2 0x7c
+
+/* Only for QMP V5_20 PHY - RX registers */
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030
+#define QSERDES_V5_20_RX_RX_IDAC_SAOFFSET 0x07c
+#define QSERDES_V5_20_RX_DFE_3 0x090
+#define QSERDES_V5_20_RX_DFE_DAC_ENABLE1 0x0b4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1 0x0c4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2 0x0c8
+#define QSERDES_V5_20_RX_VGA_CAL_MAN_VAL 0x0dc
+#define QSERDES_V5_20_RX_GM_CAL 0x0ec
+#define QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4 0x108
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1 0x164
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2 0x168
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3 0x16c
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5 0x174
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6 0x178
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B0 0x17c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B1 0x180
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B2 0x184
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B3 0x188
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B4 0x18c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B5 0x190
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B6 0x194
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B0 0x198
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B1 0x19c
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B2 0x1a0
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B3 0x1a4
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B4 0x1a8
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B5 0x1ac
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B6 0x1b0
+#define QSERDES_V5_20_RX_PHPRE_CTRL 0x1b4
+#define QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x1c0
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210 0x1f4
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3 0x1f8
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210 0x1fc
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3 0x200
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210 0x204
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3 0x208
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx.h
new file mode 100644
index 0000000000..d20694513e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_H_
+
+/* Only for QMP V2 PHY - TX registers */
+#define QSERDES_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_TX_BIST_INVERT 0x004
+#define QSERDES_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_TX_CMN_CONTROL_ONE 0x00c
+#define QSERDES_TX_CMN_CONTROL_TWO 0x010
+#define QSERDES_TX_CMN_CONTROL_THREE 0x014
+#define QSERDES_TX_TX_EMP_POST1_LVL 0x018
+#define QSERDES_TX_TX_POST2_EMPH 0x01c
+#define QSERDES_TX_TX_BOOST_LVL_UP_DN 0x020
+#define QSERDES_TX_HP_PD_ENABLES 0x024
+#define QSERDES_TX_TX_IDLE_LVL_LARGE_AMP 0x028
+#define QSERDES_TX_TX_DRV_LVL 0x02c
+#define QSERDES_TX_TX_DRV_LVL_OFFSET 0x030
+#define QSERDES_TX_RESET_TSYNC_EN 0x034
+#define QSERDES_TX_PRE_STALL_LDO_BOOST_EN 0x038
+#define QSERDES_TX_TX_BAND 0x03c
+#define QSERDES_TX_SLEW_CNTL 0x040
+#define QSERDES_TX_INTERFACE_SELECT 0x044
+#define QSERDES_TX_LPB_EN 0x048
+#define QSERDES_TX_RES_CODE_LANE_TX 0x04c
+#define QSERDES_TX_RES_CODE_LANE_RX 0x050
+#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
+#define QSERDES_TX_PERL_LENGTH1 0x058
+#define QSERDES_TX_PERL_LENGTH2 0x05c
+#define QSERDES_TX_SERDES_BYP_EN_OUT 0x060
+#define QSERDES_TX_DEBUG_BUS_SEL 0x064
+#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
+#define QSERDES_TX_TX_POL_INV 0x06c
+#define QSERDES_TX_PARRATE_REC_DETECT_IDLE_EN 0x070
+#define QSERDES_TX_BIST_PATTERN1 0x074
+#define QSERDES_TX_BIST_PATTERN2 0x078
+#define QSERDES_TX_BIST_PATTERN3 0x07c
+#define QSERDES_TX_BIST_PATTERN4 0x080
+#define QSERDES_TX_BIST_PATTERN5 0x084
+#define QSERDES_TX_BIST_PATTERN6 0x088
+#define QSERDES_TX_BIST_PATTERN7 0x08c
+#define QSERDES_TX_BIST_PATTERN8 0x090
+#define QSERDES_TX_LANE_MODE 0x094
+#define QSERDES_TX_IDAC_CAL_LANE_MODE 0x098
+#define QSERDES_TX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x09c
+#define QSERDES_TX_ATB_SEL1 0x0a0
+#define QSERDES_TX_ATB_SEL2 0x0a4
+#define QSERDES_TX_RCV_DETECT_LVL 0x0a8
+#define QSERDES_TX_RCV_DETECT_LVL_2 0x0ac
+#define QSERDES_TX_PRBS_SEED1 0x0b0
+#define QSERDES_TX_PRBS_SEED2 0x0b4
+#define QSERDES_TX_PRBS_SEED3 0x0b8
+#define QSERDES_TX_PRBS_SEED4 0x0bc
+#define QSERDES_TX_RESET_GEN 0x0c0
+#define QSERDES_TX_RESET_GEN_MUXES 0x0c4
+#define QSERDES_TX_TRAN_DRVR_EMP_EN 0x0c8
+#define QSERDES_TX_TX_INTERFACE_MODE 0x0cc
+#define QSERDES_TX_PWM_CTRL 0x0d0
+#define QSERDES_TX_PWM_ENCODED_OR_DATA 0x0d4
+#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND2 0x0d8
+#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND2 0x0dc
+#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND2 0x0e0
+#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND2 0x0e4
+#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x0e8
+#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x0ec
+#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x0f0
+#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x0f4
+#define QSERDES_TX_VMODE_CTRL1 0x0f8
+#define QSERDES_TX_VMODE_CTRL2 0x0fc
+#define QSERDES_TX_TX_ALOG_INTF_OBSV_CNTL 0x100
+#define QSERDES_TX_BIST_STATUS 0x104
+#define QSERDES_TX_BIST_ERROR_COUNT1 0x108
+#define QSERDES_TX_BIST_ERROR_COUNT2 0x10c
+#define QSERDES_TX_TX_ALOG_INTF_OBSV 0x110
+
+/* Only for QMP V2 PHY - RX registers */
+#define QSERDES_RX_UCDR_FO_GAIN_HALF 0x000
+#define QSERDES_RX_UCDR_FO_GAIN_QUARTER 0x004
+#define QSERDES_RX_UCDR_FO_GAIN_EIGHTH 0x008
+#define QSERDES_RX_UCDR_FO_GAIN 0x00c
+#define QSERDES_RX_UCDR_SO_GAIN_HALF 0x010
+#define QSERDES_RX_UCDR_SO_GAIN_QUARTER 0x014
+#define QSERDES_RX_UCDR_SO_GAIN_EIGHTH 0x018
+#define QSERDES_RX_UCDR_SO_GAIN 0x01c
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_HALF 0x020
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_QUARTER 0x024
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_EIGHTH 0x028
+#define QSERDES_RX_UCDR_SVS_FO_GAIN 0x02c
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF 0x030
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER 0x034
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH 0x038
+#define QSERDES_RX_UCDR_SVS_SO_GAIN 0x03c
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x040
+#define QSERDES_RX_UCDR_FD_GAIN 0x044
+#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x048
+#define QSERDES_RX_UCDR_FO_TO_SO_DELAY 0x04c
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW 0x050
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH 0x054
+#define QSERDES_RX_UCDR_MODULATE 0x058
+#define QSERDES_RX_UCDR_PI_CONTROLS 0x05c
+#define QSERDES_RX_RBIST_CONTROL 0x060
+#define QSERDES_RX_AUX_CONTROL 0x064
+#define QSERDES_RX_AUX_DATA_TCOARSE 0x068
+#define QSERDES_RX_AUX_DATA_TFINE_LSB 0x06c
+#define QSERDES_RX_AUX_DATA_TFINE_MSB 0x070
+#define QSERDES_RX_RCLK_AUXDATA_SEL 0x074
+#define QSERDES_RX_AC_JTAG_ENABLE 0x078
+#define QSERDES_RX_AC_JTAG_INITP 0x07c
+#define QSERDES_RX_AC_JTAG_INITN 0x080
+#define QSERDES_RX_AC_JTAG_LVL 0x084
+#define QSERDES_RX_AC_JTAG_MODE 0x088
+#define QSERDES_RX_AC_JTAG_RESET 0x08c
+#define QSERDES_RX_RX_TERM_BW 0x090
+#define QSERDES_RX_RX_RCVR_IQ_EN 0x094
+#define QSERDES_RX_RX_IDAC_I_DC_OFFSETS 0x098
+#define QSERDES_RX_RX_IDAC_IBAR_DC_OFFSETS 0x09c
+#define QSERDES_RX_RX_IDAC_Q_DC_OFFSETS 0x0a0
+#define QSERDES_RX_RX_IDAC_QBAR_DC_OFFSETS 0x0a4
+#define QSERDES_RX_RX_IDAC_A_DC_OFFSETS 0x0a8
+#define QSERDES_RX_RX_IDAC_ABAR_DC_OFFSETS 0x0ac
+#define QSERDES_RX_RX_IDAC_EN 0x0b0
+#define QSERDES_RX_RX_IDAC_ENABLES 0x0b4
+#define QSERDES_RX_RX_IDAC_SIGN 0x0b8
+#define QSERDES_RX_RX_HIGHZ_HIGHRATE 0x0bc
+#define QSERDES_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x0c0
+#define QSERDES_RX_RX_EQ_GAIN1_LSB 0x0c4
+#define QSERDES_RX_RX_EQ_GAIN1_MSB 0x0c8
+#define QSERDES_RX_RX_EQ_GAIN2_LSB 0x0cc
+#define QSERDES_RX_RX_EQ_GAIN2_MSB 0x0d0
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1 0x0d4
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x0d8
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x0dc
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x0e0
+#define QSERDES_RX_RX_IDAC_CAL_CONFIGURATION 0x0e4
+#define QSERDES_RX_RX_IDAC_TSETTLE_LOW 0x0e8
+#define QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x0ec
+#define QSERDES_RX_RX_IDAC_ENDSAMP_LOW 0x0f0
+#define QSERDES_RX_RX_IDAC_ENDSAMP_HIGH 0x0f4
+#define QSERDES_RX_RX_IDAC_MIDPOINT_LOW 0x0f8
+#define QSERDES_RX_RX_IDAC_MIDPOINT_HIGH 0x0fc
+#define QSERDES_RX_RX_EQ_OFFSET_LSB 0x100
+#define QSERDES_RX_RX_EQ_OFFSET_MSB 0x104
+#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x108
+#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x10c
+#define QSERDES_RX_SIGDET_ENABLES 0x110
+#define QSERDES_RX_SIGDET_CNTRL 0x114
+#define QSERDES_RX_SIGDET_LVL 0x118
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x11c
+#define QSERDES_RX_RX_BAND 0x120
+#define QSERDES_RX_CDR_FREEZE_UP_DN 0x124
+#define QSERDES_RX_CDR_RESET_OVERRIDE 0x128
+#define QSERDES_RX_RX_INTERFACE_MODE 0x12c
+#define QSERDES_RX_JITTER_GEN_MODE 0x130
+#define QSERDES_RX_BUJ_AMP 0x134
+#define QSERDES_RX_SJ_AMP1 0x138
+#define QSERDES_RX_SJ_AMP2 0x13c
+#define QSERDES_RX_SJ_PER1 0x140
+#define QSERDES_RX_SJ_PER2 0x144
+#define QSERDES_RX_BUJ_STEP_FREQ1 0x148
+#define QSERDES_RX_BUJ_STEP_FREQ2 0x14c
+#define QSERDES_RX_PPM_OFFSET1 0x150
+#define QSERDES_RX_PPM_OFFSET2 0x154
+#define QSERDES_RX_SIGN_PPM_PERIOD1 0x158
+#define QSERDES_RX_SIGN_PPM_PERIOD2 0x15c
+#define QSERDES_RX_SSC_CTRL 0x160
+#define QSERDES_RX_SSC_COUNT1 0x164
+#define QSERDES_RX_SSC_COUNT2 0x168
+#define QSERDES_RX_RX_ALOG_INTF_OBSV_CNTL 0x16c
+#define QSERDES_RX_RX_PWM_ENABLE_AND_DATA 0x170
+#define QSERDES_RX_RX_PWM_GEAR1_TIMEOUT_COUNT 0x174
+#define QSERDES_RX_RX_PWM_GEAR2_TIMEOUT_COUNT 0x178
+#define QSERDES_RX_RX_PWM_GEAR3_TIMEOUT_COUNT 0x17c
+#define QSERDES_RX_RX_PWM_GEAR4_TIMEOUT_COUNT 0x180
+#define QSERDES_RX_PI_CTRL1 0x184
+#define QSERDES_RX_PI_CTRL2 0x188
+#define QSERDES_RX_PI_QUAD 0x18c
+#define QSERDES_RX_IDATA1 0x190
+#define QSERDES_RX_IDATA2 0x194
+#define QSERDES_RX_AUX_DATA1 0x198
+#define QSERDES_RX_AUX_DATA2 0x19c
+#define QSERDES_RX_AC_JTAG_OUTP 0x1a0
+#define QSERDES_RX_AC_JTAG_OUTN 0x1a4
+#define QSERDES_RX_RX_SIGDET 0x1a8
+#define QSERDES_RX_RX_VDCOFF 0x1ac
+#define QSERDES_RX_IDAC_CAL_ON 0x1b0
+#define QSERDES_RX_IDAC_STATUS_I 0x1b4
+#define QSERDES_RX_IDAC_STATUS_IBAR 0x1b8
+#define QSERDES_RX_IDAC_STATUS_Q 0x1bc
+#define QSERDES_RX_IDAC_STATUS_QBAR 0x1c0
+#define QSERDES_RX_IDAC_STATUS_A 0x1c4
+#define QSERDES_RX_IDAC_STATUS_ABAR 0x1c8
+#define QSERDES_RX_CALST_STATUS_I 0x1cc
+#define QSERDES_RX_CALST_STATUS_Q 0x1d0
+#define QSERDES_RX_CALST_STATUS_A 0x1d4
+#define QSERDES_RX_RX_ALOG_INTF_OBSV 0x1d8
+#define QSERDES_RX_READ_EQCODE 0x1dc
+#define QSERDES_RX_READ_OFFSETCODE 0x1e0
+#define QSERDES_RX_IA_ERROR_COUNTER_LOW 0x1e4
+#define QSERDES_RX_IA_ERROR_COUNTER_HIGH 0x1e8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
new file mode 100644
index 0000000000..c8583f5a54
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -0,0 +1,1383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+/* Define the assumed distance between lanes for underspecified device trees.
+ */
+#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
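As a reading aid: when the device tree only describes a single register region, drivers following this pattern assume each lane's block sits a fixed stride from the previous one. A minimal sketch of how such a stride would be applied; the helper name and signature are illustrative only, not part of this patch:

#include <linux/io.h>

/* Illustrative helper (not part of this patch): with a legacy single-region
 * device tree, lane N's TX block is assumed to start at a fixed stride
 * from lane 0's block.
 */
static void __iomem *qmp_legacy_lane_base(void __iomem *lane0_base,
					  unsigned int lane)
{
	return lane0_base + lane * QMP_PHY_LEGACY_LANE_STRIDE;
}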
+
+struct qmp_phy_init_tbl {
+	unsigned int offset;
+	unsigned int val;
+	/*
+	 * register part of layout ?
+	 * if yes, then offset gives index in the reg-layout
+	 */
+	bool in_layout;
+	/*
+	 * mask of lanes for which this register is written
+	 * for cases when second lane needs different values
+	 */
+	u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+	{ \
+		.offset = o, \
+		.val = v, \
+		.lane_mask = 0xff, \
+	}
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+	{ \
+		.offset = o, \
+		.val = v, \
+		.in_layout = true, \
+		.lane_mask = 0xff, \
+	}
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+	{ \
+		.offset = o, \
+		.val = v, \
+		.lane_mask = l, \
+	}
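The tables built from these macros are replayed register-by-register at PHY init. A minimal sketch of such a consumer loop, in the style the QMP drivers use elsewhere (the function name and signature here are illustrative, not part of this patch); it shows how in_layout redirects offset through a per-PHY reg layout and how lane_mask gates per-lane overrides:

#include <linux/io.h>

/* Illustrative consumer of struct qmp_phy_init_tbl (not part of this patch). */
static void qmp_configure_lane(void __iomem *base, const unsigned int *regs,
			       const struct qmp_phy_init_tbl tbl[],
			       int num, u8 lane_mask)
{
	const struct qmp_phy_init_tbl *t = tbl;
	int i;

	if (!t)
		return;

	for (i = 0; i < num; i++, t++) {
		/* Skip entries that are not meant for this lane */
		if (!(t->lane_mask & lane_mask))
			continue;

		if (t->in_layout)
			/* offset is an index into the reg-layout array */
			writel(t->val, base + regs[t->offset]);
		else
			writel(t->val, base + t->offset);
	}
}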
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+	/* Common block control registers */
+	QPHY_COM_SW_RESET,
+	QPHY_COM_POWER_DOWN_CONTROL,
+	QPHY_COM_START_CONTROL,
+	QPHY_COM_PCS_READY_STATUS,
+	/* PCS registers */
+	QPHY_SW_RESET,
+	QPHY_START_CTRL,
+	QPHY_PCS_READY_STATUS,
+	QPHY_PCS_STATUS,
+	QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+	QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+	QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+	QPHY_PCS_POWER_DOWN_CONTROL,
+	/* PCS_MISC registers */
+	QPHY_PCS_MISC_TYPEC_CTRL,
+	/* Keep last to ensure regs_layout arrays are properly initialized */
+	QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+	[QPHY_START_CTRL] = 0x00,
+	[QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+	[QPHY_START_CTRL] = 0x00,
+	[QPHY_PCS_READY_STATUS] = 0x160,
+};
+
+static const unsigned int sm6115_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+	[QPHY_START_CTRL] = 0x00,
+	[QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+	[QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START,
+	[QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS,
+	[QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
+};
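These layout arrays exist because the PCS registers moved between PHY generations; generic code indexes them through enum qphy_reg_layout instead of hard-coding offsets. A hedged sketch of how the start/ready bits defined earlier combine with this indirection (the function name and error handling are illustrative, not part of this patch, and some layouts handle reset differently):

#include <linux/io.h>
#include <linux/iopoll.h>

/* Illustrative start-up sequence (not part of this patch). */
static int qmp_ufs_start(void __iomem *pcs, const unsigned int *regs)
{
	void __iomem *status = pcs + regs[QPHY_PCS_READY_STATUS];
	unsigned int val;

	/* Release the PHY from software reset, then start SerDes and PCS */
	writel(0, pcs + regs[QPHY_SW_RESET]);
	writel(SERDES_START | PCS_START, pcs + regs[QPHY_START_CTRL]);

	/* Poll until the PCS reports ready, or give up after the timeout */
	return readl_poll_timeout(status, val, (val & PCS_READY), 10,
				  PHY_INIT_COMPLETE_TIMEOUT);
}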
+
+static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
+	QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x54),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufs_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+	QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x02),
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5b),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
+};
+
+static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f), + + /* Rate B */ + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44), +}; + +static const struct qmp_phy_init_tbl sdm845_ufsphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07), +}; + +static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59), +}; + +static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_SIGDET_CTRL2, 0x6e), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_SYM_RESYNC_CTRL, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_MID_TERM_CTRL1, 0x43), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_SIGDET_CTRL1, 0x0f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_MIN_HIBERN8_TIME, 0x9a), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_MULTI_LANE_CTRL1, 0x02), 
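+	/* sdm845 is a dual-lane PHY; see .is_dual_lane_phy in its cfg */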
+}; + +static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_INITVAL2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x98), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x32), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23), + + /* Rate B */ + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06), +}; + +static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c), +}; + +static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xf6), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3d), + 
QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1), + +}; + +static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2, 0x6d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1, 0x43), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1, 0x02), +}; + +static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x19), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x98), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x65), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23), + + /* Rate B */ + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x06), +}; + +static const struct qmp_phy_init_tbl sm8350_ufsphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xf5), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0c), +}; + +static const struct qmp_phy_init_tbl sm8350_ufsphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_LVL, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x1e), + 
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_BAND, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf1), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_TERM_BW, 0x1b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_MEASURE_TIME, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x6d), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x6d), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xed), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0x3c), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xe0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xb7), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_LOW, 0xe0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0xb7), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c), +}; + +static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2, 0x6d), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1, 0x43), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_PLL_CNTL, 0x03), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB, 0x16), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB, 0xd8), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND, 0x06), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x03), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x03), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1, 0x0e), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02), +}; + +struct qmp_phy; + +/* struct qmp_phy_cfg - per-PHY initialization config */ +struct qmp_phy_cfg { + /* phy-type - PCIE/UFS/USB */ + unsigned int type; + /* number of lanes provided by phy */ + int nlanes; + + /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ + const struct qmp_phy_init_tbl *serdes_tbl; + int serdes_tbl_num; + const struct qmp_phy_init_tbl *tx_tbl; + int tx_tbl_num; + const struct qmp_phy_init_tbl *rx_tbl; + int rx_tbl_num; + const struct qmp_phy_init_tbl *pcs_tbl; + int pcs_tbl_num; + + /* clock ids to be 
requested */
+	const char * const *clk_list;
+	int num_clks;
+	/* regulators to be requested */
+	const char * const *vreg_list;
+	int num_vregs;
+
+	/* array of registers with different offsets */
+	const unsigned int *regs;
+
+	unsigned int start_ctrl;
+	unsigned int pwrdn_ctrl;
+	/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+	unsigned int phy_status;
+
+	/* true, if PHY has secondary tx/rx lanes to be configured */
+	bool is_dual_lane_phy;
+
+	/* true, if PCS block has no separate SW_RESET register */
+	bool no_pcs_sw_reset;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
+ * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @mode: current PHY mode
+ */
+struct qmp_phy {
+	struct phy *phy;
+	const struct qmp_phy_cfg *cfg;
+	void __iomem *serdes;
+	void __iomem *tx;
+	void __iomem *rx;
+	void __iomem *pcs;
+	void __iomem *tx2;
+	void __iomem *rx2;
+	void __iomem *pcs_misc;
+	unsigned int index;
+	struct qcom_qmp *qmp;
+	enum phy_mode mode;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ *
+ * @clks: array of clocks required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @phys: array of per-lane phy descriptors
+ * @ufs_reset: optional UFS PHY reset handle
+ */
+struct qcom_qmp {
+	struct device *dev;
+
+	struct clk_bulk_data *clks;
+	struct regulator_bulk_data *vregs;
+
+	struct qmp_phy **phys;
+
+	struct reset_control *ufs_reset;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+	u32 reg;
+
+	reg = readl(base + offset);
+	reg |= val;
+	writel(reg, base + offset);
+
+	/* ensure that above write is through (flush the posted write) */
+	readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+	u32 reg;
+
+	reg = readl(base + offset);
+	reg &= ~val;
+	writel(reg, base + offset);
+
+	/* ensure that above write is through (flush the posted write) */
+	readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_ufs_phy_clk_l[] = {
+	"ref",
+};
+
+/* sm8450 needs an additional "qref" clock on top of "ref" and "ref_aux" */
+static const char * const sm8450_ufs_phy_clk_l[] = {
+	"qref", "ref", "ref_aux",
+};
+
+static const char * const sdm845_ufs_phy_clk_l[] = {
+	"ref", "ref_aux",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+	"vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_ufs_cfg = {
+	.type = PHY_TYPE_UFS,
+	.nlanes = 1,
+
+	.serdes_tbl = msm8996_ufs_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl),
+	.tx_tbl = msm8996_ufs_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(msm8996_ufs_tx_tbl),
+	.rx_tbl = msm8996_ufs_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(msm8996_ufs_rx_tbl),
+
+	.clk_list = msm8996_ufs_phy_clk_l,
+	.num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l),
+
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+
+	.regs = msm8996_ufsphy_regs_layout,
+
+	.start_ctrl = SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN,
+	.phy_status = PHYSTATUS,
+
+	.no_pcs_sw_reset =
true, +}; + +static const struct qmp_phy_cfg sdm845_ufsphy_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 2, + + .serdes_tbl = sdm845_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl), + .tx_tbl = sdm845_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_tx_tbl), + .rx_tbl = sdm845_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_rx_tbl), + .pcs_tbl = sdm845_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sdm845_ufsphy_pcs_tbl), + .clk_list = sdm845_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sdm845_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .is_dual_lane_phy = true, + .no_pcs_sw_reset = true, +}; + +static const struct qmp_phy_cfg sm6115_ufsphy_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 1, + + .serdes_tbl = sm6115_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm6115_ufsphy_serdes_tbl), + .tx_tbl = sm6115_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_tx_tbl), + .rx_tbl = sm6115_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_rx_tbl), + .pcs_tbl = sm6115_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm6115_ufsphy_pcs_tbl), + .clk_list = sdm845_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm6115_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + + .no_pcs_sw_reset = true, +}; + +static const struct qmp_phy_cfg sm8150_ufsphy_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 2, + + .serdes_tbl = sm8150_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl), + .tx_tbl = sm8150_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl), + .rx_tbl = sm8150_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl), + .pcs_tbl = sm8150_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl), + .clk_list = sdm845_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8150_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sm8350_ufsphy_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 2, + + .serdes_tbl = sm8350_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl), + .tx_tbl = sm8350_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl), + .rx_tbl = sm8350_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl), + .pcs_tbl = sm8350_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl), + .clk_list = sdm845_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8150_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sm8450_ufsphy_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 2, + + .serdes_tbl = sm8350_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl), + .tx_tbl = sm8350_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl), + .rx_tbl = sm8350_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl), + .pcs_tbl = sm8350_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl), + .clk_list = 
sm8450_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8150_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .is_dual_lane_phy = true, +}; + +static void qcom_qmp_phy_ufs_configure_lane(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num, + u8 lane_mask) +{ + int i; + const struct qmp_phy_init_tbl *t = tbl; + + if (!t) + return; + + for (i = 0; i < num; i++, t++) { + if (!(t->lane_mask & lane_mask)) + continue; + + if (t->in_layout) + writel(t->val, base + regs[t->offset]); + else + writel(t->val, base + t->offset); + } +} + +static void qcom_qmp_phy_ufs_configure(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num) +{ + qcom_qmp_phy_ufs_configure_lane(base, regs, tbl, num, 0xff); +} + +static int qcom_qmp_phy_ufs_serdes_init(struct qmp_phy *qphy) +{ + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; + const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; + int serdes_tbl_num = cfg->serdes_tbl_num; + + qcom_qmp_phy_ufs_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + + return 0; +} + +static int qcom_qmp_phy_ufs_com_init(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *pcs = qphy->pcs; + int ret; + + /* turn on regulator supplies */ + ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); + if (ret) { + dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); + return ret; + } + + ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks); + if (ret) + goto err_disable_regulators; + + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) + qphy_setbits(pcs, + cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + else + qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + + return 0; + +err_disable_regulators: + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); + + return ret; +} + +static int qcom_qmp_phy_ufs_com_exit(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + + reset_control_assert(qmp->ufs_reset); + + clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); + + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); + + return 0; +} + +static int qcom_qmp_phy_ufs_init(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + int ret; + dev_vdbg(qmp->dev, "Initializing QMP phy\n"); + + if (cfg->no_pcs_sw_reset) { + /* + * Get UFS reset, which is delayed until now to avoid a + * circular dependency where UFS needs its PHY, but the PHY + * needs this UFS reset. 
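+		 * The "ufsphy" reset line is therefore requested lazily here,
+		 * at phy_init() time, rather than at probe.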
+		 */
+		if (!qmp->ufs_reset) {
+			qmp->ufs_reset =
+				devm_reset_control_get_exclusive(qmp->dev,
+								 "ufsphy");
+
+			if (IS_ERR(qmp->ufs_reset)) {
+				ret = PTR_ERR(qmp->ufs_reset);
+				dev_err(qmp->dev,
+					"failed to get UFS reset: %d\n",
+					ret);
+
+				qmp->ufs_reset = NULL;
+				return ret;
+			}
+		}
+
+		ret = reset_control_assert(qmp->ufs_reset);
+		if (ret)
+			return ret;
+	}
+
+	ret = qcom_qmp_phy_ufs_com_init(qphy);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
+{
+	struct qmp_phy *qphy = phy_get_drvdata(phy);
+	struct qcom_qmp *qmp = qphy->qmp;
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+	void __iomem *tx = qphy->tx;
+	void __iomem *rx = qphy->rx;
+	void __iomem *pcs = qphy->pcs;
+	void __iomem *status;
+	unsigned int mask, val, ready;
+	int ret;
+
+	qcom_qmp_phy_ufs_serdes_init(qphy);
+
+	/* Tx, Rx, and PCS configurations */
+	qcom_qmp_phy_ufs_configure_lane(tx, cfg->regs,
+					cfg->tx_tbl, cfg->tx_tbl_num, 1);
+
+	/* Configure the second lane of dual-lane PHYs */
+	if (cfg->is_dual_lane_phy) {
+		qcom_qmp_phy_ufs_configure_lane(qphy->tx2, cfg->regs,
+						cfg->tx_tbl, cfg->tx_tbl_num, 2);
+	}
+
+	qcom_qmp_phy_ufs_configure_lane(rx, cfg->regs,
+					cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+	if (cfg->is_dual_lane_phy) {
+		qcom_qmp_phy_ufs_configure_lane(qphy->rx2, cfg->regs,
+						cfg->rx_tbl, cfg->rx_tbl_num, 2);
+	}
+
+	qcom_qmp_phy_ufs_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+	ret = reset_control_deassert(qmp->ufs_reset);
+	if (ret)
+		return ret;
+
+	/* Pull PHY out of reset state */
+	if (!cfg->no_pcs_sw_reset)
+		qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+	/* start SerDes and Phy-Coding-Sublayer */
+	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+	status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+	mask = PCS_READY;
+	ready = PCS_READY;
+
+	ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+				 PHY_INIT_COMPLETE_TIMEOUT);
+	if (ret) {
+		dev_err(qmp->dev, "phy initialization timed-out\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qcom_qmp_phy_ufs_power_off(struct phy *phy)
+{
+	struct qmp_phy *qphy = phy_get_drvdata(phy);
+	const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+	/* PHY reset */
+	if (!cfg->no_pcs_sw_reset)
+		qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+	/* stop SerDes and Phy-Coding-Sublayer */
+	qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+	/* Put PHY into POWER DOWN state: active low */
+	if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+		qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+			     cfg->pwrdn_ctrl);
+	} else {
+		qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+			     cfg->pwrdn_ctrl);
+	}
+
+	return 0;
+}
+
+static int qcom_qmp_phy_ufs_exit(struct phy *phy)
+{
+	struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+	qcom_qmp_phy_ufs_com_exit(qphy);
+
+	return 0;
+}
+
+static int qcom_qmp_phy_ufs_enable(struct phy *phy)
+{
+	int ret;
+
+	ret = qcom_qmp_phy_ufs_init(phy);
+	if (ret)
+		return ret;
+
+	ret = qcom_qmp_phy_ufs_power_on(phy);
+	if (ret)
+		qcom_qmp_phy_ufs_exit(phy);
+
+	return ret;
+}
+
+static int qcom_qmp_phy_ufs_disable(struct phy *phy)
+{
+	int ret;
+
+	ret = qcom_qmp_phy_ufs_power_off(phy);
+	if (ret)
+		return ret;
+	return qcom_qmp_phy_ufs_exit(phy);
+}
+
+static int qcom_qmp_phy_ufs_set_mode(struct phy *phy,
+				     enum phy_mode mode, int submode)
+{
+	struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+	qphy->mode = mode;
+
+	return 0;
+}
+
+static int qcom_qmp_phy_ufs_vreg_init(struct device *dev, const
struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_vregs; + int i; + + qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL); + if (!qmp->vregs) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->vregs[i].supply = cfg->vreg_list[i]; + + return devm_regulator_bulk_get(dev, num, qmp->vregs); +} + +static int qcom_qmp_phy_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_clks; + int i; + + qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL); + if (!qmp->clks) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->clks[i].id = cfg->clk_list[i]; + + return devm_clk_bulk_get(dev, num, qmp->clks); +} + +static const struct phy_ops qcom_qmp_ufs_ops = { + .power_on = qcom_qmp_phy_ufs_enable, + .power_off = qcom_qmp_phy_ufs_disable, + .set_mode = qcom_qmp_phy_ufs_set_mode, + .owner = THIS_MODULE, +}; + +static +int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id, + void __iomem *serdes, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + struct phy *generic_phy; + struct qmp_phy *qphy; + int ret; + + qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); + if (!qphy) + return -ENOMEM; + + qphy->cfg = cfg; + qphy->serdes = serdes; + /* + * Get memory resources for each phy lane: + * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. + * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5 + * For single lane PHYs: pcs_misc (optional) -> 3. + */ + qphy->tx = of_iomap(np, 0); + if (!qphy->tx) + return -ENOMEM; + + qphy->rx = of_iomap(np, 1); + if (!qphy->rx) + return -ENOMEM; + + qphy->pcs = of_iomap(np, 2); + if (!qphy->pcs) + return -ENOMEM; + + /* + * If this is a dual-lane PHY, then there should be registers for the + * second lane. Some old device trees did not specify this, so fall + * back to old legacy behavior of assuming they can be reached at an + * offset from the first lane. + */ + if (cfg->is_dual_lane_phy) { + qphy->tx2 = of_iomap(np, 3); + qphy->rx2 = of_iomap(np, 4); + if (!qphy->tx2 || !qphy->rx2) { + dev_warn(dev, + "Underspecified device tree, falling back to legacy register regions\n"); + + /* In the old version, pcs_misc is at index 3. 
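+			 * The region mapped at index 3 above is really
+			 * pcs_misc, so reuse it and derive tx2/rx2 from the
+			 * first lane at QMP_PHY_LEGACY_LANE_STRIDE instead.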
*/ + qphy->pcs_misc = qphy->tx2; + qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE; + qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE; + + } else { + qphy->pcs_misc = of_iomap(np, 5); + } + + } else { + qphy->pcs_misc = of_iomap(np, 3); + } + + if (!qphy->pcs_misc) + dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); + + generic_phy = devm_phy_create(dev, np, &qcom_qmp_ufs_ops); + if (IS_ERR(generic_phy)) { + ret = PTR_ERR(generic_phy); + dev_err(dev, "failed to create qphy %d\n", ret); + return ret; + } + + qphy->phy = generic_phy; + qphy->index = id; + qphy->qmp = qmp; + qmp->phys[id] = qphy; + phy_set_drvdata(generic_phy, qphy); + + return 0; +} + +static const struct of_device_id qcom_qmp_phy_ufs_of_match_table[] = { + { + .compatible = "qcom,msm8996-qmp-ufs-phy", + .data = &msm8996_ufs_cfg, + }, { + .compatible = "qcom,msm8998-qmp-ufs-phy", + .data = &sdm845_ufsphy_cfg, + }, { + .compatible = "qcom,sc8180x-qmp-ufs-phy", + .data = &sm8150_ufsphy_cfg, + }, { + .compatible = "qcom,sc8280xp-qmp-ufs-phy", + .data = &sm8350_ufsphy_cfg, + }, { + .compatible = "qcom,sdm845-qmp-ufs-phy", + .data = &sdm845_ufsphy_cfg, + }, { + .compatible = "qcom,sm6115-qmp-ufs-phy", + .data = &sm6115_ufsphy_cfg, + }, { + .compatible = "qcom,sm6350-qmp-ufs-phy", + .data = &sdm845_ufsphy_cfg, + }, { + .compatible = "qcom,sm8150-qmp-ufs-phy", + .data = &sm8150_ufsphy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-ufs-phy", + .data = &sm8150_ufsphy_cfg, + }, { + .compatible = "qcom,sm8350-qmp-ufs-phy", + .data = &sm8350_ufsphy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-ufs-phy", + .data = &sm8450_ufsphy_cfg, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_qmp_phy_ufs_of_match_table); + +static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev) +{ + struct qcom_qmp *qmp; + struct device *dev = &pdev->dev; + struct device_node *child; + struct phy_provider *phy_provider; + void __iomem *serdes; + const struct qmp_phy_cfg *cfg = NULL; + int num, id; + int ret; + + qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = dev; + dev_set_drvdata(dev, qmp); + + /* Get the specific init parameters of QMP phy */ + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + + /* per PHY serdes; usually located at base address */ + serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(serdes)) + return PTR_ERR(serdes); + + ret = qcom_qmp_phy_ufs_clk_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_ufs_vreg_init(dev, cfg); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get regulator supplies: %d\n", + ret); + return ret; + } + + num = of_get_available_child_count(dev->of_node); + /* do we have a rogue child node ? */ + if (num > 1) + return -EINVAL; + + qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); + if (!qmp->phys) + return -ENOMEM; + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + /* + * Prevent runtime pm from being ON by default. Users can enable + * it using power/control in sysfs. 
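+	 * pm_runtime_forbid() takes a usage count reference, so the device
+	 * stays active until userspace writes "auto" to power/control.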
+ */ + pm_runtime_forbid(dev); + + id = 0; + for_each_available_child_of_node(dev->of_node, child) { + /* Create per-lane phy */ + ret = qcom_qmp_phy_ufs_create(dev, child, id, serdes, cfg); + if (ret) { + dev_err(dev, "failed to create lane%d phy, %d\n", + id, ret); + goto err_node_put; + } + + id++; + } + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (!IS_ERR(phy_provider)) + dev_info(dev, "Registered Qcom-QMP phy\n"); + else + pm_runtime_disable(dev); + + return PTR_ERR_OR_ZERO(phy_provider); + +err_node_put: + pm_runtime_disable(dev); + of_node_put(child); + return ret; +} + +static struct platform_driver qcom_qmp_phy_ufs_driver = { + .probe = qcom_qmp_phy_ufs_probe, + .driver = { + .name = "qcom-qmp-ufs-phy", + .of_match_table = qcom_qmp_phy_ufs_of_match_table, + }, +}; + +module_platform_driver(qcom_qmp_phy_ufs_driver); + +MODULE_AUTHOR("Vivek Gautam "); +MODULE_DESCRIPTION("Qualcomm QMP UFS PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c new file mode 100644 index 0000000000..1d270356a9 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -0,0 +1,2765 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "phy-qcom-qmp.h" + +/* QPHY_SW_RESET bit */ +#define SW_RESET BIT(0) +/* QPHY_POWER_DOWN_CONTROL */ +#define SW_PWRDN BIT(0) +#define REFCLK_DRV_DSBL BIT(1) +/* QPHY_START_CONTROL bits */ +#define SERDES_START BIT(0) +#define PCS_START BIT(1) +#define PLL_READY_GATE_EN BIT(3) +/* QPHY_PCS_STATUS bit */ +#define PHYSTATUS BIT(6) +#define PHYSTATUS_4_20 BIT(7) +/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */ +#define PCS_READY BIT(0) + +/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */ +/* DP PHY soft reset */ +#define SW_DPPHY_RESET BIT(0) +/* mux to select DP PHY reset control, 0:HW control, 1: software reset */ +#define SW_DPPHY_RESET_MUX BIT(1) +/* USB3 PHY soft reset */ +#define SW_USB3PHY_RESET BIT(2) +/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */ +#define SW_USB3PHY_RESET_MUX BIT(3) + +/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */ +#define USB3_MODE BIT(0) /* enables USB3 mode */ +#define DP_MODE BIT(1) /* enables DP mode */ + +/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */ +#define ARCVR_DTCT_EN BIT(0) +#define ALFPS_DTCT_EN BIT(1) +#define ARCVR_DTCT_EVENT_SEL BIT(4) + +/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */ +#define IRQ_CLEAR BIT(0) + +/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */ +#define RCVR_DETECT BIT(0) + +/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */ +#define CLAMP_EN BIT(0) /* enables i/o clamp_n */ + +#define PHY_INIT_COMPLETE_TIMEOUT 10000 +#define POWER_DOWN_DELAY_US_MIN 10 +#define POWER_DOWN_DELAY_US_MAX 11 + +#define MAX_PROP_NAME 32 + +/* Define the assumed distance between lanes for underspecified device trees. */ +#define QMP_PHY_LEGACY_LANE_STRIDE 0x400 + +struct qmp_phy_init_tbl { + unsigned int offset; + unsigned int val; + /* + * register part of layout ? 
+ * if yes, then offset gives index in the reg-layout + */ + bool in_layout; + /* + * mask of lanes for which this register is written + * for cases when second lane needs different values + */ + u8 lane_mask; +}; + +#define QMP_PHY_INIT_CFG(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_L(o, v) \ + { \ + .offset = o, \ + .val = v, \ + .in_layout = true, \ + .lane_mask = 0xff, \ + } + +#define QMP_PHY_INIT_CFG_LANE(o, v, l) \ + { \ + .offset = o, \ + .val = v, \ + .lane_mask = l, \ + } + +/* set of registers with offsets different per-PHY */ +enum qphy_reg_layout { + /* Common block control registers */ + QPHY_COM_SW_RESET, + QPHY_COM_POWER_DOWN_CONTROL, + QPHY_COM_START_CONTROL, + QPHY_COM_PCS_READY_STATUS, + /* PCS registers */ + QPHY_SW_RESET, + QPHY_START_CTRL, + QPHY_PCS_READY_STATUS, + QPHY_PCS_STATUS, + QPHY_PCS_AUTONOMOUS_MODE_CTRL, + QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR, + QPHY_PCS_LFPS_RXTERM_IRQ_STATUS, + QPHY_PCS_POWER_DOWN_CONTROL, + /* PCS_MISC registers */ + QPHY_PCS_MISC_TYPEC_CTRL, + /* Keep last to ensure regs_layout arrays are properly initialized */ + QPHY_LAYOUT_SIZE +}; + +static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x17c, + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8, + [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178, +}; + +static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x174, + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc, + [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170, +}; + +static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x44, + [QPHY_PCS_STATUS] = 0x14, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40, + + /* In PCS_USB */ + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x008, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x014, +}; + +static const unsigned int qcm2290_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0xd8, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0xdc, + [QPHY_PCS_STATUS] = 0x174, + [QPHY_PCS_MISC_TYPEC_CTRL] = 0x00, +}; + +static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06), + /* PLL and Loop filter settings */ + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15), + 
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a), + /* SSC settings */ + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07), +}; + +static const struct qmp_phy_init_tbl ipq8074_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xb8), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x0), +}; + +static const struct qmp_phy_init_tbl ipq8074_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0e), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f), +}; + +static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04), + /* PLL and Loop filter settings */ + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b), + 
QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a), + /* SSC settings */ + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07), +}; + +static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45), + QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06), +}; + +static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16), +}; + +static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = { + /* FLL settings */ + QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNTRL2, 0x03), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNTRL1, 0x02), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNT_VAL_H_TOL, 0x42), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_MAN_CODE, 0x85), + + /* Lock Det settings */ + QMP_PHY_INIT_CFG(QPHY_V2_PCS_LOCK_DETECT_CONFIG1, 0xd1), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG2, 0x08), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = { + /* FLL settings */ + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + + /* Lock Det settings */ + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15), + 
QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x50), + 
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75), +}; + +static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = { + /* FLL settings */ + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + + /* Lock Det settings */ + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb5), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4c), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x64), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6a), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60), +}; + +static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80), + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07), +}; + +static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00), +}; + +static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05), +}; + +static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1), + 
QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab), + 
QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = { + /* Lock Det settings */ + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + 
QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x95), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x05), +}; + +static const struct qmp_phy_init_tbl 
sm8150_usb3_uniphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x37), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xef), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0f), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), +}; + +static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), + 
QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), + 
QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f), +}; + +static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), + 
QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_usb_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+};
+
+static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x80),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x26),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x48),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+	QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 
0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb), + QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + 
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), +}; + +static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), +}; + +static const struct qmp_phy_init_tbl qcm2290_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06), + QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00), + 
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_INITVAL, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x01), +}; + +static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x00), +}; + +static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00), +}; + +static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88), +}; + +struct qmp_phy; + +/* struct qmp_phy_cfg - per-PHY initialization config */ 
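+/*
+ * Note: the init tables above are arrays of struct qmp_phy_init_tbl,
+ * presumably defined earlier in this file. Its layout, as implied by the
+ * field accesses in qcom_qmp_phy_usb_configure_lane() further down, is
+ * roughly:
+ *
+ *	struct qmp_phy_init_tbl {
+ *		unsigned int offset;	register offset, or index into regs[]
+ *		unsigned int val;	value programmed with writel()
+ *		bool in_layout;		resolve offset through cfg->regs[]
+ *		u8 lane_mask;		lanes the entry applies to
+ *	};
+ *
+ * QMP_PHY_INIT_CFG(reg, val) presumably marks an entry for all lanes, while
+ * QMP_PHY_INIT_CFG_LANE(reg, val, lane) restricts it to one lane of a
+ * dual-lane PHY. Each per-SoC qmp_phy_cfg below then simply points the
+ * common configuration code at the right set of tables.
+ */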
+struct qmp_phy_cfg { + /* phy-type - PCIE/UFS/USB */ + unsigned int type; + /* number of lanes provided by phy */ + int nlanes; + + /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ + const struct qmp_phy_init_tbl *serdes_tbl; + int serdes_tbl_num; + const struct qmp_phy_init_tbl *tx_tbl; + int tx_tbl_num; + const struct qmp_phy_init_tbl *rx_tbl; + int rx_tbl_num; + const struct qmp_phy_init_tbl *pcs_tbl; + int pcs_tbl_num; + const struct qmp_phy_init_tbl *pcs_usb_tbl; + int pcs_usb_tbl_num; + + /* clock ids to be requested */ + const char * const *clk_list; + int num_clks; + /* resets to be requested */ + const char * const *reset_list; + int num_resets; + /* regulators to be requested */ + const char * const *vreg_list; + int num_vregs; + + /* array of registers with different offsets */ + const unsigned int *regs; + + unsigned int start_ctrl; + unsigned int pwrdn_ctrl; + /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ + unsigned int phy_status; + + /* true, if PHY needs delay after POWER_DOWN */ + bool has_pwrdn_delay; + /* power_down delay in usec */ + int pwrdn_delay_min; + int pwrdn_delay_max; + + /* true, if PHY has a separate DP_COM control block */ + bool has_phy_dp_com_ctrl; + /* true, if PHY has secondary tx/rx lanes to be configured */ + bool is_dual_lane_phy; + + /* Offset from PCS to PCS_USB region */ + unsigned int pcs_usb_offset; +}; + +/** + * struct qmp_phy - per-lane phy descriptor + * + * @phy: generic phy + * @cfg: phy specific configuration + * @serdes: iomapped memory space for phy's serdes (i.e. PLL) + * @tx: iomapped memory space for lane's tx + * @rx: iomapped memory space for lane's rx + * @pcs: iomapped memory space for lane's pcs + * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs) + * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs) + * @pcs_misc: iomapped memory space for lane's pcs_misc + * @pcs_usb: iomapped memory space for lane's pcs_usb + * @pipe_clk: pipe clock + * @index: lane index + * @qmp: QMP phy to which this lane belongs + * @mode: current PHY mode + */ +struct qmp_phy { + struct phy *phy; + const struct qmp_phy_cfg *cfg; + void __iomem *serdes; + void __iomem *tx; + void __iomem *rx; + void __iomem *pcs; + void __iomem *tx2; + void __iomem *rx2; + void __iomem *pcs_misc; + void __iomem *pcs_usb; + struct clk *pipe_clk; + unsigned int index; + struct qcom_qmp *qmp; + enum phy_mode mode; +}; + +/** + * struct qcom_qmp - structure holding QMP phy block attributes + * + * @dev: device + * @dp_com: iomapped memory space for phy's dp_com control block + * + * @clks: array of clocks required by phy + * @resets: array of resets required by phy + * @vregs: regulator supplies bulk data + * + * @phys: array of per-lane phy descriptors + */ +struct qcom_qmp { + struct device *dev; + void __iomem *dp_com; + + struct clk_bulk_data *clks; + struct reset_control_bulk_data *resets; + struct regulator_bulk_data *vregs; + + struct qmp_phy **phys; +}; + +static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg |= val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg &= ~val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +/* list of clocks required by phy */ +static const char * const msm8996_phy_clk_l[] = { 
+ "aux", "cfg_ahb", "ref", +}; + +static const char * const qmp_v3_phy_clk_l[] = { + "aux", "cfg_ahb", "ref", "com_aux", +}; + +static const char * const qmp_v4_phy_clk_l[] = { + "aux", "ref_clk_src", "ref", "com_aux", +}; + +/* the primary usb3 phy on sm8250 doesn't have a ref clock */ +static const char * const qmp_v4_sm8250_usbphy_clk_l[] = { + "aux", "ref_clk_src", "com_aux" +}; + +/* usb3 phy on sdx55 doesn't have com_aux clock */ +static const char * const qmp_v4_sdx55_usbphy_clk_l[] = { + "aux", "cfg_ahb", "ref" +}; + +static const char * const qcm2290_usb3phy_clk_l[] = { + "cfg_ahb", "ref", "com_aux", +}; + +/* list of resets */ +static const char * const msm8996_usb3phy_reset_l[] = { + "phy", "common", +}; + +static const char * const sc7180_usb3phy_reset_l[] = { + "phy", +}; + +static const char * const qcm2290_usb3phy_reset_l[] = { + "phy_phy", "phy", +}; + +/* list of regulators */ +static const char * const qmp_phy_vreg_l[] = { + "vdda-phy", "vdda-pll", +}; + +static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = ipq8074_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl), + .tx_tbl = msm8996_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl), + .rx_tbl = ipq8074_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl), + .pcs_tbl = ipq8074_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl), + .clk_list = msm8996_phy_clk_l, + .num_clks = ARRAY_SIZE(msm8996_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, +}; + +static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = msm8996_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl), + .tx_tbl = msm8996_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl), + .rx_tbl = msm8996_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl), + .pcs_tbl = msm8996_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl), + .clk_list = msm8996_phy_clk_l, + .num_clks = ARRAY_SIZE(msm8996_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, +}; + +static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = qmp_v3_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl), + .tx_tbl = qmp_v3_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl), + .rx_tbl = qmp_v3_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl), + .pcs_tbl = qmp_v3_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl), + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = 
POWER_DOWN_DELAY_US_MAX, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sc7180_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = qmp_v3_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl), + .tx_tbl = qmp_v3_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl), + .rx_tbl = qmp_v3_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl), + .pcs_tbl = qmp_v3_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl), + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = sc7180_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl), + .tx_tbl = qmp_v3_usb3_uniphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_tx_tbl), + .rx_tbl = qmp_v3_usb3_uniphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl), + .pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl), + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, +}; + +static const struct qmp_phy_cfg msm8998_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = msm8998_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl), + .tx_tbl = msm8998_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl), + .rx_tbl = msm8998_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl), + .pcs_tbl = msm8998_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl), + .clk_list = msm8996_phy_clk_l, + .num_clks = ARRAY_SIZE(msm8996_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl), + .tx_tbl = sm8150_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl), + .rx_tbl = sm8150_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl), + .pcs_tbl = sm8150_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl), + .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl), + .clk_list = qmp_v4_phy_clk_l, + .num_clks = 
ARRAY_SIZE(qmp_v4_phy_clk_l),
+	.reset_list = msm8996_usb3phy_reset_l,
+	.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = qmp_v4_usb3phy_regs_layout,
+	.pcs_usb_offset = 0x300,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+	.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+	.has_phy_dp_com_ctrl = true,
+	.is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
+	.type = PHY_TYPE_USB3,
+	.nlanes = 1,
+
+	.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+	.tx_tbl = sm8150_usb3_uniphy_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_tx_tbl),
+	.rx_tbl = sm8150_usb3_uniphy_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_rx_tbl),
+	.pcs_tbl = sm8150_usb3_uniphy_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_tbl),
+	.pcs_usb_tbl = sm8150_usb3_uniphy_pcs_usb_tbl,
+	.pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_usb_tbl),
+	.clk_list = qmp_v4_phy_clk_l,
+	.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+	.reset_list = msm8996_usb3phy_reset_l,
+	.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = qmp_v4_usb3phy_regs_layout,
+	.pcs_usb_offset = 0x600,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+	.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+	.type = PHY_TYPE_USB3,
+	.nlanes = 1,
+
+	.serdes_tbl = sm8150_usb3_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+	.tx_tbl = sm8250_usb3_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
+	.rx_tbl = sm8250_usb3_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
+	.pcs_tbl = sm8250_usb3_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
+	.pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl,
+	.pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl),
+	.clk_list = qmp_v4_sm8250_usbphy_clk_l,
+	.num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+	.reset_list = msm8996_usb3phy_reset_l,
+	.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+	.vreg_list = qmp_phy_vreg_l,
+	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs = qmp_v4_usb3phy_regs_layout,
+	.pcs_usb_offset = 0x300,
+
+	.start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN,
+	.phy_status = PHYSTATUS,
+
+	.has_pwrdn_delay = true,
+	.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+	.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+	.has_phy_dp_com_ctrl = true,
+	.is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
+	.type = PHY_TYPE_USB3,
+	.nlanes = 1,
+
+	.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+	.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+	.tx_tbl = sm8250_usb3_uniphy_tx_tbl,
+	.tx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_tx_tbl),
+	.rx_tbl = sm8250_usb3_uniphy_rx_tbl,
+	.rx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_rx_tbl),
+	.pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
+	.pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
+	.pcs_usb_tbl = sm8250_usb3_uniphy_pcs_usb_tbl,
+	.pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_usb_tbl),
+	.clk_list = qmp_v4_phy_clk_l,
+	.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+	.reset_list =
msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x600, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, +}; + +static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl), + .tx_tbl = sdx55_usb3_uniphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_tx_tbl), + .rx_tbl = sdx55_usb3_uniphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_rx_tbl), + .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl), + .pcs_usb_tbl = sm8250_usb3_uniphy_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_usb_tbl), + .clk_list = qmp_v4_sdx55_usbphy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x600, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, +}; + +static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl), + .tx_tbl = sdx65_usb3_uniphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_tx_tbl), + .rx_tbl = sdx65_usb3_uniphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_rx_tbl), + .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl), + .pcs_usb_tbl = sm8350_usb3_uniphy_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_usb_tbl), + .clk_list = qmp_v4_sdx55_usbphy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x1000, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, +}; + +static const struct qmp_phy_cfg sm8350_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl), + .tx_tbl = sm8350_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl), + .rx_tbl = sm8350_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl), + .pcs_tbl = sm8350_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl), + .pcs_usb_tbl = sm8350_usb3_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_usb_tbl), + .clk_list = qmp_v4_sm8250_usbphy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = 
ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x300, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl), + .tx_tbl = sm8350_usb3_uniphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_tx_tbl), + .rx_tbl = sm8350_usb3_uniphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_rx_tbl), + .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl), + .pcs_usb_tbl = sm8350_usb3_uniphy_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_usb_tbl), + .clk_list = qmp_v4_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x1000, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, +}; + +static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = qcm2290_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl), + .tx_tbl = qcm2290_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl), + .rx_tbl = qcm2290_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl), + .pcs_tbl = qcm2290_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl), + .clk_list = qcm2290_usb3phy_clk_l, + .num_clks = ARRAY_SIZE(qcm2290_usb3phy_clk_l), + .reset_list = qcm2290_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qcm2290_usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .is_dual_lane_phy = true, +}; + +static void qcom_qmp_phy_usb_configure_lane(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num, + u8 lane_mask) +{ + int i; + const struct qmp_phy_init_tbl *t = tbl; + + if (!t) + return; + + for (i = 0; i < num; i++, t++) { + if (!(t->lane_mask & lane_mask)) + continue; + + if (t->in_layout) + writel(t->val, base + regs[t->offset]); + else + writel(t->val, base + t->offset); + } +} + +static void qcom_qmp_phy_usb_configure(void __iomem *base, + const unsigned int *regs, + const struct qmp_phy_init_tbl tbl[], + int num) +{ + qcom_qmp_phy_usb_configure_lane(base, regs, tbl, num, 0xff); +} + +static int qcom_qmp_phy_usb_serdes_init(struct qmp_phy *qphy) +{ + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; + const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; + int serdes_tbl_num = cfg->serdes_tbl_num; + + qcom_qmp_phy_usb_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + + return 0; +} + +static int qcom_qmp_phy_usb_com_init(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem 
*pcs = qphy->pcs; + void __iomem *dp_com = qmp->dp_com; + int ret; + + /* turn on regulator supplies */ + ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); + if (ret) { + dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); + return ret; + } + + ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); + if (ret) { + dev_err(qmp->dev, "reset assert failed\n"); + goto err_disable_regulators; + } + + ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets); + if (ret) { + dev_err(qmp->dev, "reset deassert failed\n"); + goto err_disable_regulators; + } + + ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks); + if (ret) + goto err_assert_reset; + + if (cfg->has_phy_dp_com_ctrl) { + qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL, + SW_PWRDN); + /* override hardware control for reset of qmp phy */ + qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, + SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | + SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); + + /* Default Type-C orientation, i.e. CC1 */ + qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02); + + qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL, + USB3_MODE | DP_MODE); + + /* bring both QMP USB and QMP DP PHYs PCS block out of reset */ + qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, + SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | + SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); + + qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03); + qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET); + } + + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) + qphy_setbits(pcs, + cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + else + qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + + return 0; + +err_assert_reset: + reset_control_bulk_assert(cfg->num_resets, qmp->resets); +err_disable_regulators: + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); + + return ret; +} + +static int qcom_qmp_phy_usb_com_exit(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + + reset_control_bulk_assert(cfg->num_resets, qmp->resets); + + clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); + + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); + + return 0; +} + +static int qcom_qmp_phy_usb_init(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + int ret; + + dev_vdbg(qmp->dev, "Initializing QMP phy\n"); + + ret = qcom_qmp_phy_usb_com_init(qphy); + if (ret) + return ret; + + return 0; +} + +static int qcom_qmp_phy_usb_power_on(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *tx = qphy->tx; + void __iomem *rx = qphy->rx; + void __iomem *pcs = qphy->pcs; + void __iomem *status; + unsigned int mask, val, ready; + int ret; + + qcom_qmp_phy_usb_serdes_init(qphy); + + ret = clk_prepare_enable(qphy->pipe_clk); + if (ret) { + dev_err(qmp->dev, "pipe_clk enable failed, err=%d\n", ret); + return ret; + } + + /* Tx, Rx, and PCS configurations */ + qcom_qmp_phy_usb_configure_lane(tx, cfg->regs, + cfg->tx_tbl, cfg->tx_tbl_num, 1); + + /* Configuration for other LANE for USB-DP combo PHY */ + if (cfg->is_dual_lane_phy) { + qcom_qmp_phy_usb_configure_lane(qphy->tx2, cfg->regs, + cfg->tx_tbl, cfg->tx_tbl_num, 2); + } + + qcom_qmp_phy_usb_configure_lane(rx, cfg->regs, + cfg->rx_tbl, cfg->rx_tbl_num, 1); + + if (cfg->is_dual_lane_phy) { + qcom_qmp_phy_usb_configure_lane(qphy->rx2, cfg->regs, + cfg->rx_tbl, 
cfg->rx_tbl_num, 2); + } + + /* Configure link rate, swing, etc. */ + qcom_qmp_phy_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + + if (cfg->has_pwrdn_delay) + usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + + /* Pull PHY out of reset state */ + qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + + /* start SerDes and Phy-Coding-Sublayer */ + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + status = pcs + cfg->regs[QPHY_PCS_STATUS]; + mask = cfg->phy_status; + ready = 0; + + ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, "phy initialization timed-out\n"); + goto err_disable_pipe_clk; + } + + return 0; + +err_disable_pipe_clk: + clk_disable_unprepare(qphy->pipe_clk); + + return ret; +} + +static int qcom_qmp_phy_usb_power_off(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qphy->cfg; + + clk_disable_unprepare(qphy->pipe_clk); + + /* PHY reset */ + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + + /* stop SerDes and Phy-Coding-Sublayer */ + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + /* Put PHY into POWER DOWN state: active low */ + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + } else { + qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + } + + return 0; +} + +static int qcom_qmp_phy_usb_exit(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + qcom_qmp_phy_usb_com_exit(qphy); + + return 0; +} + +static int qcom_qmp_phy_usb_enable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_usb_init(phy); + if (ret) + return ret; + + ret = qcom_qmp_phy_usb_power_on(phy); + if (ret) + qcom_qmp_phy_usb_exit(phy); + + return ret; +} + +static int qcom_qmp_phy_usb_disable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_usb_power_off(phy); + if (ret) + return ret; + return qcom_qmp_phy_usb_exit(phy); +} + +static int qcom_qmp_phy_usb_set_mode(struct phy *phy, + enum phy_mode mode, int submode) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + + qphy->mode = mode; + + return 0; +} + +static void qcom_qmp_phy_usb_enable_autonomous_mode(struct qmp_phy *qphy) +{ + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; + void __iomem *pcs_misc = qphy->pcs_misc; + u32 intr_mask; + + if (qphy->mode == PHY_MODE_USB_HOST_SS || + qphy->mode == PHY_MODE_USB_DEVICE_SS) + intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN; + else + intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL; + + /* Clear any pending interrupts status */ + qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR); + /* Writing 1 followed by 0 clears the interrupt */ + qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR); + + qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], + ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL); + + /* Enable required PHY autonomous mode interrupts */ + qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask); + + /* Enable i/o clamp_n for autonomous mode */ + if (pcs_misc) + qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN); +} + +static void qcom_qmp_phy_usb_disable_autonomous_mode(struct qmp_phy *qphy) +{ + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; + void __iomem 
*pcs_misc = qphy->pcs_misc; + + /* Disable i/o clamp_n on resume for normal mode */ + if (pcs_misc) + qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN); + + qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], + ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN); + + qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR); + /* Writing 1 followed by 0 clears the interrupt */ + qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR); +} + +static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + struct qmp_phy *qphy = qmp->phys[0]; + const struct qmp_phy_cfg *cfg = qphy->cfg; + + dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode); + + /* Supported only for USB3 PHY and luckily USB3 is the first phy */ + if (cfg->type != PHY_TYPE_USB3) + return 0; + + if (!qphy->phy->init_count) { + dev_vdbg(dev, "PHY not initialized, bailing out\n"); + return 0; + } + + qcom_qmp_phy_usb_enable_autonomous_mode(qphy); + + clk_disable_unprepare(qphy->pipe_clk); + clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); + + return 0; +} + +static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + struct qmp_phy *qphy = qmp->phys[0]; + const struct qmp_phy_cfg *cfg = qphy->cfg; + int ret = 0; + + dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode); + + /* Supported only for USB3 PHY and luckily USB3 is the first phy */ + if (cfg->type != PHY_TYPE_USB3) + return 0; + + if (!qphy->phy->init_count) { + dev_vdbg(dev, "PHY not initialized, bailing out\n"); + return 0; + } + + ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks); + if (ret) + return ret; + + ret = clk_prepare_enable(qphy->pipe_clk); + if (ret) { + dev_err(dev, "pipe_clk enable failed, err=%d\n", ret); + clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); + return ret; + } + + qcom_qmp_phy_usb_disable_autonomous_mode(qphy); + + return 0; +} + +static int qcom_qmp_phy_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_vregs; + int i; + + qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL); + if (!qmp->vregs) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->vregs[i].supply = cfg->vreg_list[i]; + + return devm_regulator_bulk_get(dev, num, qmp->vregs); +} + +static int qcom_qmp_phy_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int i; + int ret; + + qmp->resets = devm_kcalloc(dev, cfg->num_resets, + sizeof(*qmp->resets), GFP_KERNEL); + if (!qmp->resets) + return -ENOMEM; + + for (i = 0; i < cfg->num_resets; i++) + qmp->resets[i].id = cfg->reset_list[i]; + + ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets); + if (ret) + return dev_err_probe(dev, ret, "failed to get resets\n"); + + return 0; +} + +static int qcom_qmp_phy_usb_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + int num = cfg->num_clks; + int i; + + qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL); + if (!qmp->clks) + return -ENOMEM; + + for (i = 0; i < num; i++) + qmp->clks[i].id = cfg->clk_list[i]; + + return devm_clk_bulk_get(dev, num, qmp->clks); +} + +static void phy_clk_release_provider(void *res) +{ + of_clk_del_provider(res); +} + +/* + * Register a fixed rate pipe clock. 
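+ * (The clock registered below is modelled as a fixed-rate 125 MHz clock, the PIPE interface rate used by controllers attached to QMP USB3 PHYs.)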
+ * + * The <s>_pipe_clksrc generated by PHY goes to the GCC that gate + * controls it. The <s>_pipe_clk coming out of the GCC is requested + * by the PHY driver for its operations. + * We register the <s>_pipe_clksrc here. The gcc driver takes care + * of assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk. + * Below picture shows this relationship. + * + * +---------------+ + * | PHY block |<<---------------------------------------+ + * | | | + * | +-------+ | +-----+ | + * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+ + * clk | +-------+ | +-----+ + * +---------------+ + */ +static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) +{ + struct clk_fixed_rate *fixed; + struct clk_init_data init = { }; + int ret; + + ret = of_property_read_string(np, "clock-output-names", &init.name); + if (ret) { + dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np); + return ret; + } + + fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL); + if (!fixed) + return -ENOMEM; + + init.ops = &clk_fixed_rate_ops; + + /* controllers using QMP phys use 125MHz pipe clock interface */ + fixed->fixed_rate = 125000000; + fixed->hw.init = &init; + + ret = devm_clk_hw_register(qmp->dev, &fixed->hw); + if (ret) + return ret; + + ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw); + if (ret) + return ret; + + /* + * Roll a devm action because the clock provider is the child node, but + * the child node is not actually a device. + */ + return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np); +} + +static const struct phy_ops qcom_qmp_phy_usb_ops = { + .init = qcom_qmp_phy_usb_enable, + .exit = qcom_qmp_phy_usb_disable, + .set_mode = qcom_qmp_phy_usb_set_mode, + .owner = THIS_MODULE, +}; + +static +int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id, + void __iomem *serdes, const struct qmp_phy_cfg *cfg) +{ + struct qcom_qmp *qmp = dev_get_drvdata(dev); + struct phy *generic_phy; + struct qmp_phy *qphy; + char prop_name[MAX_PROP_NAME]; + int ret; + + qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); + if (!qphy) + return -ENOMEM; + + qphy->cfg = cfg; + qphy->serdes = serdes; + /* + * Get memory resources for each phy lane: + * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. + * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5 + * For single lane PHYs: pcs_misc (optional) -> 3. + */ + qphy->tx = of_iomap(np, 0); + if (!qphy->tx) + return -ENOMEM; + + qphy->rx = of_iomap(np, 1); + if (!qphy->rx) + return -ENOMEM; + + qphy->pcs = of_iomap(np, 2); + if (!qphy->pcs) + return -ENOMEM; + + if (cfg->pcs_usb_offset) + qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset; + + /* + * If this is a dual-lane PHY, then there should be registers for the + * second lane. Some old device trees did not specify this, so fall + * back to old legacy behavior of assuming they can be reached at an + * offset from the first lane. + */ + if (cfg->is_dual_lane_phy) { + qphy->tx2 = of_iomap(np, 3); + qphy->rx2 = of_iomap(np, 4); + if (!qphy->tx2 || !qphy->rx2) { + dev_warn(dev, + "Underspecified device tree, falling back to legacy register regions\n"); + + /* In the old version, pcs_misc is at index 3. 
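+ * Reuse that mapping for pcs_misc, then derive tx2/rx2 from the first lane at the fixed legacy lane stride.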
*/ + qphy->pcs_misc = qphy->tx2; + qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE; + qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE; + + } else { + qphy->pcs_misc = of_iomap(np, 5); + } + + } else { + qphy->pcs_misc = of_iomap(np, 3); + } + + if (!qphy->pcs_misc) + dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); + + snprintf(prop_name, sizeof(prop_name), "pipe%d", id); + qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name); + if (IS_ERR(qphy->pipe_clk)) { + return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk), + "failed to get lane%d pipe clock\n", id); + } + + generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_usb_ops); + if (IS_ERR(generic_phy)) { + ret = PTR_ERR(generic_phy); + dev_err(dev, "failed to create qphy %d\n", ret); + return ret; + } + + qphy->phy = generic_phy; + qphy->index = id; + qphy->qmp = qmp; + qmp->phys[id] = qphy; + phy_set_drvdata(generic_phy, qphy); + + return 0; +} + +static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = { + { + .compatible = "qcom,ipq8074-qmp-usb3-phy", + .data = &ipq8074_usb3phy_cfg, + }, { + .compatible = "qcom,msm8996-qmp-usb3-phy", + .data = &msm8996_usb3phy_cfg, + }, { + .compatible = "qcom,ipq6018-qmp-usb3-phy", + .data = &ipq8074_usb3phy_cfg, + }, { + .compatible = "qcom,sc7180-qmp-usb3-phy", + .data = &sc7180_usb3phy_cfg, + }, { + .compatible = "qcom,sc8180x-qmp-usb3-phy", + .data = &sm8150_usb3phy_cfg, + }, { + .compatible = "qcom,sdm845-qmp-usb3-phy", + .data = &qmp_v3_usb3phy_cfg, + }, { + .compatible = "qcom,sdm845-qmp-usb3-uni-phy", + .data = &qmp_v3_usb3_uniphy_cfg, + }, { + .compatible = "qcom,msm8998-qmp-usb3-phy", + .data = &msm8998_usb3phy_cfg, + }, { + .compatible = "qcom,sm8150-qmp-usb3-phy", + .data = &sm8150_usb3phy_cfg, + }, { + .compatible = "qcom,sm8150-qmp-usb3-uni-phy", + .data = &sm8150_usb3_uniphy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-usb3-phy", + .data = &sm8250_usb3phy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-usb3-uni-phy", + .data = &sm8250_usb3_uniphy_cfg, + }, { + .compatible = "qcom,sdx55-qmp-usb3-uni-phy", + .data = &sdx55_usb3_uniphy_cfg, + }, { + .compatible = "qcom,sdx65-qmp-usb3-uni-phy", + .data = &sdx65_usb3_uniphy_cfg, + }, { + .compatible = "qcom,sm8350-qmp-usb3-phy", + .data = &sm8350_usb3phy_cfg, + }, { + .compatible = "qcom,sm8350-qmp-usb3-uni-phy", + .data = &sm8350_usb3_uniphy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-usb3-phy", + .data = &sm8350_usb3phy_cfg, + }, { + .compatible = "qcom,qcm2290-qmp-usb3-phy", + .data = &qcm2290_usb3phy_cfg, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_qmp_phy_usb_of_match_table); + +static const struct dev_pm_ops qcom_qmp_phy_usb_pm_ops = { + SET_RUNTIME_PM_OPS(qcom_qmp_phy_usb_runtime_suspend, + qcom_qmp_phy_usb_runtime_resume, NULL) +}; + +static int qcom_qmp_phy_usb_probe(struct platform_device *pdev) +{ + struct qcom_qmp *qmp; + struct device *dev = &pdev->dev; + struct device_node *child; + struct phy_provider *phy_provider; + void __iomem *serdes; + const struct qmp_phy_cfg *cfg = NULL; + int num, id; + int ret; + + qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = dev; + dev_set_drvdata(dev, qmp); + + /* Get the specific init parameters of QMP phy */ + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + + /* per PHY serdes; usually located at base address */ + serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(serdes)) + return PTR_ERR(serdes); + + /* per PHY dp_com; if PHY has dp_com control block */ + if (cfg->has_phy_dp_com_ctrl) { + 
qmp->dp_com = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(qmp->dp_com)) + return PTR_ERR(qmp->dp_com); + } + + ret = qcom_qmp_phy_usb_clk_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_usb_reset_init(dev, cfg); + if (ret) + return ret; + + ret = qcom_qmp_phy_usb_vreg_init(dev, cfg); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get regulator supplies: %d\n", + ret); + return ret; + } + + num = of_get_available_child_count(dev->of_node); + /* do we have a rogue child node? */ + if (num > 1) + return -EINVAL; + + qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); + if (!qmp->phys) + return -ENOMEM; + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + /* + * Prevent runtime pm from being ON by default. Users can enable + * it using power/control in sysfs. + */ + pm_runtime_forbid(dev); + + id = 0; + for_each_available_child_of_node(dev->of_node, child) { + /* Create per-lane phy */ + ret = qcom_qmp_phy_usb_create(dev, child, id, serdes, cfg); + if (ret) { + dev_err(dev, "failed to create lane%d phy, %d\n", + id, ret); + goto err_node_put; + } + + /* + * Register the pipe clock provided by phy. + * See the function description for details of this pipe clock. + */ + ret = phy_pipe_clk_register(qmp, child); + if (ret) { + dev_err(qmp->dev, + "failed to register pipe clock source\n"); + goto err_node_put; + } + + id++; + } + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (!IS_ERR(phy_provider)) + dev_info(dev, "Registered Qcom-QMP phy\n"); + else + pm_runtime_disable(dev); + + return PTR_ERR_OR_ZERO(phy_provider); + +err_node_put: + pm_runtime_disable(dev); + of_node_put(child); + return ret; +} + +static struct platform_driver qcom_qmp_phy_usb_driver = { + .probe = qcom_qmp_phy_usb_probe, + .driver = { + .name = "qcom-qmp-usb-phy", + .pm = &qcom_qmp_phy_usb_pm_ops, + .of_match_table = qcom_qmp_phy_usb_of_match_table, + }, +}; + +module_platform_driver(qcom_qmp_phy_usb_driver); + +MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>"); +MODULE_DESCRIPTION("Qualcomm QMP USB PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/samsung/phy-fsd-ufs.c b/drivers/phy/samsung/phy-fsd-ufs.c new file mode 100644 index 0000000000..d36cabd534 --- /dev/null +++ b/drivers/phy/samsung/phy-fsd-ufs.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UFS PHY driver data for FSD SoC + * + * Copyright (C) 2022 Samsung Electronics Co., Ltd. 
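+ * This file only carries the FSD-specific calibration tables and drvdata consumed by the common Samsung UFS PHY driver.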
+ * + */ +#include "phy-samsung-ufs.h" + +#define FSD_EMBEDDED_COMBO_PHY_CTRL 0x724 +#define FSD_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1 +#define FSD_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0) +#define FSD_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS 0x6e + +static const struct samsung_ufs_phy_cfg fsd_pre_init_cfg[] = { + PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_ANY), + PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_ANY), + PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_ANY), + PHY_COMN_REG_CFG(0x017, 0x94, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x035, 0x58, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x037, 0x40, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x03b, 0x83, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x04c, 0x5b, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x05c, 0x14, PWR_MODE_ANY), + END_UFS_PHY_CFG +}; + +/* Calibration for HS mode series A/B */ +static const struct samsung_ufs_phy_cfg fsd_pre_pwr_hs_cfg[] = { + END_UFS_PHY_CFG +}; + +/* Calibration for HS mode series A/B after PMC */ +static const struct samsung_ufs_phy_cfg fsd_post_pwr_hs_cfg[] = { + END_UFS_PHY_CFG +}; + +static const struct samsung_ufs_phy_cfg *fsd_ufs_phy_cfgs[CFG_TAG_MAX] = { + [CFG_PRE_INIT] = fsd_pre_init_cfg, + [CFG_PRE_PWR_HS] = fsd_pre_pwr_hs_cfg, + [CFG_POST_PWR_HS] = fsd_post_pwr_hs_cfg, +}; + +static const char * const fsd_ufs_phy_clks[] = { + "ref_clk", +}; + +const struct samsung_ufs_phy_drvdata fsd_ufs_phy = { + .cfgs = fsd_ufs_phy_cfgs, + .isol = { + .offset = FSD_EMBEDDED_COMBO_PHY_CTRL, + .mask = FSD_EMBEDDED_COMBO_PHY_CTRL_MASK, + .en = FSD_EMBEDDED_COMBO_PHY_CTRL_EN, + }, + .clk_list = fsd_ufs_phy_clks, + .num_clks = ARRAY_SIZE(fsd_ufs_phy_clks), + .cdr_lock_status_offset = FSD_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS, +}; diff --git a/drivers/pinctrl/freescale/pinctrl-imxrt1170.c b/drivers/pinctrl/freescale/pinctrl-imxrt1170.c new file mode 100644 index 0000000000..5da1545fde --- /dev/null +++ b/drivers/pinctrl/freescale/pinctrl-imxrt1170.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 + * Author(s): Jesse Taube <Mr.Bossman075@gmail.com> + */ + +#include +#include +#include +#include +#include + +#include "pinctrl-imx.h" + +enum imxrt1170_pads { + IMXRT1170_PAD_RESERVE0, + IMXRT1170_PAD_RESERVE1, + IMXRT1170_PAD_RESERVE2, + IMXRT1170_PAD_RESERVE3, + IMXRT1170_PAD_EMC_B1_00, + IMXRT1170_PAD_EMC_B1_01, + IMXRT1170_PAD_EMC_B1_02, + IMXRT1170_PAD_EMC_B1_03, + IMXRT1170_PAD_EMC_B1_04, + IMXRT1170_PAD_EMC_B1_05, + IMXRT1170_PAD_EMC_B1_06, + IMXRT1170_PAD_EMC_B1_07, + IMXRT1170_PAD_EMC_B1_08, + IMXRT1170_PAD_EMC_B1_09, + IMXRT1170_PAD_EMC_B1_10, + IMXRT1170_PAD_EMC_B1_11, + IMXRT1170_PAD_EMC_B1_12, + IMXRT1170_PAD_EMC_B1_13, + IMXRT1170_PAD_EMC_B1_14, + IMXRT1170_PAD_EMC_B1_15, + IMXRT1170_PAD_EMC_B1_16, + IMXRT1170_PAD_EMC_B1_17, + IMXRT1170_PAD_EMC_B1_18, + IMXRT1170_PAD_EMC_B1_19, + IMXRT1170_PAD_EMC_B1_20, + IMXRT1170_PAD_EMC_B1_21, + IMXRT1170_PAD_EMC_B1_22, + IMXRT1170_PAD_EMC_B1_23, + IMXRT1170_PAD_EMC_B1_24, + IMXRT1170_PAD_EMC_B1_25, + IMXRT1170_PAD_EMC_B1_26, + IMXRT1170_PAD_EMC_B1_27, + IMXRT1170_PAD_EMC_B1_28, + IMXRT1170_PAD_EMC_B1_29, + IMXRT1170_PAD_EMC_B1_30, + IMXRT1170_PAD_EMC_B1_31, + IMXRT1170_PAD_EMC_B1_32, + IMXRT1170_PAD_EMC_B1_33, + IMXRT1170_PAD_EMC_B1_34, + IMXRT1170_PAD_EMC_B1_35, + IMXRT1170_PAD_EMC_B1_36, + IMXRT1170_PAD_EMC_B1_37, + IMXRT1170_PAD_EMC_B1_38, + IMXRT1170_PAD_EMC_B1_39, + IMXRT1170_PAD_EMC_B1_40, + 
IMXRT1170_PAD_EMC_B1_41, + IMXRT1170_PAD_EMC_B2_00, + IMXRT1170_PAD_EMC_B2_01, + IMXRT1170_PAD_EMC_B2_02, + IMXRT1170_PAD_EMC_B2_03, + IMXRT1170_PAD_EMC_B2_04, + IMXRT1170_PAD_EMC_B2_05, + IMXRT1170_PAD_EMC_B2_06, + IMXRT1170_PAD_EMC_B2_07, + IMXRT1170_PAD_EMC_B2_08, + IMXRT1170_PAD_EMC_B2_09, + IMXRT1170_PAD_EMC_B2_10, + IMXRT1170_PAD_EMC_B2_11, + IMXRT1170_PAD_EMC_B2_12, + IMXRT1170_PAD_EMC_B2_13, + IMXRT1170_PAD_EMC_B2_14, + IMXRT1170_PAD_EMC_B2_15, + IMXRT1170_PAD_EMC_B2_16, + IMXRT1170_PAD_EMC_B2_17, + IMXRT1170_PAD_EMC_B2_18, + IMXRT1170_PAD_EMC_B2_19, + IMXRT1170_PAD_EMC_B2_20, + IMXRT1170_PAD_AD_00, + IMXRT1170_PAD_AD_01, + IMXRT1170_PAD_AD_02, + IMXRT1170_PAD_AD_03, + IMXRT1170_PAD_AD_04, + IMXRT1170_PAD_AD_05, + IMXRT1170_PAD_AD_06, + IMXRT1170_PAD_AD_07, + IMXRT1170_PAD_AD_08, + IMXRT1170_PAD_AD_09, + IMXRT1170_PAD_AD_10, + IMXRT1170_PAD_AD_11, + IMXRT1170_PAD_AD_12, + IMXRT1170_PAD_AD_13, + IMXRT1170_PAD_AD_14, + IMXRT1170_PAD_AD_15, + IMXRT1170_PAD_AD_16, + IMXRT1170_PAD_AD_17, + IMXRT1170_PAD_AD_18, + IMXRT1170_PAD_AD_19, + IMXRT1170_PAD_AD_20, + IMXRT1170_PAD_AD_21, + IMXRT1170_PAD_AD_22, + IMXRT1170_PAD_AD_23, + IMXRT1170_PAD_AD_24, + IMXRT1170_PAD_AD_25, + IMXRT1170_PAD_AD_26, + IMXRT1170_PAD_AD_27, + IMXRT1170_PAD_AD_28, + IMXRT1170_PAD_AD_29, + IMXRT1170_PAD_AD_30, + IMXRT1170_PAD_AD_31, + IMXRT1170_PAD_AD_32, + IMXRT1170_PAD_AD_33, + IMXRT1170_PAD_AD_34, + IMXRT1170_PAD_AD_35, + IMXRT1170_PAD_SD_B1_00, + IMXRT1170_PAD_SD_B1_01, + IMXRT1170_PAD_SD_B1_02, + IMXRT1170_PAD_SD_B1_03, + IMXRT1170_PAD_SD_B1_04, + IMXRT1170_PAD_SD_B1_05, + IMXRT1170_PAD_SD_B2_00, + IMXRT1170_PAD_SD_B2_01, + IMXRT1170_PAD_SD_B2_02, + IMXRT1170_PAD_SD_B2_03, + IMXRT1170_PAD_SD_B2_04, + IMXRT1170_PAD_SD_B2_05, + IMXRT1170_PAD_SD_B2_06, + IMXRT1170_PAD_SD_B2_07, + IMXRT1170_PAD_SD_B2_08, + IMXRT1170_PAD_SD_B2_09, + IMXRT1170_PAD_SD_B2_10, + IMXRT1170_PAD_SD_B2_11, + IMXRT1170_PAD_DISP_B1_00, + IMXRT1170_PAD_DISP_B1_01, + IMXRT1170_PAD_DISP_B1_02, + IMXRT1170_PAD_DISP_B1_03, + IMXRT1170_PAD_DISP_B1_04, + IMXRT1170_PAD_DISP_B1_05, + IMXRT1170_PAD_DISP_B1_06, + IMXRT1170_PAD_DISP_B1_07, + IMXRT1170_PAD_DISP_B1_08, + IMXRT1170_PAD_DISP_B1_09, + IMXRT1170_PAD_DISP_B1_10, + IMXRT1170_PAD_DISP_B1_11, + IMXRT1170_PAD_DISP_B2_00, + IMXRT1170_PAD_DISP_B2_01, + IMXRT1170_PAD_DISP_B2_02, + IMXRT1170_PAD_DISP_B2_03, + IMXRT1170_PAD_DISP_B2_04, + IMXRT1170_PAD_DISP_B2_05, + IMXRT1170_PAD_DISP_B2_06, + IMXRT1170_PAD_DISP_B2_07, + IMXRT1170_PAD_DISP_B2_08, + IMXRT1170_PAD_DISP_B2_09, + IMXRT1170_PAD_DISP_B2_10, + IMXRT1170_PAD_DISP_B2_11, + IMXRT1170_PAD_DISP_B2_12, + IMXRT1170_PAD_DISP_B2_13, + IMXRT1170_PAD_DISP_B2_14, + IMXRT1170_PAD_DISP_B2_15, +}; + +/* Pad names for the pinmux subsystem */ +static const struct pinctrl_pin_desc imxrt1170_pinctrl_pads[] = { + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE0), + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE1), + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE2), + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE3), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_12), + 
IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_15), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_16), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_17), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_18), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_19), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_20), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_21), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_22), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_23), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_24), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_25), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_26), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_27), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_28), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_29), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_30), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_31), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_32), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_33), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_34), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_35), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_36), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_37), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_38), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_39), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_40), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_41), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_12), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_15), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_16), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_17), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_18), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_19), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_20), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_12), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_15), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_16), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_17), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_18), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_19), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_20), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_21), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_22), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_23), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_24), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_25), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_26), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_27), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_28), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_29), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_30), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_31), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_32), + 
IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_33), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_34), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_35), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_12), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_15), +}; + +static const struct imx_pinctrl_soc_info imxrt1170_pinctrl_info = { + .pins = imxrt1170_pinctrl_pads, + .npins = ARRAY_SIZE(imxrt1170_pinctrl_pads), + .gpr_compatible = "fsl,imxrt1170-iomuxc-gpr", +}; + +static const struct of_device_id imxrt1170_pinctrl_of_match[] = { + { .compatible = "fsl,imxrt1170-iomuxc", .data = &imxrt1170_pinctrl_info, }, + { /* sentinel */ } +}; + +static int imxrt1170_pinctrl_probe(struct platform_device *pdev) +{ + return imx_pinctrl_probe(pdev, &imxrt1170_pinctrl_info); +} + +static struct platform_driver imxrt1170_pinctrl_driver = { + .driver = { + .name = "imxrt1170-pinctrl", + .of_match_table = of_match_ptr(imxrt1170_pinctrl_of_match), + .suppress_bind_attrs = true, + }, + .probe = imxrt1170_pinctrl_probe, +}; + +static int __init imxrt1170_pinctrl_init(void) +{ + return platform_driver_register(&imxrt1170_pinctrl_driver); +} +arch_initcall(imxrt1170_pinctrl_init); diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c new file mode 100644 index 0000000000..9576dcd1cb --- /dev/null +++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Meteor Lake PCH pinctrl/GPIO driver + * + * Copyright (C) 2022, Intel Corporation + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + */ + +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-intel.h" + +#define 
MTL_PAD_OWN 0x0b0 +#define MTL_PADCFGLOCK 0x110 +#define MTL_HOSTSW_OWN 0x140 +#define MTL_GPI_IS 0x200 +#define MTL_GPI_IE 0x210 + +#define MTL_GPP(r, s, e, g) \ + { \ + .reg_num = (r), \ + .base = (s), \ + .size = ((e) - (s) + 1), \ + .gpio_base = (g), \ + } + +#define MTL_COMMUNITY(b, s, e, g) \ + { \ + .barno = (b), \ + .padown_offset = MTL_PAD_OWN, \ + .padcfglock_offset = MTL_PADCFGLOCK, \ + .hostown_offset = MTL_HOSTSW_OWN, \ + .is_offset = MTL_GPI_IS, \ + .ie_offset = MTL_GPI_IE, \ + .pin_base = (s), \ + .npins = ((e) - (s) + 1), \ + .gpps = (g), \ + .ngpps = ARRAY_SIZE(g), \ + } + +/* Meteor Lake-P */ +static const struct pinctrl_pin_desc mtlp_pins[] = { + /* CPU */ + PINCTRL_PIN(0, "PECI"), + PINCTRL_PIN(1, "UFS_RESET_B"), + PINCTRL_PIN(2, "VIDSOUT"), + PINCTRL_PIN(3, "VIDSCK"), + PINCTRL_PIN(4, "VIDALERT_B"), + /* GPP_V */ + PINCTRL_PIN(5, "BATLOW_B"), + PINCTRL_PIN(6, "AC_PRESENT"), + PINCTRL_PIN(7, "SOC_WAKE_B"), + PINCTRL_PIN(8, "PWRBTN_B"), + PINCTRL_PIN(9, "SLP_S3_B"), + PINCTRL_PIN(10, "SLP_S4_B"), + PINCTRL_PIN(11, "SLP_A_B"), + PINCTRL_PIN(12, "GPP_V_7"), + PINCTRL_PIN(13, "SUSCLK"), + PINCTRL_PIN(14, "SLP_WLAN_B"), + PINCTRL_PIN(15, "SLP_S5_B"), + PINCTRL_PIN(16, "LANPHYPC"), + PINCTRL_PIN(17, "SLP_LAN_B"), + PINCTRL_PIN(18, "GPP_V_13"), + PINCTRL_PIN(19, "WAKE_B"), + PINCTRL_PIN(20, "GPP_V_15"), + PINCTRL_PIN(21, "GPP_V_16"), + PINCTRL_PIN(22, "GPP_V_17"), + PINCTRL_PIN(23, "GPP_V_18"), + PINCTRL_PIN(24, "CATERR_B"), + PINCTRL_PIN(25, "PROCHOT_B"), + PINCTRL_PIN(26, "THERMTRIP_B"), + PINCTRL_PIN(27, "DSI_DE_TE_2_GENLOCK_REF"), + PINCTRL_PIN(28, "DSI_DE_TE_1_DISP_UTILS"), + /* GPP_C */ + PINCTRL_PIN(29, "SMBCLK"), + PINCTRL_PIN(30, "SMBDATA"), + PINCTRL_PIN(31, "SMBALERT_B"), + PINCTRL_PIN(32, "SML0CLK"), + PINCTRL_PIN(33, "SML0DATA"), + PINCTRL_PIN(34, "GPP_C_5"), + PINCTRL_PIN(35, "GPP_C_6"), + PINCTRL_PIN(36, "GPP_C_7"), + PINCTRL_PIN(37, "GPP_C_8"), + PINCTRL_PIN(38, "GPP_C_9"), + PINCTRL_PIN(39, "GPP_C_10"), + PINCTRL_PIN(40, "GPP_C_11"), + PINCTRL_PIN(41, "GPP_C_12"), + PINCTRL_PIN(42, "GPP_C_13"), + PINCTRL_PIN(43, "GPP_C_14"), + PINCTRL_PIN(44, "GPP_C_15"), + PINCTRL_PIN(45, "GPP_C_16"), + PINCTRL_PIN(46, "GPP_C_17"), + PINCTRL_PIN(47, "GPP_C_18"), + PINCTRL_PIN(48, "GPP_C_19"), + PINCTRL_PIN(49, "GPP_C_20"), + PINCTRL_PIN(50, "GPP_C_21"), + PINCTRL_PIN(51, "GPP_C_22"), + PINCTRL_PIN(52, "GPP_C_23"), + /* GPP_A */ + PINCTRL_PIN(53, "ESPI_IO_0"), + PINCTRL_PIN(54, "ESPI_IO_1"), + PINCTRL_PIN(55, "ESPI_IO_2"), + PINCTRL_PIN(56, "ESPI_IO_3"), + PINCTRL_PIN(57, "ESPI_CS0_B"), + PINCTRL_PIN(58, "ESPI_CLK"), + PINCTRL_PIN(59, "ESPI_RESET_B"), + PINCTRL_PIN(60, "GPP_A_7"), + PINCTRL_PIN(61, "GPP_A_8"), + PINCTRL_PIN(62, "GPP_A_9"), + PINCTRL_PIN(63, "GPP_A_10"), + PINCTRL_PIN(64, "GPP_A_11"), + PINCTRL_PIN(65, "GPP_A_12"), + PINCTRL_PIN(66, "ESPI_CS1_B"), + PINCTRL_PIN(67, "ESPI_CS2_B"), + PINCTRL_PIN(68, "ESPI_CS3_B"), + PINCTRL_PIN(69, "ESPI_ALERT0_B"), + PINCTRL_PIN(70, "ESPI_ALERT1_B"), + PINCTRL_PIN(71, "ESPI_ALERT2_B"), + PINCTRL_PIN(72, "ESPI_ALERT3_B"), + PINCTRL_PIN(73, "GPP_A_20"), + PINCTRL_PIN(74, "GPP_A_21"), + PINCTRL_PIN(75, "GPP_A_22"), + PINCTRL_PIN(76, "GPP_A_23"), + PINCTRL_PIN(77, "ESPI_CLK_LOOPBK"), + /* GPP_E */ + PINCTRL_PIN(78, "GPP_E_0"), + PINCTRL_PIN(79, "GPP_E_1"), + PINCTRL_PIN(80, "GPP_E_2"), + PINCTRL_PIN(81, "GPP_E_3"), + PINCTRL_PIN(82, "GPP_E_4"), + PINCTRL_PIN(83, "GPP_E_5"), + PINCTRL_PIN(84, "GPP_E_6"), + PINCTRL_PIN(85, "GPP_E_7"), + PINCTRL_PIN(86, "GPP_E_8"), + PINCTRL_PIN(87, "GPP_E_9"), + PINCTRL_PIN(88, "GPP_E_10"), + 
PINCTRL_PIN(89, "GPP_E_11"), + PINCTRL_PIN(90, "GPP_E_12"), + PINCTRL_PIN(91, "GPP_E_13"), + PINCTRL_PIN(92, "GPP_E_14"), + PINCTRL_PIN(93, "SLP_DRAM_B"), + PINCTRL_PIN(94, "GPP_E_16"), + PINCTRL_PIN(95, "GPP_E_17"), + PINCTRL_PIN(96, "GPP_E_18"), + PINCTRL_PIN(97, "GPP_E_19"), + PINCTRL_PIN(98, "GPP_E_20"), + PINCTRL_PIN(99, "GPP_E_21"), + PINCTRL_PIN(100, "DNX_FORCE_RELOAD"), + PINCTRL_PIN(101, "GPP_E_23"), + PINCTRL_PIN(102, "THC0_GSPI0_CLK_LOOPBK"), + /* GPP_H */ + PINCTRL_PIN(103, "GPP_H_0"), + PINCTRL_PIN(104, "GPP_H_1"), + PINCTRL_PIN(105, "GPP_H_2"), + PINCTRL_PIN(106, "GPP_H_3"), + PINCTRL_PIN(107, "GPP_H_4"), + PINCTRL_PIN(108, "GPP_H_5"), + PINCTRL_PIN(109, "GPP_H_6"), + PINCTRL_PIN(110, "GPP_H_7"), + PINCTRL_PIN(111, "GPP_H_8"), + PINCTRL_PIN(112, "GPP_H_9"), + PINCTRL_PIN(113, "GPP_H_10"), + PINCTRL_PIN(114, "GPP_H_11"), + PINCTRL_PIN(115, "GPP_H_12"), + PINCTRL_PIN(116, "CPU_C10_GATE_B"), + PINCTRL_PIN(117, "GPP_H_14"), + PINCTRL_PIN(118, "GPP_H_15"), + PINCTRL_PIN(119, "GPP_H_16"), + PINCTRL_PIN(120, "GPP_H_17"), + PINCTRL_PIN(121, "GPP_H_18"), + PINCTRL_PIN(122, "GPP_H_19"), + PINCTRL_PIN(123, "GPP_H_20"), + PINCTRL_PIN(124, "GPP_H_21"), + PINCTRL_PIN(125, "GPP_H_22"), + PINCTRL_PIN(126, "GPP_H_23"), + PINCTRL_PIN(127, "LPI3C1_CLK_LOOPBK"), + PINCTRL_PIN(128, "I3C0_CLK_LOOPBK"), + /* GPP_F */ + PINCTRL_PIN(129, "CNV_BRI_DT"), + PINCTRL_PIN(130, "CNV_BRI_RSP"), + PINCTRL_PIN(131, "CNV_RGI_DT"), + PINCTRL_PIN(132, "CNV_RGI_RSP"), + PINCTRL_PIN(133, "CNV_RF_RESET_B"), + PINCTRL_PIN(134, "CRF_CLKREQ"), + PINCTRL_PIN(135, "GPP_F_6"), + PINCTRL_PIN(136, "FUSA_DIAGTEST_EN"), + PINCTRL_PIN(137, "FUSA_DIAGTEST_MODE"), + PINCTRL_PIN(138, "BOOTMPC"), + PINCTRL_PIN(139, "GPP_F_10"), + PINCTRL_PIN(140, "GPP_F_11"), + PINCTRL_PIN(141, "GSXDOUT"), + PINCTRL_PIN(142, "GSXSLOAD"), + PINCTRL_PIN(143, "GSXDIN"), + PINCTRL_PIN(144, "GSXSRESETB"), + PINCTRL_PIN(145, "GSXCLK"), + PINCTRL_PIN(146, "GMII_MDC_0"), + PINCTRL_PIN(147, "GMII_MDIO_0"), + PINCTRL_PIN(148, "GPP_F_19"), + PINCTRL_PIN(149, "GPP_F_20"), + PINCTRL_PIN(150, "GPP_F_21"), + PINCTRL_PIN(151, "GPP_F_22"), + PINCTRL_PIN(152, "GPP_F_23"), + PINCTRL_PIN(153, "THC1_GSPI1_CLK_LOOPBK"), + PINCTRL_PIN(154, "GSPI0A_CLK_LOOPBK"), + /* SPI0 */ + PINCTRL_PIN(155, "SPI0_IO_2"), + PINCTRL_PIN(156, "SPI0_IO_3"), + PINCTRL_PIN(157, "SPI0_MOSI_IO_0"), + PINCTRL_PIN(158, "SPI0_MISO_IO_1"), + PINCTRL_PIN(159, "SPI0_TPM_CS_B"), + PINCTRL_PIN(160, "SPI0_FLASH_0_CS_B"), + PINCTRL_PIN(161, "SPI0_FLASH_1_CS_B"), + PINCTRL_PIN(162, "SPI0_CLK"), + PINCTRL_PIN(163, "L_BKLTEN"), + PINCTRL_PIN(164, "L_BKLTCTL"), + PINCTRL_PIN(165, "L_VDDEN"), + PINCTRL_PIN(166, "SYS_PWROK"), + PINCTRL_PIN(167, "SYS_RESET_B"), + PINCTRL_PIN(168, "MLK_RST_B"), + PINCTRL_PIN(169, "SPI0_CLK_LOOPBK"), + /* vGPIO_3 */ + PINCTRL_PIN(170, "ESPI_USB_OCB_0"), + PINCTRL_PIN(171, "ESPI_USB_OCB_1"), + PINCTRL_PIN(172, "ESPI_USB_OCB_2"), + PINCTRL_PIN(173, "ESPI_USB_OCB_3"), + PINCTRL_PIN(174, "USB_CPU_OCB_0"), + PINCTRL_PIN(175, "USB_CPU_OCB_1"), + PINCTRL_PIN(176, "USB_CPU_OCB_2"), + PINCTRL_PIN(177, "USB_CPU_OCB_3"), + PINCTRL_PIN(178, "TS0_IN_INT"), + PINCTRL_PIN(179, "TS1_IN_INT"), + PINCTRL_PIN(180, "THC0_WOT_INT"), + PINCTRL_PIN(181, "THC1_WOT_INT"), + PINCTRL_PIN(182, "THC0_WHC_INT"), + PINCTRL_PIN(183, "THC1_WHC_INT"), + /* GPP_S */ + PINCTRL_PIN(184, "GPP_S_0"), + PINCTRL_PIN(185, "GPP_S_1"), + PINCTRL_PIN(186, "GPP_S_2"), + PINCTRL_PIN(187, "GPP_S_3"), + PINCTRL_PIN(188, "GPP_S_4"), + PINCTRL_PIN(189, "GPP_S_5"), + PINCTRL_PIN(190, "GPP_S_6"), + PINCTRL_PIN(191, "GPP_S_7"), + 
/* JTAG */ + PINCTRL_PIN(192, "JTAG_MBPB0"), + PINCTRL_PIN(193, "JTAG_MBPB1"), + PINCTRL_PIN(194, "JTAG_MBPB2"), + PINCTRL_PIN(195, "JTAG_MBPB3"), + PINCTRL_PIN(196, "JTAG_TDO"), + PINCTRL_PIN(197, "PRDY_B"), + PINCTRL_PIN(198, "PREQ_B"), + PINCTRL_PIN(199, "JTAG_TDI"), + PINCTRL_PIN(200, "JTAG_TMS"), + PINCTRL_PIN(201, "JTAG_TCK"), + PINCTRL_PIN(202, "DBG_PMODE"), + PINCTRL_PIN(203, "JTAG_TRST_B"), + /* GPP_B */ + PINCTRL_PIN(204, "ADM_VID_0"), + PINCTRL_PIN(205, "ADM_VID_1"), + PINCTRL_PIN(206, "GPP_B_2"), + PINCTRL_PIN(207, "GPP_B_3"), + PINCTRL_PIN(208, "GPP_B_4"), + PINCTRL_PIN(209, "GPP_B_5"), + PINCTRL_PIN(210, "GPP_B_6"), + PINCTRL_PIN(211, "GPP_B_7"), + PINCTRL_PIN(212, "GPP_B_8"), + PINCTRL_PIN(213, "GPP_B_9"), + PINCTRL_PIN(214, "GPP_B_10"), + PINCTRL_PIN(215, "GPP_B_11"), + PINCTRL_PIN(216, "SLP_S0_B"), + PINCTRL_PIN(217, "PLTRST_B"), + PINCTRL_PIN(218, "GPP_B_14"), + PINCTRL_PIN(219, "GPP_B_15"), + PINCTRL_PIN(220, "GPP_B_16"), + PINCTRL_PIN(221, "GPP_B_17"), + PINCTRL_PIN(222, "GPP_B_18"), + PINCTRL_PIN(223, "GPP_B_19"), + PINCTRL_PIN(224, "GPP_B_20"), + PINCTRL_PIN(225, "GPP_B_21"), + PINCTRL_PIN(226, "GPP_B_22"), + PINCTRL_PIN(227, "GPP_B_23"), + PINCTRL_PIN(228, "ISH_I3C0_CLK_LOOPBK"), + /* GPP_D */ + PINCTRL_PIN(229, "GPP_D_0"), + PINCTRL_PIN(230, "GPP_D_1"), + PINCTRL_PIN(231, "GPP_D_2"), + PINCTRL_PIN(232, "GPP_D_3"), + PINCTRL_PIN(233, "GPP_D_4"), + PINCTRL_PIN(234, "GPP_D_5"), + PINCTRL_PIN(235, "GPP_D_6"), + PINCTRL_PIN(236, "GPP_D_7"), + PINCTRL_PIN(237, "GPP_D_8"), + PINCTRL_PIN(238, "GPP_D_9"), + PINCTRL_PIN(239, "HDA_BCLK"), + PINCTRL_PIN(240, "HDA_SYNC"), + PINCTRL_PIN(241, "HDA_SDO"), + PINCTRL_PIN(242, "HDA_SDI_0"), + PINCTRL_PIN(243, "GPP_D_14"), + PINCTRL_PIN(244, "GPP_D_15"), + PINCTRL_PIN(245, "GPP_D_16"), + PINCTRL_PIN(246, "HDA_RST_B"), + PINCTRL_PIN(247, "GPP_D_18"), + PINCTRL_PIN(248, "GPP_D_19"), + PINCTRL_PIN(249, "GPP_D_20"), + PINCTRL_PIN(250, "UFS_REFCLK"), + PINCTRL_PIN(251, "BPKI3C_SDA"), + PINCTRL_PIN(252, "BPKI3C_SCL"), + PINCTRL_PIN(253, "BOOTHALT_B"), + /* vGPIO */ + PINCTRL_PIN(254, "CNV_BTEN"), + PINCTRL_PIN(255, "CNV_BT_HOST_WAKEB"), + PINCTRL_PIN(256, "CNV_BT_IF_SELECT"), + PINCTRL_PIN(257, "vCNV_BT_UART_TXD"), + PINCTRL_PIN(258, "vCNV_BT_UART_RXD"), + PINCTRL_PIN(259, "vCNV_BT_UART_CTS_B"), + PINCTRL_PIN(260, "vCNV_BT_UART_RTS_B"), + PINCTRL_PIN(261, "vCNV_MFUART1_TXD"), + PINCTRL_PIN(262, "vCNV_MFUART1_RXD"), + PINCTRL_PIN(263, "vCNV_MFUART1_CTS_B"), + PINCTRL_PIN(264, "vCNV_MFUART1_RTS_B"), + PINCTRL_PIN(265, "vUART0_TXD"), + PINCTRL_PIN(266, "vUART0_RXD"), + PINCTRL_PIN(267, "vUART0_CTS_B"), + PINCTRL_PIN(268, "vUART0_RTS_B"), + PINCTRL_PIN(269, "vISH_UART0_TXD"), + PINCTRL_PIN(270, "vISH_UART0_RXD"), + PINCTRL_PIN(271, "vISH_UART0_CTS_B"), + PINCTRL_PIN(272, "vISH_UART0_RTS_B"), + PINCTRL_PIN(273, "vCNV_BT_I2S_BCLK"), + PINCTRL_PIN(274, "vCNV_BT_I2S_WS_SYNC"), + PINCTRL_PIN(275, "vCNV_BT_I2S_SDO"), + PINCTRL_PIN(276, "vCNV_BT_I2S_SDI"), + PINCTRL_PIN(277, "vI2S2_SCLK"), + PINCTRL_PIN(278, "vI2S2_SFRM"), + PINCTRL_PIN(279, "vI2S2_TXD"), + PINCTRL_PIN(280, "vI2S2_RXD"), + PINCTRL_PIN(281, "vCNV_BT_I2S_BCLK_2"), + PINCTRL_PIN(282, "vCNV_BT_I2S_WS_SYNC_2"), + PINCTRL_PIN(283, "vCNV_BT_I2S_SDO_2"), + PINCTRL_PIN(284, "vCNV_BT_I2S_SDI_2"), + PINCTRL_PIN(285, "vI2S2_SCLK_2"), + PINCTRL_PIN(286, "vI2S2_SFRM_2"), + PINCTRL_PIN(287, "vI2S2_TXD_2"), + PINCTRL_PIN(288, "vI2S2_RXD_2"), +}; + +static const struct intel_padgroup mtlp_community0_gpps[] = { + MTL_GPP(0, 0, 4, 0), /* CPU */ + MTL_GPP(1, 5, 28, 32), /* GPP_V */ + MTL_GPP(2, 29, 52, 
64), /* GPP_C */ +}; + +static const struct intel_padgroup mtlp_community1_gpps[] = { + MTL_GPP(0, 53, 77, 96), /* GPP_A */ + MTL_GPP(1, 78, 102, 128), /* GPP_E */ +}; + +static const struct intel_padgroup mtlp_community3_gpps[] = { + MTL_GPP(0, 103, 128, 160), /* GPP_H */ + MTL_GPP(1, 129, 154, 192), /* GPP_F */ + MTL_GPP(2, 155, 169, 224), /* SPI0 */ + MTL_GPP(3, 170, 183, 256), /* vGPIO_3 */ +}; + +static const struct intel_padgroup mtlp_community4_gpps[] = { + MTL_GPP(0, 184, 191, 288), /* GPP_S */ + MTL_GPP(1, 192, 203, 320), /* JTAG */ +}; + +static const struct intel_padgroup mtlp_community5_gpps[] = { + MTL_GPP(0, 204, 228, 352), /* GPP_B */ + MTL_GPP(1, 229, 253, 384), /* GPP_D */ + MTL_GPP(2, 254, 285, 416), /* vGPIO_0 */ + MTL_GPP(3, 286, 288, 448), /* vGPIO_1 */ +}; + +static const struct intel_community mtlp_communities[] = { + MTL_COMMUNITY(0, 0, 52, mtlp_community0_gpps), + MTL_COMMUNITY(1, 53, 102, mtlp_community1_gpps), + MTL_COMMUNITY(2, 103, 183, mtlp_community3_gpps), + MTL_COMMUNITY(3, 184, 203, mtlp_community4_gpps), + MTL_COMMUNITY(4, 204, 288, mtlp_community5_gpps), +}; + +static const struct intel_pinctrl_soc_data mtlp_soc_data = { + .pins = mtlp_pins, + .npins = ARRAY_SIZE(mtlp_pins), + .communities = mtlp_communities, + .ncommunities = ARRAY_SIZE(mtlp_communities), +}; + +static const struct acpi_device_id mtl_pinctrl_acpi_match[] = { + { "INTC1083", (kernel_ulong_t)&mtlp_soc_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, mtl_pinctrl_acpi_match); + +static INTEL_PINCTRL_PM_OPS(mtl_pinctrl_pm_ops); + +static struct platform_driver mtl_pinctrl_driver = { + .probe = intel_pinctrl_probe_by_hid, + .driver = { + .name = "meteorlake-pinctrl", + .acpi_match_table = mtl_pinctrl_acpi_match, + .pm = &mtl_pinctrl_pm_ops, + }, +}; +module_platform_driver(mtl_pinctrl_driver); + +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); +MODULE_DESCRIPTION("Intel Meteor Lake PCH pinctrl/GPIO driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6795.c b/drivers/pinctrl/mediatek/pinctrl-mt6795.c new file mode 100644 index 0000000000..f90152261a --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mt6795.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Collabora Ltd. 
* Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include "pinctrl-mtk-mt6795.h" +#include "pinctrl-paris.h" + +#define PIN_FIELD15(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \ + PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \ + _x_bits, 15, 0) + +#define PIN_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \ + PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \ + _x_bits, 16, 0) + +#define PINS_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)\ + PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \ + _x_bits, 16, 1) + +static const struct mtk_pin_field_calc mt6795_pin_dir_range[] = { + PIN_FIELD16(0, 196, 0x0, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_pullen_range[] = { + PIN_FIELD16(0, 196, 0x100, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_pullsel_range[] = { + PIN_FIELD16(0, 196, 0x200, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_do_range[] = { + PIN_FIELD16(0, 196, 0x400, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_di_range[] = { + PIN_FIELD16(0, 196, 0x500, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_mode_range[] = { + PIN_FIELD15(0, 196, 0x600, 0x10, 0, 3), +}; + +static const struct mtk_pin_field_calc mt6795_pin_ies_range[] = { + PINS_FIELD16(0, 4, 0x900, 0x10, 1, 1), + PINS_FIELD16(5, 9, 0x900, 0x10, 2, 1), + PINS_FIELD16(10, 15, 0x900, 0x10, 10, 1), + PINS_FIELD16(16, 16, 0x900, 0x10, 2, 1), + PINS_FIELD16(17, 19, 0x910, 0x10, 3, 1), + PINS_FIELD16(20, 22, 0x910, 0x10, 4, 1), + PINS_FIELD16(23, 26, 0xce0, 0x10, 14, 1), + PINS_FIELD16(27, 27, 0xcc0, 0x10, 14, 1), + PINS_FIELD16(28, 28, 0xcd0, 0x10, 14, 1), + PINS_FIELD16(29, 32, 0x900, 0x10, 3, 1), + PINS_FIELD16(33, 33, 0x900, 0x10, 4, 1), + PINS_FIELD16(34, 36, 0x900, 0x10, 5, 1), + PINS_FIELD16(37, 38, 0x900, 0x10, 6, 1), + PINS_FIELD16(39, 39, 0x900, 0x10, 7, 1), + PINS_FIELD16(40, 40, 0x900, 0x10, 8, 1), + PINS_FIELD16(41, 42, 0x900, 0x10, 9, 1), + PINS_FIELD16(43, 46, 0x900, 0x10, 11, 1), + PINS_FIELD16(47, 61, 0x920, 0x10, 3, 1), + PINS_FIELD16(62, 66, 0x920, 0x10, 4, 1), + PINS_FIELD16(67, 67, 0x920, 0x10, 3, 1), + PINS_FIELD16(68, 72, 0x920, 0x10, 5, 1), + PINS_FIELD16(73, 77, 0x920, 0x10, 6, 1), + PINS_FIELD16(78, 91, 0x920, 0x10, 7, 1), + PINS_FIELD16(92, 92, 0x900, 0x10, 13, 1), + PINS_FIELD16(93, 95, 0x900, 0x10, 14, 1), + PINS_FIELD16(96, 99, 0x900, 0x10, 15, 1), + PINS_FIELD16(100, 103, 0xca0, 0x10, 14, 1), + PINS_FIELD16(104, 104, 0xc80, 0x10, 14, 1), + PINS_FIELD16(105, 105, 0xc90, 0x10, 14, 1), + PINS_FIELD16(106, 107, 0x910, 0x10, 0, 1), + PINS_FIELD16(108, 112, 0x910, 0x10, 1, 1), + PINS_FIELD16(113, 116, 0x910, 0x10, 2, 1), + PINS_FIELD16(117, 118, 0x910, 0x10, 5, 1), + PINS_FIELD16(119, 124, 0x910, 0x10, 6, 1), + PINS_FIELD16(125, 126, 0x910, 0x10, 7, 1), + PINS_FIELD16(129, 129, 0x910, 0x10, 8, 1), + PINS_FIELD16(130, 131, 0x910, 0x10, 9, 1), + PINS_FIELD16(132, 135, 0x910, 0x10, 8, 1), + PINS_FIELD16(136, 137, 0x910, 0x10, 7, 1), + PINS_FIELD16(154, 161, 0xc20, 0x10, 14, 1), + PINS_FIELD16(162, 162, 0xc10, 0x10, 14, 1), + PINS_FIELD16(163, 163, 0xc00, 0x10, 14, 1), + PINS_FIELD16(164, 164, 0xd10, 0x10, 14, 1), + PINS_FIELD16(165, 165, 0xd00, 0x10, 14, 1), + PINS_FIELD16(166, 169, 0x910, 0x10, 14, 1), + PINS_FIELD16(176, 179, 0x910, 0x10, 15, 1), + PINS_FIELD16(180, 180, 0x920, 0x10, 0, 1), + PINS_FIELD16(181, 184, 0x920, 0x10, 1, 1), + PINS_FIELD16(185, 191, 0x920, 0x10, 2, 1), + PINS_FIELD16(192, 192, 0x920, 0x10, 
8, 1), + PINS_FIELD16(193, 194, 0x920, 0x10, 9, 1), + PINS_FIELD16(195, 196, 0x920, 0x10, 8, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_smt_range[] = { + PINS_FIELD16(0, 4, 0x930, 0x10, 1, 1), + PINS_FIELD16(5, 9, 0x930, 0x10, 2, 1), + PINS_FIELD16(10, 15, 0x930, 0x10, 10, 1), + PINS_FIELD16(16, 16, 0x930, 0x10, 2, 1), + PINS_FIELD16(17, 19, 0x940, 0x10, 3, 1), + PINS_FIELD16(20, 22, 0x940, 0x10, 4, 1), + PINS_FIELD16(23, 26, 0xce0, 0x10, 13, 1), + PINS_FIELD16(27, 27, 0xcc0, 0x10, 13, 1), + PINS_FIELD16(28, 28, 0xcd0, 0x10, 13, 1), + PINS_FIELD16(29, 32, 0x930, 0x10, 3, 1), + PINS_FIELD16(33, 33, 0x930, 0x10, 4, 1), + PINS_FIELD16(34, 36, 0x930, 0x10, 5, 1), + PINS_FIELD16(37, 38, 0x930, 0x10, 6, 1), + PINS_FIELD16(39, 39, 0x930, 0x10, 7, 1), + PINS_FIELD16(40, 40, 0x930, 0x10, 8, 1), + PINS_FIELD16(41, 42, 0x930, 0x10, 9, 1), + PINS_FIELD16(43, 46, 0x930, 0x10, 11, 1), + PINS_FIELD16(47, 61, 0x950, 0x10, 3, 1), + PINS_FIELD16(62, 66, 0x950, 0x10, 4, 1), + PINS_FIELD16(67, 67, 0x950, 0x10, 3, 1), + PINS_FIELD16(68, 72, 0x950, 0x10, 5, 1), + PINS_FIELD16(73, 77, 0x950, 0x10, 6, 1), + PINS_FIELD16(78, 91, 0x950, 0x10, 7, 1), + PINS_FIELD16(92, 92, 0x930, 0x10, 13, 1), + PINS_FIELD16(93, 95, 0x930, 0x10, 14, 1), + PINS_FIELD16(96, 99, 0x930, 0x10, 15, 1), + PINS_FIELD16(100, 103, 0xca0, 0x10, 13, 1), + PINS_FIELD16(104, 104, 0xc80, 0x10, 13, 1), + PINS_FIELD16(105, 105, 0xc90, 0x10, 13, 1), + PINS_FIELD16(106, 107, 0x940, 0x10, 0, 1), + PINS_FIELD16(108, 112, 0x940, 0x10, 1, 1), + PINS_FIELD16(113, 116, 0x940, 0x10, 2, 1), + PINS_FIELD16(117, 118, 0x940, 0x10, 5, 1), + PINS_FIELD16(119, 124, 0x940, 0x10, 6, 1), + PINS_FIELD16(125, 126, 0x940, 0x10, 7, 1), + PINS_FIELD16(129, 129, 0x940, 0x10, 8, 1), + PINS_FIELD16(130, 131, 0x940, 0x10, 9, 1), + PINS_FIELD16(132, 135, 0x940, 0x10, 8, 1), + PINS_FIELD16(136, 137, 0x940, 0x10, 7, 1), + PINS_FIELD16(154, 161, 0xc20, 0x10, 13, 1), + PINS_FIELD16(162, 162, 0xc10, 0x10, 13, 1), + PINS_FIELD16(163, 163, 0xc00, 0x10, 13, 1), + PINS_FIELD16(164, 164, 0xd10, 0x10, 13, 1), + PINS_FIELD16(165, 165, 0xd00, 0x10, 13, 1), + PINS_FIELD16(166, 169, 0x940, 0x10, 14, 1), + PINS_FIELD16(176, 179, 0x940, 0x10, 15, 1), + PINS_FIELD16(180, 180, 0x950, 0x10, 0, 1), + PINS_FIELD16(181, 184, 0x950, 0x10, 1, 1), + PINS_FIELD16(185, 191, 0x950, 0x10, 2, 1), + PINS_FIELD16(192, 192, 0x950, 0x10, 8, 1), + PINS_FIELD16(193, 194, 0x950, 0x10, 9, 1), + PINS_FIELD16(195, 196, 0x950, 0x10, 8, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_pupd_range[] = { + /* KROW */ + PIN_FIELD16(119, 119, 0xe00, 0x10, 2, 1), /* KROW0 */ + PIN_FIELD16(120, 120, 0xe00, 0x10, 6, 1), /* KROW1 */ + PIN_FIELD16(121, 121, 0xe00, 0x10, 10, 1), /* KROW2 */ + PIN_FIELD16(122, 122, 0xe10, 0x10, 2, 1), /* KCOL0 */ + PIN_FIELD16(123, 123, 0xe10, 0x10, 6, 1), /* KCOL1 */ + PIN_FIELD16(124, 124, 0xe10, 0x10, 10, 1), /* KCOL2 */ + + /* DPI */ + PIN_FIELD16(138, 138, 0xd50, 0x10, 2, 1), /* CK */ + PIN_FIELD16(139, 139, 0xd60, 0x10, 1, 1), /* DE */ + PIN_FIELD16(140, 140, 0xd70, 0x10, 1, 1), /* data0 */ + PIN_FIELD16(141, 141, 0xd70, 0x10, 3, 1), /* data1 */ + PIN_FIELD16(142, 142, 0xd70, 0x10, 5, 1), /* data2 */ + PIN_FIELD16(143, 143, 0xd70, 0x10, 7, 1), /* data3 */ + PIN_FIELD16(144, 144, 0xd50, 0x10, 5, 1), /* data4 */ + PIN_FIELD16(145, 145, 0xd50, 0x10, 7, 1), /* data5 */ + PIN_FIELD16(146, 146, 0xd60, 0x10, 7, 1), /* data6 */ + PIN_FIELD16(147, 147, 0xed0, 0x10, 6, 1), /* data7 */ + PIN_FIELD16(148, 148, 0xed0, 0x10, 8, 1), /* data8 */ + PIN_FIELD16(149, 149, 0xed0, 
0x10, 10, 1), /* data9 */ + PIN_FIELD16(150, 150, 0xed0, 0x10, 12, 1), /* data10 */ + PIN_FIELD16(151, 151, 0xed0, 0x10, 14, 1), /* data11 */ + PIN_FIELD16(152, 152, 0xd60, 0x10, 3, 1), /* hsync */ + PIN_FIELD16(153, 153, 0xd60, 0x10, 5, 1), /* vsync */ + + /* MSDC0 */ + PIN_FIELD16(154, 154, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(155, 155, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(156, 156, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(157, 157, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(158, 158, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(159, 159, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(160, 160, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(161, 161, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(162, 162, 0xc10, 0x10, 2, 1), /* CMD */ + PIN_FIELD16(163, 163, 0xc00, 0x10, 2, 1), /* CLK */ + PIN_FIELD16(164, 164, 0xd10, 0x10, 2, 1), /* DS */ + PIN_FIELD16(165, 165, 0xd00, 0x10, 2, 1), /* RST */ + + /* MSDC1 */ + PIN_FIELD16(170, 170, 0xc50, 0x10, 2, 1), /* CMD */ + PIN_FIELD16(171, 171, 0xd20, 0x10, 2, 1), /* DAT0 */ + PIN_FIELD16(172, 172, 0xd20, 0x10, 6, 1), /* DAT1 */ + PIN_FIELD16(173, 173, 0xd20, 0x10, 10, 1), /* DAT2 */ + PIN_FIELD16(174, 174, 0xd20, 0x10, 14, 1), /* DAT3 */ + PIN_FIELD16(175, 175, 0xc40, 0x10, 2, 1), /* CLK */ + + /* MSDC2 */ + PIN_FIELD16(100, 100, 0xd30, 0x10, 2, 1), /* DAT0 */ + PIN_FIELD16(101, 101, 0xd30, 0x10, 6, 1), /* DAT1 */ + PIN_FIELD16(102, 102, 0xd30, 0x10, 10, 1), /* DAT2 */ + PIN_FIELD16(103, 103, 0xd30, 0x10, 14, 1), /* DAT3 */ + PIN_FIELD16(104, 104, 0xc80, 0x10, 2, 1), /* CLK */ + PIN_FIELD16(105, 105, 0xc90, 0x10, 2, 1), /* CMD */ + + /* MSDC3 */ + PIN_FIELD16(23, 23, 0xd40, 0x10, 2, 1), /* DAT0 */ + PIN_FIELD16(24, 24, 0xd40, 0x10, 6, 1), /* DAT1 */ + PIN_FIELD16(25, 25, 0xd40, 0x10, 10, 1), /* DAT2 */ + PIN_FIELD16(26, 26, 0xd40, 0x10, 14, 1), /* DAT3 */ + PIN_FIELD16(27, 27, 0xcc0, 0x10, 2, 1), /* CLK */ + PIN_FIELD16(28, 28, 0xcd0, 0x10, 2, 1) /* CMD */ +}; + +static const struct mtk_pin_field_calc mt6795_pin_r0_range[] = { + PIN_FIELD16(23, 23, 0xd40, 0x10, 0, 1), + PIN_FIELD16(24, 24, 0xd40, 0x10, 4, 1), + PIN_FIELD16(25, 25, 0xd40, 0x10, 8, 1), + PIN_FIELD16(26, 26, 0xd40, 0x10, 12, 1), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 0, 1), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 0, 1), + PIN_FIELD16(100, 100, 0xd30, 0x10, 0, 1), + PIN_FIELD16(101, 101, 0xd30, 0x10, 4, 1), + PIN_FIELD16(102, 102, 0xd30, 0x10, 8, 1), + PIN_FIELD16(103, 103, 0xd30, 0x10, 12, 1), + PIN_FIELD16(104, 104, 0xc80, 0x10, 0, 1), + PIN_FIELD16(105, 105, 0xc90, 0x10, 0, 1), + PIN_FIELD16(119, 119, 0xe00, 0x10, 0, 1), + PIN_FIELD16(120, 120, 0xe00, 0x10, 4, 1), + PIN_FIELD16(121, 121, 0xe00, 0x10, 8, 1), + PIN_FIELD16(122, 122, 0xe10, 0x10, 0, 1), + PIN_FIELD16(123, 123, 0xe10, 0x10, 4, 1), + PIN_FIELD16(124, 124, 0xe10, 0x10, 8, 1), + PIN_FIELD16(138, 138, 0xd50, 0x10, 0, 1), + PIN_FIELD16(139, 139, 0xd60, 0x10, 0, 1), + PIN_FIELD16(140, 140, 0xd70, 0x10, 0, 1), + PIN_FIELD16(141, 141, 0xd70, 0x10, 1, 1), + PIN_FIELD16(142, 142, 0xd70, 0x10, 3, 1), + PIN_FIELD16(143, 143, 0xd70, 0x10, 5, 1), + PIN_FIELD16(144, 144, 0xd50, 0x10, 3, 1), + PIN_FIELD16(145, 145, 0xd50, 0x10, 5, 1), + PIN_FIELD16(146, 146, 0xd60, 0x10, 5, 1), + PIN_FIELD16(147, 147, 0xed0, 0x10, 4, 1), + PIN_FIELD16(148, 148, 0xed0, 0x10, 6, 1), + PIN_FIELD16(149, 149, 0xed0, 0x10, 8, 1), + PIN_FIELD16(150, 150, 0xed0, 0x10, 10, 1), + PIN_FIELD16(151, 151, 0xed0, 0x10, 12, 1), + PIN_FIELD16(152, 152, 0xd60, 0x10, 1, 1), + PIN_FIELD16(153, 153, 0xd60, 0x10, 3, 1), + 
PIN_FIELD16(154, 154, 0xc20, 0x10, 0, 1), + PIN_FIELD16(155, 155, 0xc20, 0x10, 0, 1), + PIN_FIELD16(156, 156, 0xc20, 0x10, 0, 1), + PIN_FIELD16(157, 157, 0xc20, 0x10, 0, 1), + PIN_FIELD16(158, 158, 0xc20, 0x10, 0, 1), + PIN_FIELD16(159, 159, 0xc20, 0x10, 0, 1), + PIN_FIELD16(160, 160, 0xc20, 0x10, 0, 1), + PIN_FIELD16(161, 161, 0xc20, 0x10, 0, 1), + PIN_FIELD16(162, 162, 0xc10, 0x10, 0, 1), + PIN_FIELD16(163, 163, 0xc00, 0x10, 0, 1), + PIN_FIELD16(164, 164, 0xd10, 0x10, 0, 1), + PIN_FIELD16(165, 165, 0xd00, 0x10, 0, 1), + PIN_FIELD16(170, 170, 0xc50, 0x10, 0, 1), + PIN_FIELD16(171, 171, 0xd20, 0x10, 0, 1), + PIN_FIELD16(172, 172, 0xd20, 0x10, 4, 1), + PIN_FIELD16(173, 173, 0xd20, 0x10, 8, 1), + PIN_FIELD16(174, 174, 0xd20, 0x10, 12, 1), + PIN_FIELD16(175, 175, 0xc40, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_r1_range[] = { + PIN_FIELD16(23, 23, 0xd40, 0x10, 1, 1), + PIN_FIELD16(24, 24, 0xd40, 0x10, 5, 1), + PIN_FIELD16(25, 25, 0xd40, 0x10, 9, 1), + PIN_FIELD16(26, 26, 0xd40, 0x10, 13, 1), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 1, 1), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 1, 1), + PIN_FIELD16(100, 100, 0xd30, 0x10, 1, 1), + PIN_FIELD16(101, 101, 0xd30, 0x10, 5, 1), + PIN_FIELD16(102, 102, 0xd30, 0x10, 9, 1), + PIN_FIELD16(103, 103, 0xd30, 0x10, 13, 1), + PIN_FIELD16(104, 104, 0xc80, 0x10, 1, 1), + PIN_FIELD16(105, 105, 0xc90, 0x10, 1, 1), + PIN_FIELD16(119, 119, 0xe00, 0x10, 1, 1), + PIN_FIELD16(120, 120, 0xe00, 0x10, 5, 1), + PIN_FIELD16(121, 121, 0xe00, 0x10, 9, 1), + PIN_FIELD16(122, 122, 0xe10, 0x10, 1, 1), + PIN_FIELD16(123, 123, 0xe10, 0x10, 5, 1), + PIN_FIELD16(124, 124, 0xe10, 0x10, 9, 1), + PIN_FIELD16(138, 138, 0xd50, 0x10, 1, 1), + PIN_FIELD16(139, 139, 0xd60, 0x10, 0, 1), + PIN_FIELD16(140, 140, 0xd70, 0x10, 0, 1), + PIN_FIELD16(141, 141, 0xd70, 0x10, 2, 1), + PIN_FIELD16(142, 142, 0xd70, 0x10, 4, 1), + PIN_FIELD16(143, 143, 0xd70, 0x10, 6, 1), + PIN_FIELD16(144, 144, 0xd50, 0x10, 4, 1), + PIN_FIELD16(145, 145, 0xd50, 0x10, 6, 1), + PIN_FIELD16(146, 146, 0xd60, 0x10, 6, 1), + PIN_FIELD16(147, 147, 0xed0, 0x10, 5, 1), + PIN_FIELD16(148, 148, 0xed0, 0x10, 7, 1), + PIN_FIELD16(149, 149, 0xed0, 0x10, 9, 1), + PIN_FIELD16(150, 150, 0xed0, 0x10, 11, 1), + PIN_FIELD16(151, 151, 0xed0, 0x10, 13, 1), + PIN_FIELD16(152, 152, 0xd60, 0x10, 2, 1), + PIN_FIELD16(153, 153, 0xd60, 0x10, 4, 1), + PIN_FIELD16(154, 154, 0xc20, 0x10, 1, 1), + PIN_FIELD16(155, 155, 0xc20, 0x10, 1, 1), + PIN_FIELD16(156, 156, 0xc20, 0x10, 1, 1), + PIN_FIELD16(157, 157, 0xc20, 0x10, 1, 1), + PIN_FIELD16(158, 158, 0xc20, 0x10, 1, 1), + PIN_FIELD16(159, 159, 0xc20, 0x10, 1, 1), + PIN_FIELD16(160, 160, 0xc20, 0x10, 1, 1), + PIN_FIELD16(161, 161, 0xc20, 0x10, 1, 1), + PIN_FIELD16(162, 162, 0xc10, 0x10, 1, 1), + PIN_FIELD16(163, 163, 0xc00, 0x10, 1, 1), + PIN_FIELD16(164, 164, 0xd10, 0x10, 1, 1), + PIN_FIELD16(165, 165, 0xd00, 0x10, 1, 1), + PIN_FIELD16(170, 170, 0xc50, 0x10, 1, 1), + PIN_FIELD16(171, 171, 0xd20, 0x10, 1, 1), + PIN_FIELD16(172, 172, 0xd20, 0x10, 5, 1), + PIN_FIELD16(173, 173, 0xd20, 0x10, 9, 1), + PIN_FIELD16(174, 174, 0xd20, 0x10, 13, 1), + PIN_FIELD16(175, 175, 0xc40, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_drv_range[] = { + PINS_FIELD16(0, 4, 0xb30, 0x10, 13, 2), + PINS_FIELD16(5, 9, 0xb30, 0x10, 1, 2), + PINS_FIELD16(10, 15, 0xb30, 0x10, 5, 2), + PIN_FIELD16(16, 16, 0xb30, 0x10, 1, 2), + PINS_FIELD16(17, 19, 0xb70, 0x10, 5, 2), + PINS_FIELD16(20, 22, 0xb70, 0x10, 9, 2), + PINS_FIELD16(23, 26, 0xce0, 0x10, 8, 2), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 8, 
2), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 8, 2), + PINS_FIELD16(29, 32, 0xb80, 0x10, 13, 2), + PIN_FIELD16(33, 33, 0xb10, 0x10, 13, 2), + PINS_FIELD16(34, 36, 0xb10, 0x10, 9, 2), + PINS_FIELD16(37, 38, 0xb10, 0x10, 5, 2), + PIN_FIELD16(39, 39, 0xb20, 0x10, 1, 2), + PIN_FIELD16(40, 40, 0xb20, 0x10, 5, 2), + PINS_FIELD16(41, 42, 0xb20, 0x10, 9, 2), + PINS_FIELD16(47, 61, 0xb00, 0x10, 9, 2), + PINS_FIELD16(62, 66, 0xb70, 0x10, 1, 2), + PINS_FIELD16(67, 67, 0xb00, 0x10, 9, 2), + PINS_FIELD16(68, 72, 0xb60, 0x10, 13, 2), + PINS_FIELD16(73, 77, 0xb40, 0x10, 13, 2), + PIN_FIELD16(78, 78, 0xb00, 0x10, 12, 3), + PINS_FIELD16(79, 91, 0xb00, 0x10, 13, 2), + PIN_FIELD16(92, 92, 0xb60, 0x10, 5, 2), + PINS_FIELD16(93, 95, 0xb60, 0x10, 1, 2), + PINS_FIELD16(96, 99, 0xb80, 0x10, 9, 2), + PINS_FIELD16(100, 103, 0xca0, 0x10, 8, 2), + PIN_FIELD16(104, 104, 0xc80, 0x10, 8, 2), + PIN_FIELD16(105, 105, 0xc90, 0x10, 8, 2), + PINS_FIELD16(106, 107, 0xb50, 0x10, 9, 2), + PINS_FIELD16(108, 112, 0xb50, 0x10, 1, 2), + PINS_FIELD16(113, 116, 0xb80, 0x10, 5, 2), + PINS_FIELD16(117, 118, 0xb90, 0x10, 1, 2), + PINS_FIELD16(119, 124, 0xb50, 0x10, 5, 2), + PIN_FIELD16(127, 127, 0xb70, 0x10, 5, 2), + PIN_FIELD16(128, 128, 0xb70, 0x10, 9, 2), + PIN_FIELD16(129, 129, 0xb40, 0x10, 9, 2), + PINS_FIELD16(130, 131, 0xb40, 0x10, 13, 2), + PINS_FIELD16(132, 135, 0xb40, 0x10, 9, 2), + PIN_FIELD16(138, 138, 0xb50, 0x10, 8, 2), + PIN_FIELD16(139, 139, 0xb60, 0x10, 8, 2), + PINS_FIELD16(140, 151, 0xb70, 0x10, 8, 2), + PINS_FIELD16(152, 153, 0xb60, 0x10, 8, 2), + PINS_FIELD16(154, 161, 0xc20, 0x10, 8, 2), + PIN_FIELD16(162, 162, 0xc10, 0x10, 8, 2), + PIN_FIELD16(163, 163, 0xc00, 0x10, 8, 2), + PIN_FIELD16(164, 164, 0xd10, 0x10, 8, 2), + PIN_FIELD16(165, 165, 0xd00, 0x10, 8, 2), + PINS_FIELD16(166, 169, 0xb80, 0x10, 1, 2), + PINS_FIELD16(170, 173, 0xc60, 0x10, 8, 2), + PIN_FIELD16(174, 174, 0xc40, 0x10, 8, 2), + PIN_FIELD16(175, 175, 0xc50, 0x10, 8, 2), + PINS_FIELD16(176, 179, 0xb70, 0x10, 13, 2), + PIN_FIELD16(180, 180, 0xb00, 0x10, 5, 2), + PINS_FIELD16(181, 184, 0xb00, 0x10, 1, 2), + PINS_FIELD16(185, 191, 0xb60, 0x10, 9, 2), + PIN_FIELD16(192, 192, 0xb40, 0x10, 1, 2), + PINS_FIELD16(193, 194, 0xb40, 0x10, 5, 2), + PINS_FIELD16(195, 196, 0xb40, 0x10, 1, 2), +}; + +static const struct mtk_pin_field_calc mt6795_pin_sr_range[] = { + PINS_FIELD16(0, 4, 0xb30, 0x10, 15, 1), + PINS_FIELD16(5, 9, 0xb30, 0x10, 3, 1), + PINS_FIELD16(10, 15, 0xb30, 0x10, 7, 1), + PIN_FIELD16(16, 16, 0xb30, 0x10, 5, 1), + PINS_FIELD16(23, 26, 0xce0, 0x10, 12, 1), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 12, 1), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 12, 1), + PINS_FIELD16(29, 32, 0xb80, 0x10, 15, 1), + PIN_FIELD16(33, 33, 0xb10, 0x10, 15, 1), + PINS_FIELD16(34, 36, 0xb10, 0x10, 11, 1), + PINS_FIELD16(37, 38, 0xb10, 0x10, 7, 1), + PIN_FIELD16(39, 39, 0xb20, 0x10, 3, 1), + PIN_FIELD16(40, 40, 0xb20, 0x10, 7, 1), + PINS_FIELD16(41, 42, 0xb20, 0x10, 11, 1), + PINS_FIELD16(47, 61, 0xb00, 0x10, 11, 1), + PINS_FIELD16(62, 66, 0xb70, 0x10, 3, 1), + PINS_FIELD16(67, 67, 0xb00, 0x10, 11, 1), + PINS_FIELD16(68, 72, 0xb60, 0x10, 15, 1), + PINS_FIELD16(73, 77, 0xb40, 0x10, 15, 1), + PIN_FIELD16(78, 78, 0xb00, 0x10, 15, 1), + PINS_FIELD16(79, 91, 0xb00, 0x10, 15, 1), + PIN_FIELD16(92, 92, 0xb60, 0x10, 7, 1), + PINS_FIELD16(93, 95, 0xb60, 0x10, 3, 1), + PINS_FIELD16(96, 99, 0xb80, 0x10, 11, 1), + PINS_FIELD16(100, 103, 0xca0, 0x10, 12, 1), + PIN_FIELD16(104, 104, 0xc80, 0x10, 12, 1), + PIN_FIELD16(105, 105, 0xc90, 0x10, 12, 1), + 
PINS_FIELD16(106, 107, 0xb50, 0x10, 11, 1), + PINS_FIELD16(108, 112, 0xb50, 0x10, 3, 1), + PINS_FIELD16(113, 116, 0xb80, 0x10, 7, 1), + PINS_FIELD16(117, 118, 0xb90, 0x10, 3, 1), + PINS_FIELD16(119, 124, 0xb50, 0x10, 7, 1), + PIN_FIELD16(127, 127, 0xb70, 0x10, 7, 1), + PIN_FIELD16(128, 128, 0xb70, 0x10, 11, 1), + PIN_FIELD16(129, 129, 0xb40, 0x10, 11, 1), + PINS_FIELD16(130, 131, 0xb40, 0x10, 15, 1), + PINS_FIELD16(132, 135, 0xb40, 0x10, 11, 1), + PIN_FIELD16(138, 138, 0xb50, 0x10, 12, 1), + PIN_FIELD16(139, 139, 0xb60, 0x10, 12, 1), + PINS_FIELD16(140, 151, 0xb70, 0x10, 12, 1), + PINS_FIELD16(152, 153, 0xb60, 0x10, 12, 1), + PINS_FIELD16(154, 161, 0xc20, 0x10, 12, 1), + PIN_FIELD16(162, 162, 0xc10, 0x10, 12, 1), + PIN_FIELD16(163, 163, 0xc00, 0x10, 12, 1), + PIN_FIELD16(164, 164, 0xd10, 0x10, 12, 1), + PIN_FIELD16(165, 165, 0xd00, 0x10, 12, 1), + PINS_FIELD16(166, 169, 0xb80, 0x10, 3, 1), + PINS_FIELD16(170, 173, 0xc60, 0x10, 12, 1), + PIN_FIELD16(174, 174, 0xc40, 0x10, 12, 1), + PIN_FIELD16(175, 175, 0xc50, 0x10, 12, 1), + PINS_FIELD16(176, 179, 0xb70, 0x10, 15, 1), + PIN_FIELD16(180, 180, 0xb00, 0x10, 7, 1), + PINS_FIELD16(181, 184, 0xb00, 0x10, 3, 1), + PINS_FIELD16(185, 191, 0xb60, 0x10, 11, 1), + PIN_FIELD16(192, 192, 0xb40, 0x10, 3, 1), + PINS_FIELD16(193, 194, 0xb40, 0x10, 7, 1), + PINS_FIELD16(195, 196, 0xb40, 0x10, 3, 1), +}; + +static const struct mtk_pin_reg_calc mt6795_reg_cals[PINCTRL_PIN_REG_MAX] = { + [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6795_pin_mode_range), + [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6795_pin_dir_range), + [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6795_pin_di_range), + [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6795_pin_do_range), + [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt6795_pin_sr_range), + [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6795_pin_smt_range), + [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6795_pin_drv_range), + [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6795_pin_pupd_range), + [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6795_pin_r0_range), + [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6795_pin_r1_range), + [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6795_pin_ies_range), + [PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt6795_pin_pullen_range), + [PINCTRL_PIN_REG_PULLSEL] = MTK_RANGE(mt6795_pin_pullsel_range), +}; + +static const struct mtk_eint_hw mt6795_eint_hw = { + .port_mask = 7, + .ports = 7, + .ap_num = 224, + .db_cnt = 32, +}; + +static const unsigned int mt6795_pull_type[] = { + MTK_PULL_PULLSEL_TYPE,/*0*/ MTK_PULL_PULLSEL_TYPE,/*1*/ + MTK_PULL_PULLSEL_TYPE,/*2*/ MTK_PULL_PULLSEL_TYPE,/*3*/ + MTK_PULL_PULLSEL_TYPE,/*4*/ MTK_PULL_PULLSEL_TYPE,/*5*/ + MTK_PULL_PULLSEL_TYPE,/*6*/ MTK_PULL_PULLSEL_TYPE,/*7*/ + MTK_PULL_PULLSEL_TYPE,/*8*/ MTK_PULL_PULLSEL_TYPE,/*9*/ + MTK_PULL_PULLSEL_TYPE,/*10*/ MTK_PULL_PULLSEL_TYPE,/*11*/ + MTK_PULL_PULLSEL_TYPE,/*12*/ MTK_PULL_PULLSEL_TYPE,/*13*/ + MTK_PULL_PULLSEL_TYPE,/*14*/ MTK_PULL_PULLSEL_TYPE,/*15*/ + MTK_PULL_PULLSEL_TYPE,/*16*/ MTK_PULL_PULLSEL_TYPE,/*17*/ + MTK_PULL_PULLSEL_TYPE,/*18*/ MTK_PULL_PULLSEL_TYPE,/*19*/ + MTK_PULL_PULLSEL_TYPE,/*20*/ MTK_PULL_PULLSEL_TYPE,/*21*/ + MTK_PULL_PULLSEL_TYPE,/*22*/ MTK_PULL_PUPD_R1R0_TYPE,/*23*/ + MTK_PULL_PUPD_R1R0_TYPE,/*24*/ MTK_PULL_PUPD_R1R0_TYPE,/*25*/ + MTK_PULL_PUPD_R1R0_TYPE,/*26*/ MTK_PULL_PUPD_R1R0_TYPE,/*27*/ + MTK_PULL_PUPD_R1R0_TYPE,/*28*/ MTK_PULL_PULLSEL_TYPE,/*29*/ + MTK_PULL_PULLSEL_TYPE,/*30*/ MTK_PULL_PULLSEL_TYPE,/*31*/ + MTK_PULL_PULLSEL_TYPE,/*32*/ MTK_PULL_PULLSEL_TYPE,/*33*/ + MTK_PULL_PULLSEL_TYPE,/*34*/ MTK_PULL_PULLSEL_TYPE,/*35*/ + MTK_PULL_PULLSEL_TYPE,/*36*/ 
MTK_PULL_PULLSEL_TYPE,/*37*/ + MTK_PULL_PULLSEL_TYPE,/*38*/ MTK_PULL_PULLSEL_TYPE,/*39*/ + MTK_PULL_PULLSEL_TYPE,/*40*/ MTK_PULL_PULLSEL_TYPE,/*41*/ + MTK_PULL_PULLSEL_TYPE,/*42*/ MTK_PULL_PULLSEL_TYPE,/*43*/ + MTK_PULL_PULLSEL_TYPE,/*44*/ MTK_PULL_PULLSEL_TYPE,/*45*/ + MTK_PULL_PULLSEL_TYPE,/*46*/ MTK_PULL_PULLSEL_TYPE,/*47*/ + MTK_PULL_PULLSEL_TYPE,/*48*/ MTK_PULL_PULLSEL_TYPE,/*49*/ + MTK_PULL_PULLSEL_TYPE,/*50*/ MTK_PULL_PULLSEL_TYPE,/*51*/ + MTK_PULL_PULLSEL_TYPE,/*52*/ MTK_PULL_PULLSEL_TYPE,/*53*/ + MTK_PULL_PULLSEL_TYPE,/*54*/ MTK_PULL_PULLSEL_TYPE,/*55*/ + MTK_PULL_PULLSEL_TYPE,/*56*/ MTK_PULL_PULLSEL_TYPE,/*57*/ + MTK_PULL_PULLSEL_TYPE,/*58*/ MTK_PULL_PULLSEL_TYPE,/*59*/ + MTK_PULL_PULLSEL_TYPE,/*60*/ MTK_PULL_PULLSEL_TYPE,/*61*/ + MTK_PULL_PULLSEL_TYPE,/*62*/ MTK_PULL_PULLSEL_TYPE,/*63*/ + MTK_PULL_PULLSEL_TYPE,/*64*/ MTK_PULL_PULLSEL_TYPE,/*65*/ + MTK_PULL_PULLSEL_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/ + MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PUPD_R1R0_TYPE,/*69*/ + MTK_PULL_PUPD_R1R0_TYPE,/*70*/ MTK_PULL_PUPD_R1R0_TYPE,/*71*/ + MTK_PULL_PUPD_R1R0_TYPE,/*72*/ MTK_PULL_PUPD_R1R0_TYPE,/*73*/ + MTK_PULL_PUPD_R1R0_TYPE,/*74*/ MTK_PULL_PUPD_R1R0_TYPE,/*75*/ + MTK_PULL_PUPD_R1R0_TYPE,/*76*/ MTK_PULL_PUPD_R1R0_TYPE,/*77*/ + MTK_PULL_PUPD_R1R0_TYPE,/*78*/ MTK_PULL_PUPD_R1R0_TYPE,/*79*/ + MTK_PULL_PUPD_R1R0_TYPE,/*80*/ MTK_PULL_PUPD_R1R0_TYPE,/*81*/ + MTK_PULL_PUPD_R1R0_TYPE,/*82*/ MTK_PULL_PULLSEL_TYPE,/*83*/ + MTK_PULL_PUPD_R1R0_TYPE,/*84*/ MTK_PULL_PUPD_R1R0_TYPE,/*85*/ + MTK_PULL_PUPD_R1R0_TYPE,/*86*/ MTK_PULL_PUPD_R1R0_TYPE,/*87*/ + MTK_PULL_PUPD_R1R0_TYPE,/*88*/ MTK_PULL_PUPD_R1R0_TYPE,/*89*/ + MTK_PULL_PULLSEL_TYPE,/*90*/ MTK_PULL_PULLSEL_TYPE,/*91*/ + MTK_PULL_PULLSEL_TYPE,/*92*/ MTK_PULL_PULLSEL_TYPE,/*93*/ + MTK_PULL_PULLSEL_TYPE,/*94*/ MTK_PULL_PULLSEL_TYPE,/*95*/ + MTK_PULL_PULLSEL_TYPE,/*96*/ MTK_PULL_PULLSEL_TYPE,/*97*/ + MTK_PULL_PULLSEL_TYPE,/*98*/ MTK_PULL_PULLSEL_TYPE,/*99*/ + MTK_PULL_PUPD_R1R0_TYPE,/*100*/ MTK_PULL_PUPD_R1R0_TYPE,/*101*/ + MTK_PULL_PUPD_R1R0_TYPE,/*102*/ MTK_PULL_PUPD_R1R0_TYPE,/*103*/ + MTK_PULL_PUPD_R1R0_TYPE,/*104*/ MTK_PULL_PUPD_R1R0_TYPE,/*105*/ + MTK_PULL_PULLSEL_TYPE,/*106*/ MTK_PULL_PULLSEL_TYPE,/*107*/ + MTK_PULL_PULLSEL_TYPE,/*108*/ MTK_PULL_PULLSEL_TYPE,/*109*/ + MTK_PULL_PULLSEL_TYPE,/*110*/ MTK_PULL_PULLSEL_TYPE,/*111*/ + MTK_PULL_PULLSEL_TYPE,/*112*/ MTK_PULL_PULLSEL_TYPE,/*113*/ + MTK_PULL_PULLSEL_TYPE,/*114*/ MTK_PULL_PULLSEL_TYPE,/*115*/ + MTK_PULL_PULLSEL_TYPE,/*116*/ MTK_PULL_PULLSEL_TYPE,/*117*/ + MTK_PULL_PULLSEL_TYPE,/*118*/ MTK_PULL_PUPD_R1R0_TYPE,/*119*/ + MTK_PULL_PUPD_R1R0_TYPE,/*120*/ MTK_PULL_PUPD_R1R0_TYPE,/*121*/ + MTK_PULL_PUPD_R1R0_TYPE,/*122*/ MTK_PULL_PUPD_R1R0_TYPE,/*123*/ + MTK_PULL_PUPD_R1R0_TYPE,/*124*/ MTK_PULL_PULLSEL_TYPE,/*125*/ + MTK_PULL_PULLSEL_TYPE,/*126*/ MTK_PULL_PULLSEL_TYPE,/*127*/ + MTK_PULL_PULLSEL_TYPE,/*128*/ MTK_PULL_PULLSEL_TYPE,/*129*/ + MTK_PULL_PULLSEL_TYPE,/*130*/ MTK_PULL_PULLSEL_TYPE,/*131*/ + MTK_PULL_PULLSEL_TYPE,/*132*/ MTK_PULL_PULLSEL_TYPE,/*133*/ + MTK_PULL_PULLSEL_TYPE,/*134*/ MTK_PULL_PULLSEL_TYPE,/*135*/ + MTK_PULL_PULLSEL_TYPE,/*136*/ MTK_PULL_PULLSEL_TYPE,/*137*/ + MTK_PULL_PUPD_R1R0_TYPE,/*138*/ MTK_PULL_PUPD_R1R0_TYPE,/*139*/ + MTK_PULL_PUPD_R1R0_TYPE,/*140*/ MTK_PULL_PUPD_R1R0_TYPE,/*141*/ + MTK_PULL_PUPD_R1R0_TYPE,/*142*/ MTK_PULL_PUPD_R1R0_TYPE,/*143*/ + MTK_PULL_PUPD_R1R0_TYPE,/*144*/ MTK_PULL_PUPD_R1R0_TYPE,/*145*/ + MTK_PULL_PUPD_R1R0_TYPE,/*146*/ MTK_PULL_PUPD_R1R0_TYPE,/*147*/ + MTK_PULL_PUPD_R1R0_TYPE,/*148*/ MTK_PULL_PUPD_R1R0_TYPE,/*149*/ + 
MTK_PULL_PUPD_R1R0_TYPE,/*150*/ MTK_PULL_PUPD_R1R0_TYPE,/*151*/ + MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/ + MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/ + MTK_PULL_PUPD_R1R0_TYPE,/*156*/ MTK_PULL_PUPD_R1R0_TYPE,/*157*/ + MTK_PULL_PUPD_R1R0_TYPE,/*158*/ MTK_PULL_PUPD_R1R0_TYPE,/*159*/ + MTK_PULL_PUPD_R1R0_TYPE,/*160*/ MTK_PULL_PUPD_R1R0_TYPE,/*161*/ + MTK_PULL_PUPD_R1R0_TYPE,/*162*/ MTK_PULL_PUPD_R1R0_TYPE,/*163*/ + MTK_PULL_PUPD_R1R0_TYPE,/*164*/ MTK_PULL_PUPD_R1R0_TYPE,/*165*/ + MTK_PULL_PULLSEL_TYPE,/*166*/ MTK_PULL_PULLSEL_TYPE,/*167*/ + MTK_PULL_PULLSEL_TYPE,/*168*/ MTK_PULL_PULLSEL_TYPE,/*169*/ + MTK_PULL_PUPD_R1R0_TYPE,/*170*/ MTK_PULL_PUPD_R1R0_TYPE,/*171*/ + MTK_PULL_PUPD_R1R0_TYPE,/*172*/ MTK_PULL_PUPD_R1R0_TYPE,/*173*/ + MTK_PULL_PUPD_R1R0_TYPE,/*174*/ MTK_PULL_PUPD_R1R0_TYPE,/*175*/ + MTK_PULL_PULLSEL_TYPE,/*176*/ MTK_PULL_PULLSEL_TYPE,/*177*/ + MTK_PULL_PULLSEL_TYPE,/*178*/ MTK_PULL_PULLSEL_TYPE,/*179*/ + MTK_PULL_PULLSEL_TYPE,/*180*/ MTK_PULL_PULLSEL_TYPE,/*181*/ + MTK_PULL_PULLSEL_TYPE,/*182*/ MTK_PULL_PULLSEL_TYPE,/*183*/ + MTK_PULL_PULLSEL_TYPE,/*184*/ MTK_PULL_PULLSEL_TYPE,/*185*/ + MTK_PULL_PULLSEL_TYPE,/*186*/ MTK_PULL_PULLSEL_TYPE,/*187*/ + MTK_PULL_PULLSEL_TYPE,/*188*/ MTK_PULL_PULLSEL_TYPE,/*189*/ + MTK_PULL_PULLSEL_TYPE,/*190*/ MTK_PULL_PULLSEL_TYPE,/*191*/ + MTK_PULL_PULLSEL_TYPE,/*192*/ MTK_PULL_PULLSEL_TYPE,/*193*/ + MTK_PULL_PULLSEL_TYPE,/*194*/ MTK_PULL_PULLSEL_TYPE,/*195*/ + MTK_PULL_PULLSEL_TYPE,/*196*/ +}; + +static const struct mtk_pin_soc mt6795_data = { + .reg_cal = mt6795_reg_cals, + .pins = mtk_pins_mt6795, + .npins = ARRAY_SIZE(mtk_pins_mt6795), + .ngrps = ARRAY_SIZE(mtk_pins_mt6795), + .nfuncs = 8, + .eint_hw = &mt6795_eint_hw, + .gpio_m = 0, + .base_names = mtk_default_register_base_names, + .nbase_names = ARRAY_SIZE(mtk_default_register_base_names), + .pull_type = mt6795_pull_type, + .bias_disable_set = mtk_pinconf_bias_disable_set_rev1, + .bias_disable_get = mtk_pinconf_bias_disable_get_rev1, + .bias_set = mtk_pinconf_bias_set_rev1, + .bias_get = mtk_pinconf_bias_get_rev1, + .bias_set_combo = mtk_pinconf_bias_set_combo, + .bias_get_combo = mtk_pinconf_bias_get_combo, + .drive_set = mtk_pinconf_drive_set_rev1, + .drive_get = mtk_pinconf_drive_get_rev1, + .adv_pull_get = mtk_pinconf_adv_pull_get, + .adv_pull_set = mtk_pinconf_adv_pull_set, +}; + +static const struct of_device_id mt6795_pctrl_match[] = { + { .compatible = "mediatek,mt6795-pinctrl", .data = &mt6795_data }, + { } +}; + +static struct platform_driver mt6795_pinctrl_driver = { + .driver = { + .name = "mt6795-pinctrl", + .of_match_table = mt6795_pctrl_match, + .pm = &mtk_paris_pinctrl_pm_ops, + }, + .probe = mtk_paris_pinctrl_probe, +}; + +static int __init mtk_pinctrl_init(void) +{ + return platform_driver_register(&mt6795_pinctrl_driver); +} +arch_initcall(mtk_pinctrl_init); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h new file mode 100644 index 0000000000..f639bd8591 --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h @@ -0,0 +1,1698 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Collabora Ltd. 
+ * Author: AngeloGioacchino Del Regno + */ + +#ifndef __PINCTRL_MTK_MT6795_H +#define __PINCTRL_MTK_MT6795_H + +#include "pinctrl-paris.h" + +static const struct mtk_pin_desc mtk_pins_mt6795[] = { + MTK_PIN( + 0, "GPIO0", + MTK_EINT_FUNCTION(0, 0), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO0"), + MTK_FUNCTION(1, "IRDA_PDN"), + MTK_FUNCTION(2, "I2S1_WS"), + MTK_FUNCTION(4, "TDD_TMS"), + MTK_FUNCTION(5, "UTXD0") + ), + MTK_PIN( + 1, "GPIO1", + MTK_EINT_FUNCTION(0, 1), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO1"), + MTK_FUNCTION(1, "IRDA_RXD"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "SDA4"), + MTK_FUNCTION(4, "TDD_TCK"), + MTK_FUNCTION(5, "URXD0") + ), + MTK_PIN( + 2, "GPIO2", + MTK_EINT_FUNCTION(0, 2), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO2"), + MTK_FUNCTION(1, "IRDA_TXD"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "SCL4"), + MTK_FUNCTION(4, "TDD_TDI"), + MTK_FUNCTION(5, "UTXD3") + ), + MTK_PIN( + 3, "GPIO3", + MTK_EINT_FUNCTION(0, 3), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO3"), + MTK_FUNCTION(1, "DSI1_TE"), + MTK_FUNCTION(2, "I2S1_DO_1"), + MTK_FUNCTION(3, "SDA3"), + MTK_FUNCTION(4, "TDD_TDO"), + MTK_FUNCTION(5, "URXD3") + ), + MTK_PIN( + 4, "GPIO4", + MTK_EINT_FUNCTION(0, 4), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO4"), + MTK_FUNCTION(1, "DISP_PWM1"), + MTK_FUNCTION(2, "I2S1_DO_2"), + MTK_FUNCTION(3, "SCL3"), + MTK_FUNCTION(4, "TDD_TRSTN") + ), + MTK_PIN( + 5, "GPIO5", + MTK_EINT_FUNCTION(0, 5), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO5"), + MTK_FUNCTION(1, "PCM1_CLK"), + MTK_FUNCTION(2, "I2S2_WS"), + MTK_FUNCTION(3, "SPI_CK_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TMS") + ), + MTK_PIN( + 6, "GPIO6", + MTK_EINT_FUNCTION(0, 6), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO6"), + MTK_FUNCTION(1, "PCM1_SYNC"), + MTK_FUNCTION(2, "I2S2_BCK"), + MTK_FUNCTION(3, "SPI_MI_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TCK") + ), + MTK_PIN( + 7, "GPIO7", + MTK_EINT_FUNCTION(0, 7), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO7"), + MTK_FUNCTION(1, "PCM1_DI"), + MTK_FUNCTION(2, "I2S2_DI_1"), + MTK_FUNCTION(3, "SPI_MO_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDI") + ), + MTK_PIN( + 8, "GPIO8", + MTK_EINT_FUNCTION(0, 8), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO8"), + MTK_FUNCTION(1, "PCM1_DO"), + MTK_FUNCTION(2, "I2S2_DI_2"), + MTK_FUNCTION(3, "SPI_CS_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDO") + ), + MTK_PIN( + 9, "GPIO9", + MTK_EINT_FUNCTION(0, 9), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO9"), + MTK_FUNCTION(1, "USB_DRVVBUS"), + MTK_FUNCTION(2, "I2S2_MCK"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TRST") + ), + MTK_PIN( + 10, "GPIO10", + MTK_EINT_FUNCTION(0, 10), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO10"), + MTK_FUNCTION(2, "I2S0_WS") + ), + MTK_PIN( + 11, "GPIO11", + MTK_EINT_FUNCTION(0, 11), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO11"), + MTK_FUNCTION(2, "I2S0_BCK") + ), + MTK_PIN( + 12, "GPIO12", + MTK_EINT_FUNCTION(0, 12), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO12"), + MTK_FUNCTION(2, "I2S0_MCK") + ), + MTK_PIN( + 13, "GPIO13", + MTK_EINT_FUNCTION(0, 13), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO13"), + MTK_FUNCTION(2, "I2S0_DO") + ), + MTK_PIN( + 14, "GPIO14", + MTK_EINT_FUNCTION(0, 14), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO14"), + MTK_FUNCTION(2, "I2S0_DI"), + MTK_FUNCTION(3, "DISP_PWM1"), + MTK_FUNCTION(4, "PWM4"), + MTK_FUNCTION(5, "IRDA_RXD"), + MTK_FUNCTION(6, "I2S1_BCK") + ), + MTK_PIN( + 15, "GPIO15", + MTK_EINT_FUNCTION(0, 15), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO15"), + 
MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "PWM5"), + MTK_FUNCTION(5, "IRDA_TXD"), + MTK_FUNCTION(6, "I2S1_MCK") + ), + MTK_PIN( + 16, "GPIO16", + MTK_EINT_FUNCTION(0, 16), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO16"), + MTK_FUNCTION(1, "IDDIG"), + MTK_FUNCTION(2, "FLASH"), + MTK_FUNCTION(3, "EXT_FRAME_SYNC"), + MTK_FUNCTION(4, "PWM5") + ), + MTK_PIN( + 17, "GPIO17", + MTK_EINT_FUNCTION(0, 17), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO17"), + MTK_FUNCTION(1, "SIM1_SCLK"), + MTK_FUNCTION(2, "SIM2_SCLK") + ), + MTK_PIN( + 18, "GPIO18", + MTK_EINT_FUNCTION(0, 18), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO18"), + MTK_FUNCTION(1, "SIM1_SRST"), + MTK_FUNCTION(2, "SIM2_SRST") + ), + MTK_PIN( + 19, "GPIO19", + MTK_EINT_FUNCTION(0, 19), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO19"), + MTK_FUNCTION(1, "SIM1_SDAT"), + MTK_FUNCTION(2, "SIM2_SDAT") + ), + MTK_PIN( + 20, "GPIO20", + MTK_EINT_FUNCTION(0, 20), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO20"), + MTK_FUNCTION(1, "SIM2_SCLK"), + MTK_FUNCTION(2, "SIM1_SCLK") + ), + MTK_PIN( + 21, "GPIO21", + MTK_EINT_FUNCTION(0, 21), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO21"), + MTK_FUNCTION(1, "SIM2_SRST"), + MTK_FUNCTION(2, "SIM1_SRST") + ), + MTK_PIN( + 22, "GPIO22", + MTK_EINT_FUNCTION(0, 22), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO22"), + MTK_FUNCTION(1, "SIM2_SDAT"), + MTK_FUNCTION(2, "SIM1_SDAT") + ), + MTK_PIN( + 23, "GPIO23", + MTK_EINT_FUNCTION(0, 23), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO23"), + MTK_FUNCTION(1, "MSDC3_DAT0") + ), + MTK_PIN( + 24, "GPIO24", + MTK_EINT_FUNCTION(0, 24), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO24"), + MTK_FUNCTION(1, "MSDC3_DAT1") + ), + MTK_PIN( + 25, "GPIO25", + MTK_EINT_FUNCTION(0, 25), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO25"), + MTK_FUNCTION(1, "MSDC3_DAT2") + ), + MTK_PIN( + 26, "GPIO26", + MTK_EINT_FUNCTION(0, 26), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO26"), + MTK_FUNCTION(1, "MSDC3_DAT3") + ), + MTK_PIN( + 27, "GPIO27", + MTK_EINT_FUNCTION(0, 27), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO27"), + MTK_FUNCTION(1, "MSDC3_CLK") + ), + MTK_PIN( + 28, "GPIO28", + MTK_EINT_FUNCTION(0, 28), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO28"), + MTK_FUNCTION(1, "MSDC3_CMD") + ), + MTK_PIN( + 29, "GPIO29", + MTK_EINT_FUNCTION(0, 29), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO29"), + MTK_FUNCTION(1, "PTA_RXD"), + MTK_FUNCTION(2, "UCTS2") + ), + MTK_PIN( + 30, "GPIO30", + MTK_EINT_FUNCTION(0, 30), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO30"), + MTK_FUNCTION(1, "PTA_TXD"), + MTK_FUNCTION(2, "URTS2") + ), + MTK_PIN( + 31, "GPIO31", + MTK_EINT_FUNCTION(0, 31), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO31"), + MTK_FUNCTION(1, "URXD2"), + MTK_FUNCTION(2, "UTXD2") + ), + MTK_PIN( + 32, "GPIO32", + MTK_EINT_FUNCTION(0, 32), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO32"), + MTK_FUNCTION(1, "UTXD2"), + MTK_FUNCTION(2, "URXD2") + ), + MTK_PIN( + 33, "GPIO33", + MTK_EINT_FUNCTION(0, 33), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO33"), + MTK_FUNCTION(1, "MRG_CLK"), + MTK_FUNCTION(2, "PCM0_CLK") + ), + MTK_PIN( + 34, "GPIO34", + MTK_EINT_FUNCTION(0, 34), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO34"), + MTK_FUNCTION(1, "MRG_DI"), + MTK_FUNCTION(2, "PCM0_DI") + ), + MTK_PIN( + 35, "GPIO35", + MTK_EINT_FUNCTION(0, 35), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO35"), + MTK_FUNCTION(1, "MRG_DO"), + MTK_FUNCTION(2, "PCM0_DO") + ), + MTK_PIN( + 36, "GPIO36", + MTK_EINT_FUNCTION(0, 36), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO36"), + MTK_FUNCTION(1, "MRG_SYNC"), + MTK_FUNCTION(2, "PCM0_SYNC") + ), + MTK_PIN( + 37, "GPIO37", + MTK_EINT_FUNCTION(0, 37), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO37"), + MTK_FUNCTION(1, "GPS_SYNC") + 
), + MTK_PIN( + 38, "GPIO38", + MTK_EINT_FUNCTION(0, 38), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO38"), + MTK_FUNCTION(1, "DAIRSTB") + ), + MTK_PIN( + 39, "GPIO39", + MTK_EINT_FUNCTION(0, 39), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO39"), + MTK_FUNCTION(1, "CM2MCLK") + ), + MTK_PIN( + 40, "GPIO40", + MTK_EINT_FUNCTION(0, 40), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO40"), + MTK_FUNCTION(1, "CM3MCLK"), + MTK_FUNCTION(2, "IRDA_PDN"), + MTK_FUNCTION(3, "PWM6"), + MTK_FUNCTION(4, "I2S1_WS") + ), + MTK_PIN( + 41, "GPIO41", + MTK_EINT_FUNCTION(0, 41), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO41"), + MTK_FUNCTION(1, "CMPCLK"), + MTK_FUNCTION(2, "CMCSK"), + MTK_FUNCTION(3, "FLASH") + ), + MTK_PIN( + 42, "GPIO42", + MTK_EINT_FUNCTION(0, 42), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO42"), + MTK_FUNCTION(1, "CMMCLK") + ), + MTK_PIN( + 43, "GPIO43", + MTK_EINT_FUNCTION(0, 43), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO43"), + MTK_FUNCTION(1, "SDA2") + ), + MTK_PIN( + 44, "GPIO44", + MTK_EINT_FUNCTION(0, 44), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO44"), + MTK_FUNCTION(1, "SCL2") + ), + MTK_PIN( + 45, "GPIO45", + MTK_EINT_FUNCTION(0, 45), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO45"), + MTK_FUNCTION(1, "SDA0") + ), + MTK_PIN( + 46, "GPIO46", + MTK_EINT_FUNCTION(0, 46), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO46"), + MTK_FUNCTION(1, "SCL0") + ), + MTK_PIN( + 47, "GPIO47", + MTK_EINT_FUNCTION(0, 47), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO47"), + MTK_FUNCTION(1, "BPI_BUS0") + ), + MTK_PIN( + 48, "GPIO48", + MTK_EINT_FUNCTION(0, 48), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO48"), + MTK_FUNCTION(1, "BPI_BUS1") + ), + MTK_PIN( + 49, "GPIO49", + MTK_EINT_FUNCTION(0, 49), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO49"), + MTK_FUNCTION(1, "BPI_BUS2") + ), + MTK_PIN( + 50, "GPIO50", + MTK_EINT_FUNCTION(0, 50), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO50"), + MTK_FUNCTION(1, "BPI_BUS3") + ), + MTK_PIN( + 51, "GPIO51", + MTK_EINT_FUNCTION(0, 51), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO51"), + MTK_FUNCTION(1, "BPI_BUS4") + ), + MTK_PIN( + 52, "GPIO52", + MTK_EINT_FUNCTION(0, 52), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO52"), + MTK_FUNCTION(1, "BPI_BUS5") + ), + MTK_PIN( + 53, "GPIO53", + MTK_EINT_FUNCTION(0, 53), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO53"), + MTK_FUNCTION(1, "BPI_BUS6") + ), + MTK_PIN( + 54, "GPIO54", + MTK_EINT_FUNCTION(0, 54), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO54"), + MTK_FUNCTION(1, "BPI_BUS7") + ), + MTK_PIN( + 55, "GPIO55", + MTK_EINT_FUNCTION(0, 55), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO55"), + MTK_FUNCTION(1, "BPI_BUS8") + ), + MTK_PIN( + 56, "GPIO56", + MTK_EINT_FUNCTION(0, 56), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO56"), + MTK_FUNCTION(1, "BPI_BUS9") + ), + MTK_PIN( + 57, "GPIO57", + MTK_EINT_FUNCTION(0, 57), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO57"), + MTK_FUNCTION(1, "BPI_BUS10") + ), + MTK_PIN( + 58, "GPIO58", + MTK_EINT_FUNCTION(0, 58), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO58"), + MTK_FUNCTION(1, "BPI_BUS11") + ), + MTK_PIN( + 59, "GPIO59", + MTK_EINT_FUNCTION(0, 59), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO59"), + MTK_FUNCTION(1, "BPI_BUS12") + ), + MTK_PIN( + 60, "GPIO60", + MTK_EINT_FUNCTION(0, 60), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO60"), + MTK_FUNCTION(1, "BPI_BUS13") + ), + MTK_PIN( + 61, "GPIO61", + MTK_EINT_FUNCTION(0, 61), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO61"), + MTK_FUNCTION(1, "BPI_BUS14") + ), + MTK_PIN( + 62, "GPIO62", + MTK_EINT_FUNCTION(0, 62), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO62"), + MTK_FUNCTION(1, "RFIC1_BSI_CK") + ), + MTK_PIN( + 63, "GPIO63", + MTK_EINT_FUNCTION(0, 63), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO63"), + MTK_FUNCTION(1, "RFIC1_BSI_D0") + ), + 
MTK_PIN( + 64, "GPIO64", + MTK_EINT_FUNCTION(0, 64), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO64"), + MTK_FUNCTION(1, "RFIC1_BSI_D1") + ), + MTK_PIN( + 65, "GPIO65", + MTK_EINT_FUNCTION(0, 65), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO65"), + MTK_FUNCTION(1, "RFIC1_BSI_D2") + ), + MTK_PIN( + 66, "GPIO66", + MTK_EINT_FUNCTION(0, 66), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO66"), + MTK_FUNCTION(1, "RFIC1_BSI_CS") + ), + MTK_PIN( + 67, "GPIO67", + MTK_EINT_FUNCTION(0, 67), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO67"), + MTK_FUNCTION(1, "TD_TXBPI") + ), + MTK_PIN( + 68, "GPIO68", + MTK_EINT_FUNCTION(0, 68), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO68"), + MTK_FUNCTION(1, "RFIC0_BSI_CK") + ), + MTK_PIN( + 69, "GPIO69", + MTK_EINT_FUNCTION(0, 69), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO69"), + MTK_FUNCTION(1, "RFIC0_BSI_D0") + ), + MTK_PIN( + 70, "GPIO70", + MTK_EINT_FUNCTION(0, 70), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO70"), + MTK_FUNCTION(1, "RFIC0_BSI_D1") + ), + MTK_PIN( + 71, "GPIO71", + MTK_EINT_FUNCTION(0, 71), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO71"), + MTK_FUNCTION(1, "RFIC0_BSI_D2") + ), + MTK_PIN( + 72, "GPIO72", + MTK_EINT_FUNCTION(0, 72), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO72"), + MTK_FUNCTION(1, "RFIC0_BSI_CS") + ), + MTK_PIN( + 73, "GPIO73", + MTK_EINT_FUNCTION(0, 73), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO73"), + MTK_FUNCTION(1, "MISC_BSI_DO") + ), + MTK_PIN( + 74, "GPIO74", + MTK_EINT_FUNCTION(0, 74), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO74"), + MTK_FUNCTION(1, "MISC_BSI_CK") + ), + MTK_PIN( + 75, "GPIO75", + MTK_EINT_FUNCTION(0, 75), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO75"), + MTK_FUNCTION(1, "MISC_BSI_CS0B"), + MTK_FUNCTION(2, "MIPI1_SCLK") + ), + MTK_PIN( + 76, "GPIO76", + MTK_EINT_FUNCTION(0, 76), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO76"), + MTK_FUNCTION(1, "MISC_BSI_CS1B") + ), + MTK_PIN( + 77, "GPIO77", + MTK_EINT_FUNCTION(0, 77), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO77"), + MTK_FUNCTION(1, "MISC_BSI_DI"), + MTK_FUNCTION(2, "MIPI1_SDATA") + ), + MTK_PIN( + 78, "GPIO78", + MTK_EINT_FUNCTION(0, 78), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO78"), + MTK_FUNCTION(1, "LTE_TXBPI") + ), + MTK_PIN( + 79, "GPIO79", + MTK_EINT_FUNCTION(0, 79), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO79"), + MTK_FUNCTION(1, "BPI_BUS15") + ), + MTK_PIN( + 80, "GPIO80", + MTK_EINT_FUNCTION(0, 80), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO80"), + MTK_FUNCTION(1, "BPI_BUS16") + ), + MTK_PIN( + 81, "GPIO81", + MTK_EINT_FUNCTION(0, 81), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO81"), + MTK_FUNCTION(1, "BPI_BUS17") + ), + MTK_PIN( + 82, "GPIO82", + MTK_EINT_FUNCTION(0, 82), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO82"), + MTK_FUNCTION(1, "BPI_BUS18") + ), + MTK_PIN( + 83, "GPIO83", + MTK_EINT_FUNCTION(0, 83), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO83"), + MTK_FUNCTION(1, "BPI_BUS19") + ), + MTK_PIN( + 84, "GPIO84", + MTK_EINT_FUNCTION(0, 84), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO84"), + MTK_FUNCTION(1, "BPI_BUS20") + ), + MTK_PIN( + 85, "GPIO85", + MTK_EINT_FUNCTION(0, 85), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO85"), + MTK_FUNCTION(1, "BPI_BUS21") + ), + MTK_PIN( + 86, "GPIO86", + MTK_EINT_FUNCTION(0, 86), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO86"), + MTK_FUNCTION(1, "BPI_BUS22") + ), + MTK_PIN( + 87, "GPIO87", + MTK_EINT_FUNCTION(0, 87), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO87"), + MTK_FUNCTION(1, "BPI_BUS23") + ), + MTK_PIN( + 88, "GPIO88", + MTK_EINT_FUNCTION(0, 88), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO88"), + MTK_FUNCTION(1, "BPI_BUS24") + ), + MTK_PIN( + 89, "GPIO89", + MTK_EINT_FUNCTION(0, 89), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO89"), + MTK_FUNCTION(1, "BPI_BUS25") + ), + MTK_PIN( + 90, 
"GPIO90", + MTK_EINT_FUNCTION(0, 90), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO90"), + MTK_FUNCTION(1, "BPI_BUS26") + ), + MTK_PIN( + 91, "GPIO91", + MTK_EINT_FUNCTION(0, 91), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO91"), + MTK_FUNCTION(1, "BPI_BUS27") + ), + MTK_PIN( + 92, "GPIO92", + MTK_EINT_FUNCTION(0, 92), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO92"), + MTK_FUNCTION(1, "PCM1_CLK"), + MTK_FUNCTION(2, "I2S0_BCK"), + MTK_FUNCTION(3, "NLD6") + ), + MTK_PIN( + 93, "GPIO93", + MTK_EINT_FUNCTION(0, 93), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO93"), + MTK_FUNCTION(1, "PCM1_SYNC"), + MTK_FUNCTION(2, "I2S0_WS"), + MTK_FUNCTION(3, "NLD7") + ), + MTK_PIN( + 94, "GPIO94", + MTK_EINT_FUNCTION(0, 94), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO94"), + MTK_FUNCTION(1, "PCM1_DI"), + MTK_FUNCTION(2, "I2S0_DI"), + MTK_FUNCTION(3, "NREB") + ), + MTK_PIN( + 95, "GPIO95", + MTK_EINT_FUNCTION(0, 95), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO95"), + MTK_FUNCTION(1, "PCM1_DO"), + MTK_FUNCTION(2, "I2S0_DO"), + MTK_FUNCTION(3, "NRNB0") + ), + MTK_PIN( + 96, "GPIO96", + MTK_EINT_FUNCTION(0, 96), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO96"), + MTK_FUNCTION(1, "URXD1"), + MTK_FUNCTION(2, "UTXD1"), + MTK_FUNCTION(3, "NWEB") + ), + MTK_PIN( + 97, "GPIO97", + MTK_EINT_FUNCTION(0, 97), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO97"), + MTK_FUNCTION(1, "UTXD1"), + MTK_FUNCTION(2, "URXD1"), + MTK_FUNCTION(3, "NCEB0") + ), + MTK_PIN( + 98, "GPIO98", + MTK_EINT_FUNCTION(0, 98), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO98"), + MTK_FUNCTION(1, "URTS1"), + MTK_FUNCTION(2, "UCTS1"), + MTK_FUNCTION(3, "NALE") + ), + MTK_PIN( + 99, "GPIO99", + MTK_EINT_FUNCTION(0, 99), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO99"), + MTK_FUNCTION(1, "UCTS1"), + MTK_FUNCTION(2, "URTS1"), + MTK_FUNCTION(3, "NCLE") + ), + MTK_PIN( + 100, "GPIO100", + MTK_EINT_FUNCTION(0, 100), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO100"), + MTK_FUNCTION(1, "MSDC2_DAT0"), + MTK_FUNCTION(2, "URXD1"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "SDA4") + ), + MTK_PIN( + 101, "GPIO101", + MTK_EINT_FUNCTION(0, 101), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO101"), + MTK_FUNCTION(1, "MSDC2_DAT1"), + MTK_FUNCTION(2, "UTXD1"), + MTK_FUNCTION(4, "SCL4") + ), + MTK_PIN( + 102, "GPIO102", + MTK_EINT_FUNCTION(0, 102), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO102"), + MTK_FUNCTION(1, "MSDC2_DAT2"), + MTK_FUNCTION(2, "URTS1"), + MTK_FUNCTION(3, "UTXD0"), + MTK_FUNCTION(5, "PWM0"), + MTK_FUNCTION(6, "SPI_CK_1") + ), + MTK_PIN( + 103, "GPIO103", + MTK_EINT_FUNCTION(0, 103), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO103"), + MTK_FUNCTION(1, "MSDC2_DAT3"), + MTK_FUNCTION(2, "UCTS1"), + MTK_FUNCTION(3, "URXD0"), + MTK_FUNCTION(5, "PWM1"), + MTK_FUNCTION(6, "SPI_MI_1") + ), + MTK_PIN( + 104, "GPIO104", + MTK_EINT_FUNCTION(0, 104), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO104"), + MTK_FUNCTION(1, "MSDC2_CLK"), + MTK_FUNCTION(2, "NLD4"), + MTK_FUNCTION(3, "UTXD3"), + MTK_FUNCTION(4, "SDA3"), + MTK_FUNCTION(5, "PWM2"), + MTK_FUNCTION(6, "SPI_MO_1") + ), + MTK_PIN( + 105, "GPIO105", + MTK_EINT_FUNCTION(0, 105), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO105"), + MTK_FUNCTION(1, "MSDC2_CMD"), + MTK_FUNCTION(2, "NLD5"), + MTK_FUNCTION(3, "URXD3"), + MTK_FUNCTION(4, "SCL3"), + MTK_FUNCTION(5, "PWM3"), + MTK_FUNCTION(6, "SPI_CS_1") + ), + MTK_PIN( + 106, "GPIO106", + MTK_EINT_FUNCTION(0, 106), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO106"), + MTK_FUNCTION(1, "LCM_RST") + ), + MTK_PIN( + 107, "GPIO107", + MTK_EINT_FUNCTION(0, 107), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO107"), + MTK_FUNCTION(1, "DSI_TE") + ), + MTK_PIN( + 108, "GPIO108", + MTK_EINT_FUNCTION(0, 108), + DRV_GRP2, + 
MTK_FUNCTION(0, "GPIO108"), + MTK_FUNCTION(1, "JTMS"), + MTK_FUNCTION(2, "MFG_JTAG_TMS"), + MTK_FUNCTION(3, "TDD_TMS"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TMS"), + MTK_FUNCTION(6, "DFD_TMS") + ), + MTK_PIN( + 109, "GPIO109", + MTK_EINT_FUNCTION(0, 109), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO109"), + MTK_FUNCTION(1, "JTCK"), + MTK_FUNCTION(2, "MFG_JTAG_TCK"), + MTK_FUNCTION(3, "TDD_TCK"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TCK"), + MTK_FUNCTION(6, "DFD_TCK") + ), + MTK_PIN( + 110, "GPIO110", + MTK_EINT_FUNCTION(0, 110), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO110"), + MTK_FUNCTION(1, "JTDI"), + MTK_FUNCTION(2, "MFG_JTAG_TDI"), + MTK_FUNCTION(3, "TDD_TDI"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDI"), + MTK_FUNCTION(6, "DFD_TDI") + ), + MTK_PIN( + 111, "GPIO111", + MTK_EINT_FUNCTION(0, 111), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO111"), + MTK_FUNCTION(1, "JTDO"), + MTK_FUNCTION(2, "MFG_JTAG_TDO"), + MTK_FUNCTION(3, "TDD_TDO"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDO"), + MTK_FUNCTION(6, "DFD_TDO") + ), + MTK_PIN( + 112, "GPIO112", + MTK_EINT_FUNCTION(0, 112), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO112"), + MTK_FUNCTION(1, "JTRST_B"), + MTK_FUNCTION(2, "MFG_JTAG_TRSTN"), + MTK_FUNCTION(3, "TDD_TRSTN"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TRST"), + MTK_FUNCTION(6, "DFD_NTRST") + ), + MTK_PIN( + 113, "GPIO113", + MTK_EINT_FUNCTION(0, 113), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO113"), + MTK_FUNCTION(1, "URXD0"), + MTK_FUNCTION(2, "UTXD0"), + MTK_FUNCTION(3, "MD_URXD"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_WS") + ), + MTK_PIN( + 114, "GPIO114", + MTK_EINT_FUNCTION(0, 114), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO114"), + MTK_FUNCTION(1, "UTXD0"), + MTK_FUNCTION(2, "URXD0"), + MTK_FUNCTION(3, "MD_UTXD"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_BCK") + ), + MTK_PIN( + 115, "GPIO115", + MTK_EINT_FUNCTION(0, 115), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO115"), + MTK_FUNCTION(1, "URTS0"), + MTK_FUNCTION(2, "UCTS0"), + MTK_FUNCTION(3, "MD_URXD"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_MCK") + ), + MTK_PIN( + 116, "GPIO116", + MTK_EINT_FUNCTION(0, 116), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO116"), + MTK_FUNCTION(1, "UCTS0"), + MTK_FUNCTION(2, "URTS0"), + MTK_FUNCTION(3, "MD_UTXD"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_DI_1") + ), + MTK_PIN( + 117, "GPIO117", + MTK_EINT_FUNCTION(0, 117), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO117"), + MTK_FUNCTION(1, "URXD3"), + MTK_FUNCTION(2, "UTXD3"), + MTK_FUNCTION(3, "MD_URXD"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "TDD_TXD") + ), + MTK_PIN( + 118, "GPIO118", + MTK_EINT_FUNCTION(0, 118), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO118"), + MTK_FUNCTION(1, "UTXD3"), + MTK_FUNCTION(2, "URXD3"), + MTK_FUNCTION(3, "MD_UTXD"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "TDD_TXD") + ), + MTK_PIN( + 119, "GPIO119", + MTK_EINT_FUNCTION(0, 119), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO119"), + MTK_FUNCTION(1, "KROW0") + ), + MTK_PIN( + 120, "GPIO120", + MTK_EINT_FUNCTION(0, 120), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO120"), + MTK_FUNCTION(1, "KROW1"), + MTK_FUNCTION(3, "PWM6") + ), + MTK_PIN( + 121, "GPIO121", + MTK_EINT_FUNCTION(0, 121), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO121"), + MTK_FUNCTION(1, "KROW2"), + MTK_FUNCTION(2, "IRDA_PDN"), + 
MTK_FUNCTION(3, "I2S1_DO_1"), + MTK_FUNCTION(4, "USB_DRVVBUS"), + MTK_FUNCTION(5, "SPI_CK_2"), + MTK_FUNCTION(6, "PWM4") + ), + MTK_PIN( + 122, "GPIO122", + MTK_EINT_FUNCTION(0, 122), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO122"), + MTK_FUNCTION(1, "KCOL0") + ), + MTK_PIN( + 123, "GPIO123", + MTK_EINT_FUNCTION(0, 123), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO123"), + MTK_FUNCTION(1, "KCOL1"), + MTK_FUNCTION(2, "IRDA_RXD"), + MTK_FUNCTION(3, "I2S2_DI_2"), + MTK_FUNCTION(4, "PWM5") + ), + MTK_PIN( + 124, "GPIO124", + MTK_EINT_FUNCTION(0, 124), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO124"), + MTK_FUNCTION(1, "KCOL2"), + MTK_FUNCTION(2, "IRDA_TXD"), + MTK_FUNCTION(3, "I2S1_DO_2"), + MTK_FUNCTION(4, "USB_DRVVBUS"), + MTK_FUNCTION(5, "SPI_MI_2"), + MTK_FUNCTION(6, "PWM3") + ), + MTK_PIN( + 125, "GPIO125", + MTK_EINT_FUNCTION(0, 125), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO125"), + MTK_FUNCTION(1, "SDA1") + ), + MTK_PIN( + 126, "GPIO126", + MTK_EINT_FUNCTION(0, 126), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO126"), + MTK_FUNCTION(1, "SCL1") + ), + MTK_PIN( + 127, "GPIO127", + MTK_EINT_FUNCTION(1, 127), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO127"), + MTK_FUNCTION(1, "MD_EINT1"), + MTK_FUNCTION(2, "DISP_PWM1"), + MTK_FUNCTION(3, "SPI_MO_2") + ), + MTK_PIN( + 128, "GPIO128", + MTK_EINT_FUNCTION(1, 128), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO128"), + MTK_FUNCTION(1, "MD_EINT2"), + MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(3, "SPI_CS_2") + ), + MTK_PIN( + 129, "GPIO129", + MTK_EINT_FUNCTION(0, 129), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO129"), + MTK_FUNCTION(1, "I2S3_WS"), + MTK_FUNCTION(2, "I2S2_WS"), + MTK_FUNCTION(3, "PWM0") + ), + MTK_PIN( + 130, "GPIO130", + MTK_EINT_FUNCTION(0, 130), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO130"), + MTK_FUNCTION(1, "I2S3_BCK"), + MTK_FUNCTION(2, "I2S2_BCK"), + MTK_FUNCTION(3, "PWM1") + ), + MTK_PIN( + 131, "GPIO131", + MTK_EINT_FUNCTION(0, 131), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO131"), + MTK_FUNCTION(1, "I2S3_MCK"), + MTK_FUNCTION(2, "I2S2_MCK"), + MTK_FUNCTION(3, "PWM2") + ), + MTK_PIN( + 132, "GPIO132", + MTK_EINT_FUNCTION(0, 132), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO132"), + MTK_FUNCTION(1, "I2S3_DO_1"), + MTK_FUNCTION(2, "I2S2_DI_1"), + MTK_FUNCTION(3, "PWM3") + ), + MTK_PIN( + 133, "GPIO133", + MTK_EINT_FUNCTION(0, 133), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO133"), + MTK_FUNCTION(1, "I2S3_DO_2"), + MTK_FUNCTION(2, "I2S2_DI_2"), + MTK_FUNCTION(3, "PWM4") + ), + MTK_PIN( + 134, "GPIO134", + MTK_EINT_FUNCTION(0, 134), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO134"), + MTK_FUNCTION(1, "I2S3_DO_3"), + MTK_FUNCTION(2, "DISP_PWM1"), + MTK_FUNCTION(3, "I2S1_DO_1"), + MTK_FUNCTION(4, "PWM5") + ), + MTK_PIN( + 135, "GPIO135", + MTK_EINT_FUNCTION(0, 135), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO135"), + MTK_FUNCTION(1, "I2S3_DO_4"), + MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(3, "I2S1_DO_2"), + MTK_FUNCTION(4, "PWM6") + ), + MTK_PIN( + 136, "GPIO136", + MTK_EINT_FUNCTION(0, 136), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO136"), + MTK_FUNCTION(1, "SDA3") + ), + MTK_PIN( + 137, "GPIO137", + MTK_EINT_FUNCTION(0, 137), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO137"), + MTK_FUNCTION(1, "SCL3") + ), + MTK_PIN( + 138, "GPIO138", + MTK_EINT_FUNCTION(0, 138), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO138"), + MTK_FUNCTION(1, "DPI_CK"), + MTK_FUNCTION(2, "NLD6"), + MTK_FUNCTION(3, "UTXD0"), + MTK_FUNCTION(4, "USB_DRVVBUS"), + MTK_FUNCTION(5, "IRDA_PDN") + ), + MTK_PIN( + 139, "GPIO139", + MTK_EINT_FUNCTION(0, 139), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO139"), + MTK_FUNCTION(1, "DPI_DE"), + MTK_FUNCTION(2, "NLD7"), + MTK_FUNCTION(3, "URXD0"), + 
MTK_FUNCTION(4, "MD_UTXD"), + MTK_FUNCTION(5, "IRDA_RXD") + ), + MTK_PIN( + 140, "GPIO140", + MTK_EINT_FUNCTION(0, 140), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO140"), + MTK_FUNCTION(1, "DPI_D0"), + MTK_FUNCTION(2, "NREB"), + MTK_FUNCTION(3, "UCTS0"), + MTK_FUNCTION(4, "MD_URXD"), + MTK_FUNCTION(5, "IRDA_TXD") + ), + MTK_PIN( + 141, "GPIO141", + MTK_EINT_FUNCTION(0, 141), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO141"), + MTK_FUNCTION(1, "DPI_D1"), + MTK_FUNCTION(2, "NRNB0"), + MTK_FUNCTION(3, "URTS0"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "I2S2_WS") + ), + MTK_PIN( + 142, "GPIO142", + MTK_EINT_FUNCTION(0, 142), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO142"), + MTK_FUNCTION(1, "DPI_D2"), + MTK_FUNCTION(2, "NWEB"), + MTK_FUNCTION(3, "UTXD1"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "I2S2_BCK") + ), + MTK_PIN( + 143, "GPIO143", + MTK_EINT_FUNCTION(0, 143), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO143"), + MTK_FUNCTION(1, "DPI_D3"), + MTK_FUNCTION(2, "NCEB0"), + MTK_FUNCTION(3, "URXD1"), + MTK_FUNCTION(4, "TDD_TXD"), + MTK_FUNCTION(5, "I2S2_MCK") + ), + MTK_PIN( + 144, "GPIO144", + MTK_EINT_FUNCTION(0, 144), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO144"), + MTK_FUNCTION(1, "DPI_D4"), + MTK_FUNCTION(2, "NALE"), + MTK_FUNCTION(3, "UCTS1"), + MTK_FUNCTION(4, "TDD_TMS"), + MTK_FUNCTION(5, "I2S2_DI_1") + ), + MTK_PIN( + 145, "GPIO145", + MTK_EINT_FUNCTION(0, 145), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO145"), + MTK_FUNCTION(1, "DPI_D5"), + MTK_FUNCTION(2, "NCLE"), + MTK_FUNCTION(3, "URTS1"), + MTK_FUNCTION(4, "TDD_TCK"), + MTK_FUNCTION(5, "I2S2_DI_2") + ), + MTK_PIN( + 146, "GPIO146", + MTK_EINT_FUNCTION(0, 146), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO146"), + MTK_FUNCTION(1, "DPI_D6"), + MTK_FUNCTION(2, "NLD8"), + MTK_FUNCTION(3, "UTXD2"), + MTK_FUNCTION(4, "TDD_TDI") + ), + MTK_PIN( + 147, "GPIO147", + MTK_EINT_FUNCTION(0, 147), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO147"), + MTK_FUNCTION(1, "DPI_D7"), + MTK_FUNCTION(2, "NLD9"), + MTK_FUNCTION(3, "URXD2"), + MTK_FUNCTION(4, "TDD_TDO"), + MTK_FUNCTION(5, "I2S1_WS") + ), + MTK_PIN( + 148, "GPIO148", + MTK_EINT_FUNCTION(0, 148), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO148"), + MTK_FUNCTION(1, "DPI_D8"), + MTK_FUNCTION(2, "NLD10"), + MTK_FUNCTION(3, "UCTS2"), + MTK_FUNCTION(4, "TDD_TRSTN"), + MTK_FUNCTION(5, "I2S1_BCK") + ), + MTK_PIN( + 149, "GPIO149", + MTK_EINT_FUNCTION(0, 149), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO149"), + MTK_FUNCTION(1, "DPI_D9"), + MTK_FUNCTION(2, "NLD11"), + MTK_FUNCTION(3, "URTS2"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"), + MTK_FUNCTION(5, "I2S1_MCK") + ), + MTK_PIN( + 150, "GPIO150", + MTK_EINT_FUNCTION(0, 150), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO150"), + MTK_FUNCTION(1, "DPI_D10"), + MTK_FUNCTION(2, "NLD12"), + MTK_FUNCTION(3, "UTXD3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"), + MTK_FUNCTION(5, "I2S1_DO_1") + ), + MTK_PIN( + 151, "GPIO151", + MTK_EINT_FUNCTION(0, 151), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO151"), + MTK_FUNCTION(1, "DPI_D11"), + MTK_FUNCTION(2, "NLD13"), + MTK_FUNCTION(3, "URXD3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"), + MTK_FUNCTION(5, "I2S1_DO_2") + ), + MTK_PIN( + 152, "GPIO152", + MTK_EINT_FUNCTION(0, 152), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO152"), + MTK_FUNCTION(1, "DPI_HSYNC"), + MTK_FUNCTION(2, "NLD14"), + MTK_FUNCTION(3, "UCTS3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"), + MTK_FUNCTION(5, "DSI1_TE") + ), + MTK_PIN( + 153, "GPIO153", + MTK_EINT_FUNCTION(0, 153), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO153"), + MTK_FUNCTION(1, "DPI_VSYNC"), + MTK_FUNCTION(2, "NLD15"), + MTK_FUNCTION(3, "URTS3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"), 
+ MTK_FUNCTION(5, "DISP_PWM1") + ), + MTK_PIN( + 154, "GPIO154", + MTK_EINT_FUNCTION(0, 154), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO154"), + MTK_FUNCTION(1, "MSDC0_DAT0"), + MTK_FUNCTION(2, "NLD8") + ), + MTK_PIN( + 155, "GPIO155", + MTK_EINT_FUNCTION(0, 155), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO155"), + MTK_FUNCTION(1, "MSDC0_DAT1"), + MTK_FUNCTION(2, "NLD9") + ), + MTK_PIN( + 156, "GPIO156", + MTK_EINT_FUNCTION(0, 156), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO156"), + MTK_FUNCTION(1, "MSDC0_DAT2"), + MTK_FUNCTION(2, "NLD10") + ), + MTK_PIN( + 157, "GPIO157", + MTK_EINT_FUNCTION(0, 157), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO157"), + MTK_FUNCTION(1, "MSDC0_DAT3"), + MTK_FUNCTION(2, "NLD11") + ), + MTK_PIN( + 158, "GPIO158", + MTK_EINT_FUNCTION(0, 158), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO158"), + MTK_FUNCTION(1, "MSDC0_DAT4"), + MTK_FUNCTION(2, "NLD12") + ), + MTK_PIN( + 159, "GPIO159", + MTK_EINT_FUNCTION(0, 159), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO159"), + MTK_FUNCTION(1, "MSDC0_DAT5"), + MTK_FUNCTION(2, "NLD13") + ), + MTK_PIN( + 160, "GPIO160", + MTK_EINT_FUNCTION(0, 160), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO160"), + MTK_FUNCTION(1, "MSDC0_DAT6"), + MTK_FUNCTION(2, "NLD14") + ), + MTK_PIN( + 161, "GPIO161", + MTK_EINT_FUNCTION(0, 161), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO161"), + MTK_FUNCTION(1, "MSDC0_DAT7"), + MTK_FUNCTION(2, "NLD15") + ), + MTK_PIN( + 162, "GPIO162", + MTK_EINT_FUNCTION(0, 162), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO162"), + MTK_FUNCTION(1, "MSDC0_CMD") + ), + MTK_PIN( + 163, "GPIO163", + MTK_EINT_FUNCTION(0, 163), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO163"), + MTK_FUNCTION(1, "MSDC0_CLK") + ), + MTK_PIN( + 164, "GPIO164", + MTK_EINT_FUNCTION(0, 164), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO164"), + MTK_FUNCTION(1, "MSDC0_DSL") + ), + MTK_PIN( + 165, "GPIO165", + MTK_EINT_FUNCTION(0, 165), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO165"), + MTK_FUNCTION(1, "MSDC0_RSTB") + ), + MTK_PIN( + 166, "GPIO166", + MTK_EINT_FUNCTION(0, 166), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO166"), + MTK_FUNCTION(1, "SPI_CK_0"), + MTK_FUNCTION(3, "PWM0") + ), + MTK_PIN( + 167, "GPIO167", + MTK_EINT_FUNCTION(0, 167), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO167"), + MTK_FUNCTION(1, "SPI_MI_0"), + MTK_FUNCTION(3, "PWM1"), + MTK_FUNCTION(4, "SPI_MO_0") + ), + MTK_PIN( + 168, "GPIO168", + MTK_EINT_FUNCTION(2, 168), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO168"), + MTK_FUNCTION(1, "SPI_MO_0"), + MTK_FUNCTION(2, "MD_EINT3"), + MTK_FUNCTION(3, "PWM2"), + MTK_FUNCTION(4, "SPI_MI_0") + ), + MTK_PIN( + 169, "GPIO169", + MTK_EINT_FUNCTION(2, 169), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO169"), + MTK_FUNCTION(1, "SPI_CS_0"), + MTK_FUNCTION(2, "MD_EINT4"), + MTK_FUNCTION(3, "PWM3") + ), + MTK_PIN( + 170, "GPIO170", + MTK_EINT_FUNCTION(0, 170), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO170"), + MTK_FUNCTION(1, "MSDC1_CMD") + ), + MTK_PIN( + 171, "GPIO171", + MTK_EINT_FUNCTION(0, 171), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO171"), + MTK_FUNCTION(1, "MSDC1_DAT0") + ), + MTK_PIN( + 172, "GPIO172", + MTK_EINT_FUNCTION(0, 172), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO172"), + MTK_FUNCTION(1, "MSDC1_DAT1") + ), + MTK_PIN( + 173, "GPIO173", + MTK_EINT_FUNCTION(0, 173), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO173"), + MTK_FUNCTION(1, "MSDC1_DAT2") + ), + MTK_PIN( + 174, "GPIO174", + MTK_EINT_FUNCTION(0, 174), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO174"), + MTK_FUNCTION(1, "MSDC1_DAT3") + ), + MTK_PIN( + 175, "GPIO175", + MTK_EINT_FUNCTION(0, 175), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO175"), + MTK_FUNCTION(1, "MSDC1_CLK") + ), + MTK_PIN( + 176, "GPIO176", + MTK_EINT_FUNCTION(0, 176), + 
DRV_GRP2, + MTK_FUNCTION(0, "GPIO176"), + MTK_FUNCTION(1, "PWRAP_SPIMI"), + MTK_FUNCTION(2, "PWRAP_SPIMO") + ), + MTK_PIN( + 177, "GPIO177", + MTK_EINT_FUNCTION(0, 177), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO177"), + MTK_FUNCTION(1, "PWRAP_SPIMO"), + MTK_FUNCTION(2, "PWRAP_SPIMI") + ), + MTK_PIN( + 178, "GPIO178", + MTK_EINT_FUNCTION(0, 178), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO178"), + MTK_FUNCTION(1, "PWRAP_SPICK") + ), + MTK_PIN( + 179, "GPIO179", + MTK_EINT_FUNCTION(0, 179), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO179"), + MTK_FUNCTION(1, "PWRAP_SPICS") + ), + MTK_PIN( + 180, "GPIO180", + MTK_EINT_FUNCTION(0, 180), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO180"), + MTK_FUNCTION(1, "AUD_CLK_MOSI"), + MTK_FUNCTION(2, "I2S1_WS"), + MTK_FUNCTION(3, "I2S2_WS"), + MTK_FUNCTION(4, "I2S0_WS") + ), + MTK_PIN( + 181, "GPIO181", + MTK_EINT_FUNCTION(0, 181), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO181"), + MTK_FUNCTION(1, "AUD_DAT_MISO_1"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(4, "I2S0_BCK") + ), + MTK_PIN( + 182, "GPIO182", + MTK_EINT_FUNCTION(0, 182), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO182"), + MTK_FUNCTION(1, "AUD_DAT_MOSI_1"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "I2S2_MCK"), + MTK_FUNCTION(4, "I2S0_MCK") + ), + MTK_PIN( + 183, "GPIO183", + MTK_EINT_FUNCTION(0, 183), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO183"), + MTK_FUNCTION(1, "AUD_DAT_MISO_2"), + MTK_FUNCTION(2, "I2S1_DO_1"), + MTK_FUNCTION(3, "I2S2_DI_1"), + MTK_FUNCTION(4, "I2S0_DO") + ), + MTK_PIN( + 184, "GPIO184", + MTK_EINT_FUNCTION(0, 184), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO184"), + MTK_FUNCTION(1, "AUD_DAT_MOSI_2"), + MTK_FUNCTION(2, "I2S1_DO_2"), + MTK_FUNCTION(3, "I2S2_DI_2"), + MTK_FUNCTION(4, "I2S0_DI") + ), + MTK_PIN( + 185, "GPIO185", + MTK_EINT_FUNCTION(0, 185), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO185"), + MTK_FUNCTION(1, "RTC32K_CK") + ), + MTK_PIN( + 186, "GPIO186", + MTK_EINT_FUNCTION(0, 186), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO186"), + MTK_FUNCTION(1, "DISP_PWM0"), + MTK_FUNCTION(2, "DISP_PWM1") + ), + MTK_PIN( + 187, "GPIO187", + MTK_EINT_FUNCTION(0, 187), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO187"), + MTK_FUNCTION(1, "SRCLKENAI") + ), + MTK_PIN( + 188, "GPIO188", + MTK_EINT_FUNCTION(0, 188), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO188"), + MTK_FUNCTION(1, "SRCLKENAI2") + ), + MTK_PIN( + 189, "GPIO189", + MTK_EINT_FUNCTION(0, 189), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO189"), + MTK_FUNCTION(1, "SRCLKENA0") + ), + MTK_PIN( + 190, "GPIO190", + MTK_EINT_FUNCTION(0, 190), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO190"), + MTK_FUNCTION(1, "SRCLKENA1") + ), + MTK_PIN( + 191, "GPIO191", + MTK_EINT_FUNCTION(0, 191), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO191"), + MTK_FUNCTION(1, "WATCHDOG_AO") + ), + MTK_PIN( + 192, "GPIO192", + MTK_EINT_FUNCTION(0, 192), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO192"), + MTK_FUNCTION(1, "I2S0_WS"), + MTK_FUNCTION(2, "I2S1_WS"), + MTK_FUNCTION(3, "I2S2_WS"), + MTK_FUNCTION(4, "NCEB1") + ), + MTK_PIN( + 193, "GPIO193", + MTK_EINT_FUNCTION(0, 193), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO193"), + MTK_FUNCTION(1, "I2S0_BCK"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(4, "NRNB1") + ), + MTK_PIN( + 194, "GPIO194", + MTK_EINT_FUNCTION(0, 194), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO194"), + MTK_FUNCTION(1, "I2S0_MCK"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "I2S2_MCK") + ), + MTK_PIN( + 195, "GPIO195", + MTK_EINT_FUNCTION(0, 195), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO195"), + MTK_FUNCTION(1, "I2S0_DO"), + MTK_FUNCTION(2, "I2S1_DO_1"), + MTK_FUNCTION(3, "I2S2_DI_1") + 
), + MTK_PIN( + 196, "GPIO196", + MTK_EINT_FUNCTION(0, 196), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO196"), + MTK_FUNCTION(1, "I2S0_DI"), + MTK_FUNCTION(2, "I2S1_DO_2"), + MTK_FUNCTION(3, "I2S2_DI_2") + ), +}; + +#endif /* __PINCTRL_MTK_MT6795_H */ diff --git a/drivers/pinctrl/mvebu/pinctrl-ac5.c b/drivers/pinctrl/mvebu/pinctrl-ac5.c new file mode 100644 index 0000000000..292633e611 --- /dev/null +++ b/drivers/pinctrl/mvebu/pinctrl-ac5.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Marvell ac5 pinctrl driver based on mvebu pinctrl core + * + * Copyright (C) 2021 Marvell + * + * Noam Liron + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/platform_device.h> + +#include "pinctrl-mvebu.h" + +static struct mvebu_mpp_mode ac5_mpp_modes[] = { + MPP_MODE(0, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d0"), + MPP_FUNCTION(2, "nand", "io4")), + MPP_MODE(1, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d1"), + MPP_FUNCTION(2, "nand", "io3")), + MPP_MODE(2, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d2"), + MPP_FUNCTION(2, "nand", "io2")), + MPP_MODE(3, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d3"), + MPP_FUNCTION(2, "nand", "io7")), + MPP_MODE(4, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d4"), + MPP_FUNCTION(2, "nand", "io6"), + MPP_FUNCTION(3, "uart3", "txd"), + MPP_FUNCTION(4, "uart2", "txd")), + MPP_MODE(5, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d5"), + MPP_FUNCTION(2, "nand", "io5"), + MPP_FUNCTION(3, "uart3", "rxd"), + MPP_FUNCTION(4, "uart2", "rxd")), + MPP_MODE(6, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d6"), + MPP_FUNCTION(2, "nand", "io0"), + MPP_FUNCTION(3, "i2c1", "sck")), + MPP_MODE(7, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d7"), + MPP_FUNCTION(2, "nand", "io1"), + MPP_FUNCTION(3, "i2c1", "sda")), + MPP_MODE(8, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "clk"), + MPP_FUNCTION(2, "nand", "wen")), + MPP_MODE(9, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "cmd"), + MPP_FUNCTION(2, "nand", "ale")), + MPP_MODE(10, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "ds"), + MPP_FUNCTION(2, "nand", "cle")), + MPP_MODE(11, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "rst"), + MPP_FUNCTION(2, "nand", "cen")), + MPP_MODE(12, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "clk")), + MPP_MODE(13, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "csn")), + MPP_MODE(14, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "mosi")), + MPP_MODE(15, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "miso")), + MPP_MODE(16, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "wpn"), + MPP_FUNCTION(2, "nand", "ren"), + MPP_FUNCTION(3, "uart1", "txd")), + MPP_MODE(17, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "hold"), + MPP_FUNCTION(2, "nand", "rb"), + MPP_FUNCTION(3, "uart1", "rxd")), + MPP_MODE(18, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "tsen_int", NULL), + MPP_FUNCTION(2, "uart2", "rxd"), + MPP_FUNCTION(3, "wd_int", NULL)), + MPP_MODE(19, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "dev_init_done", NULL), + MPP_FUNCTION(2, "uart2", "txd")), + MPP_MODE(20, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(2, "i2c1", "sck"), + MPP_FUNCTION(3, "spi1", "clk"), + MPP_FUNCTION(4, "uart3", "txd")), + MPP_MODE(21, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(2, "i2c1", "sda"), + MPP_FUNCTION(3, "spi1", 
"csn"), + MPP_FUNCTION(4, "uart3", "rxd")), + MPP_MODE(22, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(3, "spi1", "mosi")), + MPP_MODE(23, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(3, "spi1", "miso")), + MPP_MODE(24, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "wd_int", NULL), + MPP_FUNCTION(2, "uart2", "txd"), + MPP_FUNCTION(3, "uartsd", "txd")), + MPP_MODE(25, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "int_out", NULL), + MPP_FUNCTION(2, "uart2", "rxd"), + MPP_FUNCTION(3, "uartsd", "rxd")), + MPP_MODE(26, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "i2c0", "sck"), + MPP_FUNCTION(2, "ptp", "clk1"), + MPP_FUNCTION(3, "uart3", "txd")), + MPP_MODE(27, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "i2c0", "sda"), + MPP_FUNCTION(2, "ptp", "pulse"), + MPP_FUNCTION(3, "uart3", "rxd")), + MPP_MODE(28, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "uart3", "txd")), + MPP_MODE(29, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "uart3", "rxd")), + MPP_MODE(30, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "ge", "mdio")), + MPP_MODE(31, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "ge", "mdio")), + MPP_MODE(32, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "uart0", "txd")), + MPP_MODE(33, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "uart0", "rxd"), + MPP_FUNCTION(2, "ptp", "clk1"), + MPP_FUNCTION(3, "ptp", "pulse")), + MPP_MODE(34, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ge", "mdio"), + MPP_FUNCTION(2, "uart3", "rxd")), + MPP_MODE(35, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ge", "mdio"), + MPP_FUNCTION(2, "uart3", "txd"), + MPP_FUNCTION(3, "pcie", "rstoutn")), + MPP_MODE(36, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "clk0_tp"), + MPP_FUNCTION(2, "ptp", "clk1_tp")), + MPP_MODE(37, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "pulse_tp"), + MPP_FUNCTION(2, "wd_int", NULL)), + MPP_MODE(38, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "synce", "clk_out0")), + MPP_MODE(39, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "synce", "clk_out1")), + MPP_MODE(40, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "pclk_out0"), + MPP_FUNCTION(2, "ptp", "pclk_out1")), + MPP_MODE(41, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "ref_clk"), + MPP_FUNCTION(2, "ptp", "clk1"), + MPP_FUNCTION(3, "ptp", "pulse"), + MPP_FUNCTION(4, "uart2", "txd"), + MPP_FUNCTION(5, "i2c1", "sck")), + MPP_MODE(42, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "clk0"), + MPP_FUNCTION(2, "ptp", "clk1"), + MPP_FUNCTION(3, "ptp", "pulse"), + MPP_FUNCTION(4, "uart2", "rxd"), + MPP_FUNCTION(5, "i2c1", "sda")), + MPP_MODE(43, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "led", "clk")), + MPP_MODE(44, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "led", "stb")), + MPP_MODE(45, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "led", "data")), +}; + +static struct mvebu_pinctrl_soc_info ac5_pinctrl_info; + +static const struct of_device_id ac5_pinctrl_of_match[] = { + { + .compatible = "marvell,ac5-pinctrl", + }, + { }, +}; + +static const struct mvebu_mpp_ctrl ac5_mpp_controls[] = { + MPP_FUNC_CTRL(0, 45, NULL, mvebu_mmio_mpp_ctrl), }; + +static struct pinctrl_gpio_range ac5_mpp_gpio_ranges[] = { + 
MPP_GPIO_RANGE(0, 0, 0, 46), }; + +static int ac5_pinctrl_probe(struct platform_device *pdev) +{ + struct mvebu_pinctrl_soc_info *soc = &ac5_pinctrl_info; + + soc->variant = 0; /* no variants for ac5 */ + soc->controls = ac5_mpp_controls; + soc->ncontrols = ARRAY_SIZE(ac5_mpp_controls); + soc->gpioranges = ac5_mpp_gpio_ranges; + soc->ngpioranges = ARRAY_SIZE(ac5_mpp_gpio_ranges); + soc->modes = ac5_mpp_modes; + soc->nmodes = ac5_mpp_controls[0].npins; + + pdev->dev.platform_data = soc; + + return mvebu_pinctrl_simple_mmio_probe(pdev); +} + +static struct platform_driver ac5_pinctrl_driver = { + .driver = { + .name = "ac5-pinctrl", + .of_match_table = of_match_ptr(ac5_pinctrl_of_match), + }, + .probe = ac5_pinctrl_probe, +}; +builtin_platform_driver(ac5_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h new file mode 100644 index 0000000000..afbac2a6c8 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2020 Linaro Ltd. + */ +#ifndef __PINCTRL_LPASS_LPI_H__ +#define __PINCTRL_LPASS_LPI_H__ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include "../core.h" + +#define LPI_SLEW_RATE_CTL_REG 0xa000 +#define LPI_TLMM_REG_OFFSET 0x1000 +#define LPI_SLEW_RATE_MAX 0x03 +#define LPI_SLEW_BITS_SIZE 0x02 +#define LPI_SLEW_RATE_MASK GENMASK(1, 0) +#define LPI_GPIO_CFG_REG 0x00 +#define LPI_GPIO_PULL_MASK GENMASK(1, 0) +#define LPI_GPIO_FUNCTION_MASK GENMASK(5, 2) +#define LPI_GPIO_OUT_STRENGTH_MASK GENMASK(8, 6) +#define LPI_GPIO_OE_MASK BIT(9) +#define LPI_GPIO_VALUE_REG 0x04 +#define LPI_GPIO_VALUE_IN_MASK BIT(0) +#define LPI_GPIO_VALUE_OUT_MASK BIT(1) + +#define LPI_GPIO_BIAS_DISABLE 0x0 +#define LPI_GPIO_PULL_DOWN 0x1 +#define LPI_GPIO_KEEPER 0x2 +#define LPI_GPIO_PULL_UP 0x3 +#define LPI_GPIO_DS_TO_VAL(v) (v / 2 - 1) +#define LPI_NO_SLEW -1 + +#define LPI_FUNCTION(fname) \ + [LPI_MUX_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define LPI_PINGROUP(id, soff, f1, f2, f3, f4) \ + { \ + .group.name = "gpio" #id, \ + .group.pins = gpio##id##_pins, \ + .pin = id, \ + .slew_offset = soff, \ + .group.num_pins = ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + LPI_MUX_gpio, \ + LPI_MUX_##f1, \ + LPI_MUX_##f2, \ + LPI_MUX_##f3, \ + LPI_MUX_##f4, \ + }, \ + .nfuncs = 5, \ + } + +struct lpi_pingroup { + struct group_desc group; + unsigned int pin; + /* Bit offset in slew register for SoundWire pins only */ + int slew_offset; + unsigned int *funcs; + unsigned int nfuncs; +}; + +struct lpi_function { + const char *name; + const char * const *groups; + unsigned int ngroups; +}; + +struct lpi_pinctrl_variant_data { + const struct pinctrl_pin_desc *pins; + int npins; + const struct lpi_pingroup *groups; + int ngroups; + const struct lpi_function *functions; + int nfunctions; +}; + +int lpi_pinctrl_probe(struct platform_device *pdev); +int lpi_pinctrl_remove(struct platform_device *pdev); + +#endif /*__PINCTRL_LPASS_LPI_H__*/ diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c new file mode 100644 index 0000000000..6dd15b9106 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c @@ -0,0 +1,956 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2022, Kernkonzept GmbH.
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define REG_SIZE 0x1000 + +/* Each GPIO owns one REG_SIZE register window; the ctl/io/intr offsets below are relative to that window. */ +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9, \ + }, \ + .nfuncs = 10, \ + .ctl_reg = REG_SIZE * id, \ + .io_reg = 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = 0x8 + REG_SIZE * id, \ + .intr_status_reg = 0xc + REG_SIZE * id, \ + .intr_target_reg = 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 4, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } +static const struct pinctrl_pin_desc msm8909_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), +
PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "SDC1_CLK"), + PINCTRL_PIN(114, "SDC1_CMD"), + PINCTRL_PIN(115, "SDC1_DATA"), + PINCTRL_PIN(116, "SDC2_CLK"), + PINCTRL_PIN(117, "SDC2_CMD"), + PINCTRL_PIN(118, "SDC2_DATA"), + PINCTRL_PIN(119, "QDSD_CLK"), + PINCTRL_PIN(120, "QDSD_CMD"), + PINCTRL_PIN(121, "QDSD_DATA0"), + PINCTRL_PIN(122, "QDSD_DATA1"), + PINCTRL_PIN(123, "QDSD_DATA2"), + PINCTRL_PIN(124, "QDSD_DATA3"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); 
+DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); + +static const unsigned int sdc1_clk_pins[] = { 113 }; +static const unsigned int sdc1_cmd_pins[] = { 114 }; +static const unsigned int sdc1_data_pins[] = { 115 }; +static const unsigned int sdc2_clk_pins[] = { 116 }; +static const unsigned int sdc2_cmd_pins[] = { 117 }; +static const unsigned int sdc2_data_pins[] = { 118 }; +static const unsigned int qdsd_clk_pins[] = { 119 }; +static const unsigned int qdsd_cmd_pins[] = { 120 }; +static const unsigned int qdsd_data0_pins[] = { 121 }; +static const unsigned int qdsd_data1_pins[] = { 122 }; +static const unsigned int qdsd_data2_pins[] = { 123 }; +static const unsigned int qdsd_data3_pins[] = { 124 }; + +enum msm8909_functions { + msm_mux_gpio, + msm_mux_adsp_ext, + msm_mux_atest_bbrx0, + msm_mux_atest_bbrx1, + msm_mux_atest_char, + msm_mux_atest_char0, + msm_mux_atest_char1, + msm_mux_atest_char2, + msm_mux_atest_char3, + msm_mux_atest_combodac, + msm_mux_atest_gpsadc0, + msm_mux_atest_gpsadc1, + msm_mux_atest_wlan0, + msm_mux_atest_wlan1, + msm_mux_bimc_dte0, + msm_mux_bimc_dte1, + msm_mux_blsp_i2c1, + msm_mux_blsp_i2c2, + msm_mux_blsp_i2c3, + msm_mux_blsp_i2c4, + msm_mux_blsp_i2c5, + msm_mux_blsp_i2c6, + msm_mux_blsp_spi1, + msm_mux_blsp_spi1_cs1, + msm_mux_blsp_spi1_cs2, + msm_mux_blsp_spi1_cs3, + msm_mux_blsp_spi2, + msm_mux_blsp_spi2_cs1, + msm_mux_blsp_spi2_cs2, + msm_mux_blsp_spi2_cs3, + msm_mux_blsp_spi3, + msm_mux_blsp_spi3_cs1, + msm_mux_blsp_spi3_cs2, + msm_mux_blsp_spi3_cs3, + msm_mux_blsp_spi4, + msm_mux_blsp_spi5, + msm_mux_blsp_spi6, + msm_mux_blsp_uart1, + msm_mux_blsp_uart2, + msm_mux_blsp_uim1, + msm_mux_blsp_uim2, + msm_mux_cam_mclk, + msm_mux_cci_async, + msm_mux_cci_timer0, + msm_mux_cci_timer1, + msm_mux_cci_timer2, + msm_mux_cdc_pdm0, + 
msm_mux_dbg_out, + msm_mux_dmic0_clk, + msm_mux_dmic0_data, + msm_mux_ebi0_wrcdc, + msm_mux_ebi2_a, + msm_mux_ebi2_lcd, + msm_mux_ext_lpass, + msm_mux_gcc_gp1_clk_a, + msm_mux_gcc_gp1_clk_b, + msm_mux_gcc_gp2_clk_a, + msm_mux_gcc_gp2_clk_b, + msm_mux_gcc_gp3_clk_a, + msm_mux_gcc_gp3_clk_b, + msm_mux_gcc_plltest, + msm_mux_gsm0_tx, + msm_mux_ldo_en, + msm_mux_ldo_update, + msm_mux_m_voc, + msm_mux_mdp_vsync, + msm_mux_modem_tsync, + msm_mux_nav_pps, + msm_mux_nav_tsync, + msm_mux_pa_indicator, + msm_mux_pbs0, + msm_mux_pbs1, + msm_mux_pbs2, + msm_mux_pri_mi2s_data0_a, + msm_mux_pri_mi2s_data0_b, + msm_mux_pri_mi2s_data1_a, + msm_mux_pri_mi2s_data1_b, + msm_mux_pri_mi2s_mclk_a, + msm_mux_pri_mi2s_mclk_b, + msm_mux_pri_mi2s_sck_a, + msm_mux_pri_mi2s_sck_b, + msm_mux_pri_mi2s_ws_a, + msm_mux_pri_mi2s_ws_b, + msm_mux_prng_rosc, + msm_mux_pwr_crypto_enabled_a, + msm_mux_pwr_crypto_enabled_b, + msm_mux_pwr_modem_enabled_a, + msm_mux_pwr_modem_enabled_b, + msm_mux_pwr_nav_enabled_a, + msm_mux_pwr_nav_enabled_b, + msm_mux_qdss_cti_trig_in_a0, + msm_mux_qdss_cti_trig_in_a1, + msm_mux_qdss_cti_trig_in_b0, + msm_mux_qdss_cti_trig_in_b1, + msm_mux_qdss_cti_trig_out_a0, + msm_mux_qdss_cti_trig_out_a1, + msm_mux_qdss_cti_trig_out_b0, + msm_mux_qdss_cti_trig_out_b1, + msm_mux_qdss_traceclk_a, + msm_mux_qdss_tracectl_a, + msm_mux_qdss_tracedata_a, + msm_mux_qdss_tracedata_b, + msm_mux_sd_write, + msm_mux_sec_mi2s, + msm_mux_smb_int, + msm_mux_ssbi0, + msm_mux_ssbi1, + msm_mux_uim1_clk, + msm_mux_uim1_data, + msm_mux_uim1_present, + msm_mux_uim1_reset, + msm_mux_uim2_clk, + msm_mux_uim2_data, + msm_mux_uim2_present, + msm_mux_uim2_reset, + msm_mux_uim3_clk, + msm_mux_uim3_data, + msm_mux_uim3_present, + msm_mux_uim3_reset, + msm_mux_uim_batt, + msm_mux_wcss_bt, + msm_mux_wcss_fm, + msm_mux_wcss_wlan, + msm_mux__, +}; + +static const char * const adsp_ext_groups[] = { "gpio38" }; +static const char * const atest_bbrx0_groups[] = { "gpio37" }; +static const char * const atest_bbrx1_groups[] = { "gpio36" }; +static const char * const atest_char0_groups[] = { "gpio62" }; +static const char * const atest_char1_groups[] = { "gpio61" }; +static const char * const atest_char2_groups[] = { "gpio60" }; +static const char * const atest_char3_groups[] = { "gpio59" }; +static const char * const atest_char_groups[] = { "gpio63" }; +static const char * const atest_combodac_groups[] = { + "gpio32", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43", + "gpio44", "gpio45", "gpio47", "gpio48", "gpio66", "gpio81", "gpio83", + "gpio84", "gpio85", "gpio86", "gpio94", "gpio95", "gpio110" +}; +static const char * const atest_gpsadc0_groups[] = { "gpio65" }; +static const char * const atest_gpsadc1_groups[] = { "gpio79" }; +static const char * const atest_wlan0_groups[] = { "gpio96" }; +static const char * const atest_wlan1_groups[] = { "gpio97" }; +static const char * const bimc_dte0_groups[] = { "gpio6", "gpio59" }; +static const char * const bimc_dte1_groups[] = { "gpio7", "gpio60" }; +static const char * const blsp_i2c1_groups[] = { "gpio6", "gpio7" }; +static const char * const blsp_i2c2_groups[] = { "gpio111", "gpio112" }; +static const char * const blsp_i2c3_groups[] = { "gpio29", "gpio30" }; +static const char * const blsp_i2c4_groups[] = { "gpio14", "gpio15" }; +static const char * const blsp_i2c5_groups[] = { "gpio18", "gpio19" }; +static const char * const blsp_i2c6_groups[] = { "gpio10", "gpio11" }; +static const char * const blsp_spi1_cs1_groups[] = { "gpio97" }; +static const char * const 
blsp_spi1_cs2_groups[] = { "gpio37" }; +static const char * const blsp_spi1_cs3_groups[] = { "gpio65" }; +static const char * const blsp_spi1_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7" +}; +static const char * const blsp_spi2_cs1_groups[] = { "gpio98" }; +static const char * const blsp_spi2_cs2_groups[] = { "gpio17" }; +static const char * const blsp_spi2_cs3_groups[] = { "gpio5" }; +static const char * const blsp_spi2_groups[] = { + "gpio20", "gpio21", "gpio111", "gpio112" +}; +static const char * const blsp_spi3_cs1_groups[] = { "gpio95" }; +static const char * const blsp_spi3_cs2_groups[] = { "gpio65" }; +static const char * const blsp_spi3_cs3_groups[] = { "gpio4" }; +static const char * const blsp_spi3_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3" +}; +static const char * const blsp_spi4_groups[] = { + "gpio12", "gpio13", "gpio14", "gpio15" +}; +static const char * const blsp_spi5_groups[] = { + "gpio16", "gpio17", "gpio18", "gpio19" +}; +static const char * const blsp_spi6_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11" +}; +static const char * const blsp_uart1_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7" +}; +static const char * const blsp_uart2_groups[] = { + "gpio20", "gpio21", "gpio111", "gpio112" +}; +static const char * const blsp_uim1_groups[] = { "gpio4", "gpio5" }; +static const char * const blsp_uim2_groups[] = { "gpio20", "gpio21" }; +static const char * const cam_mclk_groups[] = { "gpio26", "gpio27" }; +static const char * const cci_async_groups[] = { "gpio33" }; +static const char * const cci_timer0_groups[] = { "gpio31" }; +static const char * const cci_timer1_groups[] = { "gpio32" }; +static const char * const cci_timer2_groups[] = { "gpio38" }; +static const char * const cdc_pdm0_groups[] = { + "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio64" +}; +static const char * const dbg_out_groups[] = { "gpio10" }; +static const char * const dmic0_clk_groups[] = { "gpio4" }; +static const char * const dmic0_data_groups[] = { "gpio5" }; +static const char * const ebi0_wrcdc_groups[] = { "gpio64" }; +static const char * const ebi2_a_groups[] = { "gpio99" }; +static const char * const ebi2_lcd_groups[] = { + "gpio24", "gpio24", "gpio25", "gpio95" +}; +static const char * const ext_lpass_groups[] = { "gpio45" }; +static const char * const gcc_gp1_clk_a_groups[] = { "gpio49" }; +static const char * const gcc_gp1_clk_b_groups[] = { "gpio14" }; +static const char * const gcc_gp2_clk_a_groups[] = { "gpio50" }; +static const char * const gcc_gp2_clk_b_groups[] = { "gpio12" }; +static const char * const gcc_gp3_clk_a_groups[] = { "gpio51" }; +static const char * const gcc_gp3_clk_b_groups[] = { "gpio13" }; +static const char * const gcc_plltest_groups[] = { "gpio66", "gpio67" }; +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", 
"gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112" +}; +static const char * const gsm0_tx_groups[] = { "gpio85" }; +static const char * const ldo_en_groups[] = { "gpio99" }; +static const char * const ldo_update_groups[] = { "gpio98" }; +static const char * const m_voc_groups[] = { "gpio8", "gpio95" }; +static const char * const mdp_vsync_groups[] = { "gpio24", "gpio25" }; +static const char * const modem_tsync_groups[] = { "gpio83" }; +static const char * const nav_pps_groups[] = { "gpio83" }; +static const char * const nav_tsync_groups[] = { "gpio83" }; +static const char * const pa_indicator_groups[] = { "gpio82" }; +static const char * const pbs0_groups[] = { "gpio90" }; +static const char * const pbs1_groups[] = { "gpio91" }; +static const char * const pbs2_groups[] = { "gpio92" }; +static const char * const pri_mi2s_data0_a_groups[] = { "gpio62" }; +static const char * const pri_mi2s_data0_b_groups[] = { "gpio95" }; +static const char * const pri_mi2s_data1_a_groups[] = { "gpio63" }; +static const char * const pri_mi2s_data1_b_groups[] = { "gpio96" }; +static const char * const pri_mi2s_mclk_a_groups[] = { "gpio59" }; +static const char * const pri_mi2s_mclk_b_groups[] = { "gpio98" }; +static const char * const pri_mi2s_sck_a_groups[] = { "gpio60" }; +static const char * const pri_mi2s_sck_b_groups[] = { "gpio94" }; +static const char * const pri_mi2s_ws_a_groups[] = { "gpio61" }; +static const char * const pri_mi2s_ws_b_groups[] = { "gpio110" }; +static const char * const prng_rosc_groups[] = { "gpio43" }; +static const char * const pwr_crypto_enabled_a_groups[] = { "gpio35" }; +static const char * const pwr_crypto_enabled_b_groups[] = { "gpio96" }; +static const char * const pwr_modem_enabled_a_groups[] = { "gpio28" }; +static const char * const pwr_modem_enabled_b_groups[] = { "gpio94" }; +static const char * const pwr_nav_enabled_a_groups[] = { "gpio34" }; +static const char * const pwr_nav_enabled_b_groups[] = { "gpio95" }; +static const char * const qdss_cti_trig_in_a0_groups[] = { "gpio20" }; +static const char * const qdss_cti_trig_in_a1_groups[] = { "gpio49" }; +static const char * const qdss_cti_trig_in_b0_groups[] = { "gpio21" }; +static const char * const qdss_cti_trig_in_b1_groups[] = { "gpio50" }; +static const char * const qdss_cti_trig_out_a0_groups[] = { "gpio23" }; +static const char * const qdss_cti_trig_out_a1_groups[] = { "gpio52" }; +static const char * const qdss_cti_trig_out_b0_groups[] = { "gpio22" }; +static const char * const qdss_cti_trig_out_b1_groups[] = { "gpio51" }; +static const char * const qdss_traceclk_a_groups[] = { "gpio46" }; +static const char * const qdss_tracectl_a_groups[] = { "gpio45" }; +static const char * const qdss_tracedata_a_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio47", "gpio48", "gpio58", "gpio65", "gpio94", "gpio96", + "gpio97" +}; +static const char * const qdss_tracedata_b_groups[] = { + "gpio14", "gpio16", "gpio17", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio93" +}; +static const char * const sd_write_groups[] = { "gpio99" }; +static const char * const 
sec_mi2s_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio98" +}; +static const char * const smb_int_groups[] = { "gpio58" }; +static const char * const ssbi0_groups[] = { "gpio88" }; +static const char * const ssbi1_groups[] = { "gpio89" }; +static const char * const uim1_clk_groups[] = { "gpio54" }; +static const char * const uim1_data_groups[] = { "gpio53" }; +static const char * const uim1_present_groups[] = { "gpio56" }; +static const char * const uim1_reset_groups[] = { "gpio55" }; +static const char * const uim2_clk_groups[] = { "gpio50" }; +static const char * const uim2_data_groups[] = { "gpio49" }; +static const char * const uim2_present_groups[] = { "gpio52" }; +static const char * const uim2_reset_groups[] = { "gpio51" }; +static const char * const uim3_clk_groups[] = { "gpio23" }; +static const char * const uim3_data_groups[] = { "gpio20" }; +static const char * const uim3_present_groups[] = { "gpio21" }; +static const char * const uim3_reset_groups[] = { "gpio22" }; +static const char * const uim_batt_groups[] = { "gpio57" }; +static const char * const wcss_bt_groups[] = { "gpio39", "gpio47", "gpio48" }; +static const char * const wcss_fm_groups[] = { "gpio45", "gpio46" }; +static const char * const wcss_wlan_groups[] = { + "gpio40", "gpio41", "gpio42", "gpio43", "gpio44" +}; + +static const struct msm_function msm8909_functions[] = { + FUNCTION(adsp_ext), + FUNCTION(atest_bbrx0), + FUNCTION(atest_bbrx1), + FUNCTION(atest_char), + FUNCTION(atest_char0), + FUNCTION(atest_char1), + FUNCTION(atest_char2), + FUNCTION(atest_char3), + FUNCTION(atest_combodac), + FUNCTION(atest_gpsadc0), + FUNCTION(atest_gpsadc1), + FUNCTION(atest_wlan0), + FUNCTION(atest_wlan1), + FUNCTION(bimc_dte0), + FUNCTION(bimc_dte1), + FUNCTION(blsp_i2c1), + FUNCTION(blsp_i2c2), + FUNCTION(blsp_i2c3), + FUNCTION(blsp_i2c4), + FUNCTION(blsp_i2c5), + FUNCTION(blsp_i2c6), + FUNCTION(blsp_spi1), + FUNCTION(blsp_spi1_cs1), + FUNCTION(blsp_spi1_cs2), + FUNCTION(blsp_spi1_cs3), + FUNCTION(blsp_spi2), + FUNCTION(blsp_spi2_cs1), + FUNCTION(blsp_spi2_cs2), + FUNCTION(blsp_spi2_cs3), + FUNCTION(blsp_spi3), + FUNCTION(blsp_spi3_cs1), + FUNCTION(blsp_spi3_cs2), + FUNCTION(blsp_spi3_cs3), + FUNCTION(blsp_spi4), + FUNCTION(blsp_spi5), + FUNCTION(blsp_spi6), + FUNCTION(blsp_uart1), + FUNCTION(blsp_uart2), + FUNCTION(blsp_uim1), + FUNCTION(blsp_uim2), + FUNCTION(cam_mclk), + FUNCTION(cci_async), + FUNCTION(cci_timer0), + FUNCTION(cci_timer1), + FUNCTION(cci_timer2), + FUNCTION(cdc_pdm0), + FUNCTION(dbg_out), + FUNCTION(dmic0_clk), + FUNCTION(dmic0_data), + FUNCTION(ebi0_wrcdc), + FUNCTION(ebi2_a), + FUNCTION(ebi2_lcd), + FUNCTION(ext_lpass), + FUNCTION(gcc_gp1_clk_a), + FUNCTION(gcc_gp1_clk_b), + FUNCTION(gcc_gp2_clk_a), + FUNCTION(gcc_gp2_clk_b), + FUNCTION(gcc_gp3_clk_a), + FUNCTION(gcc_gp3_clk_b), + FUNCTION(gcc_plltest), + FUNCTION(gpio), + FUNCTION(gsm0_tx), + FUNCTION(ldo_en), + FUNCTION(ldo_update), + FUNCTION(m_voc), + FUNCTION(mdp_vsync), + FUNCTION(modem_tsync), + FUNCTION(nav_pps), + FUNCTION(nav_tsync), + FUNCTION(pa_indicator), + FUNCTION(pbs0), + FUNCTION(pbs1), + FUNCTION(pbs2), + FUNCTION(pri_mi2s_data0_a), + FUNCTION(pri_mi2s_data0_b), + FUNCTION(pri_mi2s_data1_a), + FUNCTION(pri_mi2s_data1_b), + FUNCTION(pri_mi2s_mclk_a), + FUNCTION(pri_mi2s_mclk_b), + FUNCTION(pri_mi2s_sck_a), + FUNCTION(pri_mi2s_sck_b), + FUNCTION(pri_mi2s_ws_a), + FUNCTION(pri_mi2s_ws_b), + FUNCTION(prng_rosc), + FUNCTION(pwr_crypto_enabled_a), + FUNCTION(pwr_crypto_enabled_b), + FUNCTION(pwr_modem_enabled_a), + 
FUNCTION(pwr_modem_enabled_b), + FUNCTION(pwr_nav_enabled_a), + FUNCTION(pwr_nav_enabled_b), + FUNCTION(qdss_cti_trig_in_a0), + FUNCTION(qdss_cti_trig_in_a1), + FUNCTION(qdss_cti_trig_in_b0), + FUNCTION(qdss_cti_trig_in_b1), + FUNCTION(qdss_cti_trig_out_a0), + FUNCTION(qdss_cti_trig_out_a1), + FUNCTION(qdss_cti_trig_out_b0), + FUNCTION(qdss_cti_trig_out_b1), + FUNCTION(qdss_traceclk_a), + FUNCTION(qdss_tracectl_a), + FUNCTION(qdss_tracedata_a), + FUNCTION(qdss_tracedata_b), + FUNCTION(sd_write), + FUNCTION(sec_mi2s), + FUNCTION(smb_int), + FUNCTION(ssbi0), + FUNCTION(ssbi1), + FUNCTION(uim1_clk), + FUNCTION(uim1_data), + FUNCTION(uim1_present), + FUNCTION(uim1_reset), + FUNCTION(uim2_clk), + FUNCTION(uim2_data), + FUNCTION(uim2_present), + FUNCTION(uim2_reset), + FUNCTION(uim3_clk), + FUNCTION(uim3_data), + FUNCTION(uim3_present), + FUNCTION(uim3_reset), + FUNCTION(uim_batt), + FUNCTION(wcss_bt), + FUNCTION(wcss_fm), + FUNCTION(wcss_wlan), +}; + +static const struct msm_pingroup msm8909_groups[] = { + PINGROUP(0, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _), + PINGROUP(1, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _), + PINGROUP(2, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _), + PINGROUP(3, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _), + PINGROUP(4, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi3_cs3, dmic0_clk, _, _, _, _), + PINGROUP(5, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi2_cs3, dmic0_data, _, _, _, _), + PINGROUP(6, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte0), + PINGROUP(7, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte1), + PINGROUP(8, blsp_spi6, m_voc, _, _, _, _, _, qdss_tracedata_a, _), + PINGROUP(9, blsp_spi6, _, _, _, _, _, qdss_tracedata_a, _, _), + PINGROUP(10, blsp_spi6, blsp_i2c6, dbg_out, qdss_tracedata_a, _, _, _, _, _), + PINGROUP(11, blsp_spi6, blsp_i2c6, _, _, _, _, _, _, _), + PINGROUP(12, blsp_spi4, gcc_gp2_clk_b, _, _, _, _, _, _, _), + PINGROUP(13, blsp_spi4, gcc_gp3_clk_b, _, _, _, _, _, _, _), + PINGROUP(14, blsp_spi4, blsp_i2c4, gcc_gp1_clk_b, _, _, _, _, _, qdss_tracedata_b), + PINGROUP(15, blsp_spi4, blsp_i2c4, _, _, _, _, _, _, _), + PINGROUP(16, blsp_spi5, _, _, _, _, _, qdss_tracedata_b, _, _), + PINGROUP(17, blsp_spi5, blsp_spi2_cs2, _, _, _, _, _, qdss_tracedata_b, _), + PINGROUP(18, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _), + PINGROUP(19, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _), + PINGROUP(20, uim3_data, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_a0, _, _, _), + PINGROUP(21, uim3_present, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_b0, _, _, _), + PINGROUP(22, uim3_reset, _, qdss_cti_trig_out_b0, _, _, _, _, _, _), + PINGROUP(23, uim3_clk, qdss_cti_trig_out_a0, _, _, _, _, _, _, _), + PINGROUP(24, mdp_vsync, ebi2_lcd, ebi2_lcd, _, _, _, _, _, _), + PINGROUP(25, mdp_vsync, ebi2_lcd, _, _, _, _, _, _, _), + PINGROUP(26, cam_mclk, _, _, _, _, _, _, _, _), + PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, _), + PINGROUP(28, _, pwr_modem_enabled_a, _, _, _, _, _, _, _), + PINGROUP(29, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _), + PINGROUP(30, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _), + PINGROUP(31, cci_timer0, _, _, _, _, _, _, qdss_tracedata_b, _), + PINGROUP(32, cci_timer1, _, qdss_tracedata_b, _, atest_combodac, _, _, _, _), + PINGROUP(33, cci_async, qdss_tracedata_b, _, _, _, _, _, _, _), + PINGROUP(34, pwr_nav_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _), + PINGROUP(35, pwr_crypto_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _), + PINGROUP(36, qdss_tracedata_b, _, atest_bbrx1, _, _, _, _, _, _), + 
PINGROUP(37, blsp_spi1_cs2, qdss_tracedata_b, _, atest_bbrx0, _, _, _, _, _), + PINGROUP(38, cci_timer2, adsp_ext, _, atest_combodac, _, _, _, _, _), + PINGROUP(39, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _), + PINGROUP(40, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _), + PINGROUP(41, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _), + PINGROUP(42, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _), + PINGROUP(43, wcss_wlan, prng_rosc, qdss_tracedata_a, _, atest_combodac, _, _, _, _), + PINGROUP(44, wcss_wlan, _, atest_combodac, _, _, _, _, _, _), + PINGROUP(45, wcss_fm, ext_lpass, qdss_tracectl_a, _, atest_combodac, _, _, _, _), + PINGROUP(46, wcss_fm, qdss_traceclk_a, _, _, _, _, _, _, _), + PINGROUP(47, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _), + PINGROUP(48, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _), + PINGROUP(49, uim2_data, gcc_gp1_clk_a, qdss_cti_trig_in_a1, _, _, _, _, _, _), + PINGROUP(50, uim2_clk, gcc_gp2_clk_a, qdss_cti_trig_in_b1, _, _, _, _, _, _), + PINGROUP(51, uim2_reset, gcc_gp3_clk_a, qdss_cti_trig_out_b1, _, _, _, _, _, _), + PINGROUP(52, uim2_present, qdss_cti_trig_out_a1, _, _, _, _, _, _, _), + PINGROUP(53, uim1_data, _, _, _, _, _, _, _, _), + PINGROUP(54, uim1_clk, _, _, _, _, _, _, _, _), + PINGROUP(55, uim1_reset, _, _, _, _, _, _, _, _), + PINGROUP(56, uim1_present, _, _, _, _, _, _, _, _), + PINGROUP(57, uim_batt, _, _, _, _, _, _, _, _), + PINGROUP(58, qdss_tracedata_a, smb_int, _, _, _, _, _, _, _), + PINGROUP(59, cdc_pdm0, pri_mi2s_mclk_a, atest_char3, _, _, _, _, _, bimc_dte0), + PINGROUP(60, cdc_pdm0, pri_mi2s_sck_a, atest_char2, _, _, _, _, _, bimc_dte1), + PINGROUP(61, cdc_pdm0, pri_mi2s_ws_a, atest_char1, _, _, _, _, _, _), + PINGROUP(62, cdc_pdm0, pri_mi2s_data0_a, atest_char0, _, _, _, _, _, _), + PINGROUP(63, cdc_pdm0, pri_mi2s_data1_a, atest_char, _, _, _, _, _, _), + PINGROUP(64, cdc_pdm0, _, _, _, _, _, ebi0_wrcdc, _, _), + PINGROUP(65, blsp_spi3_cs2, blsp_spi1_cs3, qdss_tracedata_a, _, atest_gpsadc0, _, _, _, _), + PINGROUP(66, _, gcc_plltest, _, atest_combodac, _, _, _, _, _), + PINGROUP(67, _, gcc_plltest, _, _, _, _, _, _, _), + PINGROUP(68, _, _, _, _, _, _, _, _, _), + PINGROUP(69, _, _, _, _, _, _, _, _, _), + PINGROUP(70, _, _, _, _, _, _, _, _, _), + PINGROUP(71, _, _, _, _, _, _, _, _, _), + PINGROUP(72, _, _, _, _, _, _, _, _, _), + PINGROUP(73, _, _, _, _, _, _, _, _, _), + PINGROUP(74, _, _, _, _, _, _, _, _, _), + PINGROUP(75, _, _, _, _, _, _, _, _, _), + PINGROUP(76, _, _, _, _, _, _, _, _, _), + PINGROUP(77, _, _, _, _, _, _, _, _, _), + PINGROUP(78, _, _, _, _, _, _, _, _, _), + PINGROUP(79, _, _, atest_gpsadc1, _, _, _, _, _, _), + PINGROUP(80, _, _, _, _, _, _, _, _, _), + PINGROUP(81, _, _, _, atest_combodac, _, _, _, _, _), + PINGROUP(82, _, pa_indicator, _, _, _, _, _, _, _), + PINGROUP(83, _, modem_tsync, nav_tsync, nav_pps, _, atest_combodac, _, _, _), + PINGROUP(84, _, _, atest_combodac, _, _, _, _, _, _), + PINGROUP(85, gsm0_tx, _, _, atest_combodac, _, _, _, _, _), + PINGROUP(86, _, _, atest_combodac, _, _, _, _, _, _), + PINGROUP(87, _, _, _, _, _, _, _, _, _), + PINGROUP(88, _, ssbi0, _, _, _, _, _, _, _), + PINGROUP(89, _, ssbi1, _, _, _, _, _, _, _), + PINGROUP(90, pbs0, _, _, _, _, _, _, _, _), + PINGROUP(91, pbs1, _, _, _, _, _, _, _, _), + PINGROUP(92, pbs2, _, _, _, _, _, _, _, _), + PINGROUP(93, qdss_tracedata_b, _, _, _, _, _, _, _, _), + PINGROUP(94, pri_mi2s_sck_b, pwr_modem_enabled_b, qdss_tracedata_a, _, 
atest_combodac, _, _, _, _), + PINGROUP(95, blsp_spi3_cs1, pri_mi2s_data0_b, ebi2_lcd, m_voc, pwr_nav_enabled_b, _, atest_combodac, _, _), + PINGROUP(96, pri_mi2s_data1_b, _, pwr_crypto_enabled_b, qdss_tracedata_a, _, atest_wlan0, _, _, _), + PINGROUP(97, blsp_spi1_cs1, qdss_tracedata_a, _, atest_wlan1, _, _, _, _, _), + PINGROUP(98, sec_mi2s, pri_mi2s_mclk_b, blsp_spi2_cs1, ldo_update, _, _, _, _, _), + PINGROUP(99, ebi2_a, sd_write, ldo_en, _, _, _, _, _, _), + PINGROUP(100, _, _, _, _, _, _, _, _, _), + PINGROUP(101, _, _, _, _, _, _, _, _, _), + PINGROUP(102, _, _, _, _, _, _, _, _, _), + PINGROUP(103, _, _, _, _, _, _, _, _, _), + PINGROUP(104, _, _, _, _, _, _, _, _, _), + PINGROUP(105, _, _, _, _, _, _, _, _, _), + PINGROUP(106, _, _, _, _, _, _, _, _, _), + PINGROUP(107, _, _, _, _, _, _, _, _, _), + PINGROUP(108, _, _, _, _, _, _, _, _, _), + PINGROUP(109, _, _, _, _, _, _, _, _, _), + PINGROUP(110, pri_mi2s_ws_b, _, atest_combodac, _, _, _, _, _, _), + PINGROUP(111, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _), + PINGROUP(112, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _), + SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6), + SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3), + SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0), + SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6), + SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3), + SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0), + SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0), + SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5), + SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10), + SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15), + SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20), + SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25), +}; + +static const struct msm_gpio_wakeirq_map msm8909_mpm_map[] = { + { 65, 3 }, { 5, 4 }, { 11, 5 }, { 12, 6 }, { 64, 7 }, { 58, 8 }, + { 50, 9 }, { 13, 10 }, { 49, 11 }, { 20, 12 }, { 21, 13 }, { 25, 14 }, + { 46, 15 }, { 45, 16 }, { 28, 17 }, { 44, 18 }, { 31, 19 }, { 43, 20 }, + { 42, 21 }, { 34, 22 }, { 35, 23 }, { 36, 24 }, { 37, 25 }, { 38, 26 }, + { 39, 27 }, { 40, 28 }, { 41, 29 }, { 90, 30 }, { 91, 32 }, { 92, 33 }, + { 94, 34 }, { 95, 35 }, { 96, 36 }, { 97, 37 }, { 98, 38 }, + { 110, 39 }, { 111, 40 }, { 112, 41 }, { 105, 42 }, { 107, 43 }, + { 47, 50 }, { 48, 51 }, +}; + +static const struct msm_pinctrl_soc_data msm8909_pinctrl = { + .pins = msm8909_pins, + .npins = ARRAY_SIZE(msm8909_pins), + .functions = msm8909_functions, + .nfunctions = ARRAY_SIZE(msm8909_functions), + .groups = msm8909_groups, + .ngroups = ARRAY_SIZE(msm8909_groups), + .ngpios = 113, + .wakeirq_map = msm8909_mpm_map, + .nwakeirq_map = ARRAY_SIZE(msm8909_mpm_map), +}; + +static int msm8909_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &msm8909_pinctrl); +} + +static const struct of_device_id msm8909_pinctrl_of_match[] = { + { .compatible = "qcom,msm8909-tlmm", }, + { }, +}; +MODULE_DEVICE_TABLE(of, msm8909_pinctrl_of_match); + +static struct platform_driver msm8909_pinctrl_driver = { + .driver = { + .name = "msm8909-pinctrl", + .of_match_table = msm8909_pinctrl_of_match, + }, + .probe = msm8909_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init msm8909_pinctrl_init(void) +{ + return platform_driver_register(&msm8909_pinctrl_driver); +} +arch_initcall(msm8909_pinctrl_init); + +static void __exit msm8909_pinctrl_exit(void) +{ + platform_driver_unregister(&msm8909_pinctrl_driver); +} +module_exit(msm8909_pinctrl_exit); + +MODULE_DESCRIPTION("Qualcomm MSM8909 TLMM pinctrl driver"); 
+MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c new file mode 100644 index 0000000000..d615b6c55b --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * ALSA SoC platform-machine driver for QTi LPASS + */ + +#include <linux/gpio/driver.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "pinctrl-lpass-lpi.h" + +enum lpass_lpi_functions { + LPI_MUX_dmic1_clk, + LPI_MUX_dmic1_data, + LPI_MUX_dmic2_clk, + LPI_MUX_dmic2_data, + LPI_MUX_dmic3_clk, + LPI_MUX_dmic3_data, + LPI_MUX_i2s1_clk, + LPI_MUX_i2s1_data, + LPI_MUX_i2s1_ws, + LPI_MUX_i2s2_clk, + LPI_MUX_i2s2_data, + LPI_MUX_i2s2_ws, + LPI_MUX_qua_mi2s_data, + LPI_MUX_qua_mi2s_sclk, + LPI_MUX_qua_mi2s_ws, + LPI_MUX_swr_rx_clk, + LPI_MUX_swr_rx_data, + LPI_MUX_swr_tx_clk, + LPI_MUX_swr_tx_data, + LPI_MUX_wsa_swr_clk, + LPI_MUX_wsa_swr_data, + LPI_MUX_gpio, + LPI_MUX__, +}; + +static int gpio0_pins[] = { 0 }; +static int gpio1_pins[] = { 1 }; +static int gpio2_pins[] = { 2 }; +static int gpio3_pins[] = { 3 }; +static int gpio4_pins[] = { 4 }; +static int gpio5_pins[] = { 5 }; +static int gpio6_pins[] = { 6 }; +static int gpio7_pins[] = { 7 }; +static int gpio8_pins[] = { 8 }; +static int gpio9_pins[] = { 9 }; +static int gpio10_pins[] = { 10 }; +static int gpio11_pins[] = { 11 }; +static int gpio12_pins[] = { 12 }; +static int gpio13_pins[] = { 13 }; +static int gpio14_pins[] = { 14 }; + +static const struct pinctrl_pin_desc sc7280_lpi_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "gpio4"), + PINCTRL_PIN(5, "gpio5"), + PINCTRL_PIN(6, "gpio6"), + PINCTRL_PIN(7, "gpio7"), + PINCTRL_PIN(8, "gpio8"), + PINCTRL_PIN(9, "gpio9"), + PINCTRL_PIN(10, "gpio10"), + PINCTRL_PIN(11, "gpio11"), + PINCTRL_PIN(12, "gpio12"), + PINCTRL_PIN(13, "gpio13"), + PINCTRL_PIN(14, "gpio14"), +}; + +static const char * const swr_tx_clk_groups[] = { "gpio0" }; +static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" }; +static const char * const swr_rx_clk_groups[] = { "gpio3" }; +static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" }; +static const char * const dmic1_clk_groups[] = { "gpio6" }; +static const char * const dmic1_data_groups[] = { "gpio7" }; +static const char * const dmic2_clk_groups[] = { "gpio8" }; +static const char * const dmic2_data_groups[] = { "gpio9" }; +static const char * const i2s2_clk_groups[] = { "gpio10" }; +static const char * const i2s2_ws_groups[] = { "gpio11" }; +static const char * const dmic3_clk_groups[] = { "gpio12" }; +static const char * const dmic3_data_groups[] = { "gpio13" }; +static const char * const qua_mi2s_sclk_groups[] = { "gpio0" }; +static const char * const qua_mi2s_ws_groups[] = { "gpio1" }; +static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" }; +static const char * const i2s1_clk_groups[] = { "gpio6" }; +static const char * const i2s1_ws_groups[] = { "gpio7" }; +static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" }; +static const char * const wsa_swr_clk_groups[] = { "gpio10" }; +static const char * const wsa_swr_data_groups[] = { "gpio11" }; +static const char * const i2s2_data_groups[] = { "gpio12", "gpio13" }; + +static const struct lpi_pingroup sc7280_groups[] = { + LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _), + LPI_PINGROUP(1, 2, swr_tx_data,
qua_mi2s_ws, _, _), + LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _), + LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _), + LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _), + LPI_PINGROUP(5, 12, swr_rx_data, _, _, _), + LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _), + LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _), + LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _), + LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _), + LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _), + LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _), + LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _), + LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _), + LPI_PINGROUP(14, 6, swr_tx_data, _, _, _), +}; + +static const struct lpi_function sc7280_functions[] = { + LPI_FUNCTION(dmic1_clk), + LPI_FUNCTION(dmic1_data), + LPI_FUNCTION(dmic2_clk), + LPI_FUNCTION(dmic2_data), + LPI_FUNCTION(dmic3_clk), + LPI_FUNCTION(dmic3_data), + LPI_FUNCTION(i2s1_clk), + LPI_FUNCTION(i2s1_data), + LPI_FUNCTION(i2s1_ws), + LPI_FUNCTION(i2s2_clk), + LPI_FUNCTION(i2s2_data), + LPI_FUNCTION(i2s2_ws), + LPI_FUNCTION(qua_mi2s_data), + LPI_FUNCTION(qua_mi2s_sclk), + LPI_FUNCTION(qua_mi2s_ws), + LPI_FUNCTION(swr_rx_clk), + LPI_FUNCTION(swr_rx_data), + LPI_FUNCTION(swr_tx_clk), + LPI_FUNCTION(swr_tx_data), + LPI_FUNCTION(wsa_swr_clk), + LPI_FUNCTION(wsa_swr_data), +}; + +static const struct lpi_pinctrl_variant_data sc7280_lpi_data = { + .pins = sc7280_lpi_pins, + .npins = ARRAY_SIZE(sc7280_lpi_pins), + .groups = sc7280_groups, + .ngroups = ARRAY_SIZE(sc7280_groups), + .functions = sc7280_functions, + .nfunctions = ARRAY_SIZE(sc7280_functions), +}; + +static const struct of_device_id lpi_pinctrl_of_match[] = { + { + .compatible = "qcom,sc7280-lpass-lpi-pinctrl", + .data = &sc7280_lpi_data, + }, + { } +}; +MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match); + +static struct platform_driver lpi_pinctrl_driver = { + .driver = { + .name = "qcom-sc7280-lpass-lpi-pinctrl", + .of_match_table = lpi_pinctrl_of_match, + }, + .probe = lpi_pinctrl_probe, + .remove = lpi_pinctrl_remove, +}; + +module_platform_driver(lpi_pinctrl_driver); +MODULE_DESCRIPTION("QTI SC7280 LPI GPIO pin control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c new file mode 100644 index 0000000000..1138e683e6 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c @@ -0,0 +1,1544 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022, Konrad Dybcio + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +/* GPIO register space starts REG_BASE into the TLMM region; each pin then owns a REG_SIZE window. */ +#define REG_BASE 0x100000 +#define REG_SIZE 0x1000 +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = REG_BASE + REG_SIZE * id, \ + .io_reg = REG_BASE + REG_SIZE * id + 0x4, \ + .intr_cfg_reg = REG_BASE + REG_SIZE * id + 0x8, \ + .intr_status_reg = REG_BASE + REG_SIZE * id + 0xc, \ + .intr_target_reg = REG_BASE + REG_SIZE * id + 0x8, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +static const struct pinctrl_pin_desc sm6375_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33,
"GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + PINCTRL_PIN(147, "GPIO_147"), + PINCTRL_PIN(148, 
"GPIO_148"), + PINCTRL_PIN(149, "GPIO_149"), + PINCTRL_PIN(150, "GPIO_150"), + PINCTRL_PIN(151, "GPIO_151"), + PINCTRL_PIN(152, "GPIO_152"), + PINCTRL_PIN(153, "GPIO_153"), + PINCTRL_PIN(154, "GPIO_154"), + PINCTRL_PIN(155, "GPIO_155"), + PINCTRL_PIN(156, "UFS_RESET"), + PINCTRL_PIN(157, "SDC1_RCLK"), + PINCTRL_PIN(158, "SDC1_CLK"), + PINCTRL_PIN(159, "SDC1_CMD"), + PINCTRL_PIN(160, "SDC1_DATA"), + PINCTRL_PIN(161, "SDC2_CLK"), + PINCTRL_PIN(162, "SDC2_CMD"), + PINCTRL_PIN(163, "SDC2_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); 
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+
+static const unsigned int sdc1_rclk_pins[] = { 157 };
+static const unsigned int sdc1_clk_pins[] = { 158 };
+static const unsigned int sdc1_cmd_pins[] = { 159 };
+static const unsigned int sdc1_data_pins[] = { 160 };
+static const unsigned int sdc2_clk_pins[] = { 161 };
+static const unsigned int sdc2_cmd_pins[] = { 162 };
+static const unsigned int sdc2_data_pins[] = { 163 };
+static const unsigned int ufs_reset_pins[] = { 156 };
+
+enum sm6375_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_tsens,
+ msm_mux_atest_tsens2,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb10,
+ msm_mux_atest_usb11,
+ msm_mux_atest_usb12,
+ msm_mux_atest_usb13,
+ msm_mux_atest_usb2,
+ msm_mux_atest_usb20,
+ msm_mux_atest_usb21,
+ msm_mux_atest_usb22,
+ msm_mux_atest_usb23,
+ msm_mux_audio_ref,
+ msm_mux_btfm_slimbus,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_edp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gp_pdm0,
+ msm_mux_gp_pdm1,
+ msm_mux_gp_pdm2,
+ msm_mux_gpio,
+ msm_mux_gps_tx,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_ext,
+ msm_mux_m_voc,
+ msm_mux_mclk,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s_0,
+ msm_mux_mi2s_1,
+ msm_mux_mi2s_2,
+ msm_mux_mss_lte,
+ msm_mux_nav_gpio,
+ msm_mux_nav_pps,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag0,
+ msm_mux_phase_flag1,
+ msm_mux_phase_flag10,
+ msm_mux_phase_flag11,
+ msm_mux_phase_flag12,
+ msm_mux_phase_flag13,
+ msm_mux_phase_flag14,
+ msm_mux_phase_flag15,
+ msm_mux_phase_flag16,
+ msm_mux_phase_flag17,
+ msm_mux_phase_flag18,
+ msm_mux_phase_flag19,
+ msm_mux_phase_flag2,
+ msm_mux_phase_flag20,
+ msm_mux_phase_flag21,
+ msm_mux_phase_flag22,
+ msm_mux_phase_flag23,
+ msm_mux_phase_flag24,
+ msm_mux_phase_flag25,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag3,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_phase_flag4,
+ msm_mux_phase_flag5,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag7,
+ msm_mux_phase_flag8,
+ msm_mux_phase_flag9,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qdss_gpio0,
+ msm_mux_qdss_gpio1,
+ msm_mux_qdss_gpio10,
+ msm_mux_qdss_gpio11,
+ msm_mux_qdss_gpio12,
+ msm_mux_qdss_gpio13,
+ msm_mux_qdss_gpio14,
+ msm_mux_qdss_gpio15,
+ msm_mux_qdss_gpio2,
+ msm_mux_qdss_gpio3,
+ msm_mux_qdss_gpio4,
+ msm_mux_qdss_gpio5,
+ msm_mux_qdss_gpio6,
+ msm_mux_qdss_gpio7,
+ msm_mux_qdss_gpio8,
+ msm_mux_qdss_gpio9,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qup00,
+ msm_mux_qup01,
+ msm_mux_qup02,
+ msm_mux_qup10,
+ msm_mux_qup11_f1,
+ msm_mux_qup11_f2,
+ msm_mux_qup12,
+ msm_mux_qup13_f1,
+ msm_mux_qup13_f2,
+ msm_mux_qup14,
+ msm_mux_sd_write,
+ msm_mux_sdc1_tb,
+ msm_mux_sdc2_tb,
+ msm_mux_sp_cmu,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux_wlan1_adc0,
+ msm_mux_wlan1_adc1,
+ msm_mux_wlan2_adc0,
+ msm_mux_wlan2_adc1,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
+ "gpio51", "gpio52", "gpio53", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68", "gpio69", "gpio75", "gpio76", "gpio77", "gpio78",
+ "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", "gpio85",
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", "gpio92",
+ "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105",
+ "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", "gpio111",
+ "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", "gpio117",
+ "gpio118", "gpio119", "gpio120", "gpio124", "gpio125", "gpio126",
+ "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", "gpio132",
+ "gpio133", "gpio134", "gpio135", "gpio136", "gpio141", "gpio142",
+ "gpio143", "gpio150", "gpio151", "gpio152", "gpio153", "gpio154",
+ "gpio155",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio89",
+};
+static const char * const cci_async_groups[] = {
+ "gpio35", "gpio36", "gpio48", "gpio52", "gpio53",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio2", "gpio3", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44",
+};
+static const char * const gps_tx_groups[] = {
+ "gpio101", "gpio102", "gpio107", "gpio108",
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio37", "gpio68",
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio8", "gpio52",
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio57",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio90",
+};
+static const char * const mclk_groups[] = {
+ "gpio93",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio6", "gpio23", "gpio24", "gpio27", "gpio28",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio65", "gpio66",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio101", "gpio101", "gpio102", "gpio102",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio27",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio103",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio106",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio124",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio87",
+};
+static const char * const atest_char_groups[] = {
+ "gpio95",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio96",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio97",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio98",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio99",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio92",
+};
+static const char * const atest_tsens2_groups[] = {
+ "gpio93",
+};
+static const char * const atest_usb1_groups[] = {
+ "gpio83",
+};
+static const char * const atest_usb10_groups[] = {
+ "gpio84",
+};
+static const char * const atest_usb11_groups[] = {
+ "gpio85",
+};
+static const char * const atest_usb12_groups[] = {
+ "gpio86",
+};
+static const char * const atest_usb13_groups[] = {
+ "gpio87",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio88",
+};
+static const char * const atest_usb20_groups[] = {
+ "gpio89",
+};
+static const char * const atest_usb21_groups[] = {
+ "gpio90",
+};
+static const char * const atest_usb22_groups[] = {
+ "gpio91",
+};
+static const char * const atest_usb23_groups[] = {
+ "gpio92",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio60",
+};
+static const char * const btfm_slimbus_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio34",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio35",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio36",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio37",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio0", "gpio1", "gpio2",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio3",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio19", "gpio20", "gpio21", "gpio22",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio86", "gpio90",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio87", "gpio91",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio88", "gpio92",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio89", "gpio93",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio12", "gpio118",
+};
+static const char * const edp_lcd_groups[] = {
+ "gpio23",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio48", "gpio58",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio21",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio22",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio0", "gpio1",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio95",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio96",
+};
+static const char * const lpass_ext_groups[] = {
+ "gpio60", "gpio93",
+};
+static const char * const m_voc_groups[] = {
+ "gpio12",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio47",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio48",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio56",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio57",
+};
+static const char * const mi2s_0_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+static const char * const mi2s_1_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const mi2s_2_groups[] = {
+ "gpio60",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio101", "gpio102",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio118",
+};
+static const char * const phase_flag0_groups[] = {
+ "gpio12",
+};
+static const char * const phase_flag1_groups[] = {
+ "gpio17",
+};
+static const char * const phase_flag10_groups[] = {
+ "gpio41",
+};
+static const char * const phase_flag11_groups[] = {
+ "gpio42",
+};
+static const char * const phase_flag12_groups[] = {
+ "gpio43",
+};
+static const char * const phase_flag13_groups[] = {
+ "gpio44",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio45",
+};
+static const char * const phase_flag15_groups[] = {
+ "gpio46",
+};
+static const char * const phase_flag16_groups[] = {
+ "gpio47",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio48",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio49",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio50",
+};
+static const char * const phase_flag2_groups[] = {
+ "gpio18",
+};
+static const char * const phase_flag20_groups[] = {
+ "gpio51",
+};
+static const char * const phase_flag21_groups[] = {
+ "gpio52",
+};
+static const char * const phase_flag22_groups[] = {
+ "gpio53",
+};
+static const char * const phase_flag23_groups[] = {
+ "gpio56",
+};
+static const char * const phase_flag24_groups[] = {
+ "gpio57",
+};
+static const char * const phase_flag25_groups[] = {
+ "gpio60",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio61",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio62",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio63",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio64",
+};
+static const char * const phase_flag3_groups[] = {
+ "gpio34",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio67",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio68",
+};
+static const char * const phase_flag4_groups[] = {
+ "gpio35",
+};
+static const char * const phase_flag5_groups[] = {
+ "gpio36",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio37",
+};
+static const char * const phase_flag7_groups[] = {
+ "gpio38",
+};
+static const char * const phase_flag8_groups[] = {
+ "gpio39",
+};
+static const char * const phase_flag9_groups[] = {
+ "gpio40",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio13",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio98",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio14",
+};
+static const char * const prng_rosc0_groups[] = {
+ "gpio97",
+};
+static const char * const prng_rosc1_groups[] = {
+ "gpio98",
+};
+static const char * const prng_rosc2_groups[] = {
+ "gpio99",
+};
+static const char * const prng_rosc3_groups[] = {
+ "gpio100",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio2", "gpio3", "gpio6", "gpio7", "gpio61", "gpio62", "gpio86",
+ "gpio87",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio8", "gpio9", "gpio63", "gpio64",
+};
+static const char * const qdss_gpio0_groups[] = {
+ "gpio39", "gpio65",
+};
+static const char * const qdss_gpio1_groups[] = {
+ "gpio40", "gpio66",
+};
+static const char * const qdss_gpio10_groups[] = {
+ "gpio50", "gpio56",
+};
+static const char * const qdss_gpio11_groups[] = {
+ "gpio51", "gpio57",
+};
+static const char * const qdss_gpio12_groups[] = {
+ "gpio34", "gpio52",
+};
+static const char * const qdss_gpio13_groups[] = {
+ "gpio35", "gpio53",
+};
+static const char * const qdss_gpio14_groups[] = {
+ "gpio27", "gpio36",
+};
+static const char * const qdss_gpio15_groups[] = {
+ "gpio28", "gpio37",
+};
+static const char * const qdss_gpio2_groups[] = {
+ "gpio38", "gpio41",
+};
+static const char * const qdss_gpio3_groups[] = {
+ "gpio42", "gpio47",
+};
+static const char * const qdss_gpio4_groups[] = {
+ "gpio43", "gpio88",
+};
+static const char * const qdss_gpio5_groups[] = {
+ "gpio44", "gpio89",
+};
+static const char * const qdss_gpio6_groups[] = {
+ "gpio45", "gpio90",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio46", "gpio91",
+};
+static const char * const qdss_gpio8_groups[] = {
+ "gpio48", "gpio92",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio49", "gpio93",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio105",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio104",
+};
+static const char * const qlink1_enable_groups[] = {
+ "gpio108",
+};
+static const char * const qlink1_request_groups[] = {
+ "gpio107",
+};
+static const char * const qup00_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup01_groups[] = {
+ "gpio61", "gpio62", "gpio63", "gpio64",
+};
+static const char * const qup02_groups[] = {
+ "gpio45", "gpio46", "gpio48", "gpio56", "gpio57",
+};
+static const char * const qup10_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup11_f1_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup11_f2_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup12_groups[] = {
+ "gpio19", "gpio19", "gpio20", "gpio20",
+};
+static const char * const qup13_f1_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup13_f2_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup14_groups[] = {
+ "gpio4", "gpio4", "gpio5", "gpio5",
+};
+static const char * const sd_write_groups[] = {
+ "gpio85",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio4",
+};
+static const char * const sdc2_tb_groups[] = {
+ "gpio5",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio3",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio61",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio62",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio63",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio64",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio88",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio88",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio80",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio79",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio82",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio81",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio76",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio75",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio78",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio77",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio47",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio49",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio89",
+};
+static const char * const wlan1_adc0_groups[] = {
+ "gpio90",
+};
+static const char * const wlan1_adc1_groups[] = {
+ "gpio92",
+};
+static const char * const wlan2_adc0_groups[] = {
+ "gpio91",
+};
+static const char * const wlan2_adc1_groups[] = {
+ "gpio93",
+};
+
+static const struct msm_function sm6375_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_tsens2),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb10),
+ FUNCTION(atest_usb11),
+ FUNCTION(atest_usb12),
+ FUNCTION(atest_usb13),
+ FUNCTION(atest_usb2),
+ FUNCTION(atest_usb20),
+ FUNCTION(atest_usb21),
+ FUNCTION(atest_usb22),
+ FUNCTION(atest_usb23),
+ FUNCTION(audio_ref),
+ FUNCTION(btfm_slimbus),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cri_trng),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(edp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gpio),
+ FUNCTION(gps_tx),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(lpass_ext),
+ FUNCTION(m_voc),
+ FUNCTION(mclk),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s_0),
+ FUNCTION(mi2s_1),
+ FUNCTION(mi2s_2),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_gpio),
+ FUNCTION(nav_pps),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag0),
+ FUNCTION(phase_flag1),
+ FUNCTION(phase_flag10),
+ FUNCTION(phase_flag11),
+ FUNCTION(phase_flag12),
+ FUNCTION(phase_flag13),
+ FUNCTION(phase_flag14),
+ FUNCTION(phase_flag15),
+ FUNCTION(phase_flag16),
+ FUNCTION(phase_flag17),
+ FUNCTION(phase_flag18),
+ FUNCTION(phase_flag19),
+ FUNCTION(phase_flag2),
+ FUNCTION(phase_flag20),
+ FUNCTION(phase_flag21),
+ FUNCTION(phase_flag22),
+ FUNCTION(phase_flag23),
+ FUNCTION(phase_flag24),
+ FUNCTION(phase_flag25),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag3),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(phase_flag4),
+ FUNCTION(phase_flag5),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag7),
+ FUNCTION(phase_flag8),
+ FUNCTION(phase_flag9),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qdss_gpio0),
+ FUNCTION(qdss_gpio1),
+ FUNCTION(qdss_gpio10),
+ FUNCTION(qdss_gpio11),
+ FUNCTION(qdss_gpio12),
+ FUNCTION(qdss_gpio13),
+ FUNCTION(qdss_gpio14),
+ FUNCTION(qdss_gpio15),
+ FUNCTION(qdss_gpio2),
+ FUNCTION(qdss_gpio3),
+ FUNCTION(qdss_gpio4),
+ FUNCTION(qdss_gpio5),
+ FUNCTION(qdss_gpio6),
+ FUNCTION(qdss_gpio7),
+ FUNCTION(qdss_gpio8),
+ FUNCTION(qdss_gpio9),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qup00),
+ FUNCTION(qup01),
+ FUNCTION(qup02),
+ FUNCTION(qup10),
+ FUNCTION(qup11_f1),
+ FUNCTION(qup11_f2),
+ FUNCTION(qup12),
+ FUNCTION(qup13_f1),
+ FUNCTION(qup13_f2),
+ FUNCTION(qup14),
+ FUNCTION(sd_write),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sdc2_tb),
+ FUNCTION(sp_cmu),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(wlan2_adc1),
+};
+
+/*
+ * Each pin is maintained as a single group; any missing or non-existent pin
+ * is kept as a dummy group to keep the pin group index in sync with the pin
+ * descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm6375_groups[] = {
+ [0] = PINGROUP(0, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [1] = PINGROUP(1, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup00, cci_i2c, cri_trng, qdss_cti, _, _, _, _, _),
+ [3] = PINGROUP(3, qup00, cci_i2c, sp_cmu, dbg_out, qdss_cti, _, _, _, _),
+ [4] = PINGROUP(4, qup14, qup14, sdc1_tb, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup14, qup14, sdc2_tb, _, _, _, _, _, _),
+ [6] = PINGROUP(6, mdp_vsync, qdss_cti, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qdss_cti, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, gp_pdm1, qdss_gpio, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qdss_gpio, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, m_voc, dp_hot, _, phase_flag0, _, _, _, _, _),
+ [13] = PINGROUP(13, qup10, pll_bypassnl, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup10, pll_reset, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup10, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup10, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, _, phase_flag1, qup10, _, _, _, _, _, _),
+ [18] = PINGROUP(18, _, phase_flag2, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [21] = PINGROUP(21, gcc_gp2, ddr_bist, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, gcc_gp3, ddr_bist, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, mdp_vsync, edp_lcd, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup11_f1, qup11_f2, mdp_vsync, pll_bist, _, qdss_gpio14, _, _, _),
+ [28] = PINGROUP(28, qup11_f1, qup11_f2, mdp_vsync, _, qdss_gpio15, _, _, _, _),
+ [29] = PINGROUP(29, cam_mclk, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, cam_mclk, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, cam_mclk, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, cam_mclk, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, cam_mclk, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, cci_timer0, _, phase_flag3, qdss_gpio12, _, _, _, _, _),
+ [35] = PINGROUP(35, cci_timer1, cci_async, _, phase_flag4, qdss_gpio13, _, _, _, _),
+ [36] = PINGROUP(36, cci_timer2, cci_async, _, phase_flag5, qdss_gpio14, _, _, _, _),
+ [37] = PINGROUP(37, cci_timer3, gp_pdm0, _, phase_flag6, qdss_gpio15, _, _, _, _),
+ [38] = PINGROUP(38, cci_timer4, _, phase_flag7, qdss_gpio2, _, _, _, _, _),
+ [39] = PINGROUP(39, cci_i2c, _, phase_flag8, qdss_gpio0, _, _, _, _, _),
+ [40] = PINGROUP(40, cci_i2c, _, phase_flag9, qdss_gpio1, _, _, _, _, _),
+ [41] = PINGROUP(41, cci_i2c, _, phase_flag10, qdss_gpio2, _, _, _, _, _),
+ [42] = PINGROUP(42, cci_i2c, _, phase_flag11, qdss_gpio3, _, _, _, _, _),
+ [43] = PINGROUP(43, cci_i2c, _, phase_flag12, qdss_gpio4, _, _, _, _, _),
+ [44] = PINGROUP(44, cci_i2c, _, phase_flag13, qdss_gpio5, _, _, _, _, _),
+ [45] = PINGROUP(45, qup02, _, phase_flag14, qdss_gpio6, _, _, _, _, _),
+ [46] = PINGROUP(46, qup02, _, phase_flag15, qdss_gpio7, _, _, _, _, _),
+ [47] = PINGROUP(47, mdp_vsync0, _, phase_flag16, qdss_gpio3, _, _, usb2phy_ac, _, _),
+ [48] = PINGROUP(48, cci_async, mdp_vsync1, gcc_gp1, _, phase_flag17, qdss_gpio8, qup02,
+ _, _),
+ [49] = PINGROUP(49, vfr_1, _, phase_flag18, qdss_gpio9, _, _, _, _, _),
+ [50] = PINGROUP(50, _, phase_flag19, qdss_gpio10, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, phase_flag20, qdss_gpio11, _, _, _, _, _, _),
+ [52] = PINGROUP(52, cci_async, gp_pdm1, _, phase_flag21, qdss_gpio12, _, _, _, _),
+ [53] = PINGROUP(53, cci_async, _, phase_flag22, qdss_gpio13, _, _, _, _, _),
+ [54] = PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup02, mdp_vsync2, _, phase_flag23, qdss_gpio10, _, _, _, _),
+ [57] = PINGROUP(57, qup02, mdp_vsync3, gp_pdm2, _, phase_flag24, qdss_gpio11, _, _, _),
+ [58] = PINGROUP(58, gcc_gp1, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, audio_ref, lpass_ext, mi2s_2, _, phase_flag25, _, _, _, _),
+ [61] = PINGROUP(61, qup01, tgu_ch0, _, phase_flag26, qdss_cti, _, _, _, _),
+ [62] = PINGROUP(62, qup01, tgu_ch1, _, phase_flag27, qdss_cti, _, _, _, _),
+ [63] = PINGROUP(63, qup01, tgu_ch2, _, phase_flag28, qdss_gpio, _, _, _, _),
+ [64] = PINGROUP(64, qup01, tgu_ch3, _, phase_flag29, qdss_gpio, _, _, _, _),
+ [65] = PINGROUP(65, mss_lte, _, qdss_gpio0, _, _, _, _, _, _),
+ [66] = PINGROUP(66, mss_lte, _, qdss_gpio1, _, _, _, _, _, _),
+ [67] = PINGROUP(67, btfm_slimbus, mi2s_1, _, phase_flag30, _, _, _, _, _),
+ [68] = PINGROUP(68, btfm_slimbus, mi2s_1, gp_pdm0, _, phase_flag31, _, _, _, _),
+ [69] = PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, uim2_data, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, uim2_clk, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, uim2_reset, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, uim2_present, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, uim1_data, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, uim1_clk, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, uim1_reset, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, uim1_present, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, atest_usb1, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, _, atest_usb10, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, sd_write, _, atest_usb11, _, _, _, _, _, _),
+ [86] = PINGROUP(86, btfm_slimbus, mi2s_1, _, qdss_cti, atest_usb12, ddr_pxi0, _, _, _),
+ [87] = PINGROUP(87, btfm_slimbus, mi2s_1, adsp_ext, _, qdss_cti, atest_usb13, ddr_pxi1, _,
+ _),
+ [88] = PINGROUP(88, mi2s_0, _, qdss_gpio4, _, atest_usb2, ddr_pxi2, tsense_pwm1,
+ tsense_pwm2, _),
+ [89] = PINGROUP(89, mi2s_0, agera_pll, _, qdss_gpio5, _, vsense_trigger, atest_usb20,
+ ddr_pxi3, _),
+ [90] = PINGROUP(90, mi2s_0, jitter_bist, _, qdss_gpio6, _, wlan1_adc0, atest_usb21,
+ ddr_pxi0, _),
+ [91] = PINGROUP(91, mi2s_0, _, qdss_gpio7, _, wlan2_adc0, atest_usb22, ddr_pxi1, _, _),
+ [92] = PINGROUP(92, _, qdss_gpio8, atest_tsens, wlan1_adc1, atest_usb23, ddr_pxi2, _, _,
+ _),
+ [93] = PINGROUP(93, mclk, lpass_ext, _, qdss_gpio9, atest_tsens2, wlan2_adc1, ddr_pxi3,
+ _, _),
+ [94] = PINGROUP(94, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, ldo_en, _, atest_char, _, _, _, _, _, _),
+ [96] = PINGROUP(96, ldo_update, _, atest_char0, _, _, _, _, _, _),
+ [97] = PINGROUP(97, prng_rosc0, _, atest_char1, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, atest_char2, _, _, prng_rosc1, pll_clk, _, _, _),
+ [99] = PINGROUP(99, _, atest_char3, _, _, prng_rosc2, _, _, _, _),
+ [100] = PINGROUP(100, _, _, prng_rosc3, _, _, _, _, _, _),
+ [101] = PINGROUP(101, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [102] = PINGROUP(102, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [103] = PINGROUP(103, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, qlink0_request, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, qlink0_enable, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qlink1_request, gps_tx, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qlink1_enable, gps_tx, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, _, _, pa_indicator, dp_hot, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb_phy, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, _, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+ [156] = UFS_RESET(ufs_reset, 0x1ae000),
+ [157] = SDC_PINGROUP(sdc1_rclk, 0x1a1000, 0, 0),
+ [158] = SDC_PINGROUP(sdc1_clk, 0x1a0000, 13, 6),
+ [159] = SDC_PINGROUP(sdc1_cmd, 0x1a0000, 11, 3),
+ [160] = SDC_PINGROUP(sdc1_data, 0x1a0000, 9, 0),
+ [161] = SDC_PINGROUP(sdc2_clk, 0x1a2000, 14, 6),
+ [162] = SDC_PINGROUP(sdc2_cmd, 0x1a2000, 11, 3),
+ [163] = SDC_PINGROUP(sdc2_data, 0x1a2000, 9, 0),
+};
+
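+/* Each entry maps a wake-capable TLMM GPIO number to its MPM wake interrupt index. */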
+static const struct msm_gpio_wakeirq_map sm6375_mpm_map[] = {
+ { 0, 84 }, { 3, 6 }, { 4, 7 }, { 7, 8 }, { 8, 9 }, { 9, 10 }, { 11, 11 }, { 12, 13 },
+ { 13, 14 }, { 16, 16 }, { 17, 17 }, { 18, 18 }, { 19, 19 }, { 21, 20 }, { 22, 21 },
+ { 23, 23 }, { 24, 24 }, { 25, 25 }, { 27, 26 }, { 28, 27 }, { 37, 28 }, { 38, 29 },
+ { 48, 30 }, { 50, 31 }, { 51, 32 }, { 52, 33 }, { 57, 34 }, { 59, 35 }, { 60, 37 },
+ { 61, 38 }, { 62, 39 }, { 64, 40 }, { 66, 41 }, { 67, 42 }, { 68, 43 }, { 69, 44 },
+ { 78, 45 }, { 82, 36 }, { 83, 47 }, { 84, 48 }, { 85, 49 }, { 87, 50 }, { 88, 51 },
+ { 91, 52 }, { 94, 53 }, { 95, 54 }, { 96, 55 }, { 97, 56 }, { 98, 57 }, { 99, 58 },
+ { 100, 59 }, { 104, 60 }, { 107, 61 }, { 118, 62 }, { 124, 63 }, { 125, 64 }, { 126, 65 },
+ { 128, 66 }, { 129, 67 }, { 131, 69 }, { 133, 70 }, { 134, 71 }, { 136, 73 }, { 142, 74 },
+ { 150, 75 }, { 153, 76 }, { 155, 77 },
+};
+
+static const struct msm_pinctrl_soc_data sm6375_tlmm = {
+ .pins = sm6375_pins,
+ .npins = ARRAY_SIZE(sm6375_pins),
+ .functions = sm6375_functions,
+ .nfunctions = ARRAY_SIZE(sm6375_functions),
+ .groups = sm6375_groups,
+ .ngroups = ARRAY_SIZE(sm6375_groups),
+ .ngpios = 157,
+ .wakeirq_map = sm6375_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sm6375_mpm_map),
+};
+
+static int sm6375_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm6375_tlmm);
+}
+
+static const struct of_device_id sm6375_tlmm_of_match[] = {
+ { .compatible = "qcom,sm6375-tlmm", },
+ { },
+};
+
+static struct platform_driver sm6375_tlmm_driver = {
+ .driver = {
+ .name = "sm6375-tlmm",
+ .of_match_table = sm6375_tlmm_of_match,
+ },
+ .probe = sm6375_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm6375_tlmm_init(void)
+{
+ return platform_driver_register(&sm6375_tlmm_driver);
+}
+arch_initcall(sm6375_tlmm_init);
+
+static void __exit sm6375_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm6375_tlmm_driver);
+}
+module_exit(sm6375_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM6375 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm6375_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
new file mode 100644
index 0000000000..ddbc6317f2
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+
+static const struct pinctrl_pin_desc sm8250_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+};
+
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const i2s2_data_groups[] = { "gpio12", "gpio13" };
+
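+/*
+ * The second LPI_PINGROUP() argument is the pin's bit offset in the slew
+ * rate control register; LPI_NO_SLEW marks pins with no slew rate control.
+ */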
+static const struct lpi_pingroup sm8250_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _),
+};
+
+static const struct lpi_function sm8250_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+};
+
+static const struct lpi_pinctrl_variant_data sm8250_lpi_data = {
+ .pins = sm8250_lpi_pins,
+ .npins = ARRAY_SIZE(sm8250_lpi_pins),
+ .groups = sm8250_groups,
+ .ngroups = ARRAY_SIZE(sm8250_groups),
+ .functions = sm8250_functions,
+ .nfunctions = ARRAY_SIZE(sm8250_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sm8250-lpass-lpi-pinctrl",
+ .data = &sm8250_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sm8250-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SM8250 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c
new file mode 100644
index 0000000000..770862f45b
--- /dev/null
+++ b/drivers/pinctrl/ralink/pinctrl-ralink.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "pinctrl-ralink.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define SYSC_REG_GPIO_MODE 0x60
+#define SYSC_REG_GPIO_MODE2 0x64
+
+struct ralink_priv {
+ struct device *dev;
+
+ struct pinctrl_pin_desc *pads;
+ struct pinctrl_desc *desc;
+
+ struct ralink_pmx_func **func;
+ int func_count;
+
+ struct ralink_pmx_group *groups;
+ const char **group_names;
+ int group_count;
+
+ u8 *gpio;
+ int max_pins;
+};
+
+static int ralink_get_group_count(struct pinctrl_dev *pctrldev)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return p->group_count;
+}
+
+static const char *ralink_get_group_name(struct pinctrl_dev *pctrldev,
+ unsigned int group)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return (group >= p->group_count) ? NULL : p->group_names[group];
+}
+
+static int ralink_get_group_pins(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (group >= p->group_count)
+ return -EINVAL;
+
+ *pins = p->groups[group].func[0].pins;
+ *num_pins = p->groups[group].func[0].pin_count;
+
+ return 0;
+}
+
+static const struct pinctrl_ops ralink_pctrl_ops = {
+ .get_groups_count = ralink_get_group_count,
+ .get_group_name = ralink_get_group_name,
+ .get_group_pins = ralink_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int ralink_pmx_func_count(struct pinctrl_dev *pctrldev)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return p->func_count;
+}
+
+static const char *ralink_pmx_func_name(struct pinctrl_dev *pctrldev,
+ unsigned int func)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return p->func[func]->name;
+}
+
+static int ralink_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
+ unsigned int func,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (p->func[func]->group_count == 1)
+ *groups = &p->group_names[p->func[func]->groups[0]];
+ else
+ *groups = p->group_names;
+
+ *num_groups = p->func[func]->group_count;
+
+ return 0;
+}
+
+static int ralink_pmx_group_enable(struct pinctrl_dev *pctrldev,
+ unsigned int func, unsigned int group)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+ u32 mode = 0;
+ u32 reg = SYSC_REG_GPIO_MODE;
+ int i;
+ int shift;
+
+ /* don't allow double use */
+ if (p->groups[group].enabled) {
+ dev_err(p->dev, "%s is already enabled\n",
+ p->groups[group].name);
+ return 0;
+ }
+
+ p->groups[group].enabled = 1;
+ p->func[func]->enabled = 1;
+
+ /* mode fields above bit 31 live in the second GPIO mode register */
+ shift = p->groups[group].shift;
+ if (shift >= 32) {
+ shift -= 32;
+ reg = SYSC_REG_GPIO_MODE2;
+ }
+ mode = rt_sysc_r32(reg);
+ mode &= ~(p->groups[group].mask << shift);
+
+ /* mark the pins as gpio */
+ for (i = 0; i < p->groups[group].func[0].pin_count; i++)
+ p->gpio[p->groups[group].func[0].pins[i]] = 1;
+
+ /* function 0 is gpio and needs special handling */
+ if (func == 0) {
+ mode |= p->groups[group].gpio << shift;
+ } else {
+ for (i = 0; i < p->func[func]->pin_count; i++)
+ p->gpio[p->func[func]->pins[i]] = 0;
+ mode |= p->func[func]->value << shift;
+ }
+ rt_sysc_w32(mode, reg);
+
+ return 0;
+}
+
+static int ralink_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (!p->gpio[pin]) {
+ dev_err(p->dev, "pin %d is not set to gpio mux\n", pin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops ralink_pmx_group_ops = {
+ .get_functions_count = ralink_pmx_func_count,
+ .get_function_name = ralink_pmx_func_name,
+ .get_function_groups = ralink_pmx_group_get_groups,
+ .set_mux = ralink_pmx_group_enable,
+ .gpio_request_enable = ralink_pmx_group_gpio_request_enable,
+};
+
+static struct pinctrl_desc ralink_pctrl_desc = {
+ .owner = THIS_MODULE,
+ .name = "ralink-pinctrl",
+ .pctlops = &ralink_pctrl_ops,
+ .pmxops = &ralink_pmx_group_ops,
+};
+
+static struct ralink_pmx_func gpio_func = {
+ .name = "gpio",
+};
+
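+/* build the function and group tables from the per-SoC mux description */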
+static int ralink_pinctrl_index(struct ralink_priv *p)
+{
+ struct ralink_pmx_group *mux = p->groups;
+ int i, j, c = 0;
+
+ /* count the mux groups */
+ while (mux->name) {
+ p->group_count++;
+ mux++;
+ }
+
+ /* allocate the group names array needed by the gpio function */
+ p->group_names = devm_kcalloc(p->dev, p->group_count,
+ sizeof(char *), GFP_KERNEL);
+ if (!p->group_names)
+ return -ENOMEM;
+
+ for (i = 0; i < p->group_count; i++) {
+ p->group_names[i] = p->groups[i].name;
+ p->func_count += p->groups[i].func_count;
+ }
+
+ /* we have a dummy function[0] for gpio */
+ p->func_count++;
+
+ /* allocate our function and group mapping index buffers */
+ p->func = devm_kcalloc(p->dev, p->func_count,
+ sizeof(*p->func), GFP_KERNEL);
+ gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
+ GFP_KERNEL);
+ if (!p->func || !gpio_func.groups)
+ return -ENOMEM;
+
+ /* add a backpointer to the function so it knows its group */
+ gpio_func.group_count = p->group_count;
+ for (i = 0; i < gpio_func.group_count; i++)
+ gpio_func.groups[i] = i;
+
+ p->func[c] = &gpio_func;
+ c++;
+
+ /* add remaining functions */
+ for (i = 0; i < p->group_count; i++) {
+ for (j = 0; j < p->groups[i].func_count; j++) {
+ p->func[c] = &p->groups[i].func[j];
+ p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int),
+ GFP_KERNEL);
+ if (!p->func[c]->groups)
+ return -ENOMEM;
+ p->func[c]->groups[0] = i;
+ p->func[c]->group_count = 1;
+ c++;
+ }
+ }
+ return 0;
+}
+
+static int ralink_pinctrl_pins(struct ralink_priv *p)
+{
+ int i, j;
+
+ /*
+ * Loop over the functions and initialize the pins array,
+ * and work out the highest pin used.
+ */
+ for (i = 0; i < p->func_count; i++) {
+ int pin;
+
+ if (!p->func[i]->pin_count)
+ continue;
+
+ p->func[i]->pins = devm_kcalloc(p->dev,
+ p->func[i]->pin_count,
+ sizeof(int),
+ GFP_KERNEL);
+ if (!p->func[i]->pins)
+ return -ENOMEM;
+ for (j = 0; j < p->func[i]->pin_count; j++)
+ p->func[i]->pins[j] = p->func[i]->pin_first + j;
+
+ pin = p->func[i]->pin_first + p->func[i]->pin_count;
+ if (pin > p->max_pins)
+ p->max_pins = pin;
+ }
+
+ /* the buffer that tells us which pins are gpio */
+ p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL);
+ /* the pads needed to tell pinctrl about our pins */
+ p->pads = devm_kcalloc(p->dev, p->max_pins,
+ sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+ if (!p->pads || !p->gpio)
+ return -ENOMEM;
+
+ memset(p->gpio, 1, sizeof(u8) * p->max_pins);
+ for (i = 0; i < p->func_count; i++) {
+ if (!p->func[i]->pin_count)
+ continue;
+
+ for (j = 0; j < p->func[i]->pin_count; j++)
+ p->gpio[p->func[i]->pins[j]] = 0;
+ }
+
+ /* pin 0 is always a gpio */
+ p->gpio[0] = 1;
+
+ /* set the pads */
+ for (i = 0; i < p->max_pins; i++) {
+ /* strlen("ioXY") + 1 = 5 */
+ char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, 5, "io%d", i);
+ p->pads[i].number = i;
+ p->pads[i].name = name;
+ }
+ p->desc->pins = p->pads;
+ p->desc->npins = p->max_pins;
+
+ return 0;
+}
+
+int ralink_pinctrl_init(struct platform_device *pdev,
+ struct ralink_pmx_group *data)
+{
+ struct ralink_priv *p;
+ struct pinctrl_dev *dev;
+ int err;
+
+ if (!data)
+ return -ENOTSUPP;
+
+ /* setup the private data */
+ p = devm_kzalloc(&pdev->dev, sizeof(struct ralink_priv), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->dev = &pdev->dev;
+ p->desc = &ralink_pctrl_desc;
+ p->groups = data;
+ platform_set_drvdata(pdev, p);
+
+ /* init the device */
+ err = ralink_pinctrl_index(p);
+ if (err) {
+ dev_err(&pdev->dev, "failed to load index\n");
+ return err;
+ }
+
+ err = ralink_pinctrl_pins(p);
+ if (err) {
+ dev_err(&pdev->dev, "failed to load pins\n");
+ return err;
+ }
"failed to load pins\n"); + return err; + } + dev = pinctrl_register(p->desc, &pdev->dev, p); + + return PTR_ERR_OR_ZERO(dev); +} diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.h b/drivers/pinctrl/ralink/pinctrl-ralink.h new file mode 100644 index 0000000000..e6037be1e1 --- /dev/null +++ b/drivers/pinctrl/ralink/pinctrl-ralink.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 John Crispin + */ + +#ifndef _PINCTRL_RALINK_H__ +#define _PINCTRL_RALINK_H__ + +#define FUNC(name, value, pin_first, pin_count) \ + { name, value, pin_first, pin_count } + +#define GRP(_name, _func, _mask, _shift) \ + { .name = _name, .mask = _mask, .shift = _shift, \ + .func = _func, .gpio = _mask, \ + .func_count = ARRAY_SIZE(_func) } + +#define GRP_G(_name, _func, _mask, _gpio, _shift) \ + { .name = _name, .mask = _mask, .shift = _shift, \ + .func = _func, .gpio = _gpio, \ + .func_count = ARRAY_SIZE(_func) } + +struct ralink_pmx_group; + +struct ralink_pmx_func { + const char *name; + const char value; + + int pin_first; + int pin_count; + int *pins; + + int *groups; + int group_count; + + int enabled; +}; + +struct ralink_pmx_group { + const char *name; + int enabled; + + const u32 shift; + const char mask; + const char gpio; + + struct ralink_pmx_func *func; + int func_count; +}; + +int ralink_pinctrl_init(struct platform_device *pdev, + struct ralink_pmx_group *data); + +#endif diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c new file mode 100644 index 0000000000..5dd1c2c770 --- /dev/null +++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c @@ -0,0 +1,4262 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * R8A779A0 processor support - PFC hardware block. + * + * Copyright (C) 2021 Renesas Electronics Corp. 
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_19(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(1, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_20(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_13(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_25(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(7, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_14(8, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33)
+
+/* GPSR0 */
+#define GPSR0_18 F_(MSIOF2_RXD, IP2SR0_11_8)
+#define GPSR0_17 F_(MSIOF2_SCK, IP2SR0_7_4)
+#define GPSR0_16 F_(MSIOF2_TXD, IP2SR0_3_0)
+#define GPSR0_15 F_(MSIOF2_SYNC, IP1SR0_31_28)
+#define GPSR0_14 F_(MSIOF2_SS1, IP1SR0_27_24)
+#define GPSR0_13 F_(MSIOF2_SS2, IP1SR0_23_20)
+#define GPSR0_12 F_(MSIOF5_RXD, IP1SR0_19_16)
+#define GPSR0_11 F_(MSIOF5_SCK, IP1SR0_15_12)
+#define GPSR0_10 F_(MSIOF5_TXD, IP1SR0_11_8)
+#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+/* GPSR1 */
+#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+#define GPSR1_20 F_(SSI_SD, IP2SR1_19_16)
+#define GPSR1_19 F_(SSI_WS, IP2SR1_15_12)
+#define GPSR1_18 F_(SSI_SCK, IP2SR1_11_8)
+#define GPSR1_17 F_(SCIF_CLK, IP2SR1_7_4)
+#define GPSR1_16 F_(HRX0, IP2SR1_3_0)
+#define GPSR1_15 F_(HSCK0, IP1SR1_31_28)
+#define GPSR1_14 F_(HRTS0_N, IP1SR1_27_24)
+#define GPSR1_13 F_(HCTS0_N, IP1SR1_23_20)
+#define GPSR1_12 F_(HTX0, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_RXD, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SCK, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_TXD, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SYNC, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_SS1, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_SS2, IP0SR1_27_24)
+#define GPSR1_5 F_(MSIOF1_RXD, IP0SR1_23_20)
+#define GPSR1_4 F_(MSIOF1_TXD, IP0SR1_19_16)
+#define GPSR1_3 F_(MSIOF1_SCK, IP0SR1_15_12)
+#define GPSR1_2 F_(MSIOF1_SYNC, IP0SR1_11_8)
+#define GPSR1_1 F_(MSIOF1_SS1, IP0SR1_7_4)
+#define GPSR1_0 F_(MSIOF1_SS2, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_19 F_(CANFD7_RX, IP2SR2_15_12)
+#define GPSR2_18 F_(CANFD7_TX, IP2SR2_11_8)
+#define GPSR2_17 F_(CANFD4_RX, IP2SR2_7_4)
+#define GPSR2_16 F_(CANFD4_TX, IP2SR2_3_0)
+#define GPSR2_15 F_(CANFD3_RX, IP1SR2_31_28)
+#define GPSR2_14 F_(CANFD3_TX, IP1SR2_27_24)
+#define GPSR2_13 F_(CANFD2_RX, IP1SR2_23_20)
+#define GPSR2_12 F_(CANFD2_TX, IP1SR2_19_16)
+#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20)
+#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4)
+#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_29 F_(RPC_INT_N, IP3SR3_23_20)
+#define GPSR3_28 F_(RPC_WP_N, IP3SR3_19_16)
+#define GPSR3_27 F_(RPC_RESET_N, IP3SR3_15_12)
+#define GPSR3_26 F_(QSPI1_IO3, IP3SR3_11_8)
+#define GPSR3_25 F_(QSPI1_SSL, IP3SR3_7_4)
+#define GPSR3_24 F_(QSPI1_IO2, IP3SR3_3_0)
+#define GPSR3_23 F_(QSPI1_MISO_IO1, IP2SR3_31_28)
+#define GPSR3_22 F_(QSPI1_SPCLK, IP2SR3_27_24)
+#define GPSR3_21 F_(QSPI1_MOSI_IO0, IP2SR3_23_20)
+#define GPSR3_20 F_(QSPI0_SPCLK, IP2SR3_19_16)
+#define GPSR3_19 F_(QSPI0_MOSI_IO0, IP2SR3_15_12)
+#define GPSR3_18 F_(QSPI0_MISO_IO1, IP2SR3_11_8)
+#define GPSR3_17 F_(QSPI0_IO2, IP2SR3_7_4)
+#define GPSR3_16 F_(QSPI0_IO3, IP2SR3_3_0)
+#define GPSR3_15 F_(QSPI0_SSL, IP1SR3_31_28)
+#define GPSR3_14 F_(IPC_CLKOUT, IP1SR3_27_24)
+#define GPSR3_13 F_(IPC_CLKIN, IP1SR3_23_20)
+#define GPSR3_12 F_(SD_WP, IP1SR3_19_16)
+#define GPSR3_11 F_(SD_CD, IP1SR3_15_12)
+#define GPSR3_10 F_(MMC_SD_CMD, IP1SR3_11_8)
+#define GPSR3_9 F_(MMC_D6, IP1SR3_7_4)
+#define GPSR3_8 F_(MMC_D7, IP1SR3_3_0)
+#define GPSR3_7 F_(MMC_D4, IP0SR3_31_28)
+#define GPSR3_6 F_(MMC_D5, IP0SR3_27_24)
+#define GPSR3_5 F_(MMC_SD_D3, IP0SR3_23_20)
+#define GPSR3_4 F_(MMC_DS, IP0SR3_19_16)
+#define GPSR3_3 F_(MMC_SD_CLK, IP0SR3_15_12)
+#define GPSR3_2 F_(MMC_SD_D2, IP0SR3_11_8)
+#define GPSR3_1 F_(MMC_SD_D0, IP0SR3_7_4)
+#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)
+
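+/*
+ * GPSR4 and GPSR5 pins each have a single fixed function (FM()) and no
+ * IPxSRy mux field, unlike the F_() entries in the other banks.
+ */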
+#define GPSR5_20 FM(AVB2_RX_CTL) +#define GPSR5_19 FM(AVB2_TX_CTL) +#define GPSR5_18 FM(AVB2_RXC) +#define GPSR5_17 FM(AVB2_RD0) +#define GPSR5_16 FM(AVB2_TXC) +#define GPSR5_15 FM(AVB2_TD0) +#define GPSR5_14 FM(AVB2_RD1) +#define GPSR5_13 FM(AVB2_RD2) +#define GPSR5_12 FM(AVB2_TD1) +#define GPSR5_11 FM(AVB2_TD2) +#define GPSR5_10 FM(AVB2_MDIO) +#define GPSR5_9 FM(AVB2_RD3) +#define GPSR5_8 FM(AVB2_TD3) +#define GPSR5_7 FM(AVB2_TXCREFCLK) +#define GPSR5_6 FM(AVB2_MDC) +#define GPSR5_5 FM(AVB2_MAGIC) +#define GPSR5_4 FM(AVB2_PHY_INT) +#define GPSR5_3 FM(AVB2_LINK) +#define GPSR5_2 FM(AVB2_AVTP_MATCH) +#define GPSR5_1 FM(AVB2_AVTP_CAPTURE) +#define GPSR5_0 FM(AVB2_AVTP_PPS) + +/* GPSR 6 */ +#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16) +#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12) +#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8) +#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4) +#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0) +#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28) +#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24) +#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20) +#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16) +#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12) +#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8) +#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4) +#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0) +#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28) +#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24) +#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20) +#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16) +#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12) +#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8) +#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4) +#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0) + +/* GPSR7 */ +#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16) +#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12) +#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8) +#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4) +#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0) +#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28) +#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24) +#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20) +#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16) +#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12) +#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8) +#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4) +#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0) +#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28) +#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24) +#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20) +#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16) +#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12) +#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8) +#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4) +#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0) + +/* GPSR8 */ +#define GPSR8_13 F_(GP8_13, IP1SR8_23_20) +#define GPSR8_12 F_(GP8_12, IP1SR8_19_16) +#define GPSR8_11 F_(SDA5, IP1SR8_15_12) +#define GPSR8_10 F_(SCL5, IP1SR8_11_8) +#define GPSR8_9 F_(SDA4, IP1SR8_7_4) +#define GPSR8_8 F_(SCL4, IP1SR8_3_0) +#define GPSR8_7 F_(SDA3, IP0SR8_31_28) +#define GPSR8_6 F_(SCL3, IP0SR8_27_24) +#define GPSR8_5 F_(SDA2, IP0SR8_23_20) +#define GPSR8_4 F_(SCL2, IP0SR8_19_16) +#define GPSR8_3 F_(SDA1, IP0SR8_15_12) +#define GPSR8_2 F_(SCL1, IP0SR8_11_8) +#define GPSR8_1 F_(SDA0, IP0SR8_7_4) +#define GPSR8_0 F_(SCL0, IP0SR8_3_0) + +/* SR0 */ +/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) 
F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* SR1 */ +/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) 
F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) 
F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* SR2 */ +/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) 
F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP2SR2_3_0 FM(CANFD4_TX) F_(0, 0) FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR2_7_4 FM(CANFD4_RX) F_(0, 0) FM(PWM5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR2_11_8 FM(CANFD7_TX) F_(0, 0) FM(PWM6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR2_15_12 FM(CANFD7_RX) F_(0, 0) FM(PWM7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* SR3 */ +/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 
0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* SR6 */ +/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define 
IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR6_15_12 FM(AVB1_RD3) 
FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* SR7 */ +/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) 
F_(0, 0) F_(0, 0) +#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* SR8 */ +/* IP0SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP0SR8_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR8_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR8_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR8_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR8_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR8_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR8_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR8_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +/* IP1SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ +#define IP1SR8_3_0 FM(SCL4) FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR8_7_4 FM(SDA4) FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR8_11_8 FM(SCL5) FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR8_15_12 FM(SDA5) FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR8_19_16 F_(0, 0) FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR8_23_20 F_(0, 0) FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) + +#define PINMUX_GPSR \ + GPSR3_29 \ + GPSR1_28 GPSR3_28 \ + GPSR1_27 GPSR3_27 \ + GPSR1_26 GPSR3_26 \ + GPSR1_25 GPSR3_25 \ + GPSR1_24 GPSR3_24 GPSR4_24 \ + GPSR1_23 GPSR3_23 GPSR4_23 \ + GPSR1_22 GPSR3_22 GPSR4_22 \ + GPSR1_21 GPSR3_21 GPSR4_21 \ + GPSR1_20 GPSR3_20 GPSR4_20 GPSR5_20 GPSR6_20 GPSR7_20 \ + GPSR1_19 GPSR2_19 GPSR3_19 
GPSR4_19 GPSR5_19 GPSR6_19 GPSR7_19 \ +GPSR0_18 GPSR1_18 GPSR2_18 GPSR3_18 GPSR4_18 GPSR5_18 GPSR6_18 GPSR7_18 \ +GPSR0_17 GPSR1_17 GPSR2_17 GPSR3_17 GPSR4_17 GPSR5_17 GPSR6_17 GPSR7_17 \ +GPSR0_16 GPSR1_16 GPSR2_16 GPSR3_16 GPSR4_16 GPSR5_16 GPSR6_16 GPSR7_16 \ +GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 \ +GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 \ +GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 GPSR8_13 \ +GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 GPSR8_12 \ +GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 GPSR8_11 \ +GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 GPSR8_10 \ +GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 GPSR8_9 \ +GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 GPSR8_8 \ +GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 GPSR8_7 \ +GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 GPSR8_6 \ +GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 GPSR8_5 \ +GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 GPSR8_4 \ +GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 GPSR8_3 \ +GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 GPSR8_2 \ +GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 GPSR8_1 \ +GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0 GPSR8_0 + +#define PINMUX_IPSR \ +\ +FM(IP0SR0_3_0) IP0SR0_3_0 FM(IP1SR0_3_0) IP1SR0_3_0 FM(IP2SR0_3_0) IP2SR0_3_0 \ +FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 \ +FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \ +FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 \ +FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 \ +FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \ +FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \ +FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \ +\ +FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \ +FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \ +FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \ +FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \ +FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \ +FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 \ +FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 \ +FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \ +\ +FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \ +FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \ +FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 FM(IP2SR2_11_8) IP2SR2_11_8 \ +FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \ +FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 \ +FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 \ +FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 \ +FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 \ +\ 
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 FM(IP2SR3_3_0) IP2SR3_3_0 FM(IP3SR3_3_0) IP3SR3_3_0 \ +FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 FM(IP2SR3_7_4) IP2SR3_7_4 FM(IP3SR3_7_4) IP3SR3_7_4 \ +FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 FM(IP2SR3_11_8) IP2SR3_11_8 FM(IP3SR3_11_8) IP3SR3_11_8 \ +FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 FM(IP2SR3_15_12) IP2SR3_15_12 FM(IP3SR3_15_12) IP3SR3_15_12 \ +FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 FM(IP2SR3_19_16) IP2SR3_19_16 FM(IP3SR3_19_16) IP3SR3_19_16 \ +FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2SR3_23_20 FM(IP3SR3_23_20) IP3SR3_23_20 \ +FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 \ +FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 \ +\ +FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \ +FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \ +FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \ +FM(IP0SR6_15_12) IP0SR6_15_12 FM(IP1SR6_15_12) IP1SR6_15_12 FM(IP2SR6_15_12) IP2SR6_15_12 \ +FM(IP0SR6_19_16) IP0SR6_19_16 FM(IP1SR6_19_16) IP1SR6_19_16 FM(IP2SR6_19_16) IP2SR6_19_16 \ +FM(IP0SR6_23_20) IP0SR6_23_20 FM(IP1SR6_23_20) IP1SR6_23_20 \ +FM(IP0SR6_27_24) IP0SR6_27_24 FM(IP1SR6_27_24) IP1SR6_27_24 \ +FM(IP0SR6_31_28) IP0SR6_31_28 FM(IP1SR6_31_28) IP1SR6_31_28 \ +\ +FM(IP0SR7_3_0) IP0SR7_3_0 FM(IP1SR7_3_0) IP1SR7_3_0 FM(IP2SR7_3_0) IP2SR7_3_0 \ +FM(IP0SR7_7_4) IP0SR7_7_4 FM(IP1SR7_7_4) IP1SR7_7_4 FM(IP2SR7_7_4) IP2SR7_7_4 \ +FM(IP0SR7_11_8) IP0SR7_11_8 FM(IP1SR7_11_8) IP1SR7_11_8 FM(IP2SR7_11_8) IP2SR7_11_8 \ +FM(IP0SR7_15_12) IP0SR7_15_12 FM(IP1SR7_15_12) IP1SR7_15_12 FM(IP2SR7_15_12) IP2SR7_15_12 \ +FM(IP0SR7_19_16) IP0SR7_19_16 FM(IP1SR7_19_16) IP1SR7_19_16 FM(IP2SR7_19_16) IP2SR7_19_16 \ +FM(IP0SR7_23_20) IP0SR7_23_20 FM(IP1SR7_23_20) IP1SR7_23_20 \ +FM(IP0SR7_27_24) IP0SR7_27_24 FM(IP1SR7_27_24) IP1SR7_27_24 \ +FM(IP0SR7_31_28) IP0SR7_31_28 FM(IP1SR7_31_28) IP1SR7_31_28 \ +\ +FM(IP0SR8_3_0) IP0SR8_3_0 FM(IP1SR8_3_0) IP1SR8_3_0 \ +FM(IP0SR8_7_4) IP0SR8_7_4 FM(IP1SR8_7_4) IP1SR8_7_4 \ +FM(IP0SR8_11_8) IP0SR8_11_8 FM(IP1SR8_11_8) IP1SR8_11_8 \ +FM(IP0SR8_15_12) IP0SR8_15_12 FM(IP1SR8_15_12) IP1SR8_15_12 \ +FM(IP0SR8_19_16) IP0SR8_19_16 FM(IP1SR8_19_16) IP1SR8_19_16 \ +FM(IP0SR8_23_20) IP0SR8_23_20 FM(IP1SR8_23_20) IP1SR8_23_20 \ +FM(IP0SR8_27_24) IP0SR8_27_24 \ +FM(IP0SR8_31_28) IP0SR8_31_28 + +/* MOD_SEL4 */ /* 0 */ /* 1 */ +#define MOD_SEL4_19 FM(SEL_TSN0_TD2_0) FM(SEL_TSN0_TD2_1) +#define MOD_SEL4_18 FM(SEL_TSN0_TD3_0) FM(SEL_TSN0_TD3_1) +#define MOD_SEL4_15 FM(SEL_TSN0_TD0_0) FM(SEL_TSN0_TD0_1) +#define MOD_SEL4_14 FM(SEL_TSN0_TD1_0) FM(SEL_TSN0_TD1_1) +#define MOD_SEL4_12 FM(SEL_TSN0_TXC_0) FM(SEL_TSN0_TXC_1) +#define MOD_SEL4_9 FM(SEL_TSN0_TX_CTL_0) FM(SEL_TSN0_TX_CTL_1) +#define MOD_SEL4_8 FM(SEL_TSN0_AVTP_PPS0_0) FM(SEL_TSN0_AVTP_PPS0_1) +#define MOD_SEL4_5 FM(SEL_TSN0_AVTP_MATCH_0) FM(SEL_TSN0_AVTP_MATCH_1) +#define MOD_SEL4_2 FM(SEL_TSN0_AVTP_PPS1_0) FM(SEL_TSN0_AVTP_PPS1_1) +#define MOD_SEL4_1 FM(SEL_TSN0_MDC_0) FM(SEL_TSN0_MDC_1) + +/* MOD_SEL5 */ /* 0 */ /* 1 */ +#define MOD_SEL5_19 FM(SEL_AVB2_TX_CTL_0) FM(SEL_AVB2_TX_CTL_1) +#define MOD_SEL5_16 FM(SEL_AVB2_TXC_0) FM(SEL_AVB2_TXC_1) +#define MOD_SEL5_15 FM(SEL_AVB2_TD0_0) FM(SEL_AVB2_TD0_1) +#define MOD_SEL5_12 FM(SEL_AVB2_TD1_0) FM(SEL_AVB2_TD1_1) +#define MOD_SEL5_11 FM(SEL_AVB2_TD2_0) 
FM(SEL_AVB2_TD2_1) +#define MOD_SEL5_8 FM(SEL_AVB2_TD3_0) FM(SEL_AVB2_TD3_1) +#define MOD_SEL5_6 FM(SEL_AVB2_MDC_0) FM(SEL_AVB2_MDC_1) +#define MOD_SEL5_5 FM(SEL_AVB2_MAGIC_0) FM(SEL_AVB2_MAGIC_1) +#define MOD_SEL5_2 FM(SEL_AVB2_AVTP_MATCH_0) FM(SEL_AVB2_AVTP_MATCH_1) +#define MOD_SEL5_0 FM(SEL_AVB2_AVTP_PPS_0) FM(SEL_AVB2_AVTP_PPS_1) + +/* MOD_SEL6 */ /* 0 */ /* 1 */ +#define MOD_SEL6_18 FM(SEL_AVB1_TD3_0) FM(SEL_AVB1_TD3_1) +#define MOD_SEL6_16 FM(SEL_AVB1_TD2_0) FM(SEL_AVB1_TD2_1) +#define MOD_SEL6_13 FM(SEL_AVB1_TD0_0) FM(SEL_AVB1_TD0_1) +#define MOD_SEL6_12 FM(SEL_AVB1_TD1_0) FM(SEL_AVB1_TD1_1) +#define MOD_SEL6_10 FM(SEL_AVB1_AVTP_PPS_0) FM(SEL_AVB1_AVTP_PPS_1) +#define MOD_SEL6_7 FM(SEL_AVB1_TX_CTL_0) FM(SEL_AVB1_TX_CTL_1) +#define MOD_SEL6_6 FM(SEL_AVB1_TXC_0) FM(SEL_AVB1_TXC_1) +#define MOD_SEL6_5 FM(SEL_AVB1_AVTP_MATCH_0) FM(SEL_AVB1_AVTP_MATCH_1) +#define MOD_SEL6_2 FM(SEL_AVB1_MDC_0) FM(SEL_AVB1_MDC_1) +#define MOD_SEL6_1 FM(SEL_AVB1_MAGIC_0) FM(SEL_AVB1_MAGIC_1) + +/* MOD_SEL7 */ /* 0 */ /* 1 */ +#define MOD_SEL7_16 FM(SEL_AVB0_TX_CTL_0) FM(SEL_AVB0_TX_CTL_1) +#define MOD_SEL7_15 FM(SEL_AVB0_TXC_0) FM(SEL_AVB0_TXC_1) +#define MOD_SEL7_13 FM(SEL_AVB0_MDC_0) FM(SEL_AVB0_MDC_1) +#define MOD_SEL7_11 FM(SEL_AVB0_TD0_0) FM(SEL_AVB0_TD0_1) +#define MOD_SEL7_10 FM(SEL_AVB0_MAGIC_0) FM(SEL_AVB0_MAGIC_1) +#define MOD_SEL7_7 FM(SEL_AVB0_TD1_0) FM(SEL_AVB0_TD1_1) +#define MOD_SEL7_6 FM(SEL_AVB0_TD2_0) FM(SEL_AVB0_TD2_1) +#define MOD_SEL7_3 FM(SEL_AVB0_TD3_0) FM(SEL_AVB0_TD3_1) +#define MOD_SEL7_2 FM(SEL_AVB0_AVTP_MATCH_0) FM(SEL_AVB0_AVTP_MATCH_1) +#define MOD_SEL7_0 FM(SEL_AVB0_AVTP_PPS_0) FM(SEL_AVB0_AVTP_PPS_1) + +/* MOD_SEL8 */ /* 0 */ /* 1 */ +#define MOD_SEL8_11 FM(SEL_SDA5_0) FM(SEL_SDA5_1) +#define MOD_SEL8_10 FM(SEL_SCL5_0) FM(SEL_SCL5_1) +#define MOD_SEL8_9 FM(SEL_SDA4_0) FM(SEL_SDA4_1) +#define MOD_SEL8_8 FM(SEL_SCL4_0) FM(SEL_SCL4_1) +#define MOD_SEL8_7 FM(SEL_SDA3_0) FM(SEL_SDA3_1) +#define MOD_SEL8_6 FM(SEL_SCL3_0) FM(SEL_SCL3_1) +#define MOD_SEL8_5 FM(SEL_SDA2_0) FM(SEL_SDA2_1) +#define MOD_SEL8_4 FM(SEL_SCL2_0) FM(SEL_SCL2_1) +#define MOD_SEL8_3 FM(SEL_SDA1_0) FM(SEL_SDA1_1) +#define MOD_SEL8_2 FM(SEL_SCL1_0) FM(SEL_SCL1_1) +#define MOD_SEL8_1 FM(SEL_SDA0_0) FM(SEL_SDA0_1) +#define MOD_SEL8_0 FM(SEL_SCL0_0) FM(SEL_SCL0_1) + +#define PINMUX_MOD_SELS \ +\ +MOD_SEL4_19 MOD_SEL5_19 \ +MOD_SEL4_18 MOD_SEL6_18 \ + \ + MOD_SEL5_16 MOD_SEL6_16 MOD_SEL7_16 \ +MOD_SEL4_15 MOD_SEL5_15 MOD_SEL7_15 \ +MOD_SEL4_14 \ + MOD_SEL6_13 MOD_SEL7_13 \ +MOD_SEL4_12 MOD_SEL5_12 MOD_SEL6_12 \ + MOD_SEL5_11 MOD_SEL7_11 MOD_SEL8_11 \ + MOD_SEL6_10 MOD_SEL7_10 MOD_SEL8_10 \ +MOD_SEL4_9 MOD_SEL8_9 \ +MOD_SEL4_8 MOD_SEL5_8 MOD_SEL8_8 \ + MOD_SEL6_7 MOD_SEL7_7 MOD_SEL8_7 \ + MOD_SEL5_6 MOD_SEL6_6 MOD_SEL7_6 MOD_SEL8_6 \ +MOD_SEL4_5 MOD_SEL5_5 MOD_SEL6_5 MOD_SEL8_5 \ + MOD_SEL8_4 \ + MOD_SEL7_3 MOD_SEL8_3 \ +MOD_SEL4_2 MOD_SEL5_2 MOD_SEL6_2 MOD_SEL7_2 MOD_SEL8_2 \ +MOD_SEL4_1 MOD_SEL6_1 MOD_SEL8_1 \ + MOD_SEL5_0 MOD_SEL7_0 MOD_SEL8_0 + +enum { + PINMUX_RESERVED = 0, + + PINMUX_DATA_BEGIN, + GP_ALL(DATA), + PINMUX_DATA_END, + +#define F_(x, y) +#define FM(x) FN_##x, + PINMUX_FUNCTION_BEGIN, + GP_ALL(FN), + PINMUX_GPSR + PINMUX_IPSR + PINMUX_MOD_SELS + PINMUX_FUNCTION_END, +#undef F_ +#undef FM + +#define F_(x, y) +#define FM(x) x##_MARK, + PINMUX_MARK_BEGIN, + PINMUX_GPSR + PINMUX_IPSR + PINMUX_MOD_SELS + PINMUX_MARK_END, +#undef F_ +#undef FM +}; + +static const u16 pinmux_data[] = { + PINMUX_DATA_GP_ALL(), + + PINMUX_SINGLE(AVS1), + PINMUX_SINGLE(AVS0), + PINMUX_SINGLE(PCIE1_CLKREQ_N), + 
PINMUX_SINGLE(PCIE0_CLKREQ_N), + + /* TSN0 without MODSEL4 */ + PINMUX_SINGLE(TSN0_TXCREFCLK), + PINMUX_SINGLE(TSN0_RD2), + PINMUX_SINGLE(TSN0_RD3), + PINMUX_SINGLE(TSN0_RD1), + PINMUX_SINGLE(TSN0_RXC), + PINMUX_SINGLE(TSN0_RD0), + PINMUX_SINGLE(TSN0_RX_CTL), + PINMUX_SINGLE(TSN0_AVTP_CAPTURE), + PINMUX_SINGLE(TSN0_LINK), + PINMUX_SINGLE(TSN0_PHY_INT), + PINMUX_SINGLE(TSN0_MDIO), + /* TSN0 with MODSEL4 */ + PINMUX_IPSR_NOGM(0, TSN0_TD2, SEL_TSN0_TD2_1), + PINMUX_IPSR_NOGM(0, TSN0_TD3, SEL_TSN0_TD3_1), + PINMUX_IPSR_NOGM(0, TSN0_TD0, SEL_TSN0_TD0_1), + PINMUX_IPSR_NOGM(0, TSN0_TD1, SEL_TSN0_TD1_1), + PINMUX_IPSR_NOGM(0, TSN0_TXC, SEL_TSN0_TXC_1), + PINMUX_IPSR_NOGM(0, TSN0_TX_CTL, SEL_TSN0_TX_CTL_1), + PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0, SEL_TSN0_AVTP_PPS0_1), + PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH, SEL_TSN0_AVTP_MATCH_1), + PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1, SEL_TSN0_AVTP_PPS1_1), + PINMUX_IPSR_NOGM(0, TSN0_MDC, SEL_TSN0_MDC_1), + + /* AVB2 without MODSEL5 */ + PINMUX_SINGLE(AVB2_RX_CTL), + PINMUX_SINGLE(AVB2_RXC), + PINMUX_SINGLE(AVB2_RD0), + PINMUX_SINGLE(AVB2_RD1), + PINMUX_SINGLE(AVB2_RD2), + PINMUX_SINGLE(AVB2_MDIO), + PINMUX_SINGLE(AVB2_RD3), + PINMUX_SINGLE(AVB2_TXCREFCLK), + PINMUX_SINGLE(AVB2_PHY_INT), + PINMUX_SINGLE(AVB2_LINK), + PINMUX_SINGLE(AVB2_AVTP_CAPTURE), + /* AVB2 with MODSEL5 */ + PINMUX_IPSR_NOGM(0, AVB2_TX_CTL, SEL_AVB2_TX_CTL_1), + PINMUX_IPSR_NOGM(0, AVB2_TXC, SEL_AVB2_TXC_1), + PINMUX_IPSR_NOGM(0, AVB2_TD0, SEL_AVB2_TD0_1), + PINMUX_IPSR_NOGM(0, AVB2_TD1, SEL_AVB2_TD1_1), + PINMUX_IPSR_NOGM(0, AVB2_TD2, SEL_AVB2_TD2_1), + PINMUX_IPSR_NOGM(0, AVB2_TD3, SEL_AVB2_TD3_1), + PINMUX_IPSR_NOGM(0, AVB2_MDC, SEL_AVB2_MDC_1), + PINMUX_IPSR_NOGM(0, AVB2_MAGIC, SEL_AVB2_MAGIC_1), + PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH, SEL_AVB2_AVTP_MATCH_1), + PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS, SEL_AVB2_AVTP_PPS_1), + + /* IP0SR0 */ + PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_B), + PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A), + + PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1), + + PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2), + + PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3), + PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK), + + PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2), + PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD), + + PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1), + PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD), + + PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0), + PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC), + + PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2), + + /* IP1SR0 */ + PINMUX_IPSR_GPSR(IP1SR0_3_0, MSIOF5_SS1), + + PINMUX_IPSR_GPSR(IP1SR0_7_4, MSIOF5_SYNC), + + PINMUX_IPSR_GPSR(IP1SR0_11_8, MSIOF5_TXD), + + PINMUX_IPSR_GPSR(IP1SR0_15_12, MSIOF5_SCK), + + PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD), + + PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2), + PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1), + PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A), + + PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1), + PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1), + PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1), + + PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC), + PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1), + PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1), + + /* IP2SR0 */ + PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD), + PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N), + PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N), + + PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK), + PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N), + PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N), + + PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD), + PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1), + PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1), + + /* IP0SR1 */ + PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2), +
PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A), + PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3), + + PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1), + PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A), + PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3), + + PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC), + PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A), + PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N), + + PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK), + PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A), + PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N), + + PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD), + PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A), + PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3), + + PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD), + + PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2), + PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X), + PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X), + + PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1), + PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X), + PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X), + + /* IP1SR1 */ + PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC), + PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X), + PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X), + PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B), + + PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD), + PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X), + PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X), + PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B), + + PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK), + PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X), + PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X), + + PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD), + + PINMUX_IPSR_GPSR(IP1SR1_19_16, HTX0), + PINMUX_IPSR_GPSR(IP1SR1_19_16, TX0), + + PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N), + PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N), + PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A), + + PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N), + PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N), + PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A), + + PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0), + PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0), + PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A), + + /* IP2SR1 */ + PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0), + PINMUX_IPSR_GPSR(IP2SR1_3_0, RX0), + + PINMUX_IPSR_GPSR(IP2SR1_7_4, SCIF_CLK), + PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A), + + PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK), + PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3), + + PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS), + PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4), + + PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD), + PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A), + + PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT), + PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_A), + + PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN), + PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A), + + PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2), + PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1), + PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B), + + /* IP3SR1 */ + PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3), + PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A), + PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2), + + PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3), + PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A), + PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK), + PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A), + + PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N), + PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A), + PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD), + PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A), + + PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N), + PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A), + PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD), + + PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3), + PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A), + PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC), + + /* IP0SR2 */ + PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA), + PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX), + PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A), 
+ + PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N), + PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX), + PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A), + + PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR), + PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX), + PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5), + + PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR), + PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX), + PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B), + + PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR), + + PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N), + + PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB), + + PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1), + PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX), + PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B), + + /* IP1SR2 */ + PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0), + PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX), + PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A), + + PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK), + PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X), + + PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX), + PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X), + + PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX), + PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR), + + PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX), + PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2), + PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A), + + PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX), + PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3), + PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B), + PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A), + + PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX), + PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B), + + PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX), + PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B), + + /* IP2SR2 */ + PINMUX_IPSR_GPSR(IP2SR2_3_0, CANFD4_TX), + PINMUX_IPSR_GPSR(IP2SR2_3_0, PWM4), + + PINMUX_IPSR_GPSR(IP2SR2_7_4, CANFD4_RX), + PINMUX_IPSR_GPSR(IP2SR2_7_4, PWM5), + + PINMUX_IPSR_GPSR(IP2SR2_11_8, CANFD7_TX), + PINMUX_IPSR_GPSR(IP2SR2_11_8, PWM6), + + PINMUX_IPSR_GPSR(IP2SR2_15_12, CANFD7_RX), + PINMUX_IPSR_GPSR(IP2SR2_15_12, PWM7), + + /* IP0SR3 */ + PINMUX_IPSR_GPSR(IP0SR3_3_0, MMC_SD_D1), + PINMUX_IPSR_GPSR(IP0SR3_7_4, MMC_SD_D0), + PINMUX_IPSR_GPSR(IP0SR3_11_8, MMC_SD_D2), + PINMUX_IPSR_GPSR(IP0SR3_15_12, MMC_SD_CLK), + PINMUX_IPSR_GPSR(IP0SR3_19_16, MMC_DS), + PINMUX_IPSR_GPSR(IP0SR3_23_20, MMC_SD_D3), + PINMUX_IPSR_GPSR(IP0SR3_27_24, MMC_D5), + PINMUX_IPSR_GPSR(IP0SR3_31_28, MMC_D4), + + /* IP1SR3 */ + PINMUX_IPSR_GPSR(IP1SR3_3_0, MMC_D7), + + PINMUX_IPSR_GPSR(IP1SR3_7_4, MMC_D6), + + PINMUX_IPSR_GPSR(IP1SR3_11_8, MMC_SD_CMD), + + PINMUX_IPSR_GPSR(IP1SR3_15_12, SD_CD), + + PINMUX_IPSR_GPSR(IP1SR3_19_16, SD_WP), + + PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN), + PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN), + PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A), + PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X), + + PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT), + PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT), + PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_A), + PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X), + + PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL), + + /* IP2SR3 */ + PINMUX_IPSR_GPSR(IP2SR3_3_0, QSPI0_IO3), + PINMUX_IPSR_GPSR(IP2SR3_7_4, QSPI0_IO2), + PINMUX_IPSR_GPSR(IP2SR3_11_8, QSPI0_MISO_IO1), + PINMUX_IPSR_GPSR(IP2SR3_15_12, QSPI0_MOSI_IO0), + PINMUX_IPSR_GPSR(IP2SR3_19_16, QSPI0_SPCLK), + PINMUX_IPSR_GPSR(IP2SR3_23_20, QSPI1_MOSI_IO0), + PINMUX_IPSR_GPSR(IP2SR3_27_24, QSPI1_SPCLK), + PINMUX_IPSR_GPSR(IP2SR3_31_28, QSPI1_MISO_IO1), + + /* IP3SR3 */ + PINMUX_IPSR_GPSR(IP3SR3_3_0, QSPI1_IO2), + PINMUX_IPSR_GPSR(IP3SR3_7_4, QSPI1_SSL), + PINMUX_IPSR_GPSR(IP3SR3_11_8, QSPI1_IO3), + PINMUX_IPSR_GPSR(IP3SR3_15_12, RPC_RESET_N), + 
PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
+	PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),
+
+	/* IP0SR6 */
+	PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),
+
+	PINMUX_IPSR_MSEL(IP0SR6_7_4, AVB1_MAGIC, SEL_AVB1_MAGIC_1),
+
+	PINMUX_IPSR_MSEL(IP0SR6_11_8, AVB1_MDC, SEL_AVB1_MDC_1),
+
+	PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),
+
+	PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
+	PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),
+
+	PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_AVTP_MATCH, SEL_AVB1_AVTP_MATCH_1),
+	PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_MII_RX_ER, SEL_AVB1_AVTP_MATCH_0),
+
+	PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_TXC, SEL_AVB1_TXC_1),
+	PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_MII_TXC, SEL_AVB1_TXC_0),
+
+	PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_TX_CTL, SEL_AVB1_TX_CTL_1),
+	PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_MII_TX_EN, SEL_AVB1_TX_CTL_0),
+
+	/* IP1SR6 */
+	PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
+	PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_MII_RXC),
+
+	PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
+	PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),
+
+	PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_AVTP_PPS, SEL_AVB1_AVTP_PPS_1),
+	PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_MII_COL, SEL_AVB1_AVTP_PPS_0),
+
+	PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
+	PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),
+
+	PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_TD1, SEL_AVB1_TD1_1),
+	PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_MII_TD1, SEL_AVB1_TD1_0),
+
+	PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_TD0, SEL_AVB1_TD0_1),
+	PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_MII_TD0, SEL_AVB1_TD0_0),
+
+	PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
+	PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
+
+	PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_RD0),
+	PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),
+
+	/* IP2SR6 */
+	PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_TD2, SEL_AVB1_TD2_1),
+	PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_MII_TD2, SEL_AVB1_TD2_0),
+
+	PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
+	PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),
+
+	PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_TD3, SEL_AVB1_TD3_1),
+	PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_MII_TD3, SEL_AVB1_TD3_0),
+
+	PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
+	PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
+
+	PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),
+
+	/* IP0SR7 */
+	PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_AVTP_PPS, SEL_AVB0_AVTP_PPS_1),
+	PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_MII_COL, SEL_AVB0_AVTP_PPS_0),
+
+	PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
+	PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),
+
+	PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_AVTP_MATCH, SEL_AVB0_AVTP_MATCH_1),
+	PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_MII_RX_ER, SEL_AVB0_AVTP_MATCH_0),
+	PINMUX_IPSR_MSEL(IP0SR7_11_8, CC5_OSCOUT, SEL_AVB0_AVTP_MATCH_0),
+
+	PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_TD3, SEL_AVB0_TD3_1),
+	PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_MII_TD3, SEL_AVB0_TD3_0),
+
+	PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
+	PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),
+
+	PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),
+
+	PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_TD2, SEL_AVB0_TD2_1),
+	PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_MII_TD2, SEL_AVB0_TD2_0),
+
+	PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_TD1, SEL_AVB0_TD1_1),
+	PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_MII_TD1, SEL_AVB0_TD1_0),
+
+	/* IP1SR7 */
+	PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
+	PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_MII_RD3),
+
+	PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),
+
+	PINMUX_IPSR_MSEL(IP1SR7_11_8, AVB0_MAGIC, SEL_AVB0_MAGIC_1),
+
+	PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_TD0, SEL_AVB0_TD0_1),
+	PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_MII_TD0, SEL_AVB0_TD0_0),
+
+	PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
+	PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),
+
+	PINMUX_IPSR_MSEL(IP1SR7_23_20, AVB0_MDC, SEL_AVB0_MDC_1),
+
+	PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),
+
+	PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_TXC, SEL_AVB0_TXC_1),
+	PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_MII_TXC, SEL_AVB0_TXC_0),
+
+	/* IP2SR7 */
+	PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_TX_CTL, SEL_AVB0_TX_CTL_1),
+	PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_MII_TX_EN, SEL_AVB0_TX_CTL_0),
+
+	PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
+	PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
+
+	PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_RD0),
+	PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_MII_RD0),
+
+	PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_RXC),
+	PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_MII_RXC),
+
+	PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_RX_CTL),
+	PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_MII_RX_DV),
+
+	/* IP0SR8 */
+	PINMUX_IPSR_MSEL(IP0SR8_3_0, SCL0, SEL_SCL0_0),
+	PINMUX_IPSR_MSEL(IP0SR8_7_4, SDA0, SEL_SDA0_0),
+	PINMUX_IPSR_MSEL(IP0SR8_11_8, SCL1, SEL_SCL1_0),
+	PINMUX_IPSR_MSEL(IP0SR8_15_12, SDA1, SEL_SDA1_0),
+	PINMUX_IPSR_MSEL(IP0SR8_19_16, SCL2, SEL_SCL2_0),
+	PINMUX_IPSR_MSEL(IP0SR8_23_20, SDA2, SEL_SDA2_0),
+	PINMUX_IPSR_MSEL(IP0SR8_27_24, SCL3, SEL_SCL3_0),
+	PINMUX_IPSR_MSEL(IP0SR8_31_28, SDA3, SEL_SDA3_0),
+
+	/* IP1SR8 */
+	PINMUX_IPSR_MSEL(IP1SR8_3_0, SCL4, SEL_SCL4_0),
+	PINMUX_IPSR_MSEL(IP1SR8_3_0, HRX2, SEL_SCL4_0),
+	PINMUX_IPSR_MSEL(IP1SR8_3_0, SCK4, SEL_SCL4_0),
+
+	PINMUX_IPSR_MSEL(IP1SR8_7_4, SDA4, SEL_SDA4_0),
+	PINMUX_IPSR_MSEL(IP1SR8_7_4, HTX2, SEL_SDA4_0),
+	PINMUX_IPSR_MSEL(IP1SR8_7_4, CTS4_N, SEL_SDA4_0),
+
+	PINMUX_IPSR_MSEL(IP1SR8_11_8, SCL5, SEL_SCL5_0),
+	PINMUX_IPSR_MSEL(IP1SR8_11_8, HRTS2_N, SEL_SCL5_0),
+	PINMUX_IPSR_MSEL(IP1SR8_11_8, RTS4_N, SEL_SCL5_0),
+
+	PINMUX_IPSR_MSEL(IP1SR8_15_12, SDA5, SEL_SDA5_0),
+	PINMUX_IPSR_MSEL(IP1SR8_15_12, SCIF_CLK2, SEL_SDA5_0),
+
+	PINMUX_IPSR_GPSR(IP1SR8_19_16, HCTS2_N),
+	PINMUX_IPSR_GPSR(IP1SR8_19_16, TX4),
+
+	PINMUX_IPSR_GPSR(IP1SR8_23_20, HSCK2),
+	PINMUX_IPSR_GPSR(IP1SR8_23_20, RX4),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+	GP_ASSIGN_LAST(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+	PINMUX_GPIO_GP_ALL(),
+};
+
+/* - AVB0 ------------------------------------------------ */
+static const unsigned int avb0_link_pins[] = {
+	/* AVB0_LINK */
+	RCAR_GP_PIN(7, 4),
+};
+static const unsigned int avb0_link_mux[] = {
+	AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+	/* AVB0_MAGIC */
+	RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb0_magic_mux[] = {
+	AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+	/* AVB0_PHY_INT */
+	RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+	AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+	/* AVB0_MDC, AVB0_MDIO */
+	RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int avb0_mdio_mux[] = {
+	AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+	/*
+	 * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+	 * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+	 */
+	RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 15),
+	RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7),
+	RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 3),
+	RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19),
+	RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+	RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+	AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+	AVB0_TD0_MARK, AVB0_TD1_MARK,
+	AVB0_TD2_MARK, AVB0_TD3_MARK,
+	AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+	AVB0_RD0_MARK, AVB0_RD1_MARK,
+	AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+	/* AVB0_TXCREFCLK */
+	RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+	AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+	/* AVB0_AVTP_PPS */
+	RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+	AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+	/* AVB0_AVTP_CAPTURE */
+	RCAR_GP_PIN(7, 1),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+	AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+	/* AVB0_AVTP_MATCH */
+	RCAR_GP_PIN(7, 2),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+	AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------ */
+static const unsigned int avb1_link_pins[] = {
+	/* AVB1_LINK */
+	RCAR_GP_PIN(6, 4),
+};
+static const unsigned int avb1_link_mux[] = {
+	AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+	/* AVB1_MAGIC */
+	RCAR_GP_PIN(6, 1),
+};
+static const unsigned int avb1_magic_mux[] = {
+	AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+	/* AVB1_PHY_INT */
+	RCAR_GP_PIN(6, 3),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+	AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+	/* AVB1_MDC, AVB1_MDIO */
+	RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 0),
+};
+static const unsigned int avb1_mdio_mux[] = {
+	AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+	/*
+	 * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+	 * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+	 */
+	RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+	RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12),
+	RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 18),
+	RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 8),
+	RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+	AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+	AVB1_TD0_MARK, AVB1_TD1_MARK,
+	AVB1_TD2_MARK, AVB1_TD3_MARK,
+	AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+	AVB1_RD0_MARK, AVB1_RD1_MARK,
+	AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+	/* AVB1_TXCREFCLK */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+	AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+	/* AVB1_AVTP_PPS */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+	AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+	/* AVB1_AVTP_CAPTURE */
+	RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+	AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+	/* AVB1_AVTP_MATCH */
+	RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+	AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------ */
+static const unsigned int avb2_link_pins[] = {
+	/* AVB2_LINK */
+	RCAR_GP_PIN(5, 3),
+};
+static const unsigned int avb2_link_mux[] = {
+	AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+	/* AVB2_MAGIC */
+	RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb2_magic_mux[] = {
+	AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+	/* AVB2_PHY_INT */
+	RCAR_GP_PIN(5, 4),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+	AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+	/* AVB2_MDC, AVB2_MDIO */
+	RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int avb2_mdio_mux[] = {
+	AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+	/*
+	 * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+	 * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+	 */
+	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 16),
+	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 12),
+	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 8),
+	RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 18),
+	RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 14),
+	RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 9),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+	AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+	AVB2_TD0_MARK, AVB2_TD1_MARK,
+	AVB2_TD2_MARK, AVB2_TD3_MARK,
+	AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+	AVB2_RD0_MARK, AVB2_RD1_MARK,
+	AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+	/* AVB2_TXCREFCLK */
+	RCAR_GP_PIN(5, 7),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+	AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+	/* AVB2_AVTP_PPS */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+	AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+	/* AVB2_AVTP_CAPTURE */
+	RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+	AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+	/* AVB2_AVTP_MATCH */
+	RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+	AVB2_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+	/* CANFD0_TX, CANFD0_RX */
+	RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int canfd0_data_mux[] = {
+	CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+	/* CANFD1_TX, CANFD1_RX */
+	RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd1_data_mux[] = {
+	CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+	/* CANFD2_TX, CANFD2_RX */
+	RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int canfd2_data_mux[] = {
+	CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+	/* CANFD3_TX, CANFD3_RX */
+	RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+};
+static const unsigned int canfd3_data_mux[] = {
+	CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD4 ----------------------------------------------------------------- */
+static const unsigned int canfd4_data_pins[] = {
+	/* CANFD4_TX, CANFD4_RX */
+	RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+};
+static const unsigned int canfd4_data_mux[] = {
+	CANFD4_TX_MARK, CANFD4_RX_MARK,
+};
+
+/* - CANFD5 ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_pins[] = {
+	/* CANFD5_TX, CANFD5_RX */
+	RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int canfd5_data_mux[] = {
+	CANFD5_TX_MARK, CANFD5_RX_MARK,
+};
+
+/* - CANFD5_B ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_b_pins[] = {
+	/* CANFD5_TX_B, CANFD5_RX_B */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int canfd5_data_b_mux[] = {
+	CANFD5_TX_B_MARK, CANFD5_RX_B_MARK,
+};
+
+/* - CANFD6 ----------------------------------------------------------------- */
+static const unsigned int canfd6_data_pins[] = {
+	/* CANFD6_TX, CANFD6_RX */
+	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int canfd6_data_mux[] = {
+	CANFD6_TX_MARK, CANFD6_RX_MARK,
+};
+
+/* - CANFD7 ----------------------------------------------------------------- */
+static const unsigned int canfd7_data_pins[] = {
+	/* CANFD7_TX, CANFD7_RX */
+	RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int canfd7_data_mux[] = {
+	CANFD7_TX_MARK, CANFD7_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+	/* CAN_CLK */
+	RCAR_GP_PIN(2, 9),
+};
+static const unsigned int can_clk_mux[] = {
+	CAN_CLK_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+	/* HRX0, HTX0 */
+	RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int hscif0_data_mux[] = {
+	HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+	/* HSCK0 */
+	RCAR_GP_PIN(1, 15),
+};
+static const unsigned int hscif0_clk_mux[] = {
+	HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+	/* HRTS0_N, HCTS0_N */
+	RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+	HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+	/* HRX1, HTX1 */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int hscif1_data_mux[] = {
+	HRX1_MARK, HTX1_MARK,
+};
+static const unsigned int hscif1_clk_pins[] = {
+	/* HSCK1 */
+	RCAR_GP_PIN(0, 18),
+};
+static const unsigned int hscif1_clk_mux[] = {
+	HSCK1_MARK,
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+	/* HRTS1_N, HCTS1_N */
+	RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+	HRTS1_N_MARK, HCTS1_N_MARK,
+};
+
+/* - HSCIF1_X---------------------------------------------------------------- */
+static const unsigned int hscif1_data_x_pins[] = {
+	/* HRX1_X, HTX1_X */
+	RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int hscif1_data_x_mux[] = {
+	HRX1_X_MARK, HTX1_X_MARK,
+};
+static const unsigned int hscif1_clk_x_pins[] = {
+	/* HSCK1_X */
+	RCAR_GP_PIN(1, 10),
+};
+static const unsigned int hscif1_clk_x_mux[] = {
+	HSCK1_X_MARK,
+};
+static const unsigned int hscif1_ctrl_x_pins[] = {
+	/* HRTS1_N_X, HCTS1_N_X */
+	RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int hscif1_ctrl_x_mux[] = {
+	HRTS1_N_X_MARK, HCTS1_N_X_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+	/* HRX2, HTX2 */
+	RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+	HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+	/* HSCK2 */
+	RCAR_GP_PIN(8, 13),
+};
+static const unsigned int hscif2_clk_mux[] = {
+	HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+	/* HRTS2_N, HCTS2_N */
+	RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+	HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_pins[] = {
+	/* HRX3, HTX3 */
+	RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int hscif3_data_mux[] = {
+	HRX3_MARK, HTX3_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+	/* HSCK3 */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_clk_mux[] = {
+	HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+	/* HRTS3_N, HCTS3_N */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+	HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+/* - HSCIF3_A ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+	/* HRX3_A, HTX3_A */
+	RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+	HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_a_pins[] = {
+	/* HSCK3_A */
+	RCAR_GP_PIN(1, 3),
+};
+static const unsigned int hscif3_clk_a_mux[] = {
+	HSCK3_A_MARK,
+};
+static const unsigned int hscif3_ctrl_a_pins[] = {
+	/* HRTS3_N_A, HCTS3_N_A */
+	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int hscif3_ctrl_a_mux[] = {
+	HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+	/* SDA0, SCL0 */
+	RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 0),
+};
+static const unsigned int i2c0_mux[] = {
+	SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+	/* SDA1, SCL1 */
+	RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 2),
+};
+static const unsigned int i2c1_mux[] = {
+	SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+	/* SDA2, SCL2 */
+	RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 4),
+};
+static const unsigned int i2c2_mux[] = {
+	SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+	/* SDA3, SCL3 */
+	RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 6),
+};
+static const unsigned int i2c3_mux[] = {
+	SDA3_MARK, SCL3_MARK,
+};
+
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_pins[] = {
+	/* SDA4, SCL4 */
+	RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 8),
+};
+static const unsigned int i2c4_mux[] = {
+	SDA4_MARK, SCL4_MARK,
+};
+
+/* - I2C5 ------------------------------------------------------------------- */
+static const unsigned int i2c5_pins[] = {
+	/* SDA5, SCL5 */
+	RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 10),
+};
+static const unsigned int i2c5_mux[] = {
+	SDA5_MARK, SCL5_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data_pins[] = {
+	/* MMC_SD_D[0:3], MMC_D[4:7] */
+	RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+	RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 5),
+	RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6),
+	RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int mmc_data_mux[] = {
+	MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+	MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+	MMC_D4_MARK, MMC_D5_MARK,
+	MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+	/* MMC_SD_CLK, MMC_SD_CMD */
+	RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+	MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+	/* SD_CD */
+	RCAR_GP_PIN(3, 11),
+};
+static const unsigned int mmc_cd_mux[] = {
+	SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+	/* SD_WP */
+	RCAR_GP_PIN(3, 12),
+};
+static const unsigned int mmc_wp_mux[] = {
+	SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+	/* MMC_DS */
+	RCAR_GP_PIN(3, 4),
+};
+static const unsigned int mmc_ds_mux[] = {
+	MMC_DS_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+	/* MSIOF0_SCK */
+	RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_clk_mux[] = {
+	MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+	/* MSIOF0_SYNC */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_sync_mux[] = {
+	MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+	/* MSIOF0_SS1 */
+	RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+	MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+	/* MSIOF0_SS2 */
+	RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+	MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+	/* MSIOF0_TXD */
+	RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_txd_mux[] = {
+	MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+	/* MSIOF0_RXD */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+	MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+	/* MSIOF1_SCK */
+	RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof1_clk_mux[] = {
+	MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+	/* MSIOF1_SYNC */
+	RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof1_sync_mux[] = {
+	MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+	/* MSIOF1_SS1 */
+	RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+	MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+	/* MSIOF1_SS2 */
+	RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+	MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+	/* MSIOF1_TXD */
+	RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof1_txd_mux[] = {
+	MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+	/* MSIOF1_RXD */
+	RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+	MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+	/* MSIOF2_SCK */
+	RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof2_clk_mux[] = {
+	MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+	/* MSIOF2_SYNC */
+	RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof2_sync_mux[] = {
+	MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+	/* MSIOF2_SS1 */
+	RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+	MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+	/* MSIOF2_SS2 */
+	RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+	MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+	/* MSIOF2_TXD */
+	RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof2_txd_mux[] = {
+	MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+	/* MSIOF2_RXD */
+	RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+	MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+	/* MSIOF3_SCK */
+	RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_clk_mux[] = {
+	MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+	/* MSIOF3_SYNC */
+	RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof3_sync_mux[] = {
+	MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+	/* MSIOF3_SS1 */
+	RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+	MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+	/* MSIOF3_SS2 */
+	RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+	MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+	/* MSIOF3_TXD */
+	RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof3_txd_mux[] = {
+	MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+	/* MSIOF3_RXD */
+	RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+	MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+	/* MSIOF4_SCK */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof4_clk_mux[] = {
+	MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+	/* MSIOF4_SYNC */
+	RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof4_sync_mux[] = {
+	MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+	/* MSIOF4_SS1 */
+	RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+	MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+	/* MSIOF4_SS2 */
+	RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+	MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+	/* MSIOF4_TXD */
+	RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof4_txd_mux[] = {
+	MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+	/* MSIOF4_RXD */
+	RCAR_GP_PIN(1, 27),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+	MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+	/* MSIOF5_SCK */
+	RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof5_clk_mux[] = {
+	MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+	/* MSIOF5_SYNC */
+	RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof5_sync_mux[] = {
+	MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+	/* MSIOF5_SS1 */
+	RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+	MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+	/* MSIOF5_SS2 */
+	RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+	MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+	/* MSIOF5_TXD */
+	RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof5_txd_mux[] = {
+	MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+	/* MSIOF5_RXD */
+	RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+	MSIOF5_RXD_MARK,
+};
+
+/* - PCIE ------------------------------------------------------------------- */
+static const unsigned int pcie0_clkreq_n_pins[] = {
+	/* PCIE0_CLKREQ_N */
+	RCAR_GP_PIN(4, 21),
+};
+
+static const unsigned int pcie0_clkreq_n_mux[] = {
+	PCIE0_CLKREQ_N_MARK,
+};
+
+static const unsigned int pcie1_clkreq_n_pins[] = {
+	/* PCIE1_CLKREQ_N */
+	RCAR_GP_PIN(4, 22),
+};
+
+static const unsigned int pcie1_clkreq_n_mux[] = {
+	PCIE1_CLKREQ_N_MARK,
+};
+
+/* - PWM0_A ------------------------------------------------------------------- */
+static const unsigned int pwm0_a_pins[] = {
+	/* PWM0_A */
+	RCAR_GP_PIN(1, 15),
+};
+static const unsigned int pwm0_a_mux[] = {
+	PWM0_A_MARK,
+};
+
+/* - PWM1_A ------------------------------------------------------------------- */
+static const unsigned int pwm1_a_pins[] = {
+	/* PWM1_A */
+	RCAR_GP_PIN(3, 13),
+};
+static const unsigned int pwm1_a_mux[] = {
+	PWM1_A_MARK,
+};
+
+/* - PWM1_B ------------------------------------------------------------------- */
+static const unsigned int pwm1_b_pins[] = {
+	/* PWM1_B */
+	RCAR_GP_PIN(2, 13),
+};
+static const unsigned int pwm1_b_mux[] = {
+	PWM1_B_MARK,
+};
+
+/* - PWM2_B ------------------------------------------------------------------- */
+static const unsigned int pwm2_b_pins[] = {
+	/* PWM2_B */
+	RCAR_GP_PIN(2, 14),
+};
+static const unsigned int pwm2_b_mux[] = {
+	PWM2_B_MARK,
+};
+
+/* - PWM3_A ------------------------------------------------------------------- */
+static const unsigned int pwm3_a_pins[] = {
+	/* PWM3_A */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int pwm3_a_mux[] = {
+	PWM3_A_MARK,
+};
+
+/* - PWM3_B ------------------------------------------------------------------- */
+static const unsigned int pwm3_b_pins[] = {
+	/* PWM3_B */
+	RCAR_GP_PIN(2, 15),
+};
+static const unsigned int pwm3_b_mux[] = {
+	PWM3_B_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+	/* PWM4 */
+	RCAR_GP_PIN(2, 16),
+};
+static const unsigned int pwm4_mux[] = {
+	PWM4_MARK,
+};
+
+/* - PWM5 ------------------------------------------------------------------- */
+static const unsigned int pwm5_pins[] = {
+	/* PWM5 */
+	RCAR_GP_PIN(2, 17),
+};
+static const unsigned int pwm5_mux[] = {
+	PWM5_MARK,
+};
+
+/* - PWM6 ------------------------------------------------------------------- */
+static const unsigned int pwm6_pins[] = {
+	/* PWM6 */
+	RCAR_GP_PIN(2, 18),
+};
+static const unsigned int pwm6_mux[] = {
+	PWM6_MARK,
+};
+
+/* - PWM7 ------------------------------------------------------------------- */
+static const unsigned int pwm7_pins[] = {
+	/* PWM7 */
+	RCAR_GP_PIN(2, 19),
+};
+static const unsigned int pwm7_mux[] = {
+	PWM7_MARK,
+};
+
+/* - PWM8_A ------------------------------------------------------------------- */
+static const unsigned int pwm8_a_pins[] = {
+	/* PWM8_A */
+	RCAR_GP_PIN(1, 13),
+};
+static const unsigned int pwm8_a_mux[] = {
+	PWM8_A_MARK,
+};
+
+/* - PWM9_A ------------------------------------------------------------------- */
+static const unsigned int pwm9_a_pins[] = {
+	/* PWM9_A */
+	RCAR_GP_PIN(1, 14),
+};
+static const unsigned int pwm9_a_mux[] = {
+	PWM9_A_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+	/* SPCLK, SSL */
+	RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+	QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data_pins[] = {
+	/* MOSI_IO0, MISO_IO1, IO2, IO3 */
+	RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+	RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int qspi0_data_mux[] = {
+	QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+	QSPI0_IO2_MARK, QSPI0_IO3_MARK
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+	/* SPCLK, SSL */
+	RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+	QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data_pins[] = {
+	/* MOSI_IO0, MISO_IO1, IO2, IO3 */
+	RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 23),
+	RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi1_data_mux[] = {
+	QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+	QSPI1_IO2_MARK, QSPI1_IO3_MARK
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+	/* RX0, TX0 */
+	RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int scif0_data_mux[] = {
+	RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+	/* SCK0 */
+	RCAR_GP_PIN(1, 15),
+};
+static const unsigned int scif0_clk_mux[] = {
+	SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+	/* RTS0_N, CTS0_N */
+	RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+	RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+	/* RX1, TX1 */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int scif1_data_mux[] = {
+	RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+	/* SCK1 */
+	RCAR_GP_PIN(0, 18),
+};
+static const unsigned int scif1_clk_mux[] = {
+	SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+	/* RTS1_N, CTS1_N */
+	RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+	RTS1_N_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF1_X ------------------------------------------------------------------ */
+static const unsigned int scif1_data_x_pins[] = {
+	/* RX1_X, TX1_X */
+	RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif1_data_x_mux[] = {
+	RX1_X_MARK, TX1_X_MARK,
+};
+static const unsigned int scif1_clk_x_pins[] = {
+	/* SCK1_X */
+	RCAR_GP_PIN(1, 10),
+};
+static const unsigned int scif1_clk_x_mux[] = {
+	SCK1_X_MARK,
+};
+static const unsigned int scif1_ctrl_x_pins[] = {
+	/* RTS1_N_X, CTS1_N_X */
+	RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_ctrl_x_mux[] = {
+	RTS1_N_X_MARK, CTS1_N_X_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+	/* RX3, TX3 */
+	RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_mux[] = {
+	RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+	/* SCK3 */
+	RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_mux[] = {
+	SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+	/* RTS3_N, CTS3_N */
+	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+	RTS3_N_MARK, CTS3_N_MARK,
+};
+
+/* - SCIF3_A ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+	/* RX3_A, TX3_A */
+	RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int scif3_data_a_mux[] = {
+	RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_a_pins[] = {
+	/* SCK3_A */
+	RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_clk_a_mux[] = {
+	SCK3_A_MARK,
+};
+static const unsigned int scif3_ctrl_a_pins[] = {
+	/* RTS3_N_A, CTS3_N_A */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_a_mux[] = {
+	RTS3_N_A_MARK, CTS3_N_A_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+	/* RX4, TX4 */
+	RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int scif4_data_mux[] = {
+	RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+	/* SCK4 */
+	RCAR_GP_PIN(8, 8),
+};
+static const unsigned int scif4_clk_mux[] = {
+	SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+	/* RTS4_N, CTS4_N */
+	RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+	RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+	/* SCIF_CLK */
+	RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif_clk_mux[] = {
+	SCIF_CLK_MARK,
+};
+
+/* - TPU ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+	/* TPU0TO0 */
+	RCAR_GP_PIN(2, 8),
+};
+static const unsigned int tpu_to0_mux[] = {
+	TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+	/* TPU0TO1 */
+	RCAR_GP_PIN(2, 7),
+};
+static const unsigned int tpu_to1_mux[] = {
+	TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+	/* TPU0TO2 */
+	RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tpu_to2_mux[] = {
+	TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+	/* TPU0TO3 */
+	RCAR_GP_PIN(2, 13),
+};
+static const unsigned int tpu_to3_mux[] = {
+	TPU0TO3_MARK,
+};
+
+/* - TPU_A ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+	/* TPU0TO0_A */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int tpu_to0_a_mux[] = {
+	TPU0TO0_A_MARK,
+};
+static const unsigned int tpu_to1_a_pins[] = {
+	/* TPU0TO1_A */
+	RCAR_GP_PIN(1, 26),
+};
+static const unsigned int tpu_to1_a_mux[] = {
+	TPU0TO1_A_MARK,
+};
+static const unsigned int tpu_to2_a_pins[] = {
+	/* TPU0TO2_A */
+	RCAR_GP_PIN(2, 0),
+};
+static const unsigned int tpu_to2_a_mux[] = {
+	TPU0TO2_A_MARK,
+};
+static const unsigned int tpu_to3_a_pins[] = {
+	/* TPU0TO3_A */
+	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int tpu_to3_a_mux[] = {
+	TPU0TO3_A_MARK,
+};
+
+/* - TSN0 ------------------------------------------------ */
+static const unsigned int tsn0_link_pins[] = {
+	/* TSN0_LINK */
+	RCAR_GP_PIN(4, 4),
+};
+static const unsigned int tsn0_link_mux[] = {
+	TSN0_LINK_MARK,
+};
+static const unsigned int tsn0_phy_int_pins[] = {
+	/* TSN0_PHY_INT */
+	RCAR_GP_PIN(4, 3),
+};
+static const unsigned int tsn0_phy_int_mux[] = {
+	TSN0_PHY_INT_MARK,
+};
+static const unsigned int tsn0_mdio_pins[] = {
+	/* TSN0_MDC, TSN0_MDIO */
+	RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+};
+static const unsigned int tsn0_mdio_mux[] = {
+	TSN0_MDC_MARK, TSN0_MDIO_MARK,
+};
+static const unsigned int tsn0_rgmii_pins[] = {
+	/*
+	 * TSN0_TX_CTL, TSN0_TXC, TSN0_TD0, TSN0_TD1, TSN0_TD2, TSN0_TD3,
+	 * TSN0_RX_CTL, TSN0_RXC, TSN0_RD0, TSN0_RD1, TSN0_RD2, TSN0_RD3,
+	 */
+	RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 12),
+	RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14),
+	RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+	RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 11),
+	RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 13),
+	RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int tsn0_rgmii_mux[] = {
+	TSN0_TX_CTL_MARK, TSN0_TXC_MARK,
+	TSN0_TD0_MARK, TSN0_TD1_MARK,
+	TSN0_TD2_MARK, TSN0_TD3_MARK,
+	TSN0_RX_CTL_MARK, TSN0_RXC_MARK,
+	TSN0_RD0_MARK, TSN0_RD1_MARK,
+	TSN0_RD2_MARK, TSN0_RD3_MARK,
+};
+static const unsigned int tsn0_txcrefclk_pins[] = {
+	/* TSN0_TXCREFCLK */
+	RCAR_GP_PIN(4, 20),
+};
+static const unsigned int tsn0_txcrefclk_mux[] = {
+	TSN0_TXCREFCLK_MARK,
+};
+static const unsigned int tsn0_avtp_pps_pins[] = {
+	/* TSN0_AVTP_PPS0, TSN0_AVTP_PPS1 */
+	RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int tsn0_avtp_pps_mux[] = {
+	TSN0_AVTP_PPS0_MARK, TSN0_AVTP_PPS1_MARK,
+};
+static const unsigned int tsn0_avtp_capture_pins[] = {
+	/* TSN0_AVTP_CAPTURE */
+	RCAR_GP_PIN(4, 6),
+};
+static const unsigned int tsn0_avtp_capture_mux[] = {
+	TSN0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int tsn0_avtp_match_pins[] = {
+	/* TSN0_AVTP_MATCH */
+	RCAR_GP_PIN(4, 5),
+};
+static const unsigned int tsn0_avtp_match_mux[] = {
+	TSN0_AVTP_MATCH_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+	SH_PFC_PIN_GROUP(avb0_link),
+	SH_PFC_PIN_GROUP(avb0_magic),
+	SH_PFC_PIN_GROUP(avb0_phy_int),
+	SH_PFC_PIN_GROUP(avb0_mdio),
+	SH_PFC_PIN_GROUP(avb0_rgmii),
+	SH_PFC_PIN_GROUP(avb0_txcrefclk),
+	SH_PFC_PIN_GROUP(avb0_avtp_pps),
+	SH_PFC_PIN_GROUP(avb0_avtp_capture),
+	SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+	SH_PFC_PIN_GROUP(avb1_link),
+	SH_PFC_PIN_GROUP(avb1_magic),
+	SH_PFC_PIN_GROUP(avb1_phy_int),
+	SH_PFC_PIN_GROUP(avb1_mdio),
+	SH_PFC_PIN_GROUP(avb1_rgmii),
+	SH_PFC_PIN_GROUP(avb1_txcrefclk),
+	SH_PFC_PIN_GROUP(avb1_avtp_pps),
+	SH_PFC_PIN_GROUP(avb1_avtp_capture),
+	SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+	SH_PFC_PIN_GROUP(avb2_link),
+	SH_PFC_PIN_GROUP(avb2_magic),
+	SH_PFC_PIN_GROUP(avb2_phy_int),
+	SH_PFC_PIN_GROUP(avb2_mdio),
+	SH_PFC_PIN_GROUP(avb2_rgmii),
+	SH_PFC_PIN_GROUP(avb2_txcrefclk),
+	SH_PFC_PIN_GROUP(avb2_avtp_pps),
+	SH_PFC_PIN_GROUP(avb2_avtp_capture),
+	SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+	SH_PFC_PIN_GROUP(canfd0_data),
+	SH_PFC_PIN_GROUP(canfd1_data),
+	SH_PFC_PIN_GROUP(canfd2_data),
+	SH_PFC_PIN_GROUP(canfd3_data),
+	SH_PFC_PIN_GROUP(canfd4_data),
+	SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(canfd6_data),
+	SH_PFC_PIN_GROUP(canfd7_data),
+	SH_PFC_PIN_GROUP(can_clk),
+
+	SH_PFC_PIN_GROUP(hscif0_data),
+	SH_PFC_PIN_GROUP(hscif0_clk),
+	SH_PFC_PIN_GROUP(hscif0_ctrl),
+	SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif2_data),
+	SH_PFC_PIN_GROUP(hscif2_clk),
+	SH_PFC_PIN_GROUP(hscif2_ctrl),
+	SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */
+
+	SH_PFC_PIN_GROUP(i2c0),
+	SH_PFC_PIN_GROUP(i2c1),
+	SH_PFC_PIN_GROUP(i2c2),
+	SH_PFC_PIN_GROUP(i2c3),
+	SH_PFC_PIN_GROUP(i2c4),
+	SH_PFC_PIN_GROUP(i2c5),
+
+	BUS_DATA_PIN_GROUP(mmc_data, 1),
+	BUS_DATA_PIN_GROUP(mmc_data, 4),
+	BUS_DATA_PIN_GROUP(mmc_data, 8),
+	SH_PFC_PIN_GROUP(mmc_ctrl),
+	SH_PFC_PIN_GROUP(mmc_cd),
+	SH_PFC_PIN_GROUP(mmc_wp),
+	SH_PFC_PIN_GROUP(mmc_ds),
+
+	SH_PFC_PIN_GROUP(msiof0_clk),
+	SH_PFC_PIN_GROUP(msiof0_sync),
+	SH_PFC_PIN_GROUP(msiof0_ss1),
+	SH_PFC_PIN_GROUP(msiof0_ss2),
+	SH_PFC_PIN_GROUP(msiof0_txd),
+	SH_PFC_PIN_GROUP(msiof0_rxd),
+
+	SH_PFC_PIN_GROUP(msiof1_clk),
+	SH_PFC_PIN_GROUP(msiof1_sync),
+	SH_PFC_PIN_GROUP(msiof1_ss1),
+	SH_PFC_PIN_GROUP(msiof1_ss2),
+	SH_PFC_PIN_GROUP(msiof1_txd),
+	SH_PFC_PIN_GROUP(msiof1_rxd),
+
+	SH_PFC_PIN_GROUP(msiof2_clk),
+	SH_PFC_PIN_GROUP(msiof2_sync),
+	SH_PFC_PIN_GROUP(msiof2_ss1),
+	SH_PFC_PIN_GROUP(msiof2_ss2),
+	SH_PFC_PIN_GROUP(msiof2_txd),
+	SH_PFC_PIN_GROUP(msiof2_rxd),
+
+	SH_PFC_PIN_GROUP(msiof3_clk),
+	SH_PFC_PIN_GROUP(msiof3_sync),
+	SH_PFC_PIN_GROUP(msiof3_ss1),
+	SH_PFC_PIN_GROUP(msiof3_ss2),
+	SH_PFC_PIN_GROUP(msiof3_txd),
+	SH_PFC_PIN_GROUP(msiof3_rxd),
+
+	SH_PFC_PIN_GROUP(msiof4_clk),
+	SH_PFC_PIN_GROUP(msiof4_sync),
+	SH_PFC_PIN_GROUP(msiof4_ss1),
+	SH_PFC_PIN_GROUP(msiof4_ss2),
+	SH_PFC_PIN_GROUP(msiof4_txd),
+	SH_PFC_PIN_GROUP(msiof4_rxd),
+
+	SH_PFC_PIN_GROUP(msiof5_clk),
+	SH_PFC_PIN_GROUP(msiof5_sync),
+	SH_PFC_PIN_GROUP(msiof5_ss1),
+	SH_PFC_PIN_GROUP(msiof5_ss2),
+	SH_PFC_PIN_GROUP(msiof5_txd),
+	SH_PFC_PIN_GROUP(msiof5_rxd),
+
+	SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+	SH_PFC_PIN_GROUP(pcie1_clkreq_n),
+
+	SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(pwm1_a),
+	SH_PFC_PIN_GROUP(pwm1_b),
+	SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(pwm3_a),
+	SH_PFC_PIN_GROUP(pwm3_b),
+	SH_PFC_PIN_GROUP(pwm4),
+	SH_PFC_PIN_GROUP(pwm5),
+	SH_PFC_PIN_GROUP(pwm6),
+	SH_PFC_PIN_GROUP(pwm7),
+	SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */
+
+	SH_PFC_PIN_GROUP(qspi0_ctrl),
+	BUS_DATA_PIN_GROUP(qspi0_data, 2),
+	BUS_DATA_PIN_GROUP(qspi0_data, 4),
+	SH_PFC_PIN_GROUP(qspi1_ctrl),
+	BUS_DATA_PIN_GROUP(qspi1_data, 2),
+	BUS_DATA_PIN_GROUP(qspi1_data, 4),
+
+	SH_PFC_PIN_GROUP(scif0_data),
+	SH_PFC_PIN_GROUP(scif0_clk),
+	SH_PFC_PIN_GROUP(scif0_ctrl),
+	SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(scif4_data),
+	SH_PFC_PIN_GROUP(scif4_clk),
+	SH_PFC_PIN_GROUP(scif4_ctrl),
+	SH_PFC_PIN_GROUP(scif_clk),
+
+	SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */
+	SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */
+
+	SH_PFC_PIN_GROUP(tsn0_link),
+	SH_PFC_PIN_GROUP(tsn0_phy_int),
+	SH_PFC_PIN_GROUP(tsn0_mdio),
+	SH_PFC_PIN_GROUP(tsn0_rgmii),
+	SH_PFC_PIN_GROUP(tsn0_txcrefclk),
+	SH_PFC_PIN_GROUP(tsn0_avtp_pps),
+	SH_PFC_PIN_GROUP(tsn0_avtp_capture),
+	SH_PFC_PIN_GROUP(tsn0_avtp_match),
+};
+
+static const char * const avb0_groups[] = {
+	"avb0_link",
+	"avb0_magic",
+	"avb0_phy_int",
+	"avb0_mdio",
+	"avb0_rgmii",
+	"avb0_txcrefclk",
+	"avb0_avtp_pps",
+	"avb0_avtp_capture",
+	"avb0_avtp_match",
+};
+
+static const char * const avb1_groups[] = {
+	"avb1_link",
+	"avb1_magic",
+	"avb1_phy_int",
+	"avb1_mdio",
+	"avb1_rgmii",
+	"avb1_txcrefclk",
+	"avb1_avtp_pps",
+	"avb1_avtp_capture",
+	"avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+	"avb2_link",
+	"avb2_magic",
+	"avb2_phy_int",
+	"avb2_mdio",
+	"avb2_rgmii",
+	"avb2_txcrefclk",
+	"avb2_avtp_pps",
+	"avb2_avtp_capture",
+	"avb2_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+	"canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+	"canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+	"canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+	"canfd3_data",
+};
+
+static const char * const canfd4_groups[] = {
+	"canfd4_data",
+};
+
+static const char * const canfd5_groups[] = {
+	/* suffix might be updated */
+	"canfd5_data",
+	"canfd5_data_b",
+};
+
+static const char * const canfd6_groups[] = {
+	"canfd6_data",
+};
+
+static const char * const canfd7_groups[] = {
+	"canfd7_data",
+};
+
+static const char * const can_clk_groups[] = {
+	"can_clk",
+};
+
+static const char * const hscif0_groups[] = {
+	"hscif0_data",
+	"hscif0_clk",
+	"hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+	/* suffix might be updated */
+	"hscif1_data",
+	"hscif1_clk",
+	"hscif1_ctrl",
+	"hscif1_data_x",
+	"hscif1_clk_x",
+	"hscif1_ctrl_x",
+};
+
+static const char * const hscif2_groups[] = {
+	"hscif2_data",
+	"hscif2_clk",
+	"hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+	/* suffix might be updated */
+	"hscif3_data",
+	"hscif3_clk",
+	"hscif3_ctrl",
+	"hscif3_data_a",
+	"hscif3_clk_a",
+	"hscif3_ctrl_a",
+};
+
+static const char * const i2c0_groups[] = {
+	"i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+	"i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+	"i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+	"i2c3",
+};
+
+static const char * const i2c4_groups[] = {
+	"i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+	"i2c5",
+};
+
+static const char * const mmc_groups[] = {
+	"mmc_data1",
+	"mmc_data4",
+	"mmc_data8",
+	"mmc_ctrl",
+	"mmc_cd",
+	"mmc_wp",
+	"mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+	"msiof0_clk",
+	"msiof0_sync",
+	"msiof0_ss1",
+	"msiof0_ss2",
+	"msiof0_txd",
+	"msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+	"msiof1_clk",
+	"msiof1_sync",
+	"msiof1_ss1",
+	"msiof1_ss2",
+	"msiof1_txd",
+	"msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+	"msiof2_clk",
+	"msiof2_sync",
+	"msiof2_ss1",
+	"msiof2_ss2",
+	"msiof2_txd",
+	"msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+	"msiof3_clk",
+	"msiof3_sync",
+	"msiof3_ss1",
+	"msiof3_ss2",
+	"msiof3_txd",
+	"msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+	"msiof4_clk",
+	"msiof4_sync",
+	"msiof4_ss1",
+	"msiof4_ss2",
+	"msiof4_txd",
+	"msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+	"msiof5_clk",
+	"msiof5_sync",
+	"msiof5_ss1",
+	"msiof5_ss2",
+	"msiof5_txd",
+	"msiof5_rxd",
+};
+
+static const char * const pcie_groups[] = {
+	"pcie0_clkreq_n",
+	"pcie1_clkreq_n",
+};
+
+static const char * const pwm0_groups[] = {
+	/* suffix might be updated */
+	"pwm0_a",
+};
+
+static const char * const pwm1_groups[] = {
+	"pwm1_a",
+	"pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+	/* suffix might be updated */
+	"pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+	"pwm3_a",
+	"pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+	"pwm4",
+};
+
+static const char * const pwm5_groups[] = {
+	"pwm5",
+};
+
+static const char * const pwm6_groups[] = {
+	"pwm6",
+};
+
+static const char * const pwm7_groups[] = {
+	"pwm7",
+};
+
+static const char * const pwm8_groups[] = {
+	/* suffix might be updated */
+	"pwm8_a",
+};
+
+static const char * const pwm9_groups[] = {
+	/* suffix might be updated */
+	"pwm9_a",
+};
+
+static const char * const qspi0_groups[] = {
+	"qspi0_ctrl",
+	"qspi0_data2",
+	"qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+	"qspi1_ctrl",
+	"qspi1_data2",
+	"qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+	"scif0_data",
+	"scif0_clk",
+	"scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+	/* suffix might be updated */
+	"scif1_data",
+	"scif1_clk",
+	"scif1_ctrl",
+	"scif1_data_x",
+	"scif1_clk_x",
+	"scif1_ctrl_x",
+};
+
+static const char * const scif3_groups[] = {
+	/* suffix might be updated */
+	"scif3_data",
+	"scif3_clk",
+	"scif3_ctrl",
+	"scif3_data_a",
+	"scif3_clk_a",
+	"scif3_ctrl_a",
+};
+
+static const char * const scif4_groups[] = {
+	"scif4_data",
+	"scif4_clk",
+	"scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+	"scif_clk",
+};
+
+static const char * const tpu_groups[] = {
+	/* suffix might be updated */
+	"tpu_to0",
+	"tpu_to0_a",
+	"tpu_to1",
+	"tpu_to1_a",
+	"tpu_to2",
+	"tpu_to2_a",
+	"tpu_to3",
+	"tpu_to3_a",
+};
+
+static const char * const tsn0_groups[] = {
+	"tsn0_link",
+	"tsn0_phy_int",
+	"tsn0_mdio",
+	"tsn0_rgmii",
+	"tsn0_txcrefclk",
+	"tsn0_avtp_pps",
+	"tsn0_avtp_capture",
+	"tsn0_avtp_match",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+	SH_PFC_FUNCTION(avb0),
+	SH_PFC_FUNCTION(avb1),
+	SH_PFC_FUNCTION(avb2),
+
+	SH_PFC_FUNCTION(canfd0),
+	SH_PFC_FUNCTION(canfd1),
+	SH_PFC_FUNCTION(canfd2),
+	SH_PFC_FUNCTION(canfd3),
+	SH_PFC_FUNCTION(canfd4),
+	SH_PFC_FUNCTION(canfd5),
+	SH_PFC_FUNCTION(canfd6),
+	SH_PFC_FUNCTION(canfd7),
+	SH_PFC_FUNCTION(can_clk),
+
+	SH_PFC_FUNCTION(hscif0),
+	SH_PFC_FUNCTION(hscif1),
+	SH_PFC_FUNCTION(hscif2),
+	SH_PFC_FUNCTION(hscif3),
+
+	SH_PFC_FUNCTION(i2c0),
+	SH_PFC_FUNCTION(i2c1),
+	SH_PFC_FUNCTION(i2c2),
+	SH_PFC_FUNCTION(i2c3),
+	SH_PFC_FUNCTION(i2c4),
+	SH_PFC_FUNCTION(i2c5),
+
+	SH_PFC_FUNCTION(mmc),
+
+	SH_PFC_FUNCTION(msiof0),
+	SH_PFC_FUNCTION(msiof1),
+	SH_PFC_FUNCTION(msiof2),
+	SH_PFC_FUNCTION(msiof3),
+	SH_PFC_FUNCTION(msiof4),
+	SH_PFC_FUNCTION(msiof5),
+
+	SH_PFC_FUNCTION(pcie),
+
+	SH_PFC_FUNCTION(pwm0),
+	SH_PFC_FUNCTION(pwm1),
+	SH_PFC_FUNCTION(pwm2),
+	SH_PFC_FUNCTION(pwm3),
+	SH_PFC_FUNCTION(pwm4),
+	SH_PFC_FUNCTION(pwm5),
+	SH_PFC_FUNCTION(pwm6),
+	SH_PFC_FUNCTION(pwm7),
+	SH_PFC_FUNCTION(pwm8),
+	SH_PFC_FUNCTION(pwm9),
+
+	SH_PFC_FUNCTION(qspi0),
+	SH_PFC_FUNCTION(qspi1),
+
+	SH_PFC_FUNCTION(scif0),
+	SH_PFC_FUNCTION(scif1),
+	SH_PFC_FUNCTION(scif3),
+	SH_PFC_FUNCTION(scif4),
+	SH_PFC_FUNCTION(scif_clk),
+
+	SH_PFC_FUNCTION(tpu),
+
+	SH_PFC_FUNCTION(tsn0),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+	{ PINMUX_CFG_REG_VAR("GPSR0", 0xE6050040, 32,
+			     GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+				   1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		/* GP0_31_19 RESERVED */
+		GP_0_18_FN, GPSR0_18,
+		GP_0_17_FN, GPSR0_17,
+		GP_0_16_FN, GPSR0_16,
+		GP_0_15_FN, GPSR0_15,
+		GP_0_14_FN, GPSR0_14,
+		GP_0_13_FN, GPSR0_13,
+		GP_0_12_FN, GPSR0_12,
+		GP_0_11_FN, GPSR0_11,
+		GP_0_10_FN, GPSR0_10,
+		GP_0_9_FN, GPSR0_9,
+		GP_0_8_FN, GPSR0_8,
+		GP_0_7_FN, GPSR0_7,
+		GP_0_6_FN, GPSR0_6,
+		GP_0_5_FN, GPSR0_5,
+		GP_0_4_FN, GPSR0_4,
+		GP_0_3_FN, GPSR0_3,
+		GP_0_2_FN, GPSR0_2,
+		GP_0_1_FN, GPSR0_1,
+		GP_0_0_FN, GPSR0_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR1", 0xE6050840, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_1_28_FN, GPSR1_28,
+		GP_1_27_FN, GPSR1_27,
+		GP_1_26_FN, GPSR1_26,
+		GP_1_25_FN, GPSR1_25,
+		GP_1_24_FN, GPSR1_24,
+		GP_1_23_FN, GPSR1_23,
+		GP_1_22_FN, GPSR1_22,
+		GP_1_21_FN, GPSR1_21,
+		GP_1_20_FN, GPSR1_20,
+		GP_1_19_FN, GPSR1_19,
+		GP_1_18_FN, GPSR1_18,
+		GP_1_17_FN, GPSR1_17,
+		GP_1_16_FN, GPSR1_16,
+		GP_1_15_FN, GPSR1_15,
+		GP_1_14_FN, GPSR1_14,
+		GP_1_13_FN, GPSR1_13,
+		GP_1_12_FN, GPSR1_12,
+		GP_1_11_FN, GPSR1_11,
+		GP_1_10_FN, GPSR1_10,
+		GP_1_9_FN, GPSR1_9,
+		GP_1_8_FN, GPSR1_8,
+		GP_1_7_FN, GPSR1_7,
+		GP_1_6_FN, GPSR1_6,
+		GP_1_5_FN, GPSR1_5,
+		GP_1_4_FN, GPSR1_4,
+		GP_1_3_FN, GPSR1_3,
+		GP_1_2_FN, GPSR1_2,
+		GP_1_1_FN, GPSR1_1,
+		GP_1_0_FN, GPSR1_0, ))
+	},
+	{ PINMUX_CFG_REG_VAR("GPSR2", 0xE6058040, 32,
+			     GROUP(-12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+				   1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		/* GP2_31_20 RESERVED */
+		GP_2_19_FN, GPSR2_19,
+		GP_2_18_FN, GPSR2_18,
+		GP_2_17_FN, GPSR2_17,
+		GP_2_16_FN, GPSR2_16,
+		GP_2_15_FN, GPSR2_15,
+		GP_2_14_FN, GPSR2_14,
+		GP_2_13_FN, GPSR2_13,
+		GP_2_12_FN, GPSR2_12,
+		GP_2_11_FN, GPSR2_11,
+		GP_2_10_FN, GPSR2_10,
+		GP_2_9_FN, GPSR2_9,
+		GP_2_8_FN, GPSR2_8,
+		GP_2_7_FN, GPSR2_7,
+		GP_2_6_FN, GPSR2_6,
+		GP_2_5_FN, GPSR2_5,
+		GP_2_4_FN, GPSR2_4,
+		GP_2_3_FN, GPSR2_3,
+		GP_2_2_FN, GPSR2_2,
+		GP_2_1_FN, GPSR2_1,
+		GP_2_0_FN, GPSR2_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR3", 0xE6058840, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		GP_3_29_FN, GPSR3_29,
+		GP_3_28_FN, GPSR3_28,
+		GP_3_27_FN, GPSR3_27,
+		GP_3_26_FN, GPSR3_26,
+		GP_3_25_FN, GPSR3_25,
+		GP_3_24_FN, GPSR3_24,
+		GP_3_23_FN, GPSR3_23,
+		GP_3_22_FN, GPSR3_22,
+		GP_3_21_FN, GPSR3_21,
+		GP_3_20_FN, GPSR3_20,
+		GP_3_19_FN, GPSR3_19,
+		GP_3_18_FN, GPSR3_18,
+		GP_3_17_FN, GPSR3_17,
+		GP_3_16_FN, GPSR3_16,
+		GP_3_15_FN, GPSR3_15,
+		GP_3_14_FN, GPSR3_14,
+		GP_3_13_FN, GPSR3_13,
+		GP_3_12_FN, GPSR3_12,
+		GP_3_11_FN, GPSR3_11,
+		GP_3_10_FN, GPSR3_10,
+		GP_3_9_FN, GPSR3_9,
+		GP_3_8_FN, GPSR3_8,
+		GP_3_7_FN, GPSR3_7,
+		GP_3_6_FN, GPSR3_6,
+		GP_3_5_FN, GPSR3_5,
+		GP_3_4_FN, GPSR3_4,
+		GP_3_3_FN, GPSR3_3,
+		GP_3_2_FN, GPSR3_2,
+		GP_3_1_FN, GPSR3_1,
+		GP_3_0_FN, GPSR3_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR4", 0xE6060040, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_4_24_FN, GPSR4_24,
+		GP_4_23_FN, GPSR4_23,
+		GP_4_22_FN, GPSR4_22,
+		GP_4_21_FN, GPSR4_21,
+		GP_4_20_FN, GPSR4_20,
+		GP_4_19_FN, GPSR4_19,
+		GP_4_18_FN, GPSR4_18,
+		GP_4_17_FN, GPSR4_17,
+		GP_4_16_FN, GPSR4_16,
+		GP_4_15_FN, GPSR4_15,
+		GP_4_14_FN, GPSR4_14,
+		GP_4_13_FN, GPSR4_13,
+		GP_4_12_FN, GPSR4_12,
+		GP_4_11_FN, GPSR4_11,
+		GP_4_10_FN, GPSR4_10,
+		GP_4_9_FN, GPSR4_9,
+		GP_4_8_FN, GPSR4_8,
+		GP_4_7_FN, GPSR4_7,
+		GP_4_6_FN, GPSR4_6,
+		GP_4_5_FN, GPSR4_5,
+		GP_4_4_FN, GPSR4_4,
+		GP_4_3_FN, GPSR4_3,
+		GP_4_2_FN, GPSR4_2,
+		GP_4_1_FN, GPSR4_1,
+		GP_4_0_FN, GPSR4_0, ))
+	},
+	{ PINMUX_CFG_REG_VAR("GPSR5", 0xE6060840, 32,
+			     GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+				   1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		/* GP5_31_21 RESERVED */
+		GP_5_20_FN, GPSR5_20,
+		GP_5_19_FN, GPSR5_19,
+		GP_5_18_FN, GPSR5_18,
+		GP_5_17_FN, GPSR5_17,
+		GP_5_16_FN, GPSR5_16,
+		GP_5_15_FN, GPSR5_15,
+		GP_5_14_FN, GPSR5_14,
+		GP_5_13_FN, GPSR5_13,
+		GP_5_12_FN, GPSR5_12,
+		GP_5_11_FN, GPSR5_11,
+		GP_5_10_FN, GPSR5_10,
+		GP_5_9_FN, GPSR5_9,
+		GP_5_8_FN, GPSR5_8,
+		GP_5_7_FN, GPSR5_7,
+		GP_5_6_FN, GPSR5_6,
+		GP_5_5_FN, GPSR5_5,
+		GP_5_4_FN, GPSR5_4,
+		GP_5_3_FN, GPSR5_3,
+		GP_5_2_FN, GPSR5_2,
+		GP_5_1_FN, GPSR5_1,
+		GP_5_0_FN, GPSR5_0, ))
+	},
+	{ PINMUX_CFG_REG_VAR("GPSR6", 0xE6061040, 32,
+			     GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+				   1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		/* GP6_31_21 RESERVED */
+		GP_6_20_FN, GPSR6_20,
+		GP_6_19_FN, GPSR6_19,
+		GP_6_18_FN, GPSR6_18,
+		GP_6_17_FN, GPSR6_17,
+		GP_6_16_FN, GPSR6_16,
+		GP_6_15_FN, GPSR6_15,
+		GP_6_14_FN, GPSR6_14,
+		GP_6_13_FN, GPSR6_13,
+		GP_6_12_FN, GPSR6_12,
+		GP_6_11_FN, GPSR6_11,
+		GP_6_10_FN, GPSR6_10,
+		GP_6_9_FN, GPSR6_9,
+		GP_6_8_FN, GPSR6_8,
+		GP_6_7_FN, GPSR6_7,
+		GP_6_6_FN, GPSR6_6,
+		GP_6_5_FN, GPSR6_5,
+		GP_6_4_FN, GPSR6_4,
+		GP_6_3_FN, GPSR6_3,
+		GP_6_2_FN, GPSR6_2,
+		GP_6_1_FN, GPSR6_1,
+		GP_6_0_FN, GPSR6_0, ))
+	},
+	{ PINMUX_CFG_REG_VAR("GPSR7", 0xE6061840, 32,
+			     GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+				   1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		/* GP7_31_21 RESERVED */
+		GP_7_20_FN, GPSR7_20,
+		GP_7_19_FN, GPSR7_19,
+		GP_7_18_FN, GPSR7_18,
+		GP_7_17_FN, GPSR7_17,
+		GP_7_16_FN, GPSR7_16,
+		GP_7_15_FN, GPSR7_15,
+		GP_7_14_FN, GPSR7_14,
+		GP_7_13_FN, GPSR7_13,
+		GP_7_12_FN, GPSR7_12,
+		GP_7_11_FN, GPSR7_11,
+		GP_7_10_FN, GPSR7_10,
+		GP_7_9_FN, GPSR7_9,
+		GP_7_8_FN, GPSR7_8,
+		GP_7_7_FN, GPSR7_7,
+		GP_7_6_FN, GPSR7_6,
+		GP_7_5_FN, GPSR7_5,
+		GP_7_4_FN, GPSR7_4,
+		GP_7_3_FN, GPSR7_3,
+		GP_7_2_FN, GPSR7_2,
+		GP_7_1_FN, GPSR7_1,
+		GP_7_0_FN, GPSR7_0, ))
+	},
+	{ PINMUX_CFG_REG_VAR("GPSR8", 0xE6068040, 32,
+			     GROUP(-18, 1, 1, 1, 1,
+				   1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		/* GP8_31_14 RESERVED */
+		GP_8_13_FN, GPSR8_13,
+		GP_8_12_FN, GPSR8_12,
+		GP_8_11_FN, GPSR8_11,
+		GP_8_10_FN, GPSR8_10,
+		GP_8_9_FN, GPSR8_9,
+		GP_8_8_FN, GPSR8_8,
+		GP_8_7_FN, GPSR8_7,
+		GP_8_6_FN, GPSR8_6,
+		GP_8_5_FN, GPSR8_5,
+		GP_8_4_FN, GPSR8_4,
+		GP_8_3_FN, GPSR8_3,
+		GP_8_2_FN, GPSR8_2,
+		GP_8_1_FN, GPSR8_1,
+		GP_8_0_FN, GPSR8_0, ))
+	},
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+	{ PINMUX_CFG_REG("IP0SR0", 0xE6050060, 32, 4, GROUP(
+		IP0SR0_31_28
+		IP0SR0_27_24
+		IP0SR0_23_20
+		IP0SR0_19_16
+		IP0SR0_15_12
+		IP0SR0_11_8
+		IP0SR0_7_4
+		IP0SR0_3_0))
+	},
+	{ PINMUX_CFG_REG("IP1SR0", 0xE6050064, 32, 4, GROUP(
+		IP1SR0_31_28
+		IP1SR0_27_24
+		IP1SR0_23_20
+		IP1SR0_19_16
+		IP1SR0_15_12
+		IP1SR0_11_8
+		IP1SR0_7_4
+		IP1SR0_3_0))
+	},
+	{ PINMUX_CFG_REG_VAR("IP2SR0", 0xE6050068, 32,
+			     GROUP(-20, 4, 4, 4),
+			     GROUP(
+		/* IP2SR0_31_12 RESERVED */
+		IP2SR0_11_8
+		IP2SR0_7_4
+		IP2SR0_3_0))
+	},
+	{ PINMUX_CFG_REG("IP0SR1", 0xE6050860, 32, 4, GROUP(
+		IP0SR1_31_28
+		IP0SR1_27_24
+		IP0SR1_23_20
+		IP0SR1_19_16
+		IP0SR1_15_12
+		IP0SR1_11_8
+		IP0SR1_7_4
+		IP0SR1_3_0))
+	},
+	{ PINMUX_CFG_REG("IP1SR1", 0xE6050864, 32, 4, GROUP(
+		IP1SR1_31_28
+		IP1SR1_27_24
+		IP1SR1_23_20
+		IP1SR1_19_16
+		IP1SR1_15_12
+		IP1SR1_11_8
+		IP1SR1_7_4
+		IP1SR1_3_0))
+	},
+	{ PINMUX_CFG_REG("IP2SR1", 0xE6050868, 32, 4, GROUP(
+		IP2SR1_31_28
+		IP2SR1_27_24
+		IP2SR1_23_20
+		IP2SR1_19_16
+		IP2SR1_15_12
+		IP2SR1_11_8
+		IP2SR1_7_4
+		IP2SR1_3_0))
+	},
+	{ PINMUX_CFG_REG_VAR("IP3SR1", 0xE605086C, 32,
+			     GROUP(-12, 4, 4, 4, 4, 4),
+			     GROUP(
+		/* IP3SR1_31_20 RESERVED */
+		IP3SR1_19_16
+		IP3SR1_15_12
+		IP3SR1_11_8
+		IP3SR1_7_4
+		IP3SR1_3_0))
+	},
+	{ PINMUX_CFG_REG("IP0SR2", 0xE6058060, 32, 4, GROUP(
+		IP0SR2_31_28
+		IP0SR2_27_24
+		IP0SR2_23_20
+		IP0SR2_19_16
+		IP0SR2_15_12
+		IP0SR2_11_8
+		IP0SR2_7_4
+		IP0SR2_3_0))
+	},
+	{ PINMUX_CFG_REG("IP1SR2", 0xE6058064, 32, 4, GROUP(
+		IP1SR2_31_28
+		IP1SR2_27_24
+		IP1SR2_23_20
+		IP1SR2_19_16
+		IP1SR2_15_12
+		IP1SR2_11_8
+		IP1SR2_7_4
+		IP1SR2_3_0))
+	},
+	{ PINMUX_CFG_REG_VAR("IP2SR2", 0xE6058068, 32,
+			     GROUP(-16, 4, 4, 4, 4),
+			     GROUP(
+		/* IP2SR2_31_16 RESERVED */
+		IP2SR2_15_12
+		IP2SR2_11_8
+		IP2SR2_7_4
+		IP2SR2_3_0))
+	},
+	{ PINMUX_CFG_REG("IP0SR3", 0xE6058860, 32, 4, GROUP(
+		IP0SR3_31_28
+		IP0SR3_27_24
+		IP0SR3_23_20
+		IP0SR3_19_16
+		IP0SR3_15_12
+		IP0SR3_11_8
+		IP0SR3_7_4
+		IP0SR3_3_0))
+	},
+	{ PINMUX_CFG_REG("IP1SR3", 0xE6058864, 32, 4, GROUP(
+		IP1SR3_31_28
+		IP1SR3_27_24
+		IP1SR3_23_20
+		IP1SR3_19_16
+		IP1SR3_15_12
+		IP1SR3_11_8
+		IP1SR3_7_4
+		IP1SR3_3_0))
+	},
+	{ PINMUX_CFG_REG("IP2SR3", 0xE6058868, 32, 4, GROUP(
+		IP2SR3_31_28
+		IP2SR3_27_24
+		IP2SR3_23_20
+		IP2SR3_19_16
+		IP2SR3_15_12
+		IP2SR3_11_8
+		IP2SR3_7_4
+		IP2SR3_3_0))
+	},
+	{ PINMUX_CFG_REG_VAR("IP3SR3", 0xE605886C, 32,
+			     GROUP(-8, 4, 4, 4, 4, 4, 4),
+			     GROUP(
+		/* IP3SR3_31_24 RESERVED */
+		IP3SR3_23_20
+		IP3SR3_19_16
+		IP3SR3_15_12
+		IP3SR3_11_8
+		IP3SR3_7_4
+		IP3SR3_3_0))
+	},
+	{ PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+		IP0SR6_31_28
+		IP0SR6_27_24
+		IP0SR6_23_20
+		IP0SR6_19_16
+		IP0SR6_15_12
+		IP0SR6_11_8
+		IP0SR6_7_4
+		IP0SR6_3_0))
+	},
+	{ PINMUX_CFG_REG("IP1SR6", 0xE6061064, 32, 4, GROUP(
+		IP1SR6_31_28
+		IP1SR6_27_24
+		IP1SR6_23_20
+		IP1SR6_19_16
+		IP1SR6_15_12
+		IP1SR6_11_8
+		IP1SR6_7_4
+		IP1SR6_3_0))
+	},
+	{ PINMUX_CFG_REG_VAR("IP2SR6", 0xE6061068, 32,
+			     GROUP(-12, 4, 4, 4, 4, 4),
+			     GROUP(
+		/* IP2SR6_31_20 RESERVED */
+		IP2SR6_19_16
+		IP2SR6_15_12
+		IP2SR6_11_8
+		IP2SR6_7_4
+		IP2SR6_3_0))
+	},
+	{ PINMUX_CFG_REG("IP0SR7", 0xE6061860, 32, 4, GROUP(
+		IP0SR7_31_28
+		IP0SR7_27_24
+		IP0SR7_23_20
+		IP0SR7_19_16
+		IP0SR7_15_12
+		IP0SR7_11_8
+		IP0SR7_7_4
+		IP0SR7_3_0))
+	},
+	{ PINMUX_CFG_REG("IP1SR7", 0xE6061864, 32, 4, GROUP(
+		IP1SR7_31_28
+		IP1SR7_27_24
+		IP1SR7_23_20
+		IP1SR7_19_16
+		IP1SR7_15_12
+		IP1SR7_11_8
+		IP1SR7_7_4
+		IP1SR7_3_0))
+	},
+	{ PINMUX_CFG_REG_VAR("IP2SR7", 0xE6061868, 32,
+			     GROUP(-12, 4, 4, 4, 4, 4),
+			     GROUP(
+		/* IP2SR7_31_20 RESERVED */
+		IP2SR7_19_16
+		IP2SR7_15_12
+		IP2SR7_11_8
+		IP2SR7_7_4
+		IP2SR7_3_0))
+	},
+	{ PINMUX_CFG_REG("IP0SR8", 0xE6068060, 32, 4, GROUP(
+		IP0SR8_31_28
+		IP0SR8_27_24
+		IP0SR8_23_20
+		IP0SR8_19_16
+		IP0SR8_15_12
+		IP0SR8_11_8
+		IP0SR8_7_4
+		IP0SR8_3_0))
+	},
+	{ PINMUX_CFG_REG_VAR("IP1SR8", 0xE6068064, 32,
+			     GROUP(-8, 4, 4, 4, 4, 4, 4),
+			     GROUP(
+		/* IP1SR8_31_24 RESERVED */
+		IP1SR8_23_20
+		IP1SR8_19_16
+		IP1SR8_15_12
+		IP1SR8_11_8
+		IP1SR8_7_4
+		IP1SR8_3_0))
+	},
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+	{ PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+			     GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
+				   -2, 1, 1, -1),
+			     GROUP(
+		/* RESERVED 31-20 */
+		MOD_SEL4_19
+		MOD_SEL4_18
+		/* RESERVED 17-16 */
+		MOD_SEL4_15
+		MOD_SEL4_14
+		/* RESERVED 13 */
+		MOD_SEL4_12
+		/* RESERVED 11-10 */
+		MOD_SEL4_9
+		MOD_SEL4_8
+		/* RESERVED 7-6 */
+		MOD_SEL4_5
+		/* RESERVED 4-3 */
+		MOD_SEL4_2
+		MOD_SEL4_1
+		/* RESERVED 0 */
+		))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
+			     GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
+				   1, 1, -2, 1, -1, 1),
+			     GROUP(
+		/* RESERVED 31-20 */
+		MOD_SEL5_19
+		/* RESERVED 18-17 */
+		MOD_SEL5_16
+		MOD_SEL5_15
+		/* RESERVED 14-13 */
+		MOD_SEL5_12
+		MOD_SEL5_11
+		/* RESERVED 10-9 */
+		MOD_SEL5_8
+		/* RESERVED 7 */
+		MOD_SEL5_6
+		MOD_SEL5_5
+		/* RESERVED 4-3 */
+		MOD_SEL5_2
+		/* RESERVED 1 */
+		MOD_SEL5_0))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
+			     GROUP(-13, 1, -1, 1, -2, 1, 1,
+				   -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
+			     GROUP(
+		/* RESERVED 31-19 */
+		MOD_SEL6_18
+		/* RESERVED 17 */
+		MOD_SEL6_16
+		/* RESERVED 15-14 */
+		MOD_SEL6_13
+		MOD_SEL6_12
+		/* RESERVED 11 */
+		MOD_SEL6_10
+		/* RESERVED 9-8 */
+		MOD_SEL6_7
+		MOD_SEL6_6
+		MOD_SEL6_5
+		/* RESERVED 4-3 */
+		MOD_SEL6_2
+		MOD_SEL6_1
+		/* RESERVED 0 */
+		))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
+			     GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
+				   -2, 1, 1, -1, 1),
+			     GROUP(
+		/* RESERVED 31-17 */
+		MOD_SEL7_16
+		MOD_SEL7_15
+		/* RESERVED 14 */
+		MOD_SEL7_13
+		/* RESERVED 12 */
+		MOD_SEL7_11
+		MOD_SEL7_10
+		/* RESERVED 9-8 */
+		MOD_SEL7_7
+		MOD_SEL7_6
+		/* RESERVED 5-4 */
+		MOD_SEL7_3
+		MOD_SEL7_2
+		/* RESERVED 1 */
+		MOD_SEL7_0))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
+			     GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		/* RESERVED 31-12 */
+		MOD_SEL8_11
+		MOD_SEL8_10
+		MOD_SEL8_9
+		MOD_SEL8_8
+		MOD_SEL8_7
+		MOD_SEL8_6
+		MOD_SEL8_5
+		MOD_SEL8_4
+		MOD_SEL8_3
+		MOD_SEL8_2
+		MOD_SEL8_1
+		MOD_SEL8_0))
+	},
+	{ },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+	{ PINMUX_DRIVE_REG("DRV0CTRL0", 0xE6050080) {
+		{ RCAR_GP_PIN(0, 7), 28, 3 }, /* MSIOF5_SS2 */
+		{ RCAR_GP_PIN(0, 6), 24, 3 }, /* IRQ0 */
+		{ RCAR_GP_PIN(0, 5), 20, 3 }, /* IRQ1 */
+		{ RCAR_GP_PIN(0, 4), 16, 3 }, /* IRQ2 */
+		{ RCAR_GP_PIN(0, 3), 12, 3 }, /* IRQ3 */
+		{ RCAR_GP_PIN(0, 2), 8, 3 }, /* GP0_02 */
+		{ RCAR_GP_PIN(0, 1), 4, 3 }, /* GP0_01 */
+		{ RCAR_GP_PIN(0, 0), 0, 3 }, /* GP0_00 */
+	} },
+	{ PINMUX_DRIVE_REG("DRV1CTRL0", 0xE6050084) {
+		{ RCAR_GP_PIN(0, 15), 28, 3 }, /* MSIOF2_SYNC */
+		{ RCAR_GP_PIN(0, 14), 24, 3 }, /* MSIOF2_SS1 */
+		{ RCAR_GP_PIN(0, 13), 20, 3 }, /* MSIOF2_SS2 */
+		{ RCAR_GP_PIN(0, 12), 16, 3 }, /* MSIOF5_RXD */
+		{ RCAR_GP_PIN(0, 11), 12, 3 }, /* MSIOF5_SCK */
+		{ RCAR_GP_PIN(0, 10), 8, 3 }, /* MSIOF5_TXD */
+		{ RCAR_GP_PIN(0, 9), 4, 3 }, /* MSIOF5_SYNC */
+		{ RCAR_GP_PIN(0, 8), 0, 3 }, /* MSIOF5_SS1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRV2CTRL0", 0xE6050088) {
+		{ RCAR_GP_PIN(0, 18), 8, 3 }, /* MSIOF2_RXD */
+		{ RCAR_GP_PIN(0, 17), 4, 3 }, /* MSIOF2_SCK */
+		{ RCAR_GP_PIN(0, 16), 0, 3 }, /* MSIOF2_TXD */
+	} },
+	{ PINMUX_DRIVE_REG("DRV0CTRL1", 0xE6050880) {
+		{ RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_SS1 */
+		{ RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_SS2 */
+		{ RCAR_GP_PIN(1, 5), 20, 3 }, /* MSIOF1_RXD */
+		{ RCAR_GP_PIN(1, 4), 16, 3 }, /* MSIOF1_TXD */
+		{ RCAR_GP_PIN(1, 3), 12, 3 }, /* MSIOF1_SCK */
+		{ RCAR_GP_PIN(1, 2), 8, 3 }, /* MSIOF1_SYNC */
+		{ RCAR_GP_PIN(1, 1), 4, 3 }, /* MSIOF1_SS1 */
+		{ RCAR_GP_PIN(1, 0), 0, 3 }, /* MSIOF1_SS2 */
+	} },
+	{ PINMUX_DRIVE_REG("DRV1CTRL1", 0xE6050884) {
+		{ RCAR_GP_PIN(1, 15), 28, 3 }, /* HSCK0 */
+		{ RCAR_GP_PIN(1, 14), 24, 3 }, /* HRTS0_N */
+		{ RCAR_GP_PIN(1, 13), 20, 3 }, /* HCTS0_N */
+		{ RCAR_GP_PIN(1, 12), 16, 3 }, /* HTX0 */
+		{ RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_RXD */
+		{ RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SCK */
+		{ RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_TXD */
+		{ RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SYNC */
+	} },
+	{ PINMUX_DRIVE_REG("DRV2CTRL1", 0xE6050888) {
+		{ RCAR_GP_PIN(1, 23), 28, 3 }, /* GP1_23 */
+		{ RCAR_GP_PIN(1, 22), 24, 3 }, /* AUDIO_CLKIN */
+		{ RCAR_GP_PIN(1, 21), 20, 3 }, /* AUDIO_CLKOUT */
+		{ RCAR_GP_PIN(1, 20), 16, 3 }, /* SSI_SD */
+		{ RCAR_GP_PIN(1, 19), 12, 3 }, /* SSI_WS */
+		{ RCAR_GP_PIN(1, 18), 8, 3 }, /* SSI_SCK */
+		{ RCAR_GP_PIN(1, 17), 4, 3 }, /* SCIF_CLK */
+		{ RCAR_GP_PIN(1, 16), 0, 3 }, /* HRX0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRV3CTRL1", 0xE605088C) {
+		{ RCAR_GP_PIN(1, 28), 16, 3 }, /* HTX3 */
+		{ RCAR_GP_PIN(1, 27), 12, 3 }, /* HCTS3_N */
+		{ RCAR_GP_PIN(1, 26), 8, 3 }, /* HRTS3_N */
+		{ RCAR_GP_PIN(1, 25), 4, 3 }, /* HSCK3 */
+		{ RCAR_GP_PIN(1, 24), 0, 3 }, /* HRX3 */
+	} },
+	{ PINMUX_DRIVE_REG("DRV0CTRL2", 0xE6058080) {
+		{ RCAR_GP_PIN(2, 7), 28, 3 }, /* TPU0TO1 */
+		{ RCAR_GP_PIN(2, 6), 24, 3 }, /* FXR_TXDB */
+		{ RCAR_GP_PIN(2, 5), 20, 3 }, /* FXR_TXENB_N */
+		{ RCAR_GP_PIN(2, 4), 16, 3 }, /* RXDB_EXTFXR */
+		{ RCAR_GP_PIN(2, 3), 12, 3 }, /* CLK_EXTFXR */
+		{ RCAR_GP_PIN(2, 2), 8, 3 }, /* RXDA_EXTFXR */
+		{ RCAR_GP_PIN(2, 1), 4, 3 }, /* FXR_TXENA_N */
+		{ RCAR_GP_PIN(2, 0), 0, 3 }, /* FXR_TXDA */
+	} },
+	{ PINMUX_DRIVE_REG("DRV1CTRL2", 0xE6058084) {
+		{ RCAR_GP_PIN(2, 15), 28, 3 }, /* CANFD3_RX */
+		{ RCAR_GP_PIN(2, 14), 24, 3 }, /* CANFD3_TX */
+		{ RCAR_GP_PIN(2, 13), 20, 3 }, /* CANFD2_RX */
+		{ RCAR_GP_PIN(2, 12), 16, 3 }, /* CANFD2_TX */
+		{ RCAR_GP_PIN(2, 11), 12, 3 }, /* CANFD0_RX */
+		{ RCAR_GP_PIN(2, 10), 8, 3 }, /* CANFD0_TX */
+		{ RCAR_GP_PIN(2, 9), 4, 3 }, /* CAN_CLK */
+		{ RCAR_GP_PIN(2, 8), 0, 3 }, /* TPU0TO0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRV2CTRL2", 0xE6058088) {
+		{ RCAR_GP_PIN(2, 19), 12, 3 }, /* CANFD7_RX */
+		{ RCAR_GP_PIN(2, 18), 8, 3 }, /* CANFD7_TX */
+		{ RCAR_GP_PIN(2, 17), 4, 3 }, /* CANFD4_RX */
+		{ RCAR_GP_PIN(2, 16), 0, 3 }, /* CANFD4_TX */
+	} },
+	{ PINMUX_DRIVE_REG("DRV0CTRL3", 0xE6058880) {
+		{ RCAR_GP_PIN(3, 7), 28, 3 }, /* MMC_D4 */
+		{ RCAR_GP_PIN(3, 6), 24, 3 }, /* MMC_D5 */
+		{ RCAR_GP_PIN(3, 5), 20, 3 }, /* MMC_SD_D3 */
+		{ RCAR_GP_PIN(3, 4), 16, 3 },
/* MMC_DS */ + { RCAR_GP_PIN(3, 3), 12, 3 }, /* MMC_SD_CLK */ + { RCAR_GP_PIN(3, 2), 8, 3 }, /* MMC_SD_D2 */ + { RCAR_GP_PIN(3, 1), 4, 3 }, /* MMC_SD_D0 */ + { RCAR_GP_PIN(3, 0), 0, 3 }, /* MMC_SD_D1 */ + } }, + { PINMUX_DRIVE_REG("DRV1CTRL3", 0xE6058884) { + { RCAR_GP_PIN(3, 15), 28, 2 }, /* QSPI0_SSL */ + { RCAR_GP_PIN(3, 14), 24, 2 }, /* IPC_CLKOUT */ + { RCAR_GP_PIN(3, 13), 20, 2 }, /* IPC_CLKIN */ + { RCAR_GP_PIN(3, 12), 16, 3 }, /* SD_WP */ + { RCAR_GP_PIN(3, 11), 12, 3 }, /* SD_CD */ + { RCAR_GP_PIN(3, 10), 8, 3 }, /* MMC_SD_CMD */ + { RCAR_GP_PIN(3, 9), 4, 3 }, /* MMC_D6 */ + { RCAR_GP_PIN(3, 8), 0, 3 }, /* MMC_D7 */ + } }, + { PINMUX_DRIVE_REG("DRV2CTRL3", 0xE6058888) { + { RCAR_GP_PIN(3, 23), 28, 2 }, /* QSPI1_MISO_IO1 */ + { RCAR_GP_PIN(3, 22), 24, 2 }, /* QSPI1_SPCLK */ + { RCAR_GP_PIN(3, 21), 20, 2 }, /* QSPI1_MOSI_IO0 */ + { RCAR_GP_PIN(3, 20), 16, 2 }, /* QSPI0_SPCLK */ + { RCAR_GP_PIN(3, 19), 12, 2 }, /* QSPI0_MOSI_IO0 */ + { RCAR_GP_PIN(3, 18), 8, 2 }, /* QSPI0_MISO_IO1 */ + { RCAR_GP_PIN(3, 17), 4, 2 }, /* QSPI0_IO2 */ + { RCAR_GP_PIN(3, 16), 0, 2 }, /* QSPI0_IO3 */ + } }, + { PINMUX_DRIVE_REG("DRV3CTRL3", 0xE605888C) { + { RCAR_GP_PIN(3, 29), 20, 2 }, /* RPC_INT_N */ + { RCAR_GP_PIN(3, 28), 16, 2 }, /* RPC_WP_N */ + { RCAR_GP_PIN(3, 27), 12, 2 }, /* RPC_RESET_N */ + { RCAR_GP_PIN(3, 26), 8, 2 }, /* QSPI1_IO3 */ + { RCAR_GP_PIN(3, 25), 4, 2 }, /* QSPI1_SSL */ + { RCAR_GP_PIN(3, 24), 0, 2 }, /* QSPI1_IO2 */ + } }, + { PINMUX_DRIVE_REG("DRV0CTRL4", 0xE6060080) { + { RCAR_GP_PIN(4, 7), 28, 3 }, /* TSN0_RX_CTL */ + { RCAR_GP_PIN(4, 6), 24, 3 }, /* TSN0_AVTP_CAPTURE */ + { RCAR_GP_PIN(4, 5), 20, 3 }, /* TSN0_AVTP_MATCH */ + { RCAR_GP_PIN(4, 4), 16, 3 }, /* TSN0_LINK */ + { RCAR_GP_PIN(4, 3), 12, 3 }, /* TSN0_PHY_INT */ + { RCAR_GP_PIN(4, 2), 8, 3 }, /* TSN0_AVTP_PPS1 */ + { RCAR_GP_PIN(4, 1), 4, 3 }, /* TSN0_MDC */ + { RCAR_GP_PIN(4, 0), 0, 3 }, /* TSN0_MDIO */ + } }, + { PINMUX_DRIVE_REG("DRV1CTRL4", 0xE6060084) { + { RCAR_GP_PIN(4, 15), 28, 3 }, /* TSN0_TD0 */ + { RCAR_GP_PIN(4, 14), 24, 3 }, /* TSN0_TD1 */ + { RCAR_GP_PIN(4, 13), 20, 3 }, /* TSN0_RD1 */ + { RCAR_GP_PIN(4, 12), 16, 3 }, /* TSN0_TXC */ + { RCAR_GP_PIN(4, 11), 12, 3 }, /* TSN0_RXC */ + { RCAR_GP_PIN(4, 10), 8, 3 }, /* TSN0_RD0 */ + { RCAR_GP_PIN(4, 9), 4, 3 }, /* TSN0_TX_CTL */ + { RCAR_GP_PIN(4, 8), 0, 3 }, /* TSN0_AVTP_PPS0 */ + } }, + { PINMUX_DRIVE_REG("DRV2CTRL4", 0xE6060088) { + { RCAR_GP_PIN(4, 23), 28, 3 }, /* AVS0 */ + { RCAR_GP_PIN(4, 22), 24, 3 }, /* PCIE1_CLKREQ_N */ + { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */ + { RCAR_GP_PIN(4, 20), 16, 3 }, /* TSN0_TXCREFCLK */ + { RCAR_GP_PIN(4, 19), 12, 3 }, /* TSN0_TD2 */ + { RCAR_GP_PIN(4, 18), 8, 3 }, /* TSN0_TD3 */ + { RCAR_GP_PIN(4, 17), 4, 3 }, /* TSN0_RD2 */ + { RCAR_GP_PIN(4, 16), 0, 3 }, /* TSN0_RD3 */ + } }, + { PINMUX_DRIVE_REG("DRV3CTRL4", 0xE606008C) { + { RCAR_GP_PIN(4, 24), 0, 3 }, /* AVS1 */ + } }, + { PINMUX_DRIVE_REG("DRV0CTRL5", 0xE6060880) { + { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB2_TXCREFCLK */ + { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB2_MDC */ + { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB2_MAGIC */ + { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB2_PHY_INT */ + { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB2_LINK */ + { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB2_AVTP_MATCH */ + { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB2_AVTP_CAPTURE */ + { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB2_AVTP_PPS */ + } }, + { PINMUX_DRIVE_REG("DRV1CTRL5", 0xE6060884) { + { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB2_TD0 */ + { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB2_RD1 */ + { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB2_RD2
*/ + { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB2_TD1 */ + { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB2_TD2 */ + { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB2_MDIO */ + { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB2_RD3 */ + { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB2_TD3 */ + } }, + { PINMUX_DRIVE_REG("DRV2CTRL5", 0xE6060888) { + { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB2_RX_CTL */ + { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB2_TX_CTL */ + { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB2_RXC */ + { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB2_RD0 */ + { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB2_TXC */ + } }, + { PINMUX_DRIVE_REG("DRV0CTRL6", 0xE6061080) { + { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB1_TX_CTL */ + { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB1_TXC */ + { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB1_AVTP_MATCH */ + { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB1_LINK */ + { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB1_PHY_INT */ + { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB1_MDC */ + { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB1_MAGIC */ + { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB1_MDIO */ + } }, + { PINMUX_DRIVE_REG("DRV1CTRL6", 0xE6061084) { + { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB1_RD0 */ + { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB1_RD1 */ + { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB1_TD0 */ + { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB1_TD1 */ + { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB1_AVTP_CAPTURE */ + { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB1_AVTP_PPS */ + { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB1_RX_CTL */ + { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB1_RXC */ + } }, + { PINMUX_DRIVE_REG("DRV2CTRL6", 0xE6061088) { + { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB1_TXCREFCLK */ + { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB1_RD3 */ + { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB1_TD3 */ + { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB1_RD2 */ + { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB1_TD2 */ + } }, + { PINMUX_DRIVE_REG("DRV0CTRL7", 0xE6061880) { + { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB0_TD1 */ + { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB0_TD2 */ + { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB0_PHY_INT */ + { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB0_LINK */ + { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB0_TD3 */ + { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB0_AVTP_MATCH */ + { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB0_AVTP_CAPTURE */ + { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB0_AVTP_PPS */ + } }, + { PINMUX_DRIVE_REG("DRV1CTRL7", 0xE6061884) { + { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB0_TXC */ + { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB0_MDIO */ + { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB0_MDC */ + { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB0_RD2 */ + { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB0_TD0 */ + { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB0_MAGIC */ + { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB0_TXCREFCLK */ + { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB0_RD3 */ + } }, + { PINMUX_DRIVE_REG("DRV2CTRL7", 0xE6061888) { + { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB0_RX_CTL */ + { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB0_RXC */ + { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB0_RD0 */ + { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB0_RD1 */ + { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB0_TX_CTL */ + } }, + { PINMUX_DRIVE_REG("DRV0CTRL8", 0xE6068080) { + { RCAR_GP_PIN(8, 7), 28, 3 }, /* SDA3 */ + { RCAR_GP_PIN(8, 6), 24, 3 }, /* SCL3 */ + { RCAR_GP_PIN(8, 5), 20, 3 }, /* SDA2 */ + { RCAR_GP_PIN(8, 4), 16, 3 }, /* SCL2 */ + { RCAR_GP_PIN(8, 3), 12, 3 }, /* SDA1 */ + { RCAR_GP_PIN(8, 2), 8, 3 }, /* SCL1 */ + { RCAR_GP_PIN(8, 1), 4, 3 }, /* SDA0 */ + { RCAR_GP_PIN(8, 0), 0, 3 }, /* SCL0 */ + } }, + { PINMUX_DRIVE_REG("DRV1CTRL8", 0xE6068084) { + { RCAR_GP_PIN(8, 13), 20, 3 }, /* GP8_13 */ + { RCAR_GP_PIN(8, 12), 16, 3 }, /* GP8_12 */ + { RCAR_GP_PIN(8, 11), 12, 3 }, /* SDA5 */ + { RCAR_GP_PIN(8, 10), 8, 3 }, /* SCL5 
*/ + { RCAR_GP_PIN(8, 9), 4, 3 }, /* SDA4 */ + { RCAR_GP_PIN(8, 8), 0, 3 }, /* SCL4 */ + } }, + { }, +}; + +enum ioctrl_regs { + POC0, + POC1, + POC3, + POC4, + POC5, + POC6, + POC7, + POC8, +}; + +static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = { + [POC0] = { 0xE60500A0, }, + [POC1] = { 0xE60508A0, }, + [POC3] = { 0xE60588A0, }, + [POC4] = { 0xE60600A0, }, + [POC5] = { 0xE60608A0, }, + [POC6] = { 0xE60610A0, }, + [POC7] = { 0xE60618A0, }, + [POC8] = { 0xE60680A0, }, + { /* sentinel */ }, +}; + +static int r8a779g0_pin_to_pocctrl(unsigned int pin, u32 *pocctrl) +{ + int bit = pin & 0x1f; + + *pocctrl = pinmux_ioctrl_regs[POC0].reg; + if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 18)) + return bit; + + *pocctrl = pinmux_ioctrl_regs[POC1].reg; + if (pin >= RCAR_GP_PIN(1, 0) && pin <= RCAR_GP_PIN(1, 22)) + return bit; + + *pocctrl = pinmux_ioctrl_regs[POC3].reg; + if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 12)) + return bit; + + *pocctrl = pinmux_ioctrl_regs[POC8].reg; + if (pin >= RCAR_GP_PIN(8, 0) && pin <= RCAR_GP_PIN(8, 13)) + return bit; + + return -EINVAL; +} + +static const struct pinmux_bias_reg pinmux_bias_regs[] = { + { PINMUX_BIAS_REG("PUEN0", 0xE60500C0, "PUD0", 0xE60500E0) { + [ 0] = RCAR_GP_PIN(0, 0), /* GP0_00 */ + [ 1] = RCAR_GP_PIN(0, 1), /* GP0_01 */ + [ 2] = RCAR_GP_PIN(0, 2), /* GP0_02 */ + [ 3] = RCAR_GP_PIN(0, 3), /* IRQ3 */ + [ 4] = RCAR_GP_PIN(0, 4), /* IRQ2 */ + [ 5] = RCAR_GP_PIN(0, 5), /* IRQ1 */ + [ 6] = RCAR_GP_PIN(0, 6), /* IRQ0 */ + [ 7] = RCAR_GP_PIN(0, 7), /* MSIOF5_SS2 */ + [ 8] = RCAR_GP_PIN(0, 8), /* MSIOF5_SS1 */ + [ 9] = RCAR_GP_PIN(0, 9), /* MSIOF5_SYNC */ + [10] = RCAR_GP_PIN(0, 10), /* MSIOF5_TXD */ + [11] = RCAR_GP_PIN(0, 11), /* MSIOF5_SCK */ + [12] = RCAR_GP_PIN(0, 12), /* MSIOF5_RXD */ + [13] = RCAR_GP_PIN(0, 13), /* MSIOF2_SS2 */ + [14] = RCAR_GP_PIN(0, 14), /* MSIOF2_SS1 */ + [15] = RCAR_GP_PIN(0, 15), /* MSIOF2_SYNC */ + [16] = RCAR_GP_PIN(0, 16), /* MSIOF2_TXD */ + [17] = RCAR_GP_PIN(0, 17), /* MSIOF2_SCK */ + [18] = RCAR_GP_PIN(0, 18), /* MSIOF2_RXD */ + [19] = SH_PFC_PIN_NONE, + [20] = SH_PFC_PIN_NONE, + [21] = SH_PFC_PIN_NONE, + [22] = SH_PFC_PIN_NONE, + [23] = SH_PFC_PIN_NONE, + [24] = SH_PFC_PIN_NONE, + [25] = SH_PFC_PIN_NONE, + [26] = SH_PFC_PIN_NONE, + [27] = SH_PFC_PIN_NONE, + [28] = SH_PFC_PIN_NONE, + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN1", 0xE60508C0, "PUD1", 0xE60508E0) { + [ 0] = RCAR_GP_PIN(1, 0), /* MSIOF1_SS2 */ + [ 1] = RCAR_GP_PIN(1, 1), /* MSIOF1_SS1 */ + [ 2] = RCAR_GP_PIN(1, 2), /* MSIOF1_SYNC */ + [ 3] = RCAR_GP_PIN(1, 3), /* MSIOF1_SCK */ + [ 4] = RCAR_GP_PIN(1, 4), /* MSIOF1_TXD */ + [ 5] = RCAR_GP_PIN(1, 5), /* MSIOF1_RXD */ + [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_SS2 */ + [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_SS1 */ + [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SYNC */ + [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_TXD */ + [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SCK */ + [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_RXD */ + [12] = RCAR_GP_PIN(1, 12), /* HTX0 */ + [13] = RCAR_GP_PIN(1, 13), /* HCTS0_N */ + [14] = RCAR_GP_PIN(1, 14), /* HRTS0_N */ + [15] = RCAR_GP_PIN(1, 15), /* HSCK0 */ + [16] = RCAR_GP_PIN(1, 16), /* HRX0 */ + [17] = RCAR_GP_PIN(1, 17), /* SCIF_CLK */ + [18] = RCAR_GP_PIN(1, 18), /* SSI_SCK */ + [19] = RCAR_GP_PIN(1, 19), /* SSI_WS */ + [20] = RCAR_GP_PIN(1, 20), /* SSI_SD */ + [21] = RCAR_GP_PIN(1, 21), /* AUDIO_CLKOUT */ + [22] = RCAR_GP_PIN(1, 22), /* AUDIO_CLKIN */ + [23] = RCAR_GP_PIN(1, 23), /* GP1_23 */ + [24] = 
RCAR_GP_PIN(1, 24), /* HRX3 */ + [25] = RCAR_GP_PIN(1, 25), /* HSCK3 */ + [26] = RCAR_GP_PIN(1, 26), /* HRTS3_N */ + [27] = RCAR_GP_PIN(1, 27), /* HCTS3_N */ + [28] = RCAR_GP_PIN(1, 28), /* HTX3 */ + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN2", 0xE60580C0, "PUD2", 0xE60580E0) { + [ 0] = RCAR_GP_PIN(2, 0), /* FXR_TXDA */ + [ 1] = RCAR_GP_PIN(2, 1), /* FXR_TXENA_N */ + [ 2] = RCAR_GP_PIN(2, 2), /* RXDA_EXTFXR */ + [ 3] = RCAR_GP_PIN(2, 3), /* CLK_EXTFXR */ + [ 4] = RCAR_GP_PIN(2, 4), /* RXDB_EXTFXR */ + [ 5] = RCAR_GP_PIN(2, 5), /* FXR_TXENB_N */ + [ 6] = RCAR_GP_PIN(2, 6), /* FXR_TXDB */ + [ 7] = RCAR_GP_PIN(2, 7), /* TPU0TO1 */ + [ 8] = RCAR_GP_PIN(2, 8), /* TPU0TO0 */ + [ 9] = RCAR_GP_PIN(2, 9), /* CAN_CLK */ + [10] = RCAR_GP_PIN(2, 10), /* CANFD0_TX */ + [11] = RCAR_GP_PIN(2, 11), /* CANFD0_RX */ + [12] = RCAR_GP_PIN(2, 12), /* CANFD2_TX */ + [13] = RCAR_GP_PIN(2, 13), /* CANFD2_RX */ + [14] = RCAR_GP_PIN(2, 14), /* CANFD3_TX */ + [15] = RCAR_GP_PIN(2, 15), /* CANFD3_RX */ + [16] = RCAR_GP_PIN(2, 16), /* CANFD4_TX */ + [17] = RCAR_GP_PIN(2, 17), /* CANFD4_RX */ + [18] = RCAR_GP_PIN(2, 18), /* CANFD7_TX */ + [19] = RCAR_GP_PIN(2, 19), /* CANFD7_RX */ + [20] = SH_PFC_PIN_NONE, + [21] = SH_PFC_PIN_NONE, + [22] = SH_PFC_PIN_NONE, + [23] = SH_PFC_PIN_NONE, + [24] = SH_PFC_PIN_NONE, + [25] = SH_PFC_PIN_NONE, + [26] = SH_PFC_PIN_NONE, + [27] = SH_PFC_PIN_NONE, + [28] = SH_PFC_PIN_NONE, + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN3", 0xE60588C0, "PUD3", 0xE60588E0) { + [ 0] = RCAR_GP_PIN(3, 0), /* MMC_SD_D1 */ + [ 1] = RCAR_GP_PIN(3, 1), /* MMC_SD_D0 */ + [ 2] = RCAR_GP_PIN(3, 2), /* MMC_SD_D2 */ + [ 3] = RCAR_GP_PIN(3, 3), /* MMC_SD_CLK */ + [ 4] = RCAR_GP_PIN(3, 4), /* MMC_DS */ + [ 5] = RCAR_GP_PIN(3, 5), /* MMC_SD_D3 */ + [ 6] = RCAR_GP_PIN(3, 6), /* MMC_D5 */ + [ 7] = RCAR_GP_PIN(3, 7), /* MMC_D4 */ + [ 8] = RCAR_GP_PIN(3, 8), /* MMC_D7 */ + [ 9] = RCAR_GP_PIN(3, 9), /* MMC_D6 */ + [10] = RCAR_GP_PIN(3, 10), /* MMC_SD_CMD */ + [11] = RCAR_GP_PIN(3, 11), /* SD_CD */ + [12] = RCAR_GP_PIN(3, 12), /* SD_WP */ + [13] = RCAR_GP_PIN(3, 13), /* IPC_CLKIN */ + [14] = RCAR_GP_PIN(3, 14), /* IPC_CLKOUT */ + [15] = RCAR_GP_PIN(3, 15), /* QSPI0_SSL */ + [16] = RCAR_GP_PIN(3, 16), /* QSPI0_IO3 */ + [17] = RCAR_GP_PIN(3, 17), /* QSPI0_IO2 */ + [18] = RCAR_GP_PIN(3, 18), /* QSPI0_MISO_IO1 */ + [19] = RCAR_GP_PIN(3, 19), /* QSPI0_MOSI_IO0 */ + [20] = RCAR_GP_PIN(3, 20), /* QSPI0_SPCLK */ + [21] = RCAR_GP_PIN(3, 21), /* QSPI1_MOSI_IO0 */ + [22] = RCAR_GP_PIN(3, 22), /* QSPI1_SPCLK */ + [23] = RCAR_GP_PIN(3, 23), /* QSPI1_MISO_IO1 */ + [24] = RCAR_GP_PIN(3, 24), /* QSPI1_IO2 */ + [25] = RCAR_GP_PIN(3, 25), /* QSPI1_SSL */ + [26] = RCAR_GP_PIN(3, 26), /* QSPI1_IO3 */ + [27] = RCAR_GP_PIN(3, 27), /* RPC_RESET_N */ + [28] = RCAR_GP_PIN(3, 28), /* RPC_WP_N */ + [29] = RCAR_GP_PIN(3, 29), /* RPC_INT_N */ + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN4", 0xE60600C0, "PUD4", 0xE60600E0) { + [ 0] = RCAR_GP_PIN(4, 0), /* TSN0_MDIO */ + [ 1] = RCAR_GP_PIN(4, 1), /* TSN0_MDC */ + [ 2] = RCAR_GP_PIN(4, 2), /* TSN0_AVTP_PPS1 */ + [ 3] = RCAR_GP_PIN(4, 3), /* TSN0_PHY_INT */ + [ 4] = RCAR_GP_PIN(4, 4), /* TSN0_LINK */ + [ 5] = RCAR_GP_PIN(4, 5), /* TSN0_AVTP_MATCH */ + [ 6] = RCAR_GP_PIN(4, 6), /* TSN0_AVTP_CAPTURE */ + [ 7] = RCAR_GP_PIN(4, 7), /* TSN0_RX_CTL */ + [ 8] = RCAR_GP_PIN(4, 8), /* TSN0_AVTP_PPS0 */ + [ 9] = RCAR_GP_PIN(4, 9), /* TSN0_TX_CTL 
*/ + [10] = RCAR_GP_PIN(4, 10), /* TSN0_RD0 */ + [11] = RCAR_GP_PIN(4, 11), /* TSN0_RXC */ + [12] = RCAR_GP_PIN(4, 12), /* TSN0_TXC */ + [13] = RCAR_GP_PIN(4, 13), /* TSN0_RD1 */ + [14] = RCAR_GP_PIN(4, 14), /* TSN0_TD1 */ + [15] = RCAR_GP_PIN(4, 15), /* TSN0_TD0 */ + [16] = RCAR_GP_PIN(4, 16), /* TSN0_RD3 */ + [17] = RCAR_GP_PIN(4, 17), /* TSN0_RD2 */ + [18] = RCAR_GP_PIN(4, 18), /* TSN0_TD3 */ + [19] = RCAR_GP_PIN(4, 19), /* TSN0_TD2 */ + [20] = RCAR_GP_PIN(4, 20), /* TSN0_TXCREFCLK */ + [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */ + [22] = RCAR_GP_PIN(4, 22), /* PCIE1_CLKREQ_N */ + [23] = RCAR_GP_PIN(4, 23), /* AVS0 */ + [24] = RCAR_GP_PIN(4, 24), /* AVS1 */ + [25] = SH_PFC_PIN_NONE, + [26] = SH_PFC_PIN_NONE, + [27] = SH_PFC_PIN_NONE, + [28] = SH_PFC_PIN_NONE, + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN5", 0xE60608C0, "PUD5", 0xE60608E0) { + [ 0] = RCAR_GP_PIN(5, 0), /* AVB2_AVTP_PPS */ + [ 1] = RCAR_GP_PIN(5, 1), /* AVB2_AVTP_CAPTURE */ + [ 2] = RCAR_GP_PIN(5, 2), /* AVB2_AVTP_MATCH */ + [ 3] = RCAR_GP_PIN(5, 3), /* AVB2_LINK */ + [ 4] = RCAR_GP_PIN(5, 4), /* AVB2_PHY_INT */ + [ 5] = RCAR_GP_PIN(5, 5), /* AVB2_MAGIC */ + [ 6] = RCAR_GP_PIN(5, 6), /* AVB2_MDC */ + [ 7] = RCAR_GP_PIN(5, 7), /* AVB2_TXCREFCLK */ + [ 8] = RCAR_GP_PIN(5, 8), /* AVB2_TD3 */ + [ 9] = RCAR_GP_PIN(5, 9), /* AVB2_RD3 */ + [10] = RCAR_GP_PIN(5, 10), /* AVB2_MDIO */ + [11] = RCAR_GP_PIN(5, 11), /* AVB2_TD2 */ + [12] = RCAR_GP_PIN(5, 12), /* AVB2_TD1 */ + [13] = RCAR_GP_PIN(5, 13), /* AVB2_RD2 */ + [14] = RCAR_GP_PIN(5, 14), /* AVB2_RD1 */ + [15] = RCAR_GP_PIN(5, 15), /* AVB2_TD0 */ + [16] = RCAR_GP_PIN(5, 16), /* AVB2_TXC */ + [17] = RCAR_GP_PIN(5, 17), /* AVB2_RD0 */ + [18] = RCAR_GP_PIN(5, 18), /* AVB2_RXC */ + [19] = RCAR_GP_PIN(5, 19), /* AVB2_TX_CTL */ + [20] = RCAR_GP_PIN(5, 20), /* AVB2_RX_CTL */ + [21] = SH_PFC_PIN_NONE, + [22] = SH_PFC_PIN_NONE, + [23] = SH_PFC_PIN_NONE, + [24] = SH_PFC_PIN_NONE, + [25] = SH_PFC_PIN_NONE, + [26] = SH_PFC_PIN_NONE, + [27] = SH_PFC_PIN_NONE, + [28] = SH_PFC_PIN_NONE, + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN6", 0xE60610C0, "PUD6", 0xE60610E0) { + [ 0] = RCAR_GP_PIN(6, 0), /* AVB1_MDIO */ + [ 1] = RCAR_GP_PIN(6, 1), /* AVB1_MAGIC */ + [ 2] = RCAR_GP_PIN(6, 2), /* AVB1_MDC */ + [ 3] = RCAR_GP_PIN(6, 3), /* AVB1_PHY_INT */ + [ 4] = RCAR_GP_PIN(6, 4), /* AVB1_LINK */ + [ 5] = RCAR_GP_PIN(6, 5), /* AVB1_AVTP_MATCH */ + [ 6] = RCAR_GP_PIN(6, 6), /* AVB1_TXC */ + [ 7] = RCAR_GP_PIN(6, 7), /* AVB1_TX_CTL */ + [ 8] = RCAR_GP_PIN(6, 8), /* AVB1_RXC */ + [ 9] = RCAR_GP_PIN(6, 9), /* AVB1_RX_CTL */ + [10] = RCAR_GP_PIN(6, 10), /* AVB1_AVTP_PPS */ + [11] = RCAR_GP_PIN(6, 11), /* AVB1_AVTP_CAPTURE */ + [12] = RCAR_GP_PIN(6, 12), /* AVB1_TD1 */ + [13] = RCAR_GP_PIN(6, 13), /* AVB1_TD0 */ + [14] = RCAR_GP_PIN(6, 14), /* AVB1_RD1 */ + [15] = RCAR_GP_PIN(6, 15), /* AVB1_RD0 */ + [16] = RCAR_GP_PIN(6, 16), /* AVB1_TD2 */ + [17] = RCAR_GP_PIN(6, 17), /* AVB1_RD2 */ + [18] = RCAR_GP_PIN(6, 18), /* AVB1_TD3 */ + [19] = RCAR_GP_PIN(6, 19), /* AVB1_RD3 */ + [20] = RCAR_GP_PIN(6, 20), /* AVB1_TXCREFCLK */ + [21] = SH_PFC_PIN_NONE, + [22] = SH_PFC_PIN_NONE, + [23] = SH_PFC_PIN_NONE, + [24] = SH_PFC_PIN_NONE, + [25] = SH_PFC_PIN_NONE, + [26] = SH_PFC_PIN_NONE, + [27] = SH_PFC_PIN_NONE, + [28] = SH_PFC_PIN_NONE, + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN7", 0xE60618C0, "PUD7", 0xE60618E0) { + [ 0] =
RCAR_GP_PIN(7, 0), /* AVB0_AVTP_PPS */ + [ 1] = RCAR_GP_PIN(7, 1), /* AVB0_AVTP_CAPTURE */ + [ 2] = RCAR_GP_PIN(7, 2), /* AVB0_AVTP_MATCH */ + [ 3] = RCAR_GP_PIN(7, 3), /* AVB0_TD3 */ + [ 4] = RCAR_GP_PIN(7, 4), /* AVB0_LINK */ + [ 5] = RCAR_GP_PIN(7, 5), /* AVB0_PHY_INT */ + [ 6] = RCAR_GP_PIN(7, 6), /* AVB0_TD2 */ + [ 7] = RCAR_GP_PIN(7, 7), /* AVB0_TD1 */ + [ 8] = RCAR_GP_PIN(7, 8), /* AVB0_RD3 */ + [ 9] = RCAR_GP_PIN(7, 9), /* AVB0_TXCREFCLK */ + [10] = RCAR_GP_PIN(7, 10), /* AVB0_MAGIC */ + [11] = RCAR_GP_PIN(7, 11), /* AVB0_TD0 */ + [12] = RCAR_GP_PIN(7, 12), /* AVB0_RD2 */ + [13] = RCAR_GP_PIN(7, 13), /* AVB0_MDC */ + [14] = RCAR_GP_PIN(7, 14), /* AVB0_MDIO */ + [15] = RCAR_GP_PIN(7, 15), /* AVB0_TXC */ + [16] = RCAR_GP_PIN(7, 16), /* AVB0_TX_CTL */ + [17] = RCAR_GP_PIN(7, 17), /* AVB0_RD1 */ + [18] = RCAR_GP_PIN(7, 18), /* AVB0_RD0 */ + [19] = RCAR_GP_PIN(7, 19), /* AVB0_RXC */ + [20] = RCAR_GP_PIN(7, 20), /* AVB0_RX_CTL */ + [21] = SH_PFC_PIN_NONE, + [22] = SH_PFC_PIN_NONE, + [23] = SH_PFC_PIN_NONE, + [24] = SH_PFC_PIN_NONE, + [25] = SH_PFC_PIN_NONE, + [26] = SH_PFC_PIN_NONE, + [27] = SH_PFC_PIN_NONE, + [28] = SH_PFC_PIN_NONE, + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { PINMUX_BIAS_REG("PUEN8", 0xE60680C0, "PUD8", 0xE60680E0) { + [ 0] = RCAR_GP_PIN(8, 0), /* SCL0 */ + [ 1] = RCAR_GP_PIN(8, 1), /* SDA0 */ + [ 2] = RCAR_GP_PIN(8, 2), /* SCL1 */ + [ 3] = RCAR_GP_PIN(8, 3), /* SDA1 */ + [ 4] = RCAR_GP_PIN(8, 4), /* SCL2 */ + [ 5] = RCAR_GP_PIN(8, 5), /* SDA2 */ + [ 6] = RCAR_GP_PIN(8, 6), /* SCL3 */ + [ 7] = RCAR_GP_PIN(8, 7), /* SDA3 */ + [ 8] = RCAR_GP_PIN(8, 8), /* SCL4 */ + [ 9] = RCAR_GP_PIN(8, 9), /* SDA4 */ + [10] = RCAR_GP_PIN(8, 10), /* SCL5 */ + [11] = RCAR_GP_PIN(8, 11), /* SDA5 */ + [12] = RCAR_GP_PIN(8, 12), /* GP8_12 */ + [13] = RCAR_GP_PIN(8, 13), /* GP8_13 */ + [14] = SH_PFC_PIN_NONE, + [15] = SH_PFC_PIN_NONE, + [16] = SH_PFC_PIN_NONE, + [17] = SH_PFC_PIN_NONE, + [18] = SH_PFC_PIN_NONE, + [19] = SH_PFC_PIN_NONE, + [20] = SH_PFC_PIN_NONE, + [21] = SH_PFC_PIN_NONE, + [22] = SH_PFC_PIN_NONE, + [23] = SH_PFC_PIN_NONE, + [24] = SH_PFC_PIN_NONE, + [25] = SH_PFC_PIN_NONE, + [26] = SH_PFC_PIN_NONE, + [27] = SH_PFC_PIN_NONE, + [28] = SH_PFC_PIN_NONE, + [29] = SH_PFC_PIN_NONE, + [30] = SH_PFC_PIN_NONE, + [31] = SH_PFC_PIN_NONE, + } }, + { /* sentinel */ }, +}; + +static const struct sh_pfc_soc_operations r8a779g0_pin_ops = { + .pin_to_pocctrl = r8a779g0_pin_to_pocctrl, + .get_bias = rcar_pinmux_get_bias, + .set_bias = rcar_pinmux_set_bias, +}; + +const struct sh_pfc_soc_info r8a779g0_pinmux_info = { + .name = "r8a779g0_pfc", + .ops = &r8a779g0_pin_ops, + .unlock_reg = 0x1ff, /* PMMRn mask */ + + .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, + + .pins = pinmux_pins, + .nr_pins = ARRAY_SIZE(pinmux_pins), + .groups = pinmux_groups, + .nr_groups = ARRAY_SIZE(pinmux_groups), + .functions = pinmux_functions, + .nr_functions = ARRAY_SIZE(pinmux_functions), + + .cfg_regs = pinmux_config_regs, + .drive_regs = pinmux_drive_regs, + .bias_regs = pinmux_bias_regs, + .ioctrl_regs = pinmux_ioctrl_regs, + + .pinmux_data = pinmux_data, + .pinmux_data_size = ARRAY_SIZE(pinmux_data), +}; diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c new file mode 100644 index 0000000000..e8c18198be --- /dev/null +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -0,0 +1,1119 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/V2M Pin Control and GPIO driver core + * + * Based on: + * Renesas RZ/G2L Pin 
Control and GPIO driver core + * + * Copyright (C) 2022 Renesas Electronics Corporation. + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/gpio/driver.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/spinlock.h> + +#include <dt-bindings/pinctrl/rzv2m-pinctrl.h> + +#include "../core.h" +#include "../pinconf.h" +#include "../pinmux.h" + +#define DRV_NAME "pinctrl-rzv2m" + +/* + * Use 16 lower bits [15:0] for pin identifier + * Use 16 higher bits [31:16] for pin mux function + */ +#define MUX_PIN_ID_MASK GENMASK(15, 0) +#define MUX_FUNC_MASK GENMASK(31, 16) +#define MUX_FUNC(pinconf) FIELD_GET(MUX_FUNC_MASK, (pinconf)) + +/* PIN capabilities */ +#define PIN_CFG_GRP_1_8V_2 1 +#define PIN_CFG_GRP_1_8V_3 2 +#define PIN_CFG_GRP_SWIO_1 3 +#define PIN_CFG_GRP_SWIO_2 4 +#define PIN_CFG_GRP_3_3V 5 +#define PIN_CFG_GRP_MASK GENMASK(2, 0) +#define PIN_CFG_BIAS BIT(3) +#define PIN_CFG_DRV BIT(4) +#define PIN_CFG_SLEW BIT(5) + +#define RZV2M_MPXED_PIN_FUNCS (PIN_CFG_BIAS | \ + PIN_CFG_DRV | \ + PIN_CFG_SLEW) + +/* + * n indicates number of pins in the port, a is the register index + * and f is pin configuration capabilities supported. + */ +#define RZV2M_GPIO_PORT_PACK(n, a, f) (((n) << 24) | ((a) << 16) | (f)) +#define RZV2M_GPIO_PORT_GET_PINCNT(x) FIELD_GET(GENMASK(31, 24), (x)) +#define RZV2M_GPIO_PORT_GET_INDEX(x) FIELD_GET(GENMASK(23, 16), (x)) +#define RZV2M_GPIO_PORT_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x)) + +#define RZV2M_DEDICATED_PORT_IDX 22 + +/* + * BIT(31) indicates dedicated pin, b is the register bits (b * 16) + * and f is the pin configuration capabilities supported. + */ +#define RZV2M_SINGLE_PIN BIT(31) +#define RZV2M_SINGLE_PIN_PACK(b, f) (RZV2M_SINGLE_PIN | \ + ((RZV2M_DEDICATED_PORT_IDX) << 24) | \ + ((b) << 16) | (f)) +#define RZV2M_SINGLE_PIN_GET_PORT(x) FIELD_GET(GENMASK(30, 24), (x)) +#define RZV2M_SINGLE_PIN_GET_BIT(x) FIELD_GET(GENMASK(23, 16), (x)) +#define RZV2M_SINGLE_PIN_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x)) + +#define RZV2M_PIN_ID_TO_PORT(id) ((id) / RZV2M_PINS_PER_PORT) +#define RZV2M_PIN_ID_TO_PIN(id) ((id) % RZV2M_PINS_PER_PORT) + +#define DO(n) (0x00 + (n) * 0x40) +#define OE(n) (0x04 + (n) * 0x40) +#define IE(n) (0x08 + (n) * 0x40) +#define PFSEL(n) (0x10 + (n) * 0x40) +#define DI(n) (0x20 + (n) * 0x40) +#define PUPD(n) (0x24 + (n) * 0x40) +#define DRV(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x28 + (n) * 0x40) \ + : 0x590) +#define SR(n) ((n) < RZV2M_DEDICATED_PORT_IDX ?
(0x2c + (n) * 0x40) \ + : 0x594) +#define DI_MSK(n) (0x30 + (n) * 0x40) +#define EN_MSK(n) (0x34 + (n) * 0x40) + +#define PFC_MASK 0x07 +#define PUPD_MASK 0x03 +#define DRV_MASK 0x03 + +struct rzv2m_dedicated_configs { + const char *name; + u32 config; +}; + +struct rzv2m_pinctrl_data { + const char * const *port_pins; + const u32 *port_pin_configs; + const struct rzv2m_dedicated_configs *dedicated_pins; + unsigned int n_port_pins; + unsigned int n_dedicated_pins; +}; + +struct rzv2m_pinctrl { + struct pinctrl_dev *pctl; + struct pinctrl_desc desc; + struct pinctrl_pin_desc *pins; + + const struct rzv2m_pinctrl_data *data; + void __iomem *base; + struct device *dev; + struct clk *clk; + + struct gpio_chip gpio_chip; + struct pinctrl_gpio_range gpio_range; + + spinlock_t lock; +}; + +static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 }; +static const unsigned int drv_1_8V_group3_uA[] = { 1600, 3200, 6400, 9600 }; +static const unsigned int drv_SWIO_group2_3_3V_uA[] = { 9000, 11000, 13000, 18000 }; +static const unsigned int drv_3_3V_group_uA[] = { 2000, 4000, 8000, 12000 }; + +/* Helper for registers that have a write enable bit in the upper word */ +static void rzv2m_writel_we(void __iomem *addr, u8 shift, u8 value) +{ + writel((BIT(16) | value) << shift, addr); +} + +static void rzv2m_pinctrl_set_pfc_mode(struct rzv2m_pinctrl *pctrl, + u8 port, u8 pin, u8 func) +{ + void __iomem *addr; + + /* Mask input/output */ + rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 1); + rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 1); + + /* Select the function and set the write enable bits */ + addr = pctrl->base + PFSEL(port) + (pin / 4) * 4; + writel(((PFC_MASK << 16) | func) << ((pin % 4) * 4), addr); + + /* Unmask input/output */ + rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 0); + rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 0); +}; + +static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev, + unsigned int func_selector, + unsigned int group_selector) +{ + struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct function_desc *func; + unsigned int i, *psel_val; + struct group_desc *group; + int *pins; + + func = pinmux_generic_get_function(pctldev, func_selector); + if (!func) + return -EINVAL; + group = pinctrl_generic_get_group(pctldev, group_selector); + if (!group) + return -EINVAL; + + psel_val = func->data; + pins = group->pins; + + for (i = 0; i < group->num_pins; i++) { + dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n", + RZV2M_PIN_ID_TO_PORT(pins[i]), RZV2M_PIN_ID_TO_PIN(pins[i]), + psel_val[i]); + rzv2m_pinctrl_set_pfc_mode(pctrl, RZV2M_PIN_ID_TO_PORT(pins[i]), + RZV2M_PIN_ID_TO_PIN(pins[i]), psel_val[i]); + } + + return 0; +}; + +static int rzv2m_map_add_config(struct pinctrl_map *map, + const char *group_or_pin, + enum pinctrl_map_type type, + unsigned long *configs, + unsigned int num_configs) +{ + unsigned long *cfgs; + + cfgs = kmemdup(configs, num_configs * sizeof(*cfgs), + GFP_KERNEL); + if (!cfgs) + return -ENOMEM; + + map->type = type; + map->data.configs.group_or_pin = group_or_pin; + map->data.configs.configs = cfgs; + map->data.configs.num_configs = num_configs; + + return 0; +} + +static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, + struct pinctrl_map **map, + unsigned int *num_maps, + unsigned int *index) +{ + struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct pinctrl_map *maps = *map; + unsigned int nmaps = *num_maps; + unsigned long *configs = NULL; + 
unsigned int *pins, *psel_val; + unsigned int num_pinmux = 0; + unsigned int idx = *index; + unsigned int num_pins, i; + unsigned int num_configs; + struct property *pinmux; + struct property *prop; + int ret, gsel, fsel; + const char **pin_fn; + const char *pin; + + pinmux = of_find_property(np, "pinmux", NULL); + if (pinmux) + num_pinmux = pinmux->length / sizeof(u32); + + ret = of_property_count_strings(np, "pins"); + if (ret == -EINVAL) { + num_pins = 0; + } else if (ret < 0) { + dev_err(pctrl->dev, "Invalid pins list in DT\n"); + return ret; + } else { + num_pins = ret; + } + + if (!num_pinmux && !num_pins) + return 0; + + if (num_pinmux && num_pins) { + dev_err(pctrl->dev, + "DT node must contain either a pinmux or pins and not both\n"); + return -EINVAL; + } + + ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs); + if (ret < 0) + return ret; + + if (num_pins && !num_configs) { + dev_err(pctrl->dev, "DT node must contain a config\n"); + ret = -ENODEV; + goto done; + } + + if (num_pinmux) + nmaps += 1; + + if (num_pins) + nmaps += num_pins; + + maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL); + if (!maps) { + ret = -ENOMEM; + goto done; + } + + *map = maps; + *num_maps = nmaps; + if (num_pins) { + of_property_for_each_string(np, "pins", prop, pin) { + ret = rzv2m_map_add_config(&maps[idx], pin, + PIN_MAP_TYPE_CONFIGS_PIN, + configs, num_configs); + if (ret < 0) + goto done; + + idx++; + } + ret = 0; + goto done; + } + + pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL); + psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val), + GFP_KERNEL); + pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL); + if (!pins || !psel_val || !pin_fn) { + ret = -ENOMEM; + goto done; + } + + /* Collect pin locations and mux settings from DT properties */ + for (i = 0; i < num_pinmux; ++i) { + u32 value; + + ret = of_property_read_u32_index(np, "pinmux", i, &value); + if (ret) + goto done; + pins[i] = value & MUX_PIN_ID_MASK; + psel_val[i] = MUX_FUNC(value); + } + + /* Register a single pin group listing all the pins we read from DT */ + gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL); + if (gsel < 0) { + ret = gsel; + goto done; + } + + /* + * Register a single group function where the 'data' is an array of + * PSEL register values read from DT.
+ */ + pin_fn[0] = np->name; + fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1, + psel_val); + if (fsel < 0) { + ret = fsel; + goto remove_group; + } + + maps[idx].type = PIN_MAP_TYPE_MUX_GROUP; + maps[idx].data.mux.group = np->name; + maps[idx].data.mux.function = np->name; + idx++; + + dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux); + ret = 0; + goto done; + +remove_group: + pinctrl_generic_remove_group(pctldev, gsel); +done: + *index = idx; + kfree(configs); + return ret; +} + +static void rzv2m_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, + unsigned int num_maps) +{ + unsigned int i; + + if (!map) + return; + + for (i = 0; i < num_maps; ++i) { + if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP || + map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) + kfree(map[i].data.configs.configs); + } + kfree(map); +} + +static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, + struct pinctrl_map **map, + unsigned int *num_maps) +{ + struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct device_node *child; + unsigned int index; + int ret; + + *map = NULL; + *num_maps = 0; + index = 0; + + for_each_child_of_node(np, child) { + ret = rzv2m_dt_subnode_to_map(pctldev, child, map, + num_maps, &index); + if (ret < 0) { + of_node_put(child); + goto done; + } + } + + if (*num_maps == 0) { + ret = rzv2m_dt_subnode_to_map(pctldev, np, map, + num_maps, &index); + if (ret < 0) + goto done; + } + + if (*num_maps) + return 0; + + dev_err(pctrl->dev, "no mapping found in node %pOF\n", np); + ret = -EINVAL; + +done: + if (ret < 0) + rzv2m_dt_free_map(pctldev, *map, *num_maps); + + return ret; +} + +static int rzv2m_validate_gpio_pin(struct rzv2m_pinctrl *pctrl, + u32 cfg, u32 port, u8 bit) +{ + u8 pincount = RZV2M_GPIO_PORT_GET_PINCNT(cfg); + u32 port_index = RZV2M_GPIO_PORT_GET_INDEX(cfg); + u32 data; + + if (bit >= pincount || port >= pctrl->data->n_port_pins) + return -EINVAL; + + data = pctrl->data->port_pin_configs[port]; + if (port_index != RZV2M_GPIO_PORT_GET_INDEX(data)) + return -EINVAL; + + return 0; +} + +static void rzv2m_rmw_pin_config(struct rzv2m_pinctrl *pctrl, u32 offset, + u8 shift, u32 mask, u32 val) +{ + void __iomem *addr = pctrl->base + offset; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&pctrl->lock, flags); + reg = readl(addr) & ~(mask << shift); + writel(reg | (val << shift), addr); + spin_unlock_irqrestore(&pctrl->lock, flags); +} + +static int rzv2m_pinctrl_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int _pin, + unsigned long *config) +{ + struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin]; + unsigned int *pin_data = pin->drv_data; + unsigned int arg = 0; + u32 port; + u32 cfg; + u8 bit; + u32 val; + + if (!pin_data) + return -EINVAL; + + if (*pin_data & RZV2M_SINGLE_PIN) { + port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data); + cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data); + bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data); + } else { + cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data); + port = RZV2M_PIN_ID_TO_PORT(_pin); + bit = RZV2M_PIN_ID_TO_PIN(_pin); + + if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit)) + return -EINVAL; + } + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: { + enum pin_config_param bias; + + if (!(cfg & PIN_CFG_BIAS)) + return -EINVAL; 
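+ /* + * The 2-bit PUPD field per pin encodes the bias setting: 0 selects + * pull-down, 2 selects pull-up, and any other value reads back as + * bias disabled. Returning -EINVAL below reports that the pin is + * not in the queried state, as the pinconf core expects. + */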
+ + /* PUPD uses 2-bits per pin */ + bit *= 2; + + switch ((readl(pctrl->base + PUPD(port)) >> bit) & PUPD_MASK) { + case 0: + bias = PIN_CONFIG_BIAS_PULL_DOWN; + break; + case 2: + bias = PIN_CONFIG_BIAS_PULL_UP; + break; + default: + bias = PIN_CONFIG_BIAS_DISABLE; + } + + if (bias != param) + return -EINVAL; + break; + } + + case PIN_CONFIG_DRIVE_STRENGTH_UA: + if (!(cfg & PIN_CFG_DRV)) + return -EINVAL; + + /* DRV uses 2-bits per pin */ + bit *= 2; + + val = (readl(pctrl->base + DRV(port)) >> bit) & DRV_MASK; + + switch (cfg & PIN_CFG_GRP_MASK) { + case PIN_CFG_GRP_1_8V_2: + arg = drv_1_8V_group2_uA[val]; + break; + case PIN_CFG_GRP_1_8V_3: + arg = drv_1_8V_group3_uA[val]; + break; + case PIN_CFG_GRP_SWIO_2: + arg = drv_SWIO_group2_3_3V_uA[val]; + break; + case PIN_CFG_GRP_SWIO_1: + case PIN_CFG_GRP_3_3V: + arg = drv_3_3V_group_uA[val]; + break; + default: + return -EINVAL; + } + + break; + + case PIN_CONFIG_SLEW_RATE: + if (!(cfg & PIN_CFG_SLEW)) + return -EINVAL; + + arg = readl(pctrl->base + SR(port)) & BIT(bit); + break; + + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +}; + +static int rzv2m_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int _pin, + unsigned long *_configs, + unsigned int num_configs) +{ + struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin]; + unsigned int *pin_data = pin->drv_data; + enum pin_config_param param; + u32 port; + unsigned int i; + u32 cfg; + u8 bit; + u32 val; + + if (!pin_data) + return -EINVAL; + + if (*pin_data & RZV2M_SINGLE_PIN) { + port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data); + cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data); + bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data); + } else { + cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data); + port = RZV2M_PIN_ID_TO_PORT(_pin); + bit = RZV2M_PIN_ID_TO_PIN(_pin); + + if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit)) + return -EINVAL; + } + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(_configs[i]); + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + if (!(cfg & PIN_CFG_BIAS)) + return -EINVAL; + + /* PUPD uses 2-bits per pin */ + bit *= 2; + + switch (param) { + case PIN_CONFIG_BIAS_PULL_DOWN: + val = 0; + break; + case PIN_CONFIG_BIAS_PULL_UP: + val = 2; + break; + default: + val = 1; + } + + rzv2m_rmw_pin_config(pctrl, PUPD(port), bit, PUPD_MASK, val); + break; + + case PIN_CONFIG_DRIVE_STRENGTH_UA: { + unsigned int arg = pinconf_to_config_argument(_configs[i]); + const unsigned int *drv_strengths; + unsigned int index; + + if (!(cfg & PIN_CFG_DRV)) + return -EINVAL; + + switch (cfg & PIN_CFG_GRP_MASK) { + case PIN_CFG_GRP_1_8V_2: + drv_strengths = drv_1_8V_group2_uA; + break; + case PIN_CFG_GRP_1_8V_3: + drv_strengths = drv_1_8V_group3_uA; + break; + case PIN_CFG_GRP_SWIO_2: + drv_strengths = drv_SWIO_group2_3_3V_uA; + break; + case PIN_CFG_GRP_SWIO_1: + case PIN_CFG_GRP_3_3V: + drv_strengths = drv_3_3V_group_uA; + break; + default: + return -EINVAL; + } + + for (index = 0; index < 4; index++) { + if (arg == drv_strengths[index]) + break; + } + if (index >= 4) + return -EINVAL; + + /* DRV uses 2-bits per pin */ + bit *= 2; + + rzv2m_rmw_pin_config(pctrl, DRV(port), bit, DRV_MASK, index); + break; + } + + case PIN_CONFIG_SLEW_RATE: { + unsigned int arg = pinconf_to_config_argument(_configs[i]); + + if (!(cfg & PIN_CFG_SLEW)) + return -EINVAL; + + 
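/* + * Note: the SR bit is written as the inverse of the generic + * slew-rate argument below, so a non-zero argument clears the bit. + */ +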
rzv2m_writel_we(pctrl->base + SR(port), bit, !arg); + break; + } + + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int rzv2m_pinctrl_pinconf_group_set(struct pinctrl_dev *pctldev, + unsigned int group, + unsigned long *configs, + unsigned int num_configs) +{ + const unsigned int *pins; + unsigned int i, npins; + int ret; + + ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins); + if (ret) + return ret; + + for (i = 0; i < npins; i++) { + ret = rzv2m_pinctrl_pinconf_set(pctldev, pins[i], configs, + num_configs); + if (ret) + return ret; + } + + return 0; +}; + +static int rzv2m_pinctrl_pinconf_group_get(struct pinctrl_dev *pctldev, + unsigned int group, + unsigned long *config) +{ + const unsigned int *pins; + unsigned int i, npins, prev_config = 0; + int ret; + + ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins); + if (ret) + return ret; + + for (i = 0; i < npins; i++) { + ret = rzv2m_pinctrl_pinconf_get(pctldev, pins[i], config); + if (ret) + return ret; + + /* Check config matches previous pins */ + if (i && prev_config != *config) + return -EOPNOTSUPP; + + prev_config = *config; + } + + return 0; +}; + +static const struct pinctrl_ops rzv2m_pinctrl_pctlops = { + .get_groups_count = pinctrl_generic_get_group_count, + .get_group_name = pinctrl_generic_get_group_name, + .get_group_pins = pinctrl_generic_get_group_pins, + .dt_node_to_map = rzv2m_dt_node_to_map, + .dt_free_map = rzv2m_dt_free_map, +}; + +static const struct pinmux_ops rzv2m_pinctrl_pmxops = { + .get_functions_count = pinmux_generic_get_function_count, + .get_function_name = pinmux_generic_get_function_name, + .get_function_groups = pinmux_generic_get_function_groups, + .set_mux = rzv2m_pinctrl_set_mux, + .strict = true, +}; + +static const struct pinconf_ops rzv2m_pinctrl_confops = { + .is_generic = true, + .pin_config_get = rzv2m_pinctrl_pinconf_get, + .pin_config_set = rzv2m_pinctrl_pinconf_set, + .pin_config_group_set = rzv2m_pinctrl_pinconf_group_set, + .pin_config_group_get = rzv2m_pinctrl_pinconf_group_get, + .pin_config_config_dbg_show = pinconf_generic_dump_config, +}; + +static int rzv2m_gpio_request(struct gpio_chip *chip, unsigned int offset) +{ + struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip); + u32 port = RZV2M_PIN_ID_TO_PORT(offset); + u8 bit = RZV2M_PIN_ID_TO_PIN(offset); + int ret; + + ret = pinctrl_gpio_request(chip->base + offset); + if (ret) + return ret; + + rzv2m_pinctrl_set_pfc_mode(pctrl, port, bit, 0); + + return 0; +} + +static void rzv2m_gpio_set_direction(struct rzv2m_pinctrl *pctrl, u32 port, + u8 bit, bool output) +{ + rzv2m_writel_we(pctrl->base + OE(port), bit, output); + rzv2m_writel_we(pctrl->base + IE(port), bit, !output); +} + +static int rzv2m_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip); + u32 port = RZV2M_PIN_ID_TO_PORT(offset); + u8 bit = RZV2M_PIN_ID_TO_PIN(offset); + + if (!(readl(pctrl->base + IE(port)) & BIT(bit))) + return GPIO_LINE_DIRECTION_OUT; + + return GPIO_LINE_DIRECTION_IN; +} + +static int rzv2m_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip); + u32 port = RZV2M_PIN_ID_TO_PORT(offset); + u8 bit = RZV2M_PIN_ID_TO_PIN(offset); + + rzv2m_gpio_set_direction(pctrl, port, bit, false); + + return 0; +} + +static void rzv2m_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip); + u32 port = 
RZV2M_PIN_ID_TO_PORT(offset); + u8 bit = RZV2M_PIN_ID_TO_PIN(offset); + + rzv2m_writel_we(pctrl->base + DO(port), bit, !!value); +} + +static int rzv2m_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip); + u32 port = RZV2M_PIN_ID_TO_PORT(offset); + u8 bit = RZV2M_PIN_ID_TO_PIN(offset); + + rzv2m_gpio_set(chip, offset, value); + rzv2m_gpio_set_direction(pctrl, port, bit, true); + + return 0; +} + +static int rzv2m_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip); + u32 port = RZV2M_PIN_ID_TO_PORT(offset); + u8 bit = RZV2M_PIN_ID_TO_PIN(offset); + int direction = rzv2m_gpio_get_direction(chip, offset); + + if (direction == GPIO_LINE_DIRECTION_IN) + return !!(readl(pctrl->base + DI(port)) & BIT(bit)); + else + return !!(readl(pctrl->base + DO(port)) & BIT(bit)); +} + +static void rzv2m_gpio_free(struct gpio_chip *chip, unsigned int offset) +{ + pinctrl_gpio_free(chip->base + offset); + + /* + * Set the GPIO as an input to ensure that the next GPIO request won't + * drive the GPIO pin as an output. + */ + rzv2m_gpio_direction_input(chip, offset); +} + +static const char * const rzv2m_gpio_names[] = { + "P0_0", "P0_1", "P0_2", "P0_3", "P0_4", "P0_5", "P0_6", "P0_7", + "P0_8", "P0_9", "P0_10", "P0_11", "P0_12", "P0_13", "P0_14", "P0_15", + "P1_0", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P1_7", + "P1_8", "P1_9", "P1_10", "P1_11", "P1_12", "P1_13", "P1_14", "P1_15", + "P2_0", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6", "P2_7", + "P2_8", "P2_9", "P2_10", "P2_11", "P2_12", "P2_13", "P2_14", "P2_15", + "P3_0", "P3_1", "P3_2", "P3_3", "P3_4", "P3_5", "P3_6", "P3_7", + "P3_8", "P3_9", "P3_10", "P3_11", "P3_12", "P3_13", "P3_14", "P3_15", + "P4_0", "P4_1", "P4_2", "P4_3", "P4_4", "P4_5", "P4_6", "P4_7", + "P4_8", "P4_9", "P4_10", "P4_11", "P4_12", "P4_13", "P4_14", "P4_15", + "P5_0", "P5_1", "P5_2", "P5_3", "P5_4", "P5_5", "P5_6", "P5_7", + "P5_8", "P5_9", "P5_10", "P5_11", "P5_12", "P5_13", "P5_14", "P5_15", + "P6_0", "P6_1", "P6_2", "P6_3", "P6_4", "P6_5", "P6_6", "P6_7", + "P6_8", "P6_9", "P6_10", "P6_11", "P6_12", "P6_13", "P6_14", "P6_15", + "P7_0", "P7_1", "P7_2", "P7_3", "P7_4", "P7_5", "P7_6", "P7_7", + "P7_8", "P7_9", "P7_10", "P7_11", "P7_12", "P7_13", "P7_14", "P7_15", + "P8_0", "P8_1", "P8_2", "P8_3", "P8_4", "P8_5", "P8_6", "P8_7", + "P8_8", "P8_9", "P8_10", "P8_11", "P8_12", "P8_13", "P8_14", "P8_15", + "P9_0", "P9_1", "P9_2", "P9_3", "P9_4", "P9_5", "P9_6", "P9_7", + "P9_8", "P9_9", "P9_10", "P9_11", "P9_12", "P9_13", "P9_14", "P9_15", + "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7", + "P10_8", "P10_9", "P10_10", "P10_11", "P10_12", "P10_13", "P10_14", "P10_15", + "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7", + "P11_8", "P11_9", "P11_10", "P11_11", "P11_12", "P11_13", "P11_14", "P11_15", + "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7", + "P12_8", "P12_9", "P12_10", "P12_11", "P12_12", "P12_13", "P12_14", "P12_15", + "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7", + "P13_8", "P13_9", "P13_10", "P13_11", "P13_12", "P13_13", "P13_14", "P13_15", + "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7", + "P14_8", "P14_9", "P14_10", "P14_11", "P14_12", "P14_13", "P14_14", "P14_15", + "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7", + "P15_8", "P15_9", "P15_10", "P15_11", "P15_12", "P15_13", 
"P15_14", "P15_15", + "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7", + "P16_8", "P16_9", "P16_10", "P16_11", "P16_12", "P16_13", "P16_14", "P16_15", + "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7", + "P17_8", "P17_9", "P17_10", "P17_11", "P17_12", "P17_13", "P17_14", "P17_15", + "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7", + "P18_8", "P18_9", "P18_10", "P18_11", "P18_12", "P18_13", "P18_14", "P18_15", + "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7", + "P19_8", "P19_9", "P19_10", "P19_11", "P19_12", "P19_13", "P19_14", "P19_15", + "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7", + "P20_8", "P20_9", "P20_10", "P20_11", "P20_12", "P20_13", "P20_14", "P20_15", + "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7", + "P21_8", "P21_9", "P21_10", "P21_11", "P21_12", "P21_13", "P21_14", "P21_15", +}; + +static const u32 rzv2m_gpio_configs[] = { + RZV2M_GPIO_PORT_PACK(14, 0, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(16, 1, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(8, 2, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(16, 3, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(8, 4, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(4, 5, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(12, 6, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(6, 7, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(8, 8, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(8, 9, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(9, 10, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(9, 11, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(4, 12, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(12, 13, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(8, 14, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(16, 15, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(14, 16, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(1, 17, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS), + RZV2M_GPIO_PORT_PACK(0, 18, 0), + RZV2M_GPIO_PORT_PACK(0, 19, 0), + RZV2M_GPIO_PORT_PACK(3, 20, PIN_CFG_GRP_1_8V_2 | PIN_CFG_DRV), + RZV2M_GPIO_PORT_PACK(1, 21, PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW), +}; + +static const struct rzv2m_dedicated_configs rzv2m_dedicated_pins[] = { + { "NAWPN", RZV2M_SINGLE_PIN_PACK(0, + (PIN_CFG_GRP_SWIO_2 | PIN_CFG_DRV | PIN_CFG_SLEW)) }, + { "IM0CLK", RZV2M_SINGLE_PIN_PACK(1, + (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) }, + { "IM1CLK", RZV2M_SINGLE_PIN_PACK(2, + (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) }, + { "DETDO", RZV2M_SINGLE_PIN_PACK(5, + (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) }, + { "DETMS", RZV2M_SINGLE_PIN_PACK(6, + (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) }, + { "PCRSTOUTB", RZV2M_SINGLE_PIN_PACK(12, + (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) }, + { "USPWEN", RZV2M_SINGLE_PIN_PACK(14, + (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) }, +}; + +static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl) +{ + struct device_node *np = pctrl->dev->of_node; + struct gpio_chip *chip = &pctrl->gpio_chip; + const char *name = dev_name(pctrl->dev); + struct of_phandle_args of_args; + int ret; + + ret = 
of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args); + if (ret) { + dev_err(pctrl->dev, "Unable to parse gpio-ranges\n"); + return ret; + } + + if (of_args.args[0] != 0 || of_args.args[1] != 0 || + of_args.args[2] != pctrl->data->n_port_pins) { + dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n"); + return -EINVAL; + } + + chip->names = pctrl->data->port_pins; + chip->request = rzv2m_gpio_request; + chip->free = rzv2m_gpio_free; + chip->get_direction = rzv2m_gpio_get_direction; + chip->direction_input = rzv2m_gpio_direction_input; + chip->direction_output = rzv2m_gpio_direction_output; + chip->get = rzv2m_gpio_get; + chip->set = rzv2m_gpio_set; + chip->label = name; + chip->parent = pctrl->dev; + chip->owner = THIS_MODULE; + chip->base = -1; + chip->ngpio = of_args.args[2]; + + pctrl->gpio_range.id = 0; + pctrl->gpio_range.pin_base = 0; + pctrl->gpio_range.base = 0; + pctrl->gpio_range.npins = chip->ngpio; + pctrl->gpio_range.name = chip->label; + pctrl->gpio_range.gc = chip; + ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to add GPIO controller\n"); + return ret; + } + + dev_dbg(pctrl->dev, "Registered gpio controller\n"); + + return 0; +} + +static int rzv2m_pinctrl_register(struct rzv2m_pinctrl *pctrl) +{ + struct pinctrl_pin_desc *pins; + unsigned int i, j; + u32 *pin_data; + int ret; + + pctrl->desc.name = DRV_NAME; + pctrl->desc.npins = pctrl->data->n_port_pins + pctrl->data->n_dedicated_pins; + pctrl->desc.pctlops = &rzv2m_pinctrl_pctlops; + pctrl->desc.pmxops = &rzv2m_pinctrl_pmxops; + pctrl->desc.confops = &rzv2m_pinctrl_confops; + pctrl->desc.owner = THIS_MODULE; + + pins = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pins), GFP_KERNEL); + if (!pins) + return -ENOMEM; + + pin_data = devm_kcalloc(pctrl->dev, pctrl->desc.npins, + sizeof(*pin_data), GFP_KERNEL); + if (!pin_data) + return -ENOMEM; + + pctrl->pins = pins; + pctrl->desc.pins = pins; + + for (i = 0, j = 0; i < pctrl->data->n_port_pins; i++) { + pins[i].number = i; + pins[i].name = pctrl->data->port_pins[i]; + if (i && !(i % RZV2M_PINS_PER_PORT)) + j++; + pin_data[i] = pctrl->data->port_pin_configs[j]; + pins[i].drv_data = &pin_data[i]; + } + + for (i = 0; i < pctrl->data->n_dedicated_pins; i++) { + unsigned int index = pctrl->data->n_port_pins + i; + + pins[index].number = index; + pins[index].name = pctrl->data->dedicated_pins[i].name; + pin_data[index] = pctrl->data->dedicated_pins[i].config; + pins[index].drv_data = &pin_data[index]; + } + + ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl, + &pctrl->pctl); + if (ret) { + dev_err(pctrl->dev, "pinctrl registration failed\n"); + return ret; + } + + ret = pinctrl_enable(pctrl->pctl); + if (ret) { + dev_err(pctrl->dev, "pinctrl enable failed\n"); + return ret; + } + + ret = rzv2m_gpio_register(pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret); + return ret; + } + + return 0; +} + +static void rzv2m_pinctrl_clk_disable(void *data) +{ + clk_disable_unprepare(data); +} + +static int rzv2m_pinctrl_probe(struct platform_device *pdev) +{ + struct rzv2m_pinctrl *pctrl; + int ret; + + pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + + pctrl->dev = &pdev->dev; + + pctrl->data = of_device_get_match_data(&pdev->dev); + if (!pctrl->data) + return -EINVAL; + + pctrl->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pctrl->base)) + return PTR_ERR(pctrl->base); + + pctrl->clk = 
devm_clk_get(pctrl->dev, NULL); + if (IS_ERR(pctrl->clk)) { + ret = PTR_ERR(pctrl->clk); + dev_err(pctrl->dev, "failed to get GPIO clk: %i\n", ret); + return ret; + } + + spin_lock_init(&pctrl->lock); + + platform_set_drvdata(pdev, pctrl); + + ret = clk_prepare_enable(pctrl->clk); + if (ret) { + dev_err(pctrl->dev, "failed to enable GPIO clk: %i\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(&pdev->dev, rzv2m_pinctrl_clk_disable, + pctrl->clk); + if (ret) { + dev_err(pctrl->dev, + "failed to register GPIO clk disable action, %i\n", + ret); + return ret; + } + + ret = rzv2m_pinctrl_register(pctrl); + if (ret) + return ret; + + dev_info(pctrl->dev, "%s support registered\n", DRV_NAME); + return 0; +} + +static struct rzv2m_pinctrl_data r9a09g011_data = { + .port_pins = rzv2m_gpio_names, + .port_pin_configs = rzv2m_gpio_configs, + .dedicated_pins = rzv2m_dedicated_pins, + .n_port_pins = ARRAY_SIZE(rzv2m_gpio_configs) * RZV2M_PINS_PER_PORT, + .n_dedicated_pins = ARRAY_SIZE(rzv2m_dedicated_pins), +}; + +static const struct of_device_id rzv2m_pinctrl_of_table[] = { + { + .compatible = "renesas,r9a09g011-pinctrl", + .data = &r9a09g011_data, + }, + { /* sentinel */ } +}; + +static struct platform_driver rzv2m_pinctrl_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(rzv2m_pinctrl_of_table), + }, + .probe = rzv2m_pinctrl_probe, +}; + +static int __init rzv2m_pinctrl_init(void) +{ + return platform_driver_register(&rzv2m_pinctrl_driver); +} +core_initcall(rzv2m_pinctrl_init); + +MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>"); +MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/V2M"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c new file mode 100644 index 0000000000..40858b8812 --- /dev/null +++ b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c @@ -0,0 +1,840 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Allwinner D1 SoC pinctrl driver.
+ * + * Copyright (c) 2020 wuyan@allwinnertech.com + * Copyright (c) 2021-2022 Samuel Holland <samuel@sholland.org> + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-sunxi.h" + +static const struct sunxi_desc_pin d1_pins[] = { + /* PB */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "pwm3"), + SUNXI_FUNCTION(0x3, "ir"), /* TX */ + SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x5, "spi1"), /* WP */ + SUNXI_FUNCTION(0x6, "uart0"), /* TX */ + SUNXI_FUNCTION(0x7, "uart2"), /* TX */ + SUNXI_FUNCTION(0x8, "spdif"), /* OUT */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 0)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "pwm4"), + SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT3 */ + SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */ + SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN3 */ + SUNXI_FUNCTION(0x6, "uart0"), /* RX */ + SUNXI_FUNCTION(0x7, "uart2"), /* RX */ + SUNXI_FUNCTION(0x8, "ir"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 1)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */ + SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT2 */ + SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */ + SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN2 */ + SUNXI_FUNCTION(0x6, "lcd0"), /* D18 */ + SUNXI_FUNCTION(0x7, "uart4"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 2)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */ + SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT1 */ + SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */ + SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN0 */ + SUNXI_FUNCTION(0x6, "lcd0"), /* D19 */ + SUNXI_FUNCTION(0x7, "uart4"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 3)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */ + SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT0 */ + SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */ + SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN1 */ + SUNXI_FUNCTION(0x6, "lcd0"), /* D20 */ + SUNXI_FUNCTION(0x7, "uart5"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 4)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */ + SUNXI_FUNCTION(0x3, "i2s2"), /* BCLK */ + SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */ + SUNXI_FUNCTION(0x5, "pwm0"), + SUNXI_FUNCTION(0x6, "lcd0"), /* D21 */ + SUNXI_FUNCTION(0x7, "uart5"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 5)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */ + SUNXI_FUNCTION(0x3, "i2s2"), /* LRCK */ + SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */ + SUNXI_FUNCTION(0x5, "pwm1"), + SUNXI_FUNCTION(0x6, "lcd0"), /* D22 */ + SUNXI_FUNCTION(0x7, "uart3"), /* TX */ + SUNXI_FUNCTION(0x8, "bist0"), /* BIST_RESULT0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 6)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */ + SUNXI_FUNCTION(0x3, "i2s2"), /* MCLK */ + SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */ + SUNXI_FUNCTION(0x5, "ir"), /* RX */ + SUNXI_FUNCTION(0x6, "lcd0"), /* D23 */ + SUNXI_FUNCTION(0x7, "uart3"), /* RX */ + SUNXI_FUNCTION(0x8, "bist1"), /* BIST_RESULT1 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "dmic"), /* DATA3 */ + SUNXI_FUNCTION(0x3, "pwm5"), + SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x5, "spi1"), /* HOLD */ + SUNXI_FUNCTION(0x6, "uart0"), /* TX */ + SUNXI_FUNCTION(0x7, "uart1"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 8)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "dmic"), /* DATA2 */ + SUNXI_FUNCTION(0x3, "pwm6"), + SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */ + SUNXI_FUNCTION(0x5, "spi1"), /* MISO */ + SUNXI_FUNCTION(0x6, "uart0"), /* RX */ + SUNXI_FUNCTION(0x7, "uart1"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 9)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "dmic"), /* DATA1 */ + SUNXI_FUNCTION(0x3, "pwm7"), + SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */ + SUNXI_FUNCTION(0x5, "spi1"), /* MOSI */ + SUNXI_FUNCTION(0x6, "clk"), /* FANOUT0 */ + SUNXI_FUNCTION(0x7, "uart1"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 10)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "dmic"), /* DATA0 */ + SUNXI_FUNCTION(0x3, "pwm2"), + SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */ + SUNXI_FUNCTION(0x5, "spi1"), /* CLK */ + SUNXI_FUNCTION(0x6, "clk"), /* FANOUT1 */ + SUNXI_FUNCTION(0x7, "uart1"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 11)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "dmic"), /* CLK */ + SUNXI_FUNCTION(0x3, "pwm0"), + SUNXI_FUNCTION(0x4, "spdif"), /* IN */ + SUNXI_FUNCTION(0x5, "spi1"), /* CS0 */ + SUNXI_FUNCTION(0x6, "clk"), /* FANOUT2 */ + SUNXI_FUNCTION(0x7, "ir"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 12)), + /* PC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* TX */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x4, "ledc"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 0)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* RX */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 1)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* CLK */ + SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 2)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */ + SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 3)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */ + SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */ + SUNXI_FUNCTION(0x4, "boot"), /* SEL0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 4)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* MISO */ + SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */ + SUNXI_FUNCTION(0x4, "boot"), /* SEL1 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 5)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), 
/* WP */ + SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */ + SUNXI_FUNCTION(0x4, "uart3"), /* TX */ + SUNXI_FUNCTION(0x5, "i2c3"), /* SCK */ + SUNXI_FUNCTION(0x6, "pll"), /* DBG-CLK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 6)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* HOLD */ + SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */ + SUNXI_FUNCTION(0x4, "uart3"), /* RX */ + SUNXI_FUNCTION(0x5, "i2c3"), /* SDA */ + SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 7)), + /* PD */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V0P */ + SUNXI_FUNCTION(0x4, "dsi"), /* D0P */ + SUNXI_FUNCTION(0x5, "i2c0"), /* SCK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 0)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V0N */ + SUNXI_FUNCTION(0x4, "dsi"), /* D0N */ + SUNXI_FUNCTION(0x5, "uart2"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 1)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V1P */ + SUNXI_FUNCTION(0x4, "dsi"), /* D1P */ + SUNXI_FUNCTION(0x5, "uart2"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 2)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V1N */ + SUNXI_FUNCTION(0x4, "dsi"), /* D1N */ + SUNXI_FUNCTION(0x5, "uart2"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 3)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V2P */ + SUNXI_FUNCTION(0x4, "dsi"), /* CKP */ + SUNXI_FUNCTION(0x5, "uart2"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 4)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V2N */ + SUNXI_FUNCTION(0x4, "dsi"), /* CKN */ + SUNXI_FUNCTION(0x5, "uart5"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 5)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* CKP */ + SUNXI_FUNCTION(0x4, "dsi"), /* D2P */ + SUNXI_FUNCTION(0x5, "uart5"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 6)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* CKN */ + SUNXI_FUNCTION(0x4, "dsi"), /* D2N */ + SUNXI_FUNCTION(0x5, "uart4"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 7)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V3P */ + SUNXI_FUNCTION(0x4, "dsi"), /* D3P */ + SUNXI_FUNCTION(0x5, "uart4"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 8)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */ + SUNXI_FUNCTION(0x3, "lvds0"), /* V3N */ + 
SUNXI_FUNCTION(0x4, "dsi"), /* D3N */ + SUNXI_FUNCTION(0x5, "pwm6"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 9)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V0P */ + SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */ + SUNXI_FUNCTION(0x5, "uart3"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 10)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V0N */ + SUNXI_FUNCTION(0x4, "spi1"), /* CLK */ + SUNXI_FUNCTION(0x5, "uart3"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 11)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V1P */ + SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */ + SUNXI_FUNCTION(0x5, "i2c0"), /* SDA */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 12)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V1N */ + SUNXI_FUNCTION(0x4, "spi1"), /* MISO */ + SUNXI_FUNCTION(0x5, "uart3"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 13)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V2P */ + SUNXI_FUNCTION(0x4, "spi1"), /* HOLD */ + SUNXI_FUNCTION(0x5, "uart3"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 14)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V2N */ + SUNXI_FUNCTION(0x4, "spi1"), /* WP */ + SUNXI_FUNCTION(0x5, "ir"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 15)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */ + SUNXI_FUNCTION(0x4, "dmic"), /* DATA3 */ + SUNXI_FUNCTION(0x5, "pwm0"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 16)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */ + SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */ + SUNXI_FUNCTION(0x4, "dmic"), /* DATA2 */ + SUNXI_FUNCTION(0x5, "pwm1"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 17)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V3P */ + SUNXI_FUNCTION(0x4, "dmic"), /* DATA1 */ + SUNXI_FUNCTION(0x5, "pwm2"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 18)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* DE */ + SUNXI_FUNCTION(0x3, "lvds1"), /* V3N */ + SUNXI_FUNCTION(0x4, "dmic"), /* DATA0 */ + SUNXI_FUNCTION(0x5, "pwm3"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 19)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x4, "dmic"), /* CLK */ + SUNXI_FUNCTION(0x5, "pwm4"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 20)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21), + 
SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */ + SUNXI_FUNCTION(0x4, "uart1"), /* TX */ + SUNXI_FUNCTION(0x5, "pwm5"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 21)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spdif"), /* OUT */ + SUNXI_FUNCTION(0x3, "ir"), /* RX */ + SUNXI_FUNCTION(0x4, "uart1"), /* RX */ + SUNXI_FUNCTION(0x5, "pwm7"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 22)), + /* PE */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* HSYNC */ + SUNXI_FUNCTION(0x3, "uart2"), /* RTS */ + SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */ + SUNXI_FUNCTION(0x5, "lcd0"), /* HSYNC */ + SUNXI_FUNCTION(0x8, "emac"), /* RXCTL/CRS_DV */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 0)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* VSYNC */ + SUNXI_FUNCTION(0x3, "uart2"), /* CTS */ + SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */ + SUNXI_FUNCTION(0x5, "lcd0"), /* VSYNC */ + SUNXI_FUNCTION(0x8, "emac"), /* RXD0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 1)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* PCLK */ + SUNXI_FUNCTION(0x3, "uart2"), /* TX */ + SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */ + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */ + SUNXI_FUNCTION(0x6, "uart0"), /* TX */ + SUNXI_FUNCTION(0x8, "emac"), /* RXD1 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 2)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* MCLK */ + SUNXI_FUNCTION(0x3, "uart2"), /* RX */ + SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */ + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */ + SUNXI_FUNCTION(0x6, "uart0"), /* RX */ + SUNXI_FUNCTION(0x8, "emac"), /* TXCK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 3)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D0 */ + SUNXI_FUNCTION(0x3, "uart4"), /* TX */ + SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */ + SUNXI_FUNCTION(0x6, "d_jtag"), /* MS */ + SUNXI_FUNCTION(0x7, "r_jtag"), /* MS */ + SUNXI_FUNCTION(0x8, "emac"), /* TXD0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 4)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D1 */ + SUNXI_FUNCTION(0x3, "uart4"), /* RX */ + SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */ + SUNXI_FUNCTION(0x5, "ledc"), + SUNXI_FUNCTION(0x6, "d_jtag"), /* DI */ + SUNXI_FUNCTION(0x7, "r_jtag"), /* DI */ + SUNXI_FUNCTION(0x8, "emac"), /* TXD1 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 5)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D2 */ + SUNXI_FUNCTION(0x3, "uart5"), /* TX */ + SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */ + SUNXI_FUNCTION(0x5, "spdif"), /* IN */ + SUNXI_FUNCTION(0x6, "d_jtag"), /* DO */ + SUNXI_FUNCTION(0x7, "r_jtag"), /* DO */ + SUNXI_FUNCTION(0x8, "emac"), /* TXCTL/TXEN */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 6)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D3 */ + 
SUNXI_FUNCTION(0x3, "uart5"), /* RX */ + SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */ + SUNXI_FUNCTION(0x5, "spdif"), /* OUT */ + SUNXI_FUNCTION(0x6, "d_jtag"), /* CK */ + SUNXI_FUNCTION(0x7, "r_jtag"), /* CK */ + SUNXI_FUNCTION(0x8, "emac"), /* CK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 7)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D4 */ + SUNXI_FUNCTION(0x3, "uart1"), /* RTS */ + SUNXI_FUNCTION(0x4, "pwm2"), + SUNXI_FUNCTION(0x5, "uart3"), /* TX */ + SUNXI_FUNCTION(0x6, "jtag"), /* MS */ + SUNXI_FUNCTION(0x8, "emac"), /* MDC */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 8)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D5 */ + SUNXI_FUNCTION(0x3, "uart1"), /* CTS */ + SUNXI_FUNCTION(0x4, "pwm3"), + SUNXI_FUNCTION(0x5, "uart3"), /* RX */ + SUNXI_FUNCTION(0x6, "jtag"), /* DI */ + SUNXI_FUNCTION(0x8, "emac"), /* MDIO */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 9)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D6 */ + SUNXI_FUNCTION(0x3, "uart1"), /* TX */ + SUNXI_FUNCTION(0x4, "pwm4"), + SUNXI_FUNCTION(0x5, "ir"), /* RX */ + SUNXI_FUNCTION(0x6, "jtag"), /* DO */ + SUNXI_FUNCTION(0x8, "emac"), /* EPHY-25M */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 10)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ncsi0"), /* D7 */ + SUNXI_FUNCTION(0x3, "uart1"), /* RX */ + SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT3 */ + SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN3 */ + SUNXI_FUNCTION(0x6, "jtag"), /* CK */ + SUNXI_FUNCTION(0x8, "emac"), /* TXD2 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 11)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x3, "ncsi0"), /* FIELD */ + SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT2 */ + SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN2 */ + SUNXI_FUNCTION(0x8, "emac"), /* TXD3 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 12)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c2"), /* SDA */ + SUNXI_FUNCTION(0x3, "pwm5"), + SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT0 */ + SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN1 */ + SUNXI_FUNCTION(0x6, "dmic"), /* DATA3 */ + SUNXI_FUNCTION(0x8, "emac"), /* RXD2 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 13)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */ + SUNXI_FUNCTION(0x3, "d_jtag"), /* MS */ + SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT1 */ + SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN0 */ + SUNXI_FUNCTION(0x6, "dmic"), /* DATA2 */ + SUNXI_FUNCTION(0x8, "emac"), /* RXD3 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 14)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */ + SUNXI_FUNCTION(0x3, "d_jtag"), /* DI */ + SUNXI_FUNCTION(0x4, "pwm6"), + SUNXI_FUNCTION(0x5, "i2s0"), /* LRCK */ + SUNXI_FUNCTION(0x6, "dmic"), /* DATA1 */ + SUNXI_FUNCTION(0x8, "emac"), /* RXCK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 15)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */ + 
SUNXI_FUNCTION(0x3, "d_jtag"), /* DO */ + SUNXI_FUNCTION(0x4, "pwm7"), + SUNXI_FUNCTION(0x5, "i2s0"), /* BCLK */ + SUNXI_FUNCTION(0x6, "dmic"), /* DATA0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 16)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */ + SUNXI_FUNCTION(0x3, "d_jtag"), /* CK */ + SUNXI_FUNCTION(0x4, "ir"), /* TX */ + SUNXI_FUNCTION(0x5, "i2s0"), /* MCLK */ + SUNXI_FUNCTION(0x6, "dmic"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 17)), + /* PF */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */ + SUNXI_FUNCTION(0x3, "jtag"), /* MS */ + SUNXI_FUNCTION(0x4, "r_jtag"), /* MS */ + SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT1 */ + SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 0)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */ + SUNXI_FUNCTION(0x3, "jtag"), /* DI */ + SUNXI_FUNCTION(0x4, "r_jtag"), /* DI */ + SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT0 */ + SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN1 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 1)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */ + SUNXI_FUNCTION(0x3, "uart0"), /* TX */ + SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */ + SUNXI_FUNCTION(0x5, "ledc"), + SUNXI_FUNCTION(0x6, "spdif"), /* IN */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 2)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */ + SUNXI_FUNCTION(0x3, "jtag"), /* DO */ + SUNXI_FUNCTION(0x4, "r_jtag"), /* DO */ + SUNXI_FUNCTION(0x5, "i2s2"), /* BCLK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 3)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */ + SUNXI_FUNCTION(0x3, "uart0"), /* RX */ + SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */ + SUNXI_FUNCTION(0x5, "pwm6"), + SUNXI_FUNCTION(0x6, "ir"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 4)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */ + SUNXI_FUNCTION(0x3, "jtag"), /* CK */ + SUNXI_FUNCTION(0x4, "r_jtag"), /* CK */ + SUNXI_FUNCTION(0x5, "i2s2"), /* LRCK */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 5)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "spdif"), /* OUT */ + SUNXI_FUNCTION(0x4, "ir"), /* RX */ + SUNXI_FUNCTION(0x5, "i2s2"), /* MCLK */ + SUNXI_FUNCTION(0x6, "pwm5"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 6)), + /* PG */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */ + SUNXI_FUNCTION(0x3, "uart3"), /* TX */ + SUNXI_FUNCTION(0x4, "emac"), /* RXCTRL/CRS_DV */ + SUNXI_FUNCTION(0x5, "pwm7"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 0)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */ + SUNXI_FUNCTION(0x3, "uart3"), /* RX */ + SUNXI_FUNCTION(0x4, "emac"), /* RXD0 */ + SUNXI_FUNCTION(0x5, "pwm6"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 1)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2), + SUNXI_FUNCTION(0x0, 
"gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */ + SUNXI_FUNCTION(0x3, "uart3"), /* RTS */ + SUNXI_FUNCTION(0x4, "emac"), /* RXD1 */ + SUNXI_FUNCTION(0x5, "uart4"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 2)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */ + SUNXI_FUNCTION(0x3, "uart3"), /* CTS */ + SUNXI_FUNCTION(0x4, "emac"), /* TXCK */ + SUNXI_FUNCTION(0x5, "uart4"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 3)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */ + SUNXI_FUNCTION(0x3, "uart5"), /* TX */ + SUNXI_FUNCTION(0x4, "emac"), /* TXD0 */ + SUNXI_FUNCTION(0x5, "pwm5"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 4)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */ + SUNXI_FUNCTION(0x3, "uart5"), /* RX */ + SUNXI_FUNCTION(0x4, "emac"), /* TXD1 */ + SUNXI_FUNCTION(0x5, "pwm4"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 5)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* TX */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x4, "emac"), /* TXD2 */ + SUNXI_FUNCTION(0x5, "pwm1"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 6)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* RX */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */ + SUNXI_FUNCTION(0x4, "emac"), /* TXD3 */ + SUNXI_FUNCTION(0x5, "spdif"), /* IN */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 7)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* RTS */ + SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */ + SUNXI_FUNCTION(0x4, "emac"), /* RXD2 */ + SUNXI_FUNCTION(0x5, "uart3"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 8)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* CTS */ + SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */ + SUNXI_FUNCTION(0x4, "emac"), /* RXD3 */ + SUNXI_FUNCTION(0x5, "uart3"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 9)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "pwm3"), + SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */ + SUNXI_FUNCTION(0x4, "emac"), /* RXCK */ + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */ + SUNXI_FUNCTION(0x6, "ir"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 10)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* MCLK */ + SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */ + SUNXI_FUNCTION(0x4, "emac"), /* EPHY-25M */ + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */ + SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 11)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* LRCK */ + SUNXI_FUNCTION(0x3, "i2c0"), /* SCK */ + SUNXI_FUNCTION(0x4, "emac"), /* TXCTL/TXEN */ + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */ + SUNXI_FUNCTION(0x6, "pwm0"), + SUNXI_FUNCTION(0x7, "uart1"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 12)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13), + SUNXI_FUNCTION(0x0, 
"gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* BCLK */ + SUNXI_FUNCTION(0x3, "i2c0"), /* SDA */ + SUNXI_FUNCTION(0x4, "emac"), /* CLKIN/RXER */ + SUNXI_FUNCTION(0x5, "pwm2"), + SUNXI_FUNCTION(0x6, "ledc"), + SUNXI_FUNCTION(0x7, "uart1"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 13)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1_din"), /* DIN0 */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */ + SUNXI_FUNCTION(0x4, "emac"), /* MDC */ + SUNXI_FUNCTION(0x5, "i2s1_dout"), /* DOUT1 */ + SUNXI_FUNCTION(0x6, "spi0"), /* WP */ + SUNXI_FUNCTION(0x7, "uart1"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 14)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1_dout"), /* DOUT0 */ + SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */ + SUNXI_FUNCTION(0x4, "emac"), /* MDIO */ + SUNXI_FUNCTION(0x5, "i2s1_din"), /* DIN1 */ + SUNXI_FUNCTION(0x6, "spi0"), /* HOLD */ + SUNXI_FUNCTION(0x7, "uart1"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 15)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "ir"), /* RX */ + SUNXI_FUNCTION(0x3, "tcon"), /* TRIG0 */ + SUNXI_FUNCTION(0x4, "pwm5"), + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */ + SUNXI_FUNCTION(0x6, "spdif"), /* IN */ + SUNXI_FUNCTION(0x7, "ledc"), + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 16)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* TX */ + SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */ + SUNXI_FUNCTION(0x4, "pwm7"), + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */ + SUNXI_FUNCTION(0x6, "ir"), /* TX */ + SUNXI_FUNCTION(0x7, "uart0"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 17)), + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* RX */ + SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */ + SUNXI_FUNCTION(0x4, "pwm6"), + SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */ + SUNXI_FUNCTION(0x6, "spdif"), /* OUT */ + SUNXI_FUNCTION(0x7, "uart0"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 18)), +}; + +static const unsigned int d1_irq_bank_map[] = { 1, 2, 3, 4, 5, 6 }; + +static const struct sunxi_pinctrl_desc d1_pinctrl_data = { + .pins = d1_pins, + .npins = ARRAY_SIZE(d1_pins), + .irq_banks = ARRAY_SIZE(d1_irq_bank_map), + .irq_bank_map = d1_irq_bank_map, + .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL, +}; + +static int d1_pinctrl_probe(struct platform_device *pdev) +{ + unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev); + + return sunxi_pinctrl_init_with_variant(pdev, &d1_pinctrl_data, variant); +} + +static const struct of_device_id d1_pinctrl_match[] = { + { + .compatible = "allwinner,sun20i-d1-pinctrl", + .data = (void *)PINCTRL_SUN20I_D1 + }, + {} +}; + +static struct platform_driver d1_pinctrl_driver = { + .probe = d1_pinctrl_probe, + .driver = { + .name = "sun20i-d1-pinctrl", + .of_match_table = d1_pinctrl_match, + }, +}; +builtin_platform_driver(d1_pinctrl_driver); diff --git a/drivers/platform/chrome/chromeos_acpi.c b/drivers/platform/chrome/chromeos_acpi.c new file mode 100644 index 0000000000..50d8a4d435 --- /dev/null +++ b/drivers/platform/chrome/chromeos_acpi.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ChromeOS specific ACPI extensions + * + * Copyright 2022 Google LLC + * 
+ * This driver attaches to the ChromeOS ACPI device and then exports the
+ * values reported by the ACPI in a sysfs directory. All values are
+ * presented in the string form (numbers as decimal values) and can be
+ * accessed as the contents of the appropriate read only files in the
+ * sysfs directory tree.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define ACPI_ATTR_NAME_LEN 4
+
+#define DEV_ATTR(_var, _name) \
+	static struct device_attribute dev_attr_##_var = \
+		__ATTR(_name, 0444, chromeos_first_level_attr_show, NULL);
+
+#define GPIO_ATTR_GROUP(_group, _name, _num) \
+	static umode_t attr_is_visible_gpio_##_num(struct kobject *kobj, \
+						   struct attribute *attr, int n) \
+	{ \
+		if (_num < chromeos_acpi_gpio_groups) \
+			return attr->mode; \
+		return 0; \
+	} \
+	static ssize_t chromeos_attr_show_gpio_##_num(struct device *dev, \
+						      struct device_attribute *attr, \
+						      char *buf) \
+	{ \
+		char name[ACPI_ATTR_NAME_LEN + 1]; \
+		int ret, num; \
+ \
+		ret = parse_attr_name(attr->attr.name, name, &num); \
+		if (ret) \
+			return ret; \
+		return chromeos_acpi_evaluate_method(dev, _num, num, name, buf); \
+	} \
+	static struct device_attribute dev_attr_0_##_group = \
+		__ATTR(GPIO.0, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+	static struct device_attribute dev_attr_1_##_group = \
+		__ATTR(GPIO.1, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+	static struct device_attribute dev_attr_2_##_group = \
+		__ATTR(GPIO.2, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+	static struct device_attribute dev_attr_3_##_group = \
+		__ATTR(GPIO.3, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+ \
+	static struct attribute *attrs_##_group[] = { \
+		&dev_attr_0_##_group.attr, \
+		&dev_attr_1_##_group.attr, \
+		&dev_attr_2_##_group.attr, \
+		&dev_attr_3_##_group.attr, \
+		NULL \
+	}; \
+	static const struct attribute_group attr_group_##_group = { \
+		.name = _name, \
+		.is_visible = attr_is_visible_gpio_##_num, \
+		.attrs = attrs_##_group, \
+	};
+
+static unsigned int chromeos_acpi_gpio_groups;
+
+/* Parse the ACPI package and return the data related to that attribute */
+static int chromeos_acpi_handle_package(struct device *dev, union acpi_object *obj,
+					int pkg_num, int sub_pkg_num, char *name, char *buf)
+{
+	union acpi_object *element = obj->package.elements;
+
+	if (pkg_num >= obj->package.count)
+		return -EINVAL;
+	element += pkg_num;
+
+	if (element->type == ACPI_TYPE_PACKAGE) {
+		if (sub_pkg_num >= element->package.count)
+			return -EINVAL;
+		/* select sub element inside this package */
+		element = element->package.elements;
+		element += sub_pkg_num;
+	}
+
+	switch (element->type) {
+	case ACPI_TYPE_INTEGER:
+		return sysfs_emit(buf, "%d\n", (int)element->integer.value);
+	case ACPI_TYPE_STRING:
+		return sysfs_emit(buf, "%s\n", element->string.pointer);
+	case ACPI_TYPE_BUFFER:
+		return sysfs_emit(buf, "%s\n", element->buffer.pointer);
+	default:
+		dev_err(dev, "element type %d not supported\n", element->type);
+		return -EINVAL;
+	}
+}
+
+static int chromeos_acpi_evaluate_method(struct device *dev, int pkg_num, int sub_pkg_num,
+					 char *name, char *buf)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	acpi_status status;
+	int ret = -EINVAL;
+
+	status = acpi_evaluate_object(ACPI_HANDLE(dev), name, NULL, &output);
+	if (ACPI_FAILURE(status)) {
+		dev_err(dev, "failed to retrieve %s. %s\n", name, acpi_format_exception(status));
+		return ret;
+	}
+
+	if (((union acpi_object *)output.pointer)->type == ACPI_TYPE_PACKAGE)
+		ret = chromeos_acpi_handle_package(dev, output.pointer, pkg_num, sub_pkg_num,
+						   name, buf);
+
+	kfree(output.pointer);
+	return ret;
+}
+
+static int parse_attr_name(const char *name, char *attr_name, int *attr_num)
+{
+	int ret;
+
+	ret = strscpy(attr_name, name, ACPI_ATTR_NAME_LEN + 1);
+	if (ret == -E2BIG)
+		return kstrtoint(&name[ACPI_ATTR_NAME_LEN + 1], 0, attr_num);
+	return 0;
+}
+
+static ssize_t chromeos_first_level_attr_show(struct device *dev, struct device_attribute *attr,
+					      char *buf)
+{
+	char attr_name[ACPI_ATTR_NAME_LEN + 1];
+	int ret, attr_num = 0;
+
+	ret = parse_attr_name(attr->attr.name, attr_name, &attr_num);
+	if (ret)
+		return ret;
+	return chromeos_acpi_evaluate_method(dev, attr_num, 0, attr_name, buf);
+}
+
+static unsigned int get_gpio_pkg_num(struct device *dev)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_status status;
+	unsigned int count = 0;
+	char *name = "GPIO";
+
+	status = acpi_evaluate_object(ACPI_HANDLE(dev), name, NULL, &output);
+	if (ACPI_FAILURE(status)) {
+		dev_err(dev, "failed to retrieve %s. %s\n", name, acpi_format_exception(status));
+		return count;
+	}
+
+	obj = output.pointer;
+
+	if (obj->type == ACPI_TYPE_PACKAGE)
+		count = obj->package.count;
+
+	kfree(output.pointer);
+	return count;
+}
+
+DEV_ATTR(binf2, BINF.2)
+DEV_ATTR(binf3, BINF.3)
+DEV_ATTR(chsw, CHSW)
+DEV_ATTR(fmap, FMAP)
+DEV_ATTR(frid, FRID)
+DEV_ATTR(fwid, FWID)
+DEV_ATTR(hwid, HWID)
+DEV_ATTR(meck, MECK)
+DEV_ATTR(vbnv0, VBNV.0)
+DEV_ATTR(vbnv1, VBNV.1)
+DEV_ATTR(vdat, VDAT)
+
+static struct attribute *first_level_attrs[] = {
+	&dev_attr_binf2.attr,
+	&dev_attr_binf3.attr,
+	&dev_attr_chsw.attr,
+	&dev_attr_fmap.attr,
+	&dev_attr_frid.attr,
+	&dev_attr_fwid.attr,
+	&dev_attr_hwid.attr,
+	&dev_attr_meck.attr,
+	&dev_attr_vbnv0.attr,
+	&dev_attr_vbnv1.attr,
+	&dev_attr_vdat.attr,
+	NULL
+};
+
+static const struct attribute_group first_level_attr_group = {
+	.attrs = first_level_attrs,
+};
+
+/*
+ * Every platform can have a different number of GPIO attribute groups.
+ * Define the upper limit of the attribute groups here. At run time, the
+ * platform shows only the groups that are present; the others are hidden.
+ */
+GPIO_ATTR_GROUP(gpio0, "GPIO.0", 0)
+GPIO_ATTR_GROUP(gpio1, "GPIO.1", 1)
+GPIO_ATTR_GROUP(gpio2, "GPIO.2", 2)
+GPIO_ATTR_GROUP(gpio3, "GPIO.3", 3)
+GPIO_ATTR_GROUP(gpio4, "GPIO.4", 4)
+GPIO_ATTR_GROUP(gpio5, "GPIO.5", 5)
+GPIO_ATTR_GROUP(gpio6, "GPIO.6", 6)
+GPIO_ATTR_GROUP(gpio7, "GPIO.7", 7)
+
+static const struct attribute_group *chromeos_acpi_all_groups[] = {
+	&first_level_attr_group,
+	&attr_group_gpio0,
+	&attr_group_gpio1,
+	&attr_group_gpio2,
+	&attr_group_gpio3,
+	&attr_group_gpio4,
+	&attr_group_gpio5,
+	&attr_group_gpio6,
+	&attr_group_gpio7,
+	NULL
+};
+
+static int chromeos_acpi_device_probe(struct platform_device *pdev)
+{
+	chromeos_acpi_gpio_groups = get_gpio_pkg_num(&pdev->dev);
+
+	/*
+	 * If the platform has more GPIO attribute groups than the number of
+	 * groups this driver supports, print a warning message.
+	 */
+	if (chromeos_acpi_gpio_groups > ARRAY_SIZE(chromeos_acpi_all_groups) - 2)
+		dev_warn(&pdev->dev, "Only %zu GPIO attr groups supported by the driver out of total %u.\n",
+			 ARRAY_SIZE(chromeos_acpi_all_groups) - 2, chromeos_acpi_gpio_groups);
+	return 0;
+}
+
+/* GGL is a valid PNP ID of Google. PNP IDs can be used with ACPI devices. */
+static const struct acpi_device_id chromeos_device_ids[] = {
+	{ "GGL0001", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, chromeos_device_ids);
+
+static struct platform_driver chromeos_acpi_device_driver = {
+	.probe = chromeos_acpi_device_probe,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.dev_groups = chromeos_acpi_all_groups,
+		.acpi_match_table = chromeos_device_ids,
+	}
+};
+module_platform_driver(chromeos_acpi_device_driver);
+
+MODULE_AUTHOR("Muhammad Usama Anjum <usama.anjum@collabora.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS specific ACPI extensions");
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
new file mode 100644
index 0000000000..c6a83df91a
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit tests for ChromeOS Embedded Controller protocol.
+ */
+
+#include <kunit/test.h>
+
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+#define BUFSIZE 512
+
+struct cros_ec_proto_test_priv {
+	struct cros_ec_device ec_dev;
+	u8 dout[BUFSIZE];
+	u8 din[BUFSIZE];
+	struct cros_ec_command *msg;
+	u8 _msg[BUFSIZE];
+};
+
+static void cros_ec_proto_test_prepare_tx_legacy_normal(struct kunit *test)
+{
+	struct cros_ec_proto_test_priv *priv = test->priv;
+	struct cros_ec_device *ec_dev = &priv->ec_dev;
+	struct cros_ec_command *msg = priv->msg;
+	int ret, i;
+	u8 csum;
+
+	ec_dev->proto_version = 2;
+
+	msg->command = EC_CMD_HELLO;
+	msg->outsize = EC_PROTO2_MAX_PARAM_SIZE;
+	msg->data[0] = 0xde;
+	msg->data[1] = 0xad;
+	msg->data[2] = 0xbe;
+	msg->data[3] = 0xef;
+
+	ret = cros_ec_prepare_tx(ec_dev, msg);
+
+	KUNIT_EXPECT_EQ(test, ret, EC_MSG_TX_PROTO_BYTES + EC_PROTO2_MAX_PARAM_SIZE);
+	KUNIT_EXPECT_EQ(test, ec_dev->dout[0], EC_CMD_VERSION0);
+	KUNIT_EXPECT_EQ(test, ec_dev->dout[1], EC_CMD_HELLO);
+	KUNIT_EXPECT_EQ(test, ec_dev->dout[2], EC_PROTO2_MAX_PARAM_SIZE);
+	KUNIT_EXPECT_EQ(test, EC_MSG_TX_HEADER_BYTES, 3);
+	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 0], 0xde);
+	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 1], 0xad);
+	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 2], 0xbe);
+	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 3], 0xef);
+	for (i = 4; i < EC_PROTO2_MAX_PARAM_SIZE; ++i)
+		KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + i], 0);
+
+	csum = EC_CMD_VERSION0;
+	csum += EC_CMD_HELLO;
+	csum += EC_PROTO2_MAX_PARAM_SIZE;
+	csum += 0xde;
+	csum += 0xad;
+	csum += 0xbe;
+	csum += 0xef;
+	KUNIT_EXPECT_EQ(test,
+			ec_dev->dout[EC_MSG_TX_HEADER_BYTES + EC_PROTO2_MAX_PARAM_SIZE],
+			csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize(struct kunit *test)
+{
+	struct cros_ec_proto_test_priv *priv = test->priv;
+	struct cros_ec_device *ec_dev = &priv->ec_dev;
+	struct cros_ec_command *msg = priv->msg;
+	int ret;
+
+	ec_dev->proto_version = 2;
+
+	msg->outsize = EC_PROTO2_MAX_PARAM_SIZE + 1;
+
+	ret = cros_ec_prepare_tx(ec_dev, msg);
+	KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_prepare_tx_normal(struct kunit *test)
+{
+	struct cros_ec_proto_test_priv *priv = test->priv;
+	struct cros_ec_device *ec_dev = &priv->ec_dev;
+	struct cros_ec_command *msg = priv->msg;
+	struct ec_host_request *request = (struct ec_host_request *)ec_dev->dout;
+	int ret, i;
+	u8 csum;
+
+	msg->command = EC_CMD_HELLO;
+	msg->outsize = 0x88;
+	msg->data[0] = 0xde;
+	msg->data[1] = 0xad;
+	msg->data[2] = 0xbe;
+	msg->data[3] = 0xef;
+
+	ret = 
cros_ec_prepare_tx(ec_dev, msg); + + KUNIT_EXPECT_EQ(test, ret, sizeof(*request) + 0x88); + + KUNIT_EXPECT_EQ(test, request->struct_version, EC_HOST_REQUEST_VERSION); + KUNIT_EXPECT_EQ(test, request->command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, request->command_version, 0); + KUNIT_EXPECT_EQ(test, request->data_len, 0x88); + KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 0], 0xde); + KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 1], 0xad); + KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 2], 0xbe); + KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 3], 0xef); + for (i = 4; i < 0x88; ++i) + KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + i], 0); + + csum = EC_HOST_REQUEST_VERSION; + csum += EC_CMD_HELLO; + csum += 0x88; + csum += 0xde; + csum += 0xad; + csum += 0xbe; + csum += 0xef; + KUNIT_EXPECT_EQ(test, request->checksum, (u8)-csum); +} + +static void cros_ec_proto_test_prepare_tx_bad_msg_outsize(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct cros_ec_command *msg = priv->msg; + int ret; + + msg->outsize = ec_dev->dout_size - sizeof(struct ec_host_request) + 1; + + ret = cros_ec_prepare_tx(ec_dev, msg); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); +} + +static void cros_ec_proto_test_check_result(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct cros_ec_command *msg = priv->msg; + int ret, i; + static enum ec_status status[] = { + EC_RES_SUCCESS, + EC_RES_INVALID_COMMAND, + EC_RES_ERROR, + EC_RES_INVALID_PARAM, + EC_RES_ACCESS_DENIED, + EC_RES_INVALID_RESPONSE, + EC_RES_INVALID_VERSION, + EC_RES_INVALID_CHECKSUM, + EC_RES_UNAVAILABLE, + EC_RES_TIMEOUT, + EC_RES_OVERFLOW, + EC_RES_INVALID_HEADER, + EC_RES_REQUEST_TRUNCATED, + EC_RES_RESPONSE_TOO_BIG, + EC_RES_BUS_ERROR, + EC_RES_BUSY, + EC_RES_INVALID_HEADER_VERSION, + EC_RES_INVALID_HEADER_CRC, + EC_RES_INVALID_DATA_CRC, + EC_RES_DUP_UNAVAILABLE, + }; + + for (i = 0; i < ARRAY_SIZE(status); ++i) { + msg->result = status[i]; + ret = cros_ec_check_result(ec_dev, msg); + KUNIT_EXPECT_EQ(test, ret, 0); + } + + msg->result = EC_RES_IN_PROGRESS; + ret = cros_ec_check_result(ec_dev, msg); + KUNIT_EXPECT_EQ(test, ret, -EAGAIN); +} + +static void cros_ec_proto_test_query_all_pretest(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + + /* + * cros_ec_query_all() will free din and dout and allocate them again to fit the usage by + * calling devm_kfree() and devm_kzalloc(). Set them to NULL as they aren't managed by + * ec_dev->dev but allocated statically in struct cros_ec_proto_test_priv + * (see cros_ec_proto_test_init()). + */ + ec_dev->din = NULL; + ec_dev->dout = NULL; +} + +static void cros_ec_proto_test_query_all_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->protocol_versions = BIT(3) | BIT(2); + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. 
*/ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbf; + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + struct ec_response_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_cmd_versions *)mock->o_data; + data->version_mask = BIT(6) | BIT(5); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + struct ec_response_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_cmd_versions *)mock->o_data; + data->version_mask = BIT(1); + } + + /* For cros_ec_get_host_event_wake_mask(). */ + { + struct ec_response_host_event_mask *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_host_event_mask *)mock->o_data; + data->mask = 0xbeef; + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + + KUNIT_EXPECT_EQ(test, ec_dev->max_request, 0xbe - sizeof(struct ec_host_request)); + KUNIT_EXPECT_EQ(test, ec_dev->max_response, 0xef - sizeof(struct ec_host_response)); + KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 3); + KUNIT_EXPECT_EQ(test, ec_dev->din_size, 0xef + EC_MAX_RESPONSE_OVERHEAD); + KUNIT_EXPECT_EQ(test, ec_dev->dout_size, 0xbe + EC_MAX_REQUEST_OVERHEAD); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + + KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0xbf - sizeof(struct ec_host_request)); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + struct ec_params_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_get_cmd_versions *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT); + + KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 7); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. 
*/ + { + struct ec_params_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_get_cmd_versions *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_HOST_SLEEP_EVENT); + + KUNIT_EXPECT_TRUE(test, ec_dev->host_sleep_v1); + } + + /* For cros_ec_get_host_event_wake_mask(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + + KUNIT_EXPECT_EQ(test, ec_dev->host_event_wake_mask, 0xbeef); + } +} + +static void cros_ec_proto_test_query_all_no_pd_return_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->max_passthru = 0xbf; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + + KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0); + } +} + +static void cros_ec_proto_test_query_all_no_pd_return0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->max_passthru = 0xbf; + + /* For cros_ec_get_proto_info() without passthru. 
*/ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + + KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0); + } +} + +static void cros_ec_proto_test_query_all_legacy_normal_v3_return_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + struct ec_response_hello *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_hello *)mock->o_data; + data->out_data = 0xa1b2c3d4; + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info_legacy(). 
*/ + { + struct ec_params_hello *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_hello *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0); + + KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2); + KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE); + KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE); + KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0); + KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL); + KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES); + KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES); + } +} + +static void cros_ec_proto_test_query_all_legacy_normal_v3_return0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + struct ec_response_hello *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_hello *)mock->o_data; + data->out_data = 0xa1b2c3d4; + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + struct ec_params_hello *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_hello *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0); + + KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2); + KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE); + KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE); + KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0); + KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL); + KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES); + KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES); + } +} + +static void cros_ec_proto_test_query_all_legacy_xfer_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_proto_info_legacy(). 
*/ + { + mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, -EIO); + KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello)); + } +} + +static void cros_ec_proto_test_query_all_legacy_return_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP); + KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello)); + } +} + +static void cros_ec_proto_test_query_all_legacy_data_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_proto_info_legacy(). 
*/ + { + struct ec_response_hello *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_hello *)mock->o_data; + data->out_data = 0xbeefbfbf; + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, -EBADMSG); + KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello)); + } +} + +static void cros_ec_proto_test_query_all_legacy_return0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, -EPROTO); + KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info_legacy(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello)); + } +} + +static void cros_ec_proto_test_query_all_no_mkbp(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->mkbp_event_supported = 0xbf; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. 
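+ * The 0xbe and 0xef sizes are arbitrary nonzero values.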
+ */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + struct ec_response_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_cmd_versions *)mock->o_data; + data->version_mask = 0; + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + struct ec_params_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_get_cmd_versions *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT); + + KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0); + } +} + +static void cros_ec_proto_test_query_all_no_mkbp_return_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->mkbp_event_supported = 0xbf; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. 
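+ * The EC rejects EC_CMD_GET_CMD_VERSIONS here; cros_ec_query_all() is + * expected to treat that as "no MKBP support" rather than a fatal error.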
*/ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + struct ec_params_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_get_cmd_versions *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT); + + KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0); + } +} + +static void cros_ec_proto_test_query_all_no_mkbp_return0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->mkbp_event_supported = 0xbf; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. 
*/ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + struct ec_params_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_get_cmd_versions *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT); + + KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0); + } +} + +static void cros_ec_proto_test_query_all_no_host_sleep(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->host_sleep_v1 = true; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + struct ec_response_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_cmd_versions *)mock->o_data; + data->version_mask = 0; + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. 
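+ * Both version-mask queries share this request shape; the host sleep v1 + * block below additionally checks that the flag was cleared.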
*/ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + + KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1); + } +} + +static void cros_ec_proto_test_query_all_no_host_sleep_return0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->host_sleep_v1 = true; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + struct ec_response_get_cmd_versions *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* In order to pollute next cros_ec_get_host_command_version_mask(). */ + data = (struct ec_response_get_cmd_versions *)mock->o_data; + data->version_mask = 0xbeef; + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. 
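+ * This query was mocked to succeed with the 0xbeef mask on purpose, so + * the zero-length host sleep reply below must actively clear the flag.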
*/ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + + KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1); + } +} + +static void cros_ec_proto_test_query_all_default_wake_mask_return_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->host_event_wake_mask = U32_MAX; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_event_wake_mask(). */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. 
*/ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + } + + /* For cros_ec_get_host_event_wake_mask(). */ + { + u32 mask; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + + mask = ec_dev->host_event_wake_mask; + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0); + } +} + +static void cros_ec_proto_test_query_all_default_wake_mask_return0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + + /* Set some garbage bytes. */ + ec_dev->host_event_wake_mask = U32_MAX; + + /* For cros_ec_get_proto_info() without passthru. */ + { + struct ec_response_get_protocol_info *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + /* + * Although it doesn't check the value, provides valid sizes so that + * cros_ec_query_all() allocates din and dout correctly. + */ + data = (struct ec_response_get_protocol_info *)mock->o_data; + data->max_request_packet_size = 0xbe; + data->max_response_packet_size = 0xef; + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For get_host_event_wake_mask(). */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + cros_ec_proto_test_query_all_pretest(test); + ret = cros_ec_query_all(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* For cros_ec_get_proto_info() without passthru. 
*/ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_proto_info() with passthru. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, + EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) | + EC_CMD_GET_PROTOCOL_INFO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_protocol_info)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + /* For cros_ec_get_host_command_version_mask() for MKBP. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + } + + /* For cros_ec_get_host_command_version_mask() for host sleep v1. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_cmd_versions)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions)); + } + + /* For get_host_event_wake_mask(). */ + { + u32 mask; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + + mask = ec_dev->host_event_wake_mask; + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0); + KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0); + } +} + +static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct { + struct cros_ec_command msg; + u8 data[0x100]; + } __packed buf; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->max_passthru = 0xdd; + + buf.msg.version = 0; + buf.msg.command = EC_CMD_HELLO; + buf.msg.insize = 4; + buf.msg.outsize = 2; + buf.data[0] = 0x55; + buf.data[1] = 0xaa; + + { + u8 *data; + + mock = cros_kunit_ec_xfer_mock_add(test, 4); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (u8 *)mock->o_data; + data[0] = 0xaa; + data[1] = 0x55; + data[2] = 0xcc; + data[3] = 0x33; + } + + ret = cros_ec_cmd_xfer(ec_dev, &buf.msg); + KUNIT_EXPECT_EQ(test, 
ret, 4); + + { + u8 *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, 4); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2); + + data = (u8 *)mock->i_data; + KUNIT_EXPECT_EQ(test, data[0], 0x55); + KUNIT_EXPECT_EQ(test, data[1], 0xaa); + + KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa); + KUNIT_EXPECT_EQ(test, buf.data[1], 0x55); + KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc); + KUNIT_EXPECT_EQ(test, buf.data[3], 0x33); + } +} + +static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct { + struct cros_ec_command msg; + u8 data[0x100]; + } __packed buf; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->max_passthru = 0xdd; + + buf.msg.version = 0; + buf.msg.command = EC_CMD_HELLO; + buf.msg.insize = 0xee + 1; + buf.msg.outsize = 2; + + { + mock = cros_kunit_ec_xfer_mock_add(test, 0xcc); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + ret = cros_ec_cmd_xfer(ec_dev, &buf.msg); + KUNIT_EXPECT_EQ(test, ret, 0xcc); + + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO); + KUNIT_EXPECT_EQ(test, mock->msg.insize, 0xee); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2); + } +} + +static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + struct { + struct cros_ec_command msg; + u8 data[0x100]; + } __packed buf; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->max_passthru = 0xdd; + + buf.msg.version = 0; + buf.msg.command = EC_CMD_HELLO; + buf.msg.insize = 4; + buf.msg.outsize = 0xff + 1; + + ret = cros_ec_cmd_xfer(ec_dev, &buf.msg); + KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE); +} + +static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + struct { + struct cros_ec_command msg; + u8 data[0x100]; + } __packed buf; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->max_passthru = 0xdd; + + buf.msg.version = 0; + buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO; + buf.msg.insize = 4; + buf.msg.outsize = 0xdd + 1; + + ret = cros_ec_cmd_xfer(ec_dev, &buf.msg); + KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE); +} + +static void cros_ec_proto_test_cmd_xfer_protocol_v3_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->proto_version = 3; + ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock; + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 0); + KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 1); +} + +static void cros_ec_proto_test_cmd_xfer_protocol_v3_no_op(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + 
struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->proto_version = 3; + ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock; + ec_dev->pkt_xfer = NULL; + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, -EIO); +} + +static void cros_ec_proto_test_cmd_xfer_protocol_v2_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->proto_version = 2; + ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock; + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 1); + KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 0); +} + +static void cros_ec_proto_test_cmd_xfer_protocol_v2_no_op(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->proto_version = 2; + ec_dev->cmd_xfer = NULL; + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, -EIO); +} + +static void cros_ec_proto_test_cmd_xfer_in_progress_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + /* For the first host command to return EC_RES_IN_PROGRESS. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For EC_CMD_GET_COMMS_STATUS. */ + { + struct ec_response_get_comms_status *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_comms_status *)mock->o_data; + data->flags = 0; + } + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_comms_status)); + + KUNIT_EXPECT_EQ(test, msg.result, EC_RES_SUCCESS); + + /* For the first host command to return EC_RES_IN_PROGRESS. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + } + + /* For EC_CMD_GET_COMMS_STATUS. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_COMMS_STATUS); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_comms_status)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } + + KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2); +} + +static void cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + /* For the first host command to return EC_RES_IN_PROGRESS. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. 
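+ * No mocks are queued for the status polls; the default -EAGAIN return + * fails every one of them, giving 51 transfer calls in total (the + * initial command plus EC_COMMAND_RETRIES polls).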
*/ + cros_kunit_ec_xfer_mock_default_ret = -EAGAIN; + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, -EAGAIN); + + /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */ + KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51); +} + +static void cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + /* For the first host command to return EC_RES_IN_PROGRESS. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */ + { + struct ec_response_get_comms_status *data; + int i; + + for (i = 0; i < 50; ++i) { + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_comms_status *)mock->o_data; + data->flags |= EC_COMMS_STATUS_PROCESSING; + } + } + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, -EAGAIN); + + /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */ + KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51); +} + +static void cros_ec_proto_test_cmd_xfer_in_progress_xfer_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + /* For the first host command to return EC_RES_IN_PROGRESS. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For EC_CMD_GET_COMMS_STATUS. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, -EIO); +} + +static void cros_ec_proto_test_cmd_xfer_in_progress_return_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + /* For the first host command to return EC_RES_IN_PROGRESS. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For EC_CMD_GET_COMMS_STATUS. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_EQ(test, msg.result, EC_RES_INVALID_COMMAND); + + KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2); +} + +static void cros_ec_proto_test_cmd_xfer_in_progress_return0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock; + + /* For the first host command to return EC_RES_IN_PROGRESS. 
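+ * The follow-up EC_CMD_GET_COMMS_STATUS reply below is empty, which is + * expected to be reported as -EPROTO.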
*/ + { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For EC_CMD_GET_COMMS_STATUS. */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + ret = cros_ec_cmd_xfer(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, -EPROTO); + + KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2); +} + +static void cros_ec_proto_test_cmd_xfer_status_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + /* For cros_ec_cmd_xfer(). */ + { + mock = cros_kunit_ec_xfer_mock_add(test, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + ret = cros_ec_cmd_xfer_status(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, 0); +} + +static void cros_ec_proto_test_cmd_xfer_status_xfer_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_command msg; + + memset(&msg, 0, sizeof(msg)); + + /* For cros_ec_cmd_xfer(). */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + ret = cros_ec_cmd_xfer_status(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, -EPROTO); +} + +static void cros_ec_proto_test_cmd_xfer_status_return_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret, i; + struct cros_ec_command msg; + static const int map[] = { + [EC_RES_SUCCESS] = 0, + [EC_RES_INVALID_COMMAND] = -EOPNOTSUPP, + [EC_RES_ERROR] = -EIO, + [EC_RES_INVALID_PARAM] = -EINVAL, + [EC_RES_ACCESS_DENIED] = -EACCES, + [EC_RES_INVALID_RESPONSE] = -EPROTO, + [EC_RES_INVALID_VERSION] = -ENOPROTOOPT, + [EC_RES_INVALID_CHECKSUM] = -EBADMSG, + /* + * EC_RES_IN_PROGRESS is special because cros_ec_send_command() has extra logic to + * handle it. Note that default cros_kunit_ec_xfer_mock_default_ret == 0 thus + * cros_ec_xfer_command() in cros_ec_wait_until_complete() returns 0. As a result, + * it returns -EPROTO without calling cros_ec_map_error(). + */ + [EC_RES_IN_PROGRESS] = -EPROTO, + [EC_RES_UNAVAILABLE] = -ENODATA, + [EC_RES_TIMEOUT] = -ETIMEDOUT, + [EC_RES_OVERFLOW] = -EOVERFLOW, + [EC_RES_INVALID_HEADER] = -EBADR, + [EC_RES_REQUEST_TRUNCATED] = -EBADR, + [EC_RES_RESPONSE_TOO_BIG] = -EFBIG, + [EC_RES_BUS_ERROR] = -EFAULT, + [EC_RES_BUSY] = -EBUSY, + [EC_RES_INVALID_HEADER_VERSION] = -EBADMSG, + [EC_RES_INVALID_HEADER_CRC] = -EBADMSG, + [EC_RES_INVALID_DATA_CRC] = -EBADMSG, + [EC_RES_DUP_UNAVAILABLE] = -ENODATA, + }; + + memset(&msg, 0, sizeof(msg)); + + for (i = 0; i < ARRAY_SIZE(map); ++i) { + mock = cros_kunit_ec_xfer_mock_addx(test, 0, i, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + ret = cros_ec_cmd_xfer_status(ec_dev, &msg); + KUNIT_EXPECT_EQ(test, ret, map[i]); + } +} + +static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + bool wake_event, more_events; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->mkbp_event_supported = 0; + + /* Set some garbage bytes. 
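+ * Preload the out-parameters with the opposite of the expected results + * so the test proves cros_ec_get_next_event() really writes them.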
*/ + wake_event = false; + more_events = true; + + /* For get_keyboard_state_event(). */ + { + union ec_response_get_next_data_v1 *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (union ec_response_get_next_data_v1 *)mock->o_data; + data->host_event = 0xbeef; + } + + ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events); + KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1)); + + KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX); + KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef); + + KUNIT_EXPECT_TRUE(test, wake_event); + KUNIT_EXPECT_FALSE(test, more_events); + + /* For get_keyboard_state_event(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } +} + +static void cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + + ec_dev->mkbp_event_supported = 1; + ec_dev->suspended = true; + + ret = cros_ec_get_next_event(ec_dev, NULL, NULL); + KUNIT_EXPECT_EQ(test, ret, -EHOSTDOWN); +} + +static void cros_ec_proto_test_get_next_event_mkbp_event_version0(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + bool wake_event, more_events; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->mkbp_event_supported = 1; + + /* Set some garbage bytes. */ + wake_event = true; + more_events = false; + + /* For get_next_event_xfer(). */ + { + struct ec_response_get_next_event *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_next_event *)mock->o_data; + data->event_type = EC_MKBP_EVENT_SENSOR_FIFO | EC_MKBP_HAS_MORE_EVENTS; + data->data.sysrq = 0xbeef; + } + + ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events); + KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event)); + + KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_SENSOR_FIFO); + KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef); + + KUNIT_EXPECT_FALSE(test, wake_event); + KUNIT_EXPECT_TRUE(test, more_events); + + /* For get_next_event_xfer(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_next_event)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } +} + +static void cros_ec_proto_test_get_next_event_mkbp_event_version2(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + bool wake_event, more_events; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->mkbp_event_supported = 3; + + /* Set some garbage bytes. */ + wake_event = false; + more_events = true; + + /* For get_next_event_xfer(). 
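+ * mkbp_event_supported == 3 advertises event version 2, so the request + * below is expected to carry version 2 and use the v1 response layout.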
*/ + { + struct ec_response_get_next_event_v1 *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_next_event_v1 *)mock->o_data; + data->event_type = EC_MKBP_EVENT_FINGERPRINT; + data->data.sysrq = 0xbeef; + } + + ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events); + KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event_v1)); + + KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_FINGERPRINT); + KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef); + + KUNIT_EXPECT_TRUE(test, wake_event); + KUNIT_EXPECT_FALSE(test, more_events); + + /* For get_next_event_xfer(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 2); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_next_event_v1)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } +} + +static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + bool wake_event; + struct ec_response_get_next_event_v1 *data; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->mkbp_event_supported = 3; + ec_dev->host_event_wake_mask = U32_MAX; + + /* Set some garbage bytes. */ + wake_event = true; + + /* For get_next_event_xfer(). */ + { + mock = cros_kunit_ec_xfer_mock_add(test, + sizeof(data->event_type) + + sizeof(data->data.host_event)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_next_event_v1 *)mock->o_data; + data->event_type = EC_MKBP_EVENT_HOST_EVENT; + put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC), &data->data.host_event); + } + + ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL); + KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event)); + + KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT); + + KUNIT_EXPECT_FALSE(test, wake_event); + + /* For get_next_event_xfer(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 2); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_next_event_v1)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } +} + +static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + bool wake_event; + struct ec_response_get_next_event_v1 *data; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->mkbp_event_supported = 3; + ec_dev->host_event_wake_mask = U32_MAX & ~EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED); + + /* Set some garbage bytes. */ + wake_event = true; + + /* For get_next_event_xfer(). 
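+ * Report the one event that the wake mask excludes; wake_event must + * come back false.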
*/ + { + mock = cros_kunit_ec_xfer_mock_add(test, + sizeof(data->event_type) + + sizeof(data->data.host_event)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_next_event_v1 *)mock->o_data; + data->event_type = EC_MKBP_EVENT_HOST_EVENT; + put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), + &data->data.host_event); + } + + ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL); + KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event)); + + KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT); + + KUNIT_EXPECT_FALSE(test, wake_event); + + /* For get_next_event_xfer(). */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 2); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_get_next_event_v1)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } +} + +static void cros_ec_proto_test_get_host_event_no_mkbp_event(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + + ec_dev->mkbp_event_supported = 0; + + ret = cros_ec_get_host_event(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); +} + +static void cros_ec_proto_test_get_host_event_not_host_event(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + + ec_dev->mkbp_event_supported = 1; + ec_dev->event_data.event_type = EC_MKBP_EVENT_FINGERPRINT; + + ret = cros_ec_get_host_event(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); +} + +static void cros_ec_proto_test_get_host_event_wrong_event_size(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + + ec_dev->mkbp_event_supported = 1; + ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT; + ec_dev->event_size = 0xff; + + ret = cros_ec_get_host_event(ec_dev); + KUNIT_EXPECT_EQ(test, ret, 0); +} + +static void cros_ec_proto_test_get_host_event_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + int ret; + + ec_dev->mkbp_event_supported = 1; + ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT; + ec_dev->event_size = sizeof(ec_dev->event_data.data.host_event); + put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC), + &ec_dev->event_data.data.host_event); + + ret = cros_ec_get_host_event(ec_dev); + KUNIT_EXPECT_EQ(test, ret, EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC)); +} + +static void cros_ec_proto_test_check_features_cached(struct kunit *test) +{ + int ret, i; + struct cros_ec_dev ec; + + ec.features.flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT); + ec.features.flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP); + + for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) { + ret = cros_ec_check_features(&ec, i); + switch (i) { + case EC_FEATURE_FINGERPRINT: + case EC_FEATURE_SCP: + KUNIT_EXPECT_TRUE(test, ret); + break; + default: + KUNIT_EXPECT_FALSE(test, ret); + break; + } + } +} + +static void cros_ec_proto_test_check_features_not_cached(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret, i; + struct cros_ec_dev ec; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec.ec_dev = 
ec_dev; + ec.dev = ec_dev->dev; + ec.cmd_offset = 0; + ec.features.flags[0] = -1; + ec.features.flags[1] = -1; + + /* For EC_CMD_GET_FEATURES. */ + { + struct ec_response_get_features *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_get_features *)mock->o_data; + data->flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT); + data->flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP); + } + + for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) { + ret = cros_ec_check_features(&ec, i); + switch (i) { + case EC_FEATURE_FINGERPRINT: + case EC_FEATURE_SCP: + KUNIT_EXPECT_TRUE(test, ret); + break; + default: + KUNIT_EXPECT_FALSE(test, ret); + break; + } + } + + /* For EC_CMD_GET_FEATURES. */ + { + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_FEATURES); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_features)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); + } +} + +static void cros_ec_proto_test_get_sensor_count_normal(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_dev ec; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec.ec_dev = ec_dev; + ec.dev = ec_dev->dev; + ec.cmd_offset = 0; + + /* For EC_CMD_MOTION_SENSE_CMD. */ + { + struct ec_response_motion_sense *data; + + mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (struct ec_response_motion_sense *)mock->o_data; + data->dump.sensor_count = 0xbf; + } + + ret = cros_ec_get_sensor_count(&ec); + KUNIT_EXPECT_EQ(test, ret, 0xbf); + + /* For EC_CMD_MOTION_SENSE_CMD. */ + { + struct ec_params_motion_sense *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 1); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_motion_sense *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP); + } +} + +static void cros_ec_proto_test_get_sensor_count_xfer_error(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + struct cros_ec_dev ec; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec.ec_dev = ec_dev; + ec.dev = ec_dev->dev; + ec.cmd_offset = 0; + + /* For EC_CMD_MOTION_SENSE_CMD. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + ret = cros_ec_get_sensor_count(&ec); + KUNIT_EXPECT_EQ(test, ret, -EPROTO); + + /* For EC_CMD_MOTION_SENSE_CMD. 
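+ * Even though the transfer failed, the MOTIONSENSE_CMD_DUMP request + * must still have been issued with the expected shape.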
*/ + { + struct ec_params_motion_sense *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 1); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_motion_sense *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP); + } +} + +static void cros_ec_proto_test_get_sensor_count_legacy(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret, i; + struct cros_ec_dev ec; + struct { + u8 readmem_data; + int expected_result; + } test_data[] = { + { 0, 0 }, + { EC_MEMMAP_ACC_STATUS_PRESENCE_BIT, 2 }, + }; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + ec_dev->cmd_readmem = cros_kunit_readmem_mock; + ec.ec_dev = ec_dev; + ec.dev = ec_dev->dev; + ec.cmd_offset = 0; + + for (i = 0; i < ARRAY_SIZE(test_data); ++i) { + /* For EC_CMD_MOTION_SENSE_CMD. */ + { + mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + } + + /* For readmem. */ + { + cros_kunit_readmem_mock_data = kunit_kzalloc(test, 1, GFP_KERNEL); + KUNIT_ASSERT_PTR_NE(test, cros_kunit_readmem_mock_data, NULL); + cros_kunit_readmem_mock_data[0] = test_data[i].readmem_data; + + cros_kunit_ec_xfer_mock_default_ret = 1; + } + + ret = cros_ec_get_sensor_count(&ec); + KUNIT_EXPECT_EQ(test, ret, test_data[i].expected_result); + + /* For EC_CMD_MOTION_SENSE_CMD. */ + { + struct ec_params_motion_sense *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 1); + KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD); + KUNIT_EXPECT_EQ(test, mock->msg.insize, + sizeof(struct ec_response_motion_sense)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data)); + + data = (struct ec_params_motion_sense *)mock->i_data; + KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP); + } + + /* For readmem. 
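+ * The legacy fallback is expected to have read the accelerometer + * status byte at EC_MEMMAP_ACC_STATUS.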
*/ + { + KUNIT_EXPECT_EQ(test, cros_kunit_readmem_mock_offset, EC_MEMMAP_ACC_STATUS); + } + } +} + +static void cros_ec_proto_test_ec_cmd(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + struct ec_xfer_mock *mock; + int ret; + u8 out[3], in[2]; + + ec_dev->max_request = 0xff; + ec_dev->max_response = 0xee; + + out[0] = 0xdd; + out[1] = 0xcc; + out[2] = 0xbb; + + { + u8 *data; + + mock = cros_kunit_ec_xfer_mock_add(test, 2); + KUNIT_ASSERT_PTR_NE(test, mock, NULL); + + data = (u8 *)mock->o_data; + data[0] = 0xaa; + data[1] = 0x99; + } + + ret = cros_ec_cmd(ec_dev, 0x88, 0x77, out, ARRAY_SIZE(out), in, ARRAY_SIZE(in)); + KUNIT_EXPECT_EQ(test, ret, 2); + + { + u8 *data; + + mock = cros_kunit_ec_xfer_mock_next(); + KUNIT_EXPECT_PTR_NE(test, mock, NULL); + + KUNIT_EXPECT_EQ(test, mock->msg.version, 0x88); + KUNIT_EXPECT_EQ(test, mock->msg.command, 0x77); + KUNIT_EXPECT_EQ(test, mock->msg.insize, ARRAY_SIZE(in)); + KUNIT_EXPECT_EQ(test, mock->msg.outsize, ARRAY_SIZE(out)); + + data = (u8 *)mock->i_data; + KUNIT_EXPECT_EQ(test, data[0], 0xdd); + KUNIT_EXPECT_EQ(test, data[1], 0xcc); + KUNIT_EXPECT_EQ(test, data[2], 0xbb); + } +} + +static void cros_ec_proto_test_release(struct device *dev) +{ +} + +static int cros_ec_proto_test_init(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv; + struct cros_ec_device *ec_dev; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + test->priv = priv; + + ec_dev = &priv->ec_dev; + ec_dev->dout = (u8 *)priv->dout; + ec_dev->dout_size = ARRAY_SIZE(priv->dout); + ec_dev->din = (u8 *)priv->din; + ec_dev->din_size = ARRAY_SIZE(priv->din); + ec_dev->proto_version = EC_HOST_REQUEST_VERSION; + ec_dev->dev = kunit_kzalloc(test, sizeof(*ec_dev->dev), GFP_KERNEL); + if (!ec_dev->dev) + return -ENOMEM; + device_initialize(ec_dev->dev); + dev_set_name(ec_dev->dev, "cros_ec_proto_test"); + ec_dev->dev->release = cros_ec_proto_test_release; + ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock; + ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock; + + priv->msg = (struct cros_ec_command *)priv->_msg; + + cros_kunit_mock_reset(); + + return 0; +} + +static void cros_ec_proto_test_exit(struct kunit *test) +{ + struct cros_ec_proto_test_priv *priv = test->priv; + struct cros_ec_device *ec_dev = &priv->ec_dev; + + put_device(ec_dev->dev); +} + +static struct kunit_case cros_ec_proto_test_cases[] = { + KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_normal), + KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize), + KUNIT_CASE(cros_ec_proto_test_prepare_tx_normal), + KUNIT_CASE(cros_ec_proto_test_prepare_tx_bad_msg_outsize), + KUNIT_CASE(cros_ec_proto_test_check_result), + KUNIT_CASE(cros_ec_proto_test_query_all_normal), + KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return_error), + KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return0), + KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return_error), + KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return0), + KUNIT_CASE(cros_ec_proto_test_query_all_legacy_xfer_error), + KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return_error), + KUNIT_CASE(cros_ec_proto_test_query_all_legacy_data_error), + KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return0), + KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp), + KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return_error), + KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return0), + KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep), + 
KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep_return0), + KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return_error), + KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return0), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_normal), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_insize), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_normal), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_no_op), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_normal), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_no_op), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_normal), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_xfer_error), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return_error), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return0), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_normal), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_xfer_error), + KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_return_error), + KUNIT_CASE(cros_ec_proto_test_get_next_event_no_mkbp_event), + KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended), + KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version0), + KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version2), + KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc), + KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked), + KUNIT_CASE(cros_ec_proto_test_get_host_event_no_mkbp_event), + KUNIT_CASE(cros_ec_proto_test_get_host_event_not_host_event), + KUNIT_CASE(cros_ec_proto_test_get_host_event_wrong_event_size), + KUNIT_CASE(cros_ec_proto_test_get_host_event_normal), + KUNIT_CASE(cros_ec_proto_test_check_features_cached), + KUNIT_CASE(cros_ec_proto_test_check_features_not_cached), + KUNIT_CASE(cros_ec_proto_test_get_sensor_count_normal), + KUNIT_CASE(cros_ec_proto_test_get_sensor_count_xfer_error), + KUNIT_CASE(cros_ec_proto_test_get_sensor_count_legacy), + KUNIT_CASE(cros_ec_proto_test_ec_cmd), + {} +}; + +static struct kunit_suite cros_ec_proto_test_suite = { + .name = "cros_ec_proto_test", + .init = cros_ec_proto_test_init, + .exit = cros_ec_proto_test_exit, + .test_cases = cros_ec_proto_test_cases, +}; + +kunit_test_suite(cros_ec_proto_test_suite); + +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/chrome/cros_kunit_util.c b/drivers/platform/chrome/cros_kunit_util.c new file mode 100644 index 0000000000..f0fda96b11 --- /dev/null +++ b/drivers/platform/chrome/cros_kunit_util.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CrOS Kunit tests utilities. 
+ */ + +#include + +#include +#include +#include +#include + +#include "cros_ec.h" +#include "cros_kunit_util.h" + +int cros_kunit_ec_xfer_mock_default_result; +int cros_kunit_ec_xfer_mock_default_ret; +int cros_kunit_ec_cmd_xfer_mock_called; +int cros_kunit_ec_pkt_xfer_mock_called; + +static struct list_head cros_kunit_ec_xfer_mock_in; +static struct list_head cros_kunit_ec_xfer_mock_out; + +int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) +{ + struct ec_xfer_mock *mock; + + mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_in, struct ec_xfer_mock, list); + if (!mock) { + msg->result = cros_kunit_ec_xfer_mock_default_result; + return cros_kunit_ec_xfer_mock_default_ret; + } + + list_del(&mock->list); + + memcpy(&mock->msg, msg, sizeof(*msg)); + if (msg->outsize) { + mock->i_data = kunit_kzalloc(mock->test, msg->outsize, GFP_KERNEL); + if (mock->i_data) + memcpy(mock->i_data, msg->data, msg->outsize); + } + + msg->result = mock->result; + if (msg->insize) + memcpy(msg->data, mock->o_data, min(msg->insize, mock->o_data_len)); + + list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_out); + + return mock->ret; +} + +int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) +{ + ++cros_kunit_ec_cmd_xfer_mock_called; + return cros_kunit_ec_xfer_mock(ec_dev, msg); +} + +int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) +{ + ++cros_kunit_ec_pkt_xfer_mock_called; + return cros_kunit_ec_xfer_mock(ec_dev, msg); +} + +struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size) +{ + return cros_kunit_ec_xfer_mock_addx(test, size, EC_RES_SUCCESS, size); +} + +struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test, + int ret, int result, size_t size) +{ + struct ec_xfer_mock *mock; + + mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL); + if (!mock) + return NULL; + + list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_in); + mock->test = test; + + mock->ret = ret; + mock->result = result; + mock->o_data = kunit_kzalloc(test, size, GFP_KERNEL); + if (!mock->o_data) + return NULL; + mock->o_data_len = size; + + return mock; +} + +struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void) +{ + struct ec_xfer_mock *mock; + + mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_out, struct ec_xfer_mock, list); + if (mock) + list_del(&mock->list); + + return mock; +} + +int cros_kunit_readmem_mock_offset; +u8 *cros_kunit_readmem_mock_data; +int cros_kunit_readmem_mock_ret; + +int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset, + unsigned int bytes, void *dest) +{ + cros_kunit_readmem_mock_offset = offset; + + memcpy(dest, cros_kunit_readmem_mock_data, bytes); + + return cros_kunit_readmem_mock_ret; +} + +void cros_kunit_mock_reset(void) +{ + cros_kunit_ec_xfer_mock_default_result = 0; + cros_kunit_ec_xfer_mock_default_ret = 0; + cros_kunit_ec_cmd_xfer_mock_called = 0; + cros_kunit_ec_pkt_xfer_mock_called = 0; + INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_in); + INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_out); + + cros_kunit_readmem_mock_offset = 0; + cros_kunit_readmem_mock_data = NULL; + cros_kunit_readmem_mock_ret = 0; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/chrome/cros_kunit_util.h b/drivers/platform/chrome/cros_kunit_util.h new file mode 100644 index 0000000000..414002271c --- /dev/null +++ b/drivers/platform/chrome/cros_kunit_util.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CrOS 
Kunit tests utilities. + */ + +#ifndef _CROS_KUNIT_UTIL_H_ +#define _CROS_KUNIT_UTIL_H_ + +#include + +struct ec_xfer_mock { + struct list_head list; + struct kunit *test; + + /* input */ + struct cros_ec_command msg; + void *i_data; + + /* output */ + int ret; + int result; + void *o_data; + u32 o_data_len; +}; + +extern int cros_kunit_ec_xfer_mock_default_result; +extern int cros_kunit_ec_xfer_mock_default_ret; +extern int cros_kunit_ec_cmd_xfer_mock_called; +extern int cros_kunit_ec_pkt_xfer_mock_called; + +int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); +int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); +int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); +struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size); +struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test, + int ret, int result, size_t size); +struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void); + +extern int cros_kunit_readmem_mock_offset; +extern u8 *cros_kunit_readmem_mock_data; +extern int cros_kunit_readmem_mock_ret; + +int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset, + unsigned int bytes, void *dest); + +void cros_kunit_mock_reset(void); + +#endif diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c new file mode 100644 index 0000000000..7b9c107c17 --- /dev/null +++ b/drivers/platform/mellanox/nvsw-sn2201.c @@ -0,0 +1,1263 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Nvidia sn2201 driver + * + * Copyright (C) 2022 Nvidia Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* SN2201 CPLD register offset. 
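The register offsets that follow index the CPLD's 8-bit register file. They are not LPC I/O ports; the CPLD sits behind I2C address 0x41 and is reached through the regmap created during initialization (see nvsw_sn2201_i2c_completion_notify() further below). A usage sketch, assuming a regmap handle obtained there:

	unsigned int regval;
	int err;

	/* Read the CPLD version register through the I2C regmap. */
	err = regmap_read(regmap, NVSW_SN2201_CPLD_VER_OFFSET, &regval);
	if (!err)
		dev_info(dev, "CPLD version: 0x%02x\n", regval);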
*/ +#define NVSW_SN2201_CPLD_LPC_I2C_BASE_ADRR 0x2000 +#define NVSW_SN2201_CPLD_LPC_IO_RANGE 0x100 +#define NVSW_SN2201_HW_VER_ID_OFFSET 0x00 +#define NVSW_SN2201_BOARD_ID_OFFSET 0x01 +#define NVSW_SN2201_CPLD_VER_OFFSET 0x02 +#define NVSW_SN2201_CPLD_MVER_OFFSET 0x03 +#define NVSW_SN2201_CPLD_ID_OFFSET 0x04 +#define NVSW_SN2201_CPLD_PN_OFFSET 0x05 +#define NVSW_SN2201_CPLD_PN1_OFFSET 0x06 +#define NVSW_SN2201_PSU_CTRL_OFFSET 0x0a +#define NVSW_SN2201_QSFP28_STATUS_OFFSET 0x0b +#define NVSW_SN2201_QSFP28_INT_STATUS_OFFSET 0x0c +#define NVSW_SN2201_QSFP28_LP_STATUS_OFFSET 0x0d +#define NVSW_SN2201_QSFP28_RST_STATUS_OFFSET 0x0e +#define NVSW_SN2201_SYS_STATUS_OFFSET 0x0f +#define NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET 0x10 +#define NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET 0x12 +#define NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET 0x13 +#define NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET 0x14 +#define NVSW_SN2201_SYS_RST_STATUS_OFFSET 0x15 +#define NVSW_SN2201_SYS_INT_STATUS_OFFSET 0x21 +#define NVSW_SN2201_SYS_INT_MASK_OFFSET 0x22 +#define NVSW_SN2201_ASIC_STATUS_OFFSET 0x24 +#define NVSW_SN2201_ASIC_EVENT_OFFSET 0x25 +#define NVSW_SN2201_ASIC_MAKS_OFFSET 0x26 +#define NVSW_SN2201_THML_STATUS_OFFSET 0x27 +#define NVSW_SN2201_THML_EVENT_OFFSET 0x28 +#define NVSW_SN2201_THML_MASK_OFFSET 0x29 +#define NVSW_SN2201_PS_ALT_STATUS_OFFSET 0x2a +#define NVSW_SN2201_PS_ALT_EVENT_OFFSET 0x2b +#define NVSW_SN2201_PS_ALT_MASK_OFFSET 0x2c +#define NVSW_SN2201_PS_PRSNT_STATUS_OFFSET 0x30 +#define NVSW_SN2201_PS_PRSNT_EVENT_OFFSET 0x31 +#define NVSW_SN2201_PS_PRSNT_MASK_OFFSET 0x32 +#define NVSW_SN2201_PS_DC_OK_STATUS_OFFSET 0x33 +#define NVSW_SN2201_PS_DC_OK_EVENT_OFFSET 0x34 +#define NVSW_SN2201_PS_DC_OK_MASK_OFFSET 0x35 +#define NVSW_SN2201_RST_CAUSE1_OFFSET 0x36 +#define NVSW_SN2201_RST_CAUSE2_OFFSET 0x37 +#define NVSW_SN2201_RST_SW_CTRL_OFFSET 0x38 +#define NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET 0x3a +#define NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET 0x3b +#define NVSW_SN2201_FAN_PRSNT_MASK_OFFSET 0x3c +#define NVSW_SN2201_WD_TMR_OFFSET_LSB 0x40 +#define NVSW_SN2201_WD_TMR_OFFSET_MSB 0x41 +#define NVSW_SN2201_WD_ACT_OFFSET 0x42 +#define NVSW_SN2201_FAN_LED1_CTRL_OFFSET 0x50 +#define NVSW_SN2201_FAN_LED2_CTRL_OFFSET 0x51 +#define NVSW_SN2201_REG_MAX 0x52 + +/* Number of physical I2C busses. */ +#define NVSW_SN2201_PHY_I2C_BUS_NUM 2 +/* Number of main mux channels. */ +#define NVSW_SN2201_MAIN_MUX_CHNL_NUM 8 + +#define NVSW_SN2201_MAIN_NR 0 +#define NVSW_SN2201_MAIN_MUX_NR 1 +#define NVSW_SN2201_MAIN_MUX_DEFER_NR (NVSW_SN2201_PHY_I2C_BUS_NUM + \ + NVSW_SN2201_MAIN_MUX_CHNL_NUM - 1) + +#define NVSW_SN2201_MAIN_MUX_CH0_NR NVSW_SN2201_PHY_I2C_BUS_NUM +#define NVSW_SN2201_MAIN_MUX_CH1_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 1) +#define NVSW_SN2201_MAIN_MUX_CH2_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 2) +#define NVSW_SN2201_MAIN_MUX_CH3_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 3) +#define NVSW_SN2201_MAIN_MUX_CH5_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 5) +#define NVSW_SN2201_MAIN_MUX_CH6_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 6) +#define NVSW_SN2201_MAIN_MUX_CH7_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 7) + +#define NVSW_SN2201_CPLD_NR NVSW_SN2201_MAIN_MUX_CH0_NR +#define NVSW_SN2201_NR_NONE -1 + +/* Masks for aggregation, PSU presence and power, ASIC events + * in CPLD related registers. 
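The adapter numbering defined above works out as follows (a worked example, no new code):

	/*
	 * NVSW_SN2201_MAIN_NR           = 0              main (LPC) bus
	 * NVSW_SN2201_MAIN_MUX_NR       = 1              pca9548 parent bus
	 * NVSW_SN2201_MAIN_MUX_CH0_NR   = 2 ... CH7 = 9  mux channels
	 * NVSW_SN2201_MAIN_MUX_DEFER_NR = 2 + 8 - 1 = 9  last mux channel
	 */

Deferred device creation therefore waits until I2C adapter 9 exists, i.e. until the whole mux has been instantiated.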
+ */
+#define NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF 0xe0
+#define NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF 0x04
+#define NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF 0x02
+#define NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF 0x10
+#define NVSW_SN2201_CPLD_AGGR_MASK_DEF \
+ (NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF \
+ | NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF \
+ | NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF \
+ | NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF)
+
+#define NVSW_SN2201_CPLD_ASIC_MASK GENMASK(3, 1)
+#define NVSW_SN2201_CPLD_PSU_MASK GENMASK(1, 0)
+#define NVSW_SN2201_CPLD_PWR_MASK GENMASK(1, 0)
+#define NVSW_SN2201_CPLD_FAN_MASK GENMASK(3, 0)
+
+#define NVSW_SN2201_CPLD_SYSIRQ 26
+#define NVSW_SN2201_LPC_SYSIRQ 28
+#define NVSW_SN2201_CPLD_I2CADDR 0x41
+
+#define NVSW_SN2201_WD_DFLT_TIMEOUT 600
+
+/* nvsw_sn2201 - device private data
+ * @dev: platform device;
+ * @io_data: register access platform data;
+ * @led_data: LED platform data;
+ * @wd_data: watchdog platform data;
+ * @hotplug_data: hotplug platform data;
+ * @i2c_data: I2C controller platform data;
+ * @led: LED device;
+ * @wd: watchdog device;
+ * @io_regs: register access device;
+ * @pdev_hotplug: hotplug device;
+ * @pdev_i2c: I2C controller device;
+ * @sn2201_devs: I2C devices for sn2201 devices;
+ * @sn2201_devs_num: number of I2C devices for sn2201 devices;
+ * @main_mux_devs: I2C devices for main mux;
+ * @main_mux_devs_num: number of I2C devices for main mux;
+ * @cpld_devs: I2C devices for cpld;
+ * @cpld_devs_num: number of I2C devices for cpld;
+ * @main_mux_deferred_nr: I2C adapter number that must exist before the deferred devices are created;
+ */
+struct nvsw_sn2201 {
+ struct device *dev;
+ struct mlxreg_core_platform_data *io_data;
+ struct mlxreg_core_platform_data *led_data;
+ struct mlxreg_core_platform_data *wd_data;
+ struct mlxreg_core_hotplug_platform_data *hotplug_data;
+ struct mlxreg_core_hotplug_platform_data *i2c_data;
+ struct platform_device *led;
+ struct platform_device *wd;
+ struct platform_device *io_regs;
+ struct platform_device *pdev_hotplug;
+ struct platform_device *pdev_i2c;
+ struct mlxreg_hotplug_device *sn2201_devs;
+ int sn2201_devs_num;
+ struct mlxreg_hotplug_device *main_mux_devs;
+ int main_mux_devs_num;
+ struct mlxreg_hotplug_device *cpld_devs;
+ int cpld_devs_num;
+ int main_mux_deferred_nr;
+};
+
+static bool nvsw_sn2201_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NVSW_SN2201_PSU_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET:
+ case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_RST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_INT_MASK_OFFSET:
+ case NVSW_SN2201_ASIC_EVENT_OFFSET:
+ case NVSW_SN2201_ASIC_MAKS_OFFSET:
+ case NVSW_SN2201_THML_EVENT_OFFSET:
+ case NVSW_SN2201_THML_MASK_OFFSET:
+ case NVSW_SN2201_PS_ALT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_ALT_MASK_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_MASK_OFFSET:
+ case NVSW_SN2201_RST_SW_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_WD_TMR_OFFSET_LSB:
+ case NVSW_SN2201_WD_TMR_OFFSET_MSB:
+ case NVSW_SN2201_WD_ACT_OFFSET:
+ case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool nvsw_sn2201_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case
NVSW_SN2201_HW_VER_ID_OFFSET: + case NVSW_SN2201_BOARD_ID_OFFSET: + case NVSW_SN2201_CPLD_VER_OFFSET: + case NVSW_SN2201_CPLD_MVER_OFFSET: + case NVSW_SN2201_CPLD_ID_OFFSET: + case NVSW_SN2201_CPLD_PN_OFFSET: + case NVSW_SN2201_CPLD_PN1_OFFSET: + case NVSW_SN2201_PSU_CTRL_OFFSET: + case NVSW_SN2201_QSFP28_STATUS_OFFSET: + case NVSW_SN2201_QSFP28_INT_STATUS_OFFSET: + case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET: + case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET: + case NVSW_SN2201_SYS_STATUS_OFFSET: + case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET: + case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET: + case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET: + case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET: + case NVSW_SN2201_SYS_RST_STATUS_OFFSET: + case NVSW_SN2201_RST_CAUSE1_OFFSET: + case NVSW_SN2201_RST_CAUSE2_OFFSET: + case NVSW_SN2201_SYS_INT_STATUS_OFFSET: + case NVSW_SN2201_SYS_INT_MASK_OFFSET: + case NVSW_SN2201_ASIC_STATUS_OFFSET: + case NVSW_SN2201_ASIC_EVENT_OFFSET: + case NVSW_SN2201_ASIC_MAKS_OFFSET: + case NVSW_SN2201_THML_STATUS_OFFSET: + case NVSW_SN2201_THML_EVENT_OFFSET: + case NVSW_SN2201_THML_MASK_OFFSET: + case NVSW_SN2201_PS_ALT_STATUS_OFFSET: + case NVSW_SN2201_PS_ALT_EVENT_OFFSET: + case NVSW_SN2201_PS_ALT_MASK_OFFSET: + case NVSW_SN2201_PS_PRSNT_STATUS_OFFSET: + case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET: + case NVSW_SN2201_PS_PRSNT_MASK_OFFSET: + case NVSW_SN2201_PS_DC_OK_STATUS_OFFSET: + case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET: + case NVSW_SN2201_PS_DC_OK_MASK_OFFSET: + case NVSW_SN2201_RST_SW_CTRL_OFFSET: + case NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET: + case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET: + case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET: + case NVSW_SN2201_WD_TMR_OFFSET_LSB: + case NVSW_SN2201_WD_TMR_OFFSET_MSB: + case NVSW_SN2201_WD_ACT_OFFSET: + case NVSW_SN2201_FAN_LED1_CTRL_OFFSET: + case NVSW_SN2201_FAN_LED2_CTRL_OFFSET: + return true; + } + return false; +} + +static bool nvsw_sn2201_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case NVSW_SN2201_HW_VER_ID_OFFSET: + case NVSW_SN2201_BOARD_ID_OFFSET: + case NVSW_SN2201_CPLD_VER_OFFSET: + case NVSW_SN2201_CPLD_MVER_OFFSET: + case NVSW_SN2201_CPLD_ID_OFFSET: + case NVSW_SN2201_CPLD_PN_OFFSET: + case NVSW_SN2201_CPLD_PN1_OFFSET: + case NVSW_SN2201_PSU_CTRL_OFFSET: + case NVSW_SN2201_QSFP28_STATUS_OFFSET: + case NVSW_SN2201_QSFP28_INT_STATUS_OFFSET: + case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET: + case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET: + case NVSW_SN2201_SYS_STATUS_OFFSET: + case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET: + case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET: + case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET: + case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET: + case NVSW_SN2201_SYS_RST_STATUS_OFFSET: + case NVSW_SN2201_RST_CAUSE1_OFFSET: + case NVSW_SN2201_RST_CAUSE2_OFFSET: + case NVSW_SN2201_SYS_INT_STATUS_OFFSET: + case NVSW_SN2201_SYS_INT_MASK_OFFSET: + case NVSW_SN2201_ASIC_STATUS_OFFSET: + case NVSW_SN2201_ASIC_EVENT_OFFSET: + case NVSW_SN2201_ASIC_MAKS_OFFSET: + case NVSW_SN2201_THML_STATUS_OFFSET: + case NVSW_SN2201_THML_EVENT_OFFSET: + case NVSW_SN2201_THML_MASK_OFFSET: + case NVSW_SN2201_PS_ALT_STATUS_OFFSET: + case NVSW_SN2201_PS_ALT_EVENT_OFFSET: + case NVSW_SN2201_PS_ALT_MASK_OFFSET: + case NVSW_SN2201_PS_PRSNT_STATUS_OFFSET: + case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET: + case NVSW_SN2201_PS_PRSNT_MASK_OFFSET: + case NVSW_SN2201_PS_DC_OK_STATUS_OFFSET: + case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET: + case NVSW_SN2201_PS_DC_OK_MASK_OFFSET: + case NVSW_SN2201_RST_SW_CTRL_OFFSET: + case NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET: + 
case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_WD_TMR_OFFSET_LSB:
+ case NVSW_SN2201_WD_TMR_OFFSET_MSB:
+ case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default nvsw_sn2201_regmap_default[] = {
+ { NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET, 0x00 },
+ { NVSW_SN2201_WD_ACT_OFFSET, 0x00 },
+};
+
+/* Configuration for the register map of a device with a 1-byte address space. */
+static const struct regmap_config nvsw_sn2201_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = NVSW_SN2201_REG_MAX,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = nvsw_sn2201_writeable_reg,
+ .readable_reg = nvsw_sn2201_readable_reg,
+ .volatile_reg = nvsw_sn2201_volatile_reg,
+ .reg_defaults = nvsw_sn2201_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(nvsw_sn2201_regmap_default),
+};
+
+/* Regions for LPC I2C controller and LPC base register space. */
+static const struct resource nvsw_sn2201_lpc_io_resources[] = {
+ [0] = DEFINE_RES_NAMED(NVSW_SN2201_CPLD_LPC_I2C_BASE_ADRR,
+ NVSW_SN2201_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
+};
+
+static struct resource nvsw_sn2201_cpld_res[] = {
+ [0] = DEFINE_RES_IRQ_NAMED(NVSW_SN2201_CPLD_SYSIRQ, "mlxreg-hotplug"),
+};
+
+static struct resource nvsw_sn2201_lpc_res[] = {
+ [0] = DEFINE_RES_IRQ_NAMED(NVSW_SN2201_LPC_SYSIRQ, "i2c-mlxcpld"),
+};
+
+/* SN2201 I2C platform data. */
+static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
+ .irq = NVSW_SN2201_CPLD_SYSIRQ,
+};
+
+/* SN2201 CPLD device. */
+static struct i2c_board_info nvsw_sn2201_cpld_devices[] = {
+ {
+ I2C_BOARD_INFO("nvsw-sn2201", 0x41),
+ },
+};
+
+/* SN2201 CPLD board info. */
+static struct mlxreg_hotplug_device nvsw_sn2201_cpld_brdinfo[] = {
+ {
+ .brdinfo = &nvsw_sn2201_cpld_devices[0],
+ .nr = NVSW_SN2201_CPLD_NR,
+ },
+};
+
+/* SN2201 main mux device. */
+static struct i2c_board_info nvsw_sn2201_main_mux_devices[] = {
+ {
+ I2C_BOARD_INFO("pca9548", 0x70),
+ },
+};
+
+/* SN2201 main mux board info. */
+static struct mlxreg_hotplug_device nvsw_sn2201_main_mux_brdinfo[] = {
+ {
+ .brdinfo = &nvsw_sn2201_main_mux_devices[0],
+ .nr = NVSW_SN2201_MAIN_MUX_NR,
+ },
+};
+
+/* SN2201 power devices. */
+static struct i2c_board_info nvsw_sn2201_pwr_devices[] = {
+ {
+ I2C_BOARD_INFO("pmbus", 0x58),
+ },
+ {
+ I2C_BOARD_INFO("pmbus", 0x58),
+ },
+};
+
+/* SN2201 fan devices. */
+static struct i2c_board_info nvsw_sn2201_fan_devices[] = {
+ {
+ I2C_BOARD_INFO("24c02", 0x50),
+ },
+ {
+ I2C_BOARD_INFO("24c02", 0x51),
+ },
+ {
+ I2C_BOARD_INFO("24c02", 0x52),
+ },
+ {
+ I2C_BOARD_INFO("24c02", 0x53),
+ },
+};
+
+/* SN2201 hotplug default data.
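The hotplug item tables that follow are consumed by the mlxreg-hotplug driver registered later in this file. A rough sketch of the per-item contract (inferred from the mlxreg data structures, not code from this patch): on each scan the driver reads data->reg, tests data->mask, honors the item's inversed flag (presence signals here are active low), and instantiates the companion I2C device when one is described; NVSW_SN2201_NR_NONE means there is no device to create, only an event to report:

	unsigned int regval;

	regmap_read(regmap, data->reg, &regval);
	if (!!(regval & data->mask) != item->inversed) {
		/* Present: create data->hpdev.brdinfo on bus data->hpdev.nr. */
	}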
*/ +static struct mlxreg_core_data nvsw_sn2201_psu_items_data[] = { + { + .label = "psu1", + .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET, + .mask = BIT(0), + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, + { + .label = "psu2", + .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET, + .mask = BIT(1), + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, +}; + +static struct mlxreg_core_data nvsw_sn2201_pwr_items_data[] = { + { + .label = "pwr1", + .reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &nvsw_sn2201_pwr_devices[0], + .hpdev.nr = NVSW_SN2201_MAIN_MUX_CH1_NR, + }, + { + .label = "pwr2", + .reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &nvsw_sn2201_pwr_devices[1], + .hpdev.nr = NVSW_SN2201_MAIN_MUX_CH2_NR, + }, +}; + +static struct mlxreg_core_data nvsw_sn2201_fan_items_data[] = { + { + .label = "fan1", + .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &nvsw_sn2201_fan_devices[0], + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, + { + .label = "fan2", + .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &nvsw_sn2201_fan_devices[1], + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, + { + .label = "fan3", + .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET, + .mask = BIT(2), + .hpdev.brdinfo = &nvsw_sn2201_fan_devices[2], + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, + { + .label = "fan4", + .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET, + .mask = BIT(3), + .hpdev.brdinfo = &nvsw_sn2201_fan_devices[3], + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, +}; + +static struct mlxreg_core_data nvsw_sn2201_sys_items_data[] = { + { + .label = "nic_smb_alert", + .reg = NVSW_SN2201_ASIC_STATUS_OFFSET, + .mask = BIT(1), + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, + { + .label = "cpu_sd", + .reg = NVSW_SN2201_ASIC_STATUS_OFFSET, + .mask = BIT(2), + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, + { + .label = "mac_health", + .reg = NVSW_SN2201_ASIC_STATUS_OFFSET, + .mask = BIT(3), + .hpdev.nr = NVSW_SN2201_NR_NONE, + }, +}; + +static struct mlxreg_core_item nvsw_sn2201_items[] = { + { + .data = nvsw_sn2201_psu_items_data, + .aggr_mask = NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF, + .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET, + .mask = NVSW_SN2201_CPLD_PSU_MASK, + .count = ARRAY_SIZE(nvsw_sn2201_psu_items_data), + .inversed = 1, + .health = false, + }, + { + .data = nvsw_sn2201_pwr_items_data, + .aggr_mask = NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF, + .reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET, + .mask = NVSW_SN2201_CPLD_PWR_MASK, + .count = ARRAY_SIZE(nvsw_sn2201_pwr_items_data), + .inversed = 0, + .health = false, + }, + { + .data = nvsw_sn2201_fan_items_data, + .aggr_mask = NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF, + .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET, + .mask = NVSW_SN2201_CPLD_FAN_MASK, + .count = ARRAY_SIZE(nvsw_sn2201_fan_items_data), + .inversed = 1, + .health = false, + }, + { + .data = nvsw_sn2201_sys_items_data, + .aggr_mask = NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF, + .reg = NVSW_SN2201_ASIC_STATUS_OFFSET, + .mask = NVSW_SN2201_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(nvsw_sn2201_sys_items_data), + .inversed = 1, + .health = false, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data nvsw_sn2201_hotplug = { + .items = nvsw_sn2201_items, + .counter = ARRAY_SIZE(nvsw_sn2201_items), + .cell = NVSW_SN2201_SYS_INT_STATUS_OFFSET, + .mask = NVSW_SN2201_CPLD_AGGR_MASK_DEF, +}; + +/* SN2201 static devices. 
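The aggregation mask used in nvsw_sn2201_hotplug above is simply the OR of the per-group defaults:

	/*
	 * NVSW_SN2201_CPLD_AGGR_MASK_DEF = 0xe0 | 0x04 | 0x02 | 0x10 = 0xf6
	 */

On an interrupt, mlxreg-hotplug first reads the aggregation cell (NVSW_SN2201_SYS_INT_STATUS_OFFSET) masked with 0xf6 to decide which group changed, and only then walks that group's item table.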
*/ +static struct i2c_board_info nvsw_sn2201_static_devices[] = { + { + I2C_BOARD_INFO("24c02", 0x57), + }, + { + I2C_BOARD_INFO("lm75", 0x4b), + }, + { + I2C_BOARD_INFO("24c64", 0x56), + }, + { + I2C_BOARD_INFO("ads1015", 0x49), + }, + { + I2C_BOARD_INFO("pca9546", 0x71), + }, + { + I2C_BOARD_INFO("emc2305", 0x4d), + }, + { + I2C_BOARD_INFO("lm75", 0x49), + }, + { + I2C_BOARD_INFO("pca9555", 0x27), + }, + { + I2C_BOARD_INFO("powr1014", 0x37), + }, + { + I2C_BOARD_INFO("lm75", 0x4f), + }, + { + I2C_BOARD_INFO("pmbus", 0x40), + }, +}; + +/* SN2201 default static board info. */ +static struct mlxreg_hotplug_device nvsw_sn2201_static_brdinfo[] = { + { + .brdinfo = &nvsw_sn2201_static_devices[0], + .nr = NVSW_SN2201_MAIN_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[1], + .nr = NVSW_SN2201_MAIN_MUX_CH0_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[2], + .nr = NVSW_SN2201_MAIN_MUX_CH0_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[3], + .nr = NVSW_SN2201_MAIN_MUX_CH0_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[4], + .nr = NVSW_SN2201_MAIN_MUX_CH3_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[5], + .nr = NVSW_SN2201_MAIN_MUX_CH5_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[6], + .nr = NVSW_SN2201_MAIN_MUX_CH5_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[7], + .nr = NVSW_SN2201_MAIN_MUX_CH5_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[8], + .nr = NVSW_SN2201_MAIN_MUX_CH6_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[9], + .nr = NVSW_SN2201_MAIN_MUX_CH6_NR, + }, + { + .brdinfo = &nvsw_sn2201_static_devices[10], + .nr = NVSW_SN2201_MAIN_MUX_CH7_NR, + }, +}; + +/* LED default data. */ +static struct mlxreg_core_data nvsw_sn2201_led_data[] = { + { + .label = "status:green", + .reg = NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "status:orange", + .reg = NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "psu:green", + .reg = NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "psu:orange", + .reg = NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "uid:blue", + .reg = NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "fan1:green", + .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "fan1:orange", + .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "fan2:green", + .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET, + .mask = GENMASK(3, 0), + }, + { + .label = "fan2:orange", + .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET, + .mask = GENMASK(3, 0), + }, + { + .label = "fan3:green", + .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "fan3:orange", + .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET, + .mask = GENMASK(7, 4), + }, + { + .label = "fan4:green", + .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET, + .mask = GENMASK(3, 0), + }, + { + .label = "fan4:orange", + .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET, + .mask = GENMASK(3, 0), + }, +}; + +static struct mlxreg_core_platform_data nvsw_sn2201_led = { + .data = nvsw_sn2201_led_data, + .counter = ARRAY_SIZE(nvsw_sn2201_led_data), +}; + +/* Default register access data. 
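In the LED table above, each control register packs two LEDs, one per nibble: fan1 uses GENMASK(7, 4) of NVSW_SN2201_FAN_LED1_CTRL_OFFSET while fan2 uses GENMASK(3, 0) of the same register, and each green/orange pair shares a mask because the color is encoded in the nibble's value. A hedged sketch of the resulting register access (the color code values are up to leds-mlxreg and are illustrative here):

	/* Set the fan1 LED color by rewriting only its nibble. */
	regmap_update_bits(regmap, NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
			   GENMASK(7, 4), color_code << 4);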
*/ +static struct mlxreg_core_data nvsw_sn2201_io_data[] = { + { + .label = "cpld1_version", + .reg = NVSW_SN2201_CPLD_VER_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "cpld1_version_min", + .reg = NVSW_SN2201_CPLD_MVER_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "cpld1_pn", + .reg = NVSW_SN2201_CPLD_PN_OFFSET, + .bit = GENMASK(15, 0), + .mode = 0444, + .regnum = 2, + }, + { + .label = "psu1_on", + .reg = NVSW_SN2201_PSU_CTRL_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(0), + .mode = 0644, + }, + { + .label = "psu2_on", + .reg = NVSW_SN2201_PSU_CTRL_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(1), + .mode = 0644, + }, + { + .label = "pwr_cycle", + .reg = NVSW_SN2201_PSU_CTRL_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0644, + }, + { + .label = "asic_health", + .reg = NVSW_SN2201_SYS_STATUS_OFFSET, + .mask = GENMASK(4, 3), + .bit = 4, + .mode = 0444, + }, + { + .label = "qsfp_pwr_good", + .reg = NVSW_SN2201_SYS_STATUS_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(0), + .mode = 0444, + }, + { + .label = "phy_reset", + .reg = NVSW_SN2201_SYS_RST_STATUS_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(3), + .mode = 0644, + }, + { + .label = "mac_reset", + .reg = NVSW_SN2201_SYS_RST_STATUS_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0644, + }, + { + .label = "pwr_down", + .reg = NVSW_SN2201_RST_SW_CTRL_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(0), + .mode = 0644, + }, + { + .label = "reset_long_pb", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(0), + .mode = 0444, + }, + { + .label = "reset_short_pb", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(1), + .mode = 0444, + }, + { + .label = "reset_aux_pwr_or_fu", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0444, + }, + { + .label = "reset_swb_dc_dc_pwr_fail", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(3), + .mode = 0444, + }, + { + .label = "reset_sw_reset", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0444, + }, + { + .label = "reset_fw_reset", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(5), + .mode = 0444, + }, + { + .label = "reset_swb_wd", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0444, + }, + { + .label = "reset_asic_thermal", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(7), + .mode = 0444, + }, + { + .label = "reset_system", + .reg = NVSW_SN2201_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(1), + .mode = 0444, + }, + { + .label = "reset_sw_pwr_off", + .reg = NVSW_SN2201_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0444, + }, + { + .label = "reset_cpu_pwr_fail_thermal", + .reg = NVSW_SN2201_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0444, + }, + { + .label = "reset_reload_bios", + .reg = NVSW_SN2201_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(5), + .mode = 0444, + }, + { + .label = "reset_ac_pwr_fail", + .reg = NVSW_SN2201_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0444, + }, + { + .label = "psu1", + .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(0), + .mode = 0444, + }, + { + .label = "psu2", + .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(1), + .mode = 0444, + }, +}; + +static struct mlxreg_core_platform_data nvsw_sn2201_regs_io = { + .data = nvsw_sn2201_io_data, + .counter = ARRAY_SIZE(nvsw_sn2201_io_data), +}; + +/* Default watchdog 
data. */ +static struct mlxreg_core_data nvsw_sn2201_wd_data[] = { + { + .label = "action", + .reg = NVSW_SN2201_WD_ACT_OFFSET, + .mask = GENMASK(7, 1), + .bit = 0, + }, + { + .label = "timeout", + .reg = NVSW_SN2201_WD_TMR_OFFSET_LSB, + .mask = 0, + .health_cntr = NVSW_SN2201_WD_DFLT_TIMEOUT, + }, + { + .label = "timeleft", + .reg = NVSW_SN2201_WD_TMR_OFFSET_LSB, + .mask = 0, + }, + { + .label = "ping", + .reg = NVSW_SN2201_WD_ACT_OFFSET, + .mask = GENMASK(7, 1), + .bit = 0, + }, + { + .label = "reset", + .reg = NVSW_SN2201_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .bit = 6, + }, +}; + +static struct mlxreg_core_platform_data nvsw_sn2201_wd = { + .data = nvsw_sn2201_wd_data, + .counter = ARRAY_SIZE(nvsw_sn2201_wd_data), + .version = MLX_WDT_TYPE3, + .identity = "mlx-wdt-main", +}; + +static int +nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201, + struct mlxreg_hotplug_device *devs, + int size) +{ + struct mlxreg_hotplug_device *dev = devs; + int ret; + int i; + + /* Create I2C static devices. */ + for (i = 0; i < size; i++, dev++) { + dev->client = i2c_new_client_device(dev->adapter, dev->brdinfo); + if (IS_ERR(dev->client)) { + dev_err(nvsw_sn2201->dev, "Failed to create client %s at bus %d at addr 0x%02x\n", + dev->brdinfo->type, + dev->nr, dev->brdinfo->addr); + + dev->adapter = NULL; + ret = PTR_ERR(dev->client); + goto fail_create_static_devices; + } + } + + return 0; + +fail_create_static_devices: + while (--i >= 0) { + dev = devs + i; + i2c_unregister_device(dev->client); + dev->client = NULL; + dev->adapter = NULL; + } + return ret; +} + +static void nvsw_sn2201_destroy_static_devices(struct nvsw_sn2201 *nvsw_sn2201, + struct mlxreg_hotplug_device *devs, int size) +{ + struct mlxreg_hotplug_device *dev = devs; + int i; + + /* Destroy static I2C device for SN2201 static devices. */ + for (i = 0; i < size; i++, dev++) { + if (dev->client) { + i2c_unregister_device(dev->client); + dev->client = NULL; + i2c_put_adapter(dev->adapter); + dev->adapter = NULL; + } + } +} + +static int nvsw_sn2201_config_post_init(struct nvsw_sn2201 *nvsw_sn2201) +{ + struct mlxreg_hotplug_device *sn2201_dev; + struct i2c_adapter *adap; + struct device *dev; + int i, err; + + dev = nvsw_sn2201->dev; + adap = i2c_get_adapter(nvsw_sn2201->main_mux_deferred_nr); + if (!adap) { + dev_err(dev, "Failed to get adapter for bus %d\n", + nvsw_sn2201->main_mux_deferred_nr); + return -ENODEV; + } + i2c_put_adapter(adap); + + /* Update board info. */ + sn2201_dev = nvsw_sn2201->sn2201_devs; + for (i = 0; i < nvsw_sn2201->sn2201_devs_num; i++, sn2201_dev++) { + sn2201_dev->adapter = i2c_get_adapter(sn2201_dev->nr); + if (!sn2201_dev->adapter) + return -ENODEV; + i2c_put_adapter(sn2201_dev->adapter); + } + + err = nvsw_sn2201_create_static_devices(nvsw_sn2201, nvsw_sn2201->sn2201_devs, + nvsw_sn2201->sn2201_devs_num); + if (err) + dev_err(dev, "Failed to create static devices\n"); + + return err; +} + +static int nvsw_sn2201_config_init(struct nvsw_sn2201 *nvsw_sn2201, void *regmap) +{ + struct device *dev = nvsw_sn2201->dev; + int err; + + nvsw_sn2201->io_data = &nvsw_sn2201_regs_io; + nvsw_sn2201->led_data = &nvsw_sn2201_led; + nvsw_sn2201->wd_data = &nvsw_sn2201_wd; + nvsw_sn2201->hotplug_data = &nvsw_sn2201_hotplug; + + /* Register IO access driver. 
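The nvsw_sn2201_io_data table registered here becomes a set of sysfs attributes under the mlxreg-io platform device. The mask convention is that the single cleared bit is the bit the attribute reads or writes; for "pwr_cycle", for example (a worked example, no new code):

	/*
	 * GENMASK(7, 0) & ~BIT(2) = 0xff & ~0x04 = 0xfb
	 *
	 * mlxreg-io treats the set bits as preserved and bit 2 as the
	 * attribute's payload bit.
	 */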
*/ + if (nvsw_sn2201->io_data) { + nvsw_sn2201->io_data->regmap = regmap; + nvsw_sn2201->io_regs = + platform_device_register_resndata(dev, "mlxreg-io", PLATFORM_DEVID_NONE, NULL, 0, + nvsw_sn2201->io_data, + sizeof(*nvsw_sn2201->io_data)); + if (IS_ERR(nvsw_sn2201->io_regs)) { + err = PTR_ERR(nvsw_sn2201->io_regs); + goto fail_register_io; + } + } + + /* Register LED driver. */ + if (nvsw_sn2201->led_data) { + nvsw_sn2201->led_data->regmap = regmap; + nvsw_sn2201->led = + platform_device_register_resndata(dev, "leds-mlxreg", PLATFORM_DEVID_NONE, NULL, 0, + nvsw_sn2201->led_data, + sizeof(*nvsw_sn2201->led_data)); + if (IS_ERR(nvsw_sn2201->led)) { + err = PTR_ERR(nvsw_sn2201->led); + goto fail_register_led; + } + } + + /* Register WD driver. */ + if (nvsw_sn2201->wd_data) { + nvsw_sn2201->wd_data->regmap = regmap; + nvsw_sn2201->wd = + platform_device_register_resndata(dev, "mlx-wdt", PLATFORM_DEVID_NONE, NULL, 0, + nvsw_sn2201->wd_data, + sizeof(*nvsw_sn2201->wd_data)); + if (IS_ERR(nvsw_sn2201->wd)) { + err = PTR_ERR(nvsw_sn2201->wd); + goto fail_register_wd; + } + } + + /* Register hotplug driver. */ + if (nvsw_sn2201->hotplug_data) { + nvsw_sn2201->hotplug_data->regmap = regmap; + nvsw_sn2201->pdev_hotplug = + platform_device_register_resndata(dev, "mlxreg-hotplug", PLATFORM_DEVID_NONE, + nvsw_sn2201_cpld_res, + ARRAY_SIZE(nvsw_sn2201_cpld_res), + nvsw_sn2201->hotplug_data, + sizeof(*nvsw_sn2201->hotplug_data)); + if (IS_ERR(nvsw_sn2201->pdev_hotplug)) { + err = PTR_ERR(nvsw_sn2201->pdev_hotplug); + goto fail_register_hotplug; + } + } + + return nvsw_sn2201_config_post_init(nvsw_sn2201); + +fail_register_hotplug: + if (nvsw_sn2201->wd) + platform_device_unregister(nvsw_sn2201->wd); +fail_register_wd: + if (nvsw_sn2201->led) + platform_device_unregister(nvsw_sn2201->led); +fail_register_led: + if (nvsw_sn2201->io_regs) + platform_device_unregister(nvsw_sn2201->io_regs); +fail_register_io: + + return err; +} + +static void nvsw_sn2201_config_exit(struct nvsw_sn2201 *nvsw_sn2201) +{ + /* Unregister hotplug driver. */ + if (nvsw_sn2201->pdev_hotplug) + platform_device_unregister(nvsw_sn2201->pdev_hotplug); + /* Unregister WD driver. */ + if (nvsw_sn2201->wd) + platform_device_unregister(nvsw_sn2201->wd); + /* Unregister LED driver. */ + if (nvsw_sn2201->led) + platform_device_unregister(nvsw_sn2201->led); + /* Unregister IO access driver. */ + if (nvsw_sn2201->io_regs) + platform_device_unregister(nvsw_sn2201->io_regs); +} + +/* + * Initialization is divided into two parts: + * - I2C main bus init. + * - Mux creation and attaching devices to the mux, + * which assumes that the main bus is already created. + * This separation is required for synchronization between these two parts. + * Completion notify callback is used to make this flow synchronized. + */ +static int nvsw_sn2201_i2c_completion_notify(void *handle, int id) +{ + struct nvsw_sn2201 *nvsw_sn2201 = handle; + void *regmap; + int i, err; + + /* Create main mux. 
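This callback implements the second phase of the split initialization described above. The assumed contract with the i2c-mlxcpld controller driver (a sketch of the handshake, not code from this patch) is that, once its adapter and the deferred mux channels exist, it invokes:

	if (pdata->completion_notify)
		pdata->completion_notify(pdata->handle, adap->nr);

so everything in this function runs only once the main I2C bus is usable.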
*/
+ nvsw_sn2201->main_mux_devs->adapter = i2c_get_adapter(nvsw_sn2201->main_mux_devs->nr);
+ if (!nvsw_sn2201->main_mux_devs->adapter) {
+ err = -ENODEV;
+ dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n",
+ nvsw_sn2201->main_mux_devs->nr);
+ goto i2c_get_adapter_main_fail;
+ }
+
+ nvsw_sn2201->main_mux_devs_num = ARRAY_SIZE(nvsw_sn2201_main_mux_brdinfo);
+ err = nvsw_sn2201_create_static_devices(nvsw_sn2201, nvsw_sn2201->main_mux_devs,
+ nvsw_sn2201->main_mux_devs_num);
+ if (err) {
+ dev_err(nvsw_sn2201->dev, "Failed to create main mux devices\n");
+ goto nvsw_sn2201_create_static_devices_fail;
+ }
+
+ nvsw_sn2201->cpld_devs->adapter = i2c_get_adapter(nvsw_sn2201->cpld_devs->nr);
+ if (!nvsw_sn2201->cpld_devs->adapter) {
+ err = -ENODEV;
+ dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n",
+ nvsw_sn2201->cpld_devs->nr);
+ goto i2c_get_adapter_fail;
+ }
+
+ /* Create CPLD device. */
+ nvsw_sn2201->cpld_devs->client = i2c_new_dummy_device(nvsw_sn2201->cpld_devs->adapter,
+ NVSW_SN2201_CPLD_I2CADDR);
+ if (IS_ERR(nvsw_sn2201->cpld_devs->client)) {
+ err = PTR_ERR(nvsw_sn2201->cpld_devs->client);
+ dev_err(nvsw_sn2201->dev, "Failed to create %s cpld device at bus %d at addr 0x%02x\n",
+ nvsw_sn2201->cpld_devs->brdinfo->type, nvsw_sn2201->cpld_devs->nr,
+ nvsw_sn2201->cpld_devs->brdinfo->addr);
+ goto i2c_new_dummy_fail;
+ }
+
+ regmap = devm_regmap_init_i2c(nvsw_sn2201->cpld_devs->client, &nvsw_sn2201_regmap_conf);
+ if (IS_ERR(regmap)) {
+ err = PTR_ERR(regmap);
+ dev_err(nvsw_sn2201->dev, "Failed to initialise managed register map\n");
+ goto devm_regmap_init_i2c_fail;
+ }
+
+ /* Set default registers. */
+ for (i = 0; i < nvsw_sn2201_regmap_conf.num_reg_defaults; i++) {
+ err = regmap_write(regmap, nvsw_sn2201_regmap_default[i].reg,
+ nvsw_sn2201_regmap_default[i].def);
+ if (err) {
+ dev_err(nvsw_sn2201->dev, "Failed to set register at offset 0x%02x to default value: 0x%02x\n",
+ nvsw_sn2201_regmap_default[i].reg,
+ nvsw_sn2201_regmap_default[i].def);
+ goto regmap_write_fail;
+ }
+ }
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(regmap);
+ err = regcache_sync(regmap);
+ if (err) {
+ dev_err(nvsw_sn2201->dev, "Failed to sync registers with hardware\n");
+ goto regcache_sync_fail;
+ }
+
+ /* Configure SN2201 board. */
+ err = nvsw_sn2201_config_init(nvsw_sn2201, regmap);
+ if (err) {
+ dev_err(nvsw_sn2201->dev, "Failed to configure board\n");
+ goto nvsw_sn2201_config_init_fail;
+ }
+
+ return 0;
+
+nvsw_sn2201_config_init_fail:
+ nvsw_sn2201_config_exit(nvsw_sn2201);
+regcache_sync_fail:
+regmap_write_fail:
+devm_regmap_init_i2c_fail:
+i2c_new_dummy_fail:
+ i2c_put_adapter(nvsw_sn2201->cpld_devs->adapter);
+ nvsw_sn2201->cpld_devs->adapter = NULL;
+i2c_get_adapter_fail:
+ /* Destroy SN2201 static I2C devices. */
+ nvsw_sn2201_destroy_static_devices(nvsw_sn2201, nvsw_sn2201->sn2201_devs,
+ nvsw_sn2201->sn2201_devs_num);
+ /* Destroy main mux device. */
+ nvsw_sn2201_destroy_static_devices(nvsw_sn2201, nvsw_sn2201->main_mux_devs,
+ nvsw_sn2201->main_mux_devs_num);
+nvsw_sn2201_create_static_devices_fail:
+ i2c_put_adapter(nvsw_sn2201->main_mux_devs->adapter);
+i2c_get_adapter_main_fail:
+ return err;
+}
+
+static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
+{
+ nvsw_sn2201->i2c_data = &nvsw_sn2201_i2c_data;
+
+ /* Register I2C controller.
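One naming detail worth spelling out for the registration below: platform devices are named "<name>.<id>" when an explicit id is given, so unlike the PLATFORM_DEVID_NONE registrations earlier this one creates "i2c_mlxcpld.1", and the id presumably doubles as the controller's base adapter number so that the numbering worked out earlier holds:

	/*
	 * PLATFORM_DEVID_NONE -> "mlxreg-io", "leds-mlxreg", "mlx-wdt", ...
	 * id = 1              -> "i2c_mlxcpld.1" (NVSW_SN2201_MAIN_MUX_NR)
	 */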
*/ + nvsw_sn2201->i2c_data->handle = nvsw_sn2201; + nvsw_sn2201->i2c_data->completion_notify = nvsw_sn2201_i2c_completion_notify; + nvsw_sn2201->pdev_i2c = platform_device_register_resndata(nvsw_sn2201->dev, "i2c_mlxcpld", + NVSW_SN2201_MAIN_MUX_NR, + nvsw_sn2201_lpc_res, + ARRAY_SIZE(nvsw_sn2201_lpc_res), + nvsw_sn2201->i2c_data, + sizeof(*nvsw_sn2201->i2c_data)); + if (IS_ERR(nvsw_sn2201->pdev_i2c)) + return PTR_ERR(nvsw_sn2201->pdev_i2c); + + return 0; +} + +static int nvsw_sn2201_probe(struct platform_device *pdev) +{ + struct nvsw_sn2201 *nvsw_sn2201; + + nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL); + if (!nvsw_sn2201) + return -ENOMEM; + + nvsw_sn2201->dev = &pdev->dev; + platform_set_drvdata(pdev, nvsw_sn2201); + platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources, + ARRAY_SIZE(nvsw_sn2201_lpc_io_resources)); + + nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR; + nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo; + nvsw_sn2201->cpld_devs = nvsw_sn2201_cpld_brdinfo; + nvsw_sn2201->sn2201_devs = nvsw_sn2201_static_brdinfo; + nvsw_sn2201->sn2201_devs_num = ARRAY_SIZE(nvsw_sn2201_static_brdinfo); + + return nvsw_sn2201_config_pre_init(nvsw_sn2201); +} + +static int nvsw_sn2201_remove(struct platform_device *pdev) +{ + struct nvsw_sn2201 *nvsw_sn2201 = platform_get_drvdata(pdev); + + /* Unregister underlying drivers. */ + nvsw_sn2201_config_exit(nvsw_sn2201); + + /* Destroy SN2201 static I2C devices. */ + nvsw_sn2201_destroy_static_devices(nvsw_sn2201, + nvsw_sn2201->sn2201_devs, + nvsw_sn2201->sn2201_devs_num); + + i2c_put_adapter(nvsw_sn2201->cpld_devs->adapter); + nvsw_sn2201->cpld_devs->adapter = NULL; + /* Destroy main mux device. */ + nvsw_sn2201_destroy_static_devices(nvsw_sn2201, + nvsw_sn2201->main_mux_devs, + nvsw_sn2201->main_mux_devs_num); + + /* Unregister I2C controller. */ + if (nvsw_sn2201->pdev_i2c) + platform_device_unregister(nvsw_sn2201->pdev_i2c); + + return 0; +} + +static const struct acpi_device_id nvsw_sn2201_acpi_ids[] = { + {"NVSN2201", 0}, + {} +}; + +MODULE_DEVICE_TABLE(acpi, nvsw_sn2201_acpi_ids); + +static struct platform_driver nvsw_sn2201_driver = { + .probe = nvsw_sn2201_probe, + .remove = nvsw_sn2201_remove, + .driver = { + .name = "nvsw-sn2201", + .acpi_match_table = nvsw_sn2201_acpi_ids, + }, +}; + +module_platform_driver(nvsw_sn2201_driver); + +MODULE_AUTHOR("Nvidia"); +MODULE_DESCRIPTION("Nvidia sn2201 platform driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_ALIAS("platform:nvsw-sn2201"); diff --git a/drivers/platform/surface/surface_aggregator_hub.c b/drivers/platform/surface/surface_aggregator_hub.c new file mode 100644 index 0000000000..43061514be --- /dev/null +++ b/drivers/platform/surface/surface_aggregator_hub.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Surface System Aggregator Module (SSAM) subsystem device hubs. + * + * Provides a driver for SSAM subsystems device hubs. This driver performs + * instantiation of the devices managed by said hubs and takes care of + * (hot-)removal. + * + * Copyright (C) 2020-2022 Maximilian Luz + */ + +#include +#include +#include +#include +#include + +#include + + +/* -- SSAM generic subsystem hub driver framework. -------------------------- */ + +enum ssam_hub_state { + SSAM_HUB_UNINITIALIZED, /* Only set during initialization. 
*/ + SSAM_HUB_CONNECTED, + SSAM_HUB_DISCONNECTED, +}; + +enum ssam_hub_flags { + SSAM_HUB_HOT_REMOVED, +}; + +struct ssam_hub; + +struct ssam_hub_ops { + int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state); +}; + +struct ssam_hub { + struct ssam_device *sdev; + + enum ssam_hub_state state; + unsigned long flags; + + struct delayed_work update_work; + unsigned long connect_delay; + + struct ssam_event_notifier notif; + struct ssam_hub_ops ops; +}; + +struct ssam_hub_desc { + struct { + struct ssam_event_registry reg; + struct ssam_event_id id; + enum ssam_event_mask mask; + } event; + + struct { + u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event); + int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state); + } ops; + + unsigned long connect_delay_ms; +}; + +static void ssam_hub_update_workfn(struct work_struct *work) +{ + struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work); + enum ssam_hub_state state; + int status = 0; + + status = hub->ops.get_state(hub, &state); + if (status) + return; + + /* + * There is a small possibility that hub devices were hot-removed and + * re-added before we were able to remove them here. In that case, both + * the state returned by get_state() and the state of the hub will + * equal SSAM_HUB_CONNECTED and we would bail early below, which would + * leave child devices without proper (re-)initialization and the + * hot-remove flag set. + * + * Therefore, we check whether devices have been hot-removed via an + * additional flag on the hub and, in this case, override the returned + * hub state. In case of a missed disconnect (i.e. get_state returned + * "connected"), we further need to re-schedule this work (with the + * appropriate delay) as the actual connect work submission might have + * been merged with this one. + * + * This then leads to one of two cases: Either we submit an unnecessary + * work item (which will get ignored via either the queue or the state + * checks) or, in the unlikely case that the work is actually required, + * double the normal connect delay. + */ + if (test_and_clear_bit(SSAM_HUB_HOT_REMOVED, &hub->flags)) { + if (state == SSAM_HUB_CONNECTED) + schedule_delayed_work(&hub->update_work, hub->connect_delay); + + state = SSAM_HUB_DISCONNECTED; + } + + if (hub->state == state) + return; + hub->state = state; + + if (hub->state == SSAM_HUB_CONNECTED) + status = ssam_device_register_clients(hub->sdev); + else + ssam_remove_clients(&hub->sdev->dev); + + if (status) + dev_err(&hub->sdev->dev, "failed to update hub child devices: %d\n", status); +} + +static int ssam_hub_mark_hot_removed(struct device *dev, void *_data) +{ + struct ssam_device *sdev = to_ssam_device(dev); + + if (is_ssam_device(dev)) + ssam_device_mark_hot_removed(sdev); + + return 0; +} + +static void ssam_hub_update(struct ssam_hub *hub, bool connected) +{ + unsigned long delay; + + /* Mark devices as hot-removed before we remove any. */ + if (!connected) { + set_bit(SSAM_HUB_HOT_REMOVED, &hub->flags); + device_for_each_child_reverse(&hub->sdev->dev, NULL, ssam_hub_mark_hot_removed); + } + + /* + * Delay update when the base/keyboard cover is being connected to give + * devices/EC some time to set up. + */ + delay = connected ? 
hub->connect_delay : 0; + + schedule_delayed_work(&hub->update_work, delay); +} + +static int __maybe_unused ssam_hub_resume(struct device *dev) +{ + struct ssam_hub *hub = dev_get_drvdata(dev); + + schedule_delayed_work(&hub->update_work, 0); + return 0; +} +static SIMPLE_DEV_PM_OPS(ssam_hub_pm_ops, NULL, ssam_hub_resume); + +static int ssam_hub_probe(struct ssam_device *sdev) +{ + const struct ssam_hub_desc *desc; + struct ssam_hub *hub; + int status; + + desc = ssam_device_get_match_data(sdev); + if (!desc) { + WARN(1, "no driver match data specified"); + return -EINVAL; + } + + hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL); + if (!hub) + return -ENOMEM; + + hub->sdev = sdev; + hub->state = SSAM_HUB_UNINITIALIZED; + + hub->notif.base.priority = INT_MAX; /* This notifier should run first. */ + hub->notif.base.fn = desc->ops.notify; + hub->notif.event.reg = desc->event.reg; + hub->notif.event.id = desc->event.id; + hub->notif.event.mask = desc->event.mask; + hub->notif.event.flags = SSAM_EVENT_SEQUENCED; + + hub->connect_delay = msecs_to_jiffies(desc->connect_delay_ms); + hub->ops.get_state = desc->ops.get_state; + + INIT_DELAYED_WORK(&hub->update_work, ssam_hub_update_workfn); + + ssam_device_set_drvdata(sdev, hub); + + status = ssam_device_notifier_register(sdev, &hub->notif); + if (status) + return status; + + schedule_delayed_work(&hub->update_work, 0); + return 0; +} + +static void ssam_hub_remove(struct ssam_device *sdev) +{ + struct ssam_hub *hub = ssam_device_get_drvdata(sdev); + + ssam_device_notifier_unregister(sdev, &hub->notif); + cancel_delayed_work_sync(&hub->update_work); + ssam_remove_clients(&sdev->dev); +} + + +/* -- SSAM base-subsystem hub driver. --------------------------------------- */ + +/* + * Some devices (especially battery) may need a bit of time to be fully usable + * after being (re-)connected. This delay has been determined via + * experimentation. + */ +#define SSAM_BASE_UPDATE_CONNECT_DELAY 2500 + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = 0x01, + .command_id = 0x0d, + .instance_id = 0x00, +}); + +#define SSAM_BAS_OPMODE_TABLET 0x00 +#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c + +static int ssam_base_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state) +{ + u8 opmode; + int status; + + status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode); + if (status < 0) { + dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status); + return status; + } + + if (opmode != SSAM_BAS_OPMODE_TABLET) + *state = SSAM_HUB_CONNECTED; + else + *state = SSAM_HUB_DISCONNECTED; + + return 0; +} + +static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif); + + if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION) + return 0; + + if (event->length < 1) { + dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length); + return 0; + } + + ssam_hub_update(hub, event->data[0]); + + /* + * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and + * consumed by the detachment system driver. We're just a (more or less) + * silent observer. 
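For reference, SSAM_DEFINE_SYNC_REQUEST_R(), used above for ssam_bas_query_opmode(), generates a small synchronous-request helper; per the surface_aggregator API it expands to roughly:

	static int ssam_bas_query_opmode(struct ssam_controller *ctrl, u8 *ret);

which is why it can be handed to ssam_retry() together with hub->sdev->ctrl: ssam_retry() simply re-issues the request a bounded number of times on transient transport failures.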
+ */ + return 0; +} + +static const struct ssam_hub_desc base_hub = { + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_BAS, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_NONE, + }, + .ops = { + .notify = ssam_base_hub_notif, + .get_state = ssam_base_hub_query_state, + }, + .connect_delay_ms = SSAM_BASE_UPDATE_CONNECT_DELAY, +}; + + +/* -- SSAM KIP-subsystem hub driver. ---------------------------------------- */ + +/* + * Some devices may need a bit of time to be fully usable after being + * (re-)connected. This delay has been determined via experimentation. + */ +#define SSAM_KIP_UPDATE_CONNECT_DELAY 250 + +#define SSAM_EVENT_KIP_CID_CONNECTION 0x2c + +SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, { + .target_category = SSAM_SSH_TC_KIP, + .target_id = 0x01, + .command_id = 0x2c, + .instance_id = 0x00, +}); + +static int ssam_kip_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state) +{ + int status; + u8 connected; + + status = ssam_retry(__ssam_kip_query_state, hub->sdev->ctrl, &connected); + if (status < 0) { + dev_err(&hub->sdev->dev, "failed to query KIP connection state: %d\n", status); + return status; + } + + *state = connected ? SSAM_HUB_CONNECTED : SSAM_HUB_DISCONNECTED; + return 0; +} + +static u32 ssam_kip_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif); + + if (event->command_id != SSAM_EVENT_KIP_CID_CONNECTION) + return 0; /* Return "unhandled". */ + + if (event->length < 1) { + dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length); + return 0; + } + + ssam_hub_update(hub, event->data[0]); + return SSAM_NOTIF_HANDLED; +} + +static const struct ssam_hub_desc kip_hub = { + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_KIP, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_TARGET, + }, + .ops = { + .notify = ssam_kip_hub_notif, + .get_state = ssam_kip_hub_query_state, + }, + .connect_delay_ms = SSAM_KIP_UPDATE_CONNECT_DELAY, +}; + + +/* -- Driver registration. -------------------------------------------------- */ + +static const struct ssam_device_id ssam_hub_match[] = { + { SSAM_VDEV(HUB, 0x01, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub }, + { SSAM_VDEV(HUB, 0x02, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub }, + { } +}; +MODULE_DEVICE_TABLE(ssam, ssam_hub_match); + +static struct ssam_device_driver ssam_subsystem_hub_driver = { + .probe = ssam_hub_probe, + .remove = ssam_hub_remove, + .match_table = ssam_hub_match, + .driver = { + .name = "surface_aggregator_subsystem_hub", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &ssam_hub_pm_ops, + }, +}; +module_ssam_device_driver(ssam_subsystem_hub_driver); + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Subsystem device hub driver for Surface System Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c new file mode 100644 index 0000000000..27d95a6a78 --- /dev/null +++ b/drivers/platform/surface/surface_aggregator_tabletsw.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Surface System Aggregator Module (SSAM) tablet mode switch driver. + * + * Copyright (C) 2022 Maximilian Luz + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + + +/* -- SSAM generic tablet switch driver framework. 
-------------------------- */ + +struct ssam_tablet_sw; + +struct ssam_tablet_sw_ops { + int (*get_state)(struct ssam_tablet_sw *sw, u32 *state); + const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state); + bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state); +}; + +struct ssam_tablet_sw { + struct ssam_device *sdev; + + u32 state; + struct work_struct update_work; + struct input_dev *mode_switch; + + struct ssam_tablet_sw_ops ops; + struct ssam_event_notifier notif; +}; + +struct ssam_tablet_sw_desc { + struct { + const char *name; + const char *phys; + } dev; + + struct { + u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event); + int (*get_state)(struct ssam_tablet_sw *sw, u32 *state); + const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state); + bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state); + } ops; + + struct { + struct ssam_event_registry reg; + struct ssam_event_id id; + enum ssam_event_mask mask; + u8 flags; + } event; +}; + +static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ssam_tablet_sw *sw = dev_get_drvdata(dev); + const char *state = sw->ops.state_name(sw, sw->state); + + return sysfs_emit(buf, "%s\n", state); +} +static DEVICE_ATTR_RO(state); + +static struct attribute *ssam_tablet_sw_attrs[] = { + &dev_attr_state.attr, + NULL, +}; + +static const struct attribute_group ssam_tablet_sw_group = { + .attrs = ssam_tablet_sw_attrs, +}; + +static void ssam_tablet_sw_update_workfn(struct work_struct *work) +{ + struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work); + int tablet, status; + u32 state; + + status = sw->ops.get_state(sw, &state); + if (status) + return; + + if (sw->state == state) + return; + sw->state = state; + + /* Send SW_TABLET_MODE event. */ + tablet = sw->ops.state_is_tablet_mode(sw, state); + input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet); + input_sync(sw->mode_switch); +} + +static int __maybe_unused ssam_tablet_sw_resume(struct device *dev) +{ + struct ssam_tablet_sw *sw = dev_get_drvdata(dev); + + schedule_work(&sw->update_work); + return 0; +} +static SIMPLE_DEV_PM_OPS(ssam_tablet_sw_pm_ops, NULL, ssam_tablet_sw_resume); + +static int ssam_tablet_sw_probe(struct ssam_device *sdev) +{ + const struct ssam_tablet_sw_desc *desc; + struct ssam_tablet_sw *sw; + int tablet, status; + + desc = ssam_device_get_match_data(sdev); + if (!desc) { + WARN(1, "no driver match data specified"); + return -EINVAL; + } + + sw = devm_kzalloc(&sdev->dev, sizeof(*sw), GFP_KERNEL); + if (!sw) + return -ENOMEM; + + sw->sdev = sdev; + + sw->ops.get_state = desc->ops.get_state; + sw->ops.state_name = desc->ops.state_name; + sw->ops.state_is_tablet_mode = desc->ops.state_is_tablet_mode; + + INIT_WORK(&sw->update_work, ssam_tablet_sw_update_workfn); + + ssam_device_set_drvdata(sdev, sw); + + /* Get initial state. */ + status = sw->ops.get_state(sw, &sw->state); + if (status) + return status; + + /* Set up tablet mode switch. 
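+	 * The switch is exposed as a plain input device that reports
+	 * EV_SW/SW_TABLET_MODE; note that the current state is reported once
+	 * below, before registration, so user space sees a consistent value
+	 * right from the start.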
*/ + sw->mode_switch = devm_input_allocate_device(&sdev->dev); + if (!sw->mode_switch) + return -ENOMEM; + + sw->mode_switch->name = desc->dev.name; + sw->mode_switch->phys = desc->dev.phys; + sw->mode_switch->id.bustype = BUS_HOST; + sw->mode_switch->dev.parent = &sdev->dev; + + tablet = sw->ops.state_is_tablet_mode(sw, sw->state); + input_set_capability(sw->mode_switch, EV_SW, SW_TABLET_MODE); + input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet); + + status = input_register_device(sw->mode_switch); + if (status) + return status; + + /* Set up notifier. */ + sw->notif.base.priority = 0; + sw->notif.base.fn = desc->ops.notify; + sw->notif.event.reg = desc->event.reg; + sw->notif.event.id = desc->event.id; + sw->notif.event.mask = desc->event.mask; + sw->notif.event.flags = SSAM_EVENT_SEQUENCED; + + status = ssam_device_notifier_register(sdev, &sw->notif); + if (status) + return status; + + status = sysfs_create_group(&sdev->dev.kobj, &ssam_tablet_sw_group); + if (status) + goto err; + + /* We might have missed events during setup, so check again. */ + schedule_work(&sw->update_work); + return 0; + +err: + ssam_device_notifier_unregister(sdev, &sw->notif); + cancel_work_sync(&sw->update_work); + return status; +} + +static void ssam_tablet_sw_remove(struct ssam_device *sdev) +{ + struct ssam_tablet_sw *sw = ssam_device_get_drvdata(sdev); + + sysfs_remove_group(&sdev->dev.kobj, &ssam_tablet_sw_group); + + ssam_device_notifier_unregister(sdev, &sw->notif); + cancel_work_sync(&sw->update_work); +} + + +/* -- SSAM KIP tablet switch implementation. -------------------------------- */ + +#define SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED 0x1d + +enum ssam_kip_cover_state { + SSAM_KIP_COVER_STATE_DISCONNECTED = 0x01, + SSAM_KIP_COVER_STATE_CLOSED = 0x02, + SSAM_KIP_COVER_STATE_LAPTOP = 0x03, + SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04, + SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05, +}; + +static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_KIP_COVER_STATE_DISCONNECTED: + return "disconnected"; + + case SSAM_KIP_COVER_STATE_CLOSED: + return "closed"; + + case SSAM_KIP_COVER_STATE_LAPTOP: + return "laptop"; + + case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: + return "folded-canvas"; + + case SSAM_KIP_COVER_STATE_FOLDED_BACK: + return "folded-back"; + + default: + dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state); + return ""; + } +} + +static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_KIP_COVER_STATE_DISCONNECTED: + case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: + case SSAM_KIP_COVER_STATE_FOLDED_BACK: + return true; + + case SSAM_KIP_COVER_STATE_CLOSED: + case SSAM_KIP_COVER_STATE_LAPTOP: + return false; + + default: + dev_warn(&sw->sdev->dev, "unknown KIP cover state: %d\n", sw->state); + return true; + } +} + +SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, { + .target_category = SSAM_SSH_TC_KIP, + .target_id = 0x01, + .command_id = 0x1d, + .instance_id = 0x00, +}); + +static int ssam_kip_get_cover_state(struct ssam_tablet_sw *sw, u32 *state) +{ + int status; + u8 raw; + + status = ssam_retry(__ssam_kip_get_cover_state, sw->sdev->ctrl, &raw); + if (status < 0) { + dev_err(&sw->sdev->dev, "failed to query KIP lid state: %d\n", status); + return status; + } + + *state = raw; + return 0; +} + +static u32 ssam_kip_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_tablet_sw *sw = container_of(nf, struct 
ssam_tablet_sw, notif); + + if (event->command_id != SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED) + return 0; /* Return "unhandled". */ + + if (event->length < 1) + dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length); + + schedule_work(&sw->update_work); + return SSAM_NOTIF_HANDLED; +} + +static const struct ssam_tablet_sw_desc ssam_kip_sw_desc = { + .dev = { + .name = "Microsoft Surface KIP Tablet Mode Switch", + .phys = "ssam/01:0e:01:00:01/input0", + }, + .ops = { + .notify = ssam_kip_sw_notif, + .get_state = ssam_kip_get_cover_state, + .state_name = ssam_kip_cover_state_name, + .state_is_tablet_mode = ssam_kip_cover_state_is_tablet_mode, + }, + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_KIP, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_TARGET, + }, +}; + + +/* -- SSAM POS tablet switch implementation. -------------------------------- */ + +static bool tablet_mode_in_slate_state = true; +module_param(tablet_mode_in_slate_state, bool, 0644); +MODULE_PARM_DESC(tablet_mode_in_slate_state, "Enable tablet mode in slate device posture, default is 'true'"); + +#define SSAM_EVENT_POS_CID_POSTURE_CHANGED 0x03 +#define SSAM_POS_MAX_SOURCES 4 + +enum ssam_pos_state { + SSAM_POS_POSTURE_LID_CLOSED = 0x00, + SSAM_POS_POSTURE_LAPTOP = 0x01, + SSAM_POS_POSTURE_SLATE = 0x02, + SSAM_POS_POSTURE_TABLET = 0x03, +}; + +struct ssam_sources_list { + __le32 count; + __le32 id[SSAM_POS_MAX_SOURCES]; +} __packed; + +static const char *ssam_pos_state_name(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_POS_POSTURE_LID_CLOSED: + return "closed"; + + case SSAM_POS_POSTURE_LAPTOP: + return "laptop"; + + case SSAM_POS_POSTURE_SLATE: + return "slate"; + + case SSAM_POS_POSTURE_TABLET: + return "tablet"; + + default: + dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state); + return ""; + } +} + +static bool ssam_pos_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_POS_POSTURE_LAPTOP: + case SSAM_POS_POSTURE_LID_CLOSED: + return false; + + case SSAM_POS_POSTURE_SLATE: + return tablet_mode_in_slate_state; + + case SSAM_POS_POSTURE_TABLET: + return true; + + default: + dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state); + return true; + } +} + +static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sources_list *sources) +{ + struct ssam_request rqst; + struct ssam_response rsp; + int status; + + rqst.target_category = SSAM_SSH_TC_POS; + rqst.target_id = 0x01; + rqst.command_id = 0x01; + rqst.instance_id = 0x00; + rqst.flags = SSAM_REQUEST_HAS_RESPONSE; + rqst.length = 0; + rqst.payload = NULL; + + rsp.capacity = sizeof(*sources); + rsp.length = 0; + rsp.pointer = (u8 *)sources; + + status = ssam_retry(ssam_request_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0); + if (status) + return status; + + /* We need at least the 'sources->count' field. */ + if (rsp.length < sizeof(__le32)) { + dev_err(&sw->sdev->dev, "received source list response is too small\n"); + return -EPROTO; + } + + /* Make sure 'sources->count' matches with the response length. 
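+	 * The expected layout is one __le32 count field followed by 'count'
+	 * __le32 source IDs, so e.g. a list with a single source yields an
+	 * 8-byte response.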
*/ + if (get_unaligned_le32(&sources->count) * sizeof(__le32) + sizeof(__le32) != rsp.length) { + dev_err(&sw->sdev->dev, "mismatch between number of sources and response size\n"); + return -EPROTO; + } + + return 0; +} + +static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id) +{ + struct ssam_sources_list sources = {}; + int status; + + status = ssam_pos_get_sources_list(sw, &sources); + if (status) + return status; + + if (get_unaligned_le32(&sources.count) == 0) { + dev_err(&sw->sdev->dev, "no posture sources found\n"); + return -ENODEV; + } + + /* + * We currently don't know what to do with more than one posture + * source. At the moment, only one source seems to be used/provided. + * The WARN_ON() here should hopefully let us know quickly once there + * is a device that provides multiple sources, at which point we can + * then try to figure out how to handle them. + */ + WARN_ON(get_unaligned_le32(&sources.count) > 1); + + *source_id = get_unaligned_le32(&sources.id[0]); + return 0; +} + +SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, { + .target_category = SSAM_SSH_TC_POS, + .target_id = 0x01, + .command_id = 0x02, + .instance_id = 0x00, +}); + +static int ssam_pos_get_posture_for_source(struct ssam_tablet_sw *sw, u32 source_id, u32 *posture) +{ + __le32 source_le = cpu_to_le32(source_id); + __le32 rspval_le = 0; + int status; + + status = ssam_retry(__ssam_pos_get_posture_for_source, sw->sdev->ctrl, + &source_le, &rspval_le); + if (status) + return status; + + *posture = le32_to_cpu(rspval_le); + return 0; +} + +static int ssam_pos_get_posture(struct ssam_tablet_sw *sw, u32 *state) +{ + u32 source_id; + int status; + + status = ssam_pos_get_source(sw, &source_id); + if (status) { + dev_err(&sw->sdev->dev, "failed to get posture source ID: %d\n", status); + return status; + } + + status = ssam_pos_get_posture_for_source(sw, source_id, state); + if (status) { + dev_err(&sw->sdev->dev, "failed to get posture value for source %u: %d\n", + source_id, status); + return status; + } + + return 0; +} + +static u32 ssam_pos_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif); + + if (event->command_id != SSAM_EVENT_POS_CID_POSTURE_CHANGED) + return 0; /* Return "unhandled". */ + + if (event->length != sizeof(__le32) * 3) + dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length); + + schedule_work(&sw->update_work); + return SSAM_NOTIF_HANDLED; +} + +static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = { + .dev = { + .name = "Microsoft Surface POS Tablet Mode Switch", + .phys = "ssam/01:26:01:00:01/input0", + }, + .ops = { + .notify = ssam_pos_sw_notif, + .get_state = ssam_pos_get_posture, + .state_name = ssam_pos_state_name, + .state_is_tablet_mode = ssam_pos_state_is_tablet_mode, + }, + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_POS, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_TARGET, + }, +}; + + +/* -- Driver registration. 
 -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_tablet_sw_match[] = {
+	{ SSAM_SDEV(KIP, 0x01, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
+	{ SSAM_SDEV(POS, 0x01, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
+	{ },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match);
+
+static struct ssam_device_driver ssam_tablet_sw_driver = {
+	.probe = ssam_tablet_sw_probe,
+	.remove = ssam_tablet_sw_remove,
+	.match_table = ssam_tablet_sw_match,
+	.driver = {
+		.name = "surface_aggregator_tablet_mode_switch",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm = &ssam_tablet_sw_pm_ops,
+	},
+};
+module_ssam_device_driver(ssam_tablet_sw_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Tablet mode switch driver for Surface devices using the Surface Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
new file mode 100644
index 0000000000..c0d0a3c517
--- /dev/null
+++ b/drivers/platform/x86/amd/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD x86 Platform Specific Drivers
+#
+
+config AMD_PMC
+	tristate "AMD SoC PMC driver"
+	depends on ACPI && PCI && RTC_CLASS
+	help
+	  The driver provides support for the AMD Power Management Controller,
+	  which is primarily responsible for the S2Idle transactions driven by
+	  platform firmware running on the SMU. This driver also provides a
+	  debug mechanism to investigate S2Idle transactions and failures.
+
+	  Say Y or M here if you have a notebook powered by an AMD Ryzen CPU/APU.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd-pmc.
+
+config AMD_HSMP
+	tristate "AMD HSMP Driver"
+	depends on AMD_NB && X86_64
+	help
+	  The driver provides a way for user space tools to monitor and manage
+	  system management functionality on EPYC server CPUs from AMD.
+
+	  The Host System Management Port (HSMP) interface is a mailbox
+	  interface between the x86 core and the System Management Unit (SMU)
+	  firmware.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd_hsmp.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
new file mode 100644
index 0000000000..a03fbb08e8
--- /dev/null
+++ b/drivers/platform/x86/amd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/amd
+# AMD x86 Platform-Specific Drivers
+#
+
+amd-pmc-y		:= pmc.o
+obj-$(CONFIG_AMD_PMC)	+= amd-pmc.o
+amd_hsmp-y		:= hsmp.o
+obj-$(CONFIG_AMD_HSMP)	+= amd_hsmp.o
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
new file mode 100644
index 0000000000..a0c54b838c
--- /dev/null
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD HSMP Platform Driver
+ * Copyright (c) 2022, AMD.
+ * All Rights Reserved.
+ *
+ * This file provides a device implementation for the HSMP interface.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_hsmp.h>
+#include <asm/amd_nb.h>
+
+#include <linux/delay.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#define DRIVER_NAME		"amd_hsmp"
+#define DRIVER_VERSION		"1.0"
+
+/* HSMP Status / Error codes */
+#define HSMP_STATUS_NOT_READY	0x00
+#define HSMP_STATUS_OK		0x01
+#define HSMP_ERR_INVALID_MSG	0xFE
+#define HSMP_ERR_INVALID_INPUT	0xFF
+
+/* Timeouts in milliseconds */
+#define HSMP_MSG_TIMEOUT	100
+#define HSMP_SHORT_SLEEP	1
+
+#define HSMP_WR			true
+#define HSMP_RD			false
+
+/*
+ * To access a specific HSMP mailbox register, software writes the SMN address
+ * of that register into the SMN_INDEX register and then reads/writes the
+ * SMN_DATA register. Below are the SMN addresses of the HSMP mailbox
+ * register offsets in the SMU address space.
+ */
+#define SMN_HSMP_MSG_ID		0x3B10534
+#define SMN_HSMP_MSG_RESP	0x3B10980
+#define SMN_HSMP_MSG_DATA	0x3B109E0
+
+#define HSMP_INDEX_REG		0xc4
+#define HSMP_DATA_REG		0xc8
+
+static struct semaphore *hsmp_sem;
+
+static struct miscdevice hsmp_device;
+
+static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
+			 u32 *value, bool write)
+{
+	int ret;
+
+	ret = pci_write_config_dword(root, HSMP_INDEX_REG, address);
+	if (ret)
+		return ret;
+
+	ret = (write ? pci_write_config_dword(root, HSMP_DATA_REG, *value)
+		     : pci_read_config_dword(root, HSMP_DATA_REG, value));
+
+	return ret;
+}
+
+/*
+ * Send a message to the HSMP port via PCI-e config space registers.
+ *
+ * The caller is expected to zero out any unused arguments.
+ * If a response is expected, the number of response words should be greater than 0.
+ *
+ * Returns 0 for success and populates the requested number of arguments.
+ * Returns a negative error code for failure.
+ */
+static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
+{
+	unsigned long timeout, short_sleep;
+	u32 mbox_status;
+	u32 index;
+	int ret;
+
+	/* Clear the status register */
+	mbox_status = HSMP_STATUS_NOT_READY;
+	ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_WR);
+	if (ret) {
+		pr_err("Error %d clearing mailbox status register\n", ret);
+		return ret;
+	}
+
+	index = 0;
+	/* Write any message arguments */
+	while (index < msg->num_args) {
+		ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+				    &msg->args[index], HSMP_WR);
+		if (ret) {
+			pr_err("Error %d writing message argument %d\n", ret, index);
+			return ret;
+		}
+		index++;
+	}
+
+	/* Write the message ID which starts the operation */
+	ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_ID, &msg->msg_id, HSMP_WR);
+	if (ret) {
+		pr_err("Error %d writing message ID %u\n", ret, msg->msg_id);
+		return ret;
+	}
+
+	/*
+	 * Depending on when the trigger write completes relative to the SMU
+	 * firmware 1 ms cycle, the operation may take from tens of us to 1 ms
+	 * to complete. Some operations may take more. Therefore we will try
+	 * a few short duration sleeps and switch to long sleeps if we don't
+	 * succeed quickly.
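+	 * For example, with HSMP_SHORT_SLEEP = 1 and HSMP_MSG_TIMEOUT = 100,
+	 * the loop below polls every 50-100 us during the first millisecond
+	 * and every 1-2 ms afterwards, giving up after roughly 100 ms.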
+	 */
+	short_sleep = jiffies + msecs_to_jiffies(HSMP_SHORT_SLEEP);
+	timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
+
+	while (time_before(jiffies, timeout)) {
+		ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_RD);
+		if (ret) {
+			pr_err("Error %d reading mailbox status\n", ret);
+			return ret;
+		}
+
+		if (mbox_status != HSMP_STATUS_NOT_READY)
+			break;
+		if (time_before(jiffies, short_sleep))
+			usleep_range(50, 100);
+		else
+			usleep_range(1000, 2000);
+	}
+
+	if (unlikely(mbox_status == HSMP_STATUS_NOT_READY)) {
+		return -ETIMEDOUT;
+	} else if (unlikely(mbox_status == HSMP_ERR_INVALID_MSG)) {
+		return -ENOMSG;
+	} else if (unlikely(mbox_status == HSMP_ERR_INVALID_INPUT)) {
+		return -EINVAL;
+	} else if (unlikely(mbox_status != HSMP_STATUS_OK)) {
+		pr_err("Message ID %u unknown failure (status = 0x%X)\n",
+		       msg->msg_id, mbox_status);
+		return -EIO;
+	}
+
+	/*
+	 * SMU has responded OK. Read response data.
+	 * SMU reads the input arguments from eight 32 bit registers starting
+	 * from SMN_HSMP_MSG_DATA and writes the response data to the same
+	 * SMN_HSMP_MSG_DATA address.
+	 * We copy the response data, if any, back to the args[].
+	 */
+	index = 0;
+	while (index < msg->response_sz) {
+		ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+				    &msg->args[index], HSMP_RD);
+		if (ret) {
+			pr_err("Error %d reading response %u for message ID:%u\n",
+			       ret, index, msg->msg_id);
+			break;
+		}
+		index++;
+	}
+
+	return ret;
+}
+
+static int validate_message(struct hsmp_message *msg)
+{
+	/* msg_id against valid range of message IDs */
+	if (msg->msg_id < HSMP_TEST || msg->msg_id >= HSMP_MSG_ID_MAX)
+		return -ENOMSG;
+
+	/* msg_id is a reserved message ID */
+	if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD)
+		return -ENOMSG;
+
+	/* num_args and response_sz against the HSMP spec */
+	if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args ||
+	    msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+		return -EINVAL;
+
+	return 0;
+}
+
+int hsmp_send_message(struct hsmp_message *msg)
+{
+	struct amd_northbridge *nb;
+	int ret;
+
+	if (!msg)
+		return -EINVAL;
+
+	nb = node_to_amd_nb(msg->sock_ind);
+	if (!nb || !nb->root)
+		return -ENODEV;
+
+	ret = validate_message(msg);
+	if (ret)
+		return ret;
+
+	/*
+	 * An SMU operation typically completes within 10 us to 1 ms, but may
+	 * occasionally take longer. On an SMP system, a timeout of 100 ms
+	 * should be enough for any previous holder of the semaphore to finish
+	 * its operation.
+	 */
+	ret = down_timeout(&hsmp_sem[msg->sock_ind],
+			   msecs_to_jiffies(HSMP_MSG_TIMEOUT));
+	if (ret < 0)
+		return ret;
+
+	ret = __hsmp_send_message(nb->root, msg);
+
+	up(&hsmp_sem[msg->sock_ind]);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hsmp_send_message);
+
+static int hsmp_test(u16 sock_ind, u32 value)
+{
+	struct hsmp_message msg = { 0 };
+	struct amd_northbridge *nb;
+	int ret = -ENODEV;
+
+	nb = node_to_amd_nb(sock_ind);
+	if (!nb || !nb->root)
+		return ret;
+
+	/*
+	 * Test the HSMP port by performing the TEST command. The test message
+	 * takes one argument and returns the value of that argument + 1.
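+	 *
+	 * This also doubles as a usage sketch for the exported
+	 * hsmp_send_message() interface; a caller fills a struct hsmp_message
+	 * (see asm/amd_hsmp.h) the same way:
+	 *
+	 *	struct hsmp_message msg = { 0 };
+	 *
+	 *	msg.msg_id      = HSMP_TEST;
+	 *	msg.num_args    = 1;
+	 *	msg.response_sz = 1;
+	 *	msg.args[0]     = 0xDEADBEEF;
+	 *	msg.sock_ind    = 0;
+	 *	ret = hsmp_send_message(&msg);
+	 *
+	 * with any response words returned in msg.args[0..response_sz - 1].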
+	 */
+	msg.msg_id = HSMP_TEST;
+	msg.num_args = 1;
+	msg.response_sz = 1;
+	msg.args[0] = value;
+	msg.sock_ind = sock_ind;
+
+	ret = __hsmp_send_message(nb->root, &msg);
+	if (ret)
+		return ret;
+
+	/* Check the response value */
+	if (msg.args[0] != (value + 1)) {
+		pr_err("Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
+		       sock_ind, (value + 1), msg.args[0]);
+		return -EBADE;
+	}

+	return ret;
+}
+
+static long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	int __user *arguser = (int  __user *)arg;
+	struct hsmp_message msg = { 0 };
+	int ret;
+
+	if (copy_struct_from_user(&msg, sizeof(msg), arguser, sizeof(struct hsmp_message)))
+		return -EFAULT;
+
+	/*
+	 * Check that msg_id is within the range of supported msg ids,
+	 * i.e. within the array bounds of hsmp_msg_desc_table.
+	 */
+	if (msg.msg_id < HSMP_TEST || msg.msg_id >= HSMP_MSG_ID_MAX)
+		return -ENOMSG;
+
+	switch (fp->f_mode & (FMODE_WRITE | FMODE_READ)) {
+	case FMODE_WRITE:
+		/*
+		 * Device is opened in O_WRONLY mode
+		 * Execute only set/configure commands
+		 */
+		if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_SET)
+			return -EINVAL;
+		break;
+	case FMODE_READ:
+		/*
+		 * Device is opened in O_RDONLY mode
+		 * Execute only get/monitor commands
+		 */
+		if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_GET)
+			return -EINVAL;
+		break;
+	case FMODE_READ | FMODE_WRITE:
+		/*
+		 * Device is opened in O_RDWR mode
+		 * Execute both get/monitor and set/configure commands
+		 */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = hsmp_send_message(&msg);
+	if (ret)
+		return ret;
+
+	if (hsmp_msg_desc_table[msg.msg_id].response_sz > 0) {
+		/* Copy results back to user for get/monitor commands */
+		if (copy_to_user(arguser, &msg, sizeof(struct hsmp_message)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static const struct file_operations hsmp_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= hsmp_ioctl,
+	.compat_ioctl	= hsmp_ioctl,
+};
+
+static int hsmp_pltdrv_probe(struct platform_device *pdev)
+{
+	int i;
+
+	hsmp_sem = devm_kzalloc(&pdev->dev,
+				(amd_nb_num() * sizeof(struct semaphore)),
+				GFP_KERNEL);
+	if (!hsmp_sem)
+		return -ENOMEM;
+
+	for (i = 0; i < amd_nb_num(); i++)
+		sema_init(&hsmp_sem[i], 1);
+
+	hsmp_device.name	= "hsmp_cdev";
+	hsmp_device.minor	= MISC_DYNAMIC_MINOR;
+	hsmp_device.fops	= &hsmp_fops;
+	hsmp_device.parent	= &pdev->dev;
+	hsmp_device.nodename	= "hsmp";
+	hsmp_device.mode	= 0644;
+
+	return misc_register(&hsmp_device);
+}
+
+static int hsmp_pltdrv_remove(struct platform_device *pdev)
+{
+	misc_deregister(&hsmp_device);
+
+	return 0;
+}
+
+static struct platform_driver amd_hsmp_driver = {
+	.probe		= hsmp_pltdrv_probe,
+	.remove		= hsmp_pltdrv_remove,
+	.driver		= {
+		.name	= DRIVER_NAME,
+	},
+};
+
+static struct platform_device *amd_hsmp_platdev;
+
+static int __init hsmp_plt_init(void)
+{
+	int ret = -ENODEV;
+	u16 num_sockets;
+	int i;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
+		pr_err("HSMP is not supported on Family:%x model:%x\n",
+		       boot_cpu_data.x86, boot_cpu_data.x86_model);
+		return ret;
+	}
+
+	/*
+	 * amd_nb_num() returns the number of SMN/DF interfaces present in the
+	 * system; if there are N SMN/DF interfaces, that ideally means N sockets.
+	 */
+	num_sockets = amd_nb_num();
+	if (num_sockets == 0)
+		return ret;
+
+	/* Test the hsmp interface on each socket */
+	for (i = 0; i < num_sockets; i++) {
+		ret = hsmp_test(i, 0xDEADBEEF);
+		if (ret) {
+			pr_err("HSMP is not supported on Fam:%x model:%x\n",
+			       boot_cpu_data.x86, boot_cpu_data.x86_model);
+
pr_err("Or Is HSMP disabled in BIOS ?\n"); + return -EOPNOTSUPP; + } + } + + ret = platform_driver_register(&amd_hsmp_driver); + if (ret) + return ret; + + amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, -1); + if (!amd_hsmp_platdev) { + ret = -ENOMEM; + goto drv_unregister; + } + + ret = platform_device_add(amd_hsmp_platdev); + if (ret) { + platform_device_put(amd_hsmp_platdev); + goto drv_unregister; + } + + return 0; + +drv_unregister: + platform_driver_unregister(&amd_hsmp_driver); + return ret; +} + +static void __exit hsmp_plt_exit(void) +{ + platform_device_unregister(amd_hsmp_platdev); + platform_driver_unregister(&amd_hsmp_driver); +} + +device_initcall(hsmp_plt_init); +module_exit(hsmp_plt_exit); + +MODULE_DESCRIPTION("AMD HSMP Platform Interface Driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c new file mode 100644 index 0000000000..700eb19e84 --- /dev/null +++ b/drivers/platform/x86/amd/pmc.c @@ -0,0 +1,945 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD SoC Power Management Controller Driver + * + * Copyright (c) 2020, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Shyam Sundar S K + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* SMU communication registers */ +#define AMD_PMC_REGISTER_MESSAGE 0x538 +#define AMD_PMC_REGISTER_RESPONSE 0x980 +#define AMD_PMC_REGISTER_ARGUMENT 0x9BC + +/* PMC Scratch Registers */ +#define AMD_PMC_SCRATCH_REG_CZN 0x94 +#define AMD_PMC_SCRATCH_REG_YC 0xD14 + +/* STB Registers */ +#define AMD_PMC_STB_INDEX_ADDRESS 0xF8 +#define AMD_PMC_STB_INDEX_DATA 0xFC +#define AMD_PMC_STB_PMI_0 0x03E30600 +#define AMD_PMC_STB_PREDEF 0xC6000001 + +/* STB S2D(Spill to DRAM) has different message port offset */ +#define STB_SPILL_TO_DRAM 0xBE +#define AMD_S2D_REGISTER_MESSAGE 0xA20 +#define AMD_S2D_REGISTER_RESPONSE 0xA80 +#define AMD_S2D_REGISTER_ARGUMENT 0xA88 + +/* STB Spill to DRAM Parameters */ +#define S2D_TELEMETRY_BYTES_MAX 0x100000 +#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000 + +/* Base address of SMU for mapping physical address to virtual address */ +#define AMD_PMC_SMU_INDEX_ADDRESS 0xB8 +#define AMD_PMC_SMU_INDEX_DATA 0xBC +#define AMD_PMC_MAPPING_SIZE 0x01000 +#define AMD_PMC_BASE_ADDR_OFFSET 0x10000 +#define AMD_PMC_BASE_ADDR_LO 0x13B102E8 +#define AMD_PMC_BASE_ADDR_HI 0x13B102EC +#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0) +#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20) + +/* SMU Response Codes */ +#define AMD_PMC_RESULT_OK 0x01 +#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC +#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD +#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE +#define AMD_PMC_RESULT_FAILED 0xFF + +/* FCH SSC Registers */ +#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30 +#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34 +#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38 +#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C +#define FCH_SSC_MAPPING_SIZE 0x800 +#define FCH_BASE_PHY_ADDR_LOW 0xFED81100 +#define FCH_BASE_PHY_ADDR_HIGH 0x00000000 + +/* SMU Message Definations */ +#define SMU_MSG_GETSMUVERSION 0x02 +#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04 +#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05 +#define SMU_MSG_LOG_START 0x06 +#define SMU_MSG_LOG_RESET 0x07 +#define SMU_MSG_LOG_DUMP_DATA 0x08 +#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09 +/* List of supported CPU ids */ +#define 
AMD_CPU_ID_RV 0x15D0 +#define AMD_CPU_ID_RN 0x1630 +#define AMD_CPU_ID_PCO AMD_CPU_ID_RV +#define AMD_CPU_ID_CZN AMD_CPU_ID_RN +#define AMD_CPU_ID_YC 0x14B5 +#define AMD_CPU_ID_CB 0x14D8 +#define AMD_CPU_ID_PS 0x14E8 + +#define PMC_MSG_DELAY_MIN_US 50 +#define RESPONSE_REGISTER_LOOP_MAX 20000 + +#define SOC_SUBSYSTEM_IP_MAX 12 +#define DELAY_MIN_US 2000 +#define DELAY_MAX_US 3000 +#define FIFO_SIZE 4096 +enum amd_pmc_def { + MSG_TEST = 0x01, + MSG_OS_HINT_PCO, + MSG_OS_HINT_RN, +}; + +enum s2d_arg { + S2D_TELEMETRY_SIZE = 0x01, + S2D_PHYS_ADDR_LOW, + S2D_PHYS_ADDR_HIGH, +}; + +struct amd_pmc_bit_map { + const char *name; + u32 bit_mask; +}; + +static const struct amd_pmc_bit_map soc15_ip_blk[] = { + {"DISPLAY", BIT(0)}, + {"CPU", BIT(1)}, + {"GFX", BIT(2)}, + {"VDD", BIT(3)}, + {"ACP", BIT(4)}, + {"VCN", BIT(5)}, + {"ISP", BIT(6)}, + {"NBIO", BIT(7)}, + {"DF", BIT(8)}, + {"USB0", BIT(9)}, + {"USB1", BIT(10)}, + {"LAPIC", BIT(11)}, + {} +}; + +struct amd_pmc_dev { + void __iomem *regbase; + void __iomem *smu_virt_addr; + void __iomem *stb_virt_addr; + void __iomem *fch_virt_addr; + bool msg_port; + u32 base_addr; + u32 cpu_id; + u32 active_ips; +/* SMU version information */ + u8 smu_program; + u8 major; + u8 minor; + u8 rev; + struct device *dev; + struct pci_dev *rdev; + struct mutex lock; /* generic mutex lock */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ +}; + +static bool enable_stb; +module_param(enable_stb, bool, 0644); +MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism"); + +static struct amd_pmc_dev pmc; +static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret); +static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf); +#ifdef CONFIG_SUSPEND +static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data); +#endif + +static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset) +{ + return ioread32(dev->regbase + reg_offset); +} + +static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val) +{ + iowrite32(val, dev->regbase + reg_offset); +} + +struct smu_metrics { + u32 table_version; + u32 hint_count; + u32 s0i3_last_entry_status; + u32 timein_s0i2; + u64 timeentering_s0i3_lastcapture; + u64 timeentering_s0i3_totaltime; + u64 timeto_resume_to_os_lastcapture; + u64 timeto_resume_to_os_totaltime; + u64 timein_s0i3_lastcapture; + u64 timein_s0i3_totaltime; + u64 timein_swdrips_lastcapture; + u64 timein_swdrips_totaltime; + u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX]; + u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX]; +} __packed; + +static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp) +{ + struct amd_pmc_dev *dev = filp->f_inode->i_private; + u32 size = FIFO_SIZE * sizeof(u32); + u32 *buf; + int rc; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + rc = amd_pmc_read_stb(dev, buf); + if (rc) { + kfree(buf); + return rc; + } + + filp->private_data = buf; + return rc; +} + +static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + if (!filp->private_data) + return -EINVAL; + + return simple_read_from_buffer(buf, size, pos, filp->private_data, + FIFO_SIZE * sizeof(u32)); +} + +static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp) +{ + kfree(filp->private_data); + return 0; +} + +static const struct file_operations amd_pmc_stb_debugfs_fops = { + .owner = THIS_MODULE, + .open = amd_pmc_stb_debugfs_open, + 
.read = amd_pmc_stb_debugfs_read, + .release = amd_pmc_stb_debugfs_release, +}; + +static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp) +{ + struct amd_pmc_dev *dev = filp->f_inode->i_private; + u32 *buf; + + buf = kzalloc(S2D_TELEMETRY_BYTES_MAX, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy_fromio(buf, dev->stb_virt_addr, S2D_TELEMETRY_BYTES_MAX); + filp->private_data = buf; + + return 0; +} + +static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + if (!filp->private_data) + return -EINVAL; + + return simple_read_from_buffer(buf, size, pos, filp->private_data, + S2D_TELEMETRY_BYTES_MAX); +} + +static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp) +{ + kfree(filp->private_data); + return 0; +} + +static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = { + .owner = THIS_MODULE, + .open = amd_pmc_stb_debugfs_open_v2, + .read = amd_pmc_stb_debugfs_read_v2, + .release = amd_pmc_stb_debugfs_release_v2, +}; + +#if defined(CONFIG_SUSPEND) || defined(CONFIG_DEBUG_FS) +static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) +{ + if (dev->cpu_id == AMD_CPU_ID_PCO) { + dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n"); + return -EINVAL; + } + + /* Get Active devices list from SMU */ + if (!dev->active_ips) + amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1); + + /* Get dram address */ + if (!dev->smu_virt_addr) { + u32 phys_addr_low, phys_addr_hi; + u64 smu_phys_addr; + + amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1); + amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1); + smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); + + dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, + sizeof(struct smu_metrics)); + if (!dev->smu_virt_addr) + return -ENOMEM; + } + + /* Start the logging */ + amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, 0); + amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0); + + return 0; +} + +static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev, + struct seq_file *s) +{ + u32 val; + + switch (pdev->cpu_id) { + case AMD_CPU_ID_CZN: + val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN); + break; + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + case AMD_CPU_ID_PS: + val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC); + break; + default: + return -EINVAL; + } + + if (dev) + dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val); + + if (s) + seq_printf(s, "SMU idlemask : 0x%x\n", val); + + return 0; +} + +static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table) +{ + if (!pdev->smu_virt_addr) { + int ret = amd_pmc_setup_smu_logging(pdev); + + if (ret) + return ret; + } + + if (pdev->cpu_id == AMD_CPU_ID_PCO) + return -ENODEV; + memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics)); + return 0; +} +#endif /* CONFIG_SUSPEND || CONFIG_DEBUG_FS */ + +#ifdef CONFIG_SUSPEND +static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev) +{ + struct smu_metrics table; + + if (get_metrics_table(pdev, &table)) + return; + + if (!table.s0i3_last_entry_status) + dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n"); + else + dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n", + table.timein_s0i3_lastcapture); +} +#endif + +#ifdef CONFIG_DEBUG_FS +static int smu_fw_info_show(struct seq_file *s, void *unused) +{ + struct amd_pmc_dev *dev = s->private; + struct 
smu_metrics table; + int idx; + + if (get_metrics_table(dev, &table)) + return -EINVAL; + + seq_puts(s, "\n=== SMU Statistics ===\n"); + seq_printf(s, "Table Version: %d\n", table.table_version); + seq_printf(s, "Hint Count: %d\n", table.hint_count); + seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" : + "Unknown/Fail"); + seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture); + seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture); + seq_printf(s, "Time (in us) to resume from S0i3: %lld\n", + table.timeto_resume_to_os_lastcapture); + + seq_puts(s, "\n=== Active time (in us) ===\n"); + for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) { + if (soc15_ip_blk[idx].bit_mask & dev->active_ips) + seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name, + table.timecondition_notmet_lastcapture[idx]); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(smu_fw_info); + +static int s0ix_stats_show(struct seq_file *s, void *unused) +{ + struct amd_pmc_dev *dev = s->private; + u64 entry_time, exit_time, residency; + + /* Use FCH registers to get the S0ix stats */ + if (!dev->fch_virt_addr) { + u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW; + u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH; + u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo); + + dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE); + if (!dev->fch_virt_addr) + return -ENOMEM; + } + + entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET); + entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET); + + exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET); + exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET); + + /* It's in 48MHz. 
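+	 * (48 ticks per microsecond, i.e. dividing the tick delta by 48 yields
+	 * microseconds; e.g. 48,000,000 ticks correspond to one second).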
We need to convert it */ + residency = exit_time - entry_time; + do_div(residency, 48); + + seq_puts(s, "=== S0ix statistics ===\n"); + seq_printf(s, "S0ix Entry Time: %lld\n", entry_time); + seq_printf(s, "S0ix Exit Time: %lld\n", exit_time); + seq_printf(s, "Residency Time: %lld\n", residency); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(s0ix_stats); + +static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev) +{ + int rc; + u32 val; + + rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1); + if (rc) + return rc; + + dev->smu_program = (val >> 24) & GENMASK(7, 0); + dev->major = (val >> 16) & GENMASK(7, 0); + dev->minor = (val >> 8) & GENMASK(7, 0); + dev->rev = (val >> 0) & GENMASK(7, 0); + + dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n", + dev->smu_program, dev->major, dev->minor, dev->rev); + + return 0; +} + +static int amd_pmc_idlemask_show(struct seq_file *s, void *unused) +{ + struct amd_pmc_dev *dev = s->private; + int rc; + + /* we haven't yet read SMU version */ + if (!dev->major) { + rc = amd_pmc_get_smu_version(dev); + if (rc) + return rc; + } + + if (dev->major > 56 || (dev->major >= 55 && dev->minor >= 37)) { + rc = amd_pmc_idlemask_read(dev, NULL, s); + if (rc) + return rc; + } else { + seq_puts(s, "Unsupported SMU version for Idlemask\n"); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask); + +static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev) +{ + debugfs_remove_recursive(dev->dbgfs_dir); +} + +static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) +{ + dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL); + debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev, + &smu_fw_info_fops); + debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev, + &s0ix_stats_fops); + debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev, + &amd_pmc_idlemask_fops); + /* Enable STB only when the module_param is set */ + if (enable_stb) { + if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB || + dev->cpu_id == AMD_CPU_ID_PS) + debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, + &amd_pmc_stb_debugfs_fops_v2); + else + debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, + &amd_pmc_stb_debugfs_fops); + } +} +#else +static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) +{ +} + +static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +static void amd_pmc_dump_registers(struct amd_pmc_dev *dev) +{ + u32 value, message, argument, response; + + if (dev->msg_port) { + message = AMD_S2D_REGISTER_MESSAGE; + argument = AMD_S2D_REGISTER_ARGUMENT; + response = AMD_S2D_REGISTER_RESPONSE; + } else { + message = AMD_PMC_REGISTER_MESSAGE; + argument = AMD_PMC_REGISTER_ARGUMENT; + response = AMD_PMC_REGISTER_RESPONSE; + } + + value = amd_pmc_reg_read(dev, response); + dev_dbg(dev->dev, "AMD_PMC_REGISTER_RESPONSE:%x\n", value); + + value = amd_pmc_reg_read(dev, argument); + dev_dbg(dev->dev, "AMD_PMC_REGISTER_ARGUMENT:%x\n", value); + + value = amd_pmc_reg_read(dev, message); + dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value); +} + +static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret) +{ + int rc; + u32 val, message, argument, response; + + mutex_lock(&dev->lock); + + if (dev->msg_port) { + message = AMD_S2D_REGISTER_MESSAGE; + argument = AMD_S2D_REGISTER_ARGUMENT; + response = AMD_S2D_REGISTER_RESPONSE; + } else { + message = AMD_PMC_REGISTER_MESSAGE; + argument = AMD_PMC_REGISTER_ARGUMENT; + response = 
AMD_PMC_REGISTER_RESPONSE; + } + + /* Wait until we get a valid response */ + rc = readx_poll_timeout(ioread32, dev->regbase + response, + val, val != 0, PMC_MSG_DELAY_MIN_US, + PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); + if (rc) { + dev_err(dev->dev, "failed to talk to SMU\n"); + goto out_unlock; + } + + /* Write zero to response register */ + amd_pmc_reg_write(dev, response, 0); + + /* Write argument into response register */ + amd_pmc_reg_write(dev, argument, arg); + + /* Write message ID to message ID register */ + amd_pmc_reg_write(dev, message, msg); + + /* Wait until we get a valid response */ + rc = readx_poll_timeout(ioread32, dev->regbase + response, + val, val != 0, PMC_MSG_DELAY_MIN_US, + PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); + if (rc) { + dev_err(dev->dev, "SMU response timed out\n"); + goto out_unlock; + } + + switch (val) { + case AMD_PMC_RESULT_OK: + if (ret) { + /* PMFW may take longer time to return back the data */ + usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US); + *data = amd_pmc_reg_read(dev, argument); + } + break; + case AMD_PMC_RESULT_CMD_REJECT_BUSY: + dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val); + rc = -EBUSY; + goto out_unlock; + case AMD_PMC_RESULT_CMD_UNKNOWN: + dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val); + rc = -EINVAL; + goto out_unlock; + case AMD_PMC_RESULT_CMD_REJECT_PREREQ: + case AMD_PMC_RESULT_FAILED: + default: + dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val); + rc = -EIO; + goto out_unlock; + } + +out_unlock: + mutex_unlock(&dev->lock); + amd_pmc_dump_registers(dev); + return rc; +} + +#ifdef CONFIG_SUSPEND +static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) +{ + switch (dev->cpu_id) { + case AMD_CPU_ID_PCO: + return MSG_OS_HINT_PCO; + case AMD_CPU_ID_RN: + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + case AMD_CPU_ID_PS: + return MSG_OS_HINT_RN; + } + return -EINVAL; +} + +static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) +{ + struct rtc_device *rtc_device; + time64_t then, now, duration; + struct rtc_wkalrm alarm; + struct rtc_time tm; + int rc; + + if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53)) + return 0; + + rtc_device = rtc_class_open("rtc0"); + if (!rtc_device) + return 0; + rc = rtc_read_alarm(rtc_device, &alarm); + if (rc) + return rc; + if (!alarm.enabled) { + dev_dbg(pdev->dev, "alarm not enabled\n"); + return 0; + } + rc = rtc_read_time(rtc_device, &tm); + if (rc) + return rc; + then = rtc_tm_to_time64(&alarm.time); + now = rtc_tm_to_time64(&tm); + duration = then-now; + + /* in the past */ + if (then < now) + return 0; + + /* will be stored in upper 16 bits of s0i3 hint argument, + * so timer wakeup from s0i3 is limited to ~18 hours or less + */ + if (duration <= 4 || duration > U16_MAX) + return -EINVAL; + + *arg |= (duration << 16); + rc = rtc_alarm_irq_enable(rtc_device, 0); + dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration); + + return rc; +} + +static void amd_pmc_s2idle_prepare(void) +{ + struct amd_pmc_dev *pdev = &pmc; + int rc; + u8 msg; + u32 arg = 1; + + /* Reset and Start SMU logging - to monitor the s0i3 stats */ + amd_pmc_setup_smu_logging(pdev); + + /* Activate CZN specific RTC functionality */ + if (pdev->cpu_id == AMD_CPU_ID_CZN) { + rc = amd_pmc_verify_czn_rtc(pdev, &arg); + if (rc) { + dev_err(pdev->dev, "failed to set RTC: %d\n", rc); + return; + } + } + + /* Dump the IdleMask before we send hint to SMU */ + amd_pmc_idlemask_read(pdev, pdev->dev, NULL); + msg = amd_pmc_get_os_hint(pdev); + rc = 
amd_pmc_send_cmd(pdev, arg, NULL, msg, 0); + if (rc) { + dev_err(pdev->dev, "suspend failed: %d\n", rc); + return; + } + + if (enable_stb) { + rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF); + if (rc) + dev_err(pdev->dev, "error writing to STB: %d\n", rc); + } +} + +static void amd_pmc_s2idle_restore(void) +{ + struct amd_pmc_dev *pdev = &pmc; + int rc; + u8 msg; + + msg = amd_pmc_get_os_hint(pdev); + rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0); + if (rc) + dev_err(pdev->dev, "resume failed: %d\n", rc); + + /* Let SMU know that we are looking for stats */ + amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0); + + /* Dump the IdleMask to see the blockers */ + amd_pmc_idlemask_read(pdev, pdev->dev, NULL); + + /* Write data incremented by 1 to distinguish in stb_read */ + if (enable_stb) { + rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1); + if (rc) + dev_err(pdev->dev, "error writing to STB: %d\n", rc); + } + + /* Notify on failed entry */ + amd_pmc_validate_deepest(pdev); +} + +static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = { + .prepare = amd_pmc_s2idle_prepare, + .restore = amd_pmc_s2idle_restore, +}; +#endif + +static const struct pci_device_id pmc_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) }, + { } +}; + +static int amd_pmc_s2d_init(struct amd_pmc_dev *dev) +{ + u32 phys_addr_low, phys_addr_hi; + u64 stb_phys_addr; + u32 size = 0; + + /* Spill to DRAM feature uses separate SMU message port */ + dev->msg_port = 1; + + amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, STB_SPILL_TO_DRAM, 1); + if (size != S2D_TELEMETRY_BYTES_MAX) + return -EIO; + + /* Get STB DRAM address */ + amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, STB_SPILL_TO_DRAM, 1); + amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, STB_SPILL_TO_DRAM, 1); + + stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); + + /* Clear msg_port for other SMU operation */ + dev->msg_port = 0; + + dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, S2D_TELEMETRY_DRAMBYTES_MAX); + if (!dev->stb_virt_addr) + return -ENOMEM; + + return 0; +} + +#ifdef CONFIG_SUSPEND +static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data) +{ + int err; + + err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0); + if (err) { + dev_err(dev->dev, "failed to write addr in stb: 0x%X\n", + AMD_PMC_STB_INDEX_ADDRESS); + return pcibios_err_to_errno(err); + } + + err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, data); + if (err) { + dev_err(dev->dev, "failed to write data in stb: 0x%X\n", + AMD_PMC_STB_INDEX_DATA); + return pcibios_err_to_errno(err); + } + + return 0; +} +#endif + +static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf) +{ + int i, err; + + err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0); + if (err) { + dev_err(dev->dev, "error writing addr to stb: 0x%X\n", + AMD_PMC_STB_INDEX_ADDRESS); + return pcibios_err_to_errno(err); + } + + for (i = 0; i < FIFO_SIZE; i++) { + err = pci_read_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, buf++); + if (err) { + dev_err(dev->dev, "error reading data from stb: 0x%X\n", + AMD_PMC_STB_INDEX_DATA); + return pcibios_err_to_errno(err); + } + 
} + + return 0; +} + +static int amd_pmc_probe(struct platform_device *pdev) +{ + struct amd_pmc_dev *dev = &pmc; + struct pci_dev *rdev; + u32 base_addr_lo, base_addr_hi; + u64 base_addr; + int err; + u32 val; + + dev->dev = &pdev->dev; + + rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) { + err = -ENODEV; + goto err_pci_dev_put; + } + + dev->cpu_id = rdev->device; + dev->rdev = rdev; + err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_LO); + if (err) { + dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS); + err = pcibios_err_to_errno(err); + goto err_pci_dev_put; + } + + err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val); + if (err) { + err = pcibios_err_to_errno(err); + goto err_pci_dev_put; + } + + base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK; + + err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_HI); + if (err) { + dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS); + err = pcibios_err_to_errno(err); + goto err_pci_dev_put; + } + + err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val); + if (err) { + err = pcibios_err_to_errno(err); + goto err_pci_dev_put; + } + + base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK; + base_addr = ((u64)base_addr_hi << 32 | base_addr_lo); + + dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET, + AMD_PMC_MAPPING_SIZE); + if (!dev->regbase) { + err = -ENOMEM; + goto err_pci_dev_put; + } + + mutex_init(&dev->lock); + + if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) { + err = amd_pmc_s2d_init(dev); + if (err) + return err; + } + + platform_set_drvdata(pdev, dev); +#ifdef CONFIG_SUSPEND + err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops); + if (err) + dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n"); +#endif + + amd_pmc_dbgfs_register(dev); + return 0; + +err_pci_dev_put: + pci_dev_put(rdev); + return err; +} + +static int amd_pmc_remove(struct platform_device *pdev) +{ + struct amd_pmc_dev *dev = platform_get_drvdata(pdev); + +#ifdef CONFIG_SUSPEND + acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops); +#endif + amd_pmc_dbgfs_unregister(dev); + pci_dev_put(dev->rdev); + mutex_destroy(&dev->lock); + return 0; +} + +static const struct acpi_device_id amd_pmc_acpi_ids[] = { + {"AMDI0005", 0}, + {"AMDI0006", 0}, + {"AMDI0007", 0}, + {"AMDI0008", 0}, + {"AMD0004", 0}, + {"AMD0005", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids); + +static struct platform_driver amd_pmc_driver = { + .driver = { + .name = "amd_pmc", + .acpi_match_table = amd_pmc_acpi_ids, + }, + .probe = amd_pmc_probe, + .remove = amd_pmc_remove, +}; +module_platform_driver(amd_pmc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("AMD PMC Driver"); diff --git a/drivers/platform/x86/intel/ifs/Kconfig b/drivers/platform/x86/intel/ifs/Kconfig new file mode 100644 index 0000000000..c341a27cc1 --- /dev/null +++ b/drivers/platform/x86/intel/ifs/Kconfig @@ -0,0 +1,16 @@ +config INTEL_IFS + tristate "Intel In Field Scan" + depends on X86 && CPU_SUP_INTEL && 64BIT && SMP + # Discussion on the list has shown that the sysfs API needs a bit + # more work, mark this as broken for now + depends on BROKEN + select INTEL_IFS_DEVICE + help + Enable support for the In Field Scan capability in select + CPUs. 
The capability allows for running low level tests via
+	  a scan image distributed by Intel via GitHub to validate CPU
+	  operation beyond baseline RAS capabilities. To compile this
+	  support as a module, choose M here. The module will be called
+	  intel_ifs.
+
+	  If unsure, say N.
diff --git a/drivers/platform/x86/intel/ifs/Makefile b/drivers/platform/x86/intel/ifs/Makefile
new file mode 100644
index 0000000000..30f035ef55
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INTEL_IFS)	+= intel_ifs.o
+
+intel_ifs-objs		:= core.o load.o runtest.o sysfs.o
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
new file mode 100644
index 0000000000..27204e3d67
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/module.h>
+#include <linux/kdev_t.h>
+#include <linux/semaphore.h>
+
+#include <asm/cpu_device_id.h>
+
+#include "ifs.h"
+
+#define X86_MATCH(model)				\
+	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,	\
+		INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL)
+
+static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
+	X86_MATCH(SAPPHIRERAPIDS_X),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
+
+static struct ifs_device ifs_device = {
+	.data = {
+		.integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT,
+	},
+	.misc = {
+		.name = "intel_ifs_0",
+		.nodename = "intel_ifs/0",
+		.minor = MISC_DYNAMIC_MINOR,
+	},
+};
+
+static int __init ifs_init(void)
+{
+	const struct x86_cpu_id *m;
+	u64 msrval;
+
+	m = x86_match_cpu(ifs_cpu_ids);
+	if (!m)
+		return -ENODEV;
+
+	if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval))
+		return -ENODEV;
+
+	if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS))
+		return -ENODEV;
+
+	if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval))
+		return -ENODEV;
+
+	ifs_device.misc.groups = ifs_get_groups();
+
+	if ((msrval & BIT(ifs_device.data.integrity_cap_bit)) &&
+	    !misc_register(&ifs_device.misc)) {
+		down(&ifs_sem);
+		ifs_load_firmware(ifs_device.misc.this_device);
+		up(&ifs_sem);
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static void __exit ifs_exit(void)
+{
+	misc_deregister(&ifs_device.misc);
+}
+
+module_init(ifs_init);
+module_exit(ifs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel In Field Scan (IFS) device");
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
new file mode 100644
index 0000000000..73c8e91cf1
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation. */
+
+#ifndef _IFS_H_
+#define _IFS_H_
+
+/**
+ * DOC: In-Field Scan
+ *
+ * =============
+ * In-Field Scan
+ * =============
+ *
+ * Introduction
+ * ------------
+ *
+ * In Field Scan (IFS) is a hardware feature to run circuit level tests on
+ * a CPU core to detect problems that are not caught by parity or ECC checks.
+ * Future CPUs will support more than one type of test, which will show up
+ * with a new platform-device instance-id; for now only .0 is exposed.
+ *
+ *
+ * IFS Image
+ * ---------
+ *
+ * Intel provides a firmware file containing the scan tests via
+ * GitHub [#f1]_. Similar to microcode, there is a separate file for each
+ * family-model-stepping.
+ *
+ * IFS Image Loading
+ * -----------------
+ *
+ * The driver loads the tests into memory reserved by the BIOS, local to each
+ * CPU socket, in a two step process using writes to MSRs to first load the
+ * SHA hashes for the test. Then the tests themselves.
Status MSRs provide
+ * feedback on the success/failure of these steps. When a new test file
+ * is installed, it can be loaded by writing to the driver reload file::
+ *
+ *   # echo 1 > /sys/devices/virtual/misc/intel_ifs_0/reload
+ *
+ * Similar to microcode, the current version of the scan tests is stored
+ * in a fixed location: /lib/firmware/intel/ifs.0/family-model-stepping.scan
+ *
+ * Running tests
+ * -------------
+ *
+ * Tests are run by the driver synchronizing execution of all threads on a
+ * core and then writing to the ACTIVATE_SCAN MSR on all threads. Instruction
+ * execution continues when:
+ *
+ * 1) All tests have completed.
+ * 2) Execution was interrupted.
+ * 3) A test detected a problem.
+ *
+ * Note that ALL THREADS ON THE CORE ARE EFFECTIVELY OFFLINE FOR THE
+ * DURATION OF THE TEST. This can be up to 200 milliseconds. If the system
+ * is running latency sensitive applications that cannot tolerate an
+ * interruption of this magnitude, the system administrator must arrange
+ * to migrate those applications to other cores before running a core test.
+ * It may also be necessary to redirect interrupts to other CPUs.
+ *
+ * In all cases, reading the SCAN_STATUS MSR provides details on what
+ * happened. The driver makes the value of this MSR visible to applications
+ * via the "details" file (see below). Interrupted tests may be restarted.
+ *
+ * The IFS driver provides sysfs interfaces via /sys/devices/virtual/misc/intel_ifs_0/
+ * to control execution:
+ *
+ * Test a specific core::
+ *
+ *   # echo <cpu#> > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ *
+ * When HT is enabled, any of the sibling cpu# can be specified to test
+ * its corresponding physical core. Since the tests are per physical core,
+ * the result of testing any thread is the same. All siblings must be online
+ * to run a core test. It is only necessary to test one thread.
+ *
+ * For example, to test the core corresponding to cpu5::
+ *
+ *   # echo 5 > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ *
+ * The result of the last test is provided in /sys::
+ *
+ *   $ cat /sys/devices/virtual/misc/intel_ifs_0/status
+ *   pass
+ *
+ * Status can be one of: pass, fail, untested.
+ *
+ * Additional details of the last test are provided by the details file::
+ *
+ *   $ cat /sys/devices/virtual/misc/intel_ifs_0/details
+ *   0x8081
+ *
+ * The details file reports the hex value of the SCAN_STATUS MSR.
+ * Hardware defined error codes are documented in volume 4 of the Intel
+ * Software Developer's Manual, but the error_code field may contain one of
+ * the following driver defined software codes:
+ *
+ * +------+--------------------+
+ * | 0xFD | Software timeout   |
+ * +------+--------------------+
+ * | 0xFE | Partial completion |
+ * +------+--------------------+
+ *
+ * Driver design choices
+ * ---------------------
+ *
+ * 1) The ACTIVATE_SCAN MSR allows for running any consecutive subrange of
+ * available tests. But the driver always tries to run all tests and only
+ * uses the subrange feature to restart an interrupted test.
+ *
+ * 2) Hardware allows for some number of cores to be tested in parallel.
+ * The driver does not make use of this, it only tests one core at a time.
+ *
+ * ..
[#f1] https://github.com/intel/TBD + */ +#include +#include + +#define MSR_COPY_SCAN_HASHES 0x000002c2 +#define MSR_SCAN_HASHES_STATUS 0x000002c3 +#define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4 +#define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5 +#define MSR_ACTIVATE_SCAN 0x000002c6 +#define MSR_SCAN_STATUS 0x000002c7 +#define SCAN_NOT_TESTED 0 +#define SCAN_TEST_PASS 1 +#define SCAN_TEST_FAIL 2 + +/* MSR_SCAN_HASHES_STATUS bit fields */ +union ifs_scan_hashes_status { + u64 data; + struct { + u32 chunk_size :16; + u32 num_chunks :8; + u32 rsvd1 :8; + u32 error_code :8; + u32 rsvd2 :11; + u32 max_core_limit :12; + u32 valid :1; + }; +}; + +/* MSR_CHUNKS_AUTH_STATUS bit fields */ +union ifs_chunks_auth_status { + u64 data; + struct { + u32 valid_chunks :8; + u32 total_chunks :8; + u32 rsvd1 :16; + u32 error_code :8; + u32 rsvd2 :24; + }; +}; + +/* MSR_ACTIVATE_SCAN bit fields */ +union ifs_scan { + u64 data; + struct { + u32 start :8; + u32 stop :8; + u32 rsvd :16; + u32 delay :31; + u32 sigmce :1; + }; +}; + +/* MSR_SCAN_STATUS bit fields */ +union ifs_status { + u64 data; + struct { + u32 chunk_num :8; + u32 chunk_stop_index :8; + u32 rsvd1 :16; + u32 error_code :8; + u32 rsvd2 :22; + u32 control_error :1; + u32 signature_error :1; + }; +}; + +/* + * Driver populated error-codes + * 0xFD: Test timed out before completing all the chunks. + * 0xFE: not all scan chunks were executed. Maximum forward progress retries exceeded. + */ +#define IFS_SW_TIMEOUT 0xFD +#define IFS_SW_PARTIAL_COMPLETION 0xFE + +/** + * struct ifs_data - attributes related to intel IFS driver + * @integrity_cap_bit: MSR_INTEGRITY_CAPS bit enumerating this test + * @loaded_version: stores the currently loaded ifs image version. + * @loaded: If a valid test binary has been loaded into the memory + * @loading_error: Error occurred on another CPU while loading image + * @valid_chunks: number of chunks which could be validated. + * @status: it holds simple status pass/fail/untested + * @scan_details: opaque scan status code from h/w + */ +struct ifs_data { + int integrity_cap_bit; + int loaded_version; + bool loaded; + bool loading_error; + int valid_chunks; + int status; + u64 scan_details; +}; + +struct ifs_work { + struct work_struct w; + struct device *dev; +}; + +struct ifs_device { + struct ifs_data data; + struct miscdevice misc; +}; + +static inline struct ifs_data *ifs_get_data(struct device *dev) +{ + struct miscdevice *m = dev_get_drvdata(dev); + struct ifs_device *d = container_of(m, struct ifs_device, misc); + + return &d->data; +} + +void ifs_load_firmware(struct device *dev); +int do_core_test(int cpu, struct device *dev); +const struct attribute_group **ifs_get_groups(void); + +extern struct semaphore ifs_sem; + +#endif diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c new file mode 100644 index 0000000000..d056617ddc --- /dev/null +++ b/drivers/platform/x86/intel/ifs/load.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation. 
*/
+
+#include
+#include
+#include
+#include
+
+#include "ifs.h"
+
+struct ifs_header {
+	u32 header_ver;
+	u32 blob_revision;
+	u32 date;
+	u32 processor_sig;
+	u32 check_sum;
+	u32 loader_rev;
+	u32 processor_flags;
+	u32 metadata_size;
+	u32 total_size;
+	u32 fusa_info;
+	u64 reserved;
+};
+
+#define IFS_HEADER_SIZE	(sizeof(struct ifs_header))
+static struct ifs_header *ifs_header_ptr;	/* pointer to the ifs image header */
+static u64 ifs_hash_ptr;			/* Address of ifs metadata (hash) */
+static u64 ifs_test_image_ptr;			/* 256B aligned address of test pattern */
+static DECLARE_COMPLETION(ifs_done);
+
+static const char * const scan_hash_status[] = {
+	[0] = "No error reported",
+	[1] = "Attempt to copy scan hashes when copy already in progress",
+	[2] = "Secure Memory not set up correctly",
+	[3] = "FuSaInfo.ProgramID does not match or ff-mm-ss does not match",
+	[4] = "Reserved",
+	[5] = "Integrity check failed",
+	[6] = "Scan reload or test is in progress"
+};
+
+static const char * const scan_authentication_status[] = {
+	[0] = "No error reported",
+	[1] = "Attempt to authenticate a chunk which is already marked as authentic",
+	[2] = "Chunk authentication error. The hash of chunk did not match expected value"
+};
+
+/*
+ * To copy scan hashes and authenticate test chunks, the initiating cpu must
+ * supply the linear address of the test image in EDX:EAX.
+ * wrmsr(MSR_COPY_SCAN_HASHES) performs the scan hash copy;
+ * wrmsr(MSR_AUTHENTICATE_AND_COPY_CHUNK) authenticates and copies a test chunk.
+ */
+static void copy_hashes_authenticate_chunks(struct work_struct *work)
+{
+	struct ifs_work *local_work = container_of(work, struct ifs_work, w);
+	union ifs_scan_hashes_status hashes_status;
+	union ifs_chunks_auth_status chunk_status;
+	struct device *dev = local_work->dev;
+	int i, num_chunks, chunk_size;
+	struct ifs_data *ifsd;
+	u64 linear_addr, base;
+	u32 err_code;
+
+	ifsd = ifs_get_data(dev);
+	/* run scan hash copy */
+	wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
+	rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+
+	/* enumerate the scan image information */
+	num_chunks = hashes_status.num_chunks;
+	chunk_size = hashes_status.chunk_size * 1024;
+	err_code = hashes_status.error_code;
+
+	if (!hashes_status.valid) {
+		ifsd->loading_error = true;
+		if (err_code >= ARRAY_SIZE(scan_hash_status)) {
+			dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code);
+			goto done;
+		}
+		dev_err(dev, "Hash copy error: %s\n", scan_hash_status[err_code]);
+		goto done;
+	}
+
+	/* base linear address to the scan data */
+	base = ifs_test_image_ptr;
+
+	/* scan data authentication and copy chunks to secured memory */
+	for (i = 0; i < num_chunks; i++) {
+		linear_addr = base + i * chunk_size;
+		linear_addr |= i;
+
+		wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr);
+		rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+
+		ifsd->valid_chunks = chunk_status.valid_chunks;
+		err_code = chunk_status.error_code;
+
+		if (err_code) {
+			ifsd->loading_error = true;
+			if (err_code >= ARRAY_SIZE(scan_authentication_status)) {
+				dev_err(dev,
+					"invalid error code 0x%x for authentication\n", err_code);
+				goto done;
+			}
+			dev_err(dev, "Chunk authentication error %s\n",
+				scan_authentication_status[err_code]);
+			goto done;
+		}
+	}
+done:
+	complete(&ifs_done);
+}
+
+/*
+ * IFS requires scan chunks to be authenticated on each socket in the platform.
+ * Once a test chunk is authenticated, it is automatically copied to secured
+ * memory, and authentication proceeds with the next chunk.
+ */
+static int scan_chunks_sanity_check(struct device *dev)
+{
+	int metadata_size, curr_pkg, cpu, ret = -ENOMEM;
+	struct ifs_data *ifsd = ifs_get_data(dev);
+	bool *package_authenticated;
+	struct ifs_work local_work;
+	char *test_ptr;
+
+	package_authenticated = kcalloc(topology_max_packages(), sizeof(bool), GFP_KERNEL);
+	if (!package_authenticated)
+		return ret;
+
+	metadata_size = ifs_header_ptr->metadata_size;
+
+	/* Spec says that if the Meta Data Size = 0 then it should be treated as 2000 */
+	if (metadata_size == 0)
+		metadata_size = 2000;
+
+	/* Scan chunk start must be 256 byte aligned */
+	if ((metadata_size + IFS_HEADER_SIZE) % 256) {
+		dev_err(dev, "Scan pattern offset within the binary is not 256 byte aligned\n");
+		kfree(package_authenticated);
+		return -EINVAL;
+	}
+
+	test_ptr = (char *)ifs_header_ptr + IFS_HEADER_SIZE + metadata_size;
+	ifsd->loading_error = false;
+
+	ifs_test_image_ptr = (u64)test_ptr;
+	ifsd->loaded_version = ifs_header_ptr->blob_revision;
+
+	/* copy the scan hash and authenticate per package */
+	cpus_read_lock();
+	for_each_online_cpu(cpu) {
+		curr_pkg = topology_physical_package_id(cpu);
+		if (package_authenticated[curr_pkg])
+			continue;
+		reinit_completion(&ifs_done);
+		local_work.dev = dev;
+		INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks);
+		schedule_work_on(cpu, &local_work.w);
+		wait_for_completion(&ifs_done);
+		if (ifsd->loading_error)
+			goto out;
+		package_authenticated[curr_pkg] = 1;
+	}
+	ret = 0;
+out:
+	cpus_read_unlock();
+	kfree(package_authenticated);
+
+	return ret;
+}
+
+static int ifs_sanity_check(struct device *dev,
+			    const struct microcode_header_intel *mc_header)
+{
+	unsigned long total_size, data_size;
+	u32 sum, *mc;
+
+	total_size = get_totalsize(mc_header);
+	data_size = get_datasize(mc_header);
+
+	if ((data_size + MC_HEADER_SIZE > total_size) || (total_size % sizeof(u32))) {
+		dev_err(dev, "bad ifs data file size.\n");
+		return -EINVAL;
+	}
+
+	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
+		dev_err(dev, "invalid/unknown ifs update format.\n");
+		return -EINVAL;
+	}
+
+	mc = (u32 *)mc_header;
+	sum = 0;
+	for (int i = 0; i < total_size / sizeof(u32); i++)
+		sum += mc[i];
+
+	if (sum) {
+		dev_err(dev, "bad ifs data checksum, aborting.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static bool find_ifs_matching_signature(struct device *dev, struct ucode_cpu_info *uci,
+					const struct microcode_header_intel *shdr)
+{
+	unsigned int mc_size;
+
+	mc_size = get_totalsize(shdr);
+
+	if (!mc_size || ifs_sanity_check(dev, shdr) < 0) {
+		dev_err(dev, "ifs sanity check failure\n");
+		return false;
+	}
+
+	if (!intel_cpu_signatures_match(uci->cpu_sig.sig, uci->cpu_sig.pf, shdr->sig, shdr->pf)) {
+		dev_err(dev, "ifs signature, pf not matching\n");
+		return false;
+	}
+
+	return true;
+}
+
+static bool ifs_image_sanity_check(struct device *dev, const struct microcode_header_intel *data)
+{
+	struct ucode_cpu_info uci;
+
+	intel_cpu_collect_info(&uci);
+
+	return find_ifs_matching_signature(dev, &uci, data);
+}
+
+/*
+ * Load the ifs image. Before loading the ifs module, the image must be located
+ * in /lib/firmware/intel/ifs/ and named <family>-<model>-<stepping>.scan.
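+ *
+ * As a hedged illustration (the CPU values here are examples, not taken
+ * from this patch), a family 6, model 0x8f, stepping 3 part makes the
+ * driver below request::
+ *
+ *	char scan_path[32];
+ *
+ *	snprintf(scan_path, sizeof(scan_path), "intel/ifs/%02x-%02x-%02x.scan",
+ *		 6, 0x8f, 3);
+ *	// scan_path is now "intel/ifs/06-8f-03.scan"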
+ */ +void ifs_load_firmware(struct device *dev) +{ + struct ifs_data *ifsd = ifs_get_data(dev); + const struct firmware *fw; + char scan_path[32]; + int ret; + + snprintf(scan_path, sizeof(scan_path), "intel/ifs/%02x-%02x-%02x.scan", + boot_cpu_data.x86, boot_cpu_data.x86_model, boot_cpu_data.x86_stepping); + + ret = request_firmware_direct(&fw, scan_path, dev); + if (ret) { + dev_err(dev, "ifs file %s load failed\n", scan_path); + goto done; + } + + if (!ifs_image_sanity_check(dev, (struct microcode_header_intel *)fw->data)) { + dev_err(dev, "ifs header sanity check failed\n"); + goto release; + } + + ifs_header_ptr = (struct ifs_header *)fw->data; + ifs_hash_ptr = (u64)(ifs_header_ptr + 1); + + ret = scan_chunks_sanity_check(dev); +release: + release_firmware(fw); +done: + ifsd->loaded = (ret == 0); +} diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c new file mode 100644 index 0000000000..b2ca2bb450 --- /dev/null +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation. */ + +#include +#include +#include +#include +#include +#include + +#include "ifs.h" + +/* + * Note all code and data in this file is protected by + * ifs_sem. On HT systems all threads on a core will + * execute together, but only the first thread on the + * core will update results of the test. + */ + +#define CREATE_TRACE_POINTS +#include + +/* Max retries on the same chunk */ +#define MAX_IFS_RETRIES 5 + +/* + * Number of TSC cycles that a logical CPU will wait for the other + * logical CPU on the core in the WRMSR(ACTIVATE_SCAN). + */ +#define IFS_THREAD_WAIT 100000 + +enum ifs_status_err_code { + IFS_NO_ERROR = 0, + IFS_OTHER_THREAD_COULD_NOT_JOIN = 1, + IFS_INTERRUPTED_BEFORE_RENDEZVOUS = 2, + IFS_POWER_MGMT_INADEQUATE_FOR_SCAN = 3, + IFS_INVALID_CHUNK_RANGE = 4, + IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS = 5, + IFS_CORE_NOT_CAPABLE_CURRENTLY = 6, + IFS_UNASSIGNED_ERROR_CODE = 7, + IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, + IFS_INTERRUPTED_DURING_EXECUTION = 9, +}; + +static const char * const scan_test_status[] = { + [IFS_NO_ERROR] = "SCAN no error", + [IFS_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.", + [IFS_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SCAN coordination.", + [IFS_POWER_MGMT_INADEQUATE_FOR_SCAN] = + "Core Abort SCAN Response due to power management condition.", + [IFS_INVALID_CHUNK_RANGE] = "Non valid chunks in the range", + [IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.", + [IFS_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SCAN currently", + [IFS_UNASSIGNED_ERROR_CODE] = "Unassigned error code 0x7", + [IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = + "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", + [IFS_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SCAN start", +}; + +static void message_not_tested(struct device *dev, int cpu, union ifs_status status) +{ + if (status.error_code < ARRAY_SIZE(scan_test_status)) { + dev_info(dev, "CPU(s) %*pbl: SCAN operation did not start. 
%s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + scan_test_status[status.error_code]); + } else if (status.error_code == IFS_SW_TIMEOUT) { + dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) { + dev_info(dev, "CPU(s) %*pbl: %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + "Not all scan chunks were executed. Maximum forward progress retries exceeded"); + } else { + dev_info(dev, "CPU(s) %*pbl: SCAN unknown status %llx\n", + cpumask_pr_args(cpu_smt_mask(cpu)), status.data); + } +} + +static void message_fail(struct device *dev, int cpu, union ifs_status status) +{ + /* + * control_error is set when the microcode runs into a problem + * loading the image from the reserved BIOS memory, or it has + * been corrupted. Reloading the image may fix this issue. + */ + if (status.control_error) { + dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } + + /* + * signature_error is set when the output from the scan chains does not + * match the expected signature. This might be a transient problem (e.g. + * due to a bit flip from an alpha particle or neutron). If the problem + * repeats on a subsequent test, then it indicates an actual problem in + * the core being tested. + */ + if (status.signature_error) { + dev_err(dev, "CPU(s) %*pbl: test signature incorrect.\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } +} + +static bool can_restart(union ifs_status status) +{ + enum ifs_status_err_code err_code = status.error_code; + + /* Signature for chunk is bad, or scan test failed */ + if (status.signature_error || status.control_error) + return false; + + switch (err_code) { + case IFS_NO_ERROR: + case IFS_OTHER_THREAD_COULD_NOT_JOIN: + case IFS_INTERRUPTED_BEFORE_RENDEZVOUS: + case IFS_POWER_MGMT_INADEQUATE_FOR_SCAN: + case IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT: + case IFS_INTERRUPTED_DURING_EXECUTION: + return true; + case IFS_INVALID_CHUNK_RANGE: + case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS: + case IFS_CORE_NOT_CAPABLE_CURRENTLY: + case IFS_UNASSIGNED_ERROR_CODE: + break; + } + return false; +} + +/* + * Execute the scan. Called "simultaneously" on all threads of a core + * at high priority using the stop_cpus mechanism. + */ +static int doscan(void *data) +{ + int cpu = smp_processor_id(); + u64 *msrs = data; + int first; + + /* Only the first logical CPU on a core reports result */ + first = cpumask_first(cpu_smt_mask(cpu)); + + /* + * This WRMSR will wait for other HT threads to also write + * to this MSR (at most for activate.delay cycles). Then it + * starts scan of each requested chunk. The core scan happens + * during the "execution" of the WRMSR. This instruction can + * take up to 200 milliseconds (in the case where all chunks + * are processed in a single pass) before it retires. + */ + wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]); + + if (cpu == first) { + /* Pass back the result of the scan */ + rdmsrl(MSR_SCAN_STATUS, msrs[1]); + } + + return 0; +} + +/* + * Use stop_core_cpuslocked() to synchronize writing to MSR_ACTIVATE_SCAN + * on all threads of the core to be tested. Loop if necessary to complete + * run of all chunks. Include some defensive tests to make sure forward + * progress is made, and that the whole test completes in a reasonable time. 
+ */
+static void ifs_test_core(int cpu, struct device *dev)
+{
+	union ifs_scan activate;
+	union ifs_status status;
+	unsigned long timeout;
+	struct ifs_data *ifsd;
+	u64 msrvals[2];
+	int retries;
+
+	ifsd = ifs_get_data(dev);
+
+	activate.rsvd = 0;
+	activate.delay = IFS_THREAD_WAIT;
+	activate.sigmce = 0;
+	activate.start = 0;
+	activate.stop = ifsd->valid_chunks - 1;
+
+	timeout = jiffies + HZ / 2;
+	retries = MAX_IFS_RETRIES;
+
+	while (activate.start <= activate.stop) {
+		if (time_after(jiffies, timeout)) {
+			status.error_code = IFS_SW_TIMEOUT;
+			break;
+		}
+
+		msrvals[0] = activate.data;
+		stop_core_cpuslocked(cpu, doscan, msrvals);
+
+		status.data = msrvals[1];
+
+		trace_ifs_status(cpu, activate, status);
+
+		/* Some cases can be retried, give up for others */
+		if (!can_restart(status))
+			break;
+
+		if (status.chunk_num == activate.start) {
+			/* Check for forward progress */
+			if (--retries == 0) {
+				if (status.error_code == IFS_NO_ERROR)
+					status.error_code = IFS_SW_PARTIAL_COMPLETION;
+				break;
+			}
+		} else {
+			retries = MAX_IFS_RETRIES;
+			activate.start = status.chunk_num;
+		}
+	}
+
+	/* Update status for this core */
+	ifsd->scan_details = status.data;
+
+	if (status.control_error || status.signature_error) {
+		ifsd->status = SCAN_TEST_FAIL;
+		message_fail(dev, cpu, status);
+	} else if (status.error_code) {
+		ifsd->status = SCAN_NOT_TESTED;
+		message_not_tested(dev, cpu, status);
+	} else {
+		ifsd->status = SCAN_TEST_PASS;
+	}
+}
+
+/*
+ * Initiate the per core test. It wakes up the threads on the target cpu and
+ * its sibling cpu. Once all sibling threads wake up, the scan test gets
+ * executed, and it waits for all sibling threads to finish the scan test.
+ */
+int do_core_test(int cpu, struct device *dev)
+{
+	int ret = 0;
+
+	/* Prevent CPUs from being taken offline during the scan test */
+	cpus_read_lock();
+
+	if (!cpu_online(cpu)) {
+		dev_info(dev, "cannot test on the offline cpu %d\n", cpu);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ifs_test_core(cpu, dev);
+out:
+	cpus_read_unlock();
+	return ret;
+}
diff --git a/drivers/platform/x86/intel/ifs/sysfs.c b/drivers/platform/x86/intel/ifs/sysfs.c
new file mode 100644
index 0000000000..37d8380d6f
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/sysfs.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation.
*/
+
+#include
+#include
+#include
+#include
+#include
+
+#include "ifs.h"
+
+/*
+ * Protects against simultaneous tests on multiple cores, or
+ * reloading the scan file while a test is in progress
+ */
+DEFINE_SEMAPHORE(ifs_sem);
+
+/*
+ * The sysfs interface to check additional details of the last test
+ * cat /sys/devices/virtual/misc/intel_ifs_0/details
+ */
+static ssize_t details_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct ifs_data *ifsd = ifs_get_data(dev);
+
+	return sysfs_emit(buf, "%#llx\n", ifsd->scan_details);
+}
+
+static DEVICE_ATTR_RO(details);
+
+static const char * const status_msg[] = {
+	[SCAN_NOT_TESTED] = "untested",
+	[SCAN_TEST_PASS] = "pass",
+	[SCAN_TEST_FAIL] = "fail"
+};
+
+/*
+ * The sysfs interface to check the test status:
+ * To check the status of the last test
+ * cat /sys/devices/virtual/misc/intel_ifs_0/status
+ */
+static ssize_t status_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct ifs_data *ifsd = ifs_get_data(dev);
+
+	return sysfs_emit(buf, "%s\n", status_msg[ifsd->status]);
+}
+
+static DEVICE_ATTR_RO(status);
+
+/*
+ * The sysfs interface for single core testing
+ * To start a test, for example, on cpu5
+ * echo 5 > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ * To check the result:
+ * cat /sys/devices/virtual/misc/intel_ifs_0/status
+ * The sibling core gets tested at the same time.
+ */
+static ssize_t run_test_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct ifs_data *ifsd = ifs_get_data(dev);
+	unsigned int cpu;
+	int rc;
+
+	rc = kstrtouint(buf, 0, &cpu);
+	if (rc < 0 || cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (down_interruptible(&ifs_sem))
+		return -EINTR;
+
+	if (!ifsd->loaded)
+		rc = -EPERM;
+	else
+		rc = do_core_test(cpu, dev);
+
+	up(&ifs_sem);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR_WO(run_test);
+
+/*
+ * Reload the IFS image when the user wants to install a new one
+ */
+static ssize_t reload_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct ifs_data *ifsd = ifs_get_data(dev);
+	bool res;
+
+	if (kstrtobool(buf, &res))
+		return -EINVAL;
+	if (!res)
+		return count;
+
+	if (down_interruptible(&ifs_sem))
+		return -EINTR;
+
+	ifs_load_firmware(dev);
+
+	up(&ifs_sem);
+
+	return ifsd->loaded ? count : -ENODEV;
+}
+
+static DEVICE_ATTR_WO(reload);
+
+/*
+ * Display currently loaded IFS image version.
+ */
+static ssize_t image_version_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct ifs_data *ifsd = ifs_get_data(dev);
+
+	if (!ifsd->loaded)
+		return sysfs_emit(buf, "%s\n", "none");
+	else
+		return sysfs_emit(buf, "%#x\n", ifsd->loaded_version);
+}
+
+static DEVICE_ATTR_RO(image_version);
+
+/* global scan sysfs attributes */
+static struct attribute *plat_ifs_attrs[] = {
+	&dev_attr_details.attr,
+	&dev_attr_status.attr,
+	&dev_attr_run_test.attr,
+	&dev_attr_reload.attr,
+	&dev_attr_image_version.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(plat_ifs);
+
+const struct attribute_group **ifs_get_groups(void)
+{
+	return plat_ifs_groups;
+}
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
new file mode 100644
index 0000000000..384d0962ae
--- /dev/null
+++ b/drivers/platform/x86/p2sb.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation.
+ * + * Authors: Andy Shevchenko + * Jonathan Yong + */ + +#include +#include +#include +#include + +#include +#include + +#define P2SBC 0xe0 +#define P2SBC_HIDE BIT(8) + +static const struct x86_cpu_id p2sb_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, PCI_DEVFN(31, 1)), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, PCI_DEVFN(31, 1)), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, PCI_DEVFN(31, 1)), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, PCI_DEVFN(31, 1)), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, PCI_DEVFN(31, 1)), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, PCI_DEVFN(31, 1)), + {} +}; + +static int p2sb_get_devfn(unsigned int *devfn) +{ + const struct x86_cpu_id *id; + + id = x86_match_cpu(p2sb_cpu_ids); + if (!id) + return -ENODEV; + + *devfn = (unsigned int)id->driver_data; + return 0; +} + +/* Copy resource from the first BAR of the device in question */ +static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem) +{ + struct resource *bar0 = &pdev->resource[0]; + + /* Make sure we have no dangling pointers in the output */ + memset(mem, 0, sizeof(*mem)); + + /* + * We copy only selected fields from the original resource. + * Because a PCI device will be removed soon, we may not use + * any allocated data, hence we may not copy any pointers. + */ + mem->start = bar0->start; + mem->end = bar0->end; + mem->flags = bar0->flags; + mem->desc = bar0->desc; + + return 0; +} + +static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem) +{ + struct pci_dev *pdev; + int ret; + + pdev = pci_scan_single_device(bus, devfn); + if (!pdev) + return -ENODEV; + + ret = p2sb_read_bar0(pdev, mem); + + pci_stop_and_remove_bus_device(pdev); + return ret; +} + +/** + * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR + * @bus: PCI bus to communicate with + * @devfn: PCI slot and function to communicate with + * @mem: memory resource to be filled in + * + * The BIOS prevents the P2SB device from being enumerated by the PCI + * subsystem, so we need to unhide and hide it back to lookup the BAR. + * + * if @bus is NULL, the bus 0 in domain 0 will be used. + * If @devfn is 0, it will be replaced by devfn of the P2SB device. + * + * Caller must provide a valid pointer to @mem. + * + * Locking is handled by pci_rescan_remove_lock mutex. + * + * Return: + * 0 on success or appropriate errno value on error. + */ +int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem) +{ + struct pci_dev *pdev_p2sb; + unsigned int devfn_p2sb; + u32 value = P2SBC_HIDE; + int ret; + + /* Get devfn for P2SB device itself */ + ret = p2sb_get_devfn(&devfn_p2sb); + if (ret) + return ret; + + /* if @bus is NULL, use bus 0 in domain 0 */ + bus = bus ?: pci_find_bus(0, 0); + + /* + * Prevent concurrent PCI bus scan from seeing the P2SB device and + * removing via sysfs while it is temporarily exposed. 
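+	 *
+	 * For reference, a consumer-side sketch of this interface (the
+	 * devfn below is made up; real callers pass the slot/function of
+	 * the hidden device they need)::
+	 *
+	 *	struct resource res;
+	 *	int ret;
+	 *
+	 *	ret = p2sb_bar(NULL, PCI_DEVFN(13, 1), &res);
+	 *	if (!ret)
+	 *		base = devm_ioremap_resource(dev, &res);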
+ */ + pci_lock_rescan_remove(); + + /* Unhide the P2SB device, if needed */ + pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value); + if (value & P2SBC_HIDE) + pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0); + + pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb); + if (devfn) + ret = p2sb_scan_and_read(bus, devfn, mem); + else + ret = p2sb_read_bar0(pdev_p2sb, mem); + pci_stop_and_remove_bus_device(pdev_p2sb); + + /* Hide the P2SB device, if it was hidden */ + if (value & P2SBC_HIDE) + pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE); + + pci_unlock_rescan_remove(); + + if (ret) + return ret; + + if (mem->flags == 0) + return -ENODEV; + + return 0; +} +EXPORT_SYMBOL_GPL(p2sb_bar); diff --git a/drivers/platform/x86/winmate-fm07-keys.c b/drivers/platform/x86/winmate-fm07-keys.c new file mode 100644 index 0000000000..2c90c5c7ec --- /dev/null +++ b/drivers/platform/x86/winmate-fm07-keys.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Driver for the Winmate FM07 front-panel keys +// +// Author: Daniel Beer + +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "winmate-fm07keys" + +#define PORT_CMD 0x6c +#define PORT_DATA 0x68 + +#define EC_ADDR_KEYS 0x3b +#define EC_CMD_READ 0x80 + +#define BASE_KEY KEY_F13 +#define NUM_KEYS 5 + +/* Typically we're done in fewer than 10 iterations */ +#define LOOP_TIMEOUT 1000 + +static void fm07keys_poll(struct input_dev *input) +{ + uint8_t k; + int i; + + /* Flush output buffer */ + i = 0; + while (inb(PORT_CMD) & 0x01) { + if (++i >= LOOP_TIMEOUT) + goto timeout; + inb(PORT_DATA); + } + + /* Send request and wait for write completion */ + outb(EC_CMD_READ, PORT_CMD); + i = 0; + while (inb(PORT_CMD) & 0x02) + if (++i >= LOOP_TIMEOUT) + goto timeout; + + outb(EC_ADDR_KEYS, PORT_DATA); + i = 0; + while (inb(PORT_CMD) & 0x02) + if (++i >= LOOP_TIMEOUT) + goto timeout; + + /* Wait for data ready */ + i = 0; + while (!(inb(PORT_CMD) & 0x01)) + if (++i >= LOOP_TIMEOUT) + goto timeout; + k = inb(PORT_DATA); + + /* Notify of new key states */ + for (i = 0; i < NUM_KEYS; i++) { + input_report_key(input, BASE_KEY + i, (~k) & 1); + k >>= 1; + } + + input_sync(input); + return; + +timeout: + dev_warn_ratelimited(&input->dev, "timeout polling IO memory\n"); +} + +static int fm07keys_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct input_dev *input; + int ret; + int i; + + input = devm_input_allocate_device(dev); + if (!input) { + dev_err(dev, "no memory for input device\n"); + return -ENOMEM; + } + + if (!devm_request_region(dev, PORT_CMD, 1, "Winmate FM07 EC")) + return -EBUSY; + if (!devm_request_region(dev, PORT_DATA, 1, "Winmate FM07 EC")) + return -EBUSY; + + input->name = "Winmate FM07 front-panel keys"; + input->phys = DRV_NAME "/input0"; + + input->id.bustype = BUS_HOST; + input->id.vendor = 0x0001; + input->id.product = 0x0001; + input->id.version = 0x0100; + + __set_bit(EV_KEY, input->evbit); + + for (i = 0; i < NUM_KEYS; i++) + __set_bit(BASE_KEY + i, input->keybit); + + ret = input_setup_polling(input, fm07keys_poll); + if (ret) { + dev_err(dev, "unable to set up polling, err=%d\n", ret); + return ret; + } + + /* These are silicone buttons. They can't be pressed in rapid + * succession too quickly, and 50 Hz seems to be an adequate + * sampling rate without missing any events when tested. 
+ */ + input_set_poll_interval(input, 20); + + ret = input_register_device(input); + if (ret) { + dev_err(dev, "unable to register polled device, err=%d\n", + ret); + return ret; + } + + input_sync(input); + return 0; +} + +static struct platform_driver fm07keys_driver = { + .probe = fm07keys_probe, + .driver = { + .name = DRV_NAME + }, +}; + +static struct platform_device *dev; + +static const struct dmi_system_id fm07keys_dmi_table[] __initconst = { + { + /* FM07 and FM07P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Winmate Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "IP30"), + }, + }, + { } +}; + +MODULE_DEVICE_TABLE(dmi, fm07keys_dmi_table); + +static int __init fm07keys_init(void) +{ + int ret; + + if (!dmi_check_system(fm07keys_dmi_table)) + return -ENODEV; + + ret = platform_driver_register(&fm07keys_driver); + if (ret) { + pr_err("fm07keys: failed to register driver, err=%d\n", ret); + return ret; + } + + dev = platform_device_register_simple(DRV_NAME, -1, NULL, 0); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + pr_err("fm07keys: failed to allocate device, err = %d\n", ret); + goto fail_register; + } + + return 0; + +fail_register: + platform_driver_unregister(&fm07keys_driver); + return ret; +} + +static void __exit fm07keys_exit(void) +{ + platform_driver_unregister(&fm07keys_driver); + platform_device_unregister(dev); +} + +module_init(fm07keys_init); +module_exit(fm07keys_exit); + +MODULE_AUTHOR("Daniel Beer "); +MODULE_DESCRIPTION("Winmate FM07 front-panel keys driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c new file mode 100644 index 0000000000..12dedf841a --- /dev/null +++ b/drivers/power/reset/pwr-mlxbf.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only or BSD-3-Clause + +/* + * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct pwr_mlxbf { + struct work_struct send_work; + const char *hid; +}; + +static void pwr_mlxbf_send_work(struct work_struct *work) +{ + acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1); +} + +static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr) +{ + const char *rst_pwr_hid = "MLNXBF24"; + const char *low_pwr_hid = "MLNXBF29"; + struct pwr_mlxbf *priv = ptr; + + if (!strncmp(priv->hid, rst_pwr_hid, 8)) + emergency_restart(); + + if (!strncmp(priv->hid, low_pwr_hid, 8)) + schedule_work(&priv->send_work); + + return IRQ_HANDLED; +} + +static int pwr_mlxbf_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct acpi_device *adev; + struct pwr_mlxbf *priv; + const char *hid; + int irq, err; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + adev = ACPI_COMPANION(dev); + if (!adev) + return -ENXIO; + + hid = acpi_device_hid(adev); + priv->hid = hid; + + irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0); + if (irq < 0) + return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid); + + err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work); + if (err) + return err; + + err = devm_request_irq(dev, irq, pwr_mlxbf_irq, 0, hid, priv); + if (err) + dev_err(dev, "Failed request of %s irq\n", priv->hid); + + return err; +} + +static const struct acpi_device_id __maybe_unused pwr_mlxbf_acpi_match[] = { + { "MLNXBF24", 0 }, + { "MLNXBF29", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, pwr_mlxbf_acpi_match); + +static struct platform_driver pwr_mlxbf_driver = { + .driver = { + .name = "pwr_mlxbf", + .acpi_match_table = pwr_mlxbf_acpi_match, + }, + .probe = pwr_mlxbf_probe, +}; + +module_platform_driver(pwr_mlxbf_driver); + +MODULE_DESCRIPTION("Mellanox BlueField power driver"); +MODULE_AUTHOR("Asmaa Mnebhi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c new file mode 100644 index 0000000000..c2a503d684 --- /dev/null +++ b/drivers/pwm/pwm-clk.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Clock based PWM controller + * + * Copyright (c) 2021 Nikita Travkin + * + * This is an "adapter" driver that allows PWM consumers to use + * system clocks with duty cycle control as PWM outputs. + * + * Limitations: + * - Due to the fact that exact behavior depends on the underlying + * clock driver, various limitations are possible. + * - Underlying clock may not be able to give 0% or 100% duty cycle + * (constant off or on), exact behavior will depend on the clock. + * - When the PWM is disabled, the clock will be disabled as well, + * line state will depend on the clock. + * - The clk API doesn't expose the necessary calls to implement + * .get_state(). 
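+ *
+ * A worked example of the mapping, not driver code: a requested period of
+ * 1000000 ns becomes a rate of DIV64_U64_ROUND_UP(NSEC_PER_SEC, 1000000) =
+ * 1000 Hz, and a 250000 ns duty cycle is then passed through as a ratio::
+ *
+ *	clk_set_rate(clk, 1000);
+ *	clk_set_duty_cycle(clk, 250000, 1000000);	// 25% high time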
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct pwm_clk_chip { + struct pwm_chip chip; + struct clk *clk; + bool clk_enabled; +}; + +#define to_pwm_clk_chip(_chip) container_of(_chip, struct pwm_clk_chip, chip) + +static int pwm_clk_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct pwm_clk_chip *pcchip = to_pwm_clk_chip(chip); + int ret; + u32 rate; + u64 period = state->period; + u64 duty_cycle = state->duty_cycle; + + if (!state->enabled) { + if (pwm->state.enabled) { + clk_disable(pcchip->clk); + pcchip->clk_enabled = false; + } + return 0; + } else if (!pwm->state.enabled) { + ret = clk_enable(pcchip->clk); + if (ret) + return ret; + pcchip->clk_enabled = true; + } + + /* + * We have to enable the clk before setting the rate and duty_cycle, + * that however results in a window where the clk is on with a + * (potentially) different setting. Also setting period and duty_cycle + * are two separate calls, so that probably isn't atomic either. + */ + + rate = DIV64_U64_ROUND_UP(NSEC_PER_SEC, period); + ret = clk_set_rate(pcchip->clk, rate); + if (ret) + return ret; + + if (state->polarity == PWM_POLARITY_INVERSED) + duty_cycle = period - duty_cycle; + + return clk_set_duty_cycle(pcchip->clk, duty_cycle, period); +} + +static const struct pwm_ops pwm_clk_ops = { + .apply = pwm_clk_apply, + .owner = THIS_MODULE, +}; + +static int pwm_clk_probe(struct platform_device *pdev) +{ + struct pwm_clk_chip *pcchip; + int ret; + + pcchip = devm_kzalloc(&pdev->dev, sizeof(*pcchip), GFP_KERNEL); + if (!pcchip) + return -ENOMEM; + + pcchip->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pcchip->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(pcchip->clk), + "Failed to get clock\n"); + + pcchip->chip.dev = &pdev->dev; + pcchip->chip.ops = &pwm_clk_ops; + pcchip->chip.npwm = 1; + + ret = clk_prepare(pcchip->clk); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to prepare clock\n"); + + ret = pwmchip_add(&pcchip->chip); + if (ret < 0) { + clk_unprepare(pcchip->clk); + return dev_err_probe(&pdev->dev, ret, "Failed to add pwm chip\n"); + } + + platform_set_drvdata(pdev, pcchip); + return 0; +} + +static int pwm_clk_remove(struct platform_device *pdev) +{ + struct pwm_clk_chip *pcchip = platform_get_drvdata(pdev); + + pwmchip_remove(&pcchip->chip); + + if (pcchip->clk_enabled) + clk_disable(pcchip->clk); + + clk_unprepare(pcchip->clk); + + return 0; +} + +static const struct of_device_id pwm_clk_dt_ids[] = { + { .compatible = "clk-pwm", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, pwm_clk_dt_ids); + +static struct platform_driver pwm_clk_driver = { + .driver = { + .name = "pwm-clk", + .of_match_table = pwm_clk_dt_ids, + }, + .probe = pwm_clk_probe, + .remove = pwm_clk_remove, +}; +module_platform_driver(pwm_clk_driver); + +MODULE_ALIAS("platform:pwm-clk"); +MODULE_AUTHOR("Nikita Travkin "); +MODULE_DESCRIPTION("Clock based PWM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c new file mode 100644 index 0000000000..e776fd1651 --- /dev/null +++ b/drivers/pwm/pwm-sunplus.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PWM device driver for SUNPLUS SP7021 SoC + * + * Links: + * Reference Manual: + * https://sunplus-tibbo.atlassian.net/wiki/spaces/doc/overview + * + * Reference Manual(PWM module): + * https://sunplus.atlassian.net/wiki/spaces/doc/pages/461144198/12.+Pulse+Width+Modulation+PWM + * + * Limitations: + * - Only 
supports normal polarity.
+ *	- The output is low when a PWM channel is disabled.
+ *	- When the parameters change, the currently running period is not
+ *	  completed; the new settings take effect immediately.
+ *	- In .apply() both the FREQ and DUTY registers must be written. Between
+ *	  the write to FREQ and the write to DUTY there is a short window where
+ *	  the new FREQ is used with the old DUTY.
+ *
+ * Author: Hammer Hsieh
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define SP7021_PWM_MODE0		0x000
+#define SP7021_PWM_MODE0_PWMEN(ch)	BIT(ch)
+#define SP7021_PWM_MODE0_BYPASS(ch)	BIT(8 + (ch))
+#define SP7021_PWM_MODE1		0x004
+#define SP7021_PWM_MODE1_CNT_EN(ch)	BIT(ch)
+#define SP7021_PWM_FREQ(ch)		(0x008 + 4 * (ch))
+#define SP7021_PWM_FREQ_MAX		GENMASK(15, 0)
+#define SP7021_PWM_DUTY(ch)		(0x018 + 4 * (ch))
+#define SP7021_PWM_DUTY_DD_SEL(ch)	FIELD_PREP(GENMASK(9, 8), ch)
+#define SP7021_PWM_DUTY_MAX		GENMASK(7, 0)
+#define SP7021_PWM_DUTY_MASK		SP7021_PWM_DUTY_MAX
+#define SP7021_PWM_FREQ_SCALER		256
+#define SP7021_PWM_NUM			4
+
+struct sunplus_pwm {
+	struct pwm_chip chip;
+	void __iomem *base;
+	struct clk *clk;
+};
+
+static inline struct sunplus_pwm *to_sunplus_pwm(struct pwm_chip *chip)
+{
+	return container_of(chip, struct sunplus_pwm, chip);
+}
+
+static int sunplus_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+			     const struct pwm_state *state)
+{
+	struct sunplus_pwm *priv = to_sunplus_pwm(chip);
+	u32 dd_freq, duty, mode0, mode1;
+	u64 clk_rate;
+
+	if (state->polarity != pwm->state.polarity)
+		return -EINVAL;
+
+	if (!state->enabled) {
+		/* disable pwm channel output */
+		mode0 = readl(priv->base + SP7021_PWM_MODE0);
+		mode0 &= ~SP7021_PWM_MODE0_PWMEN(pwm->hwpwm);
+		writel(mode0, priv->base + SP7021_PWM_MODE0);
+		/* disable pwm channel clk source */
+		mode1 = readl(priv->base + SP7021_PWM_MODE1);
+		mode1 &= ~SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm);
+		writel(mode1, priv->base + SP7021_PWM_MODE1);
+		return 0;
+	}
+
+	clk_rate = clk_get_rate(priv->clk);
+
+	/*
+	 * The following calculations might overflow if clk is bigger
+	 * than 256 GHz. In practice it's 202.5 MHz, so this limitation
+	 * is only theoretical.
+	 */
+	if (clk_rate > (u64)SP7021_PWM_FREQ_SCALER * NSEC_PER_SEC)
+		return -EINVAL;
+
+	/*
+	 * With clk_rate limited above we have dd_freq <= state->period,
+	 * so this cannot overflow.
+	 */
+	dd_freq = mul_u64_u64_div_u64(clk_rate, state->period, (u64)SP7021_PWM_FREQ_SCALER
+				* NSEC_PER_SEC);
+
+	if (dd_freq == 0)
+		return -EINVAL;
+
+	if (dd_freq > SP7021_PWM_FREQ_MAX)
+		dd_freq = SP7021_PWM_FREQ_MAX;
+
+	writel(dd_freq, priv->base + SP7021_PWM_FREQ(pwm->hwpwm));
+
+	/* calculate and set pwm duty */
+	mode0 = readl(priv->base + SP7021_PWM_MODE0);
+	mode0 |= SP7021_PWM_MODE0_PWMEN(pwm->hwpwm);
+	mode1 = readl(priv->base + SP7021_PWM_MODE1);
+	mode1 |= SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm);
+	if (state->duty_cycle == state->period) {
+		/* PWM channel output = high */
+		mode0 |= SP7021_PWM_MODE0_BYPASS(pwm->hwpwm);
+		duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | SP7021_PWM_DUTY_MAX;
+	} else {
+		mode0 &= ~SP7021_PWM_MODE0_BYPASS(pwm->hwpwm);
+		/*
+		 * duty_ns <= period_ns 27 bits, clk_rate 28 bits, won't overflow.
+ */ + duty = mul_u64_u64_div_u64(state->duty_cycle, clk_rate, + (u64)dd_freq * NSEC_PER_SEC); + duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | duty; + } + writel(duty, priv->base + SP7021_PWM_DUTY(pwm->hwpwm)); + writel(mode1, priv->base + SP7021_PWM_MODE1); + writel(mode0, priv->base + SP7021_PWM_MODE0); + + return 0; +} + +static void sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct sunplus_pwm *priv = to_sunplus_pwm(chip); + u32 mode0, dd_freq, duty; + u64 clk_rate; + + mode0 = readl(priv->base + SP7021_PWM_MODE0); + + if (mode0 & BIT(pwm->hwpwm)) { + clk_rate = clk_get_rate(priv->clk); + dd_freq = readl(priv->base + SP7021_PWM_FREQ(pwm->hwpwm)); + duty = readl(priv->base + SP7021_PWM_DUTY(pwm->hwpwm)); + duty = FIELD_GET(SP7021_PWM_DUTY_MASK, duty); + /* + * dd_freq 16 bits, SP7021_PWM_FREQ_SCALER 8 bits + * NSEC_PER_SEC 30 bits, won't overflow. + */ + state->period = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)SP7021_PWM_FREQ_SCALER + * NSEC_PER_SEC, clk_rate); + /* + * dd_freq 16 bits, duty 8 bits, NSEC_PER_SEC 30 bits, won't overflow. + */ + state->duty_cycle = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)duty * NSEC_PER_SEC, + clk_rate); + state->enabled = true; + } else { + state->enabled = false; + } + + state->polarity = PWM_POLARITY_NORMAL; +} + +static const struct pwm_ops sunplus_pwm_ops = { + .apply = sunplus_pwm_apply, + .get_state = sunplus_pwm_get_state, + .owner = THIS_MODULE, +}; + +static void sunplus_pwm_clk_release(void *data) +{ + struct clk *clk = data; + + clk_disable_unprepare(clk); +} + +static int sunplus_pwm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sunplus_pwm *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "get pwm clock failed\n"); + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + dev_err(dev, "failed to enable clock: %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(dev, sunplus_pwm_clk_release, priv->clk); + if (ret < 0) { + dev_err(dev, "failed to release clock: %d\n", ret); + return ret; + } + + priv->chip.dev = dev; + priv->chip.ops = &sunplus_pwm_ops; + priv->chip.npwm = SP7021_PWM_NUM; + + ret = devm_pwmchip_add(dev, &priv->chip); + if (ret < 0) + return dev_err_probe(dev, ret, "Cannot register sunplus PWM\n"); + + return 0; +} + +static const struct of_device_id sunplus_pwm_of_match[] = { + { .compatible = "sunplus,sp7021-pwm", }, + {} +}; +MODULE_DEVICE_TABLE(of, sunplus_pwm_of_match); + +static struct platform_driver sunplus_pwm_driver = { + .probe = sunplus_pwm_probe, + .driver = { + .name = "sunplus-pwm", + .of_match_table = sunplus_pwm_of_match, + }, +}; +module_platform_driver(sunplus_pwm_driver); + +MODULE_DESCRIPTION("Sunplus SoC PWM Driver"); +MODULE_AUTHOR("Hammer Hsieh "); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c new file mode 100644 index 0000000000..4dab2b86c4 --- /dev/null +++ b/drivers/pwm/pwm-xilinx.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Sean Anderson + * + * Limitations: + * - When changing both duty cycle and period, we may end up with one cycle + * with the old duty cycle and the new period. 
This is because the counters + * may only be reloaded by first stopping them, or by letting them be + * automatically reloaded at the end of a cycle. If this automatic reload + * happens after we set TLR0 but before we set TLR1 then we will have a + * bad cycle. This could probably be fixed by reading TCR0 just before + * reprogramming, but I think it would add complexity for little gain. + * - Cannot produce 100% duty cycle by configuring the TLRs. This might be + * possible by stopping the counters at an appropriate point in the cycle, + * but this is not (yet) implemented. + * - Only produces "normal" output. + * - Always produces low output if disabled. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The following functions are "common" to drivers for this device, and may be + * exported at a future date. + */ +u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr, + u64 cycles) +{ + WARN_ON(cycles < 2 || cycles - 2 > priv->max); + + if (tcsr & TCSR_UDT) + return cycles - 2; + return priv->max - cycles + 2; +} + +unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv, + u32 tlr, u32 tcsr) +{ + u64 cycles; + + if (tcsr & TCSR_UDT) + cycles = tlr + 2; + else + cycles = (u64)priv->max - tlr + 2; + + /* cycles has a max of 2^32 + 2, so we can't overflow */ + return DIV64_U64_ROUND_UP(cycles * NSEC_PER_SEC, + clk_get_rate(priv->clk)); +} + +/* + * The idea here is to capture whether the PWM is actually running (e.g. + * because we or the bootloader set it up) and we need to be careful to ensure + * we don't cause a glitch. According to the data sheet, to enable the PWM we + * need to + * + * - Set both timers to generate mode (MDT=1) + * - Set both timers to PWM mode (PWMA=1) + * - Enable the generate out signals (GENT=1) + * + * In addition, + * + * - The timer must be running (ENT=1) + * - The timer must auto-reload TLR into TCR (ARHT=1) + * - We must not be in the process of loading TLR into TCR (LOAD=0) + * - Cascade mode must be disabled (CASC=0) + * + * If any of these differ from usual, then the PWM is either disabled, or is + * running in a mode that this driver does not support. + */ +#define TCSR_PWM_SET (TCSR_GENT | TCSR_ARHT | TCSR_ENT | TCSR_PWMA) +#define TCSR_PWM_CLEAR (TCSR_MDT | TCSR_LOAD) +#define TCSR_PWM_MASK (TCSR_PWM_SET | TCSR_PWM_CLEAR) + +struct xilinx_pwm_device { + struct pwm_chip chip; + struct xilinx_timer_priv priv; +}; + +static inline struct xilinx_timer_priv +*xilinx_pwm_chip_to_priv(struct pwm_chip *chip) +{ + return &container_of(chip, struct xilinx_pwm_device, chip)->priv; +} + +static bool xilinx_timer_pwm_enabled(u32 tcsr0, u32 tcsr1) +{ + return ((TCSR_PWM_MASK | TCSR_CASC) & tcsr0) == TCSR_PWM_SET && + (TCSR_PWM_MASK & tcsr1) == TCSR_PWM_SET; +} + +static int xilinx_pwm_apply(struct pwm_chip *chip, struct pwm_device *unused, + const struct pwm_state *state) +{ + struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip); + u32 tlr0, tlr1, tcsr0, tcsr1; + u64 period_cycles, duty_cycles; + unsigned long rate; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + /* + * To be representable by TLR, cycles must be between 2 and + * priv->max + 2. To enforce this we can reduce the cycles, but we may + * not increase them. Caveat emptor: while this does result in more + * predictable rounding, it may also result in a completely different + * duty cycle (% high time) than what was requested. 
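	 *
	 * A worked example, assuming a hypothetical 100 MHz s_axi_aclk: a
	 * 1 ms request gives period_cycles = 100000, so for an up-counter
	 * (TCSR_UDT set) xilinx_timer_tlr_cycles() programs TLR0 =
	 * 100000 - 2 = 99998; a 15 ns request rounds down to 1 cycle,
	 * falls below the minimum of 2, and is rejected with -ERANGE.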
+ */ + rate = clk_get_rate(priv->clk); + /* Avoid overflow */ + period_cycles = min_t(u64, state->period, U32_MAX * NSEC_PER_SEC); + period_cycles = mul_u64_u32_div(period_cycles, rate, NSEC_PER_SEC); + period_cycles = min_t(u64, period_cycles, priv->max + 2); + if (period_cycles < 2) + return -ERANGE; + + /* Same thing for duty cycles */ + duty_cycles = min_t(u64, state->duty_cycle, U32_MAX * NSEC_PER_SEC); + duty_cycles = mul_u64_u32_div(duty_cycles, rate, NSEC_PER_SEC); + duty_cycles = min_t(u64, duty_cycles, priv->max + 2); + + /* + * If we specify 100% duty cycle, we will get 0% instead, so decrease + * the duty cycle count by one. + */ + if (duty_cycles >= period_cycles) + duty_cycles = period_cycles - 1; + + /* Round down to 0% duty cycle for unrepresentable duty cycles */ + if (duty_cycles < 2) + duty_cycles = period_cycles; + + regmap_read(priv->map, TCSR0, &tcsr0); + regmap_read(priv->map, TCSR1, &tcsr1); + tlr0 = xilinx_timer_tlr_cycles(priv, tcsr0, period_cycles); + tlr1 = xilinx_timer_tlr_cycles(priv, tcsr1, duty_cycles); + regmap_write(priv->map, TLR0, tlr0); + regmap_write(priv->map, TLR1, tlr1); + + if (state->enabled) { + /* + * If the PWM is already running, then the counters will be + * reloaded at the end of the current cycle. + */ + if (!xilinx_timer_pwm_enabled(tcsr0, tcsr1)) { + /* Load TLR into TCR */ + regmap_write(priv->map, TCSR0, tcsr0 | TCSR_LOAD); + regmap_write(priv->map, TCSR1, tcsr1 | TCSR_LOAD); + /* Enable timers all at once with ENALL */ + tcsr0 = (TCSR_PWM_SET & ~TCSR_ENT) | (tcsr0 & TCSR_UDT); + tcsr1 = TCSR_PWM_SET | TCSR_ENALL | (tcsr1 & TCSR_UDT); + regmap_write(priv->map, TCSR0, tcsr0); + regmap_write(priv->map, TCSR1, tcsr1); + } + } else { + regmap_write(priv->map, TCSR0, 0); + regmap_write(priv->map, TCSR1, 0); + } + + return 0; +} + +static void xilinx_pwm_get_state(struct pwm_chip *chip, + struct pwm_device *unused, + struct pwm_state *state) +{ + struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip); + u32 tlr0, tlr1, tcsr0, tcsr1; + + regmap_read(priv->map, TLR0, &tlr0); + regmap_read(priv->map, TLR1, &tlr1); + regmap_read(priv->map, TCSR0, &tcsr0); + regmap_read(priv->map, TCSR1, &tcsr1); + state->period = xilinx_timer_get_period(priv, tlr0, tcsr0); + state->duty_cycle = xilinx_timer_get_period(priv, tlr1, tcsr1); + state->enabled = xilinx_timer_pwm_enabled(tcsr0, tcsr1); + state->polarity = PWM_POLARITY_NORMAL; + + /* + * 100% duty cycle results in constant low output. 
This may be (very) + * wrong if rate > 1 GHz, so fix this if you have such hardware :) + */ + if (state->period == state->duty_cycle) + state->duty_cycle = 0; +} + +static const struct pwm_ops xilinx_pwm_ops = { + .apply = xilinx_pwm_apply, + .get_state = xilinx_pwm_get_state, + .owner = THIS_MODULE, +}; + +static const struct regmap_config xilinx_pwm_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .max_register = TCR1, +}; + +static int xilinx_pwm_probe(struct platform_device *pdev) +{ + int ret; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct xilinx_timer_priv *priv; + struct xilinx_pwm_device *xilinx_pwm; + u32 pwm_cells, one_timer, width; + void __iomem *regs; + + /* If there are no PWM cells, this binding is for a timer */ + ret = of_property_read_u32(np, "#pwm-cells", &pwm_cells); + if (ret == -EINVAL) + return -ENODEV; + if (ret) + return dev_err_probe(dev, ret, "could not read #pwm-cells\n"); + + xilinx_pwm = devm_kzalloc(dev, sizeof(*xilinx_pwm), GFP_KERNEL); + if (!xilinx_pwm) + return -ENOMEM; + platform_set_drvdata(pdev, xilinx_pwm); + priv = &xilinx_pwm->priv; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + priv->map = devm_regmap_init_mmio(dev, regs, + &xilinx_pwm_regmap_config); + if (IS_ERR(priv->map)) + return dev_err_probe(dev, PTR_ERR(priv->map), + "Could not create regmap\n"); + + ret = of_property_read_u32(np, "xlnx,one-timer-only", &one_timer); + if (ret) + return dev_err_probe(dev, ret, + "Could not read xlnx,one-timer-only\n"); + + if (one_timer) + return dev_err_probe(dev, -EINVAL, + "Two timers required for PWM mode\n"); + + ret = of_property_read_u32(np, "xlnx,count-width", &width); + if (ret == -EINVAL) + width = 32; + else if (ret) + return dev_err_probe(dev, ret, + "Could not read xlnx,count-width\n"); + + if (width != 8 && width != 16 && width != 32) + return dev_err_probe(dev, -EINVAL, + "Invalid counter width %d\n", width); + priv->max = BIT_ULL(width) - 1; + + /* + * The polarity of the Generate Out signals must be active high for PWM + * mode to work. We could determine this from the device tree, but + * alas, such properties are not allowed to be used. 
+ */ + + priv->clk = devm_clk_get(dev, "s_axi_aclk"); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "Could not get clock\n"); + + ret = clk_prepare_enable(priv->clk); + if (ret) + return dev_err_probe(dev, ret, "Clock enable failed\n"); + clk_rate_exclusive_get(priv->clk); + + xilinx_pwm->chip.dev = dev; + xilinx_pwm->chip.ops = &xilinx_pwm_ops; + xilinx_pwm->chip.npwm = 1; + ret = pwmchip_add(&xilinx_pwm->chip); + if (ret) { + clk_rate_exclusive_put(priv->clk); + clk_disable_unprepare(priv->clk); + return dev_err_probe(dev, ret, "Could not register PWM chip\n"); + } + + return 0; +} + +static int xilinx_pwm_remove(struct platform_device *pdev) +{ + struct xilinx_pwm_device *xilinx_pwm = platform_get_drvdata(pdev); + + pwmchip_remove(&xilinx_pwm->chip); + clk_rate_exclusive_put(xilinx_pwm->priv.clk); + clk_disable_unprepare(xilinx_pwm->priv.clk); + return 0; +} + +static const struct of_device_id xilinx_pwm_of_match[] = { + { .compatible = "xlnx,xps-timer-1.00.a", }, + {}, +}; +MODULE_DEVICE_TABLE(of, xilinx_pwm_of_match); + +static struct platform_driver xilinx_pwm_driver = { + .probe = xilinx_pwm_probe, + .remove = xilinx_pwm_remove, + .driver = { + .name = "xilinx-pwm", + .of_match_table = of_match_ptr(xilinx_pwm_of_match), + }, +}; +module_platform_driver(xilinx_pwm_driver); + +MODULE_ALIAS("platform:xilinx-pwm"); +MODULE_DESCRIPTION("PWM driver for Xilinx LogiCORE IP AXI Timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max597x-regulator.c new file mode 100644 index 0000000000..03c6027682 --- /dev/null +++ b/drivers/regulator/max597x-regulator.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device driver for regulators in MAX5970 and MAX5978 IC + * + * Copyright (c) 2022 9elements GmbH + * + * Author: Patrick Rudolph + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct max597x_regulator { + int num_switches, mon_rng, irng, shunt_micro_ohms, lim_uA; + struct regmap *regmap; +}; + +enum max597x_regulator_id { + MAX597X_SW0, + MAX597X_SW1, +}; + +static int max597x_uvp_ovp_check_mode(struct regulator_dev *rdev, int severity) +{ + int ret, reg; + + /* Status1 register contains the soft strap values sampled at POR */ + ret = regmap_read(rdev->regmap, MAX5970_REG_STATUS1, ®); + if (ret) + return ret; + + /* Check soft straps match requested mode */ + if (severity == REGULATOR_SEVERITY_PROT) { + if (STATUS1_PROT(reg) != STATUS1_PROT_SHUTDOWN) + return -EOPNOTSUPP; + + return 0; + } + if (STATUS1_PROT(reg) == STATUS1_PROT_SHUTDOWN) + return -EOPNOTSUPP; + + return 0; +} + +static int max597x_set_vp(struct regulator_dev *rdev, int lim_uV, int severity, + bool enable, bool overvoltage) +{ + int off_h, off_l, reg, ret; + struct max597x_regulator *data = rdev_get_drvdata(rdev); + int channel = rdev_get_id(rdev); + + if (overvoltage) { + if (severity == REGULATOR_SEVERITY_WARN) { + off_h = MAX5970_REG_CH_OV_WARN_H(channel); + off_l = MAX5970_REG_CH_OV_WARN_L(channel); + } else { + off_h = MAX5970_REG_CH_OV_CRIT_H(channel); + off_l = MAX5970_REG_CH_OV_CRIT_L(channel); + } + } else { + if (severity == REGULATOR_SEVERITY_WARN) { + off_h = MAX5970_REG_CH_UV_WARN_H(channel); + off_l = MAX5970_REG_CH_UV_WARN_L(channel); + } else { + off_h = MAX5970_REG_CH_UV_CRIT_H(channel); + off_l = MAX5970_REG_CH_UV_CRIT_L(channel); + } + } + + if (enable) + /* reg = ADC_MASK * (lim_uV / 1000000) / (data->mon_rng / 1000000) 
*/
+		reg = ADC_MASK * lim_uV / data->mon_rng;
+	else
+		reg = 0;
+
+	ret = regmap_write(rdev->regmap, off_h, MAX5970_VAL2REG_H(reg));
+	if (ret)
+		return ret;
+
+	ret = regmap_write(rdev->regmap, off_l, MAX5970_VAL2REG_L(reg));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int max597x_set_uvp(struct regulator_dev *rdev, int lim_uV, int severity,
+			   bool enable)
+{
+	int ret;
+
+	/*
+	 * The MAX5970 encodes the enable control as a special value in the
+	 * limit register: we can't set a limit while keeping the feature
+	 * disabled, nor enable it without a given limit.
+	 */
+	if ((lim_uV && !enable) || (!lim_uV && enable))
+		return -EINVAL;
+
+	ret = max597x_uvp_ovp_check_mode(rdev, severity);
+	if (ret)
+		return ret;
+
+	return max597x_set_vp(rdev, lim_uV, severity, enable, false);
+}
+
+static int max597x_set_ovp(struct regulator_dev *rdev, int lim_uV, int severity,
+			   bool enable)
+{
+	int ret;
+
+	/*
+	 * The MAX5970 encodes the enable control as a special value in the
+	 * limit register: we can't set a limit while keeping the feature
+	 * disabled, nor enable it without a given limit.
+	 */
+	if ((lim_uV && !enable) || (!lim_uV && enable))
+		return -EINVAL;
+
+	ret = max597x_uvp_ovp_check_mode(rdev, severity);
+	if (ret)
+		return ret;
+
+	return max597x_set_vp(rdev, lim_uV, severity, enable, true);
+}
+
+static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA,
+			   int severity, bool enable)
+{
+	int ret, val, reg;
+	unsigned int vthst, vthfst;
+
+	struct max597x_regulator *data = rdev_get_drvdata(rdev);
+	int rdev_id = rdev_get_id(rdev);
+	/*
+	 * The MAX5970 doesn't have an enable control for OCP.
+	 * If a limit is specified but enable is not set, hold the value in a
+	 * variable and use it later when OCP needs to be enabled.
+	 */
+	if (lim_uA != 0 && lim_uA != data->lim_uA)
+		data->lim_uA = lim_uA;
+
+	if (severity != REGULATOR_SEVERITY_PROT)
+		return -EINVAL;
+
+	if (enable) {
+		/* Calc Vtrip threshold in uV. */
+		vthst = div_u64(mul_u32_u32(data->shunt_micro_ohms, data->lim_uA),
+				1000000);
+
+		/*
+		 * As recommended in the datasheet, add a 20% margin to avoid
+		 * spurious events and to allow for passive component tolerance.
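		 *
		 * Worked numbers with made-up board values: a 1000 uOhm
		 * shunt and a 10 A limit give
		 * vthst = 1000 * 10000000 / 1000000 = 10000 uV, i.e.
		 * 12000 uV after the 20% margin, and the fast-trip DAC
		 * value programmed below is 0xFF * vthfst / data->irng.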
+ */ + vthst = div_u64(mul_u32_u32(vthst, 120), 100); + + /* Calc fast Vtrip threshold in uV */ + vthfst = vthst * (MAX5970_FAST2SLOW_RATIO / 100); + + if (vthfst > data->irng) { + dev_err(&rdev->dev, "Current limit out of range\n"); + return -EINVAL; + } + /* Fast trip threshold to be programmed */ + val = div_u64(mul_u32_u32(0xFF, vthfst), data->irng); + } else { + /* + * Since there is no option to disable OCP, set the limit to + * the max value. + */ + val = 0xFF; + } + + reg = MAX5970_REG_DAC_FAST(rdev_id); + ret = regmap_write(rdev->regmap, reg, val); + + return ret; +} + +static int max597x_get_status(struct regulator_dev *rdev) +{ + int val, ret; + + ret = regmap_read(rdev->regmap, MAX5970_REG_STATUS3, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + if (val & MAX5970_STATUS3_ALERT) + return REGULATOR_STATUS_ERROR; + + ret = regulator_is_enabled_regmap(rdev); + if (ret < 0) + return ret; + + if (ret) + return REGULATOR_STATUS_ON; + + return REGULATOR_STATUS_OFF; +} + +static const struct regulator_ops max597x_switch_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .get_status = max597x_get_status, + .set_over_voltage_protection = max597x_set_ovp, + .set_under_voltage_protection = max597x_set_uvp, + .set_over_current_protection = max597x_set_ocp, +}; + +static int max597x_dt_parse(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + struct max597x_regulator *data = cfg->driver_data; + int ret; + + ret = of_property_read_u32(np, "shunt-resistor-micro-ohms", + &data->shunt_micro_ohms); + if (ret < 0) + dev_err(cfg->dev, + "property 'shunt-resistor-micro-ohms' not found, err %d\n", + ret); + return ret; +} + +#define MAX597X_SWITCH(_ID, _ereg, _chan, _supply) { \ + .name = #_ID, \ + .of_match = of_match_ptr(#_ID), \ + .ops = &max597x_switch_ops, \ + .regulators_node = of_match_ptr("regulators"), \ + .type = REGULATOR_VOLTAGE, \ + .id = MAX597X_##_ID, \ + .owner = THIS_MODULE, \ + .supply_name = _supply, \ + .enable_reg = _ereg, \ + .enable_mask = CHXEN((_chan)), \ + .of_parse_cb = max597x_dt_parse, \ +} + +static const struct regulator_desc regulators[] = { + MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"), + MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"), +}; + +static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg, + unsigned int *val) +{ + int ret; + + ret = regmap_read(map, reg, val); + if (ret) + return ret; + + if (*val) + return regmap_write(map, reg, *val); + + return 0; +} + +static int max597x_irq_handler(int irq, struct regulator_irq_data *rid, + unsigned long *dev_mask) +{ + struct regulator_err_state *stat; + struct max597x_regulator *d = (struct max597x_regulator *)rid->data; + int val, ret, i; + + ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT0, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + *dev_mask = 0; + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + stat->notifs = 0; + stat->errors = 0; + } + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if (val & UV_STATUS_CRIT(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_UNDER_VOLTAGE; + stat->errors |= REGULATOR_ERROR_UNDER_VOLTAGE; + } else if (val & UV_STATUS_WARN(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_UNDER_VOLTAGE_WARN; + stat->errors |= REGULATOR_ERROR_UNDER_VOLTAGE_WARN; + } + } + + ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT1, &val); + if (ret) +
return REGULATOR_FAILED_RETRY; + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if (val & OV_STATUS_CRIT(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_REGULATION_OUT; + stat->errors |= REGULATOR_ERROR_REGULATION_OUT; + } else if (val & OV_STATUS_WARN(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_OVER_VOLTAGE_WARN; + stat->errors |= REGULATOR_ERROR_OVER_VOLTAGE_WARN; + } + } + + ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT2, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if (val & OC_STATUS_WARN(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_OVER_CURRENT_WARN; + stat->errors |= REGULATOR_ERROR_OVER_CURRENT_WARN; + } + } + + ret = regmap_read(d->regmap, MAX5970_REG_STATUS0, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if ((val & MAX5970_CB_IFAULTF(i)) || + (val & MAX5970_CB_IFAULTS(i))) { + *dev_mask |= 1 << i; + stat->notifs |= + REGULATOR_EVENT_OVER_CURRENT | + REGULATOR_EVENT_DISABLE; + stat->errors |= + REGULATOR_ERROR_OVER_CURRENT | REGULATOR_ERROR_FAIL; + + /* Clear the sub-IRQ status */ + regulator_disable_regmap(stat->rdev); + } + } + return 0; +} + +static const struct regmap_config max597x_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MAX_REGISTERS, +}; + +static int max597x_adc_range(struct regmap *regmap, const int ch, + u32 *irng, u32 *mon_rng) +{ + unsigned int reg; + int ret; + + /* Decode current ADC range */ + ret = regmap_read(regmap, MAX5970_REG_STATUS2, &reg); + if (ret) + return ret; + switch (MAX5970_IRNG(reg, ch)) { + case 0: + *irng = 100000; /* 100 mV */ + break; + case 1: + *irng = 50000; /* 50 mV */ + break; + case 2: + *irng = 25000; /* 25 mV */ + break; + default: + return -EINVAL; + } + + /* Decode current voltage monitor range */ + ret = regmap_read(regmap, MAX5970_REG_MON_RANGE, &reg); + if (ret) + return ret; + + *mon_rng = MAX5970_MON_MAX_RANGE_UV >> MAX5970_MON(reg, ch); + + return 0; +} + +static int max597x_setup_irq(struct device *dev, + int irq, + struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES], + int num_switches, struct max597x_regulator *data) +{ + struct regulator_irq_desc max597x_notif = { + .name = "max597x-irq", + .map_event = max597x_irq_handler, + .data = data, + }; + int errs = REGULATOR_ERROR_UNDER_VOLTAGE | + REGULATOR_ERROR_UNDER_VOLTAGE_WARN | + REGULATOR_ERROR_OVER_VOLTAGE_WARN | + REGULATOR_ERROR_REGULATION_OUT | + REGULATOR_ERROR_OVER_CURRENT | + REGULATOR_ERROR_OVER_CURRENT_WARN | REGULATOR_ERROR_FAIL; + void *irq_helper; + + /* Register notifiers - can fail if IRQ is not given */ + irq_helper = devm_regulator_irq_helper(dev, &max597x_notif, + irq, 0, errs, NULL, + &rdevs[0], num_switches); + if (IS_ERR(irq_helper)) { + if (PTR_ERR(irq_helper) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + dev_warn(dev, "IRQ disabled %pe\n", irq_helper); + } + + return 0; +} + +static int max597x_regulator_probe(struct platform_device *pdev) +{ + struct max597x_data *max597x = dev_get_drvdata(pdev->dev.parent); + struct max597x_regulator *data; + struct regulator_config config = { }; + struct regulator_dev *rdev; + struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES]; + int num_switches = max597x->num_switches; + int ret, i; + + for (i = 0; i < num_switches; i++) { + data = devm_kzalloc(max597x->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + +
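+ /* + * Each monitored switch gets its own driver-data instance: the + * shared regmap plus the per-channel ADC ranges decoded below + * let every registered regulator compute its thresholds + * independently. + */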
data->num_switches = num_switches; + data->regmap = max597x->regmap; + + ret = max597x_adc_range(data->regmap, i, &max597x->irng[i], &max597x->mon_rng[i]); + if (ret < 0) + return ret; + + data->irng = max597x->irng[i]; + data->mon_rng = max597x->mon_rng[i]; + + config.dev = max597x->dev; + config.driver_data = (void *)data; + config.regmap = data->regmap; + rdev = devm_regulator_register(max597x->dev, + &regulators[i], &config); + if (IS_ERR(rdev)) { + dev_err(max597x->dev, "failed to register regulator %s\n", + regulators[i].name); + return PTR_ERR(rdev); + } + rdevs[i] = rdev; + max597x->shunt_micro_ohms[i] = data->shunt_micro_ohms; + } + + if (max597x->irq) { + ret = max597x_setup_irq(max597x->dev, max597x->irq, rdevs, + num_switches, data); + if (ret) { + dev_err(max597x->dev, "IRQ setup failed\n"); + return ret; + } + } + + return 0; +} + +static struct platform_driver max597x_regulator_driver = { + .driver = { + .name = "max597x-regulator", + }, + .probe = max597x_regulator_probe, +}; + +module_platform_driver(max597x_regulator_driver); + +MODULE_AUTHOR("Patrick Rudolph "); +MODULE_DESCRIPTION("MAX5970 hot-swap controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/mt6370-regulator.c b/drivers/regulator/mt6370-regulator.c new file mode 100644 index 0000000000..e73f5a46cb --- /dev/null +++ b/drivers/regulator/mt6370-regulator.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + MT6370_IDX_DSVBOOST = 0, + MT6370_IDX_DSVPOS, + MT6370_IDX_DSVNEG, + MT6370_IDX_VIBLDO, + MT6370_MAX_IDX +}; + +#define MT6370_REG_LDO_CFG 0x180 +#define MT6370_REG_LDO_VOUT 0x181 +#define MT6370_REG_DB_CTRL1 0x1B0 +#define MT6370_REG_DB_CTRL2 0x1B1 +#define MT6370_REG_DB_VBST 0x1B2 +#define MT6370_REG_DB_VPOS 0x1B3 +#define MT6370_REG_DB_VNEG 0x1B4 +#define MT6370_REG_LDO_STAT 0x1DC +#define MT6370_REG_DB_STAT 0x1DF + +#define MT6370_LDOOMS_MASK BIT(7) +#define MT6370_LDOEN_MASK BIT(7) +#define MT6370_LDOVOUT_MASK GENMASK(3, 0) +#define MT6370_DBPERD_MASK (BIT(7) | BIT(4)) +#define MT6370_DBEXTEN_MASK BIT(0) +#define MT6370_DBVPOSEN_MASK BIT(6) +#define MT6370_DBVPOSDISG_MASK BIT(5) +#define MT6370_DBVNEGEN_MASK BIT(3) +#define MT6370_DBVNEGDISG_MASK BIT(2) +#define MT6370_DBALLON_MASK (MT6370_DBVPOSEN_MASK | MT6370_DBVNEGEN_MASK) +#define MT6370_DBSLEW_MASK GENMASK(7, 6) +#define MT6370_DBVOUT_MASK GENMASK(5, 0) +#define MT6370_LDOOC_EVT_MASK BIT(7) +#define MT6370_POSSCP_EVT_MASK BIT(7) +#define MT6370_NEGSCP_EVT_MASK BIT(6) +#define MT6370_BSTOCP_EVT_MASK BIT(5) +#define MT6370_POSOCP_EVT_MASK BIT(4) +#define MT6370_NEGOCP_EVT_MASK BIT(3) + +#define MT6370_LDO_MINUV 1600000 +#define MT6370_LDO_STPUV 200000 +#define MT6370_LDO_N_VOLT 13 +#define MT6370_DBVBOOST_MINUV 4000000 +#define MT6370_DBVBOOST_STPUV 50000 +#define MT6370_DBVBOOST_N_VOLT 45 +#define MT6370_DBVOUT_MINUV 4000000 +#define MT6370_DBVOUT_STPUV 50000 +#define MT6370_DBVOUT_N_VOLT 41 + +struct mt6370_priv { + struct device *dev; + struct regmap *regmap; + struct regulator_dev *rdev[MT6370_MAX_IDX]; + bool use_external_ctrl; +}; + +static const unsigned int mt6370_vpos_ramp_tbl[] = { 8540, 5840, 4830, 3000 }; + +static const unsigned int mt6370_vneg_ramp_tbl[] = { 10090, 6310, 5050, 3150 }; + +static int mt6370_get_error_flags(struct regulator_dev *rdev, + unsigned int *flags) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + unsigned int stat_reg, stat, rpt_flags = 0; + int rid = 
rdev_get_id(rdev), ret; + + if (rid == MT6370_IDX_VIBLDO) + stat_reg = MT6370_REG_LDO_STAT; + else + stat_reg = MT6370_REG_DB_STAT; + + ret = regmap_read(regmap, stat_reg, &stat); + if (ret) + return ret; + + switch (rid) { + case MT6370_IDX_DSVBOOST: + if (stat & MT6370_BSTOCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + case MT6370_IDX_DSVPOS: + if (stat & MT6370_POSSCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_UNDER_VOLTAGE; + + if (stat & MT6370_POSOCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + case MT6370_IDX_DSVNEG: + if (stat & MT6370_NEGSCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_UNDER_VOLTAGE; + + if (stat & MT6370_NEGOCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + default: + if (stat & MT6370_LDOOC_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + } + + *flags = rpt_flags; + return 0; +} + +static const struct regulator_ops mt6370_dbvboost_ops = { + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, + .get_bypass = regulator_get_bypass_regmap, + .set_bypass = regulator_set_bypass_regmap, + .get_error_flags = mt6370_get_error_flags, +}; + +static const struct regulator_ops mt6370_dbvout_ops = { + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_ramp_delay = regulator_set_ramp_delay_regmap, + .get_error_flags = mt6370_get_error_flags, +}; + +static const struct regulator_ops mt6370_ldo_ops = { + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_error_flags = mt6370_get_error_flags, +}; + +static int mt6370_of_parse_cb(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *config) +{ + struct mt6370_priv *priv = config->driver_data; + struct gpio_desc *enable_gpio; + int ret; + + enable_gpio = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0, + GPIOD_OUT_HIGH | + GPIOD_FLAGS_BIT_NONEXCLUSIVE, + desc->name); + if (IS_ERR(enable_gpio)) { + config->ena_gpiod = NULL; + return 0; + } + + /* + * The rails are under register control by default. Only when every + * rail uses an external enable pin is the whole chip switched over + * to external control. + */ + if (priv->use_external_ctrl) { + ret = regmap_update_bits(priv->regmap, MT6370_REG_DB_CTRL1, + MT6370_DBEXTEN_MASK, + MT6370_DBEXTEN_MASK); + if (ret) + return ret; + } + + config->ena_gpiod = enable_gpio; + priv->use_external_ctrl = true; + return 0; +} + +static const struct regulator_desc mt6370_regulator_descs[] = { + { + .name = "mt6370-dsv-vbst", + .of_match = of_match_ptr("dsvbst"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_DSVBOOST, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &mt6370_dbvboost_ops, + .min_uV = MT6370_DBVBOOST_MINUV, + .uV_step = MT6370_DBVBOOST_STPUV, + .n_voltages = MT6370_DBVBOOST_N_VOLT, + .vsel_reg = MT6370_REG_DB_VBST, + .vsel_mask = MT6370_DBVOUT_MASK, + .bypass_reg = MT6370_REG_DB_CTRL1, + .bypass_mask = 
MT6370_DBPERD_MASK, + .bypass_val_on = MT6370_DBPERD_MASK, + }, + { + .name = "mt6370-dsv-vpos", + .of_match = of_match_ptr("dsvpos"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_DSVPOS, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .of_parse_cb = mt6370_of_parse_cb, + .ops = &mt6370_dbvout_ops, + .min_uV = MT6370_DBVOUT_MINUV, + .uV_step = MT6370_DBVOUT_STPUV, + .n_voltages = MT6370_DBVOUT_N_VOLT, + .vsel_reg = MT6370_REG_DB_VPOS, + .vsel_mask = MT6370_DBVOUT_MASK, + .enable_reg = MT6370_REG_DB_CTRL2, + .enable_mask = MT6370_DBVPOSEN_MASK, + .ramp_reg = MT6370_REG_DB_VPOS, + .ramp_mask = MT6370_DBSLEW_MASK, + .ramp_delay_table = mt6370_vpos_ramp_tbl, + .n_ramp_values = ARRAY_SIZE(mt6370_vpos_ramp_tbl), + .active_discharge_reg = MT6370_REG_DB_CTRL2, + .active_discharge_mask = MT6370_DBVPOSDISG_MASK, + .active_discharge_on = MT6370_DBVPOSDISG_MASK, + }, + { + .name = "mt6370-dsv-vneg", + .of_match = of_match_ptr("dsvneg"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_DSVNEG, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .of_parse_cb = mt6370_of_parse_cb, + .ops = &mt6370_dbvout_ops, + .min_uV = MT6370_DBVOUT_MINUV, + .uV_step = MT6370_DBVOUT_STPUV, + .n_voltages = MT6370_DBVOUT_N_VOLT, + .vsel_reg = MT6370_REG_DB_VNEG, + .vsel_mask = MT6370_DBVOUT_MASK, + .enable_reg = MT6370_REG_DB_CTRL2, + .enable_mask = MT6370_DBVNEGEN_MASK, + .ramp_reg = MT6370_REG_DB_VNEG, + .ramp_mask = MT6370_DBSLEW_MASK, + .ramp_delay_table = mt6370_vneg_ramp_tbl, + .n_ramp_values = ARRAY_SIZE(mt6370_vneg_ramp_tbl), + .active_discharge_reg = MT6370_REG_DB_CTRL2, + .active_discharge_mask = MT6370_DBVNEGDISG_MASK, + .active_discharge_on = MT6370_DBVNEGDISG_MASK, + }, + { + .name = "mt6370-vib-ldo", + .of_match = of_match_ptr("vibldo"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_VIBLDO, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &mt6370_ldo_ops, + .min_uV = MT6370_LDO_MINUV, + .uV_step = MT6370_LDO_STPUV, + .n_voltages = MT6370_LDO_N_VOLT, + .vsel_reg = MT6370_REG_LDO_VOUT, + .vsel_mask = MT6370_LDOVOUT_MASK, + .enable_reg = MT6370_REG_LDO_VOUT, + .enable_mask = MT6370_LDOEN_MASK, + .active_discharge_reg = MT6370_REG_LDO_CFG, + .active_discharge_mask = MT6370_LDOOMS_MASK, + .active_discharge_on = MT6370_LDOOMS_MASK, + } +}; + +static irqreturn_t mt6370_scp_handler(int irq, void *data) +{ + struct regulator_dev *rdev = data; + + regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE, + NULL); + return IRQ_HANDLED; +} + +static irqreturn_t mt6370_ocp_handler(int irq, void *data) +{ + struct regulator_dev *rdev = data; + + regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT, NULL); + return IRQ_HANDLED; +} + +static int mt6370_regulator_irq_register(struct mt6370_priv *priv) +{ + struct platform_device *pdev = to_platform_device(priv->dev); + static const struct { + const char *name; + int rid; + irq_handler_t handler; + } mt6370_irqs[] = { + { "db_vpos_scp", MT6370_IDX_DSVPOS, mt6370_scp_handler }, + { "db_vneg_scp", MT6370_IDX_DSVNEG, mt6370_scp_handler }, + { "db_vbst_ocp", MT6370_IDX_DSVBOOST, mt6370_ocp_handler }, + { "db_vpos_ocp", MT6370_IDX_DSVPOS, mt6370_ocp_handler }, + { "db_vneg_ocp", MT6370_IDX_DSVNEG, mt6370_ocp_handler }, + { "ldo_oc", MT6370_IDX_VIBLDO, mt6370_ocp_handler } + }; + struct regulator_dev *rdev; + int i, irq, ret; + + for (i = 0; i < ARRAY_SIZE(mt6370_irqs); i++) { + irq = platform_get_irq_byname(pdev, mt6370_irqs[i].name); + + rdev = 
priv->rdev[mt6370_irqs[i].rid]; + + ret = devm_request_threaded_irq(priv->dev, irq, NULL, + mt6370_irqs[i].handler, 0, + mt6370_irqs[i].name, rdev); + if (ret) { + dev_err(priv->dev, + "Failed to register (%d) interrupt\n", i); + return ret; + } + } + + return 0; +} + +static int mt6370_regulator_register(struct mt6370_priv *priv) +{ + struct regulator_dev *rdev; + struct regulator_config cfg = {}; + struct device *parent = priv->dev->parent; + int i; + + cfg.dev = parent; + cfg.driver_data = priv; + + for (i = 0; i < MT6370_MAX_IDX; i++) { + rdev = devm_regulator_register(priv->dev, + mt6370_regulator_descs + i, + &cfg); + if (IS_ERR(rdev)) { + dev_err(priv->dev, + "Failed to register (%d) regulator\n", i); + return PTR_ERR(rdev); + } + + priv->rdev[i] = rdev; + } + + return 0; +} + +static int mt6370_regulator_probe(struct platform_device *pdev) +{ + struct mt6370_priv *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + + priv->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!priv->regmap) { + dev_err(&pdev->dev, "Failed to init regmap\n"); + return -ENODEV; + } + + ret = mt6370_regulator_register(priv); + if (ret) + return ret; + + return mt6370_regulator_irq_register(priv); +} + +static const struct platform_device_id mt6370_devid_table[] = { + { "mt6370-regulator", 0}, + {} +}; +MODULE_DEVICE_TABLE(platform, mt6370_devid_table); + +static struct platform_driver mt6370_regulator_driver = { + .driver = { + .name = "mt6370-regulator", + }, + .id_table = mt6370_devid_table, + .probe = mt6370_regulator_probe, +}; +module_platform_driver(mt6370_regulator_driver); + +MODULE_AUTHOR("ChiYuan Huang "); +MODULE_DESCRIPTION("Mediatek MT6370 Regulator Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/rt5120-regulator.c b/drivers/regulator/rt5120-regulator.c new file mode 100644 index 0000000000..8173ede094 --- /dev/null +++ b/drivers/regulator/rt5120-regulator.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RT5120_REG_PGSTAT 0x03 +#define RT5120_REG_CH1VID 0x06 +#define RT5120_REG_CH1SLPVID 0x07 +#define RT5120_REG_ENABLE 0x08 +#define RT5120_REG_MODECTL 0x09 +#define RT5120_REG_UVOVPROT 0x0A +#define RT5120_REG_SLPCTL 0x0C +#define RT5120_REG_INTSTAT 0x1E +#define RT5120_REG_DISCHG 0x1F + +#define RT5120_OUTPG_MASK(rid) BIT(rid + 1) +#define RT5120_OUTUV_MASK(rid) BIT(rid + 9) +#define RT5120_OUTOV_MASK(rid) BIT(rid + 16) +#define RT5120_CH1VID_MASK GENMASK(6, 0) +#define RT5120_RIDEN_MASK(rid) BIT(rid + 1) +#define RT5120_RADEN_MASK(rid) BIT(rid) +#define RT5120_FPWM_MASK(rid) BIT(rid + 1) +#define RT5120_UVHICCUP_MASK BIT(1) +#define RT5120_OVHICCUP_MASK BIT(0) +#define RT5120_HOTDIE_MASK BIT(1) + +#define RT5120_BUCK1_MINUV 600000 +#define RT5120_BUCK1_MAXUV 1393750 +#define RT5120_BUCK1_STEPUV 6250 +#define RT5120_BUCK1_NUM_VOLT 0x80 + +#define RT5120_AUTO_MODE 0 +#define RT5120_FPWM_MODE 1 + +enum { + RT5120_REGULATOR_BUCK1 = 0, + RT5120_REGULATOR_BUCK2, + RT5120_REGULATOR_BUCK3, + RT5120_REGULATOR_BUCK4, + RT5120_REGULATOR_LDO, + RT5120_REGULATOR_EXTEN, + RT5120_MAX_REGULATOR +}; + +struct rt5120_priv { + struct device *dev; + struct regmap *regmap; + struct regulator_desc rdesc[RT5120_MAX_REGULATOR]; +}; + +static int rt5120_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int rid = 
rdev_get_id(rdev); + unsigned int mask = RT5120_FPWM_MASK(rid), val; + + switch (mode) { + case REGULATOR_MODE_NORMAL: + val = 0; + break; + case REGULATOR_MODE_FAST: + val = RT5120_FPWM_MASK(rid); + break; + default: + return -EINVAL; + } + + return regmap_update_bits(regmap, RT5120_REG_MODECTL, mask, val); +} + +static unsigned int rt5120_buck_get_mode(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int ret, rid = rdev_get_id(rdev); + unsigned int val; + + ret = regmap_read(regmap, RT5120_REG_MODECTL, &val); + if (ret) + return REGULATOR_MODE_INVALID; + + if (val & RT5120_FPWM_MASK(rid)) + return REGULATOR_MODE_FAST; + + return REGULATOR_MODE_NORMAL; +} + +static int rt5120_regulator_get_error_flags(struct regulator_dev *rdev, + unsigned int *flags) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + unsigned int stat, hd_stat, cur_flags = 0; + int rid = rdev_get_id(rdev), ret; + + /* + * Registers 0x03/0x04/0x05 indicate PG/UV/OV status; + * use a block read to decrease I/O transfer time. + */ + ret = regmap_raw_read(regmap, RT5120_REG_PGSTAT, &stat, 3); + if (ret) + return ret; + + ret = regmap_read(regmap, RT5120_REG_INTSTAT, &hd_stat); + if (ret) + return ret; + + if (!(stat & RT5120_OUTPG_MASK(rid))) { + if (stat & RT5120_OUTUV_MASK(rid)) + cur_flags |= REGULATOR_ERROR_UNDER_VOLTAGE; + + if (stat & RT5120_OUTOV_MASK(rid)) + cur_flags |= REGULATOR_ERROR_REGULATION_OUT; + } + + if (hd_stat & RT5120_HOTDIE_MASK) + cur_flags |= REGULATOR_ERROR_OVER_TEMP; + + *flags = cur_flags; + return 0; +} + +static int rt5120_buck1_set_suspend_voltage(struct regulator_dev *rdev, int uV) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int sel; + + if (uV < RT5120_BUCK1_MINUV || uV > RT5120_BUCK1_MAXUV) + return -EINVAL; + + sel = (uV - RT5120_BUCK1_MINUV) / RT5120_BUCK1_STEPUV; + return regmap_write(regmap, RT5120_REG_CH1SLPVID, sel); +} + +static int rt5120_regulator_set_suspend_enable(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int rid = rdev_get_id(rdev); + unsigned int mask = RT5120_RIDEN_MASK(rid); + + return regmap_update_bits(regmap, RT5120_REG_SLPCTL, mask, mask); +} + +static int rt5120_regulator_set_suspend_disable(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int rid = rdev_get_id(rdev); + unsigned int mask = RT5120_RIDEN_MASK(rid); + + return regmap_update_bits(regmap, RT5120_REG_SLPCTL, mask, 0); +} + +static const struct regulator_ops rt5120_buck1_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_mode = rt5120_buck_set_mode, + .get_mode = rt5120_buck_get_mode, + .get_error_flags = rt5120_regulator_get_error_flags, + .set_suspend_voltage = rt5120_buck1_set_suspend_voltage, + .set_suspend_enable = rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static const struct regulator_ops rt5120_buck234_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_mode = rt5120_buck_set_mode, + .get_mode = rt5120_buck_get_mode, + .get_error_flags = rt5120_regulator_get_error_flags, + .set_suspend_enable 
= rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static const struct regulator_ops rt5120_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_error_flags = rt5120_regulator_get_error_flags, + .set_suspend_enable = rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static const struct regulator_ops rt5120_exten_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_enable = rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static unsigned int rt5120_buck_of_map_mode(unsigned int mode) +{ + switch (mode) { + case RT5120_AUTO_MODE: + return REGULATOR_MODE_NORMAL; + case RT5120_FPWM_MODE: + return REGULATOR_MODE_FAST; + default: + return REGULATOR_MODE_INVALID; + } +} + +static void rt5120_fillin_regulator_desc(struct regulator_desc *desc, int rid) +{ + static const char * const name[] = { + "buck1", "buck2", "buck3", "buck4", "ldo", "exten" }; + static const char * const sname[] = { + "vin1", "vin2", "vin3", "vin4", "vinldo", NULL }; + + /* Common regulator properties */ + desc->name = name[rid]; + desc->supply_name = sname[rid]; + desc->owner = THIS_MODULE; + desc->type = REGULATOR_VOLTAGE; + desc->id = rid; + desc->enable_reg = RT5120_REG_ENABLE; + desc->enable_mask = RT5120_RIDEN_MASK(rid); + desc->active_discharge_reg = RT5120_REG_DISCHG; + desc->active_discharge_mask = RT5120_RADEN_MASK(rid); + desc->active_discharge_on = RT5120_RADEN_MASK(rid); + /* Config n_voltages to 1 for all */ + desc->n_voltages = 1; + + /* Only the bucks support mode changes */ + if (rid >= RT5120_REGULATOR_BUCK1 && rid <= RT5120_REGULATOR_BUCK4) + desc->of_map_mode = rt5120_buck_of_map_mode; + + /* RID specific property init */ + switch (rid) { + case RT5120_REGULATOR_BUCK1: + /* Only buck1 supports voltage changes via I2C */ + desc->n_voltages = RT5120_BUCK1_NUM_VOLT; + desc->min_uV = RT5120_BUCK1_MINUV; + desc->uV_step = RT5120_BUCK1_STEPUV; + desc->vsel_reg = RT5120_REG_CH1VID; + desc->vsel_mask = RT5120_CH1VID_MASK; + desc->ops = &rt5120_buck1_ops; + break; + case RT5120_REGULATOR_BUCK2 ... 
RT5120_REGULATOR_BUCK4: + desc->ops = &rt5120_buck234_ops; + break; + case RT5120_REGULATOR_LDO: + desc->ops = &rt5120_ldo_ops; + break; + default: + desc->ops = &rt5120_exten_ops; + } +} + +static int rt5120_of_parse_cb(struct rt5120_priv *priv, int rid, + struct of_regulator_match *match) +{ + struct regulator_desc *desc = priv->rdesc + rid; + struct regulator_init_data *init_data = match->init_data; + + if (!init_data || rid == RT5120_REGULATOR_BUCK1) + return 0; + + if (init_data->constraints.min_uV != init_data->constraints.max_uV) { + dev_err(priv->dev, "Variable voltage for fixed regulator\n"); + return -EINVAL; + } + + desc->fixed_uV = init_data->constraints.min_uV; + return 0; +} + +static struct of_regulator_match rt5120_regu_match[RT5120_MAX_REGULATOR] = { + [RT5120_REGULATOR_BUCK1] = { .name = "buck1", }, + [RT5120_REGULATOR_BUCK2] = { .name = "buck2", }, + [RT5120_REGULATOR_BUCK3] = { .name = "buck3", }, + [RT5120_REGULATOR_BUCK4] = { .name = "buck4", }, + [RT5120_REGULATOR_LDO] = { .name = "ldo", }, + [RT5120_REGULATOR_EXTEN] = { .name = "exten", } +}; + +static int rt5120_parse_regulator_dt_data(struct rt5120_priv *priv) +{ + struct device *dev = priv->dev->parent; + struct device_node *reg_node; + int i, ret; + + for (i = 0; i < RT5120_MAX_REGULATOR; i++) { + rt5120_fillin_regulator_desc(priv->rdesc + i, i); + + rt5120_regu_match[i].desc = priv->rdesc + i; + } + + reg_node = of_get_child_by_name(dev->of_node, "regulators"); + if (!reg_node) { + dev_err(priv->dev, "Couldn't find 'regulators' node\n"); + return -ENODEV; + } + + ret = of_regulator_match(priv->dev, reg_node, rt5120_regu_match, + ARRAY_SIZE(rt5120_regu_match)); + + of_node_put(reg_node); + + if (ret < 0) { + dev_err(priv->dev, + "Error parsing regulator init data (%d)\n", ret); + return ret; + } + + for (i = 0; i < RT5120_MAX_REGULATOR; i++) { + ret = rt5120_of_parse_cb(priv, i, rt5120_regu_match + i); + if (ret) { + dev_err(priv->dev, "Failed in [%d] of_parse_cb\n", i); + return ret; + } + } + + return 0; +} + +static int rt5120_device_property_init(struct rt5120_priv *priv) +{ + struct device *dev = priv->dev->parent; + struct device_node *np = dev->of_node; + bool prot_enable; + unsigned int prot_enable_val = 0; + + /* Assign UV/OV HW protection behavior */ + prot_enable = of_property_read_bool(np, + "richtek,enable-undervolt-hiccup"); + if (prot_enable) + prot_enable_val |= RT5120_UVHICCUP_MASK; + + prot_enable = of_property_read_bool(np, + "richtek,enable-overvolt-hiccup"); + if (prot_enable) + prot_enable_val |= RT5120_OVHICCUP_MASK; + + return regmap_update_bits(priv->regmap, RT5120_REG_UVOVPROT, + RT5120_UVHICCUP_MASK | RT5120_OVHICCUP_MASK, + prot_enable_val); +} + +static int rt5120_regulator_probe(struct platform_device *pdev) +{ + struct rt5120_priv *priv; + struct regulator_dev *rdev; + struct regulator_config config = {}; + int i, ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + + priv->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!priv->regmap) { + dev_err(&pdev->dev, "Failed to init regmap\n"); + return -ENODEV; + } + + ret = rt5120_device_property_init(priv); + if (ret) { + dev_err(&pdev->dev, "Failed to do property init\n"); + return ret; + } + + ret = rt5120_parse_regulator_dt_data(priv); + if (ret) { + dev_err(&pdev->dev, "Failed to parse dt data\n"); + return ret; + } + + config.dev = &pdev->dev; + config.regmap = priv->regmap; + + for (i = 0; i < RT5120_MAX_REGULATOR; i++) { + config.of_node = 
rt5120_regu_match[i].of_node; + config.init_data = rt5120_regu_match[i].init_data; + + rdev = devm_regulator_register(&pdev->dev, priv->rdesc + i, + &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "Failed to register regulator [%d]\n", i); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct platform_device_id rt5120_regulator_dev_table[] = { + { "rt5120-regulator", 0 }, + {} +}; +MODULE_DEVICE_TABLE(platform, rt5120_regulator_dev_table); + +static struct platform_driver rt5120_regulator_driver = { + .driver = { + .name = "rt5120-regulator", + }, + .id_table = rt5120_regulator_dev_table, + .probe = rt5120_regulator_probe, +}; +module_platform_driver(rt5120_regulator_driver); + +MODULE_AUTHOR("ChiYuan Huang "); +MODULE_DESCRIPTION("Richtek RT5120 regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/rt5759-regulator.c b/drivers/regulator/rt5759-regulator.c new file mode 100644 index 0000000000..6b96899eb2 --- /dev/null +++ b/drivers/regulator/rt5759-regulator.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RT5759_REG_VENDORINFO 0x00 +#define RT5759_REG_FREQ 0x01 +#define RT5759_REG_VSEL 0x02 +#define RT5759_REG_DCDCCTRL 0x03 +#define RT5759_REG_STATUS 0x04 +#define RT5759_REG_DCDCSET 0x05 +#define RT5759A_REG_WDTEN 0x42 + +#define RT5759_TSTEP_MASK GENMASK(3, 2) +#define RT5759_VSEL_MASK GENMASK(6, 0) +#define RT5759_DISCHARGE_MASK BIT(3) +#define RT5759_FPWM_MASK BIT(2) +#define RT5759_ENABLE_MASK BIT(1) +#define RT5759_OT_MASK BIT(1) +#define RT5759_UV_MASK BIT(0) +#define RT5759_OCLVL_MASK GENMASK(7, 6) +#define RT5759_OCLVL_SHIFT 6 +#define RT5759_OTLVL_MASK GENMASK(5, 4) +#define RT5759_OTLVL_SHIFT 4 +#define RT5759A_WDTEN_MASK BIT(1) + +#define RT5759_MANUFACTURER_ID 0x82 +/* vsel range 0x00 ~ 0x5A */ +#define RT5759_NUM_VOLTS 91 +#define RT5759_MIN_UV 600000 +#define RT5759_STEP_UV 10000 +#define RT5759A_STEP_UV 12500 +#define RT5759_MINSS_TIMEUS 1500 + +#define RT5759_PSKIP_MODE 0 +#define RT5759_FPWM_MODE 1 + +enum { + CHIP_TYPE_RT5759 = 0, + CHIP_TYPE_RT5759A, + CHIP_TYPE_MAX +}; + +struct rt5759_priv { + struct device *dev; + struct regmap *regmap; + struct regulator_desc desc; + unsigned long chip_type; +}; + +static int rt5759_set_mode(struct regulator_dev *rdev, unsigned int mode) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + unsigned int mode_val; + + switch (mode) { + case REGULATOR_MODE_NORMAL: + mode_val = 0; + break; + case REGULATOR_MODE_FAST: + mode_val = RT5759_FPWM_MASK; + break; + default: + return -EINVAL; + } + + return regmap_update_bits(regmap, RT5759_REG_STATUS, RT5759_FPWM_MASK, + mode_val); +} + +static unsigned int rt5759_get_mode(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + unsigned int regval; + int ret; + + ret = regmap_read(regmap, RT5759_REG_DCDCCTRL, &regval); + if (ret) + return REGULATOR_MODE_INVALID; + + if (regval & RT5759_FPWM_MASK) + return REGULATOR_MODE_FAST; + + return REGULATOR_MODE_NORMAL; +} + +static int rt5759_get_error_flags(struct regulator_dev *rdev, + unsigned int *flags) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + unsigned int status, events = 0; + int ret; + + ret = regmap_read(regmap, RT5759_REG_STATUS, &status); + if (ret) + return ret; + + if (status & RT5759_OT_MASK) + events |= REGULATOR_ERROR_OVER_TEMP; + + if (status & RT5759_UV_MASK) + events |= REGULATOR_ERROR_UNDER_VOLTAGE; + + *flags = events; + return 0; +} + +
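+/* + * Level mapping implemented by the OCP setter below, inferred from the + * driver code itself rather than from a datasheet: requested limits of + * up to 9.8 A, 10.8 A and 11.8 A select register field values 1, 2 and + * 3 (e.g. lim_uA = 10000000 matches ocp_lvl[1] and programs the field + * to 2), while disabling the protection writes 0 to the field. + */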
static int rt5759_set_ocp(struct regulator_dev *rdev, int lim_uA, int severity, + bool enable) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int ocp_lvl[] = { 9800000, 10800000, 11800000 }; + unsigned int ocp_regval; + int i; + + /* Only over-current protection (severity PROT) is supported */ + if (severity != REGULATOR_SEVERITY_PROT) + return 0; + + if (enable) { + /* Default ocp level is 10.8A */ + if (lim_uA == 0) + lim_uA = 10800000; + + for (i = 0; i < ARRAY_SIZE(ocp_lvl); i++) { + if (lim_uA <= ocp_lvl[i]) + break; + } + + if (i == ARRAY_SIZE(ocp_lvl)) + i = ARRAY_SIZE(ocp_lvl) - 1; + + ocp_regval = i + 1; + } else { + ocp_regval = 0; + } + + return regmap_update_bits(regmap, RT5759_REG_DCDCSET, RT5759_OCLVL_MASK, + ocp_regval << RT5759_OCLVL_SHIFT); +} + +static int rt5759_set_otp(struct regulator_dev *rdev, int lim, int severity, + bool enable) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int otp_lvl[] = { 140, 150, 170 }; + unsigned int otp_regval; + int i; + + /* Only over-temperature protection (severity PROT) is supported */ + if (severity != REGULATOR_SEVERITY_PROT) + return 0; + + if (enable) { + /* Default otp level is 150 degrees C */ + if (lim == 0) + lim = 150; + + for (i = 0; i < ARRAY_SIZE(otp_lvl); i++) { + if (lim <= otp_lvl[i]) + break; + } + + if (i == ARRAY_SIZE(otp_lvl)) + i = ARRAY_SIZE(otp_lvl) - 1; + + otp_regval = i + 1; + } else { + otp_regval = 0; + } + + return regmap_update_bits(regmap, RT5759_REG_DCDCSET, RT5759_OTLVL_MASK, + otp_regval << RT5759_OTLVL_SHIFT); +} + +static const struct regulator_ops rt5759_regulator_ops = { + .list_voltage = regulator_list_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_mode = rt5759_set_mode, + .get_mode = rt5759_get_mode, + .set_ramp_delay = regulator_set_ramp_delay_regmap, + .get_error_flags = rt5759_get_error_flags, + .set_over_current_protection = rt5759_set_ocp, + .set_thermal_protection = rt5759_set_otp, +}; + +static unsigned int rt5759_of_map_mode(unsigned int mode) +{ + switch (mode) { + case RT5759_FPWM_MODE: + return REGULATOR_MODE_FAST; + case RT5759_PSKIP_MODE: + return REGULATOR_MODE_NORMAL; + default: + return REGULATOR_MODE_INVALID; + } +} + +static const unsigned int rt5759_ramp_table[] = { 20000, 15000, 10000, 5000 }; + +static int rt5759_regulator_register(struct rt5759_priv *priv) +{ + struct device_node *np = priv->dev->of_node; + struct regulator_desc *reg_desc = &priv->desc; + struct regulator_config reg_cfg; + struct regulator_dev *rdev; + int ret; + + reg_desc->name = "rt5759-buck"; + reg_desc->type = REGULATOR_VOLTAGE; + reg_desc->owner = THIS_MODULE; + reg_desc->ops = &rt5759_regulator_ops; + reg_desc->n_voltages = RT5759_NUM_VOLTS; + reg_desc->min_uV = RT5759_MIN_UV; + reg_desc->uV_step = RT5759_STEP_UV; + reg_desc->vsel_reg = RT5759_REG_VSEL; + reg_desc->vsel_mask = RT5759_VSEL_MASK; + reg_desc->enable_reg = RT5759_REG_DCDCCTRL; + reg_desc->enable_mask = RT5759_ENABLE_MASK; + reg_desc->active_discharge_reg = RT5759_REG_DCDCCTRL; + reg_desc->active_discharge_mask = RT5759_DISCHARGE_MASK; + reg_desc->active_discharge_on = RT5759_DISCHARGE_MASK; + reg_desc->ramp_reg = RT5759_REG_FREQ; + reg_desc->ramp_mask = RT5759_TSTEP_MASK; + reg_desc->ramp_delay_table = rt5759_ramp_table; + reg_desc->n_ramp_values = ARRAY_SIZE(rt5759_ramp_table); + reg_desc->enable_time = 
RT5759_MINSS_TIMEUS; + reg_desc->of_map_mode = rt5759_of_map_mode; + + /* + * RT5759 step uV = 10000 + * RT5759A step uV = 12500 + */ + if (priv->chip_type == CHIP_TYPE_RT5759A) + reg_desc->uV_step = RT5759A_STEP_UV; + + reg_cfg.dev = priv->dev; + reg_cfg.of_node = np; + reg_cfg.init_data = of_get_regulator_init_data(priv->dev, np, reg_desc); + reg_cfg.regmap = priv->regmap; + + rdev = devm_regulator_register(priv->dev, reg_desc, &reg_cfg); + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(priv->dev, "Failed to register regulator (%d)\n", ret); + return ret; + } + + return 0; +} + +static int rt5759_init_device_property(struct rt5759_priv *priv) +{ + unsigned int val = 0; + + /* + * Only the RT5759A supports an external watchdog input + */ + if (priv->chip_type != CHIP_TYPE_RT5759A) + return 0; + + if (device_property_read_bool(priv->dev, "richtek,watchdog-enable")) + val = RT5759A_WDTEN_MASK; + + return regmap_update_bits(priv->regmap, RT5759A_REG_WDTEN, + RT5759A_WDTEN_MASK, val); +} + +static int rt5759_manufacturer_check(struct rt5759_priv *priv) +{ + unsigned int vendor; + int ret; + + ret = regmap_read(priv->regmap, RT5759_REG_VENDORINFO, &vendor); + if (ret) + return ret; + + if (vendor != RT5759_MANUFACTURER_ID) { + dev_err(priv->dev, "vendor info not correct (%d)\n", vendor); + return -EINVAL; + } + + return 0; +} + +static bool rt5759_is_accessible_reg(struct device *dev, unsigned int reg) +{ + struct rt5759_priv *priv = dev_get_drvdata(dev); + + if (reg <= RT5759_REG_DCDCSET) + return true; + + if (priv->chip_type == CHIP_TYPE_RT5759A && reg == RT5759A_REG_WDTEN) + return true; + + return false; +} + +static const struct regmap_config rt5759_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = RT5759A_REG_WDTEN, + .readable_reg = rt5759_is_accessible_reg, + .writeable_reg = rt5759_is_accessible_reg, +}; + +static int rt5759_probe(struct i2c_client *i2c) +{ + struct rt5759_priv *priv; + int ret; + + priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &i2c->dev; + priv->chip_type = (unsigned long)of_device_get_match_data(&i2c->dev); + i2c_set_clientdata(i2c, priv); + + priv->regmap = devm_regmap_init_i2c(i2c, &rt5759_regmap_config); + if (IS_ERR(priv->regmap)) { + ret = PTR_ERR(priv->regmap); + dev_err(&i2c->dev, "Failed to allocate regmap (%d)\n", ret); + return ret; + } + + ret = rt5759_manufacturer_check(priv); + if (ret) { + dev_err(&i2c->dev, "Failed to check device (%d)\n", ret); + return ret; + } + + ret = rt5759_init_device_property(priv); + if (ret) { + dev_err(&i2c->dev, "Failed to init device (%d)\n", ret); + return ret; + } + + return rt5759_regulator_register(priv); +} + +static const struct of_device_id __maybe_unused rt5759_device_table[] = { + { .compatible = "richtek,rt5759", .data = (void *)CHIP_TYPE_RT5759 }, + { .compatible = "richtek,rt5759a", .data = (void *)CHIP_TYPE_RT5759A }, + {} +}; +MODULE_DEVICE_TABLE(of, rt5759_device_table); + +static struct i2c_driver rt5759_driver = { + .driver = { + .name = "rt5759", + .of_match_table = of_match_ptr(rt5759_device_table), + }, + .probe_new = rt5759_probe, +}; +module_i2c_driver(rt5759_driver); + +MODULE_AUTHOR("ChiYuan Huang "); +MODULE_DESCRIPTION("Richtek RT5759 Regulator Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/sm5703-regulator.c b/drivers/regulator/sm5703-regulator.c new file mode 100644 index 0000000000..05ad28fc4d --- /dev/null +++ b/drivers/regulator/sm5703-regulator.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: 
GPL-2.0-only + +#include +#include +#include +#include +#include +#include +#include + +enum sm5703_regulators { + SM5703_BUCK, + SM5703_LDO1, + SM5703_LDO2, + SM5703_LDO3, + SM5703_USBLDO1, + SM5703_USBLDO2, + SM5703_VBUS, + SM5703_MAX_REGULATORS, +}; + +static const int sm5703_ldo_voltagemap[] = { + 1500000, 1800000, 2600000, 2800000, 3000000, 3300000, +}; + +static const int sm5703_buck_voltagemap[] = { + 1000000, 1000000, 1000000, 1000000, + 1000000, 1000000, 1000000, 1000000, + 1000000, 1000000, 1000000, 1100000, + 1200000, 1300000, 1400000, 1500000, + 1600000, 1700000, 1800000, 1900000, + 2000000, 2100000, 2200000, 2300000, + 2400000, 2500000, 2600000, 2700000, + 2800000, 2900000, 3000000, 3000000, +}; + +#define SM5703USBLDO(_name, _id) \ + [SM5703_USBLDO ## _id] = { \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .type = REGULATOR_VOLTAGE, \ + .id = SM5703_USBLDO ## _id, \ + .ops = &sm5703_regulator_ops_fixed, \ + .fixed_uV = SM5703_USBLDO_MICROVOLT, \ + .enable_reg = SM5703_REG_USBLDO12, \ + .enable_mask = SM5703_REG_EN_USBLDO ##_id, \ + .owner = THIS_MODULE, \ + } + +#define SM5703VBUS(_name) \ + [SM5703_VBUS] = { \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .type = REGULATOR_VOLTAGE, \ + .id = SM5703_VBUS, \ + .ops = &sm5703_regulator_ops_fixed, \ + .fixed_uV = SM5703_VBUS_MICROVOLT, \ + .enable_reg = SM5703_REG_CNTL, \ + .enable_mask = SM5703_OPERATION_MODE_MASK, \ + .enable_val = SM5703_OPERATION_MODE_USB_OTG_MODE, \ + .disable_val = SM5703_OPERATION_MODE_CHARGING_ON, \ + .owner = THIS_MODULE, \ + } + +#define SM5703BUCK(_name) \ + [SM5703_BUCK] = { \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .type = REGULATOR_VOLTAGE, \ + .id = SM5703_BUCK, \ + .ops = &sm5703_regulator_ops, \ + .n_voltages = ARRAY_SIZE(sm5703_buck_voltagemap), \ + .volt_table = sm5703_buck_voltagemap, \ + .vsel_reg = SM5703_REG_BUCK, \ + .vsel_mask = SM5703_BUCK_VOLT_MASK, \ + .enable_reg = SM5703_REG_BUCK, \ + .enable_mask = SM5703_REG_EN_BUCK, \ + .owner = THIS_MODULE, \ + } + +#define SM5703LDO(_name, _id) \ + [SM5703_LDO ## _id] = { \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .type = REGULATOR_VOLTAGE, \ + .id = SM5703_LDO ## _id, \ + .ops = &sm5703_regulator_ops, \ + .n_voltages = ARRAY_SIZE(sm5703_ldo_voltagemap), \ + .volt_table = sm5703_ldo_voltagemap, \ + .vsel_reg = SM5703_REG_LDO ##_id, \ + .vsel_mask = SM5703_LDO_VOLT_MASK, \ + .enable_reg = SM5703_REG_LDO ##_id, \ + .enable_mask = SM5703_LDO_EN, \ + .owner = THIS_MODULE, \ + } + +static const struct regulator_ops sm5703_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_ops sm5703_regulator_ops_fixed = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static struct regulator_desc sm5703_regulators_desc[SM5703_MAX_REGULATORS] = { + SM5703BUCK("buck"), + SM5703LDO("ldo1", 1), + SM5703LDO("ldo2", 2), + SM5703LDO("ldo3", 3), + SM5703USBLDO("usbldo1", 1), + SM5703USBLDO("usbldo2", 2), + SM5703VBUS("vbus"), +}; + +static int sm5703_regulator_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regulator_config config = 
{ NULL, }; + struct regulator_dev *rdev; + struct sm5703_dev *sm5703 = dev_get_drvdata(pdev->dev.parent); + int i; + + config.dev = dev->parent; + config.regmap = sm5703->regmap; + + for (i = 0; i < SM5703_MAX_REGULATORS; i++) { + rdev = devm_regulator_register(dev, + &sm5703_regulators_desc[i], + &config); + if (IS_ERR(rdev)) + return dev_err_probe(dev, PTR_ERR(rdev), + "Failed to register a regulator\n"); + } + + return 0; +} + +static const struct platform_device_id sm5703_regulator_id[] = { + { "sm5703-regulator", 0 }, + {} +}; +MODULE_DEVICE_TABLE(platform, sm5703_regulator_id); + +static struct platform_driver sm5703_regulator_driver = { + .driver = { + .name = "sm5703-regulator", + }, + .probe = sm5703_regulator_probe, + .id_table = sm5703_regulator_id, +}; + +module_platform_driver(sm5703_regulator_driver); + +MODULE_DESCRIPTION("Silicon Mitus SM5703 LDO/Buck/USB regulator driver"); +MODULE_AUTHOR("Markuss Broks "); +MODULE_LICENSE("GPL"); diff --git a/drivers/reset/reset-sunplus.c b/drivers/reset/reset-sunplus.c new file mode 100644 index 0000000000..2f23ecaa7b --- /dev/null +++ b/drivers/reset/reset-sunplus.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * SP7021 reset driver + * + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +/* HIWORD_MASK_REG BITS */ +#define BITS_PER_HWM_REG 16 + +/* resets HW info: reg_index_shift */ +static const u32 sp_resets[] = { +/* SP7021: mo_reset0 ~ mo_reset9 */ + 0x00, + 0x02, + 0x03, + 0x04, + 0x05, + 0x06, + 0x07, + 0x08, + 0x09, + 0x0a, + 0x0b, + 0x0d, + 0x0e, + 0x0f, + 0x10, + 0x12, + 0x14, + 0x15, + 0x16, + 0x17, + 0x18, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x20, + 0x21, + 0x22, + 0x23, + 0x24, + 0x25, + 0x26, + 0x2a, + 0x2b, + 0x2d, + 0x2e, + 0x30, + 0x31, + 0x32, + 0x33, + 0x3d, + 0x3e, + 0x3f, + 0x42, + 0x44, + 0x4b, + 0x4c, + 0x4d, + 0x4e, + 0x4f, + 0x50, + 0x55, + 0x60, + 0x61, + 0x6a, + 0x6f, + 0x70, + 0x73, + 0x74, + 0x86, + 0x8a, + 0x8b, + 0x8d, + 0x8e, + 0x8f, + 0x90, + 0x92, + 0x93, + 0x94, + 0x95, + 0x96, + 0x97, + 0x98, + 0x99, +}; + +struct sp_reset { + struct reset_controller_dev rcdev; + struct notifier_block notifier; + void __iomem *base; +}; + +static inline struct sp_reset *to_sp_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct sp_reset, rcdev); +} + +static int sp_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct sp_reset *reset = to_sp_reset(rcdev); + int index = sp_resets[id] / BITS_PER_HWM_REG; + int shift = sp_resets[id] % BITS_PER_HWM_REG; + u32 val; + + val = (1 << (16 + shift)) | (assert << shift); + writel(val, reset->base + (index * 4)); + + return 0; +} + +static int sp_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return sp_reset_update(rcdev, id, true); +} + +static int sp_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return sp_reset_update(rcdev, id, false); +} + +static int sp_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct sp_reset *reset = to_sp_reset(rcdev); + int index = sp_resets[id] / BITS_PER_HWM_REG; + int shift = sp_resets[id] % BITS_PER_HWM_REG; + u32 reg; + + reg = readl(reset->base + (index * 4)); + + return !!(reg & BIT(shift)); +} + +static const struct reset_control_ops sp_reset_ops = { + .assert = sp_reset_assert, + .deassert = sp_reset_deassert, + .status = sp_reset_status, +}; + 
+static int sp_restart(struct notifier_block *nb, unsigned long mode, + void *cmd) +{ + struct sp_reset *reset = container_of(nb, struct sp_reset, notifier); + + sp_reset_assert(&reset->rcdev, 0); + sp_reset_deassert(&reset->rcdev, 0); + + return NOTIFY_DONE; +} + +static int sp_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_reset *reset; + struct resource *res; + int ret; + + reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL); + if (!reset) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reset->base = devm_ioremap_resource(dev, res); + if (IS_ERR(reset->base)) + return PTR_ERR(reset->base); + + reset->rcdev.ops = &sp_reset_ops; + reset->rcdev.owner = THIS_MODULE; + reset->rcdev.of_node = dev->of_node; + reset->rcdev.nr_resets = resource_size(res) / 4 * BITS_PER_HWM_REG; + + ret = devm_reset_controller_register(dev, &reset->rcdev); + if (ret) + return ret; + + reset->notifier.notifier_call = sp_restart; + reset->notifier.priority = 192; + + return register_restart_handler(&reset->notifier); +} + +static const struct of_device_id sp_reset_dt_ids[] = { + {.compatible = "sunplus,sp7021-reset",}, + { /* sentinel */ }, +}; + +static struct platform_driver sp_reset_driver = { + .probe = sp_reset_probe, + .driver = { + .name = "sunplus-reset", + .of_match_table = sp_reset_dt_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(sp_reset_driver); diff --git a/drivers/reset/reset-tps380x.c b/drivers/reset/reset-tps380x.c new file mode 100644 index 0000000000..09d511f069 --- /dev/null +++ b/drivers/reset/reset-tps380x.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * TI TPS380x Supply Voltage Supervisor and Reset Controller Driver + * + * Copyright (C) 2022 Pengutronix, Marco Felsch + * + * Based on Simple Reset Controller Driver + * + * Copyright (C) 2017 Pengutronix, Philipp Zabel + */ + +#include +#include +#include +#include +#include +#include +#include + +struct tps380x_reset { + struct reset_controller_dev rcdev; + struct gpio_desc *reset_gpio; + unsigned int reset_ms; +}; + +struct tps380x_reset_devdata { + unsigned int min_reset_ms; + unsigned int typ_reset_ms; + unsigned int max_reset_ms; +}; + +static inline +struct tps380x_reset *to_tps380x_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct tps380x_reset, rcdev); +} + +static int +tps380x_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct tps380x_reset *tps380x = to_tps380x_reset(rcdev); + + gpiod_set_value_cansleep(tps380x->reset_gpio, 1); + + return 0; +} + +static int +tps380x_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct tps380x_reset *tps380x = to_tps380x_reset(rcdev); + + gpiod_set_value_cansleep(tps380x->reset_gpio, 0); + msleep(tps380x->reset_ms); + + return 0; +} + +static const struct reset_control_ops reset_tps380x_ops = { + .assert = tps380x_reset_assert, + .deassert = tps380x_reset_deassert, +}; + +static int tps380x_reset_of_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + /* No special handling needed, we have only one reset line per device */ + return 0; +} + +static int tps380x_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct tps380x_reset_devdata *devdata; + struct tps380x_reset *tps380x; + + devdata = device_get_match_data(dev); + if (!devdata) + return -EINVAL; + + tps380x = devm_kzalloc(dev, sizeof(*tps380x), GFP_KERNEL); + 
if (!tps380x) + return -ENOMEM; + + tps380x->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(tps380x->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(tps380x->reset_gpio), + "Failed to get GPIO\n"); + + tps380x->reset_ms = devdata->max_reset_ms; + + tps380x->rcdev.ops = &reset_tps380x_ops; + tps380x->rcdev.owner = THIS_MODULE; + tps380x->rcdev.dev = dev; + tps380x->rcdev.of_node = dev->of_node; + tps380x->rcdev.of_reset_n_cells = 0; + tps380x->rcdev.of_xlate = tps380x_reset_of_xlate; + tps380x->rcdev.nr_resets = 1; + + return devm_reset_controller_register(dev, &tps380x->rcdev); +} + +static const struct tps380x_reset_devdata tps3801_reset_data = { + .min_reset_ms = 120, + .typ_reset_ms = 200, + .max_reset_ms = 280, +}; + +static const struct of_device_id tps380x_reset_dt_ids[] = { + { .compatible = "ti,tps3801", .data = &tps3801_reset_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, tps380x_reset_dt_ids); + +static struct platform_driver tps380x_reset_driver = { + .probe = tps380x_reset_probe, + .driver = { + .name = "tps380x-reset", + .of_match_table = tps380x_reset_dt_ids, + }, +}; +module_platform_driver(tps380x_reset_driver); + +MODULE_AUTHOR("Marco Felsch "); +MODULE_DESCRIPTION("TI TPS380x Supply Voltage Supervisor and Reset Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c new file mode 100644 index 0000000000..f14d1925e0 --- /dev/null +++ b/drivers/rtc/rtc-mpfs.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip MPFS RTC driver + * + * Copyright (c) 2021-2022 Microchip Corporation. All rights reserved. + * + * Author: Daire McNamara + * & Conor Dooley + */ +#include "linux/bits.h" +#include "linux/iopoll.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CONTROL_REG 0x00 +#define MODE_REG 0x04 +#define PRESCALER_REG 0x08 +#define ALARM_LOWER_REG 0x0c +#define ALARM_UPPER_REG 0x10 +#define COMPARE_LOWER_REG 0x14 +#define COMPARE_UPPER_REG 0x18 +#define DATETIME_LOWER_REG 0x20 +#define DATETIME_UPPER_REG 0x24 + +#define CONTROL_RUNNING_BIT BIT(0) +#define CONTROL_START_BIT BIT(0) +#define CONTROL_STOP_BIT BIT(1) +#define CONTROL_ALARM_ON_BIT BIT(2) +#define CONTROL_ALARM_OFF_BIT BIT(3) +#define CONTROL_RESET_BIT BIT(4) +#define CONTROL_UPLOAD_BIT BIT(5) +#define CONTROL_DOWNLOAD_BIT BIT(6) +#define CONTROL_MATCH_BIT BIT(7) +#define CONTROL_WAKEUP_CLR_BIT BIT(8) +#define CONTROL_WAKEUP_SET_BIT BIT(9) +#define CONTROL_UPDATED_BIT BIT(10) + +#define MODE_CLOCK_CALENDAR BIT(0) +#define MODE_WAKE_EN BIT(1) +#define MODE_WAKE_RESET BIT(2) +#define MODE_WAKE_CONTINUE BIT(3) + +#define MAX_PRESCALER_COUNT GENMASK(25, 0) +#define DATETIME_UPPER_MASK GENMASK(29, 0) +#define ALARM_UPPER_MASK GENMASK(10, 0) + +#define UPLOAD_TIMEOUT_US 50 + +struct mpfs_rtc_dev { + struct rtc_device *rtc; + void __iomem *base; +}; + +static void mpfs_rtc_start(struct mpfs_rtc_dev *rtcdev) +{ + u32 ctrl; + + ctrl = readl(rtcdev->base + CONTROL_REG); + ctrl &= ~CONTROL_STOP_BIT; + ctrl |= CONTROL_START_BIT; + writel(ctrl, rtcdev->base + CONTROL_REG); +} + +static void mpfs_rtc_clear_irq(struct mpfs_rtc_dev *rtcdev) +{ + u32 val = readl(rtcdev->base + CONTROL_REG); + + val &= ~(CONTROL_ALARM_ON_BIT | CONTROL_STOP_BIT); + val |= CONTROL_ALARM_OFF_BIT; + writel(val, rtcdev->base + CONTROL_REG); + /* + * Ensure that the posted write to the CONTROL_REG register completed before + * returning from this function. 
Not doing this may result in the interrupt + * only being cleared some time after this function returns. + */ + (void)readl(rtcdev->base + CONTROL_REG); +} + +static int mpfs_rtc_readtime(struct device *dev, struct rtc_time *tm) +{ + struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev); + u64 time; + + time = readl(rtcdev->base + DATETIME_LOWER_REG); + time |= ((u64)readl(rtcdev->base + DATETIME_UPPER_REG) & DATETIME_UPPER_MASK) << 32; + rtc_time64_to_tm(time, tm); + + return 0; +} + +static int mpfs_rtc_settime(struct device *dev, struct rtc_time *tm) +{ + struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev); + u32 ctrl, prog; + u64 time; + int ret; + + time = rtc_tm_to_time64(tm); + + writel((u32)time, rtcdev->base + DATETIME_LOWER_REG); + writel((u32)(time >> 32) & DATETIME_UPPER_MASK, rtcdev->base + DATETIME_UPPER_REG); + + ctrl = readl(rtcdev->base + CONTROL_REG); + ctrl &= ~CONTROL_STOP_BIT; + ctrl |= CONTROL_UPLOAD_BIT; + writel(ctrl, rtcdev->base + CONTROL_REG); + + ret = read_poll_timeout(readl, prog, prog & CONTROL_UPLOAD_BIT, 0, UPLOAD_TIMEOUT_US, + false, rtcdev->base + CONTROL_REG); + if (ret) { + dev_err(dev, "timed out uploading time to rtc\n"); + return ret; + } + mpfs_rtc_start(rtcdev); + + return 0; +} + +static int mpfs_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev); + u32 mode = readl(rtcdev->base + MODE_REG); + u64 time; + + alrm->enabled = mode & MODE_WAKE_EN; + + time = readl(rtcdev->base + ALARM_LOWER_REG); + time |= ((u64)readl(rtcdev->base + ALARM_UPPER_REG) & ALARM_UPPER_MASK) << 32; + rtc_time64_to_tm(time, &alrm->time); + + return 0; +} + +static int mpfs_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev); + u32 mode, ctrl; + u64 time; + + /* Disable the alarm before updating */ + ctrl = readl(rtcdev->base + CONTROL_REG); + ctrl |= CONTROL_ALARM_OFF_BIT; + writel(ctrl, rtcdev->base + CONTROL_REG); + + time = rtc_tm_to_time64(&alrm->time); + + writel((u32)time, rtcdev->base + ALARM_LOWER_REG); + writel((u32)(time >> 32) & ALARM_UPPER_MASK, rtcdev->base + ALARM_UPPER_REG); + + /* Bypass compare register in alarm mode */ + writel(GENMASK(31, 0), rtcdev->base + COMPARE_LOWER_REG); + writel(GENMASK(29, 0), rtcdev->base + COMPARE_UPPER_REG); + + /* Configure the RTC to enable the alarm. 
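+ * When the alarm is enabled, MODE_REG is rewritten (not OR-ed) with + * MODE_WAKE_EN | MODE_WAKE_CONTINUE and the ALARM_ON control bit is + * set; note that the control register is written before the mode + * register below.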
*/ + ctrl = readl(rtcdev->base + CONTROL_REG); + mode = readl(rtcdev->base + MODE_REG); + if (alrm->enabled) { + mode = MODE_WAKE_EN | MODE_WAKE_CONTINUE; + /* Enable the alarm */ + ctrl &= ~CONTROL_ALARM_OFF_BIT; + ctrl |= CONTROL_ALARM_ON_BIT; + } + ctrl &= ~CONTROL_STOP_BIT; + ctrl |= CONTROL_START_BIT; + writel(ctrl, rtcdev->base + CONTROL_REG); + writel(mode, rtcdev->base + MODE_REG); + + return 0; +} + +static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev); + u32 ctrl; + + ctrl = readl(rtcdev->base + CONTROL_REG); + ctrl &= ~(CONTROL_ALARM_ON_BIT | CONTROL_ALARM_OFF_BIT | CONTROL_STOP_BIT); + + if (enabled) + ctrl |= CONTROL_ALARM_ON_BIT; + else + ctrl |= CONTROL_ALARM_OFF_BIT; + + writel(ctrl, rtcdev->base + CONTROL_REG); + + return 0; +} + +static inline struct clk *mpfs_rtc_init_clk(struct device *dev) +{ + struct clk *clk; + int ret; + + clk = devm_clk_get(dev, "rtc"); + if (IS_ERR(clk)) + return clk; + + ret = clk_prepare_enable(clk); + if (ret) + return ERR_PTR(ret); + + devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk); + return clk; +} + +static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev) +{ + struct mpfs_rtc_dev *rtcdev = dev; + + mpfs_rtc_clear_irq(rtcdev); + + rtc_update_irq(rtcdev->rtc, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static const struct rtc_class_ops mpfs_rtc_ops = { + .read_time = mpfs_rtc_readtime, + .set_time = mpfs_rtc_settime, + .read_alarm = mpfs_rtc_readalarm, + .set_alarm = mpfs_rtc_setalarm, + .alarm_irq_enable = mpfs_rtc_alarm_irq_enable, +}; + +static int mpfs_rtc_probe(struct platform_device *pdev) +{ + struct mpfs_rtc_dev *rtcdev; + struct clk *clk; + u32 prescaler; + int wakeup_irq, ret; + + rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL); + if (!rtcdev) + return -ENOMEM; + + platform_set_drvdata(pdev, rtcdev); + + rtcdev->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtcdev->rtc)) + return PTR_ERR(rtcdev->rtc); + + rtcdev->rtc->ops = &mpfs_rtc_ops; + + /* range is capped by alarm max, lower reg is 31:0 & upper is 10:0 */ + rtcdev->rtc->range_max = GENMASK_ULL(42, 0); + + clk = mpfs_rtc_init_clk(&pdev->dev); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + rtcdev->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rtcdev->base)) { + dev_dbg(&pdev->dev, "invalid ioremap resources\n"); + return PTR_ERR(rtcdev->base); + } + + wakeup_irq = platform_get_irq(pdev, 0); + if (wakeup_irq <= 0) { + dev_dbg(&pdev->dev, "could not get wakeup irq\n"); + return wakeup_irq; + } + ret = devm_request_irq(&pdev->dev, wakeup_irq, mpfs_rtc_wakeup_irq_handler, 0, + dev_name(&pdev->dev), rtcdev); + if (ret) { + dev_dbg(&pdev->dev, "could not request wakeup irq\n"); + return ret; + } + + /* prescaler hardware adds 1 to reg value */ + prescaler = clk_get_rate(devm_clk_get(&pdev->dev, "rtcref")) - 1; + + if (prescaler > MAX_PRESCALER_COUNT) { + dev_dbg(&pdev->dev, "invalid prescaler %d\n", prescaler); + return -EINVAL; + } + + writel(prescaler, rtcdev->base + PRESCALER_REG); + dev_info(&pdev->dev, "prescaler set to: 0x%X \r\n", prescaler); + + device_init_wakeup(&pdev->dev, true); + ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq); + if (ret) + dev_err(&pdev->dev, "failed to enable irq wake\n"); + + return devm_rtc_register_device(rtcdev->rtc); +} + +static int mpfs_rtc_remove(struct platform_device *pdev) +{ + dev_pm_clear_wake_irq(&pdev->dev); + + return 0; +} + +static const struct 
of_device_id mpfs_rtc_of_match[] = {
+	{ .compatible = "microchip,mpfs-rtc" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, mpfs_rtc_of_match);
+
+static struct platform_driver mpfs_rtc_driver = {
+	.probe = mpfs_rtc_probe,
+	.remove = mpfs_rtc_remove,
+	.driver = {
+		.name = "mpfs_rtc",
+		.of_match_table = mpfs_rtc_of_match,
+	},
+};
+
+module_platform_driver(mpfs_rtc_driver);
+
+MODULE_DESCRIPTION("Real time clock for Microchip PolarFire SoC");
+MODULE_AUTHOR("Daire McNamara ");
+MODULE_AUTHOR("Conor Dooley ");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
new file mode 100644
index 0000000000..d43acd3920
--- /dev/null
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Nuvoton Technology Corporation
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define NCT3018Y_REG_SC 0x00 /* seconds */
+#define NCT3018Y_REG_SCA 0x01 /* alarm */
+#define NCT3018Y_REG_MN 0x02
+#define NCT3018Y_REG_MNA 0x03 /* alarm */
+#define NCT3018Y_REG_HR 0x04
+#define NCT3018Y_REG_HRA 0x05 /* alarm */
+#define NCT3018Y_REG_DW 0x06
+#define NCT3018Y_REG_DM 0x07
+#define NCT3018Y_REG_MO 0x08
+#define NCT3018Y_REG_YR 0x09
+#define NCT3018Y_REG_CTRL 0x0A /* timer control */
+#define NCT3018Y_REG_ST 0x0B /* status */
+#define NCT3018Y_REG_CLKO 0x0C /* clock out */
+
+#define NCT3018Y_BIT_AF BIT(7)
+#define NCT3018Y_BIT_ST BIT(7)
+#define NCT3018Y_BIT_DM BIT(6)
+#define NCT3018Y_BIT_HF BIT(5)
+#define NCT3018Y_BIT_DSM BIT(4)
+#define NCT3018Y_BIT_AIE BIT(3)
+#define NCT3018Y_BIT_OFIE BIT(2)
+#define NCT3018Y_BIT_CIE BIT(1)
+#define NCT3018Y_BIT_TWO BIT(0)
+
+#define NCT3018Y_REG_BAT_MASK 0x07
+#define NCT3018Y_REG_CLKO_F_MASK 0x03 /* frequency mask */
+#define NCT3018Y_REG_CLKO_CKE 0x80 /* clock out enabled */
+
+struct nct3018y {
+	struct rtc_device *rtc;
+	struct i2c_client *client;
+#ifdef CONFIG_COMMON_CLK
+	struct clk_hw clkout_hw;
+#endif
+};
+
+static int nct3018y_set_alarm_mode(struct i2c_client *client, bool on)
+{
+	int err, flags;
+
+	dev_dbg(&client->dev, "%s:on:%d\n", __func__, on);
+
+	flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+	if (flags < 0) {
+		dev_dbg(&client->dev,
+			"Failed to read NCT3018Y_REG_CTRL\n");
+		return flags;
+	}
+
+	if (on)
+		flags |= NCT3018Y_BIT_AIE;
+	else
+		flags &= ~NCT3018Y_BIT_AIE;
+
+	flags |= NCT3018Y_BIT_CIE;
+	err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+	if (err < 0) {
+		dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+		return err;
+	}
+
+	flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+	if (flags < 0) {
+		dev_dbg(&client->dev,
+			"Failed to read NCT3018Y_REG_ST\n");
+		return flags;
+	}
+
+	flags &= ~(NCT3018Y_BIT_AF);
+	err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+	if (err < 0) {
+		dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_ST\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *alarm_enable,
+				   unsigned char *alarm_flag)
+{
+	int flags;
+
+	if (alarm_enable) {
+		dev_dbg(&client->dev, "%s:NCT3018Y_REG_CTRL\n", __func__);
+		flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+		if (flags < 0)
+			return flags;
+		*alarm_enable = flags & NCT3018Y_BIT_AIE;
+	}
+
+	if (alarm_flag) {
+		dev_dbg(&client->dev, "%s:NCT3018Y_REG_ST\n", __func__);
+		flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+		if (flags < 0)
+			return flags;
+		*alarm_flag = flags & NCT3018Y_BIT_AF;
+	}
+
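+	/* Both out-pointers are non-NULL for all current callers, so the dereferences below are safe */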
dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n", + __func__, *alarm_enable, *alarm_flag); + + return 0; +} + +static irqreturn_t nct3018y_irq(int irq, void *dev_id) +{ + struct nct3018y *nct3018y = i2c_get_clientdata(dev_id); + struct i2c_client *client = nct3018y->client; + int err; + unsigned char alarm_flag; + unsigned char alarm_enable; + + dev_dbg(&client->dev, "%s:irq:%d\n", __func__, irq); + err = nct3018y_get_alarm_mode(nct3018y->client, &alarm_enable, &alarm_flag); + if (err) + return IRQ_NONE; + + if (alarm_flag) { + dev_dbg(&client->dev, "%s:alarm flag:%x\n", + __func__, alarm_flag); + rtc_update_irq(nct3018y->rtc, 1, RTC_IRQF | RTC_AF); + nct3018y_set_alarm_mode(nct3018y->client, 0); + dev_dbg(&client->dev, "%s:IRQ_HANDLED\n", __func__); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +/* + * In the routines that deal directly with the nct3018y hardware, we use + * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. + */ +static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char buf[10]; + int err; + + err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_ST, 1, buf); + if (err < 0) + return err; + + if (!buf[0]) { + dev_dbg(&client->dev, " voltage <=1.7, date/time is not reliable.\n"); + return -EINVAL; + } + + err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SC, sizeof(buf), buf); + if (err < 0) + return err; + + tm->tm_sec = bcd2bin(buf[0] & 0x7F); + tm->tm_min = bcd2bin(buf[2] & 0x7F); + tm->tm_hour = bcd2bin(buf[4] & 0x3F); + tm->tm_wday = buf[6] & 0x07; + tm->tm_mday = bcd2bin(buf[7] & 0x3F); + tm->tm_mon = bcd2bin(buf[8] & 0x1F) - 1; + tm->tm_year = bcd2bin(buf[9]) + 100; + + return 0; +} + +static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char buf[4] = {0}; + int err; + + buf[0] = bin2bcd(tm->tm_sec); + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SC\n"); + return err; + } + + buf[0] = bin2bcd(tm->tm_min); + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MN, buf[0]); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MN\n"); + return err; + } + + buf[0] = bin2bcd(tm->tm_hour); + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HR, buf[0]); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HR\n"); + return err; + } + + buf[0] = tm->tm_wday & 0x07; + buf[1] = bin2bcd(tm->tm_mday); + buf[2] = bin2bcd(tm->tm_mon + 1); + buf[3] = bin2bcd(tm->tm_year - 100); + err = i2c_smbus_write_i2c_block_data(client, NCT3018Y_REG_DW, + sizeof(buf), buf); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write for day and mon and year\n"); + return -EIO; + } + + return err; +} + +static int nct3018y_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char buf[5]; + int err; + + err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SCA, + sizeof(buf), buf); + if (err < 0) { + dev_dbg(&client->dev, "Unable to read date\n"); + return -EIO; + } + + dev_dbg(&client->dev, "%s: raw data is sec=%02x, min=%02x hr=%02x\n", + __func__, buf[0], buf[2], buf[4]); + + tm->time.tm_sec = bcd2bin(buf[0] & 0x7F); + tm->time.tm_min = bcd2bin(buf[2] & 0x7F); + tm->time.tm_hour = bcd2bin(buf[4] & 0x3F); + + err = nct3018y_get_alarm_mode(client, &tm->enabled, &tm->pending); + if (err < 0) + 
return err; + + dev_dbg(&client->dev, "%s:s=%d m=%d, hr=%d, enabled=%d, pending=%d\n", + __func__, tm->time.tm_sec, tm->time.tm_min, + tm->time.tm_hour, tm->enabled, tm->pending); + + return 0; +} + +static int nct3018y_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + int err; + + dev_dbg(dev, "%s, sec=%d, min=%d hour=%d tm->enabled:%d\n", + __func__, tm->time.tm_sec, tm->time.tm_min, tm->time.tm_hour, + tm->enabled); + + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SCA, bin2bcd(tm->time.tm_sec)); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SCA\n"); + return err; + } + + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MNA, bin2bcd(tm->time.tm_min)); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MNA\n"); + return err; + } + + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HRA, bin2bcd(tm->time.tm_hour)); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HRA\n"); + return err; + } + + return nct3018y_set_alarm_mode(client, tm->enabled); +} + +static int nct3018y_irq_enable(struct device *dev, unsigned int enabled) +{ + dev_dbg(dev, "%s: alarm enable=%d\n", __func__, enabled); + + return nct3018y_set_alarm_mode(to_i2c_client(dev), enabled); +} + +static int nct3018y_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + struct i2c_client *client = to_i2c_client(dev); + int status, flags = 0; + + switch (cmd) { + case RTC_VL_READ: + status = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST); + if (status < 0) + return status; + + if (!(status & NCT3018Y_REG_BAT_MASK)) + flags |= RTC_VL_DATA_INVALID; + + return put_user(flags, (unsigned int __user *)arg); + + default: + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMMON_CLK +/* + * Handling of the clkout + */ + +#define clkout_hw_to_nct3018y(_hw) container_of(_hw, struct nct3018y, clkout_hw) + +static const int clkout_rates[] = { + 32768, + 1024, + 32, + 1, +}; + +static unsigned long nct3018y_clkout_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw); + struct i2c_client *client = nct3018y->client; + int flags; + + flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO); + if (flags < 0) + return 0; + + flags &= NCT3018Y_REG_CLKO_F_MASK; + return clkout_rates[flags]; +} + +static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) + if (clkout_rates[i] <= rate) + return clkout_rates[i]; + + return 0; +} + +static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw); + struct i2c_client *client = nct3018y->client; + int i, flags; + + flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO); + if (flags < 0) + return flags; + + for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) + if (clkout_rates[i] == rate) { + flags &= ~NCT3018Y_REG_CLKO_F_MASK; + flags |= i; + return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags); + } + + return -EINVAL; +} + +static int nct3018y_clkout_control(struct clk_hw *hw, bool enable) +{ + struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw); + struct i2c_client *client = nct3018y->client; + int flags; + + flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO); + if (flags < 0) + return flags; + + if (enable) + flags |= NCT3018Y_REG_CLKO_CKE; + else + flags &= 
~NCT3018Y_REG_CLKO_CKE; + + return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags); +} + +static int nct3018y_clkout_prepare(struct clk_hw *hw) +{ + return nct3018y_clkout_control(hw, 1); +} + +static void nct3018y_clkout_unprepare(struct clk_hw *hw) +{ + nct3018y_clkout_control(hw, 0); +} + +static int nct3018y_clkout_is_prepared(struct clk_hw *hw) +{ + struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw); + struct i2c_client *client = nct3018y->client; + int flags; + + flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO); + if (flags < 0) + return flags; + + return flags & NCT3018Y_REG_CLKO_CKE; +} + +static const struct clk_ops nct3018y_clkout_ops = { + .prepare = nct3018y_clkout_prepare, + .unprepare = nct3018y_clkout_unprepare, + .is_prepared = nct3018y_clkout_is_prepared, + .recalc_rate = nct3018y_clkout_recalc_rate, + .round_rate = nct3018y_clkout_round_rate, + .set_rate = nct3018y_clkout_set_rate, +}; + +static struct clk *nct3018y_clkout_register_clk(struct nct3018y *nct3018y) +{ + struct i2c_client *client = nct3018y->client; + struct device_node *node = client->dev.of_node; + struct clk *clk; + struct clk_init_data init; + + init.name = "nct3018y-clkout"; + init.ops = &nct3018y_clkout_ops; + init.flags = 0; + init.parent_names = NULL; + init.num_parents = 0; + nct3018y->clkout_hw.init = &init; + + /* optional override of the clockname */ + of_property_read_string(node, "clock-output-names", &init.name); + + /* register the clock */ + clk = devm_clk_register(&client->dev, &nct3018y->clkout_hw); + + if (!IS_ERR(clk)) + of_clk_add_provider(node, of_clk_src_simple_get, clk); + + return clk; +} +#endif + +static const struct rtc_class_ops nct3018y_rtc_ops = { + .read_time = nct3018y_rtc_read_time, + .set_time = nct3018y_rtc_set_time, + .read_alarm = nct3018y_rtc_read_alarm, + .set_alarm = nct3018y_rtc_set_alarm, + .alarm_irq_enable = nct3018y_irq_enable, + .ioctl = nct3018y_ioctl, +}; + +static int nct3018y_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct nct3018y *nct3018y; + int err, flags; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | + I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + + nct3018y = devm_kzalloc(&client->dev, sizeof(struct nct3018y), + GFP_KERNEL); + if (!nct3018y) + return -ENOMEM; + + i2c_set_clientdata(client, nct3018y); + nct3018y->client = client; + device_set_wakeup_capable(&client->dev, 1); + + flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL); + if (flags < 0) { + dev_dbg(&client->dev, "%s: read error\n", __func__); + return flags; + } else if (flags & NCT3018Y_BIT_TWO) { + dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__); + } + + flags = NCT3018Y_BIT_TWO; + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags); + if (err < 0) { + dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n"); + return err; + } + + flags = 0; + err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags); + if (err < 0) { + dev_dbg(&client->dev, "%s: write error\n", __func__); + return err; + } + + nct3018y->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(nct3018y->rtc)) + return PTR_ERR(nct3018y->rtc); + + nct3018y->rtc->ops = &nct3018y_rtc_ops; + nct3018y->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + nct3018y->rtc->range_max = RTC_TIMESTAMP_END_2099; + + if (client->irq > 0) { + err = devm_request_threaded_irq(&client->dev, client->irq, + NULL, nct3018y_irq, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + "nct3018y", client); + if (err) { + 
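+			/* A wired but unclaimable IRQ is a hard error; only a missing IRQ falls back to the alarm-less path below */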
dev_dbg(&client->dev, "unable to request IRQ %d\n", client->irq); + return err; + } + } else { + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, nct3018y->rtc->features); + clear_bit(RTC_FEATURE_ALARM, nct3018y->rtc->features); + } + +#ifdef CONFIG_COMMON_CLK + /* register clk in common clk framework */ + nct3018y_clkout_register_clk(nct3018y); +#endif + + return devm_rtc_register_device(nct3018y->rtc); +} + +static const struct i2c_device_id nct3018y_id[] = { + { "nct3018y", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, nct3018y_id); + +static const struct of_device_id nct3018y_of_match[] = { + { .compatible = "nuvoton,nct3018y" }, + {} +}; +MODULE_DEVICE_TABLE(of, nct3018y_of_match); + +static struct i2c_driver nct3018y_driver = { + .driver = { + .name = "rtc-nct3018y", + .of_match_table = of_match_ptr(nct3018y_of_match), + }, + .probe = nct3018y_probe, + .id_table = nct3018y_id, +}; + +module_i2c_driver(nct3018y_driver); + +MODULE_AUTHOR("Medad CChien "); +MODULE_AUTHOR("Mia Lin "); +MODULE_DESCRIPTION("Nuvoton NCT3018Y RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c new file mode 100644 index 0000000000..ac788799c8 --- /dev/null +++ b/drivers/rtc/rtc-rzn1.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Renesas RZ/N1 Real Time Clock interface for Linux + * + * Copyright: + * - 2014 Renesas Electronics Europe Limited + * - 2022 Schneider Electric + * + * Authors: + * - Michel Pollet , + * - Miquel Raynal + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RZN1_RTC_CTL0 0x00 +#define RZN1_RTC_CTL0_SLSB_SUBU 0 +#define RZN1_RTC_CTL0_SLSB_SCMP BIT(4) +#define RZN1_RTC_CTL0_AMPM BIT(5) +#define RZN1_RTC_CTL0_CE BIT(7) + +#define RZN1_RTC_CTL1 0x04 +#define RZN1_RTC_CTL1_ALME BIT(4) + +#define RZN1_RTC_CTL2 0x08 +#define RZN1_RTC_CTL2_WAIT BIT(0) +#define RZN1_RTC_CTL2_WST BIT(1) +#define RZN1_RTC_CTL2_WUST BIT(5) +#define RZN1_RTC_CTL2_STOPPED (RZN1_RTC_CTL2_WAIT | RZN1_RTC_CTL2_WST) + +#define RZN1_RTC_SEC 0x14 +#define RZN1_RTC_MIN 0x18 +#define RZN1_RTC_HOUR 0x1c +#define RZN1_RTC_WEEK 0x20 +#define RZN1_RTC_DAY 0x24 +#define RZN1_RTC_MONTH 0x28 +#define RZN1_RTC_YEAR 0x2c + +#define RZN1_RTC_SUBU 0x38 +#define RZN1_RTC_SUBU_DEV BIT(7) +#define RZN1_RTC_SUBU_DECR BIT(6) + +#define RZN1_RTC_ALM 0x40 +#define RZN1_RTC_ALH 0x44 +#define RZN1_RTC_ALW 0x48 + +#define RZN1_RTC_SECC 0x4c +#define RZN1_RTC_MINC 0x50 +#define RZN1_RTC_HOURC 0x54 +#define RZN1_RTC_WEEKC 0x58 +#define RZN1_RTC_DAYC 0x5c +#define RZN1_RTC_MONTHC 0x60 +#define RZN1_RTC_YEARC 0x64 + +struct rzn1_rtc { + struct rtc_device *rtcdev; + void __iomem *base; +}; + +static void rzn1_rtc_get_time_snapshot(struct rzn1_rtc *rtc, struct rtc_time *tm) +{ + tm->tm_sec = readl(rtc->base + RZN1_RTC_SECC); + tm->tm_min = readl(rtc->base + RZN1_RTC_MINC); + tm->tm_hour = readl(rtc->base + RZN1_RTC_HOURC); + tm->tm_wday = readl(rtc->base + RZN1_RTC_WEEKC); + tm->tm_mday = readl(rtc->base + RZN1_RTC_DAYC); + tm->tm_mon = readl(rtc->base + RZN1_RTC_MONTHC); + tm->tm_year = readl(rtc->base + RZN1_RTC_YEARC); +} + +static unsigned int rzn1_rtc_tm_to_wday(struct rtc_time *tm) +{ + time64_t time; + unsigned int days; + u32 secs; + + time = rtc_tm_to_time64(tm); + days = div_s64_rem(time, 86400, &secs); + + /* day of the week, 1970-01-01 was a Thursday */ + return (days + 4) % 7; +} + +static int rzn1_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + u32 val, secs; + + /* + * The RTC was 
not started or is stopped and thus does not carry the + * proper time/date. + */ + val = readl(rtc->base + RZN1_RTC_CTL2); + if (val & RZN1_RTC_CTL2_STOPPED) + return -EINVAL; + + rzn1_rtc_get_time_snapshot(rtc, tm); + secs = readl(rtc->base + RZN1_RTC_SECC); + if (tm->tm_sec != secs) + rzn1_rtc_get_time_snapshot(rtc, tm); + + tm->tm_sec = bcd2bin(tm->tm_sec); + tm->tm_min = bcd2bin(tm->tm_min); + tm->tm_hour = bcd2bin(tm->tm_hour); + tm->tm_wday = bcd2bin(tm->tm_wday); + tm->tm_mday = bcd2bin(tm->tm_mday); + tm->tm_mon = bcd2bin(tm->tm_mon); + tm->tm_year = bcd2bin(tm->tm_year); + + return 0; +} + +static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + u32 val; + int ret; + + tm->tm_sec = bin2bcd(tm->tm_sec); + tm->tm_min = bin2bcd(tm->tm_min); + tm->tm_hour = bin2bcd(tm->tm_hour); + tm->tm_wday = bin2bcd(rzn1_rtc_tm_to_wday(tm)); + tm->tm_mday = bin2bcd(tm->tm_mday); + tm->tm_mon = bin2bcd(tm->tm_mon); + tm->tm_year = bin2bcd(tm->tm_year); + + val = readl(rtc->base + RZN1_RTC_CTL2); + if (!(val & RZN1_RTC_CTL2_STOPPED)) { + /* Hold the counter if it was counting up */ + writel(RZN1_RTC_CTL2_WAIT, rtc->base + RZN1_RTC_CTL2); + + /* Wait for the counter to stop: two 32k clock cycles */ + usleep_range(61, 100); + ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, val, + val & RZN1_RTC_CTL2_WST, 0, 100); + if (ret) + return ret; + } + + writel(tm->tm_sec, rtc->base + RZN1_RTC_SEC); + writel(tm->tm_min, rtc->base + RZN1_RTC_MIN); + writel(tm->tm_hour, rtc->base + RZN1_RTC_HOUR); + writel(tm->tm_wday, rtc->base + RZN1_RTC_WEEK); + writel(tm->tm_mday, rtc->base + RZN1_RTC_DAY); + writel(tm->tm_mon, rtc->base + RZN1_RTC_MONTH); + writel(tm->tm_year, rtc->base + RZN1_RTC_YEAR); + writel(0, rtc->base + RZN1_RTC_CTL2); + + return 0; +} + +static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id) +{ + struct rzn1_rtc *rtc = dev_id; + + rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF); + + return IRQ_HANDLED; +} + +static int rzn1_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + u32 ctl1 = readl(rtc->base + RZN1_RTC_CTL1); + + if (enable) + ctl1 |= RZN1_RTC_CTL1_ALME; + else + ctl1 &= ~RZN1_RTC_CTL1_ALME; + + writel(ctl1, rtc->base + RZN1_RTC_CTL1); + + return 0; +} + +static int rzn1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alrm->time; + unsigned int min, hour, wday, delta_days; + time64_t alarm; + u32 ctl1; + int ret; + + ret = rzn1_rtc_read_time(dev, tm); + if (ret) + return ret; + + min = readl(rtc->base + RZN1_RTC_ALM); + hour = readl(rtc->base + RZN1_RTC_ALH); + wday = readl(rtc->base + RZN1_RTC_ALW); + + tm->tm_sec = 0; + tm->tm_min = bcd2bin(min); + tm->tm_hour = bcd2bin(hour); + delta_days = ((fls(wday) - 1) - tm->tm_wday + 7) % 7; + tm->tm_wday = fls(wday) - 1; + + if (delta_days) { + alarm = rtc_tm_to_time64(tm) + (delta_days * 86400); + rtc_time64_to_tm(alarm, tm); + } + + ctl1 = readl(rtc->base + RZN1_RTC_CTL1); + alrm->enabled = !!(ctl1 & RZN1_RTC_CTL1_ALME); + + return 0; +} + +static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alrm->time, tm_now; + unsigned long alarm, farest; + unsigned int days_ahead, wday; + int ret; + + ret = rzn1_rtc_read_time(dev, &tm_now); + if (ret) + return ret; + + /* We cannot set alarms more than one week ahead */ + farest = 
rtc_tm_to_time64(&tm_now) + (7 * 86400); + alarm = rtc_tm_to_time64(tm); + if (time_after(alarm, farest)) + return -ERANGE; + + /* Convert alarm day into week day */ + days_ahead = tm->tm_mday - tm_now.tm_mday; + wday = (tm_now.tm_wday + days_ahead) % 7; + + writel(bin2bcd(tm->tm_min), rtc->base + RZN1_RTC_ALM); + writel(bin2bcd(tm->tm_hour), rtc->base + RZN1_RTC_ALH); + writel(BIT(wday), rtc->base + RZN1_RTC_ALW); + + rzn1_rtc_alarm_irq_enable(dev, alrm->enabled); + + return 0; +} + +static int rzn1_rtc_read_offset(struct device *dev, long *offset) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + unsigned int ppb_per_step; + bool subtract; + u32 val; + + val = readl(rtc->base + RZN1_RTC_SUBU); + ppb_per_step = val & RZN1_RTC_SUBU_DEV ? 1017 : 3051; + subtract = val & RZN1_RTC_SUBU_DECR; + val &= 0x3F; + + if (!val) + *offset = 0; + else if (subtract) + *offset = -(((~val) & 0x3F) + 1) * ppb_per_step; + else + *offset = (val - 1) * ppb_per_step; + + return 0; +} + +static int rzn1_rtc_set_offset(struct device *dev, long offset) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + int stepsh, stepsl, steps; + u32 subu = 0, ctl2; + int ret; + + /* + * Check which resolution mode (every 20 or 60s) can be used. + * Between 2 and 124 clock pulses can be added or substracted. + * + * In 20s mode, the minimum resolution is 2 / (32768 * 20) which is + * close to 3051 ppb. In 60s mode, the resolution is closer to 1017. + */ + stepsh = DIV_ROUND_CLOSEST(offset, 1017); + stepsl = DIV_ROUND_CLOSEST(offset, 3051); + + if (stepsh >= -0x3E && stepsh <= 0x3E) { + /* 1017 ppb per step */ + steps = stepsh; + subu |= RZN1_RTC_SUBU_DEV; + } else if (stepsl >= -0x3E && stepsl <= 0x3E) { + /* 3051 ppb per step */ + steps = stepsl; + } else { + return -ERANGE; + } + + if (!steps) + return 0; + + if (steps > 0) { + subu |= steps + 1; + } else { + subu |= RZN1_RTC_SUBU_DECR; + subu |= (~(-steps - 1)) & 0x3F; + } + + ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, ctl2, + !(ctl2 & RZN1_RTC_CTL2_WUST), 100, 2000000); + if (ret) + return ret; + + writel(subu, rtc->base + RZN1_RTC_SUBU); + + return 0; +} + +static const struct rtc_class_ops rzn1_rtc_ops = { + .read_time = rzn1_rtc_read_time, + .set_time = rzn1_rtc_set_time, + .read_alarm = rzn1_rtc_read_alarm, + .set_alarm = rzn1_rtc_set_alarm, + .alarm_irq_enable = rzn1_rtc_alarm_irq_enable, + .read_offset = rzn1_rtc_read_offset, + .set_offset = rzn1_rtc_set_offset, +}; + +static int rzn1_rtc_probe(struct platform_device *pdev) +{ + struct rzn1_rtc *rtc; + int alarm_irq; + int ret; + + rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + platform_set_drvdata(pdev, rtc); + + rtc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rtc->base)) + return dev_err_probe(&pdev->dev, PTR_ERR(rtc->base), "Missing reg\n"); + + alarm_irq = platform_get_irq(pdev, 0); + if (alarm_irq < 0) + return alarm_irq; + + rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc->rtcdev)) + return PTR_ERR(rtc->rtcdev); + + rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000; + rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099; + rtc->rtcdev->ops = &rzn1_rtc_ops; + set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features); + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features); + + devm_pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) + return ret; + + /* + * Ensure the clock counter is enabled. + * Set 24-hour mode and possible oscillator offset compensation in SUBU mode. 
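+ * (SUBU mode trims the counter by adding or subtracting 32k clock pulses
+ * every 20 or 60 seconds, see rzn1_rtc_set_offset(); e.g. a requested
+ * +10000 ppb offset is realised as 10 steps of 1017 ppb.)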
+ */ + writel(RZN1_RTC_CTL0_CE | RZN1_RTC_CTL0_AMPM | RZN1_RTC_CTL0_SLSB_SUBU, + rtc->base + RZN1_RTC_CTL0); + + /* Disable all interrupts */ + writel(0, rtc->base + RZN1_RTC_CTL1); + + ret = devm_request_irq(&pdev->dev, alarm_irq, rzn1_rtc_alarm_irq, 0, + dev_name(&pdev->dev), rtc); + if (ret) { + dev_err(&pdev->dev, "RTC timer interrupt not available\n"); + goto dis_runtime_pm; + } + + ret = devm_rtc_register_device(rtc->rtcdev); + if (ret) + goto dis_runtime_pm; + + return 0; + +dis_runtime_pm: + pm_runtime_put(&pdev->dev); + + return ret; +} + +static int rzn1_rtc_remove(struct platform_device *pdev) +{ + pm_runtime_put(&pdev->dev); + + return 0; +} + +static const struct of_device_id rzn1_rtc_of_match[] = { + { .compatible = "renesas,rzn1-rtc" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rzn1_rtc_of_match); + +static struct platform_driver rzn1_rtc_driver = { + .probe = rzn1_rtc_probe, + .remove = rzn1_rtc_remove, + .driver = { + .name = "rzn1-rtc", + .of_match_table = rzn1_rtc_of_match, + }, +}; +module_platform_driver(rzn1_rtc_driver); + +MODULE_AUTHOR("Michel Pollet +#include +#include +#include +#include +#include +#include +#include +#include + +/* Registers */ +#define REG_K3RTC_S_CNT_LSW 0x08 +#define REG_K3RTC_S_CNT_MSW 0x0c +#define REG_K3RTC_COMP 0x10 +#define REG_K3RTC_ON_OFF_S_CNT_LSW 0x20 +#define REG_K3RTC_ON_OFF_S_CNT_MSW 0x24 +#define REG_K3RTC_SCRATCH0 0x30 +#define REG_K3RTC_SCRATCH7 0x4c +#define REG_K3RTC_GENERAL_CTL 0x50 +#define REG_K3RTC_IRQSTATUS_RAW_SYS 0x54 +#define REG_K3RTC_IRQSTATUS_SYS 0x58 +#define REG_K3RTC_IRQENABLE_SET_SYS 0x5c +#define REG_K3RTC_IRQENABLE_CLR_SYS 0x60 +#define REG_K3RTC_SYNCPEND 0x68 +#define REG_K3RTC_KICK0 0x70 +#define REG_K3RTC_KICK1 0x74 + +/* Freeze when lsw is read and unfreeze when msw is read */ +#define K3RTC_CNT_FMODE_S_CNT_VALUE (0x2 << 24) + +/* Magic values for lock/unlock */ +#define K3RTC_KICK0_UNLOCK_VALUE 0x83e70b13 +#define K3RTC_KICK1_UNLOCK_VALUE 0x95a4f1e0 + +/* Multiplier for ppb conversions */ +#define K3RTC_PPB_MULT (1000000000LL) +/* Min and max values supported with 'offset' interface (swapped sign) */ +#define K3RTC_MIN_OFFSET (-277761) +#define K3RTC_MAX_OFFSET (277778) + +/** + * struct ti_k3_rtc_soc_data - Private of compatible data for ti-k3-rtc + * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327) + */ +struct ti_k3_rtc_soc_data { + const bool unlock_irq_erratum; +}; + +static const struct regmap_config ti_k3_rtc_regmap_config = { + .name = "peripheral-registers", + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = REG_K3RTC_KICK1, +}; + +enum ti_k3_rtc_fields { + K3RTC_KICK0, + K3RTC_KICK1, + K3RTC_S_CNT_LSW, + K3RTC_S_CNT_MSW, + K3RTC_O32K_OSC_DEP_EN, + K3RTC_UNLOCK, + K3RTC_CNT_FMODE, + K3RTC_PEND, + K3RTC_RELOAD_FROM_BBD, + K3RTC_COMP, + + K3RTC_ALM_S_CNT_LSW, + K3RTC_ALM_S_CNT_MSW, + K3RTC_IRQ_STATUS_RAW, + K3RTC_IRQ_STATUS, + K3RTC_IRQ_ENABLE_SET, + K3RTC_IRQ_ENABLE_CLR, + + K3RTC_IRQ_STATUS_ALT, + K3RTC_IRQ_ENABLE_CLR_ALT, + + K3_RTC_MAX_FIELDS +}; + +static const struct reg_field ti_rtc_reg_fields[] = { + [K3RTC_KICK0] = REG_FIELD(REG_K3RTC_KICK0, 0, 31), + [K3RTC_KICK1] = REG_FIELD(REG_K3RTC_KICK1, 0, 31), + [K3RTC_S_CNT_LSW] = REG_FIELD(REG_K3RTC_S_CNT_LSW, 0, 31), + [K3RTC_S_CNT_MSW] = REG_FIELD(REG_K3RTC_S_CNT_MSW, 0, 15), + [K3RTC_O32K_OSC_DEP_EN] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 21, 21), + [K3RTC_UNLOCK] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 23, 23), + [K3RTC_CNT_FMODE] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 24, 25), + [K3RTC_PEND] = 
REG_FIELD(REG_K3RTC_SYNCPEND, 0, 1),
+	[K3RTC_RELOAD_FROM_BBD] = REG_FIELD(REG_K3RTC_SYNCPEND, 31, 31),
+	[K3RTC_COMP] = REG_FIELD(REG_K3RTC_COMP, 0, 31),
+
+	/* We use on to off as alarm trigger */
+	[K3RTC_ALM_S_CNT_LSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_LSW, 0, 31),
+	[K3RTC_ALM_S_CNT_MSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_MSW, 0, 15),
+	[K3RTC_IRQ_STATUS_RAW] = REG_FIELD(REG_K3RTC_IRQSTATUS_RAW_SYS, 0, 0),
+	[K3RTC_IRQ_STATUS] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 0, 0),
+	[K3RTC_IRQ_ENABLE_SET] = REG_FIELD(REG_K3RTC_IRQENABLE_SET_SYS, 0, 0),
+	[K3RTC_IRQ_ENABLE_CLR] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 0, 0),
+	/* Off to on is alternate */
+	[K3RTC_IRQ_STATUS_ALT] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 1, 1),
+	[K3RTC_IRQ_ENABLE_CLR_ALT] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 1, 1),
+};
+
+/**
+ * struct ti_k3_rtc - Private data for ti-k3-rtc
+ * @irq: IRQ
+ * @sync_timeout_us: data sync timeout period in uSec
+ * @rate_32k: 32k clock rate in Hz
+ * @rtc_dev: rtc device
+ * @regmap: rtc mmio regmap
+ * @r_fields: rtc register fields
+ * @soc: SoC compatible match data
+ */
+struct ti_k3_rtc {
+	unsigned int irq;
+	u32 sync_timeout_us;
+	unsigned long rate_32k;
+	struct rtc_device *rtc_dev;
+	struct regmap *regmap;
+	struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
+	const struct ti_k3_rtc_soc_data *soc;
+};
+
+static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
+{
+	int ret;
+	int val;
+
+	ret = regmap_field_read(priv->r_fields[f], &val);
+	/*
+	 * We shouldn't be seeing regmap fail on us for mmio reads.
+	 * This is possible if clock context fails, but that isn't the case for us.
+	 */
+	if (WARN_ON_ONCE(ret))
+		return ret;
+	return val;
+}
+
+static void k3rtc_field_write(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f, u32 val)
+{
+	regmap_field_write(priv->r_fields[f], val);
+}
+
+/**
+ * k3rtc_fence - Ensure a register sync took place between the two domains
+ * @priv: pointer to priv data
+ *
+ * Return: 0 if the sync took place, else returns -ETIMEDOUT
+ */
+static int k3rtc_fence(struct ti_k3_rtc *priv)
+{
+	int ret;
+
+	ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_PEND], ret,
+					     !ret, 2, priv->sync_timeout_us);
+
+	return ret;
+}
+
+static inline int k3rtc_check_unlocked(struct ti_k3_rtc *priv)
+{
+	int ret;
+
+	ret = k3rtc_field_read(priv, K3RTC_UNLOCK);
+	if (ret < 0)
+		return ret;
+
+	return (ret) ? 0 : 1;
+}
+
+static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
+{
+	int ret;
+
+	ret = k3rtc_check_unlocked(priv);
+	if (!ret)
+		return ret;
+
+	k3rtc_field_write(priv, K3RTC_KICK0, K3RTC_KICK0_UNLOCK_VALUE);
+	k3rtc_field_write(priv, K3RTC_KICK1, K3RTC_KICK1_UNLOCK_VALUE);
+
+	/* Skip fence since we are going to check the unlock bit as fence */
+	ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
+					     !ret, 2, priv->sync_timeout_us);
+
+	return ret;
+}
+
+static int k3rtc_configure(struct device *dev)
+{
+	int ret;
+	struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+	/*
+	 * HWBUG: The compare state machine is broken if the RTC module
+	 * is NOT unlocked in under one second of boot - which is a pretty
+	 * long time from the perspective of a Linux driver (module load, a
+	 * u-boot shell, etc. can all take much longer than this).
+	 *
+	 * In such an occurrence, it is assumed that the RTC module is
+	 * unusable.
+	 */
+	if (priv->soc->unlock_irq_erratum) {
+		ret = k3rtc_check_unlocked(priv);
+		/* If there is an error OR if we are locked, return an error */
+		if (ret) {
+			dev_err(dev,
+				HW_ERR "Erratum i2327 unlock QUIRK!
Cannot operate!!\n"); + return -EFAULT; + } + } else { + /* May need to explicitly unlock first time */ + ret = k3rtc_unlock_rtc(priv); + if (ret) { + dev_err(dev, "Failed to unlock(%d)!\n", ret); + return ret; + } + } + + /* Enable Shadow register sync on 32k clock boundary */ + k3rtc_field_write(priv, K3RTC_O32K_OSC_DEP_EN, 0x1); + + /* + * Wait at least clock sync time before proceeding further programming. + * This ensures that the 32k based sync is active. + */ + usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 5); + + /* We need to ensure fence here to make sure sync here */ + ret = k3rtc_fence(priv); + if (ret) { + dev_err(dev, + "Failed fence osc_dep enable(%d) - is 32k clk working?!\n", ret); + return ret; + } + + /* + * FMODE setting: Reading lower seconds will freeze value on higher + * seconds. This also implies that we must *ALWAYS* read lower seconds + * prior to reading higher seconds + */ + k3rtc_field_write(priv, K3RTC_CNT_FMODE, K3RTC_CNT_FMODE_S_CNT_VALUE); + + /* Clear any spurious IRQ sources if any */ + k3rtc_field_write(priv, K3RTC_IRQ_STATUS_ALT, 0x1); + k3rtc_field_write(priv, K3RTC_IRQ_STATUS, 0x1); + /* Disable all IRQs */ + k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR_ALT, 0x1); + k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR, 0x1); + + /* And.. Let us Sync the writes in */ + return k3rtc_fence(priv); +} + +static int ti_k3_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + u32 seconds_lo, seconds_hi; + + seconds_lo = k3rtc_field_read(priv, K3RTC_S_CNT_LSW); + seconds_hi = k3rtc_field_read(priv, K3RTC_S_CNT_MSW); + + rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, tm); + + return 0; +} + +static int ti_k3_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + time64_t seconds; + + seconds = rtc_tm_to_time64(tm); + + /* + * Read operation on LSW will freeze the RTC, so to update + * the time, we cannot use field operations. Just write since the + * reserved bits are ignored. + */ + regmap_write(priv->regmap, REG_K3RTC_S_CNT_LSW, seconds); + regmap_write(priv->regmap, REG_K3RTC_S_CNT_MSW, seconds >> 32); + + return k3rtc_fence(priv); +} + +static int ti_k3_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + u32 reg; + u32 offset = enabled ? K3RTC_IRQ_ENABLE_SET : K3RTC_IRQ_ENABLE_CLR; + + reg = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET); + if ((enabled && reg) || (!enabled && !reg)) + return 0; + + k3rtc_field_write(priv, offset, 0x1); + + /* + * Ensure the write sync is through - NOTE: it should be OK to have + * ISR to fire as we are checking sync (which should be done in a 32k + * cycle or so). 
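+ * If the fence below times out, -ETIMEDOUT is returned and the enable
+ * request may not have reached the 32k domain.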
+ */ + return k3rtc_fence(priv); +} + +static int ti_k3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + u32 seconds_lo, seconds_hi; + + seconds_lo = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_LSW); + seconds_hi = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_MSW); + + rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, &alarm->time); + + alarm->enabled = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET); + + return 0; +} + +static int ti_k3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + time64_t seconds; + int ret; + + seconds = rtc_tm_to_time64(&alarm->time); + + k3rtc_field_write(priv, K3RTC_ALM_S_CNT_LSW, seconds); + k3rtc_field_write(priv, K3RTC_ALM_S_CNT_MSW, (seconds >> 32)); + + /* Make sure the alarm time is synced in */ + ret = k3rtc_fence(priv); + if (ret) { + dev_err(dev, "Failed to fence(%d)! Potential config issue?\n", ret); + return ret; + } + + /* Alarm IRQ enable will do a sync */ + return ti_k3_rtc_alarm_irq_enable(dev, alarm->enabled); +} + +static int ti_k3_rtc_read_offset(struct device *dev, long *offset) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + u32 ticks_per_hr = priv->rate_32k * 3600; + int comp; + s64 tmp; + + comp = k3rtc_field_read(priv, K3RTC_COMP); + + /* Convert from RTC calibration register format to ppb format */ + tmp = comp * (s64)K3RTC_PPB_MULT; + if (tmp < 0) + tmp -= ticks_per_hr / 2LL; + else + tmp += ticks_per_hr / 2LL; + tmp = div_s64(tmp, ticks_per_hr); + + /* Offset value operates in negative way, so swap sign */ + *offset = (long)-tmp; + + return 0; +} + +static int ti_k3_rtc_set_offset(struct device *dev, long offset) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + u32 ticks_per_hr = priv->rate_32k * 3600; + int comp; + s64 tmp; + + /* Make sure offset value is within supported range */ + if (offset < K3RTC_MIN_OFFSET || offset > K3RTC_MAX_OFFSET) + return -ERANGE; + + /* Convert from ppb format to RTC calibration register format */ + tmp = offset * (s64)ticks_per_hr; + if (tmp < 0) + tmp -= K3RTC_PPB_MULT / 2LL; + else + tmp += K3RTC_PPB_MULT / 2LL; + tmp = div_s64(tmp, K3RTC_PPB_MULT); + + /* Offset value operates in negative way, so swap sign */ + comp = (int)-tmp; + + k3rtc_field_write(priv, K3RTC_COMP, comp); + + return k3rtc_fence(priv); +} + +static irqreturn_t ti_k3_rtc_interrupt(s32 irq, void *dev_id) +{ + struct device *dev = dev_id; + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + u32 reg; + int ret; + + /* + * IRQ assertion can be very fast, however, the IRQ Status clear + * de-assert depends on 32k clock edge in the 32k domain + * If we clear the status prior to the first 32k clock edge, + * the status bit is cleared, but the IRQ stays re-asserted. + * + * To prevent this condition, we need to wait for clock sync time. + * We can either do that by polling the 32k observability signal for + * a toggle OR we could just sleep and let the processor do other + * stuff. + */ + usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 2); + + /* Lets make sure that this is a valid interrupt */ + reg = k3rtc_field_read(priv, K3RTC_IRQ_STATUS); + + if (!reg) { + u32 raw = k3rtc_field_read(priv, K3RTC_IRQ_STATUS_RAW); + + dev_err(dev, + HW_ERR + "Erratum i2327/IRQ trig: status: 0x%08x / 0x%08x\n", reg, raw); + return IRQ_NONE; + } + + /* + * Write 1 to clear status reg + * We cannot use a field operation here due to a potential race between + * 32k domain and vbus domain. 
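+ * (A regmap field update would read-modify-write the whole register and
+ * could write back a stale snapshot of the neighbouring W1C status bit.)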
+ */ + regmap_write(priv->regmap, REG_K3RTC_IRQSTATUS_SYS, 0x1); + + /* Sync the write in */ + ret = k3rtc_fence(priv); + if (ret) { + dev_err(dev, "Failed to fence irq status clr(%d)!\n", ret); + return IRQ_NONE; + } + + /* + * Force the 32k status to be reloaded back in to ensure status is + * reflected back correctly. + */ + k3rtc_field_write(priv, K3RTC_RELOAD_FROM_BBD, 0x1); + + /* Ensure the write sync is through */ + ret = k3rtc_fence(priv); + if (ret) { + dev_err(dev, "Failed to fence reload from bbd(%d)!\n", ret); + return IRQ_NONE; + } + + /* Now we ensure that the status bit is cleared */ + ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_IRQ_STATUS], + ret, !ret, 2, priv->sync_timeout_us); + if (ret) { + dev_err(dev, "Time out waiting for status clear\n"); + return IRQ_NONE; + } + + /* Notify RTC core on event */ + rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static const struct rtc_class_ops ti_k3_rtc_ops = { + .read_time = ti_k3_rtc_read_time, + .set_time = ti_k3_rtc_set_time, + .read_alarm = ti_k3_rtc_read_alarm, + .set_alarm = ti_k3_rtc_set_alarm, + .read_offset = ti_k3_rtc_read_offset, + .set_offset = ti_k3_rtc_set_offset, + .alarm_irq_enable = ti_k3_rtc_alarm_irq_enable, +}; + +static int ti_k3_rtc_scratch_read(void *priv_data, unsigned int offset, + void *val, size_t bytes) +{ + struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data; + + return regmap_bulk_read(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4); +} + +static int ti_k3_rtc_scratch_write(void *priv_data, unsigned int offset, + void *val, size_t bytes) +{ + struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data; + int ret; + + ret = regmap_bulk_write(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4); + if (ret) + return ret; + + return k3rtc_fence(priv); +} + +static struct nvmem_config ti_k3_rtc_nvmem_config = { + .name = "ti_k3_rtc_scratch", + .word_size = 4, + .stride = 4, + .size = REG_K3RTC_SCRATCH7 - REG_K3RTC_SCRATCH0 + 4, + .reg_read = ti_k3_rtc_scratch_read, + .reg_write = ti_k3_rtc_scratch_write, +}; + +static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv) +{ + int ret; + struct clk *clk; + + clk = devm_clk_get(dev, "osc32k"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk); + if (ret) + return ret; + + priv->rate_32k = clk_get_rate(clk); + + /* Make sure we are exact 32k clock. Else, try to compensate delay */ + if (priv->rate_32k != 32768) + dev_warn(dev, "Clock rate %ld is not 32768! Could misbehave!\n", + priv->rate_32k); + + /* + * Sync timeout should be two 32k clk sync cycles = ~61uS. 
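+ * (one 32k clock cycle is ~30.5uS, so two cycles is ~61uS)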
We double + * it to comprehend intermediate bus segment and cpu frequency + * deltas + */ + priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4); + + return ret; +} + +static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv) +{ + int ret; + struct clk *clk; + + /* Note: VBUS isn't a context clock, it is needed for hardware operation */ + clk = devm_clk_get(dev, "vbus"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk); +} + +static int ti_k3_rtc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ti_k3_rtc *priv; + void __iomem *rtc_base; + int ret; + + priv = devm_kzalloc(dev, sizeof(struct ti_k3_rtc), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + rtc_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rtc_base)) + return PTR_ERR(rtc_base); + + priv->regmap = devm_regmap_init_mmio(dev, rtc_base, &ti_k3_rtc_regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + ret = devm_regmap_field_bulk_alloc(dev, priv->regmap, priv->r_fields, + ti_rtc_reg_fields, K3_RTC_MAX_FIELDS); + if (ret) + return ret; + + ret = k3rtc_get_32kclk(dev, priv); + if (ret) + return ret; + ret = k3rtc_get_vbusclk(dev, priv); + if (ret) + return ret; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + priv->irq = (unsigned int)ret; + + priv->rtc_dev = devm_rtc_allocate_device(dev); + if (IS_ERR(priv->rtc_dev)) + return PTR_ERR(priv->rtc_dev); + + priv->soc = of_device_get_match_data(dev); + + priv->rtc_dev->ops = &ti_k3_rtc_ops; + priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48Bit seconds */ + ti_k3_rtc_nvmem_config.priv = priv; + + ret = devm_request_threaded_irq(dev, priv->irq, NULL, + ti_k3_rtc_interrupt, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + dev_name(dev), dev); + if (ret) { + dev_err(dev, "Could not request IRQ: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, priv); + + ret = k3rtc_configure(dev); + if (ret) + return ret; + + if (device_property_present(dev, "wakeup-source")) + device_init_wakeup(dev, true); + else + device_set_wakeup_capable(dev, true); + + ret = devm_rtc_register_device(priv->rtc_dev); + if (ret) + return ret; + + return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config); +} + +static const struct ti_k3_rtc_soc_data ti_k3_am62_data = { + .unlock_irq_erratum = true, +}; + +static const struct of_device_id ti_k3_rtc_of_match_table[] = { + {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data}, + {} +}; +MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table); + +static int __maybe_unused ti_k3_rtc_suspend(struct device *dev) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(priv->irq); + return 0; +} + +static int __maybe_unused ti_k3_rtc_resume(struct device *dev) +{ + struct ti_k3_rtc *priv = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(priv->irq); + return 0; +} + +static SIMPLE_DEV_PM_OPS(ti_k3_rtc_pm_ops, ti_k3_rtc_suspend, ti_k3_rtc_resume); + +static struct platform_driver ti_k3_rtc_driver = { + .probe = ti_k3_rtc_probe, + .driver = { + .name = "rtc-ti-k3", + .of_match_table = ti_k3_rtc_of_match_table, + .pm = &ti_k3_rtc_pm_ops, + }, +}; +module_platform_driver(ti_k3_rtc_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI K3 RTC driver"); +MODULE_AUTHOR("Nishanth Menon"); diff --git a/drivers/s390/char/uvdevice.c 
b/drivers/s390/char/uvdevice.c new file mode 100644 index 0000000000..1d40457c7b --- /dev/null +++ b/drivers/s390/char/uvdevice.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2022 + * Author(s): Steffen Eiden + * + * This file provides a Linux misc device to give userspace access to some + * Ultravisor (UV) functions. The device only accepts IOCTLs and will only + * be present if the Ultravisor facility (158) is present. + * + * When userspace sends a valid IOCTL uvdevice will copy the input data to + * kernel space, do some basic validity checks to avoid kernel/system + * corruption. Any other check that the Ultravisor does will not be done by + * the uvdevice to keep changes minimal when adding new functionalities + * to existing UV-calls. + * After the checks uvdevice builds a corresponding + * Ultravisor Call Control Block, and sends the request to the Ultravisor. + * Then, it copies the response, including the return codes, back to userspace. + * It is the responsibility of the userspace to check for any error issued + * by UV and to interpret the UV response. The uvdevice acts as a communication + * channel for userspace to the Ultravisor. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static int uvio_build_uvcb_attest(struct uv_cb_attest *uvcb_attest, u8 *arcb, + u8 *meas, u8 *add_data, struct uvio_attest *uvio_attest) +{ + void __user *user_buf_arcb = (void __user *)uvio_attest->arcb_addr; + + if (copy_from_user(arcb, user_buf_arcb, uvio_attest->arcb_len)) + return -EFAULT; + + uvcb_attest->header.len = sizeof(*uvcb_attest); + uvcb_attest->header.cmd = UVC_CMD_RETR_ATTEST; + uvcb_attest->arcb_addr = (u64)arcb; + uvcb_attest->cont_token = 0; + uvcb_attest->user_data_len = uvio_attest->user_data_len; + memcpy(uvcb_attest->user_data, uvio_attest->user_data, sizeof(uvcb_attest->user_data)); + uvcb_attest->meas_len = uvio_attest->meas_len; + uvcb_attest->meas_addr = (u64)meas; + uvcb_attest->add_data_len = uvio_attest->add_data_len; + uvcb_attest->add_data_addr = (u64)add_data; + + return 0; +} + +static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest, + struct uvio_ioctl_cb *uv_ioctl, + u8 *measurement, u8 *add_data, + struct uvio_attest *uvio_attest) +{ + struct uvio_attest __user *user_uvio_attest = (void __user *)uv_ioctl->argument_addr; + void __user *user_buf_add = (void __user *)uvio_attest->add_data_addr; + void __user *user_buf_meas = (void __user *)uvio_attest->meas_addr; + void __user *user_buf_uid = &user_uvio_attest->config_uid; + + if (copy_to_user(user_buf_meas, measurement, uvio_attest->meas_len)) + return -EFAULT; + if (add_data && copy_to_user(user_buf_add, add_data, uvio_attest->add_data_len)) + return -EFAULT; + if (copy_to_user(user_buf_uid, uvcb_attest->config_uid, sizeof(uvcb_attest->config_uid))) + return -EFAULT; + return 0; +} + +static int get_uvio_attest(struct uvio_ioctl_cb *uv_ioctl, struct uvio_attest *uvio_attest) +{ + u8 __user *user_arg_buf = (u8 __user *)uv_ioctl->argument_addr; + + if (copy_from_user(uvio_attest, user_arg_buf, sizeof(*uvio_attest))) + return -EFAULT; + + if (uvio_attest->arcb_len > UVIO_ATT_ARCB_MAX_LEN) + return -EINVAL; + if (uvio_attest->arcb_len == 0) + return -EINVAL; + if (uvio_attest->meas_len > UVIO_ATT_MEASUREMENT_MAX_LEN) + return -EINVAL; + if (uvio_attest->meas_len == 0) + return -EINVAL; + if (uvio_attest->add_data_len > UVIO_ATT_ADDITIONAL_MAX_LEN) + return -EINVAL; + if (uvio_attest->reserved136) + 
return -EINVAL;
+	return 0;
+}
+
+/**
+ * uvio_attestation() - Perform a Retrieve Attestation Measurement UVC.
+ *
+ * @uv_ioctl: ioctl control block
+ *
+ * uvio_attestation() does a Retrieve Attestation Measurement Ultravisor Call.
+ * It verifies that the given userspace addresses are valid and request sizes
+ * are sane. Every other check is made by the Ultravisor (UV) and won't result
+ * in a negative return value. It copies the input to kernelspace, builds the
+ * request, sends the UV-call, and copies the result to userspace.
+ *
+ * The Attestation Request has two inputs and two outputs.
+ * The ARCB and User Data are inputs for the UV, generated by userspace.
+ * The Measurement and Additional Data are outputs for userspace, generated
+ * by UV.
+ *
+ * The Attestation Request Control Block (ARCB) is a cryptographically verified
+ * and secured request to UV and User Data is some plaintext data which is
+ * going to be included in the Attestation Measurement calculation.
+ *
+ * Measurement is a cryptographic measurement of the caller's properties,
+ * optional data configured by the ARCB and the user data. If specified by the
+ * ARCB, UV will add some Additional Data to the measurement calculation.
+ * This Additional Data is then returned as well.
+ *
+ * If the Retrieve Attestation Measurement UV facility is not present,
+ * UV will return an invalid command rc. This won't be fenced in the driver
+ * and does not result in a negative return value.
+ *
+ * Context: might sleep
+ *
+ * Return: 0 on success or a negative error code on error.
+ */
+static int uvio_attestation(struct uvio_ioctl_cb *uv_ioctl)
+{
+	struct uv_cb_attest *uvcb_attest = NULL;
+	struct uvio_attest *uvio_attest = NULL;
+	u8 *measurement = NULL;
+	u8 *add_data = NULL;
+	u8 *arcb = NULL;
+	int ret;
+
+	ret = -EINVAL;
+	if (uv_ioctl->argument_len != sizeof(*uvio_attest))
+		goto out;
+
+	ret = -ENOMEM;
+	uvio_attest = kzalloc(sizeof(*uvio_attest), GFP_KERNEL);
+	if (!uvio_attest)
+		goto out;
+
+	ret = get_uvio_attest(uv_ioctl, uvio_attest);
+	if (ret)
+		goto out;
+
+	ret = -ENOMEM;
+	arcb = kvzalloc(uvio_attest->arcb_len, GFP_KERNEL);
+	measurement = kvzalloc(uvio_attest->meas_len, GFP_KERNEL);
+	if (!arcb || !measurement)
+		goto out;
+
+	if (uvio_attest->add_data_len) {
+		add_data = kvzalloc(uvio_attest->add_data_len, GFP_KERNEL);
+		if (!add_data)
+			goto out;
+	}
+
+	uvcb_attest = kzalloc(sizeof(*uvcb_attest), GFP_KERNEL);
+	if (!uvcb_attest)
+		goto out;
+
+	ret = uvio_build_uvcb_attest(uvcb_attest, arcb, measurement, add_data, uvio_attest);
+	if (ret)
+		goto out;
+
+	uv_call_sched(0, (u64)uvcb_attest);
+
+	uv_ioctl->uv_rc = uvcb_attest->header.rc;
+	uv_ioctl->uv_rrc = uvcb_attest->header.rrc;
+
+	ret = uvio_copy_attest_result_to_user(uvcb_attest, uv_ioctl, measurement, add_data,
+					      uvio_attest);
+out:
+	kvfree(arcb);
+	kvfree(measurement);
+	kvfree(add_data);
+	kfree(uvio_attest);
+	kfree(uvcb_attest);
+	return ret;
+}
+
+static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *argp)
+{
+	if (copy_from_user(ioctl, argp, sizeof(*ioctl)))
+		return -EFAULT;
+	if (ioctl->flags != 0)
+		return -EINVAL;
+	if (memchr_inv(ioctl->reserved14, 0, sizeof(ioctl->reserved14)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * IOCTL entry point for the Ultravisor device.
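+ * uvio_ioctl() copies in the common control block, dispatches on the ioctl
+ * number and, on success, copies the control block (including the UV return
+ * codes) back to userspace.
+ *
+ * A minimal, illustrative userspace sequence might look like (error
+ * handling omitted):
+ *
+ *	int fd = open("/dev/" UVIO_DEVICE_NAME, O_RDWR);
+ *
+ *	ioctl(fd, UVIO_IOCTL_ATT, &uv_ioctl);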
+ */ +static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct uvio_ioctl_cb uv_ioctl = { }; + long ret; + + switch (cmd) { + case UVIO_IOCTL_ATT: + ret = uvio_copy_and_check_ioctl(&uv_ioctl, argp); + if (ret) + return ret; + ret = uvio_attestation(&uv_ioctl); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + if (ret) + return ret; + + if (copy_to_user(argp, &uv_ioctl, sizeof(uv_ioctl))) + ret = -EFAULT; + + return ret; +} + +static const struct file_operations uvio_dev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = uvio_ioctl, + .llseek = no_llseek, +}; + +static struct miscdevice uvio_dev_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = UVIO_DEVICE_NAME, + .fops = &uvio_dev_fops, +}; + +static void __exit uvio_dev_exit(void) +{ + misc_deregister(&uvio_dev_miscdev); +} + +static int __init uvio_dev_init(void) +{ + return misc_register(&uvio_dev_miscdev); +} + +module_cpu_feature_match(S390_CPU_FEATURE_UV, uvio_dev_init); +module_exit(uvio_dev_exit); + +MODULE_AUTHOR("IBM Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Ultravisor UAPI driver"); diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c new file mode 100644 index 0000000000..f64ced04b9 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_vmid.c @@ -0,0 +1,288 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include + +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_crtn.h" + + +/* + * lpfc_get_vmid_from_hashtable - search the UUID in the hash table + * @vport: The virtual port for which this call is being executed. + * @hash: calculated hash value + * @buf: uuid associated with the VE + * Return the VMID entry associated with the UUID + * Make sure to acquire the appropriate lock before invoking this routine. 
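+ * (For example, lpfc_vmid_get_appid() takes vport->vmid_lock around each
+ * lookup.)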
+ */ +struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, + u32 hash, u8 *buf) +{ + struct lpfc_vmid *vmp; + + hash_for_each_possible(vport->hash_table, vmp, hnode, hash) { + if (memcmp(&vmp->host_vmid[0], buf, 16) == 0) + return vmp; + } + return NULL; +} + +/* + * lpfc_put_vmid_in_hashtable - put the VMID in the hash table + * @vport: The virtual port for which this call is being executed. + * @hash - calculated hash value + * @vmp: Pointer to a VMID entry representing a VM sending I/O + * + * This routine will insert the newly acquired VMID entity in the hash table. + * Make sure to acquire the appropriate lock before invoking this routine. + */ +static void +lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, + struct lpfc_vmid *vmp) +{ + hash_add(vport->hash_table, &vmp->hnode, hash); +} + +/* + * lpfc_vmid_hash_fn - create a hash value of the UUID + * @vmid: uuid associated with the VE + * @len: length of the VMID string + * Returns the calculated hash value + */ +int lpfc_vmid_hash_fn(const char *vmid, int len) +{ + int c; + int hash = 0; + + if (len == 0) + return 0; + while (len--) { + c = *vmid++; + if (c >= 'A' && c <= 'Z') + c += 'a' - 'A'; + + hash = (hash + (c << LPFC_VMID_HASH_SHIFT) + + (c >> LPFC_VMID_HASH_SHIFT)) * 19; + } + + return hash & LPFC_VMID_HASH_MASK; +} + +/* + * lpfc_vmid_update_entry - update the vmid entry in the hash table + * @vport: The virtual port for which this call is being executed. + * @iodir: io direction + * @vmp: Pointer to a VMID entry representing a VM sending I/O + * @tag: VMID tag + */ +static void lpfc_vmid_update_entry(struct lpfc_vport *vport, + enum dma_data_direction iodir, + struct lpfc_vmid *vmp, + union lpfc_vmid_io_tag *tag) +{ + u64 *lta; + + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) + tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid; + else if (vport->phba->cfg_vmid_app_header) + tag->app_id = vmp->un.app_id; + + if (iodir == DMA_TO_DEVICE) + vmp->io_wr_cnt++; + else if (iodir == DMA_FROM_DEVICE) + vmp->io_rd_cnt++; + + /* update the last access timestamp in the table */ + lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id()); + *lta = jiffies; +} + +static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, + struct lpfc_vmid *vmid) +{ + u32 hash; + struct lpfc_vmid *pvmid; + + if (vport->port_type == LPFC_PHYSICAL_PORT) { + vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); + } else { + hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len); + pvmid = + lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, + vmid->host_vmid); + if (pvmid) + vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid; + else + vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); + } +} + +/* + * lpfc_vmid_get_appid - get the VMID associated with the UUID + * @vport: The virtual port for which this call is being executed. 
+ * @uuid: UUID associated with the VE + * @cmd: address of scsi_cmd descriptor + * @iodir: io direction + * @tag: VMID tag + * Returns status of the function + */ +int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, + enum dma_data_direction iodir, + union lpfc_vmid_io_tag *tag) +{ + struct lpfc_vmid *vmp = NULL; + int hash, len, rc = -EPERM, i; + + /* check if QFPA is complete */ + if (lpfc_vmid_is_type_priority_tag(vport) && + !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) && + (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) { + vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; + return -EAGAIN; + } + + /* search if the UUID has already been mapped to the VMID */ + len = strlen(uuid); + hash = lpfc_vmid_hash_fn(uuid, len); + + /* search for the VMID in the table */ + read_lock(&vport->vmid_lock); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); + + /* if found, check if its already registered */ + if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { + read_unlock(&vport->vmid_lock); + lpfc_vmid_update_entry(vport, iodir, vmp, tag); + rc = 0; + } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER || + vmp->flag & LPFC_VMID_DE_REGISTER)) { + /* else if register or dereg request has already been sent */ + /* Hence VMID tag will not be added for this I/O */ + read_unlock(&vport->vmid_lock); + rc = -EBUSY; + } else { + /* The VMID was not found in the hashtable. At this point, */ + /* drop the read lock first before proceeding further */ + read_unlock(&vport->vmid_lock); + /* start the process to obtain one as per the */ + /* type of the VMID indicated */ + write_lock(&vport->vmid_lock); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); + + /* while the read lock was released, in case the entry was */ + /* added by other context or is in process of being added */ + if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { + lpfc_vmid_update_entry(vport, iodir, vmp, tag); + write_unlock(&vport->vmid_lock); + return 0; + } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) { + write_unlock(&vport->vmid_lock); + return -EBUSY; + } + + /* else search and allocate a free slot in the hash table */ + if (vport->cur_vmid_cnt < vport->max_vmid) { + for (i = 0; i < vport->max_vmid; i++) { + vmp = vport->vmid + i; + if (vmp->flag == LPFC_VMID_SLOT_FREE) + break; + } + if (i == vport->max_vmid) + vmp = NULL; + } else { + vmp = NULL; + } + + if (!vmp) { + write_unlock(&vport->vmid_lock); + return -ENOMEM; + } + + /* Add the vmid and register */ + lpfc_put_vmid_in_hashtable(vport, hash, vmp); + vmp->vmid_len = len; + memcpy(vmp->host_vmid, uuid, vmp->vmid_len); + vmp->io_rd_cnt = 0; + vmp->io_wr_cnt = 0; + vmp->flag = LPFC_VMID_SLOT_USED; + + vmp->delete_inactive = + vport->vmid_inactivity_timeout ? 
1 : 0;
+
+		/* if type priority tag, get next available VMID */
+		if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+			lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+		/* allocate the per cpu variable for holding */
+		/* the last access time stamp only if VMID is enabled */
+		if (!vmp->last_io_time)
+			vmp->last_io_time = __alloc_percpu(sizeof(u64),
+							   __alignof__(struct
+							   lpfc_vmid));
+		if (!vmp->last_io_time) {
+			hash_del(&vmp->hnode);
+			vmp->flag = LPFC_VMID_SLOT_FREE;
+			write_unlock(&vport->vmid_lock);
+			return -EIO;
+		}
+
+		write_unlock(&vport->vmid_lock);
+
+		/* complete transaction with switch */
+		if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+			rc = lpfc_vmid_uvem(vport, vmp, true);
+		else if (vport->phba->cfg_vmid_app_header)
+			rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+		if (!rc) {
+			write_lock(&vport->vmid_lock);
+			vport->cur_vmid_cnt++;
+			vmp->flag |= LPFC_VMID_REQ_REGISTER;
+			write_unlock(&vport->vmid_lock);
+		} else {
+			write_lock(&vport->vmid_lock);
+			hash_del(&vmp->hnode);
+			vmp->flag = LPFC_VMID_SLOT_FREE;
+			free_percpu(vmp->last_io_time);
+			write_unlock(&vport->vmid_lock);
+			return -EIO;
+		}
+
+		/* finally, enable the idle timer once */
+		if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+			mod_timer(&vport->phba->inactive_vmid_poll,
+				  jiffies +
+				  msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+			vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+		}
+	}
+	return rc;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
new file mode 100644
index 0000000000..9baac224b2
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -0,0 +1,1860 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/bsg-lib.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+
+/**
+ * mpi3mr_bsg_pel_abort - sends PEL abort request
+ * @mrioc: Adapter instance reference
+ *
+ * This function sends a PEL abort request to the firmware through
+ * the admin request queue.
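+ *
+ * Editorial sketch, not part of the original patch: the driver-internal
+ * command pattern the body below follows (names taken from this function):
+ *
+ *	mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
+ *	init_completion(&mrioc->pel_abort_cmd.done);
+ *	mpi3mr_admin_request_post(mrioc, &pel_abort_req,
+ *				  sizeof(pel_abort_req), 0);
+ *	wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
+ *				    MPI3MR_INTADMCMD_TIMEOUT * HZ);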
+ * + * Return: 0 on success, -1 on failure + */ +static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_pel_req_action_abort pel_abort_req; + struct mpi3_pel_reply *pel_reply; + int retval = 0; + u16 pe_log_status; + + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + return -1; + } + if (mrioc->stop_bsgs) { + dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); + return -1; + } + + memset(&pel_abort_req, 0, sizeof(pel_abort_req)); + mutex_lock(&mrioc->pel_abort_cmd.mutex); + if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { + dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->pel_abort_cmd.mutex); + return -1; + } + mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; + mrioc->pel_abort_cmd.is_waiting = 1; + mrioc->pel_abort_cmd.callback = NULL; + pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT); + pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; + pel_abort_req.action = MPI3_PEL_ACTION_ABORT; + pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); + + mrioc->pel_abort_requested = 1; + init_completion(&mrioc->pel_abort_cmd.done); + retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req, + sizeof(pel_abort_req), 0); + if (retval) { + retval = -1; + dprint_bsg_err(mrioc, "%s: admin request post failed\n", + __func__); + mrioc->pel_abort_requested = 0; + goto out_unlock; + } + + wait_for_completion_timeout(&mrioc->pel_abort_cmd.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { + mrioc->pel_abort_cmd.is_waiting = 0; + dprint_bsg_err(mrioc, "%s: command timedout\n", __func__); + if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET)) + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1); + retval = -1; + goto out_unlock; + } + if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + dprint_bsg_err(mrioc, + "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, (mrioc->pel_abort_cmd.ioc_status & + MPI3_IOCSTATUS_STATUS_MASK), + mrioc->pel_abort_cmd.ioc_loginfo); + retval = -1; + goto out_unlock; + } + if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) { + pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply; + pe_log_status = le16_to_cpu(pel_reply->pe_log_status); + if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) { + dprint_bsg_err(mrioc, + "%s: command failed, pel_status(0x%04x)\n", + __func__, pe_log_status); + retval = -1; + } + } + +out_unlock: + mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->pel_abort_cmd.mutex); + return retval; +} +/** + * mpi3mr_bsg_verify_adapter - verify adapter number is valid + * @ioc_number: Adapter number + * + * This function returns the adapter instance pointer of given + * adapter number. If adapter number does not match with the + * driver's adapter list, driver returns NULL. 
+ * + * Return: adapter instance reference + */ +static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number) +{ + struct mpi3mr_ioc *mrioc = NULL; + + spin_lock(&mrioc_list_lock); + list_for_each_entry(mrioc, &mrioc_list, list) { + if (mrioc->id == ioc_number) { + spin_unlock(&mrioc_list_lock); + return mrioc; + } + } + spin_unlock(&mrioc_list_lock); + return NULL; +} + +/** + * mpi3mr_enable_logdata - Handler for log data enable + * @mrioc: Adapter instance reference + * @job: BSG job reference + * + * This function enables log data caching in the driver if not + * already enabled and return the maximum number of log data + * entries that can be cached in the driver. + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + struct mpi3mr_logdata_enable logdata_enable; + + if (!mrioc->logdata_buf) { + mrioc->logdata_entry_sz = + (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4)) + + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ; + mrioc->logdata_buf_idx = 0; + mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES, + mrioc->logdata_entry_sz, GFP_KERNEL); + + if (!mrioc->logdata_buf) + return -ENOMEM; + } + + memset(&logdata_enable, 0, sizeof(logdata_enable)); + logdata_enable.max_entries = + MPI3MR_BSG_LOGDATA_MAX_ENTRIES; + if (job->request_payload.payload_len >= sizeof(logdata_enable)) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &logdata_enable, sizeof(logdata_enable)); + return 0; + } + + return -EINVAL; +} +/** + * mpi3mr_get_logdata - Handler for get log data + * @mrioc: Adapter instance reference + * @job: BSG job pointer + * This function copies the log data entries to the user buffer + * when log caching is enabled in the driver. + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz; + + if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz)) + return -EINVAL; + + num_entries = job->request_payload.payload_len / entry_sz; + if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES) + num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES; + sz = num_entries * entry_sz; + + if (job->request_payload.payload_len >= sz) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + mrioc->logdata_buf, sz); + return 0; + } + return -EINVAL; +} + +/** + * mpi3mr_bsg_pel_enable - Handler for PEL enable driver + * @mrioc: Adapter instance reference + * @job: BSG job pointer + * + * This function is the handler for PEL enable driver. + * Validates the application given class and locale and if + * requires aborts the existing PEL wait request and/or issues + * new PEL wait request to the firmware and returns. + * + * Return: 0 on success and proper error codes on failure. 
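+ *
+ * Editorial worked example, not part of the original patch, for the locale
+ * half of the "already covered" test below: with mrioc->pel_locale = 0x000f,
+ * a request for 0x0003 gives (0x000f & 0x0003) ^ 0x0003 == 0, so the
+ * existing PEL wait already covers it; a request for 0x0013 gives 0x0010,
+ * so the old wait is aborted and re-armed with the merged locale
+ * 0x0013 | 0x000f = 0x001f.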
+ */
+static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
+				  struct bsg_job *job)
+{
+	long rval = -EINVAL;
+	struct mpi3mr_bsg_out_pel_enable pel_enable;
+	u8 issue_pel_wait;
+	u8 tmp_class;
+	u16 tmp_locale;
+
+	if (job->request_payload.payload_len != sizeof(pel_enable)) {
+		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+		    __func__);
+		return rval;
+	}
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  &pel_enable, sizeof(pel_enable));
+
+	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
+		dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
+			__func__, pel_enable.pel_class);
+		rval = 0;
+		goto out;
+	}
+	if (!mrioc->pel_enabled)
+		issue_pel_wait = 1;
+	else {
+		if ((mrioc->pel_class <= pel_enable.pel_class) &&
+		    !((mrioc->pel_locale & pel_enable.pel_locale) ^
+		      pel_enable.pel_locale)) {
+			issue_pel_wait = 0;
+			rval = 0;
+		} else {
+			pel_enable.pel_locale |= mrioc->pel_locale;
+
+			if (mrioc->pel_class < pel_enable.pel_class)
+				pel_enable.pel_class = mrioc->pel_class;
+
+			rval = mpi3mr_bsg_pel_abort(mrioc);
+			if (rval) {
+				dprint_bsg_err(mrioc,
+				    "%s: pel_abort failed, status(%ld)\n",
+				    __func__, rval);
+				goto out;
+			}
+			issue_pel_wait = 1;
+		}
+	}
+	if (issue_pel_wait) {
+		tmp_class = mrioc->pel_class;
+		tmp_locale = mrioc->pel_locale;
+		mrioc->pel_class = pel_enable.pel_class;
+		mrioc->pel_locale = pel_enable.pel_locale;
+		mrioc->pel_enabled = 1;
+		rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
+		if (rval) {
+			mrioc->pel_class = tmp_class;
+			mrioc->pel_locale = tmp_locale;
+			mrioc->pel_enabled = 0;
+			dprint_bsg_err(mrioc,
+			    "%s: pel get sequence number failed, status(%ld)\n",
+			    __func__, rval);
+		}
+	}
+
+out:
+	return rval;
+}
+/**
+ * mpi3mr_get_all_tgt_info - Get all target information
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the driver-managed target devices' device
+ * handle, persistent ID, bus ID and target ID to the user
+ * provided buffer for the specific controller. This function
+ * also provides the number of devices managed by the driver for
+ * the specific controller.
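+ *
+ * Editorial sizing sketch, not part of the original patch, drawn from the
+ * body below — the user payload is a device-count word followed by map
+ * entries, so the copyable entry bytes are:
+ *
+ *	usr_entrylen = ((payload_len - sizeof(u32)) /
+ *			sizeof(struct mpi3mr_device_map_info)) *
+ *		       sizeof(struct mpi3mr_device_map_info);
+ *	min_entrylen = min(usr_entrylen, kern_entrylen);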
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+	struct bsg_job *job)
+{
+	long rval = -EINVAL;
+	u16 num_devices = 0, i = 0, size;
+	unsigned long flags;
+	struct mpi3mr_tgt_dev *tgtdev;
+	struct mpi3mr_device_map_info *devmap_info = NULL;
+	struct mpi3mr_all_tgt_info *alltgt_info = NULL;
+	uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;
+
+	if (job->request_payload.payload_len < sizeof(u32)) {
+		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+		    __func__);
+		return rval;
+	}
+
+	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+		num_devices++;
+	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+	if ((job->request_payload.payload_len == sizeof(u32)) ||
+		list_empty(&mrioc->tgtdev_list)) {
+		sg_copy_from_buffer(job->request_payload.sg_list,
+				    job->request_payload.sg_cnt,
+				    &num_devices, sizeof(num_devices));
+		return 0;
+	}
+
+	kern_entrylen = (num_devices - 1) * sizeof(*devmap_info);
+	size = sizeof(*alltgt_info) + kern_entrylen;
+	alltgt_info = kzalloc(size, GFP_KERNEL);
+	if (!alltgt_info)
+		return -ENOMEM;
+
+	devmap_info = alltgt_info->dmi;
+	memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info)));
+	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+		if (i < num_devices) {
+			devmap_info[i].handle = tgtdev->dev_handle;
+			devmap_info[i].perst_id = tgtdev->perst_id;
+			if (tgtdev->host_exposed && tgtdev->starget) {
+				devmap_info[i].target_id = tgtdev->starget->id;
+				devmap_info[i].bus_id =
+				    tgtdev->starget->channel;
+			}
+			i++;
+		}
+	}
+	num_devices = i;
+	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+	memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices));
+
+	usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info);
+	usr_entrylen *= sizeof(*devmap_info);
+	min_entrylen = min(usr_entrylen, kern_entrylen);
+	if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) {
+		dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n",
+		    __func__, __LINE__);
+		rval = -EFAULT;
+		goto out;
+	}
+
+	sg_copy_from_buffer(job->request_payload.sg_list,
+			    job->request_payload.sg_cnt,
+			    alltgt_info, job->request_payload.payload_len);
+	rval = 0;
+out:
+	kfree(alltgt_info);
+	return rval;
+}
+/**
+ * mpi3mr_get_change_count - Get topology change count
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the topology change count provided by the
+ * driver in events and cached in the driver to the user
+ * provided buffer for the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
+	struct bsg_job *job)
+{
+	struct mpi3mr_change_count chgcnt;
+
+	memset(&chgcnt, 0, sizeof(chgcnt));
+	chgcnt.change_count = mrioc->change_count;
+	if (job->request_payload.payload_len >= sizeof(chgcnt)) {
+		sg_copy_from_buffer(job->request_payload.sg_list,
+				    job->request_payload.sg_cnt,
+				    &chgcnt, sizeof(chgcnt));
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_adp_reset - Issue controller reset
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function identifies the user provided reset type, issues
+ * the appropriate reset to the controller, waits for the reset to
+ * complete and the controller to reinitialize, and then returns.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
+	struct bsg_job *job)
+{
+	long rval = -EINVAL;
+	u8 save_snapdump;
+	struct mpi3mr_bsg_adp_reset adpreset;
+
+	if (job->request_payload.payload_len !=
+			sizeof(adpreset)) {
+		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+		    __func__);
+		goto out;
+	}
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  &adpreset, sizeof(adpreset));
+
+	switch (adpreset.reset_type) {
+	case MPI3MR_BSG_ADPRESET_SOFT:
+		save_snapdump = 0;
+		break;
+	case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
+		save_snapdump = 1;
+		break;
+	default:
+		dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
+		    __func__, adpreset.reset_type);
+		goto out;
+	}
+
+	rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
+	    save_snapdump);
+
+	if (rval)
+		dprint_bsg_err(mrioc,
+		    "%s: reset handler returned error(%ld) for reset type %d\n",
+		    __func__, rval, adpreset.reset_type);
+out:
+	return rval;
+}
+
+/**
+ * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function provides adapter information for the given
+ * controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
+	struct bsg_job *job)
+{
+	enum mpi3mr_iocstate ioc_state;
+	struct mpi3mr_bsg_in_adpinfo adpinfo;
+
+	memset(&adpinfo, 0, sizeof(adpinfo));
+	adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
+	adpinfo.pci_dev_id = mrioc->pdev->device;
+	adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
+	adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
+	adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
+	adpinfo.pci_bus = mrioc->pdev->bus->number;
+	adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
+	adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
+	adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
+	adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
+
+	ioc_state = mpi3mr_get_iocstate(mrioc);
+	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+	else if (ioc_state == MRIOC_STATE_FAULT)
+		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+	else
+		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+	memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
+	    sizeof(adpinfo.driver_info));
+
+	if (job->request_payload.payload_len >= sizeof(adpinfo)) {
+		sg_copy_from_buffer(job->request_payload.sg_list,
+				    job->request_payload.sg_cnt,
+				    &adpinfo, sizeof(adpinfo));
+
return 0;
+	}
+	return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_process_drv_cmds - Driver Command handler
+ * @job: BSG job reference
+ *
+ * This function is the top-level handler for driver commands;
+ * it does basic validation of the buffer, identifies the
+ * opcode and switches to the correct sub-handler.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
+{
+	long rval = -EINVAL;
+	struct mpi3mr_ioc *mrioc = NULL;
+	struct mpi3mr_bsg_packet *bsg_req = NULL;
+	struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
+
+	bsg_req = job->request;
+	drvrcmd = &bsg_req->cmd.drvrcmd;
+
+	mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
+	if (!mrioc)
+		return -ENODEV;
+
+	if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
+		rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
+		return rval;
+	}
+
+	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
+		return -ERESTARTSYS;
+
+	switch (drvrcmd->opcode) {
+	case MPI3MR_DRVBSG_OPCODE_ADPRESET:
+		rval = mpi3mr_bsg_adp_reset(mrioc, job);
+		break;
+	case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
+		rval = mpi3mr_get_all_tgt_info(mrioc, job);
+		break;
+	case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
+		rval = mpi3mr_get_change_count(mrioc, job);
+		break;
+	case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
+		rval = mpi3mr_enable_logdata(mrioc, job);
+		break;
+	case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
+		rval = mpi3mr_get_logdata(mrioc, job);
+		break;
+	case MPI3MR_DRVBSG_OPCODE_PELENABLE:
+		rval = mpi3mr_bsg_pel_enable(mrioc, job);
+		break;
+	case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
+	default:
+		pr_err("%s: unsupported driver command opcode %d\n",
+		    MPI3MR_DRIVER_NAME, drvrcmd->opcode);
+		break;
+	}
+	mutex_unlock(&mrioc->bsg_cmds.mutex);
+	return rval;
+}
+
+/**
+ * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
+ * @mpi_req: MPI request
+ * @sgl_offset: offset to start sgl in the MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ * @is_rmc: Does the buffer list have a management command buffer
+ * @is_rmr: Does the buffer list have a management response buffer
+ * @num_datasges: Number of data buffers in the list
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given MPI request.
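+ *
+ * Editorial sketch, not part of the original patch: the SGE flag values
+ * composed in the body below —
+ *
+ *	sgl_flags      = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ *			 MPI3_SGE_FLAGS_DLAS_SYSTEM |
+ *			 MPI3_SGE_FLAGS_END_OF_BUFFER;
+ *	sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;
+ *
+ * Each buffer is marked end-of-buffer; only the final element of a chain
+ * additionally carries end-of-list.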
+ * + * Return: Nothing + */ +static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset, + struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc, + u8 is_rmr, u8 num_datasges) +{ + u8 *sgl = (mpi_req + sgl_offset), count = 0; + struct mpi3_mgmt_passthrough_request *rmgmt_req = + (struct mpi3_mgmt_passthrough_request *)mpi_req; + struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; + u8 sgl_flags, sgl_flags_last; + + sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | + MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER; + sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST; + + if (is_rmc) { + mpi3mr_add_sg_single(&rmgmt_req->command_sgl, + sgl_flags_last, drv_buf_iter->kern_buf_len, + drv_buf_iter->kern_buf_dma); + sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len; + drv_buf_iter++; + count++; + if (is_rmr) { + mpi3mr_add_sg_single(&rmgmt_req->response_sgl, + sgl_flags_last, drv_buf_iter->kern_buf_len, + drv_buf_iter->kern_buf_dma); + drv_buf_iter++; + count++; + } else + mpi3mr_build_zero_len_sge( + &rmgmt_req->response_sgl); + } + if (!num_datasges) { + mpi3mr_build_zero_len_sge(sgl); + return; + } + for (; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + if (num_datasges == 1 || !is_rmc) + mpi3mr_add_sg_single(sgl, sgl_flags_last, + drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); + else + mpi3mr_add_sg_single(sgl, sgl_flags, + drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); + sgl += sizeof(struct mpi3_sge_common); + num_datasges--; + } +} + +/** + * mpi3mr_get_nvme_data_fmt - returns the NVMe data format + * @nvme_encap_request: NVMe encapsulated MPI request + * + * This function returns the type of the data format specified + * in user provided NVMe command in NVMe encapsulated request. + * + * Return: Data format of the NVMe command (PRP/SGL etc) + */ +static unsigned int mpi3mr_get_nvme_data_fmt( + struct mpi3_nvme_encapsulated_request *nvme_encap_request) +{ + u8 format = 0; + + format = ((nvme_encap_request->command[0] & 0xc000) >> 14); + return format; + +} + +/** + * mpi3mr_build_nvme_sgl - SGL constructor for NVME + * encapsulated request + * @mrioc: Adapter instance reference + * @nvme_encap_request: NVMe encapsulated MPI request + * @drv_bufs: DMA address of the buffers to be placed in sgl + * @bufcnt: Number of DMA buffers + * + * This function places the DMA address of the given buffers in + * proper format as SGEs in the given NVMe encapsulated request. + * + * Return: 0 on success, -1 on failure + */ +static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc, + struct mpi3_nvme_encapsulated_request *nvme_encap_request, + struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) +{ + struct mpi3mr_nvme_pt_sge *nvme_sgl; + u64 sgl_ptr; + u8 count; + size_t length = 0; + struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; + u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << + mrioc->facts.sge_mod_shift) << 32); + u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << + mrioc->facts.sge_mod_shift) << 32; + + /* + * Not all commands require a data transfer. If no data, just return + * without constructing any sgl. 
+ */ + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + sgl_ptr = (u64)drv_buf_iter->kern_buf_dma; + length = drv_buf_iter->kern_buf_len; + break; + } + if (!length) + return 0; + + if (sgl_ptr & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: SGL address collides with SGE modifier\n", + __func__); + return -1; + } + + sgl_ptr &= ~sgemod_mask; + sgl_ptr |= sgemod_val; + nvme_sgl = (struct mpi3mr_nvme_pt_sge *) + ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET); + memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); + nvme_sgl->base_addr = sgl_ptr; + nvme_sgl->length = length; + return 0; +} + +/** + * mpi3mr_build_nvme_prp - PRP constructor for NVME + * encapsulated request + * @mrioc: Adapter instance reference + * @nvme_encap_request: NVMe encapsulated MPI request + * @drv_bufs: DMA address of the buffers to be placed in SGL + * @bufcnt: Number of DMA buffers + * + * This function places the DMA address of the given buffers in + * proper format as PRP entries in the given NVMe encapsulated + * request. + * + * Return: 0 on success, -1 on failure + */ +static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc, + struct mpi3_nvme_encapsulated_request *nvme_encap_request, + struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) +{ + int prp_size = MPI3MR_NVME_PRP_SIZE; + __le64 *prp_entry, *prp1_entry, *prp2_entry; + __le64 *prp_page; + dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; + u32 offset, entry_len, dev_pgsz; + u32 page_mask_result, page_mask; + size_t length = 0; + u8 count; + struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; + u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << + mrioc->facts.sge_mod_shift) << 32); + u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << + mrioc->facts.sge_mod_shift) << 32; + u16 dev_handle = nvme_encap_request->dev_handle; + struct mpi3mr_tgt_dev *tgtdev; + + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); + if (!tgtdev) { + dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n", + __func__, dev_handle); + return -1; + } + + if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { + dprint_bsg_err(mrioc, + "%s: NVMe device page size is zero for handle 0x%04x\n", + __func__, dev_handle); + mpi3mr_tgtdev_put(tgtdev); + return -1; + } + + dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); + mpi3mr_tgtdev_put(tgtdev); + + /* + * Not all commands require a data transfer. If no data, just return + * without constructing any PRP. + */ + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + dma_addr = drv_buf_iter->kern_buf_dma; + length = drv_buf_iter->kern_buf_len; + break; + } + + if (!length) + return 0; + + mrioc->prp_sz = 0; + mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev, + dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL); + + if (!mrioc->prp_list_virt) + return -1; + mrioc->prp_sz = dev_pgsz; + + /* + * Set pointers to PRP1 and PRP2, which are in the NVMe command. + * PRP1 is located at a 24 byte offset from the start of the NVMe + * command. Then set the current PRP entry pointer to PRP1. + */ + prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + + MPI3MR_NVME_CMD_PRP1_OFFSET); + prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + + MPI3MR_NVME_CMD_PRP2_OFFSET); + prp_entry = prp1_entry; + /* + * For the PRP entries, use the specially allocated buffer of + * contiguous memory. 
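+ *
+ * Editorial worked example, not part of the original patch, assuming a
+ * 4 KiB device page: for a 10 KiB transfer starting at page-aligned DMA
+ * address A, the loop below sets PRP1 = A, and since more than one page
+ * then remains, PRP2 points at this list buffer, whose first two entries
+ * are filled with A + 0x1000 and A + 0x2000.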
+ */ + prp_page = (__le64 *)mrioc->prp_list_virt; + prp_page_dma = mrioc->prp_list_dma; + + /* + * Check if we are within 1 entry of a page boundary we don't + * want our first entry to be a PRP List entry. + */ + page_mask = dev_pgsz - 1; + page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; + if (!page_mask_result) { + dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n", + __func__); + goto err_out; + } + + /* + * Set PRP physical pointer, which initially points to the current PRP + * DMA memory page. + */ + prp_entry_dma = prp_page_dma; + + + /* Loop while the length is not zero. */ + while (length) { + page_mask_result = (prp_entry_dma + prp_size) & page_mask; + if (!page_mask_result && (length > dev_pgsz)) { + dprint_bsg_err(mrioc, + "%s: single PRP page is not sufficient\n", + __func__); + goto err_out; + } + + /* Need to handle if entry will be part of a page. */ + offset = dma_addr & page_mask; + entry_len = dev_pgsz - offset; + + if (prp_entry == prp1_entry) { + /* + * Must fill in the first PRP pointer (PRP1) before + * moving on. + */ + *prp1_entry = cpu_to_le64(dma_addr); + if (*prp1_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP1 address collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp1_entry &= ~sgemod_mask; + *prp1_entry |= sgemod_val; + + /* + * Now point to the second PRP entry within the + * command (PRP2). + */ + prp_entry = prp2_entry; + } else if (prp_entry == prp2_entry) { + /* + * Should the PRP2 entry be a PRP List pointer or just + * a regular PRP pointer? If there is more than one + * more page of data, must use a PRP List pointer. + */ + if (length > dev_pgsz) { + /* + * PRP2 will contain a PRP List pointer because + * more PRP's are needed with this command. The + * list will start at the beginning of the + * contiguous buffer. + */ + *prp2_entry = cpu_to_le64(prp_entry_dma); + if (*prp2_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP list address collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp2_entry &= ~sgemod_mask; + *prp2_entry |= sgemod_val; + + /* + * The next PRP Entry will be the start of the + * first PRP List. + */ + prp_entry = prp_page; + continue; + } else { + /* + * After this, the PRP Entries are complete. + * This command uses 2 PRP's and no PRP list. + */ + *prp2_entry = cpu_to_le64(dma_addr); + if (*prp2_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP2 collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp2_entry &= ~sgemod_mask; + *prp2_entry |= sgemod_val; + } + } else { + /* + * Put entry in list and bump the addresses. + * + * After PRP1 and PRP2 are filled in, this will fill in + * all remaining PRP entries in a PRP List, one per + * each time through the loop. + */ + *prp_entry = cpu_to_le64(dma_addr); + if (*prp1_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP address collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp_entry &= ~sgemod_mask; + *prp_entry |= sgemod_val; + prp_entry++; + prp_entry_dma++; + } + + /* + * Bump the phys address of the command's data buffer by the + * entry_len. + */ + dma_addr += entry_len; + + /* decrement length accounting for last partial page. 
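+		 * (entry_len above is capped at the distance to the next
+		 * device-page boundary, so the final chunk may be shorter
+		 * than a full page.)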
*/ + if (entry_len > length) + length = 0; + else + length -= entry_len; + } + return 0; +err_out: + if (mrioc->prp_list_virt) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, + mrioc->prp_list_virt, mrioc->prp_list_dma); + mrioc->prp_list_virt = NULL; + } + return -1; +} +/** + * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler + * @job: BSG job reference + * + * This function is the top level handler for MPI Pass through + * command, this does basic validation of the input data buffers, + * identifies the given buffer types and MPI command, allocates + * DMAable memory for user given buffers, construstcs SGL + * properly and passes the command to the firmware. + * + * Once the MPI command is completed the driver copies the data + * if any and reply, sense information to user provided buffers. + * If the command is timed out then issues controller reset + * prior to returning. + * + * Return: 0 on success and proper error codes on failure + */ + +static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len) +{ + long rval = -EINVAL; + + struct mpi3mr_ioc *mrioc = NULL; + u8 *mpi_req = NULL, *sense_buff_k = NULL; + u8 mpi_msg_size = 0; + struct mpi3mr_bsg_packet *bsg_req = NULL; + struct mpi3mr_bsg_mptcmd *karg; + struct mpi3mr_buf_entry *buf_entries = NULL; + struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL; + u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0; + u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0; + u8 block_io = 0, resp_code = 0, nvme_fmt = 0; + struct mpi3_request_header *mpi_header = NULL; + struct mpi3_status_reply_descriptor *status_desc; + struct mpi3_scsi_task_mgmt_request *tm_req; + u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen; + u16 dev_handle; + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_stgt_priv_data *stgt_priv = NULL; + struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL; + u32 din_size = 0, dout_size = 0; + u8 *din_buf = NULL, *dout_buf = NULL; + u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL; + + bsg_req = job->request; + karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd; + + mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id); + if (!mrioc) + return -ENODEV; + + if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT) + karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT; + + mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL); + if (!mpi_req) + return -ENOMEM; + mpi_header = (struct mpi3_request_header *)mpi_req; + + bufcnt = karg->buf_entry_list.num_of_entries; + drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL); + if (!drv_bufs) { + rval = -ENOMEM; + goto out; + } + + dout_buf = kzalloc(job->request_payload.payload_len, + GFP_KERNEL); + if (!dout_buf) { + rval = -ENOMEM; + goto out; + } + + din_buf = kzalloc(job->reply_payload.payload_len, + GFP_KERNEL); + if (!din_buf) { + rval = -ENOMEM; + goto out; + } + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + dout_buf, job->request_payload.payload_len); + + buf_entries = karg->buf_entry_list.buf_entry; + sgl_din_iter = din_buf; + sgl_dout_iter = dout_buf; + drv_buf_iter = drv_bufs; + + for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) { + + if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { + dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", + __func__); + rval = -EINVAL; + goto out; + } + if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) { + dprint_bsg_err(mrioc, "%s: data_in 
buffer length mismatch\n", + __func__); + rval = -EINVAL; + goto out; + } + + switch (buf_entries->buf_type) { + case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD: + sgl_iter = sgl_dout_iter; + sgl_dout_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_TO_DEVICE; + is_rmcb = 1; + if (count != 0) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_FROM_DEVICE; + is_rmrb = 1; + if (count != 1 || !is_rmcb) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_DATA_IN: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_FROM_DEVICE; + din_cnt++; + din_size += drv_buf_iter->bsg_buf_len; + if ((din_cnt > 1) && !is_rmcb) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_DATA_OUT: + sgl_iter = sgl_dout_iter; + sgl_dout_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_TO_DEVICE; + dout_cnt++; + dout_size += drv_buf_iter->bsg_buf_len; + if ((dout_cnt > 1) && !is_rmcb) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_MPI_REPLY: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_NONE; + mpirep_offset = count; + break; + case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_NONE; + erb_offset = count; + break; + case MPI3MR_BSG_BUFTYPE_MPI_REQUEST: + sgl_iter = sgl_dout_iter; + sgl_dout_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_NONE; + mpi_msg_size = buf_entries->buf_len; + if ((!mpi_msg_size || (mpi_msg_size % 4)) || + (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) { + dprint_bsg_err(mrioc, "%s: invalid MPI message size\n", + __func__); + rval = -EINVAL; + goto out; + } + memcpy(mpi_req, sgl_iter, buf_entries->buf_len); + break; + default: + invalid_be = 1; + break; + } + if (invalid_be) { + dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n", + __func__); + rval = -EINVAL; + goto out; + } + + drv_buf_iter->bsg_buf = sgl_iter; + drv_buf_iter->bsg_buf_len = buf_entries->buf_len; + + } + if (!is_rmcb && (dout_cnt || din_cnt)) { + sg_entries = dout_cnt + din_cnt; + if (((mpi_msg_size) + (sg_entries * + sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) { + dprint_bsg_err(mrioc, + "%s:%d: invalid message size passed\n", + __func__, __LINE__); + rval = -EINVAL; + goto out; + } + } + if (din_size > MPI3MR_MAX_APP_XFER_SIZE) { + dprint_bsg_err(mrioc, + "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n", + __func__, __LINE__, mpi_header->function, din_size); + rval = -EINVAL; + goto out; + } + if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) { + dprint_bsg_err(mrioc, + "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n", + __func__, __LINE__, mpi_header->function, dout_size); + rval = -EINVAL; + goto out; + } + + drv_buf_iter = drv_bufs; + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + + drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len; + if (is_rmcb && !count) + drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) * + sizeof(struct mpi3_sge_common)); + + if (!drv_buf_iter->kern_buf_len) + continue; + + drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev, + drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma, + GFP_KERNEL); + if (!drv_buf_iter->kern_buf) { + rval = -ENOMEM; + goto out; + } + if (drv_buf_iter->data_dir == DMA_TO_DEVICE) { + tmplen = 
min(drv_buf_iter->kern_buf_len, + drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen); + } + } + + if (erb_offset != 0xFF) { + sense_buff_k = kzalloc(erbsz, GFP_KERNEL); + if (!sense_buff_k) { + rval = -ENOMEM; + goto out; + } + } + + if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) { + rval = -ERESTARTSYS; + goto out; + } + if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) { + rval = -EAGAIN; + dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + if (mrioc->unrecoverable) { + dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", + __func__); + rval = -EFAULT; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + rval = -EAGAIN; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + if (mrioc->stop_bsgs) { + dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); + rval = -EAGAIN; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + + if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) { + nvme_fmt = mpi3mr_get_nvme_data_fmt( + (struct mpi3_nvme_encapsulated_request *)mpi_req); + if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { + if (mpi3mr_build_nvme_prp(mrioc, + (struct mpi3_nvme_encapsulated_request *)mpi_req, + drv_bufs, bufcnt)) { + rval = -ENOMEM; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || + nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { + if (mpi3mr_build_nvme_sgl(mrioc, + (struct mpi3_nvme_encapsulated_request *)mpi_req, + drv_bufs, bufcnt)) { + rval = -EINVAL; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + } else { + dprint_bsg_err(mrioc, + "%s:invalid NVMe command format\n", __func__); + rval = -EINVAL; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + } else { + mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size), + drv_bufs, bufcnt, is_rmcb, is_rmrb, + (dout_cnt + din_cnt)); + } + + if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) { + tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req; + if (tm_req->task_type != + MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + dev_handle = tm_req->dev_handle; + block_io = 1; + } + } + if (block_io) { + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); + if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) { + stgt_priv = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; + atomic_inc(&stgt_priv->block_io); + mpi3mr_tgtdev_put(tgtdev); + } + } + + mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING; + mrioc->bsg_cmds.is_waiting = 1; + mrioc->bsg_cmds.callback = NULL; + mrioc->bsg_cmds.is_sense = 0; + mrioc->bsg_cmds.sensebuf = sense_buff_k; + memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz); + mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS); + if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) { + dprint_bsg_info(mrioc, + "%s: posting bsg request to the controller\n", __func__); + dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, + "bsg_mpi3_req"); + if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { + drv_buf_iter = &drv_bufs[0]; + dprint_dump(drv_buf_iter->kern_buf, + drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); + } + } + + init_completion(&mrioc->bsg_cmds.done); + rval = mpi3mr_admin_request_post(mrioc, mpi_req, + MPI3MR_ADMIN_REQ_FRAME_SZ, 0); + + + if (rval) { + mrioc->bsg_cmds.is_waiting = 0; + dprint_bsg_err(mrioc, + "%s: posting bsg request is failed\n", __func__); + rval = 
-EAGAIN; + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->bsg_cmds.done, + (karg->timeout * HZ)); + if (block_io && stgt_priv) + atomic_dec(&stgt_priv->block_io); + if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) { + mrioc->bsg_cmds.is_waiting = 0; + rval = -EAGAIN; + if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET) + goto out_unlock; + dprint_bsg_err(mrioc, + "%s: bsg request timedout after %d seconds\n", __func__, + karg->timeout); + if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) { + dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, + "bsg_mpi3_req"); + if (mpi_header->function == + MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { + drv_buf_iter = &drv_bufs[0]; + dprint_dump(drv_buf_iter->kern_buf, + drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); + } + } + + if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) || + (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) + mpi3mr_issue_tm(mrioc, + MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + mpi_header->function_dependent, 0, + MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT, + &mrioc->host_tm_cmds, &resp_code, NULL); + if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) && + !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)) + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_APP_TIMEOUT, 1); + goto out_unlock; + } + dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__); + + if (mrioc->prp_list_virt) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, + mrioc->prp_list_virt, mrioc->prp_list_dma); + mrioc->prp_list_virt = NULL; + } + + if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + dprint_bsg_info(mrioc, + "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, + (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->bsg_cmds.ioc_loginfo); + } + + if ((mpirep_offset != 0xFF) && + drv_bufs[mpirep_offset].bsg_buf_len) { + drv_buf_iter = &drv_bufs[mpirep_offset]; + drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 + + mrioc->reply_sz); + bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL); + + if (!bsg_reply_buf) { + rval = -ENOMEM; + goto out_unlock; + } + if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) { + bsg_reply_buf->mpi_reply_type = + MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS; + memcpy(bsg_reply_buf->reply_buf, + mrioc->bsg_cmds.reply, mrioc->reply_sz); + } else { + bsg_reply_buf->mpi_reply_type = + MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS; + status_desc = (struct mpi3_status_reply_descriptor *) + bsg_reply_buf->reply_buf; + status_desc->ioc_status = mrioc->bsg_cmds.ioc_status; + status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo; + } + tmplen = min(drv_buf_iter->kern_buf_len, + drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen); + } + + if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf && + mrioc->bsg_cmds.is_sense) { + drv_buf_iter = &drv_bufs[erb_offset]; + tmplen = min(erbsz, drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen); + } + + drv_buf_iter = drv_bufs; + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) { + tmplen = min(drv_buf_iter->kern_buf_len, + drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->bsg_buf, + drv_buf_iter->kern_buf, tmplen); + } + } + +out_unlock: + if (din_buf) { + *reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + din_buf, job->reply_payload.payload_len); + } + 
mrioc->bsg_cmds.is_sense = 0;
+	mrioc->bsg_cmds.sensebuf = NULL;
+	mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
+	mutex_unlock(&mrioc->bsg_cmds.mutex);
+out:
+	kfree(sense_buff_k);
+	kfree(dout_buf);
+	kfree(din_buf);
+	kfree(mpi_req);
+	if (drv_bufs) {
+		drv_buf_iter = drv_bufs;
+		for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+			if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
+				dma_free_coherent(&mrioc->pdev->dev,
+				    drv_buf_iter->kern_buf_len,
+				    drv_buf_iter->kern_buf,
+				    drv_buf_iter->kern_buf_dma);
+		}
+		kfree(drv_bufs);
+	}
+	kfree(bsg_reply_buf);
+	return rval;
+}
+
+/**
+ * mpi3mr_app_save_logdata - Save Log Data events
+ * @mrioc: Adapter instance reference
+ * @event_data: event data associated with log data event
+ * @event_data_size: event data size to copy
+ *
+ * If log data event caching is enabled by the applications,
+ * then this function saves the log data in the circular queue
+ * and sends the async signal SIGIO to indicate there is an async
+ * event from the firmware to the event monitoring applications.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+	u16 event_data_size)
+{
+	u32 index = mrioc->logdata_buf_idx, sz;
+	struct mpi3mr_logdata_entry *entry;
+
+	if (!(mrioc->logdata_buf))
+		return;
+
+	entry = (struct mpi3mr_logdata_entry *)
+		(mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
+	entry->valid_entry = 1;
+	sz = min(mrioc->logdata_entry_sz, event_data_size);
+	memcpy(entry->data, event_data, sz);
+	mrioc->logdata_buf_idx =
+		((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
+	atomic64_inc(&event_counter);
+}
+
+/**
+ * mpi3mr_bsg_request - bsg request entry point
+ * @job: BSG job reference
+ *
+ * This is the driver's entry point for bsg requests
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static int mpi3mr_bsg_request(struct bsg_job *job)
+{
+	long rval = -EINVAL;
+	unsigned int reply_payload_rcv_len = 0;
+
+	struct mpi3mr_bsg_packet *bsg_req = job->request;
+
+	switch (bsg_req->cmd_type) {
+	case MPI3MR_DRV_CMD:
+		rval = mpi3mr_bsg_process_drv_cmds(job);
+		break;
+	case MPI3MR_MPT_CMD:
+		rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
+		break;
+	default:
+		pr_err("%s: unsupported BSG command(0x%08x)\n",
+		    MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
+		break;
+	}
+
+	bsg_job_done(job, rval, reply_payload_rcv_len);
+
+	return 0;
+}
+
+/**
+ * mpi3mr_bsg_exit - de-registration from bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver unload and all
+ * bsg resources allocated during load will be freed.
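+ *
+ * Editorial note, not part of the original patch: teardown below mirrors
+ * mpi3mr_bsg_init() in reverse —
+ *
+ *	bsg_remove_queue(mrioc->bsg_queue);
+ *	device_del(bsg_dev);
+ *	put_device(bsg_dev);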
+ * + * Return:Nothing + */ +void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc) +{ + struct device *bsg_dev = &mrioc->bsg_dev; + if (!mrioc->bsg_queue) + return; + + bsg_remove_queue(mrioc->bsg_queue); + mrioc->bsg_queue = NULL; + + device_del(bsg_dev); + put_device(bsg_dev); +} + +/** + * mpi3mr_bsg_node_release -release bsg device node + * @dev: bsg device node + * + * decrements bsg dev parent reference count + * + * Return:Nothing + */ +static void mpi3mr_bsg_node_release(struct device *dev) +{ + put_device(dev->parent); +} + +/** + * mpi3mr_bsg_init - registration with bsg layer + * + * This will be called during driver load and it will + * register driver with bsg layer + * + * Return:Nothing + */ +void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) +{ + struct device *bsg_dev = &mrioc->bsg_dev; + struct device *parent = &mrioc->shost->shost_gendev; + + device_initialize(bsg_dev); + + bsg_dev->parent = get_device(parent); + bsg_dev->release = mpi3mr_bsg_node_release; + + dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id); + + if (device_add(bsg_dev)) { + ioc_err(mrioc, "%s: bsg device add failed\n", + dev_name(bsg_dev)); + put_device(bsg_dev); + return; + } + + mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), + mpi3mr_bsg_request, NULL, 0); + if (IS_ERR(mrioc->bsg_queue)) { + ioc_err(mrioc, "%s: bsg registration failed\n", + dev_name(bsg_dev)); + device_del(bsg_dev); + put_device(bsg_dev); + return; + } + + blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS); + blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS); + + return; +} + +/** + * version_fw_show - SysFS callback for firmware version read + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying firmware version + */ +static ssize_t +version_fw_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; + + return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n", + fwver->gen_major, fwver->gen_minor, fwver->ph_major, + fwver->ph_minor, fwver->cust_id, fwver->build_num); +} +static DEVICE_ATTR_RO(version_fw); + +/** + * fw_queue_depth_show - SysFS callback for firmware max cmds + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying firmware max commands + */ +static ssize_t +fw_queue_depth_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs); +} +static DEVICE_ATTR_RO(fw_queue_depth); + +/** + * op_req_q_count_show - SysFS callback for request queue count + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying request queue count + */ +static ssize_t +op_req_q_count_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q); +} +static DEVICE_ATTR_RO(op_req_q_count); + +/** + * reply_queue_count_show - SysFS callback for reply queue count + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying reply queue count + */ 
+static ssize_t +reply_queue_count_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q); +} + +static DEVICE_ATTR_RO(reply_queue_count); + +/** + * logging_level_show - Show controller debug level + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * A sysfs 'read/write' shost attribute, to show the current + * debug log level used by the driver for the specific + * controller. + * + * Return: sysfs_emit() return + */ +static ssize_t +logging_level_show(struct device *dev, + struct device_attribute *attr, char *buf) + +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%08xh\n", mrioc->logging_level); +} + +/** + * logging_level_store- Change controller debug level + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * @count: size of the buffer + * + * A sysfs 'read/write' shost attribute, to change the current + * debug log level used by the driver for the specific + * controller. + * + * Return: strlen() return + */ +static ssize_t +logging_level_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + mrioc->logging_level = val; + ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level); + return strlen(buf); +} +static DEVICE_ATTR_RW(logging_level); + +/** + * adp_state_show() - SysFS callback for adapter state show + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying adapter state + */ +static ssize_t +adp_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + enum mpi3mr_iocstate ioc_state; + uint8_t adp_state; + + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state == MRIOC_STATE_UNRECOVERABLE) + adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; + else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) + adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; + else if (ioc_state == MRIOC_STATE_FAULT) + adp_state = MPI3MR_BSG_ADPSTATE_FAULT; + else + adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL; + + return sysfs_emit(buf, "%u\n", adp_state); +} + +static DEVICE_ATTR_RO(adp_state); + +static struct attribute *mpi3mr_host_attrs[] = { + &dev_attr_version_fw.attr, + &dev_attr_fw_queue_depth.attr, + &dev_attr_op_req_q_count.attr, + &dev_attr_reply_queue_count.attr, + &dev_attr_logging_level.attr, + &dev_attr_adp_state.attr, + NULL, +}; + +static const struct attribute_group mpi3mr_host_attr_group = { + .attrs = mpi3mr_host_attrs +}; + +const struct attribute_group *mpi3mr_host_groups[] = { + &mpi3mr_host_attr_group, + NULL, +}; + + +/* + * SCSI Device attributes under sysfs + */ + +/** + * sas_address_show - SysFS callback for dev SASaddress display + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying SAS address of the + * specific SAS/SATA end device. 
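+ *
+ * Editorial usage sketch, not part of the original patch; the sysfs
+ * device address and the sample value are illustrative only:
+ *
+ *	$ cat /sys/bus/scsi/devices/0:0:1:0/sas_address
+ *	0x500605b0000272b0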
+ */
+static ssize_t
+sas_address_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct mpi3mr_sdev_priv_data *sdev_priv_data;
+	struct mpi3mr_stgt_priv_data *tgt_priv_data;
+	struct mpi3mr_tgt_dev *tgtdev;
+
+	sdev_priv_data = sdev->hostdata;
+	if (!sdev_priv_data)
+		return 0;
+
+	tgt_priv_data = sdev_priv_data->tgt_priv_data;
+	if (!tgt_priv_data)
+		return 0;
+	tgtdev = tgt_priv_data->tgt_dev;
+	if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
+		return 0;
+	return sysfs_emit(buf, "0x%016llx\n",
+	    (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
+}
+
+static DEVICE_ATTR_RO(sas_address);
+
+/**
+ * device_handle_show - SysFS callback for device handle display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware internal
+ * device handle of the specific device.
+ */
+static ssize_t
+device_handle_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct mpi3mr_sdev_priv_data *sdev_priv_data;
+	struct mpi3mr_stgt_priv_data *tgt_priv_data;
+	struct mpi3mr_tgt_dev *tgtdev;
+
+	sdev_priv_data = sdev->hostdata;
+	if (!sdev_priv_data)
+		return 0;
+
+	tgt_priv_data = sdev_priv_data->tgt_priv_data;
+	if (!tgt_priv_data)
+		return 0;
+	tgtdev = tgt_priv_data->tgt_dev;
+	if (!tgtdev)
+		return 0;
+	return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
+}
+
+static DEVICE_ATTR_RO(device_handle);
+
+/**
+ * persistent_id_show - SysFS callback for persistent ID display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying the persistent ID
+ * of the specific device.
+ */ +static ssize_t +persistent_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data; + struct mpi3mr_stgt_priv_data *tgt_priv_data; + struct mpi3mr_tgt_dev *tgtdev; + + sdev_priv_data = sdev->hostdata; + if (!sdev_priv_data) + return 0; + + tgt_priv_data = sdev_priv_data->tgt_priv_data; + if (!tgt_priv_data) + return 0; + tgtdev = tgt_priv_data->tgt_dev; + if (!tgtdev) + return 0; + return sysfs_emit(buf, "%d\n", tgtdev->perst_id); +} +static DEVICE_ATTR_RO(persistent_id); + +static struct attribute *mpi3mr_dev_attrs[] = { + &dev_attr_sas_address.attr, + &dev_attr_device_handle.attr, + &dev_attr_persistent_id.attr, + NULL, +}; + +static const struct attribute_group mpi3mr_dev_attr_group = { + .attrs = mpi3mr_dev_attrs +}; + +const struct attribute_group *mpi3mr_dev_groups[] = { + &mpi3mr_dev_attr_group, + NULL, +}; diff --git a/drivers/soc/apple/rtkit-crashlog.c b/drivers/soc/apple/rtkit-crashlog.c new file mode 100644 index 0000000000..732deed646 --- /dev/null +++ b/drivers/soc/apple/rtkit-crashlog.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple RTKit IPC library + * Copyright (C) The Asahi Linux Contributors + */ +#include "rtkit-internal.h" + +#define FOURCC(a, b, c, d) \ + (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d))) + +#define APPLE_RTKIT_CRASHLOG_HEADER FOURCC('C', 'L', 'H', 'E') +#define APPLE_RTKIT_CRASHLOG_STR FOURCC('C', 's', 't', 'r') +#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r') +#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x') +#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm') + +struct apple_rtkit_crashlog_header { + u32 fourcc; + u32 version; + u32 size; + u32 flags; + u8 _unk[16]; +}; +static_assert(sizeof(struct apple_rtkit_crashlog_header) == 0x20); + +struct apple_rtkit_crashlog_mbox_entry { + u64 msg0; + u64 msg1; + u32 timestamp; + u8 _unk[4]; +}; +static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18); + +static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + u32 idx; + u8 *ptr, *end; + + memcpy(&idx, bfr, 4); + + ptr = bfr + 4; + end = bfr + size; + while (ptr < end) { + u8 *newline = memchr(ptr, '\n', end - ptr); + + if (newline) { + u8 tmp = *newline; + *newline = '\0'; + dev_warn(rtk->dev, "RTKit: Message (id=%x): %s\n", idx, + ptr); + *newline = tmp; + ptr = newline + 1; + } else { + dev_warn(rtk->dev, "RTKit: Message (id=%x): %s", idx, + ptr); + break; + } + } +} + +static void apple_rtkit_crashlog_dump_version(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + dev_warn(rtk->dev, "RTKit: Version: %s", bfr + 16); +} + +static void apple_rtkit_crashlog_dump_time(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + u64 crash_time; + + memcpy(&crash_time, bfr, 8); + dev_warn(rtk->dev, "RTKit: Crash time: %lld", crash_time); +} + +static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + u32 type, index, i; + size_t n_messages; + struct apple_rtkit_crashlog_mbox_entry entry; + + memcpy(&type, bfr + 16, 4); + memcpy(&index, bfr + 24, 4); + n_messages = (size - 28) / sizeof(entry); + + dev_warn(rtk->dev, "RTKit: Mailbox history (type = %d, index = %d)", + type, index); + for (i = 0; i < n_messages; ++i) { + memcpy(&entry, bfr + 28 + i * sizeof(entry), sizeof(entry)); + dev_warn(rtk->dev, "RTKit: #%03d@%08x: %016llx %016llx", i, + entry.timestamp, 
entry.msg0, entry.msg1);
+	}
+}
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
+{
+	size_t offset;
+	u32 section_fourcc, section_size;
+	struct apple_rtkit_crashlog_header header;
+
+	memcpy(&header, bfr, sizeof(header));
+	if (header.fourcc != APPLE_RTKIT_CRASHLOG_HEADER) {
+		dev_warn(rtk->dev, "RTKit: Expected crashlog header but got %x",
+			 header.fourcc);
+		return;
+	}
+
+	if (header.size > size) {
+		dev_warn(rtk->dev, "RTKit: Crashlog size (%x) is too large",
+			 header.size);
+		return;
+	}
+
+	size = header.size;
+	offset = sizeof(header);
+
+	while (offset < size) {
+		memcpy(&section_fourcc, bfr + offset, 4);
+		memcpy(&section_size, bfr + offset + 12, 4);
+
+		switch (section_fourcc) {
+		case APPLE_RTKIT_CRASHLOG_HEADER:
+			dev_dbg(rtk->dev, "RTKit: End of crashlog reached");
+			return;
+		case APPLE_RTKIT_CRASHLOG_STR:
+			apple_rtkit_crashlog_dump_str(rtk, bfr + offset + 16,
+						      section_size);
+			break;
+		case APPLE_RTKIT_CRASHLOG_VERSION:
+			apple_rtkit_crashlog_dump_version(
+				rtk, bfr + offset + 16, section_size);
+			break;
+		case APPLE_RTKIT_CRASHLOG_MBOX:
+			apple_rtkit_crashlog_dump_mailbox(
+				rtk, bfr + offset + 16, section_size);
+			break;
+		case APPLE_RTKIT_CRASHLOG_TIME:
+			apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16,
+						       section_size);
+			break;
+		default:
+			dev_warn(rtk->dev,
+				 "RTKit: Unknown crashlog section: %x",
+				 section_fourcc);
+		}
+
+		offset += section_size;
+	}
+
+	dev_warn(rtk->dev,
+		 "RTKit: End of crashlog reached but no footer present");
+}
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
new file mode 100644
index 0000000000..24bd619ec5
--- /dev/null
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_RTKIT_INTERAL_H
+#define _APPLE_RTKIT_INTERAL_H
+
+#include <linux/apple-mailbox.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/workqueue.h>
+
+#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
+#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
+
+struct apple_rtkit {
+	void *cookie;
+	const struct apple_rtkit_ops *ops;
+	struct device *dev;
+
+	const char *mbox_name;
+	int mbox_idx;
+	struct mbox_client mbox_cl;
+	struct mbox_chan *mbox_chan;
+
+	struct completion epmap_completion;
+	struct completion iop_pwr_ack_completion;
+	struct completion ap_pwr_ack_completion;
+
+	int boot_result;
+	int version;
+
+	unsigned int iop_power_state;
+	unsigned int ap_power_state;
+	bool crashed;
+
+	DECLARE_BITMAP(endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+
+	struct apple_rtkit_shmem ioreport_buffer;
+	struct apple_rtkit_shmem crashlog_buffer;
+
+	struct apple_rtkit_shmem syslog_buffer;
+	char *syslog_msg_buffer;
+	size_t syslog_n_entries;
+	size_t syslog_msg_size;
+
+	struct workqueue_struct *wq;
+};
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size);
+
+#endif
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
new file mode 100644
index 0000000000..cf1129e9f7
--- /dev/null
+++ b/drivers/soc/apple/rtkit.c
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include "rtkit-internal.h"
+
+enum {
+	APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */
+	APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
+	APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication
*/ + APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */ +}; + +enum { + APPLE_RTKIT_EP_MGMT = 0, + APPLE_RTKIT_EP_CRASHLOG = 1, + APPLE_RTKIT_EP_SYSLOG = 2, + APPLE_RTKIT_EP_DEBUG = 3, + APPLE_RTKIT_EP_IOREPORT = 4, + APPLE_RTKIT_EP_OSLOG = 8, +}; + +#define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52) + +enum { + APPLE_RTKIT_MGMT_HELLO = 1, + APPLE_RTKIT_MGMT_HELLO_REPLY = 2, + APPLE_RTKIT_MGMT_STARTEP = 5, + APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE = 6, + APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK = 7, + APPLE_RTKIT_MGMT_EPMAP = 8, + APPLE_RTKIT_MGMT_EPMAP_REPLY = 8, + APPLE_RTKIT_MGMT_SET_AP_PWR_STATE = 0xb, + APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK = 0xb, +}; + +#define APPLE_RTKIT_MGMT_HELLO_MINVER GENMASK_ULL(15, 0) +#define APPLE_RTKIT_MGMT_HELLO_MAXVER GENMASK_ULL(31, 16) + +#define APPLE_RTKIT_MGMT_EPMAP_LAST BIT_ULL(51) +#define APPLE_RTKIT_MGMT_EPMAP_BASE GENMASK_ULL(34, 32) +#define APPLE_RTKIT_MGMT_EPMAP_BITMAP GENMASK_ULL(31, 0) + +#define APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE BIT_ULL(0) + +#define APPLE_RTKIT_MGMT_STARTEP_EP GENMASK_ULL(39, 32) +#define APPLE_RTKIT_MGMT_STARTEP_FLAG BIT_ULL(1) + +#define APPLE_RTKIT_MGMT_PWR_STATE GENMASK_ULL(15, 0) + +#define APPLE_RTKIT_CRASHLOG_CRASH 1 + +#define APPLE_RTKIT_BUFFER_REQUEST 1 +#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44) +#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(41, 0) + +#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52) + +#define APPLE_RTKIT_SYSLOG_LOG 5 + +#define APPLE_RTKIT_SYSLOG_INIT 8 +#define APPLE_RTKIT_SYSLOG_N_ENTRIES GENMASK_ULL(7, 0) +#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24) + +#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56) +#define APPLE_RTKIT_OSLOG_INIT 1 +#define APPLE_RTKIT_OSLOG_ACK 3 + +#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11 +#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12 + +struct apple_rtkit_msg { + struct completion *completion; + struct apple_mbox_msg mbox_msg; +}; + +struct apple_rtkit_rx_work { + struct apple_rtkit *rtk; + u8 ep; + u64 msg; + struct work_struct work; +}; + +bool apple_rtkit_is_running(struct apple_rtkit *rtk) +{ + if (rtk->crashed) + return false; + if ((rtk->iop_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON) + return false; + if ((rtk->ap_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON) + return false; + return true; +} +EXPORT_SYMBOL_GPL(apple_rtkit_is_running); + +bool apple_rtkit_is_crashed(struct apple_rtkit *rtk) +{ + return rtk->crashed; +} +EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed); + +static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type, + u64 msg) +{ + msg &= ~APPLE_RTKIT_MGMT_TYPE; + msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type); + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false); +} + +static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg) +{ + u64 reply; + + int min_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MINVER, msg); + int max_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MAXVER, msg); + int want_ver = min(APPLE_RTKIT_MAX_SUPPORTED_VERSION, max_ver); + + dev_dbg(rtk->dev, "RTKit: Min ver %d, max ver %d\n", min_ver, max_ver); + + if (min_ver > APPLE_RTKIT_MAX_SUPPORTED_VERSION) { + dev_err(rtk->dev, "RTKit: Firmware min version %d is too new\n", + min_ver); + goto abort_boot; + } + + if (max_ver < APPLE_RTKIT_MIN_SUPPORTED_VERSION) { + dev_err(rtk->dev, "RTKit: Firmware max version %d is too old\n", + max_ver); + goto abort_boot; + } + + dev_info(rtk->dev, "RTKit: Initializing (protocol version %d)\n", + want_ver); + rtk->version = want_ver; + + reply = 
FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MINVER, want_ver); + reply |= FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MAXVER, want_ver); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_HELLO_REPLY, reply); + + return; + +abort_boot: + rtk->boot_result = -EINVAL; + complete_all(&rtk->epmap_completion); +} + +static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg) +{ + int i, ep; + u64 reply; + unsigned long bitmap = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BITMAP, msg); + u32 base = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BASE, msg); + + dev_dbg(rtk->dev, + "RTKit: received endpoint bitmap 0x%lx with base 0x%x\n", + bitmap, base); + + for_each_set_bit(i, &bitmap, 32) { + ep = 32 * base + i; + dev_dbg(rtk->dev, "RTKit: Discovered endpoint 0x%02x\n", ep); + set_bit(ep, rtk->endpoints); + } + + reply = FIELD_PREP(APPLE_RTKIT_MGMT_EPMAP_BASE, base); + if (msg & APPLE_RTKIT_MGMT_EPMAP_LAST) + reply |= APPLE_RTKIT_MGMT_EPMAP_LAST; + else + reply |= APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE; + + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_EPMAP_REPLY, reply); + + if (!(msg & APPLE_RTKIT_MGMT_EPMAP_LAST)) + return; + + for_each_set_bit(ep, rtk->endpoints, APPLE_RTKIT_APP_ENDPOINT_START) { + switch (ep) { + /* the management endpoint is started by default */ + case APPLE_RTKIT_EP_MGMT: + break; + + /* without starting these RTKit refuses to boot */ + case APPLE_RTKIT_EP_SYSLOG: + case APPLE_RTKIT_EP_CRASHLOG: + case APPLE_RTKIT_EP_DEBUG: + case APPLE_RTKIT_EP_IOREPORT: + case APPLE_RTKIT_EP_OSLOG: + dev_dbg(rtk->dev, + "RTKit: Starting system endpoint 0x%02x\n", ep); + apple_rtkit_start_ep(rtk, ep); + break; + + default: + dev_warn(rtk->dev, + "RTKit: Unknown system endpoint: 0x%02x\n", + ep); + } + } + + rtk->boot_result = 0; + complete_all(&rtk->epmap_completion); +} + +static void apple_rtkit_management_rx_iop_pwr_ack(struct apple_rtkit *rtk, + u64 msg) +{ + unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg); + + dev_dbg(rtk->dev, "RTKit: IOP power state transition: 0x%x -> 0x%x\n", + rtk->iop_power_state, new_state); + rtk->iop_power_state = new_state; + + complete_all(&rtk->iop_pwr_ack_completion); +} + +static void apple_rtkit_management_rx_ap_pwr_ack(struct apple_rtkit *rtk, + u64 msg) +{ + unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg); + + dev_dbg(rtk->dev, "RTKit: AP power state transition: 0x%x -> 0x%x\n", + rtk->ap_power_state, new_state); + rtk->ap_power_state = new_state; + + complete_all(&rtk->ap_pwr_ack_completion); +} + +static void apple_rtkit_management_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_MGMT_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_MGMT_HELLO: + apple_rtkit_management_rx_hello(rtk, msg); + break; + case APPLE_RTKIT_MGMT_EPMAP: + apple_rtkit_management_rx_epmap(rtk, msg); + break; + case APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK: + apple_rtkit_management_rx_iop_pwr_ack(rtk, msg); + break; + case APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK: + apple_rtkit_management_rx_ap_pwr_ack(rtk, msg); + break; + default: + dev_warn( + rtk->dev, + "RTKit: unknown management message: 0x%llx (type: 0x%02x)\n", + msg, type); + } +} + +static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, + struct apple_rtkit_shmem *buffer, + u8 ep, u64 msg) +{ + size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg); + u64 reply; + int err; + + buffer->buffer = NULL; + buffer->iomem = NULL; + buffer->is_mapped = false; + buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); + buffer->size = n_4kpages << 12; + + 
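+	/*
+	 * The request packs the size in 4k pages into bits 51:44 and an
+	 * optional pre-mapped IOVA into bits 41:0, so e.g. a size field of
+	 * 0x10 asks for 0x10 << 12 = 64 KiB. A zero IOVA means we are
+	 * expected to allocate the buffer ourselves and reply with its
+	 * address below.
+	 */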
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n", + buffer->size, &buffer->iova); + + if (buffer->iova && + (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) { + err = -EINVAL; + goto error; + } + + if (rtk->ops->shmem_setup) { + err = rtk->ops->shmem_setup(rtk->cookie, buffer); + if (err) + goto error; + } else { + buffer->buffer = dma_alloc_coherent(rtk->dev, buffer->size, + &buffer->iova, GFP_KERNEL); + if (!buffer->buffer) { + err = -ENOMEM; + goto error; + } + } + + if (!buffer->is_mapped) { + reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, + APPLE_RTKIT_BUFFER_REQUEST); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, + buffer->iova); + apple_rtkit_send_message(rtk, ep, reply, NULL, false); + } + + return 0; + +error: + buffer->buffer = NULL; + buffer->iomem = NULL; + buffer->iova = 0; + buffer->size = 0; + buffer->is_mapped = false; + return err; +} + +static void apple_rtkit_free_buffer(struct apple_rtkit *rtk, + struct apple_rtkit_shmem *bfr) +{ + if (bfr->size == 0) + return; + + if (rtk->ops->shmem_destroy) + rtk->ops->shmem_destroy(rtk->cookie, bfr); + else if (bfr->buffer) + dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova); + + bfr->buffer = NULL; + bfr->iomem = NULL; + bfr->iova = 0; + bfr->size = 0; + bfr->is_mapped = false; +} + +static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst, + struct apple_rtkit_shmem *bfr, size_t offset, + size_t len) +{ + if (bfr->iomem) + memcpy_fromio(dst, bfr->iomem + offset, len); + else + memcpy(dst, bfr->buffer + offset, len); +} + +static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg); + u8 *bfr; + + if (type != APPLE_RTKIT_CRASHLOG_CRASH) { + dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n", + msg); + return; + } + + if (!rtk->crashlog_buffer.size) { + apple_rtkit_common_rx_get_buffer(rtk, &rtk->crashlog_buffer, + APPLE_RTKIT_EP_CRASHLOG, msg); + return; + } + + dev_err(rtk->dev, "RTKit: co-processor has crashed\n"); + + /* + * create a shadow copy here to make sure the co-processor isn't able + * to change the log while we're dumping it. this also ensures + * the buffer is in normal memory and not iomem for e.g. 
the SMC + */ + bfr = kzalloc(rtk->crashlog_buffer.size, GFP_KERNEL); + if (bfr) { + apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0, + rtk->crashlog_buffer.size); + apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size); + kfree(bfr); + } else { + dev_err(rtk->dev, + "RTKit: Couldn't allocate crashlog shadow buffer\n"); + } + + rtk->crashed = true; + if (rtk->ops->crashed) + rtk->ops->crashed(rtk->cookie); +} + +static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_BUFFER_REQUEST: + apple_rtkit_common_rx_get_buffer(rtk, &rtk->ioreport_buffer, + APPLE_RTKIT_EP_IOREPORT, msg); + break; + /* unknown, must be ACKed or the co-processor will hang */ + case 0x8: + case 0xc: + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_IOREPORT, msg, + NULL, false); + break; + default: + dev_warn(rtk->dev, "RTKit: Unknown ioreport message: %llx\n", + msg); + } +} + +static void apple_rtkit_syslog_rx_init(struct apple_rtkit *rtk, u64 msg) +{ + rtk->syslog_n_entries = FIELD_GET(APPLE_RTKIT_SYSLOG_N_ENTRIES, msg); + rtk->syslog_msg_size = FIELD_GET(APPLE_RTKIT_SYSLOG_MSG_SIZE, msg); + + rtk->syslog_msg_buffer = kzalloc(rtk->syslog_msg_size, GFP_KERNEL); + + dev_dbg(rtk->dev, + "RTKit: syslog initialized: entries: %zd, msg_size: %zd\n", + rtk->syslog_n_entries, rtk->syslog_msg_size); +} + +static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg) +{ + u8 idx = msg & 0xff; + char log_context[24]; + size_t entry_size = 0x20 + rtk->syslog_msg_size; + + if (!rtk->syslog_msg_buffer) { + dev_warn( + rtk->dev, + "RTKit: received syslog message but no syslog_msg_buffer\n"); + goto done; + } + if (!rtk->syslog_buffer.size) { + dev_warn( + rtk->dev, + "RTKit: received syslog message but syslog_buffer.size is zero\n"); + goto done; + } + if (!rtk->syslog_buffer.buffer && !rtk->syslog_buffer.iomem) { + dev_warn( + rtk->dev, + "RTKit: received syslog message but no syslog_buffer.buffer or syslog_buffer.iomem\n"); + goto done; + } + if (idx > rtk->syslog_n_entries) { + dev_warn(rtk->dev, "RTKit: syslog index %d out of range\n", + idx); + goto done; + } + + apple_rtkit_memcpy(rtk, log_context, &rtk->syslog_buffer, + idx * entry_size + 8, sizeof(log_context)); + apple_rtkit_memcpy(rtk, rtk->syslog_msg_buffer, &rtk->syslog_buffer, + idx * entry_size + 8 + sizeof(log_context), + rtk->syslog_msg_size); + + log_context[sizeof(log_context) - 1] = 0; + rtk->syslog_msg_buffer[rtk->syslog_msg_size - 1] = 0; + dev_info(rtk->dev, "RTKit: syslog message: %s: %s\n", log_context, + rtk->syslog_msg_buffer); + +done: + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_SYSLOG, msg, NULL, false); +} + +static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_BUFFER_REQUEST: + apple_rtkit_common_rx_get_buffer(rtk, &rtk->syslog_buffer, + APPLE_RTKIT_EP_SYSLOG, msg); + break; + case APPLE_RTKIT_SYSLOG_INIT: + apple_rtkit_syslog_rx_init(rtk, msg); + break; + case APPLE_RTKIT_SYSLOG_LOG: + apple_rtkit_syslog_rx_log(rtk, msg); + break; + default: + dev_warn(rtk->dev, "RTKit: Unknown syslog message: %llx\n", + msg); + } +} + +static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg) +{ + u64 ack; + + dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg); + ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK); + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false); +} + 
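+/*
+ * Sketch of how a consumer hooks into the dispatch below (hypothetical
+ * driver names; the callback signatures follow the calls made from
+ * apple_rtkit_rx_work() and apple_rtkit_rx()):
+ *
+ *	static void my_recv(void *cookie, u8 ep, u64 msg)
+ *	{
+ *		struct my_driver *drv = cookie;
+ *		... runs from the ordered rtkit workqueue, may sleep ...
+ *	}
+ *
+ *	static const struct apple_rtkit_ops my_ops = {
+ *		.recv_message = my_recv,
+ *	};
+ *
+ *	rtk = devm_apple_rtkit_init(dev, drv, NULL, 0, &my_ops);
+ *
+ * Messages to application endpoints (0x20 and up) reach .recv_message;
+ * .recv_message_early may claim a message before it is queued to the
+ * workqueue.
+ */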
+static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_OSLOG_INIT: + apple_rtkit_oslog_rx_init(rtk, msg); + break; + default: + dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg); + } +} + +static void apple_rtkit_rx_work(struct work_struct *work) +{ + struct apple_rtkit_rx_work *rtk_work = + container_of(work, struct apple_rtkit_rx_work, work); + struct apple_rtkit *rtk = rtk_work->rtk; + + switch (rtk_work->ep) { + case APPLE_RTKIT_EP_MGMT: + apple_rtkit_management_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_CRASHLOG: + apple_rtkit_crashlog_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_SYSLOG: + apple_rtkit_syslog_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_IOREPORT: + apple_rtkit_ioreport_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_OSLOG: + apple_rtkit_oslog_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_APP_ENDPOINT_START ... 0xff: + if (rtk->ops->recv_message) + rtk->ops->recv_message(rtk->cookie, rtk_work->ep, + rtk_work->msg); + else + dev_warn( + rtk->dev, + "Received unexpected message to EP%02d: %llx\n", + rtk_work->ep, rtk_work->msg); + break; + default: + dev_warn(rtk->dev, + "RTKit: message to unknown endpoint %02x: %llx\n", + rtk_work->ep, rtk_work->msg); + } + + kfree(rtk_work); +} + +static void apple_rtkit_rx(struct mbox_client *cl, void *mssg) +{ + struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl); + struct apple_mbox_msg *msg = mssg; + struct apple_rtkit_rx_work *work; + u8 ep = msg->msg1; + + /* + * The message was read from a MMIO FIFO and we have to make + * sure all reads from buffers sent with that message happen + * afterwards. + */ + dma_rmb(); + + if (!test_bit(ep, rtk->endpoints)) + dev_warn(rtk->dev, + "RTKit: Message to undiscovered endpoint 0x%02x\n", + ep); + + if (ep >= APPLE_RTKIT_APP_ENDPOINT_START && + rtk->ops->recv_message_early && + rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0)) + return; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + work->rtk = rtk; + work->ep = ep; + work->msg = msg->msg0; + INIT_WORK(&work->work, apple_rtkit_rx_work); + queue_work(rtk->wq, &work->work); +} + +static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r) +{ + struct apple_rtkit_msg *msg = + container_of(mssg, struct apple_rtkit_msg, mbox_msg); + + if (r == -ETIME) + return; + + if (msg->completion) + complete(msg->completion); + kfree(msg); +} + +int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message, + struct completion *completion, bool atomic) +{ + struct apple_rtkit_msg *msg; + int ret; + gfp_t flags; + + if (rtk->crashed) + return -EINVAL; + if (ep >= APPLE_RTKIT_APP_ENDPOINT_START && + !apple_rtkit_is_running(rtk)) + return -EINVAL; + + if (atomic) + flags = GFP_ATOMIC; + else + flags = GFP_KERNEL; + + msg = kzalloc(sizeof(*msg), flags); + if (!msg) + return -ENOMEM; + + msg->mbox_msg.msg0 = message; + msg->mbox_msg.msg1 = ep; + msg->completion = completion; + + /* + * The message will be sent with a MMIO write. We need the barrier + * here to ensure any previous writes to buffers are visible to the + * device before that MMIO write happens. 
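+	 * (This pairs with the dma_rmb() on the receive side in
+	 * apple_rtkit_rx().)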
+ */ + dma_wmb(); + + ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg); + if (ret < 0) { + kfree(msg); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apple_rtkit_send_message); + +int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message, + unsigned long timeout, bool atomic) +{ + DECLARE_COMPLETION_ONSTACK(completion); + int ret; + long t; + + ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic); + if (ret < 0) + return ret; + + if (atomic) { + ret = mbox_flush(rtk->mbox_chan, timeout); + if (ret < 0) + return ret; + + if (try_wait_for_completion(&completion)) + return 0; + + return -ETIME; + } else { + t = wait_for_completion_interruptible_timeout( + &completion, msecs_to_jiffies(timeout)); + if (t < 0) + return t; + else if (t == 0) + return -ETIME; + return 0; + } +} +EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait); + +int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint) +{ + u64 msg; + + if (!test_bit(endpoint, rtk->endpoints)) + return -EINVAL; + if (endpoint >= APPLE_RTKIT_APP_ENDPOINT_START && + !apple_rtkit_is_running(rtk)) + return -EINVAL; + + msg = FIELD_PREP(APPLE_RTKIT_MGMT_STARTEP_EP, endpoint); + msg |= APPLE_RTKIT_MGMT_STARTEP_FLAG; + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_STARTEP, msg); + + return 0; +} +EXPORT_SYMBOL_GPL(apple_rtkit_start_ep); + +static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk) +{ + if (rtk->mbox_name) + rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl, + rtk->mbox_name); + else + rtk->mbox_chan = + mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx); + + if (IS_ERR(rtk->mbox_chan)) + return PTR_ERR(rtk->mbox_chan); + return 0; +} + +static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, + const char *mbox_name, int mbox_idx, + const struct apple_rtkit_ops *ops) +{ + struct apple_rtkit *rtk; + int ret; + + if (!ops) + return ERR_PTR(-EINVAL); + + rtk = kzalloc(sizeof(*rtk), GFP_KERNEL); + if (!rtk) + return ERR_PTR(-ENOMEM); + + rtk->dev = dev; + rtk->cookie = cookie; + rtk->ops = ops; + + init_completion(&rtk->epmap_completion); + init_completion(&rtk->iop_pwr_ack_completion); + init_completion(&rtk->ap_pwr_ack_completion); + + bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS); + set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints); + + rtk->mbox_name = mbox_name; + rtk->mbox_idx = mbox_idx; + rtk->mbox_cl.dev = dev; + rtk->mbox_cl.tx_block = false; + rtk->mbox_cl.knows_txdone = false; + rtk->mbox_cl.rx_callback = &apple_rtkit_rx; + rtk->mbox_cl.tx_done = &apple_rtkit_tx_done; + + rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM, + dev_name(rtk->dev)); + if (!rtk->wq) { + ret = -ENOMEM; + goto free_rtk; + } + + ret = apple_rtkit_request_mbox_chan(rtk); + if (ret) + goto destroy_wq; + + return rtk; + +destroy_wq: + destroy_workqueue(rtk->wq); +free_rtk: + kfree(rtk); + return ERR_PTR(ret); +} + +static int apple_rtkit_wait_for_completion(struct completion *c) +{ + long t; + + t = wait_for_completion_interruptible_timeout(c, + msecs_to_jiffies(1000)); + if (t < 0) + return t; + else if (t == 0) + return -ETIME; + else + return 0; +} + +int apple_rtkit_reinit(struct apple_rtkit *rtk) +{ + /* make sure we don't handle any messages while reinitializing */ + mbox_free_channel(rtk->mbox_chan); + flush_workqueue(rtk->wq); + + apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); + apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer); + + kfree(rtk->syslog_msg_buffer); + + 
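+	/*
+	 * The syslog geometry is renegotiated after restart: a new
+	 * APPLE_RTKIT_SYSLOG_INIT message re-populates these fields and
+	 * allocates a fresh message buffer.
+	 */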
rtk->syslog_msg_buffer = NULL; + rtk->syslog_n_entries = 0; + rtk->syslog_msg_size = 0; + + bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS); + set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints); + + reinit_completion(&rtk->epmap_completion); + reinit_completion(&rtk->iop_pwr_ack_completion); + reinit_completion(&rtk->ap_pwr_ack_completion); + + rtk->crashed = false; + rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF; + rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF; + + return apple_rtkit_request_mbox_chan(rtk); +} +EXPORT_SYMBOL_GPL(apple_rtkit_reinit); + +static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk, + unsigned int state) +{ + u64 msg; + int ret; + + reinit_completion(&rtk->ap_pwr_ack_completion); + + msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE, + msg); + + ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion); + if (ret) + return ret; + + if (rtk->ap_power_state != state) + return -EINVAL; + return 0; +} + +static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk, + unsigned int state) +{ + u64 msg; + int ret; + + reinit_completion(&rtk->iop_pwr_ack_completion); + + msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE, + msg); + + ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion); + if (ret) + return ret; + + if (rtk->iop_power_state != state) + return -EINVAL; + return 0; +} + +int apple_rtkit_boot(struct apple_rtkit *rtk) +{ + int ret; + + if (apple_rtkit_is_running(rtk)) + return 0; + if (rtk->crashed) + return -EINVAL; + + dev_dbg(rtk->dev, "RTKit: waiting for boot to finish\n"); + ret = apple_rtkit_wait_for_completion(&rtk->epmap_completion); + if (ret) + return ret; + if (rtk->boot_result) + return rtk->boot_result; + + dev_dbg(rtk->dev, "RTKit: waiting for IOP power state ACK\n"); + ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion); + if (ret) + return ret; + + return apple_rtkit_set_ap_power_state(rtk, APPLE_RTKIT_PWR_STATE_ON); +} +EXPORT_SYMBOL_GPL(apple_rtkit_boot); + +int apple_rtkit_shutdown(struct apple_rtkit *rtk) +{ + int ret; + + /* if OFF is used here the co-processor will not wake up again */ + ret = apple_rtkit_set_ap_power_state(rtk, + APPLE_RTKIT_PWR_STATE_QUIESCED); + if (ret) + return ret; + + ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_SLEEP); + if (ret) + return ret; + + return apple_rtkit_reinit(rtk); +} +EXPORT_SYMBOL_GPL(apple_rtkit_shutdown); + +int apple_rtkit_quiesce(struct apple_rtkit *rtk) +{ + int ret; + + ret = apple_rtkit_set_ap_power_state(rtk, + APPLE_RTKIT_PWR_STATE_QUIESCED); + if (ret) + return ret; + + ret = apple_rtkit_set_iop_power_state(rtk, + APPLE_RTKIT_PWR_STATE_QUIESCED); + if (ret) + return ret; + + ret = apple_rtkit_reinit(rtk); + if (ret) + return ret; + + rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED; + rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED; + return 0; +} +EXPORT_SYMBOL_GPL(apple_rtkit_quiesce); + +int apple_rtkit_wake(struct apple_rtkit *rtk) +{ + u64 msg; + + if (apple_rtkit_is_running(rtk)) + return -EINVAL; + + reinit_completion(&rtk->iop_pwr_ack_completion); + + /* + * Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot + * will wait for the completion anyway. 
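+ * (apple_rtkit_boot() waits on iop_pwr_ack_completion before raising
+ * the AP power state.)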
+ */ + msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE, + msg); + + return apple_rtkit_boot(rtk); +} +EXPORT_SYMBOL_GPL(apple_rtkit_wake); + +static void apple_rtkit_free(struct apple_rtkit *rtk) +{ + mbox_free_channel(rtk->mbox_chan); + destroy_workqueue(rtk->wq); + + apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); + apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer); + + kfree(rtk->syslog_msg_buffer); + kfree(rtk); +} + +struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie, + const char *mbox_name, int mbox_idx, + const struct apple_rtkit_ops *ops) +{ + struct apple_rtkit *rtk; + int ret; + + rtk = apple_rtkit_init(dev, cookie, mbox_name, mbox_idx, ops); + if (IS_ERR(rtk)) + return rtk; + + ret = devm_add_action_or_reset(dev, (void (*)(void *))apple_rtkit_free, + rtk); + if (ret) + return ERR_PTR(ret); + + return rtk; +} +EXPORT_SYMBOL_GPL(devm_apple_rtkit_init); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Sven Peter "); +MODULE_DESCRIPTION("Apple RTKit driver"); diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c new file mode 100644 index 0000000000..83804b16ad --- /dev/null +++ b/drivers/soc/apple/sart.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SART device driver + * Copyright (C) The Asahi Linux Contributors + * + * Apple SART is a simple address filter for some DMA transactions. + * Regions of physical memory must be added to the SART's allow + * list before any DMA can target these. Unlike a proper + * IOMMU no remapping can be done and special support in the + * consumer driver is required since not all DMA transactions of + * a single device are subject to SART filtering. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define APPLE_SART_MAX_ENTRIES 16 + +/* This is probably a bitfield but the exact meaning of each bit is unknown. 
*/ +#define APPLE_SART_FLAGS_ALLOW 0xff + +/* SARTv2 registers */ +#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx)) +#define APPLE_SART2_CONFIG_FLAGS GENMASK(31, 24) +#define APPLE_SART2_CONFIG_SIZE GENMASK(23, 0) +#define APPLE_SART2_CONFIG_SIZE_SHIFT 12 +#define APPLE_SART2_CONFIG_SIZE_MAX GENMASK(23, 0) + +#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx)) +#define APPLE_SART2_PADDR_SHIFT 12 + +/* SARTv3 registers */ +#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx)) + +#define APPLE_SART3_PADDR(idx) (0x40 + 4 * (idx)) +#define APPLE_SART3_PADDR_SHIFT 12 + +#define APPLE_SART3_SIZE(idx) (0x80 + 4 * (idx)) +#define APPLE_SART3_SIZE_SHIFT 12 +#define APPLE_SART3_SIZE_MAX GENMASK(29, 0) + +struct apple_sart_ops { + void (*get_entry)(struct apple_sart *sart, int index, u8 *flags, + phys_addr_t *paddr, size_t *size); + void (*set_entry)(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr_shifted, size_t size_shifted); + unsigned int size_shift; + unsigned int paddr_shift; + size_t size_max; +}; + +struct apple_sart { + struct device *dev; + void __iomem *regs; + + const struct apple_sart_ops *ops; + + unsigned long protected_entries; + unsigned long used_entries; +}; + +static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags, + phys_addr_t *paddr, size_t *size) +{ + u32 cfg = readl(sart->regs + APPLE_SART2_CONFIG(index)); + phys_addr_t paddr_ = readl(sart->regs + APPLE_SART2_PADDR(index)); + size_t size_ = FIELD_GET(APPLE_SART2_CONFIG_SIZE, cfg); + + *flags = FIELD_GET(APPLE_SART2_CONFIG_FLAGS, cfg); + *size = size_ << APPLE_SART2_CONFIG_SIZE_SHIFT; + *paddr = paddr_ << APPLE_SART2_PADDR_SHIFT; +} + +static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr_shifted, size_t size_shifted) +{ + u32 cfg; + + cfg = FIELD_PREP(APPLE_SART2_CONFIG_FLAGS, flags); + cfg |= FIELD_PREP(APPLE_SART2_CONFIG_SIZE, size_shifted); + + writel(paddr_shifted, sart->regs + APPLE_SART2_PADDR(index)); + writel(cfg, sart->regs + APPLE_SART2_CONFIG(index)); +} + +static struct apple_sart_ops sart_ops_v2 = { + .get_entry = sart2_get_entry, + .set_entry = sart2_set_entry, + .size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT, + .paddr_shift = APPLE_SART2_PADDR_SHIFT, + .size_max = APPLE_SART2_CONFIG_SIZE_MAX, +}; + +static void sart3_get_entry(struct apple_sart *sart, int index, u8 *flags, + phys_addr_t *paddr, size_t *size) +{ + phys_addr_t paddr_ = readl(sart->regs + APPLE_SART3_PADDR(index)); + size_t size_ = readl(sart->regs + APPLE_SART3_SIZE(index)); + + *flags = readl(sart->regs + APPLE_SART3_CONFIG(index)); + *size = size_ << APPLE_SART3_SIZE_SHIFT; + *paddr = paddr_ << APPLE_SART3_PADDR_SHIFT; +} + +static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr_shifted, size_t size_shifted) +{ + writel(paddr_shifted, sart->regs + APPLE_SART3_PADDR(index)); + writel(size_shifted, sart->regs + APPLE_SART3_SIZE(index)); + writel(flags, sart->regs + APPLE_SART3_CONFIG(index)); +} + +static struct apple_sart_ops sart_ops_v3 = { + .get_entry = sart3_get_entry, + .set_entry = sart3_set_entry, + .size_shift = APPLE_SART3_SIZE_SHIFT, + .paddr_shift = APPLE_SART3_PADDR_SHIFT, + .size_max = APPLE_SART3_SIZE_MAX, +}; + +static int apple_sart_probe(struct platform_device *pdev) +{ + int i; + struct apple_sart *sart; + struct device *dev = &pdev->dev; + + sart = devm_kzalloc(dev, sizeof(*sart), GFP_KERNEL); + if (!sart) + return -ENOMEM; + + sart->dev = dev; + sart->ops = of_device_get_match_data(dev); + + sart->regs = 
devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(sart->regs)) + return PTR_ERR(sart->regs); + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + u8 flags; + size_t size; + phys_addr_t paddr; + + sart->ops->get_entry(sart, i, &flags, &paddr, &size); + + if (!flags) + continue; + + dev_dbg(sart->dev, + "SART bootloader entry: index %02d; flags: 0x%02x; paddr: %pa; size: 0x%zx\n", + i, flags, &paddr, size); + set_bit(i, &sart->protected_entries); + } + + platform_set_drvdata(pdev, sart); + return 0; +} + +struct apple_sart *devm_apple_sart_get(struct device *dev) +{ + struct device_node *sart_node; + struct platform_device *sart_pdev; + struct apple_sart *sart; + int ret; + + sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0); + if (!sart_node) + return ERR_PTR(-ENODEV); + + sart_pdev = of_find_device_by_node(sart_node); + of_node_put(sart_node); + + if (!sart_pdev) + return ERR_PTR(-ENODEV); + + sart = dev_get_drvdata(&sart_pdev->dev); + if (!sart) { + put_device(&sart_pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + + ret = devm_add_action_or_reset(dev, (void (*)(void *))put_device, + &sart_pdev->dev); + if (ret) + return ERR_PTR(ret); + + device_link_add(dev, &sart_pdev->dev, + DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER); + + return sart; +} +EXPORT_SYMBOL_GPL(devm_apple_sart_get); + +static int sart_set_entry(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr, size_t size) +{ + if (size & ((1 << sart->ops->size_shift) - 1)) + return -EINVAL; + if (paddr & ((1 << sart->ops->paddr_shift) - 1)) + return -EINVAL; + + paddr >>= sart->ops->size_shift; + size >>= sart->ops->paddr_shift; + + if (size > sart->ops->size_max) + return -EINVAL; + + sart->ops->set_entry(sart, index, flags, paddr, size); + return 0; +} + +int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr, + size_t size) +{ + int i, ret; + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + if (test_bit(i, &sart->protected_entries)) + continue; + if (test_and_set_bit(i, &sart->used_entries)) + continue; + + ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr, + size); + if (ret) { + dev_dbg(sart->dev, + "unable to set entry %d to [%pa, 0x%zx]\n", + i, &paddr, size); + clear_bit(i, &sart->used_entries); + return ret; + } + + dev_dbg(sart->dev, "wrote [%pa, 0x%zx] to %d\n", &paddr, size, + i); + return 0; + } + + dev_warn(sart->dev, + "no free entries left to add [paddr: 0x%pa, size: 0x%zx]\n", + &paddr, size); + + return -EBUSY; +} +EXPORT_SYMBOL_GPL(apple_sart_add_allowed_region); + +int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr, + size_t size) +{ + int i; + + dev_dbg(sart->dev, + "will remove [paddr: %pa, size: 0x%zx] from allowed regions\n", + &paddr, size); + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + u8 eflags; + size_t esize; + phys_addr_t epaddr; + + if (test_bit(i, &sart->protected_entries)) + continue; + + sart->ops->get_entry(sart, i, &eflags, &epaddr, &esize); + + if (epaddr != paddr || esize != size) + continue; + + sart->ops->set_entry(sart, i, 0, 0, 0); + + clear_bit(i, &sart->used_entries); + dev_dbg(sart->dev, "cleared entry %d\n", i); + return 0; + } + + dev_warn(sart->dev, "entry [paddr: 0x%pa, size: 0x%zx] not found\n", + &paddr, size); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(apple_sart_remove_allowed_region); + +static void apple_sart_shutdown(struct platform_device *pdev) +{ + struct apple_sart *sart = dev_get_drvdata(&pdev->dev); + int i; + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + if 
(test_bit(i, &sart->protected_entries)) + continue; + + sart->ops->set_entry(sart, i, 0, 0, 0); + } +} + +static const struct of_device_id apple_sart_of_match[] = { + { + .compatible = "apple,t6000-sart", + .data = &sart_ops_v3, + }, + { + .compatible = "apple,t8103-sart", + .data = &sart_ops_v2, + }, + {} +}; +MODULE_DEVICE_TABLE(of, apple_sart_of_match); + +static struct platform_driver apple_sart_driver = { + .driver = { + .name = "apple-sart", + .of_match_table = apple_sart_of_match, + }, + .probe = apple_sart_probe, + .shutdown = apple_sart_shutdown, +}; +module_platform_driver(apple_sart_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Sven Peter "); +MODULE_DESCRIPTION("Apple SART driver"); diff --git a/drivers/soc/fujitsu/Kconfig b/drivers/soc/fujitsu/Kconfig new file mode 100644 index 0000000000..987731e806 --- /dev/null +++ b/drivers/soc/fujitsu/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "fujitsu SoC drivers" + +config A64FX_DIAG + bool "A64FX diag driver" + depends on ARM64 + depends on ACPI + help + Say Y here if you want to enable diag interrupt on Fujitsu A64FX. + This driver enables BMC's diagnostic requests and enables + A64FX-specific interrupts. This allows administrators to obtain + kernel dumps via diagnostic requests using ipmitool, etc. + + If unsure, say N. + +endmenu diff --git a/drivers/soc/fujitsu/Makefile b/drivers/soc/fujitsu/Makefile new file mode 100644 index 0000000000..945bc1c14a --- /dev/null +++ b/drivers/soc/fujitsu/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_A64FX_DIAG) += a64fx-diag.o diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c new file mode 100644 index 0000000000..d87f348427 --- /dev/null +++ b/drivers/soc/fujitsu/a64fx-diag.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * A64FX diag driver. + * Copyright (c) 2022 Fujitsu Ltd. 
+ */ + +#include +#include +#include +#include +#include + +#define A64FX_DIAG_IRQ 1 +#define BMC_DIAG_INTERRUPT_ENABLE 0x40 +#define BMC_DIAG_INTERRUPT_STATUS 0x44 +#define BMC_DIAG_INTERRUPT_MASK BIT(31) + +struct a64fx_diag_priv { + void __iomem *mmsc_reg_base; + int irq; + bool has_nmi; +}; + +static irqreturn_t a64fx_diag_handler_nmi(int irq, void *dev_id) +{ + nmi_panic(NULL, "a64fx_diag: interrupt received\n"); + + return IRQ_HANDLED; +} + +static irqreturn_t a64fx_diag_handler_irq(int irq, void *dev_id) +{ + panic("a64fx_diag: interrupt received\n"); + + return IRQ_HANDLED; +} + +static void a64fx_diag_interrupt_clear(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_status_reg_addr; + u32 mmsc; + + diag_status_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_STATUS; + mmsc = readl(diag_status_reg_addr); + if (mmsc & BMC_DIAG_INTERRUPT_MASK) + writel(BMC_DIAG_INTERRUPT_MASK, diag_status_reg_addr); +} + +static void a64fx_diag_interrupt_enable(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_enable_reg_addr; + u32 mmsc; + + diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE; + mmsc = readl(diag_enable_reg_addr); + if (!(mmsc & BMC_DIAG_INTERRUPT_MASK)) { + mmsc |= BMC_DIAG_INTERRUPT_MASK; + writel(mmsc, diag_enable_reg_addr); + } +} + +static void a64fx_diag_interrupt_disable(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_enable_reg_addr; + u32 mmsc; + + diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE; + mmsc = readl(diag_enable_reg_addr); + if (mmsc & BMC_DIAG_INTERRUPT_MASK) { + mmsc &= ~BMC_DIAG_INTERRUPT_MASK; + writel(mmsc, diag_enable_reg_addr); + } +} + +static int a64fx_diag_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct a64fx_diag_priv *priv; + unsigned long irq_flags; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + priv->mmsc_reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->mmsc_reg_base)) + return PTR_ERR(priv->mmsc_reg_base); + + priv->irq = platform_get_irq(pdev, A64FX_DIAG_IRQ); + if (priv->irq < 0) + return priv->irq; + + platform_set_drvdata(pdev, priv); + + irq_flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_AUTOEN | + IRQF_NO_THREAD; + ret = request_nmi(priv->irq, &a64fx_diag_handler_nmi, irq_flags, + "a64fx_diag_nmi", NULL); + if (ret) { + ret = request_irq(priv->irq, &a64fx_diag_handler_irq, + irq_flags, "a64fx_diag_irq", NULL); + if (ret) { + dev_err(dev, "cannot register IRQ %d\n", ret); + return ret; + } + enable_irq(priv->irq); + } else { + enable_nmi(priv->irq); + priv->has_nmi = true; + } + + a64fx_diag_interrupt_clear(priv); + a64fx_diag_interrupt_enable(priv); + + return 0; +} + +static int a64fx_diag_remove(struct platform_device *pdev) +{ + struct a64fx_diag_priv *priv = platform_get_drvdata(pdev); + + a64fx_diag_interrupt_disable(priv); + a64fx_diag_interrupt_clear(priv); + + if (priv->has_nmi) + free_nmi(priv->irq, NULL); + else + free_irq(priv->irq, NULL); + + return 0; +} + +static const struct acpi_device_id a64fx_diag_acpi_match[] = { + { "FUJI2007", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, a64fx_diag_acpi_match); + + +static struct platform_driver a64fx_diag_driver = { + .driver = { + .name = "a64fx_diag_driver", + .acpi_match_table = ACPI_PTR(a64fx_diag_acpi_match), + }, + .probe = a64fx_diag_probe, + .remove = a64fx_diag_remove, +}; + +module_platform_driver(a64fx_diag_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Hitomi Hasegawa "); 
+MODULE_DESCRIPTION("A64FX diag driver"); diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c new file mode 100644 index 0000000000..4ca2ede687 --- /dev/null +++ b/drivers/soc/imx/imx8mp-blk-ctrl.c @@ -0,0 +1,696 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Copyright 2022 Pengutronix, Lucas Stach + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define GPR_REG0 0x0 +#define PCIE_CLOCK_MODULE_EN BIT(0) +#define USB_CLOCK_MODULE_EN BIT(1) + +struct imx8mp_blk_ctrl_domain; + +struct imx8mp_blk_ctrl { + struct device *dev; + struct notifier_block power_nb; + struct device *bus_power_dev; + struct regmap *regmap; + struct imx8mp_blk_ctrl_domain *domains; + struct genpd_onecell_data onecell_data; + void (*power_off) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain); + void (*power_on) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain); +}; + +struct imx8mp_blk_ctrl_domain_data { + const char *name; + const char * const *clk_names; + int num_clks; + const char *gpc_name; +}; + +#define DOMAIN_MAX_CLKS 2 + +struct imx8mp_blk_ctrl_domain { + struct generic_pm_domain genpd; + const struct imx8mp_blk_ctrl_domain_data *data; + struct clk_bulk_data clks[DOMAIN_MAX_CLKS]; + struct device *power_dev; + struct imx8mp_blk_ctrl *bc; + int id; +}; + +struct imx8mp_blk_ctrl_data { + int max_reg; + notifier_fn_t power_notifier_fn; + void (*power_off) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain); + void (*power_on) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain); + const struct imx8mp_blk_ctrl_domain_data *domains; + int num_domains; +}; + +static inline struct imx8mp_blk_ctrl_domain * +to_imx8mp_blk_ctrl_domain(struct generic_pm_domain *genpd) +{ + return container_of(genpd, struct imx8mp_blk_ctrl_domain, genpd); +} + +static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc, + struct imx8mp_blk_ctrl_domain *domain) +{ + switch (domain->id) { + case IMX8MP_HSIOBLK_PD_USB: + regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN); + break; + case IMX8MP_HSIOBLK_PD_PCIE: + regmap_set_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN); + break; + default: + break; + } +} + +static void imx8mp_hsio_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc, + struct imx8mp_blk_ctrl_domain *domain) +{ + switch (domain->id) { + case IMX8MP_HSIOBLK_PD_USB: + regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN); + break; + case IMX8MP_HSIOBLK_PD_PCIE: + regmap_clear_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN); + break; + default: + break; + } +} + +static int imx8mp_hsio_power_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl, + power_nb); + struct clk_bulk_data *usb_clk = bc->domains[IMX8MP_HSIOBLK_PD_USB].clks; + int num_clks = bc->domains[IMX8MP_HSIOBLK_PD_USB].data->num_clks; + int ret; + + switch (action) { + case GENPD_NOTIFY_ON: + /* + * enable USB clock for a moment for the power-on ADB handshake + * to proceed + */ + ret = clk_bulk_prepare_enable(num_clks, usb_clk); + if (ret) + return NOTIFY_BAD; + regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN); + + udelay(5); + + regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN); + clk_bulk_disable_unprepare(num_clks, usb_clk); + break; + case GENPD_NOTIFY_PRE_OFF: + /* enable USB clock for the power-down ADB handshake to work */ + ret = clk_bulk_prepare_enable(num_clks, 
usb_clk); + if (ret) + return NOTIFY_BAD; + + regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN); + break; + case GENPD_NOTIFY_OFF: + clk_bulk_disable_unprepare(num_clks, usb_clk); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = { + [IMX8MP_HSIOBLK_PD_USB] = { + .name = "hsioblk-usb", + .clk_names = (const char *[]){ "usb" }, + .num_clks = 1, + .gpc_name = "usb", + }, + [IMX8MP_HSIOBLK_PD_USB_PHY1] = { + .name = "hsioblk-usb-phy1", + .gpc_name = "usb-phy1", + }, + [IMX8MP_HSIOBLK_PD_USB_PHY2] = { + .name = "hsioblk-usb-phy2", + .gpc_name = "usb-phy2", + }, + [IMX8MP_HSIOBLK_PD_PCIE] = { + .name = "hsioblk-pcie", + .clk_names = (const char *[]){ "pcie" }, + .num_clks = 1, + .gpc_name = "pcie", + }, + [IMX8MP_HSIOBLK_PD_PCIE_PHY] = { + .name = "hsioblk-pcie-phy", + .gpc_name = "pcie-phy", + }, +}; + +static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = { + .max_reg = 0x24, + .power_on = imx8mp_hsio_blk_ctrl_power_on, + .power_off = imx8mp_hsio_blk_ctrl_power_off, + .power_notifier_fn = imx8mp_hsio_power_notifier, + .domains = imx8mp_hsio_domain_data, + .num_domains = ARRAY_SIZE(imx8mp_hsio_domain_data), +}; + +#define HDMI_RTX_RESET_CTL0 0x20 +#define HDMI_RTX_CLK_CTL0 0x40 +#define HDMI_RTX_CLK_CTL1 0x50 +#define HDMI_RTX_CLK_CTL2 0x60 +#define HDMI_RTX_CLK_CTL3 0x70 +#define HDMI_RTX_CLK_CTL4 0x80 +#define HDMI_TX_CONTROL0 0x200 + +static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc, + struct imx8mp_blk_ctrl_domain *domain) +{ + switch (domain->id) { + case IMX8MP_HDMIBLK_PD_IRQSTEER: + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16)); + break; + case IMX8MP_HDMIBLK_PD_LCDIF: + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, + BIT(7) | BIT(16) | BIT(17) | BIT(18) | + BIT(19) | BIT(20)); + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, + BIT(4) | BIT(5) | BIT(6)); + break; + case IMX8MP_HDMIBLK_PD_PAI: + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18)); + break; + case IMX8MP_HDMIBLK_PD_PVI: + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22)); + break; + case IMX8MP_HDMIBLK_PD_TRNG: + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20)); + break; + case IMX8MP_HDMIBLK_PD_HDMI_TX: + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, + BIT(2) | BIT(4) | BIT(5)); + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, + BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | + BIT(18) | BIT(19) | BIT(20) | BIT(21)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, + BIT(7) | BIT(10) | BIT(11)); + regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1)); + break; + case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY: + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12)); + regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3)); + break; + default: + break; + } +} + +static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc, + struct imx8mp_blk_ctrl_domain *domain) +{ + switch (domain->id) { + case IMX8MP_HDMIBLK_PD_IRQSTEER: + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9)); + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16)); + break; + case 
IMX8MP_HDMIBLK_PD_LCDIF: + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, + BIT(4) | BIT(5) | BIT(6)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, + BIT(7) | BIT(16) | BIT(17) | BIT(18) | + BIT(19) | BIT(20)); + break; + case IMX8MP_HDMIBLK_PD_PAI: + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17)); + break; + case IMX8MP_HDMIBLK_PD_PVI: + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28)); + break; + case IMX8MP_HDMIBLK_PD_TRNG: + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30)); + break; + case IMX8MP_HDMIBLK_PD_HDMI_TX: + regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1)); + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, + BIT(7) | BIT(10) | BIT(11)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, + BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | + BIT(18) | BIT(19) | BIT(20) | BIT(21)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, + BIT(2) | BIT(4) | BIT(5)); + break; + case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY: + regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3)); + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12)); + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24)); + break; + default: + break; + } +} + +static int imx8mp_hdmi_power_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl, + power_nb); + + if (action != GENPD_NOTIFY_ON) + return NOTIFY_OK; + + /* + * Contrary to other blk-ctrls the reset and clock don't clear when the + * power domain is powered down. To ensure the proper reset pulsing, + * first clear them all to asserted state, then enable the bus clocks + * and then release the ADB reset. + */ + regmap_write(bc->regmap, HDMI_RTX_RESET_CTL0, 0x0); + regmap_write(bc->regmap, HDMI_RTX_CLK_CTL0, 0x0); + regmap_write(bc->regmap, HDMI_RTX_CLK_CTL1, 0x0); + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, + BIT(0) | BIT(1) | BIT(10)); + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(0)); + + /* + * On power up we have no software backchannel to the GPC to + * wait for the ADB handshake to happen, so we just delay for a + * bit. On power down the GPC driver waits for the handshake. 
+ */ + udelay(5); + + return NOTIFY_OK; +} + +static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = { + [IMX8MP_HDMIBLK_PD_IRQSTEER] = { + .name = "hdmiblk-irqsteer", + .clk_names = (const char *[]){ "apb" }, + .num_clks = 1, + .gpc_name = "irqsteer", + }, + [IMX8MP_HDMIBLK_PD_LCDIF] = { + .name = "hdmiblk-lcdif", + .clk_names = (const char *[]){ "axi", "apb" }, + .num_clks = 2, + .gpc_name = "lcdif", + }, + [IMX8MP_HDMIBLK_PD_PAI] = { + .name = "hdmiblk-pai", + .clk_names = (const char *[]){ "apb" }, + .num_clks = 1, + .gpc_name = "pai", + }, + [IMX8MP_HDMIBLK_PD_PVI] = { + .name = "hdmiblk-pvi", + .clk_names = (const char *[]){ "apb" }, + .num_clks = 1, + .gpc_name = "pvi", + }, + [IMX8MP_HDMIBLK_PD_TRNG] = { + .name = "hdmiblk-trng", + .clk_names = (const char *[]){ "apb" }, + .num_clks = 1, + .gpc_name = "trng", + }, + [IMX8MP_HDMIBLK_PD_HDMI_TX] = { + .name = "hdmiblk-hdmi-tx", + .clk_names = (const char *[]){ "apb", "ref_266m" }, + .num_clks = 2, + .gpc_name = "hdmi-tx", + }, + [IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = { + .name = "hdmiblk-hdmi-tx-phy", + .clk_names = (const char *[]){ "apb", "ref_24m" }, + .num_clks = 2, + .gpc_name = "hdmi-tx-phy", + }, +}; + +static const struct imx8mp_blk_ctrl_data imx8mp_hdmi_blk_ctl_dev_data = { + .max_reg = 0x23c, + .power_on = imx8mp_hdmi_blk_ctrl_power_on, + .power_off = imx8mp_hdmi_blk_ctrl_power_off, + .power_notifier_fn = imx8mp_hdmi_power_notifier, + .domains = imx8mp_hdmi_domain_data, + .num_domains = ARRAY_SIZE(imx8mp_hdmi_domain_data), +}; + +static int imx8mp_blk_ctrl_power_on(struct generic_pm_domain *genpd) +{ + struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd); + const struct imx8mp_blk_ctrl_domain_data *data = domain->data; + struct imx8mp_blk_ctrl *bc = domain->bc; + int ret; + + /* make sure bus domain is awake */ + ret = pm_runtime_resume_and_get(bc->bus_power_dev); + if (ret < 0) { + dev_err(bc->dev, "failed to power up bus domain\n"); + return ret; + } + + /* enable upstream clocks */ + ret = clk_bulk_prepare_enable(data->num_clks, domain->clks); + if (ret) { + dev_err(bc->dev, "failed to enable clocks\n"); + goto bus_put; + } + + /* domain specific blk-ctrl manipulation */ + bc->power_on(bc, domain); + + /* power up upstream GPC domain */ + ret = pm_runtime_resume_and_get(domain->power_dev); + if (ret < 0) { + dev_err(bc->dev, "failed to power up peripheral domain\n"); + goto clk_disable; + } + + clk_bulk_disable_unprepare(data->num_clks, domain->clks); + + return 0; + +clk_disable: + clk_bulk_disable_unprepare(data->num_clks, domain->clks); +bus_put: + pm_runtime_put(bc->bus_power_dev); + + return ret; +} + +static int imx8mp_blk_ctrl_power_off(struct generic_pm_domain *genpd) +{ + struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd); + const struct imx8mp_blk_ctrl_domain_data *data = domain->data; + struct imx8mp_blk_ctrl *bc = domain->bc; + int ret; + + ret = clk_bulk_prepare_enable(data->num_clks, domain->clks); + if (ret) { + dev_err(bc->dev, "failed to enable clocks\n"); + return ret; + } + + /* domain specific blk-ctrl manipulation */ + bc->power_off(bc, domain); + + clk_bulk_disable_unprepare(data->num_clks, domain->clks); + + /* power down upstream GPC domain */ + pm_runtime_put(domain->power_dev); + + /* allow bus domain to suspend */ + pm_runtime_put(bc->bus_power_dev); + + return 0; +} + +static struct generic_pm_domain * +imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data) +{ + struct genpd_onecell_data *onecell_data = data; + unsigned int 
index = args->args[0]; + + if (args->args_count != 1 || + index >= onecell_data->num_domains) + return ERR_PTR(-EINVAL); + + return onecell_data->domains[index]; +} + +static struct lock_class_key blk_ctrl_genpd_lock_class; + +static int imx8mp_blk_ctrl_probe(struct platform_device *pdev) +{ + const struct imx8mp_blk_ctrl_data *bc_data; + struct device *dev = &pdev->dev; + struct imx8mp_blk_ctrl *bc; + void __iomem *base; + int num_domains, i, ret; + + struct regmap_config regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + }; + + bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL); + if (!bc) + return -ENOMEM; + + bc->dev = dev; + + bc_data = of_device_get_match_data(dev); + num_domains = bc_data->num_domains; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap_config.max_register = bc_data->max_reg; + bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config); + if (IS_ERR(bc->regmap)) + return dev_err_probe(dev, PTR_ERR(bc->regmap), + "failed to init regmap\n"); + + bc->domains = devm_kcalloc(dev, num_domains, + sizeof(struct imx8mp_blk_ctrl_domain), + GFP_KERNEL); + if (!bc->domains) + return -ENOMEM; + + bc->onecell_data.num_domains = num_domains; + bc->onecell_data.xlate = imx8m_blk_ctrl_xlate; + bc->onecell_data.domains = + devm_kcalloc(dev, num_domains, + sizeof(struct generic_pm_domain *), GFP_KERNEL); + if (!bc->onecell_data.domains) + return -ENOMEM; + + bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus"); + if (IS_ERR(bc->bus_power_dev)) + return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev), + "failed to attach bus power domain\n"); + + bc->power_off = bc_data->power_off; + bc->power_on = bc_data->power_on; + + for (i = 0; i < num_domains; i++) { + const struct imx8mp_blk_ctrl_domain_data *data = &bc_data->domains[i]; + struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i]; + int j; + + domain->data = data; + + for (j = 0; j < data->num_clks; j++) + domain->clks[j].id = data->clk_names[j]; + + ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks); + if (ret) { + dev_err_probe(dev, ret, "failed to get clock\n"); + goto cleanup_pds; + } + + domain->power_dev = + dev_pm_domain_attach_by_name(dev, data->gpc_name); + if (IS_ERR(domain->power_dev)) { + dev_err_probe(dev, PTR_ERR(domain->power_dev), + "failed to attach power domain %s\n", + data->gpc_name); + ret = PTR_ERR(domain->power_dev); + goto cleanup_pds; + } + dev_set_name(domain->power_dev, "%s", data->name); + + domain->genpd.name = data->name; + domain->genpd.power_on = imx8mp_blk_ctrl_power_on; + domain->genpd.power_off = imx8mp_blk_ctrl_power_off; + domain->bc = bc; + domain->id = i; + + ret = pm_genpd_init(&domain->genpd, NULL, true); + if (ret) { + dev_err_probe(dev, ret, "failed to init power domain\n"); + dev_pm_domain_detach(domain->power_dev, true); + goto cleanup_pds; + } + + /* + * We use runtime PM to trigger power on/off of the upstream GPC + * domain, as a strict hierarchical parent/child power domain + * setup doesn't allow us to meet the sequencing requirements. + * This means we have nested locking of genpd locks, without the + * nesting being visible at the genpd level, so we need a + * separate lock class to make lockdep aware of the fact that + * these are separate domain locks that can be nested without a + * self-deadlock.
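 + * (The lockdep_set_class() call below assigns each blk-ctrl genpd's + * mlock to the shared blk_ctrl_genpd_lock_class, keeping it distinct + * from the default genpd lock class.)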
+ */ + lockdep_set_class(&domain->genpd.mlock, + &blk_ctrl_genpd_lock_class); + + bc->onecell_data.domains[i] = &domain->genpd; + } + + ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data); + if (ret) { + dev_err_probe(dev, ret, "failed to add power domain provider\n"); + goto cleanup_pds; + } + + bc->power_nb.notifier_call = bc_data->power_notifier_fn; + ret = dev_pm_genpd_add_notifier(bc->bus_power_dev, &bc->power_nb); + if (ret) { + dev_err_probe(dev, ret, "failed to add power notifier\n"); + goto cleanup_provider; + } + + dev_set_drvdata(dev, bc); + + return 0; + +cleanup_provider: + of_genpd_del_provider(dev->of_node); +cleanup_pds: + for (i--; i >= 0; i--) { + pm_genpd_remove(&bc->domains[i].genpd); + dev_pm_domain_detach(bc->domains[i].power_dev, true); + } + + dev_pm_domain_detach(bc->bus_power_dev, true); + + return ret; +} + +static int imx8mp_blk_ctrl_remove(struct platform_device *pdev) +{ + struct imx8mp_blk_ctrl *bc = dev_get_drvdata(&pdev->dev); + int i; + + of_genpd_del_provider(pdev->dev.of_node); + + for (i = 0; i < bc->onecell_data.num_domains; i++) { + struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i]; + + pm_genpd_remove(&domain->genpd); + dev_pm_domain_detach(domain->power_dev, true); + } + + dev_pm_genpd_remove_notifier(bc->bus_power_dev); + + dev_pm_domain_detach(bc->bus_power_dev, true); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int imx8mp_blk_ctrl_suspend(struct device *dev) +{ + struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev); + int ret, i; + + /* + * This may look strange, but is done so the generic PM_SLEEP code + * can power down our domains and more importantly power them up again + * after resume, without tripping over our usage of runtime PM to + * control the upstream GPC domains. Things happen in the right order + * in the system suspend/resume paths due to the device parent/child + * hierarchy.
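 + * Each reference taken here with pm_runtime_get_sync() is dropped + * again by the matching pm_runtime_put() calls in + * imx8mp_blk_ctrl_resume() below.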
+ */ + ret = pm_runtime_get_sync(bc->bus_power_dev); + if (ret < 0) { + pm_runtime_put_noidle(bc->bus_power_dev); + return ret; + } + + for (i = 0; i < bc->onecell_data.num_domains; i++) { + struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i]; + + ret = pm_runtime_get_sync(domain->power_dev); + if (ret < 0) { + pm_runtime_put_noidle(domain->power_dev); + goto out_fail; + } + } + + return 0; + +out_fail: + for (i--; i >= 0; i--) + pm_runtime_put(bc->domains[i].power_dev); + + pm_runtime_put(bc->bus_power_dev); + + return ret; +} + +static int imx8mp_blk_ctrl_resume(struct device *dev) +{ + struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev); + int i; + + for (i = 0; i < bc->onecell_data.num_domains; i++) + pm_runtime_put(bc->domains[i].power_dev); + + pm_runtime_put(bc->bus_power_dev); + + return 0; +} +#endif + +static const struct dev_pm_ops imx8mp_blk_ctrl_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(imx8mp_blk_ctrl_suspend, + imx8mp_blk_ctrl_resume) +}; + +static const struct of_device_id imx8mp_blk_ctrl_of_match[] = { + { + .compatible = "fsl,imx8mp-hsio-blk-ctrl", + .data = &imx8mp_hsio_blk_ctl_dev_data, + }, { + .compatible = "fsl,imx8mp-hdmi-blk-ctrl", + .data = &imx8mp_hdmi_blk_ctl_dev_data, + }, { + /* Sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, imx8mp_blk_ctrl_of_match); + +static struct platform_driver imx8mp_blk_ctrl_driver = { + .probe = imx8mp_blk_ctrl_probe, + .remove = imx8mp_blk_ctrl_remove, + .driver = { + .name = "imx8mp-blk-ctrl", + .pm = &imx8mp_blk_ctrl_pm_ops, + .of_match_table = imx8mp_blk_ctrl_of_match, + }, +}; +module_platform_driver(imx8mp_blk_ctrl_driver); diff --git a/drivers/soc/mediatek/mt6795-pm-domains.h b/drivers/soc/mediatek/mt6795-pm-domains.h new file mode 100644 index 0000000000..ef07c9dfdd --- /dev/null +++ b/drivers/soc/mediatek/mt6795-pm-domains.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT6795_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT6795_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include <dt-bindings/power/mt6795-power.h> + +/* + * MT6795 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = { + [MT6795_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = PWR_STATUS_VDEC, + .ctl_offs = SPM_VDE_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + }, + [MT6795_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = PWR_STATUS_VENC, + .ctl_offs = SPM_VEN_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_ISP] = { + .name = "isp", + .sta_mask = PWR_STATUS_ISP, + .ctl_offs = SPM_ISP_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MM] = { + .name = "mm", + .sta_mask = PWR_STATUS_DISP, + .ctl_offs = SPM_DIS_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 | + MT8173_TOP_AXI_PROT_EN_MM_M1), + }, + }, + [MT6795_POWER_DOMAIN_MJC] = { + .name = "mjc", + .sta_mask = BIT(20), + .ctl_offs = 0x298, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, +
[MT6795_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = PWR_STATUS_AUDIO, + .ctl_offs = SPM_AUDIO_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_MFG_ASYNC] = { + .name = "mfg_async", + .sta_mask = PWR_STATUS_MFG_ASYNC, + .ctl_offs = SPM_MFG_ASYNC_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = 0, + }, + [MT6795_POWER_DOMAIN_MFG_2D] = { + .name = "mfg_2d", + .sta_mask = PWR_STATUS_MFG_2D, + .ctl_offs = SPM_MFG_2D_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MFG] = { + .name = "mfg", + .sta_mask = PWR_STATUS_MFG, + .ctl_offs = SPM_MFG_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(13, 8), + .sram_pdn_ack_bits = GENMASK(21, 16), + .bp_infracfg = { + BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S | + MT8173_TOP_AXI_PROT_EN_MFG_M0 | + MT8173_TOP_AXI_PROT_EN_MFG_M1 | + MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT), + }, + }, +}; + +static const struct scpsys_soc_data mt6795_scpsys_data = { + .domains_data = scpsys_domain_data_mt6795, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt6795), +}; + +#endif /* __SOC_MEDIATEK_MT6795_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h new file mode 100644 index 0000000000..abfe94a302 --- /dev/null +++ b/drivers/soc/mediatek/mt8195-mmsys.h @@ -0,0 +1,370 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT8195_MMSYS_H +#define __SOC_MEDIATEK_MT8195_MMSYS_H + +#define MT8195_VDO0_OVL_MOUT_EN 0xf14 +#define MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0) +#define MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1) +#define MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1 BIT(2) +#define MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1 BIT(4) +#define MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1 BIT(5) +#define MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0 BIT(6) + +#define MT8195_VDO0_SEL_IN 0xf34 +#define MT8195_SEL_IN_VPP_MERGE_FROM_MASK GENMASK(1, 0) +#define MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT (0 << 0) +#define MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1 (1 << 0) +#define MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0 (2 << 0) +#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK GENMASK(4, 4) +#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0 (0 << 4) +#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE (1 << 4) +#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK GENMASK(5, 5) +#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1 (0 << 5) +#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE (1 << 5) +#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK GENMASK(8, 8) +#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE (0 << 8) +#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT (1 << 8) +#define MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK GENMASK(9, 9) +#define MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT (0 << 9) +#define MT8195_SEL_IN_DP_INTF0_FROM_MASK GENMASK(13, 12) +#define MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT (0 << 0) +#define MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE (1 << 12) +#define MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0 (2 << 12) +#define MT8195_SEL_IN_DSI0_FROM_MASK GENMASK(16, 16) +#define MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT (0 << 16) +#define MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0 (1 << 16) +#define 
MT8195_SEL_IN_DSI1_FROM_MASK GENMASK(17, 17) +#define MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT (0 << 17) +#define MT8195_SEL_IN_DSI1_FROM_VPP_MERGE (1 << 17) +#define MT8195_SEL_IN_DISP_WDMA1_FROM_MASK GENMASK(20, 20) +#define MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1 (0 << 20) +#define MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE (1 << 20) +#define MT8195_SEL_IN_DSC_WRAP1_FROM_MASK GENMASK(21, 21) +#define MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN (0 << 21) +#define MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1 (1 << 21) +#define MT8195_SEL_IN_DISP_WDMA0_FROM_MASK GENMASK(22, 22) +#define MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0 (0 << 22) + +#define MT8195_VDO0_SEL_OUT 0xf38 +#define MT8195_SOUT_DISP_DITHER0_TO_MASK BIT(0) +#define MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN (0 << 0) +#define MT8195_SOUT_DISP_DITHER0_TO_DSI0 (1 << 0) +#define MT8195_SOUT_DISP_DITHER1_TO_MASK GENMASK(2, 1) +#define MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN (0 << 1) +#define MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE (1 << 1) +#define MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT (2 << 1) +#define MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK GENMASK(4, 4) +#define MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE (0 << 4) +#define MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0 (1 << 4) +#define MT8195_SOUT_VPP_MERGE_TO_MASK GENMASK(10, 8) +#define MT8195_SOUT_VPP_MERGE_TO_DSI1 (0 << 8) +#define MT8195_SOUT_VPP_MERGE_TO_DP_INTF0 (1 << 8) +#define MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 (2 << 8) +#define MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1 (3 << 8) +#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN (4 << 8) +#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK GENMASK(11, 11) +#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN (0 << 11) +#define MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK GENMASK(13, 12) +#define MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0 (0 << 12) +#define MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 (1 << 12) +#define MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE (2 << 12) +#define MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK GENMASK(17, 16) +#define MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1 (0 << 16) +#define MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0 (1 << 16) +#define MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 (2 << 16) +#define MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE (3 << 16) + +static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = { + { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0, + MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0 + }, { + DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0, + MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0, + MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0 + }, { + DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1, + MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1, + MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1 + }, { + DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1, + MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1, + MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1 + }, { + DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1, + MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1, + MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1 + }, { + DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0, + MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0, + MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK, + MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK, + MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1 + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0, + 
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK, + MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0 + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK, + MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK, + MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK, + MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK, + MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK, + MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK, + MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0 + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK, + MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK, + MT8195_SEL_IN_DSI1_FROM_VPP_MERGE + }, { + DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK, + MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK, + MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1, + 
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1 + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1 + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1 + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK, + MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1 + }, { + DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0, + MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK, + MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0 + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK, + MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK, + MT8195_SOUT_DISP_DITHER0_TO_DSI0 + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK, + MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK, + MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK, + MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK, + MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK, + MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK, + MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK, + MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK, + MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK, + MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK, + MT8195_SOUT_VPP_MERGE_TO_DSI1 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0, + 
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK, + MT8195_SOUT_VPP_MERGE_TO_DP_INTF0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK, + MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK, + MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK, + MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK, + MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK, + MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK, + MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1 + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0 + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 + }, { + DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0, + MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK, + MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE + } +}; + +#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c new file mode 100644 index 0000000000..dee8664a12 --- /dev/null +++ b/drivers/soc/mediatek/mtk-svs.c @@ -0,0 +1,2403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 MediaTek Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/cpuidle.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> +#include <linux/thermal.h> + +/* svs bank 1-line software id */ +#define SVSB_CPU_LITTLE BIT(0) +#define SVSB_CPU_BIG BIT(1) +#define SVSB_CCI BIT(2) +#define SVSB_GPU BIT(3) + +/* svs bank 2-line type */ +#define SVSB_LOW BIT(8) +#define SVSB_HIGH BIT(9) + +/* svs bank mode support */ +#define SVSB_MODE_ALL_DISABLE 0 +#define SVSB_MODE_INIT01 BIT(1) +#define SVSB_MODE_INIT02 BIT(2) +#define SVSB_MODE_MON BIT(3) + +/* svs bank volt flags */ +#define SVSB_INIT01_PD_REQ BIT(0) +#define SVSB_INIT01_VOLT_IGNORE BIT(1) +#define SVSB_INIT01_VOLT_INC_ONLY BIT(2) +#define SVSB_MON_VOLT_IGNORE BIT(16) +#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24) + +/* svs bank register common configuration */ +#define SVSB_DET_MAX 0xffff +#define SVSB_DET_WINDOW 0xa28 +#define SVSB_DTHI 0x1 +#define SVSB_DTLO 0xfe +#define SVSB_EN_INIT01 0x1 +#define SVSB_EN_INIT02 0x5 +#define SVSB_EN_MON 0x2 +#define SVSB_EN_OFF 0x0 +#define SVSB_INTEN_INIT0x 0x00005f01 +#define SVSB_INTEN_MONVOPEN 0x00ff0000 +#define SVSB_INTSTS_CLEAN 0x00ffffff +#define SVSB_INTSTS_COMPLETE 0x1 +#define SVSB_INTSTS_MONVOP 0x00ff0000 +#define SVSB_RUNCONFIG_DEFAULT 0x80000000 + +/* svs bank related setting */ +#define BITS8 8 +#define MAX_OPP_ENTRIES 16 +#define REG_BYTES 4 +#define SVSB_DC_SIGNED_BIT BIT(15) +#define SVSB_DET_CLK_EN BIT(31) +#define SVSB_TEMP_LOWER_BOUND 0xb2 +#define SVSB_TEMP_UPPER_BOUND 0x64 + +static DEFINE_SPINLOCK(svs_lock); + +#define debug_fops_ro(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define debug_fops_rw(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .write = svs_##name##_debug_write, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops} + +/** + * enum svsb_phase - svs bank phase enumeration + * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition + * @SVSB_PHASE_INIT01: svs bank basic init for data calibration + * @SVSB_PHASE_INIT02: svs bank can provide voltages to opp table + * @SVSB_PHASE_MON: svs bank can provide voltages with thermal effect + * @SVSB_PHASE_MAX: total number of svs bank phase (debug purpose) + * + * Each svs bank has its own independent phase, and we enable each svs bank by + * running its phases in order. However, when an svs bank encounters an + * unexpected condition, it will fire an irq (PHASE_ERROR) to inform svs software.
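 + * The current phase also selects which register snapshot + * (reg_data[phase][]) is saved for the debugfs dump node.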
+ * + * svs bank general phase-enabled order: + * SVSB_PHASE_INIT01 -> SVSB_PHASE_INIT02 -> SVSB_PHASE_MON + */ +enum svsb_phase { + SVSB_PHASE_ERROR = 0, + SVSB_PHASE_INIT01, + SVSB_PHASE_INIT02, + SVSB_PHASE_MON, + SVSB_PHASE_MAX, +}; + +enum svs_reg_index { + DESCHAR = 0, + TEMPCHAR, + DETCHAR, + AGECHAR, + DCCONFIG, + AGECONFIG, + FREQPCT30, + FREQPCT74, + LIMITVALS, + VBOOT, + DETWINDOW, + CONFIG, + TSCALCS, + RUNCONFIG, + SVSEN, + INIT2VALS, + DCVALUES, + AGEVALUES, + VOP30, + VOP74, + TEMP, + INTSTS, + INTSTSRAW, + INTEN, + CHKINT, + CHKSHIFT, + STATUS, + VDESIGN30, + VDESIGN74, + DVT30, + DVT74, + AGECOUNT, + SMSTATE0, + SMSTATE1, + CTL0, + DESDETSEC, + TEMPAGESEC, + CTRLSPARE0, + CTRLSPARE1, + CTRLSPARE2, + CTRLSPARE3, + CORESEL, + THERMINTST, + INTST, + THSTAGE0ST, + THSTAGE1ST, + THSTAGE2ST, + THAHBST0, + THAHBST1, + SPARE0, + SPARE1, + SPARE2, + SPARE3, + THSLPEVEB, + SVS_REG_MAX, +}; + +static const u32 svs_regs_v2[] = { + [DESCHAR] = 0xc00, + [TEMPCHAR] = 0xc04, + [DETCHAR] = 0xc08, + [AGECHAR] = 0xc0c, + [DCCONFIG] = 0xc10, + [AGECONFIG] = 0xc14, + [FREQPCT30] = 0xc18, + [FREQPCT74] = 0xc1c, + [LIMITVALS] = 0xc20, + [VBOOT] = 0xc24, + [DETWINDOW] = 0xc28, + [CONFIG] = 0xc2c, + [TSCALCS] = 0xc30, + [RUNCONFIG] = 0xc34, + [SVSEN] = 0xc38, + [INIT2VALS] = 0xc3c, + [DCVALUES] = 0xc40, + [AGEVALUES] = 0xc44, + [VOP30] = 0xc48, + [VOP74] = 0xc4c, + [TEMP] = 0xc50, + [INTSTS] = 0xc54, + [INTSTSRAW] = 0xc58, + [INTEN] = 0xc5c, + [CHKINT] = 0xc60, + [CHKSHIFT] = 0xc64, + [STATUS] = 0xc68, + [VDESIGN30] = 0xc6c, + [VDESIGN74] = 0xc70, + [DVT30] = 0xc74, + [DVT74] = 0xc78, + [AGECOUNT] = 0xc7c, + [SMSTATE0] = 0xc80, + [SMSTATE1] = 0xc84, + [CTL0] = 0xc88, + [DESDETSEC] = 0xce0, + [TEMPAGESEC] = 0xce4, + [CTRLSPARE0] = 0xcf0, + [CTRLSPARE1] = 0xcf4, + [CTRLSPARE2] = 0xcf8, + [CTRLSPARE3] = 0xcfc, + [CORESEL] = 0xf00, + [THERMINTST] = 0xf04, + [INTST] = 0xf08, + [THSTAGE0ST] = 0xf0c, + [THSTAGE1ST] = 0xf10, + [THSTAGE2ST] = 0xf14, + [THAHBST0] = 0xf18, + [THAHBST1] = 0xf1c, + [SPARE0] = 0xf20, + [SPARE1] = 0xf24, + [SPARE2] = 0xf28, + [SPARE3] = 0xf2c, + [THSLPEVEB] = 0xf30, +}; + +/** + * struct svs_platform - svs platform control + * @name: svs platform name + * @base: svs platform register base + * @dev: svs platform device + * @main_clk: main clock for svs bank + * @pbank: svs bank pointer needing to be protected by spin_lock section + * @banks: svs banks that svs platform supports + * @rst: svs platform reset control + * @efuse_parsing: svs platform efuse parsing function pointer + * @probe: svs platform probe function pointer + * @irqflags: svs platform irq settings flags + * @efuse_max: total number of svs efuse + * @tefuse_max: total number of thermal efuse + * @regs: svs platform registers map + * @bank_max: total number of svs banks + * @efuse: svs efuse data received from NVMEM framework + * @tefuse: thermal efuse data received from NVMEM framework + */ +struct svs_platform { + char *name; + void __iomem *base; + struct device *dev; + struct clk *main_clk; + struct svs_bank *pbank; + struct svs_bank *banks; + struct reset_control *rst; + bool (*efuse_parsing)(struct svs_platform *svsp); + int (*probe)(struct svs_platform *svsp); + unsigned long irqflags; + size_t efuse_max; + size_t tefuse_max; + const u32 *regs; + u32 bank_max; + u32 *efuse; + u32 *tefuse; +}; + +struct svs_platform_data { + char *name; + struct svs_bank *banks; + bool (*efuse_parsing)(struct svs_platform *svsp); + int (*probe)(struct svs_platform *svsp); + unsigned long irqflags; + const u32 *regs; + u32 
bank_max; +}; + +/** + * struct svs_bank - svs bank representation + * @dev: bank device + * @opp_dev: device for opp table/buck control + * @init_completion: the timeout completion for bank init + * @buck: regulator used by opp_dev + * @tzd: thermal zone device for getting temperature + * @lock: mutex lock to protect voltage update process + * @set_freq_pct: function pointer to set bank frequency percent table + * @get_volts: function pointer to get bank voltages + * @name: bank name + * @buck_name: regulator name + * @tzone_name: thermal zone name + * @phase: bank current phase + * @volt_od: bank voltage overdrive + * @reg_data: bank register data in different phase for debug purpose + * @pm_runtime_enabled_count: bank pm runtime enabled count + * @mode_support: bank mode support + * @freq_base: reference frequency for bank init + * @turn_freq_base: reference frequency for 2-line turn point + * @vboot: voltage request for bank init01 only + * @opp_dfreq: default opp frequency table + * @opp_dvolt: default opp voltage table + * @freq_pct: frequency percent table for bank init + * @volt: bank voltage table + * @volt_step: bank voltage step + * @volt_base: bank voltage base + * @volt_flags: bank voltage flags + * @vmax: bank voltage maximum + * @vmin: bank voltage minimum + * @age_config: bank age configuration + * @age_voffset_in: bank age voltage offset + * @dc_config: bank dc configuration + * @dc_voffset_in: bank dc voltage offset + * @dvt_fixed: bank dvt fixed value + * @vco: bank VCO value + * @chk_shift: bank chicken shift + * @core_sel: bank selection + * @opp_count: bank opp count + * @int_st: bank interrupt identification + * @sw_id: bank software identification + * @cpu_id: cpu core id for SVS CPU bank use only + * @ctl0: TS-x selection + * @temp: bank temperature + * @tzone_htemp: thermal zone high temperature threshold + * @tzone_htemp_voffset: thermal zone high temperature voltage offset + * @tzone_ltemp: thermal zone low temperature threshold + * @tzone_ltemp_voffset: thermal zone low temperature voltage offset + * @bts: svs efuse data + * @mts: svs efuse data + * @bdes: svs efuse data + * @mdes: svs efuse data + * @mtdes: svs efuse data + * @dcbdet: svs efuse data + * @dcmdet: svs efuse data + * @turn_pt: 2-line turn point that tells which opp_volt entries come from the high/low bank + * @type: bank type indicating whether this is a 2-line (high/low) bank or a 1-line bank + * + * The svs bank generates suitable voltages by the general math equation + * below and provides them to the opp voltage table.
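 + * As an illustration (example values only): volt_step = 6250 uV, + * volt_base = 500000 uV and volt[i] = 48 give + * opp_volt[i] = 48 * 6250 + 500000 = 800000 uV.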
+ * + * opp_volt[i] = (volt[i] * volt_step) + volt_base; + */ +struct svs_bank { + struct device *dev; + struct device *opp_dev; + struct completion init_completion; + struct regulator *buck; + struct thermal_zone_device *tzd; + struct mutex lock; /* lock to protect voltage update process */ + void (*set_freq_pct)(struct svs_platform *svsp); + void (*get_volts)(struct svs_platform *svsp); + char *name; + char *buck_name; + char *tzone_name; + enum svsb_phase phase; + s32 volt_od; + u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX]; + u32 pm_runtime_enabled_count; + u32 mode_support; + u32 freq_base; + u32 turn_freq_base; + u32 vboot; + u32 opp_dfreq[MAX_OPP_ENTRIES]; + u32 opp_dvolt[MAX_OPP_ENTRIES]; + u32 freq_pct[MAX_OPP_ENTRIES]; + u32 volt[MAX_OPP_ENTRIES]; + u32 volt_step; + u32 volt_base; + u32 volt_flags; + u32 vmax; + u32 vmin; + u32 age_config; + u32 age_voffset_in; + u32 dc_config; + u32 dc_voffset_in; + u32 dvt_fixed; + u32 vco; + u32 chk_shift; + u32 core_sel; + u32 opp_count; + u32 int_st; + u32 sw_id; + u32 cpu_id; + u32 ctl0; + u32 temp; + u32 tzone_htemp; + u32 tzone_htemp_voffset; + u32 tzone_ltemp; + u32 tzone_ltemp_voffset; + u32 bts; + u32 mts; + u32 bdes; + u32 mdes; + u32 mtdes; + u32 dcbdet; + u32 dcmdet; + u32 turn_pt; + u32 type; +}; + +static u32 percent(u32 numerator, u32 denominator) +{ + /* Divide by 1000 first; otherwise "numerator * 100" could overflow. */ + numerator /= 1000; + denominator /= 1000; + + return DIV_ROUND_UP(numerator * 100, denominator); +} + +static u32 svs_readl_relaxed(struct svs_platform *svsp, enum svs_reg_index rg_i) +{ + return readl_relaxed(svsp->base + svsp->regs[rg_i]); +} + +static void svs_writel_relaxed(struct svs_platform *svsp, u32 val, + enum svs_reg_index rg_i) +{ + writel_relaxed(val, svsp->base + svsp->regs[rg_i]); +} + +static void svs_switch_bank(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, svsb->core_sel, CORESEL); +} + +static u32 svs_bank_volt_to_opp_volt(u32 svsb_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (svsb_volt * svsb_volt_step) + svsb_volt_base; +} + +static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (opp_u_volt - svsb_volt_base) / svsb_volt_step; +} + +static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb) +{ + struct dev_pm_opp *opp; + u32 i, opp_u_volt; + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], + true); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %u (%ld)\n", + svsb->opp_dfreq[i], PTR_ERR(opp)); + return PTR_ERR(opp); + } + + opp_u_volt = dev_pm_opp_get_voltage(opp); + svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt, + svsb->volt_step, + svsb->volt_base); + dev_pm_opp_put(opp); + } + + return 0; +} + +static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) +{ + int ret = -EPERM, tzone_temp = 0; + u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop; + + mutex_lock(&svsb->lock); + + /* + * A 2-line bank updates only its corresponding opp volts; + * a 1-line bank updates all opp volts.
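 + * (e.g. with opp_count = 16 and turn_pt = 8, a high bank covers opp + * entries 0..7 and a low bank covers entries 8..15; example values only)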
+ */ + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } else { + opp_start = 0; + opp_stop = svsb->opp_count; + } + + /* Get thermal effect */ + if (svsb->phase == SVSB_PHASE_MON) { + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND && + svsb->temp < SVSB_TEMP_LOWER_BOUND)) { + dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n", + svsb->tzone_name, ret, svsb->temp); + svsb->phase = SVSB_PHASE_ERROR; + } + + if (tzone_temp >= svsb->tzone_htemp) + temp_voffset += svsb->tzone_htemp_voffset; + else if (tzone_temp <= svsb->tzone_ltemp) + temp_voffset += svsb->tzone_ltemp_voffset; + + /* 2-line bank update all opp volts when running mon mode */ + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + opp_start = 0; + opp_stop = svsb->opp_count; + } + } + + /* vmin <= svsb_volt (opp_volt) <= default opp voltage */ + for (i = opp_start; i < opp_stop; i++) { + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + opp_volt = svsb->opp_dvolt[i]; + break; + case SVSB_PHASE_INIT01: + /* do nothing */ + goto unlock_mutex; + case SVSB_PHASE_INIT02: + svsb_volt = max(svsb->volt[i], svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + case SVSB_PHASE_MON: + svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + default: + dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase); + ret = -EINVAL; + goto unlock_mutex; + } + + opp_volt = min(opp_volt, svsb->opp_dvolt[i]); + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + svsb->opp_dfreq[i], + opp_volt, opp_volt, + svsb->opp_dvolt[i]); + if (ret) { + dev_err(svsb->dev, "set %uuV fail: %d\n", + opp_volt, ret); + goto unlock_mutex; + } + } + +unlock_mutex: + mutex_unlock(&svsb->lock); + + return ret; +} + +static int svs_dump_debug_show(struct seq_file *m, void *p) +{ + struct svs_platform *svsp = (struct svs_platform *)m->private; + struct svs_bank *svsb; + unsigned long svs_reg_addr; + u32 idx, i, j, bank_id; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse && svsp->efuse[i]) + seq_printf(m, "M_HW_RES%d = 0x%08x\n", + i, svsp->efuse[i]); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse) + seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n", + i, svsp->tefuse[i]); + + for (bank_id = 0, idx = 0; idx < svsp->bank_max; idx++, bank_id++) { + svsb = &svsp->banks[idx]; + + for (i = SVSB_PHASE_INIT01; i <= SVSB_PHASE_MON; i++) { + seq_printf(m, "Bank_number = %u\n", bank_id); + + if (i == SVSB_PHASE_INIT01 || i == SVSB_PHASE_INIT02) + seq_printf(m, "mode = init%d\n", i); + else if (i == SVSB_PHASE_MON) + seq_puts(m, "mode = mon\n"); + else + seq_puts(m, "mode = error\n"); + + for (j = DESCHAR; j < SVS_REG_MAX; j++) { + svs_reg_addr = (unsigned long)(svsp->base + + svsp->regs[j]); + seq_printf(m, "0x%08lx = 0x%08x\n", + svs_reg_addr, svsb->reg_data[i][j]); + } + } + } + + return 0; +} + +debug_fops_ro(dump); + +static int svs_enable_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + seq_puts(m, "disabled\n"); + break; + case SVSB_PHASE_INIT01: + seq_puts(m, "init1\n"); + break; + case SVSB_PHASE_INIT02: + seq_puts(m, "init2\n"); + break; + case SVSB_PHASE_MON: + seq_puts(m, "mon mode\n"); + break; + default: + 
seq_puts(m, "unknown\n"); + break; + } + + return 0; +} + +static ssize_t svs_enable_debug_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *pos) +{ + struct svs_bank *svsb = file_inode(filp)->i_private; + struct svs_platform *svsp = dev_get_drvdata(svsb->dev); + unsigned long flags; + int enabled, ret; + char *buf = NULL; + + if (count >= PAGE_SIZE) + return -EINVAL; + + buf = (char *)memdup_user_nul(buffer, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = kstrtoint(buf, 10, &enabled); + if (ret) + return ret; + + if (!enabled) { + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svsb->mode_support = SVSB_MODE_ALL_DISABLE; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + kfree(buf); + + return count; +} + +debug_fops_rw(enable); + +static int svs_status_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + struct dev_pm_opp *opp; + int tzone_temp = 0, ret; + u32 i; + + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret) + seq_printf(m, "%s: temperature ignore, turn_pt = %u\n", + svsb->name, svsb->turn_pt); + else + seq_printf(m, "%s: temperature = %d, turn_pt = %u\n", + svsb->name, tzone_temp, svsb->turn_pt); + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], true); + if (IS_ERR(opp)) { + seq_printf(m, "%s: cannot find freq = %u (%ld)\n", + svsb->name, svsb->opp_dfreq[i], + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + seq_printf(m, "opp_freq[%02u]: %u, opp_volt[%02u]: %lu, ", + i, svsb->opp_dfreq[i], i, + dev_pm_opp_get_voltage(opp)); + seq_printf(m, "svsb_volt[%02u]: 0x%x, freq_pct[%02u]: %u\n", + i, svsb->volt[i], i, svsb->freq_pct[i]); + dev_pm_opp_put(opp); + } + + return 0; +} + +debug_fops_ro(status); + +static int svs_create_debug_cmds(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dentry *svs_dir, *svsb_dir, *file_entry; + const char *d = "/sys/kernel/debug/svs"; + u32 i, idx; + + struct svs_dentry { + const char *name; + const struct file_operations *fops; + }; + + struct svs_dentry svs_entries[] = { + svs_dentry_data(dump), + }; + + struct svs_dentry svsb_entries[] = { + svs_dentry_data(enable), + svs_dentry_data(status), + }; + + svs_dir = debugfs_create_dir("svs", NULL); + if (IS_ERR(svs_dir)) { + dev_err(svsp->dev, "cannot create %s: %ld\n", + d, PTR_ERR(svs_dir)); + return PTR_ERR(svs_dir); + } + + for (i = 0; i < ARRAY_SIZE(svs_entries); i++) { + file_entry = debugfs_create_file(svs_entries[i].name, 0664, + svs_dir, svsp, + svs_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svs_entries[i].name, PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->mode_support == SVSB_MODE_ALL_DISABLE) + continue; + + svsb_dir = debugfs_create_dir(svsb->name, svs_dir); + if (IS_ERR(svsb_dir)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svsb->name, PTR_ERR(svsb_dir)); + return PTR_ERR(svsb_dir); + } + + for (i = 0; i < ARRAY_SIZE(svsb_entries); i++) { + file_entry = debugfs_create_file(svsb_entries[i].name, + 0664, svsb_dir, svsb, + svsb_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "no %s/%s/%s?: %ld\n", + d, svsb->name, 
svsb_entries[i].name, + PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + } + + return 0; +} + +static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx) +{ + u32 vx; + + if (v0 == v1 || f0 == f1) + return v0; + + /* *100 to have decimal fraction factor */ + vx = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx)); + + return DIV_ROUND_UP(vx, 100); +} + +static void svs_get_bank_volts_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt; + u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0; + u32 middle_index = (svsb->opp_count / 2); + + if (svsb->phase == SVSB_PHASE_MON && + svsb->volt_flags & SVSB_MON_VOLT_IGNORE) + return; + + vop74 = svs_readl_relaxed(svsp, VOP74); + vop30 = svs_readl_relaxed(svsp, VOP30); + + /* Target is to set svsb->volt[] by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* volt[0] ~ volt[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */ + j = svsb->opp_count - 7; + svsb->volt[turn_pt] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[turn_pt + 1] ~ volt[j - 1] by interpolate */ + for (i = turn_pt + 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[turn_pt], + svsb->freq_pct[j], + svsb->volt[turn_pt], + svsb->volt[j], + svsb->freq_pct[i]); + } + } else { + if (svsb->type == SVSB_HIGH) { + /* volt[0] + volt[j] ~ volt[turn_pt - 1] */ + j = turn_pt - 7; + svsb->volt[0] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[1] ~ volt[j - 1] by interpolate */ + for (i = 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[0], + svsb->freq_pct[j], + svsb->volt[0], + svsb->volt[j], + svsb->freq_pct[i]); + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] ~ volt[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? 
&vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } + } + + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } + + for (i = opp_start; i < opp_stop; i++) + if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT) + svsb->volt[i] -= svsb->dvt_fixed; +} + +static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0; + u32 b_sft, shift_byte = 0, turn_pt; + u32 middle_index = (svsb->opp_count / 2); + + for (i = 0; i < svsb->opp_count; i++) { + if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) { + svsb->turn_pt = i; + break; + } + } + + turn_pt = svsb->turn_pt; + + /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* + * If we don't handle this situation, + * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0" + * and this leads SVSB_LOW to work abnormally. + */ + if (turn_pt == 0) + freq_pct30 = svsb->freq_pct[0]; + + /* freq_pct[0] ~ freq_pct[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* + * freq_pct[turn_pt] + + * freq_pct[opp_count - 7] ~ freq_pct[opp_count -1] + */ + freq_pct30 = svsb->freq_pct[turn_pt]; + shift_byte++; + j = svsb->opp_count - 7; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } else { + if (svsb->type == SVSB_HIGH) { + /* + * freq_pct[0] + + * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1] + */ + freq_pct30 = svsb->freq_pct[0]; + shift_byte++; + j = turn_pt - 7; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? 
+ &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } + + svs_writel_relaxed(svsp, freq_pct74, FREQPCT74); + svs_writel_relaxed(svsp, freq_pct30, FREQPCT30); +} + +static void svs_get_bank_volts_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 temp, i; + + temp = svs_readl_relaxed(svsp, VOP74); + svsb->volt[14] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[12] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[10] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[8] = (temp & GENMASK(7, 0)); + + temp = svs_readl_relaxed(svsp, VOP30); + svsb->volt[6] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[4] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[2] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[0] = (temp & GENMASK(7, 0)); + + for (i = 0; i <= 12; i += 2) + svsb->volt[i + 1] = interpolate(svsb->freq_pct[i], + svsb->freq_pct[i + 2], + svsb->volt[i], + svsb->volt[i + 2], + svsb->freq_pct[i + 1]); + + svsb->volt[15] = interpolate(svsb->freq_pct[12], + svsb->freq_pct[14], + svsb->volt[12], + svsb->volt[14], + svsb->freq_pct[15]); + + for (i = 0; i < svsb->opp_count; i++) + svsb->volt[i] += svsb->volt_od; +} + +static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, + (svsb->freq_pct[14] << 24) | + (svsb->freq_pct[12] << 16) | + (svsb->freq_pct[10] << 8) | + svsb->freq_pct[8], + FREQPCT74); + + svs_writel_relaxed(svsp, + (svsb->freq_pct[6] << 24) | + (svsb->freq_pct[4] << 16) | + (svsb->freq_pct[2] << 8) | + svsb->freq_pct[0], + FREQPCT30); +} + +static void svs_set_bank_phase(struct svs_platform *svsp, + enum svsb_phase target_phase) +{ + struct svs_bank *svsb = svsp->pbank; + u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs; + + svs_switch_bank(svsp); + + des_char = (svsb->bdes << 8) | svsb->mdes; + svs_writel_relaxed(svsp, des_char, DESCHAR); + + temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed; + svs_writel_relaxed(svsp, temp_char, TEMPCHAR); + + det_char = (svsb->dcbdet << 8) | svsb->dcmdet; + svs_writel_relaxed(svsp, det_char, DETCHAR); + + svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG); + svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG); + svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG); + + svsb->set_freq_pct(svsp); + + limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) | + (SVSB_DTHI << 8) | SVSB_DTLO; + svs_writel_relaxed(svsp, limit_vals, LIMITVALS); + + svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW); + svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG); + svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT); + svs_writel_relaxed(svsp, svsb->ctl0, CTL0); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + + switch (target_phase) { + case SVSB_PHASE_INIT01: + svs_writel_relaxed(svsp, svsb->vboot, VBOOT); + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN); + break; + case SVSB_PHASE_INIT02: + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in; + svs_writel_relaxed(svsp, init2vals, INIT2VALS); + svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN); + break; + case SVSB_PHASE_MON: + ts_calcs = (svsb->bts << 12) | svsb->mts; + svs_writel_relaxed(svsp, ts_calcs, TSCALCS); + svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN); + break; + default: + dev_err(svsb->dev, "requested unknown target phase: %u\n", + target_phase); + break; 
+ } +} + +static inline void svs_save_bank_register_data(struct svs_platform *svsp, + enum svsb_phase phase) +{ + struct svs_bank *svsb = svsp->pbank; + enum svs_reg_index rg_i; + + for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++) + svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i); +} + +static inline void svs_error_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n", + __func__, svs_readl_relaxed(svsp, CORESEL)); + dev_err(svsb->dev, "SVSEN = 0x%08x, INTSTS = 0x%08x\n", + svs_readl_relaxed(svsp, SVSEN), + svs_readl_relaxed(svsp, INTSTS)); + dev_err(svsb->dev, "SMSTATE0 = 0x%08x, SMSTATE1 = 0x%08x\n", + svs_readl_relaxed(svsp, SMSTATE0), + svs_readl_relaxed(svsp, SMSTATE1)); + dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR); + + svsb->phase = SVSB_PHASE_ERROR; + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); +} + +static inline void svs_init01_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VDESIGN74), + svs_readl_relaxed(svsp, VDESIGN30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01); + + svsb->phase = SVSB_PHASE_INIT01; + svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) & + GENMASK(15, 0)) + 1; + if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE || + (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT && + svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY)) + svsb->dc_voffset_in = 0; + + svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) & + GENMASK(15, 0); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); + svsb->core_sel &= ~SVSB_DET_CLK_EN; +} + +static inline void svs_init02_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VOP74), + svs_readl_relaxed(svsp, VOP30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02); + + svsb->phase = SVSB_PHASE_INIT02; + svsb->get_volts(svsp); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); +} + +static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_save_bank_register_data(svsp, SVSB_PHASE_MON); + + svsb->phase = SVSB_PHASE_MON; + svsb->get_volts(svsp); + + svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0); + svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS); +} + +static irqreturn_t svs_isr(int irq, void *data) +{ + struct svs_platform *svsp = data; + struct svs_bank *svsb = NULL; + unsigned long flags; + u32 idx, int_sts, svs_en; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name); + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + + /* Find out which svs bank fires interrupt */ + if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) { + spin_unlock_irqrestore(&svs_lock, flags); + continue; + } + + svs_switch_bank(svsp); + int_sts = svs_readl_relaxed(svsp, INTSTS); + svs_en = svs_readl_relaxed(svsp, SVSEN); + + if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT01) + 
svs_init01_isr_handler(svsp); + else if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT02) + svs_init02_isr_handler(svsp); + else if (int_sts & SVSB_INTSTS_MONVOP) + svs_mon_mode_isr_handler(svsp); + else + svs_error_isr_handler(svsp); + + spin_unlock_irqrestore(&svs_lock, flags); + break; + } + + svs_adjust_pm_opp_volts(svsb); + + if (svsb->phase == SVSB_PHASE_INIT01 || + svsb->phase == SVSB_PHASE_INIT02) + complete(&svsb->init_completion); + + return IRQ_HANDLED; +} + +static int svs_init01(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + bool search_done; + int ret = 0, r; + u32 opp_freq, opp_vboot, buck_volt, idx, i; + + /* Keep CPUs' core power on for svs_init01 initialization */ + cpuidle_pause_and_lock(); + + /* Svs bank init01 preparation - power enable */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + ret = regulator_enable(svsb->buck); + if (ret) { + dev_err(svsb->dev, "%s enable fail: %d\n", + svsb->buck_name, ret); + goto svs_init01_resume_cpuidle; + } + + /* Some buck doesn't support mode change. Show fail msg only */ + ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST); + if (ret) + dev_notice(svsb->dev, "set fast mode fail: %d\n", ret); + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + if (!pm_runtime_enabled(svsb->opp_dev)) { + pm_runtime_enable(svsb->opp_dev); + svsb->pm_runtime_enabled_count++; + } + + ret = pm_runtime_get_sync(svsb->opp_dev); + if (ret < 0) { + dev_err(svsb->dev, "mtcmos on fail: %d\n", ret); + goto svs_init01_resume_cpuidle; + } + } + } + + /* + * Svs bank init01 preparation - vboot voltage adjustment + * Sometimes two svs banks use the same buck. Therefore, + * we have to set each svs bank to target voltage(vboot) first. + */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + /* + * Find the fastest freq that can be run at vboot and + * fix to that freq until svs_init01 is done. 
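+		 * Every other OPP is disabled below and re-enabled in
+		 * svs_init01_finish, so OPP users cannot move the bank away
+		 * from the chosen vboot operating point while init01 runs.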
+ */ + search_done = false; + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + for (i = 0; i < svsb->opp_count; i++) { + opp_freq = svsb->opp_dfreq[i]; + if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) { + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + opp_freq, + opp_vboot, + opp_vboot, + opp_vboot); + if (ret) { + dev_err(svsb->dev, + "set opp %uuV vboot fail: %d\n", + opp_vboot, ret); + goto svs_init01_finish; + } + + search_done = true; + } else { + ret = dev_pm_opp_disable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (ret) { + dev_err(svsb->dev, + "opp %uHz disable fail: %d\n", + svsb->opp_dfreq[i], ret); + goto svs_init01_finish; + } + } + } + } + + /* Svs bank init01 begins */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + buck_volt = regulator_get_voltage(svsb->buck); + if (buck_volt != opp_vboot) { + dev_err(svsb->dev, + "buck voltage: %uuV, expected vboot: %uuV\n", + buck_volt, opp_vboot); + ret = -EPERM; + goto svs_init01_finish; + } + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT01); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init01 completion timeout\n"); + ret = -EBUSY; + goto svs_init01_finish; + } + } + +svs_init01_finish: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + for (i = 0; i < svsb->opp_count; i++) { + r = dev_pm_opp_enable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (r) + dev_err(svsb->dev, "opp %uHz enable fail: %d\n", + svsb->opp_dfreq[i], r); + } + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + r = pm_runtime_put_sync(svsb->opp_dev); + if (r) + dev_err(svsb->dev, "mtcmos off fail: %d\n", r); + + if (svsb->pm_runtime_enabled_count > 0) { + pm_runtime_disable(svsb->opp_dev); + svsb->pm_runtime_enabled_count--; + } + } + + r = regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL); + if (r) + dev_notice(svsb->dev, "set normal mode fail: %d\n", r); + + r = regulator_disable(svsb->buck); + if (r) + dev_err(svsb->dev, "%s disable fail: %d\n", + svsb->buck_name, r); + } + +svs_init01_resume_cpuidle: + cpuidle_resume_and_unlock(); + + return ret; +} + +static int svs_init02(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + reinit_completion(&svsb->init_completion); + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT02); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init02 completion timeout\n"); + return -EBUSY; + } + } + + /* + * 2-line high/low bank update its corresponding opp voltages only. + * Therefore, we sync voltages from opp for high/low bank voltages + * consistency. 
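+	 * Only banks whose type is SVSB_HIGH or SVSB_LOW take this path.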
+ */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + if (svs_sync_bank_volts_from_opp(svsb)) { + dev_err(svsb->dev, "sync volt fail\n"); + return -EPERM; + } + } + } + + return 0; +} + +static void svs_mon_mode(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_MON)) + continue; + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_MON); + spin_unlock_irqrestore(&svs_lock, flags); + } +} + +static int svs_start(struct svs_platform *svsp) +{ + int ret; + + ret = svs_init01(svsp); + if (ret) + return ret; + + ret = svs_init02(svsp); + if (ret) + return ret; + + svs_mon_mode(svsp); + + return 0; +} + +static int svs_suspend(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + struct svs_bank *svsb; + unsigned long flags; + int ret; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + /* This might wait for svs_isr() process */ + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + ret = reset_control_assert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot assert reset %d\n", ret); + return ret; + } + + clk_disable_unprepare(svsp->main_clk); + + return 0; +} + +static int svs_resume(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main_clk, disable svs\n"); + return ret; + } + + ret = reset_control_deassert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot deassert reset %d\n", ret); + goto out_of_resume; + } + + ret = svs_init02(svsp); + if (ret) + goto out_of_resume; + + svs_mon_mode(svsp); + + return 0; + +out_of_resume: + clk_disable_unprepare(svsp->main_clk); + return ret; +} + +static int svs_bank_resource_setup(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dev_pm_opp *opp; + unsigned long freq; + int count, ret; + u32 idx, i; + + dev_set_drvdata(svsp->dev, svsp); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->name = "SVSB_CPU_LITTLE"; + break; + case SVSB_CPU_BIG: + svsb->name = "SVSB_CPU_BIG"; + break; + case SVSB_CCI: + svsb->name = "SVSB_CCI"; + break; + case SVSB_GPU: + if (svsb->type == SVSB_HIGH) + svsb->name = "SVSB_GPU_HIGH"; + else if (svsb->type == SVSB_LOW) + svsb->name = "SVSB_GPU_LOW"; + else + svsb->name = "SVSB_GPU"; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), + GFP_KERNEL); + if (!svsb->dev) + return -ENOMEM; + + ret = dev_set_name(svsb->dev, "%s", svsb->name); + if (ret) + return ret; + + dev_set_drvdata(svsb->dev, svsp); + + ret = dev_pm_opp_of_add_table(svsb->opp_dev); + if (ret) { + dev_err(svsb->dev, "add opp table fail: %d\n", ret); + return ret; + } + + mutex_init(&svsb->lock); + init_completion(&svsb->init_completion); + + if (svsb->mode_support 
& SVSB_MODE_INIT01) { + svsb->buck = devm_regulator_get_optional(svsb->opp_dev, + svsb->buck_name); + if (IS_ERR(svsb->buck)) { + dev_err(svsb->dev, "cannot get \"%s-supply\"\n", + svsb->buck_name); + return PTR_ERR(svsb->buck); + } + } + + if (svsb->mode_support & SVSB_MODE_MON) { + svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name); + if (IS_ERR(svsb->tzd)) { + dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n", + svsb->tzone_name); + return PTR_ERR(svsb->tzd); + } + } + + count = dev_pm_opp_get_opp_count(svsb->opp_dev); + if (svsb->opp_count != count) { + dev_err(svsb->dev, + "opp_count not \"%u\" but get \"%d\"?\n", + svsb->opp_count, count); + return count; + } + + for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) { + opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %ld\n", + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + svsb->opp_dfreq[i] = freq; + svsb->opp_dvolt[i] = dev_pm_opp_get_voltage(opp); + svsb->freq_pct[i] = percent(svsb->opp_dfreq[i], + svsb->freq_base); + dev_pm_opp_put(opp); + } + } + + return 0; +} + +static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + u32 idx, i, vmin, golden_temp; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[9]) { + dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (vmin == 0x1) + svsb->vmin = 0x1e; + + if (svsb->type == SVSB_LOW) { + svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0); + } else if (svsb->type == SVSB_HIGH) { + svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0); + } + + svsb->vmax += svsb->dvt_fixed; + } + + /* Thermal efuse parsing */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR_OR_NULL(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse[i] != 0) + break; + + if (i == svsp->tefuse_max) + golden_temp = 50; /* All thermal efuse data are 0 */ + else + golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = 500; + svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4; + } + + return true; +} + +static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0; + int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t; + int o_slope, o_slope_sign, ts_id; + u32 idx, i, ft_pgm, mts, temp0, temp1, temp2; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[2]) { + dev_notice(svsp->dev, "svs_efuse[2] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (ft_pgm <= 1) + svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->bdes = svsp->efuse[16] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_CPU_BIG: + svsb->bdes = svsp->efuse[18] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 15; + else + svsb->volt_od += 12; + break; + case SVSB_CCI: + svsb->bdes = svsp->efuse[4] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_GPU: + svsb->bdes = svsp->efuse[6] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0); + + if (ft_pgm >= 2) { + svsb->freq_base = 800000000; /* 800MHz */ + svsb->dvt_fixed = 2; + } + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return false; + } + } + + /* Get thermal efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + /* Thermal efuse parsing */ + adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0); + adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0); + + o_vtsmcu[0] = (svsp->tefuse[0] >> 17) & GENMASK(8, 0); + o_vtsmcu[1] = (svsp->tefuse[0] >> 8) & GENMASK(8, 0); + o_vtsmcu[2] = svsp->tefuse[1] & GENMASK(8, 0); + o_vtsmcu[3] = (svsp->tefuse[2] >> 23) & GENMASK(8, 0); + o_vtsmcu[4] = (svsp->tefuse[2] >> 5) & GENMASK(8, 0); + o_vtsabb = (svsp->tefuse[2] >> 14) & GENMASK(8, 0); + + degc_cali = (svsp->tefuse[0] >> 1) & GENMASK(5, 0); + adc_cali_en_t = svsp->tefuse[0] & BIT(0); + o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0); + + ts_id = (svsp->tefuse[1] >> 9) & BIT(0); + o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0); + + if (adc_cali_en_t == 1) { + if (!ts_id) + o_slope = 0; + + if (adc_ge_t < 265 || adc_ge_t > 758 || + adc_oe_t < 265 || adc_oe_t > 758 || + o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 || + o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 || + o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 || + o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 || + o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 || + o_vtsabb < -8 || o_vtsabb > 484 || + degc_cali < 1 || degc_cali > 63) { + dev_err(svsp->dev, "bad thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + } else { + dev_err(svsp->dev, "no thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + + ge = ((adc_ge_t - 512) * 10000) / 4096; + oe = (adc_oe_t - 512); + gain = (10000 + ge); + + format[0] = (o_vtsmcu[0] + 3350 - oe); + format[1] = (o_vtsmcu[1] + 3350 - oe); + format[2] = (o_vtsmcu[2] + 3350 - oe); + format[3] = (o_vtsmcu[3] + 3350 - oe); + format[4] = (o_vtsmcu[4] + 3350 - oe); + format[5] = (o_vtsabb + 3350 - oe); + + for (i = 0; i < 6; i++) + x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain; + + temp0 = (10000 * 100000 / gain) * 15 / 18; + + if (!o_slope_sign) + mts = (temp0 * 10) / (1534 + o_slope * 10); + else + mts = (temp0 * 10) / (1534 - o_slope * 10); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = mts; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + tb_roomt = x_roomt[3]; + break; + case SVSB_CPU_BIG: + tb_roomt = x_roomt[4]; + break; + case SVSB_CCI: + tb_roomt = x_roomt[3]; + break; + case SVSB_GPU: + tb_roomt = x_roomt[1]; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + goto remove_mt8183_svsb_mon_mode; + } + + temp0 = (degc_cali * 10 / 2); + temp1 = ((10000 * 100000 / 4096 / gain) * + oe + tb_roomt * 10) * 15 / 18; + + if (!o_slope_sign) + temp2 = temp1 * 100 / (1534 + o_slope * 10); + else + temp2 = temp1 * 100 / (1534 - o_slope * 10); + + svsb->bts = (temp0 + temp2 - 250) * 4 / 10; + } + + return true; + +remove_mt8183_svsb_mon_mode: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mode_support &= ~SVSB_MODE_MON; + } + + return true; +} + +static bool svs_is_efuse_data_correct(struct svs_platform *svsp) +{ + struct nvmem_cell *cell; + + /* Get svs efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "svs-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"svs-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->efuse = nvmem_cell_read(cell, &svsp->efuse_max); + if (IS_ERR(svsp->efuse)) { + dev_err(svsp->dev, "cannot read svs efuse: %ld\n", + PTR_ERR(svsp->efuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->efuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + return svsp->efuse_parsing(svsp); +} + +static struct device *svs_get_subsys_device(struct svs_platform *svsp, + const char *node_name) +{ + struct platform_device *pdev; + struct device_node *np; + + np = of_find_node_by_name(NULL, node_name); + if (!np) { + dev_err(svsp->dev, "cannot find %s node\n", node_name); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(np); + if (!pdev) { + of_node_put(np); + dev_err(svsp->dev, "cannot find pdev by %s\n", node_name); + return ERR_PTR(-ENXIO); + } + + of_node_put(np); + + return &pdev->dev; +} + +static struct device *svs_add_device_link(struct svs_platform *svsp, + const char *node_name) +{ + struct device *dev; + struct device_link *sup_link; + + if (!node_name) { + dev_err(svsp->dev, "node name cannot be null\n"); + return ERR_PTR(-EINVAL); + } + + dev = svs_get_subsys_device(svsp, node_name); + if (IS_ERR(dev)) + return dev; + + sup_link = device_link_add(svsp->dev, dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!sup_link) { + dev_err(svsp->dev, "sup_link is NULL\n"); + return ERR_PTR(-EINVAL); + } + + if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND) + return ERR_PTR(-EPROBE_DEFER); + + return dev; +} + +static int svs_mt8192_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); + if (IS_ERR(svsp->rst)) + return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst), + "cannot get svs reset control\n"); + + dev = svs_add_device_link(svsp, "lvts"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get lvts device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->type == SVSB_HIGH) + svsb->opp_dev = svs_add_device_link(svsp, "mali"); + else if (svsb->type == SVSB_LOW) + svsb->opp_dev = svs_get_subsys_device(svsp, "mali"); + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static int svs_mt8183_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + dev = svs_add_device_link(svsp, "thermal"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get thermal device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + case SVSB_CPU_BIG: + svsb->opp_dev = get_cpu_device(svsb->cpu_id); + break; + case SVSB_CCI: + svsb->opp_dev = svs_add_device_link(svsp, "cci"); + break; + case SVSB_GPU: + svsb->opp_dev = svs_add_device_link(svsp, "gpu"); + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static struct svs_bank svs_mt8192_banks[] = { + { + .sw_id = SVSB_GPU, + .type = SVSB_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .mode_support = SVSB_MODE_INIT02, + .opp_count = 
MAX_OPP_ENTRIES, + .freq_base = 688000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x1, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0100, + .int_st = BIT(0), + .ctl0 = 0x00540003, + }, + { + .sw_id = SVSB_GPU, + .type = SVSB_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu1", + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | + SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 902000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x6, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0101, + .int_st = BIT(1), + .ctl0 = 0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + }, +}; + +static struct svs_bank svs_mt8183_banks[] = { + { + .sw_id = SVSB_CPU_LITTLE, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 0, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0000, + .int_st = BIT(0), + .ctl0 = 0x00010001, + }, + { + .sw_id = SVSB_CPU_BIG, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 4, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x58, + .vmin = 0x10, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0001, + .int_st = BIT(1), + .ctl0 = 0x00000001, + }, + { + .sw_id = SVSB_CCI, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1196000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0002, + .int_st = BIT(2), + .ctl0 = 0x00100003, + }, + { + .sw_id = SVSB_GPU, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "mali", + .tzone_name = "tzts2", + .volt_flags = SVSB_INIT01_PD_REQ | + SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | + SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 900000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x40, + .vmin = 0x14, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x3, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0003, + .int_st = BIT(3), + .ctl0 = 0x00050001, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 3, + }, +}; + 
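+/*
+ * Note on the bank tables above: vmax, vmin and vboot are expressed in
+ * bank-voltage steps rather than microvolts. Assuming the linear mapping
+ * applied by svs_bank_volt_to_opp_volt() (volt * volt_step + volt_base),
+ * e.g. the MT8183 vboot of 0x30 corresponds to
+ * 0x30 * 6250 + 500000 = 800000 uV.
+ */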
+static const struct svs_platform_data svs_mt8192_platform_data = { + .name = "mt8192-svs", + .banks = svs_mt8192_banks, + .efuse_parsing = svs_mt8192_efuse_parsing, + .probe = svs_mt8192_platform_probe, + .irqflags = IRQF_TRIGGER_HIGH, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8192_banks), +}; + +static const struct svs_platform_data svs_mt8183_platform_data = { + .name = "mt8183-svs", + .banks = svs_mt8183_banks, + .efuse_parsing = svs_mt8183_efuse_parsing, + .probe = svs_mt8183_platform_probe, + .irqflags = IRQF_TRIGGER_LOW, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8183_banks), +}; + +static const struct of_device_id svs_of_match[] = { + { + .compatible = "mediatek,mt8192-svs", + .data = &svs_mt8192_platform_data, + }, { + .compatible = "mediatek,mt8183-svs", + .data = &svs_mt8183_platform_data, + }, { + /* Sentinel */ + }, +}; + +static struct svs_platform *svs_platform_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + const struct svs_platform_data *svsp_data; + int ret; + + svsp_data = of_device_get_match_data(&pdev->dev); + if (!svsp_data) { + dev_err(&pdev->dev, "no svs platform data?\n"); + return ERR_PTR(-EPERM); + } + + svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL); + if (!svsp) + return ERR_PTR(-ENOMEM); + + svsp->dev = &pdev->dev; + svsp->name = svsp_data->name; + svsp->banks = svsp_data->banks; + svsp->efuse_parsing = svsp_data->efuse_parsing; + svsp->probe = svsp_data->probe; + svsp->irqflags = svsp_data->irqflags; + svsp->regs = svsp_data->regs; + svsp->bank_max = svsp_data->bank_max; + + ret = svsp->probe(svsp); + if (ret) + return ERR_PTR(ret); + + return svsp; +} + +static int svs_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + unsigned int svsp_irq; + int ret; + + svsp = svs_platform_probe(pdev); + if (IS_ERR(svsp)) + return PTR_ERR(svsp); + + if (!svs_is_efuse_data_correct(svsp)) { + dev_notice(svsp->dev, "efuse data isn't correct\n"); + ret = -EPERM; + goto svs_probe_free_resource; + } + + ret = svs_bank_resource_setup(svsp); + if (ret) { + dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0); + ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, + svsp->irqflags | IRQF_ONESHOT, + svsp->name, svsp); + if (ret) { + dev_err(svsp->dev, "register irq(%d) failed: %d\n", + svsp_irq, ret); + goto svs_probe_free_resource; + } + + svsp->main_clk = devm_clk_get(svsp->dev, "main"); + if (IS_ERR(svsp->main_clk)) { + dev_err(svsp->dev, "failed to get clock: %ld\n", + PTR_ERR(svsp->main_clk)); + ret = PTR_ERR(svsp->main_clk); + goto svs_probe_free_resource; + } + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main clk: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp->base = of_iomap(svsp->dev->of_node, 0); + if (IS_ERR_OR_NULL(svsp->base)) { + dev_err(svsp->dev, "cannot find svs register base\n"); + ret = -EINVAL; + goto svs_probe_clk_disable; + } + + ret = svs_start(svsp); + if (ret) { + dev_err(svsp->dev, "svs start fail: %d\n", ret); + goto svs_probe_iounmap; + } + + ret = svs_create_debug_cmds(svsp); + if (ret) { + dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret); + goto svs_probe_iounmap; + } + + return 0; + +svs_probe_iounmap: + iounmap(svsp->base); + +svs_probe_clk_disable: + clk_disable_unprepare(svsp->main_clk); + +svs_probe_free_resource: + if (!IS_ERR_OR_NULL(svsp->efuse)) + kfree(svsp->efuse); + if 
(!IS_ERR_OR_NULL(svsp->tefuse)) + kfree(svsp->tefuse); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(svs_pm_ops, svs_suspend, svs_resume); + +static struct platform_driver svs_driver = { + .probe = svs_probe, + .driver = { + .name = "mtk-svs", + .pm = &svs_pm_ops, + .of_match_table = of_match_ptr(svs_of_match), + }, +}; + +module_platform_driver(svs_driver); + +MODULE_AUTHOR("Roger Lu "); +MODULE_DESCRIPTION("MediaTek SVS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/pxa/Kconfig b/drivers/soc/pxa/Kconfig new file mode 100644 index 0000000000..c5c265aa4f --- /dev/null +++ b/drivers/soc/pxa/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +config PLAT_PXA + bool + +config PXA_SSP + tristate + help + Enable support for PXA2xx SSP ports diff --git a/drivers/soc/pxa/Makefile b/drivers/soc/pxa/Makefile new file mode 100644 index 0000000000..413deceddb --- /dev/null +++ b/drivers/soc/pxa/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_PXA3xx) += mfp.o +obj-$(CONFIG_ARCH_MMP) += mfp.o + +obj-$(CONFIG_PXA_SSP) += ssp.o diff --git a/drivers/soc/pxa/mfp.c b/drivers/soc/pxa/mfp.c new file mode 100644 index 0000000000..6220ba321c --- /dev/null +++ b/drivers/soc/pxa/mfp.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/arch/arm/plat-pxa/mfp.c + * + * Multi-Function Pin Support + * + * Copyright (C) 2007 Marvell Internation Ltd. + * + * 2007-08-21: eric miao + * initial version + */ + +#include +#include +#include +#include + +#include + +#define MFPR_SIZE (PAGE_SIZE) + +/* MFPR register bit definitions */ +#define MFPR_PULL_SEL (0x1 << 15) +#define MFPR_PULLUP_EN (0x1 << 14) +#define MFPR_PULLDOWN_EN (0x1 << 13) +#define MFPR_SLEEP_SEL (0x1 << 9) +#define MFPR_SLEEP_OE_N (0x1 << 7) +#define MFPR_EDGE_CLEAR (0x1 << 6) +#define MFPR_EDGE_FALL_EN (0x1 << 5) +#define MFPR_EDGE_RISE_EN (0x1 << 4) + +#define MFPR_SLEEP_DATA(x) ((x) << 8) +#define MFPR_DRIVE(x) (((x) & 0x7) << 10) +#define MFPR_AF_SEL(x) (((x) & 0x7) << 0) + +#define MFPR_EDGE_NONE (0) +#define MFPR_EDGE_RISE (MFPR_EDGE_RISE_EN) +#define MFPR_EDGE_FALL (MFPR_EDGE_FALL_EN) +#define MFPR_EDGE_BOTH (MFPR_EDGE_RISE | MFPR_EDGE_FALL) + +/* + * Table that determines the low power modes outputs, with actual settings + * used in parentheses for don't-care values. Except for the float output, + * the configured driven and pulled levels match, so if there is a need for + * non-LPM pulled output, the same configuration could probably be used. + * + * Output value sleep_oe_n sleep_data pullup_en pulldown_en pull_sel + * (bit 7) (bit 8) (bit 14) (bit 13) (bit 15) + * + * Input 0 X(0) X(0) X(0) 0 + * Drive 0 0 0 0 X(1) 0 + * Drive 1 0 1 X(1) 0 0 + * Pull hi (1) 1 X(1) 1 0 0 + * Pull lo (0) 1 X(0) 0 1 0 + * Z (float) 1 X(0) 0 0 0 + */ +#define MFPR_LPM_INPUT (0) +#define MFPR_LPM_DRIVE_LOW (MFPR_SLEEP_DATA(0) | MFPR_PULLDOWN_EN) +#define MFPR_LPM_DRIVE_HIGH (MFPR_SLEEP_DATA(1) | MFPR_PULLUP_EN) +#define MFPR_LPM_PULL_LOW (MFPR_LPM_DRIVE_LOW | MFPR_SLEEP_OE_N) +#define MFPR_LPM_PULL_HIGH (MFPR_LPM_DRIVE_HIGH | MFPR_SLEEP_OE_N) +#define MFPR_LPM_FLOAT (MFPR_SLEEP_OE_N) +#define MFPR_LPM_MASK (0xe080) + +/* + * The pullup and pulldown state of the MFP pin at run mode is by default + * determined by the selected alternate function. 
In case that some buggy + * devices need to override this default behavior, the definitions below + * indicates the setting of corresponding MFPR bits + * + * Definition pull_sel pullup_en pulldown_en + * MFPR_PULL_NONE 0 0 0 + * MFPR_PULL_LOW 1 0 1 + * MFPR_PULL_HIGH 1 1 0 + * MFPR_PULL_BOTH 1 1 1 + * MFPR_PULL_FLOAT 1 0 0 + */ +#define MFPR_PULL_NONE (0) +#define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN) +#define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN) +#define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN) +#define MFPR_PULL_FLOAT (MFPR_PULL_SEL) + +/* mfp_spin_lock is used to ensure that MFP register configuration + * (most likely a read-modify-write operation) is atomic, and that + * mfp_table[] is consistent + */ +static DEFINE_SPINLOCK(mfp_spin_lock); + +static void __iomem *mfpr_mmio_base; + +struct mfp_pin { + unsigned long config; /* -1 for not configured */ + unsigned long mfpr_off; /* MFPRxx Register offset */ + unsigned long mfpr_run; /* Run-Mode Register Value */ + unsigned long mfpr_lpm; /* Low Power Mode Register Value */ +}; + +static struct mfp_pin mfp_table[MFP_PIN_MAX]; + +/* mapping of MFP_LPM_* definitions to MFPR_LPM_* register bits */ +static const unsigned long mfpr_lpm[] = { + MFPR_LPM_INPUT, + MFPR_LPM_DRIVE_LOW, + MFPR_LPM_DRIVE_HIGH, + MFPR_LPM_PULL_LOW, + MFPR_LPM_PULL_HIGH, + MFPR_LPM_FLOAT, + MFPR_LPM_INPUT, +}; + +/* mapping of MFP_PULL_* definitions to MFPR_PULL_* register bits */ +static const unsigned long mfpr_pull[] = { + MFPR_PULL_NONE, + MFPR_PULL_LOW, + MFPR_PULL_HIGH, + MFPR_PULL_BOTH, + MFPR_PULL_FLOAT, +}; + +/* mapping of MFP_LPM_EDGE_* definitions to MFPR_EDGE_* register bits */ +static const unsigned long mfpr_edge[] = { + MFPR_EDGE_NONE, + MFPR_EDGE_RISE, + MFPR_EDGE_FALL, + MFPR_EDGE_BOTH, +}; + +#define mfpr_readl(off) \ + __raw_readl(mfpr_mmio_base + (off)) + +#define mfpr_writel(off, val) \ + __raw_writel(val, mfpr_mmio_base + (off)) + +#define mfp_configured(p) ((p)->config != -1) + +/* + * perform a read-back of any valid MFPR register to make sure the + * previous writings are finished + */ +static unsigned long mfpr_off_readback; +#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback) + +static inline void __mfp_config_run(struct mfp_pin *p) +{ + if (mfp_configured(p)) + mfpr_writel(p->mfpr_off, p->mfpr_run); +} + +static inline void __mfp_config_lpm(struct mfp_pin *p) +{ + if (mfp_configured(p)) { + unsigned long mfpr_clr = (p->mfpr_run & ~MFPR_EDGE_BOTH) | MFPR_EDGE_CLEAR; + if (mfpr_clr != p->mfpr_run) + mfpr_writel(p->mfpr_off, mfpr_clr); + if (p->mfpr_lpm != mfpr_clr) + mfpr_writel(p->mfpr_off, p->mfpr_lpm); + } +} + +void mfp_config(unsigned long *mfp_cfgs, int num) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&mfp_spin_lock, flags); + + for (i = 0; i < num; i++, mfp_cfgs++) { + unsigned long tmp, c = *mfp_cfgs; + struct mfp_pin *p; + int pin, af, drv, lpm, edge, pull; + + pin = MFP_PIN(c); + BUG_ON(pin >= MFP_PIN_MAX); + p = &mfp_table[pin]; + + af = MFP_AF(c); + drv = MFP_DS(c); + lpm = MFP_LPM_STATE(c); + edge = MFP_LPM_EDGE(c); + pull = MFP_PULL(c); + + /* run-mode pull settings will conflict with MFPR bits of + * low power mode state, calculate mfpr_run and mfpr_lpm + * individually if pull != MFP_PULL_NONE + */ + tmp = MFPR_AF_SEL(af) | MFPR_DRIVE(drv); + + if (likely(pull == MFP_PULL_NONE)) { + p->mfpr_run = tmp | mfpr_lpm[lpm] | mfpr_edge[edge]; + p->mfpr_lpm = p->mfpr_run; + } else { + p->mfpr_lpm = tmp | mfpr_lpm[lpm] | mfpr_edge[edge]; + p->mfpr_run = tmp | 
mfpr_pull[pull]; + } + + p->config = c; __mfp_config_run(p); + } + + mfpr_sync(); + spin_unlock_irqrestore(&mfp_spin_lock, flags); +} + +unsigned long mfp_read(int mfp) +{ + unsigned long val, flags; + + BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX); + + spin_lock_irqsave(&mfp_spin_lock, flags); + val = mfpr_readl(mfp_table[mfp].mfpr_off); + spin_unlock_irqrestore(&mfp_spin_lock, flags); + + return val; +} + +void mfp_write(int mfp, unsigned long val) +{ + unsigned long flags; + + BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX); + + spin_lock_irqsave(&mfp_spin_lock, flags); + mfpr_writel(mfp_table[mfp].mfpr_off, val); + mfpr_sync(); + spin_unlock_irqrestore(&mfp_spin_lock, flags); +} + +void __init mfp_init_base(void __iomem *mfpr_base) +{ + int i; + + /* initialize the table with default - unconfigured */ + for (i = 0; i < ARRAY_SIZE(mfp_table); i++) + mfp_table[i].config = -1; + + mfpr_mmio_base = mfpr_base; +} + +void __init mfp_init_addr(struct mfp_addr_map *map) +{ + struct mfp_addr_map *p; + unsigned long offset, flags; + int i; + + spin_lock_irqsave(&mfp_spin_lock, flags); + + /* mfp offset for readback */ + mfpr_off_readback = map[0].offset; + + for (p = map; p->start != MFP_PIN_INVALID; p++) { + offset = p->offset; + i = p->start; + + do { + mfp_table[i].mfpr_off = offset; + mfp_table[i].mfpr_run = 0; + mfp_table[i].mfpr_lpm = 0; + offset += 4; i++; + } while ((i <= p->end) && (p->end != -1)); + } + + spin_unlock_irqrestore(&mfp_spin_lock, flags); +} + +void mfp_config_lpm(void) +{ + struct mfp_pin *p = &mfp_table[0]; + int pin; + + for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++) + __mfp_config_lpm(p); +} + +void mfp_config_run(void) +{ + struct mfp_pin *p = &mfp_table[0]; + int pin; + + for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++) + __mfp_config_run(p); +} diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c new file mode 100644 index 0000000000..563440315a --- /dev/null +++ b/drivers/soc/pxa/ssp.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/arch/arm/mach-pxa/ssp.c + * + * based on linux/arch/arm/mach-sa1100/ssp.c by Russell King + * + * Copyright (C) 2003 Russell King. + * Copyright (C) 2003 Wolfson Microelectronics PLC + * + * PXA2xx SSP driver. This provides the generic core for simple + * IO-based SSP applications and allows easy port setup for DMA access. 
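+ * Ports register themselves on a global list at probe time and are
+ * handed out to clients through pxa_ssp_request()/pxa_ssp_request_of().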
+ * + * Author: Liam Girdwood + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static DEFINE_MUTEX(ssp_lock); +static LIST_HEAD(ssp_list); + +struct ssp_device *pxa_ssp_request(int port, const char *label) +{ + struct ssp_device *ssp = NULL; + + mutex_lock(&ssp_lock); + + list_for_each_entry(ssp, &ssp_list, node) { + if (ssp->port_id == port && ssp->use_count == 0) { + ssp->use_count++; + ssp->label = label; + break; + } + } + + mutex_unlock(&ssp_lock); + + if (&ssp->node == &ssp_list) + return NULL; + + return ssp; +} +EXPORT_SYMBOL(pxa_ssp_request); + +struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node, + const char *label) +{ + struct ssp_device *ssp = NULL; + + mutex_lock(&ssp_lock); + + list_for_each_entry(ssp, &ssp_list, node) { + if (ssp->of_node == of_node && ssp->use_count == 0) { + ssp->use_count++; + ssp->label = label; + break; + } + } + + mutex_unlock(&ssp_lock); + + if (&ssp->node == &ssp_list) + return NULL; + + return ssp; +} +EXPORT_SYMBOL(pxa_ssp_request_of); + +void pxa_ssp_free(struct ssp_device *ssp) +{ + mutex_lock(&ssp_lock); + if (ssp->use_count) { + ssp->use_count--; + ssp->label = NULL; + } else + dev_err(ssp->dev, "device already free\n"); + mutex_unlock(&ssp_lock); +} +EXPORT_SYMBOL(pxa_ssp_free); + +#ifdef CONFIG_OF +static const struct of_device_id pxa_ssp_of_ids[] = { + { .compatible = "mrvl,pxa25x-ssp", .data = (void *) PXA25x_SSP }, + { .compatible = "mvrl,pxa25x-nssp", .data = (void *) PXA25x_NSSP }, + { .compatible = "mrvl,pxa27x-ssp", .data = (void *) PXA27x_SSP }, + { .compatible = "mrvl,pxa3xx-ssp", .data = (void *) PXA3xx_SSP }, + { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP }, + { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP }, + { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP }, + { }, +}; +MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids); +#endif + +static int pxa_ssp_probe(struct platform_device *pdev) +{ + struct resource *res; + struct ssp_device *ssp; + struct device *dev = &pdev->dev; + + ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL); + if (ssp == NULL) + return -ENOMEM; + + ssp->dev = dev; + + ssp->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ssp->clk)) + return PTR_ERR(ssp->clk); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(dev, "no memory resource defined\n"); + return -ENODEV; + } + + res = devm_request_mem_region(dev, res->start, resource_size(res), + pdev->name); + if (res == NULL) { + dev_err(dev, "failed to request memory resource\n"); + return -EBUSY; + } + + ssp->phys_base = res->start; + + ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res)); + if (ssp->mmio_base == NULL) { + dev_err(dev, "failed to ioremap() registers\n"); + return -ENODEV; + } + + ssp->irq = platform_get_irq(pdev, 0); + if (ssp->irq < 0) { + dev_err(dev, "no IRQ resource defined\n"); + return -ENODEV; + } + + if (dev->of_node) { + const struct of_device_id *id = + of_match_device(of_match_ptr(pxa_ssp_of_ids), dev); + ssp->type = (int) id->data; + } else { + const struct platform_device_id *id = + platform_get_device_id(pdev); + ssp->type = (int) id->driver_data; + + /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id + * starts from 0, do a translation here + */ + ssp->port_id = pdev->id + 1; + } + + ssp->use_count = 0; + ssp->of_node = dev->of_node; + + mutex_lock(&ssp_lock); + 
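+	/* Publish the port so pxa_ssp_request()/pxa_ssp_request_of() can find it. */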
list_add(&ssp->node, &ssp_list); + mutex_unlock(&ssp_lock); + + platform_set_drvdata(pdev, ssp); + + return 0; +} + +static int pxa_ssp_remove(struct platform_device *pdev) +{ + struct ssp_device *ssp; + + ssp = platform_get_drvdata(pdev); + if (ssp == NULL) + return -ENODEV; + + mutex_lock(&ssp_lock); + list_del(&ssp->node); + mutex_unlock(&ssp_lock); + + return 0; +} + +static const struct platform_device_id ssp_id_table[] = { + { "pxa25x-ssp", PXA25x_SSP }, + { "pxa25x-nssp", PXA25x_NSSP }, + { "pxa27x-ssp", PXA27x_SSP }, + { "pxa3xx-ssp", PXA3xx_SSP }, + { "pxa168-ssp", PXA168_SSP }, + { "pxa910-ssp", PXA910_SSP }, + { }, +}; + +static struct platform_driver pxa_ssp_driver = { + .probe = pxa_ssp_probe, + .remove = pxa_ssp_remove, + .driver = { + .name = "pxa2xx-ssp", + .of_match_table = of_match_ptr(pxa_ssp_of_ids), + }, + .id_table = ssp_id_table, +}; + +static int __init pxa_ssp_init(void) +{ + return platform_driver_register(&pxa_ssp_driver); +} + +static void __exit pxa_ssp_exit(void) +{ + platform_driver_unregister(&pxa_ssp_driver); +} + +arch_initcall(pxa_ssp_init); +module_exit(pxa_ssp_exit); + +MODULE_DESCRIPTION("PXA SSP driver"); +MODULE_AUTHOR("Liam Girdwood"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c new file mode 100644 index 0000000000..7f8aca533c --- /dev/null +++ b/drivers/soc/qcom/icc-bwmon.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2021-2022 Linaro Ltd + * Author: Krzysztof Kozlowski , based on + * previous work of Thara Gopinath and msm-4.9 downstream sources. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The BWMON samples data throughput within 'sample_ms' time. With three + * configurable thresholds (Low, Medium and High) gives four windows (called + * zones) of current bandwidth: + * + * Zone 0: byte count < THRES_LO + * Zone 1: THRES_LO < byte count < THRES_MED + * Zone 2: THRES_MED < byte count < THRES_HIGH + * Zone 3: THRES_HIGH < byte count + * + * Zones 0 and 2 are not used by this driver. 
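+ * An interrupt from zone 3 means the measured bandwidth exceeded the high
+ * threshold (scale up); one from zone 1 means it fell below the medium
+ * threshold (scale down).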
+ */ + +/* Internal sampling clock frequency */ +#define HW_TIMER_HZ 19200000 + +#define BWMON_GLOBAL_IRQ_STATUS 0x0 +#define BWMON_GLOBAL_IRQ_CLEAR 0x8 +#define BWMON_GLOBAL_IRQ_ENABLE 0xc +#define BWMON_GLOBAL_IRQ_ENABLE_ENABLE BIT(0) + +#define BWMON_IRQ_STATUS 0x100 +#define BWMON_IRQ_STATUS_ZONE_SHIFT 4 +#define BWMON_IRQ_CLEAR 0x108 +#define BWMON_IRQ_ENABLE 0x10c +#define BWMON_IRQ_ENABLE_ZONE1_SHIFT 5 +#define BWMON_IRQ_ENABLE_ZONE2_SHIFT 6 +#define BWMON_IRQ_ENABLE_ZONE3_SHIFT 7 +#define BWMON_IRQ_ENABLE_MASK (BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT) | \ + BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT)) + +#define BWMON_ENABLE 0x2a0 +#define BWMON_ENABLE_ENABLE BIT(0) + +#define BWMON_CLEAR 0x2a4 +#define BWMON_CLEAR_CLEAR BIT(0) + +#define BWMON_SAMPLE_WINDOW 0x2a8 +#define BWMON_THRESHOLD_HIGH 0x2ac +#define BWMON_THRESHOLD_MED 0x2b0 +#define BWMON_THRESHOLD_LOW 0x2b4 + +#define BWMON_ZONE_ACTIONS 0x2b8 +/* + * Actions to perform on some zone 'z' when current zone hits the threshold: + * Increment counter of zone 'z' + */ +#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2)) +/* Clear counter of zone 'z' */ +#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2)) + +/* Zone 0 threshold hit: Clear zone count */ +#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 1 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 2 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 3 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \ + BWMON_ZONE_ACTIONS_CLEAR(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) +/* Value for BWMON_ZONE_ACTIONS */ +#define BWMON_ZONE_ACTIONS_DEFAULT (BWMON_ZONE_ACTIONS_ZONE0 | \ + BWMON_ZONE_ACTIONS_ZONE1 << 8 | \ + BWMON_ZONE_ACTIONS_ZONE2 << 16 | \ + BWMON_ZONE_ACTIONS_ZONE3 << 24) + +/* + * There is no clear documentation/explanation of BWMON_THRESHOLD_COUNT + * register. Based on observations, this is number of times one threshold has to + * be reached, to trigger interrupt in given zone. + * + * 0xff are maximum values meant to ignore the zones 0 and 2. + */ +#define BWMON_THRESHOLD_COUNT 0x2bc +#define BWMON_THRESHOLD_COUNT_ZONE1_SHIFT 8 +#define BWMON_THRESHOLD_COUNT_ZONE2_SHIFT 16 +#define BWMON_THRESHOLD_COUNT_ZONE3_SHIFT 24 +#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff +#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff + +/* BWMONv4 count registers use count unit of 64 kB */ +#define BWMON_COUNT_UNIT_KB 64 +#define BWMON_ZONE_COUNT 0x2d8 +#define BWMON_ZONE_MAX(zone) (0x2e0 + 4 * (zone)) + +struct icc_bwmon_data { + unsigned int sample_ms; + unsigned int default_highbw_kbps; + unsigned int default_medbw_kbps; + unsigned int default_lowbw_kbps; + u8 zone1_thres_count; + u8 zone3_thres_count; +}; + +struct icc_bwmon { + struct device *dev; + void __iomem *base; + int irq; + + unsigned int default_lowbw_kbps; + unsigned int sample_ms; + unsigned int max_bw_kbps; + unsigned int min_bw_kbps; + unsigned int target_kbps; + unsigned int current_kbps; +}; + +static void bwmon_clear_counters(struct icc_bwmon *bwmon) +{ + /* + * Clear counters. The order and barriers are + * important. 
Quoting downstream Qualcomm msm-4.9 tree: + * + * The counter clear and IRQ clear bits are not in the same 4KB + * region. So, we need to make sure the counter clear is completed + * before we try to clear the IRQ or do any other counter operations. + */ + writel(BWMON_CLEAR_CLEAR, bwmon->base + BWMON_CLEAR); +} + +static void bwmon_clear_irq(struct icc_bwmon *bwmon) +{ + /* + * Clear zone and global interrupts. The order and barriers are + * important. Quoting downstream Qualcomm msm-4.9 tree: + * + * Synchronize the local interrupt clear in mon_irq_clear() + * with the global interrupt clear here. Otherwise, the CPU + * may reorder the two writes and clear the global interrupt + * before the local interrupt, causing the global interrupt + * to be retriggered by the local interrupt still being high. + * + * Similarly, because the global registers are in a different + * region than the local registers, we need to ensure any register + * writes to enable the monitor after this call are ordered with the + * clearing here so that local writes don't happen before the + * interrupt is cleared. + */ + writel(BWMON_IRQ_ENABLE_MASK, bwmon->base + BWMON_IRQ_CLEAR); + writel(BIT(0), bwmon->base + BWMON_GLOBAL_IRQ_CLEAR); +} + +static void bwmon_disable(struct icc_bwmon *bwmon) +{ + /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */ + writel(0x0, bwmon->base + BWMON_GLOBAL_IRQ_ENABLE); + writel(0x0, bwmon->base + BWMON_IRQ_ENABLE); + + /* + * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious + * IRQ. + */ + writel(0x0, bwmon->base + BWMON_ENABLE); +} + +static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable) +{ + /* Enable interrupts */ + writel(BWMON_GLOBAL_IRQ_ENABLE_ENABLE, + bwmon->base + BWMON_GLOBAL_IRQ_ENABLE); + writel(irq_enable, bwmon->base + BWMON_IRQ_ENABLE); + + /* Enable bwmon */ + writel(BWMON_ENABLE_ENABLE, bwmon->base + BWMON_ENABLE); +} + +static unsigned int bwmon_kbps_to_count(unsigned int kbps) +{ + return kbps / BWMON_COUNT_UNIT_KB; +} + +static void bwmon_set_threshold(struct icc_bwmon *bwmon, unsigned int reg, + unsigned int kbps) +{ + unsigned int thres; + + thres = mult_frac(bwmon_kbps_to_count(kbps), bwmon->sample_ms, + MSEC_PER_SEC); + writel_relaxed(thres, bwmon->base + reg); +} + +static void bwmon_start(struct icc_bwmon *bwmon, + const struct icc_bwmon_data *data) +{ + unsigned int thres_count; + int window; + + bwmon_clear_counters(bwmon); + + window = mult_frac(bwmon->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC); + /* Maximum sampling window: 0xfffff */ + writel_relaxed(window, bwmon->base + BWMON_SAMPLE_WINDOW); + + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, + data->default_highbw_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, + data->default_medbw_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_LOW, + data->default_lowbw_kbps); + + thres_count = data->zone3_thres_count << BWMON_THRESHOLD_COUNT_ZONE3_SHIFT | + BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT << BWMON_THRESHOLD_COUNT_ZONE2_SHIFT | + data->zone1_thres_count << BWMON_THRESHOLD_COUNT_ZONE1_SHIFT | + BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT; + writel_relaxed(thres_count, bwmon->base + BWMON_THRESHOLD_COUNT); + writel_relaxed(BWMON_ZONE_ACTIONS_DEFAULT, + bwmon->base + BWMON_ZONE_ACTIONS); + /* Write barriers in bwmon_clear_irq() */ + + bwmon_clear_irq(bwmon); + bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK); +} + +static irqreturn_t bwmon_intr(int irq, void *dev_id) +{ + struct icc_bwmon *bwmon = dev_id; + unsigned int status, max; + int zone; + + status = 
readl(bwmon->base + BWMON_IRQ_STATUS);
+	status &= BWMON_IRQ_ENABLE_MASK;
+	if (!status) {
+		/*
+		 * Only zone 1 and zone 3 interrupts are enabled, but the
+		 * zone 2 threshold could be hit and trigger the interrupt
+		 * even if not enabled.
+		 * Such a spurious interrupt may or may not come with a
+		 * valuable max count, so a solution would be to always check
+		 * all BWMON_ZONE_MAX() registers to find the highest value.
+		 * That case is currently ignored.
+		 */
+		return IRQ_NONE;
+	}
+
+	bwmon_disable(bwmon);
+
+	zone = get_bitmask_order(status >> BWMON_IRQ_STATUS_ZONE_SHIFT) - 1;
+	/*
+	 * The zone max bytes count register returns count units within the
+	 * sampling window. The downstream kernel for BWMONv4 (called BWMON
+	 * type 2 in downstream) always increments the max bytes count by one.
+	 */
+	max = readl(bwmon->base + BWMON_ZONE_MAX(zone)) + 1;
+	max *= BWMON_COUNT_UNIT_KB;
+	bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->sample_ms);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
+{
+	struct icc_bwmon *bwmon = dev_id;
+	unsigned int irq_enable = 0;
+	struct dev_pm_opp *opp, *target_opp;
+	unsigned int bw_kbps, up_kbps, down_kbps;
+
+	bw_kbps = bwmon->target_kbps;
+
+	target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
+	if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
+		target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+
+	bwmon->target_kbps = bw_kbps;
+
+	bw_kbps--;
+	opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+		down_kbps = bwmon->target_kbps;
+	else
+		down_kbps = bw_kbps;
+
+	up_kbps = bwmon->target_kbps + 1;
+
+	if (bwmon->target_kbps >= bwmon->max_bw_kbps)
+		irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT);
+	else if (bwmon->target_kbps <= bwmon->min_bw_kbps)
+		irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT);
+	else
+		irq_enable = BWMON_IRQ_ENABLE_MASK;
+
+	bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, up_kbps);
+	bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, down_kbps);
+	/* Write barriers in bwmon_clear_counters() */
+	bwmon_clear_counters(bwmon);
+	bwmon_clear_irq(bwmon);
+	bwmon_enable(bwmon, irq_enable);
+
+	if (bwmon->target_kbps == bwmon->current_kbps)
+		goto out;
+
+	dev_pm_opp_set_opp(bwmon->dev, target_opp);
+	bwmon->current_kbps = bwmon->target_kbps;
+
+out:
+	dev_pm_opp_put(target_opp);
+	if (!IS_ERR(opp))
+		dev_pm_opp_put(opp);
+
+	return IRQ_HANDLED;
+}
+
+static int bwmon_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dev_pm_opp *opp;
+	struct icc_bwmon *bwmon;
+	const struct icc_bwmon_data *data;
+	int ret;
+
+	bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL);
+	if (!bwmon)
+		return -ENOMEM;
+
+	data = of_device_get_match_data(dev);
+
+	bwmon->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(bwmon->base)) {
+		dev_err(dev, "failed to map bwmon registers\n");
+		return PTR_ERR(bwmon->base);
+	}
+
+	bwmon->irq = platform_get_irq(pdev, 0);
+	if (bwmon->irq < 0)
+		return bwmon->irq;
+
+	ret = devm_pm_opp_of_add_table(dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to add OPP table\n");
+
+	bwmon->max_bw_kbps = UINT_MAX;
+	opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+	if (IS_ERR(opp))
+		return dev_err_probe(dev, PTR_ERR(opp),
+				     "failed to find max peak bandwidth\n");
+
+	bwmon->min_bw_kbps = 0;
+	opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+	if (IS_ERR(opp))
+		return dev_err_probe(dev, PTR_ERR(opp),
+				     "failed to find min peak bandwidth\n");
+
+	bwmon->sample_ms = data->sample_ms;
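+	/* bwmon_start() later converts sample_ms into the HW sampling window. */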
+ bwmon->default_lowbw_kbps = data->default_lowbw_kbps; + bwmon->dev = dev; + + bwmon_disable(bwmon); + ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, + bwmon_intr_thread, + IRQF_ONESHOT, dev_name(dev), bwmon); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ\n"); + + platform_set_drvdata(pdev, bwmon); + bwmon_start(bwmon, data); + + return 0; +} + +static int bwmon_remove(struct platform_device *pdev) +{ + struct icc_bwmon *bwmon = platform_get_drvdata(pdev); + + bwmon_disable(bwmon); + + return 0; +} + +/* BWMON v4 */ +static const struct icc_bwmon_data msm8998_bwmon_data = { + .sample_ms = 4, + .default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */ + .default_medbw_kbps = 512 * 1024, /* 512 MBps */ + .default_lowbw_kbps = 0, + .zone1_thres_count = 16, + .zone3_thres_count = 1, +}; + +static const struct of_device_id bwmon_of_match[] = { + { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data }, + {} +}; +MODULE_DEVICE_TABLE(of, bwmon_of_match); + +static struct platform_driver bwmon_driver = { + .probe = bwmon_probe, + .remove = bwmon_remove, + .driver = { + .name = "qcom-bwmon", + .of_match_table = bwmon_of_match, + }, +}; +module_platform_driver(bwmon_driver); + +MODULE_AUTHOR("Krzysztof Kozlowski "); +MODULE_DESCRIPTION("QCOM BWMON driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/renesas/r8a779g0-sysc.c b/drivers/soc/renesas/r8a779g0-sysc.c new file mode 100644 index 0000000000..a452709f06 --- /dev/null +++ b/drivers/soc/renesas/r8a779g0-sysc.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas R-Car V4H System Controller + * + * Copyright (C) 2022 Renesas Electronics Corp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "rcar-gen4-sysc.h" + +static struct rcar_gen4_sysc_area r8a779g0_areas[] __initdata = { + { "always-on", R8A779G0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, + { "a3e0", R8A779G0_PD_A3E0, R8A779G0_PD_ALWAYS_ON, PD_SCU }, + { "a2e0d0", R8A779G0_PD_A2E0D0, R8A779G0_PD_A3E0, PD_SCU }, + { "a2e0d1", R8A779G0_PD_A2E0D1, R8A779G0_PD_A3E0, PD_SCU }, + { "a1e0d0c0", R8A779G0_PD_A1E0D0C0, R8A779G0_PD_A2E0D0, PD_CPU_NOCR }, + { "a1e0d0c1", R8A779G0_PD_A1E0D0C1, R8A779G0_PD_A2E0D0, PD_CPU_NOCR }, + { "a1e0d1c0", R8A779G0_PD_A1E0D1C0, R8A779G0_PD_A2E0D1, PD_CPU_NOCR }, + { "a1e0d1c1", R8A779G0_PD_A1E0D1C1, R8A779G0_PD_A2E0D1, PD_CPU_NOCR }, + { "a33dga", R8A779G0_PD_A33DGA, R8A779G0_PD_ALWAYS_ON }, + { "a23dgb", R8A779G0_PD_A23DGB, R8A779G0_PD_A33DGA }, + { "a3vip0", R8A779G0_PD_A3VIP0, R8A779G0_PD_ALWAYS_ON }, + { "a3vip1", R8A779G0_PD_A3VIP1, R8A779G0_PD_ALWAYS_ON }, + { "a3vip2", R8A779G0_PD_A3VIP2, R8A779G0_PD_ALWAYS_ON }, + { "a3isp0", R8A779G0_PD_A3ISP0, R8A779G0_PD_ALWAYS_ON }, + { "a3isp1", R8A779G0_PD_A3ISP1, R8A779G0_PD_ALWAYS_ON }, + { "a3ir", R8A779G0_PD_A3IR, R8A779G0_PD_ALWAYS_ON }, + { "a2cn0", R8A779G0_PD_A2CN0, R8A779G0_PD_A3IR }, + { "a1cnn0", R8A779G0_PD_A1CNN0, R8A779G0_PD_A2CN0 }, + { "a1dsp0", R8A779G0_PD_A1DSP0, R8A779G0_PD_A2CN0 }, + { "a1dsp1", R8A779G0_PD_A1DSP1, R8A779G0_PD_A2CN0 }, + { "a1dsp2", R8A779G0_PD_A1DSP2, R8A779G0_PD_A2CN0 }, + { "a1dsp3", R8A779G0_PD_A1DSP3, R8A779G0_PD_A2CN0 }, + { "a2imp01", R8A779G0_PD_A2IMP01, R8A779G0_PD_A3IR }, + { "a2imp23", R8A779G0_PD_A2IMP23, R8A779G0_PD_A3IR }, + { "a2psc", R8A779G0_PD_A2PSC, R8A779G0_PD_A3IR }, + { "a2dma", R8A779G0_PD_A2DMA, R8A779G0_PD_A3IR }, + { "a2cv0", R8A779G0_PD_A2CV0, R8A779G0_PD_A3IR }, + { "a2cv1", 
R8A779G0_PD_A2CV1, R8A779G0_PD_A3IR }, + { "a2cv2", R8A779G0_PD_A2CV2, R8A779G0_PD_A3IR }, + { "a2cv3", R8A779G0_PD_A2CV3, R8A779G0_PD_A3IR }, +}; + +const struct rcar_gen4_sysc_info r8a779g0_sysc_info __initconst = { + .areas = r8a779g0_areas, + .num_areas = ARRAY_SIZE(r8a779g0_areas), +}; diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c new file mode 100644 index 0000000000..3e891bf224 --- /dev/null +++ b/drivers/spi/spi-aspeed-smc.c @@ -0,0 +1,1220 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ASPEED FMC/SPI Memory Controller Driver + * + * Copyright (c) 2015-2022, IBM Corporation. + * Copyright (c) 2020, ASPEED Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define DEVICE_NAME "spi-aspeed-smc" + +/* Type setting Register */ +#define CONFIG_REG 0x0 +#define CONFIG_TYPE_SPI 0x2 + +/* CE Control Register */ +#define CE_CTRL_REG 0x4 + +/* CEx Control Register */ +#define CE0_CTRL_REG 0x10 +#define CTRL_IO_MODE_MASK GENMASK(30, 28) +#define CTRL_IO_SINGLE_DATA 0x0 +#define CTRL_IO_DUAL_DATA BIT(29) +#define CTRL_IO_QUAD_DATA BIT(30) +#define CTRL_COMMAND_SHIFT 16 +#define CTRL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI only */ +#define CTRL_IO_DUMMY_SET(dummy) \ + (((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6)) +#define CTRL_FREQ_SEL_SHIFT 8 +#define CTRL_FREQ_SEL_MASK GENMASK(11, CTRL_FREQ_SEL_SHIFT) +#define CTRL_CE_STOP_ACTIVE BIT(2) +#define CTRL_IO_MODE_CMD_MASK GENMASK(1, 0) +#define CTRL_IO_MODE_NORMAL 0x0 +#define CTRL_IO_MODE_READ 0x1 +#define CTRL_IO_MODE_WRITE 0x2 +#define CTRL_IO_MODE_USER 0x3 + +#define CTRL_IO_CMD_MASK 0xf0ff40c3 + +/* CEx Address Decoding Range Register */ +#define CE0_SEGMENT_ADDR_REG 0x30 + +/* CEx Read timing compensation register */ +#define CE0_TIMING_COMPENSATION_REG 0x94 + +enum aspeed_spi_ctl_reg_value { + ASPEED_SPI_BASE, + ASPEED_SPI_READ, + ASPEED_SPI_WRITE, + ASPEED_SPI_MAX, +}; + +struct aspeed_spi; + +struct aspeed_spi_chip { + struct aspeed_spi *aspi; + u32 cs; + void __iomem *ctl; + void __iomem *ahb_base; + u32 ahb_window_size; + u32 ctl_val[ASPEED_SPI_MAX]; + u32 clk_freq; +}; + +struct aspeed_spi_data { + u32 ctl0; + u32 max_cs; + bool hastype; + u32 mode_bits; + u32 we0; + u32 timing; + u32 hclk_mask; + u32 hdiv_max; + + u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg); + u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg); + u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end); + int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv, + const u8 *golden_buf, u8 *test_buf); +}; + +#define ASPEED_SPI_MAX_NUM_CS 5 + +struct aspeed_spi { + const struct aspeed_spi_data *data; + + void __iomem *regs; + void __iomem *ahb_base; + u32 ahb_base_phy; + u32 ahb_window_size; + struct device *dev; + + struct clk *clk; + u32 clk_freq; + + struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS]; +}; + +static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op) +{ + switch (op->data.buswidth) { + case 1: + return CTRL_IO_SINGLE_DATA; + case 2: + return CTRL_IO_DUAL_DATA; + case 4: + return CTRL_IO_QUAD_DATA; + default: + return CTRL_IO_SINGLE_DATA; + } +} + +static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode) +{ + u32 ctl; + + if (io_mode > 0) { + ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK; + ctl |= io_mode; + writel(ctl, chip->ctl); + } +} + +static void aspeed_spi_start_user(struct aspeed_spi_chip *chip) +{ + u32 ctl = chip->ctl_val[ASPEED_SPI_BASE]; + + ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE; + writel(ctl, 
chip->ctl); + + ctl &= ~CTRL_CE_STOP_ACTIVE; + writel(ctl, chip->ctl); +} + +static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip) +{ + u32 ctl = chip->ctl_val[ASPEED_SPI_READ] | + CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE; + + writel(ctl, chip->ctl); + + /* Restore defaults */ + writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); +} + +static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len) +{ + size_t offset = 0; + + if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) && + IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) { + ioread32_rep(src, buf, len >> 2); + offset = len & ~0x3; + len -= offset; + } + ioread8_rep(src, (u8 *)buf + offset, len); + return 0; +} + +static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len) +{ + size_t offset = 0; + + if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) && + IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) { + iowrite32_rep(dst, buf, len >> 2); + offset = len & ~0x3; + len -= offset; + } + iowrite8_rep(dst, (const u8 *)buf + offset, len); + return 0; +} + +static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes, + u64 offset, u32 opcode) +{ + __be32 temp; + u32 cmdaddr; + + switch (addr_nbytes) { + case 3: + cmdaddr = offset & 0xFFFFFF; + cmdaddr |= opcode << 24; + + temp = cpu_to_be32(cmdaddr); + aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4); + break; + case 4: + temp = cpu_to_be32(offset); + aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1); + aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4); + break; + default: + WARN_ONCE(1, "Unexpected address width %u", addr_nbytes); + return -EOPNOTSUPP; + } + return 0; +} + +static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip, + const struct spi_mem_op *op) +{ + aspeed_spi_start_user(chip); + aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1); + aspeed_spi_read_from_ahb(op->data.buf.in, + chip->ahb_base, op->data.nbytes); + aspeed_spi_stop_user(chip); + return 0; +} + +static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip, + const struct spi_mem_op *op) +{ + aspeed_spi_start_user(chip); + aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1); + aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, + op->data.nbytes); + aspeed_spi_stop_user(chip); + return 0; +} + +static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip, + const struct spi_mem_op *op, + u64 offset, size_t len, void *buf) +{ + int io_mode = aspeed_spi_get_io_mode(op); + u8 dummy = 0xFF; + int i; + int ret; + + aspeed_spi_start_user(chip); + + ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode); + if (ret < 0) + return ret; + + if (op->dummy.buswidth && op->dummy.nbytes) { + for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++) + aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy)); + } + + aspeed_spi_set_io_mode(chip, io_mode); + + aspeed_spi_read_from_ahb(buf, chip->ahb_base, len); + aspeed_spi_stop_user(chip); + return 0; +} + +static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip, + const struct spi_mem_op *op) +{ + int ret; + + aspeed_spi_start_user(chip); + ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode); + if (ret < 0) + return ret; + aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes); + aspeed_spi_stop_user(chip); + return 0; +} + +/* support for 1-1-1, 1-1-2 or 1-1-4 */ +static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + if (op->cmd.buswidth > 1) + return 
false; + + if (op->addr.nbytes != 0) { + if (op->addr.buswidth > 1) + return false; + if (op->addr.nbytes < 3 || op->addr.nbytes > 4) + return false; + } + + if (op->dummy.nbytes != 0) { + if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7) + return false; + } + + if (op->data.nbytes != 0 && op->data.buswidth > 4) + return false; + + return spi_mem_default_supports_op(mem, op); +} + +static const struct aspeed_spi_data ast2400_spi_data; + +static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master); + struct aspeed_spi_chip *chip = &aspi->chips[mem->spi->chip_select]; + u32 addr_mode, addr_mode_backup; + u32 ctl_val; + int ret = 0; + + dev_dbg(aspi->dev, + "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x", + chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write", + op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, + op->dummy.buswidth, op->data.buswidth, + op->addr.nbytes, op->dummy.nbytes, op->data.nbytes); + + addr_mode = readl(aspi->regs + CE_CTRL_REG); + addr_mode_backup = addr_mode; + + ctl_val = chip->ctl_val[ASPEED_SPI_BASE]; + ctl_val &= ~CTRL_IO_CMD_MASK; + + ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT; + + /* 4BYTE address mode */ + if (op->addr.nbytes) { + if (op->addr.nbytes == 4) + addr_mode |= (0x11 << chip->cs); + else + addr_mode &= ~(0x11 << chip->cs); + + if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data) + ctl_val |= CTRL_IO_ADDRESS_4B; + } + + if (op->dummy.nbytes) + ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); + + if (op->data.nbytes) + ctl_val |= aspeed_spi_get_io_mode(op); + + if (op->data.dir == SPI_MEM_DATA_OUT) + ctl_val |= CTRL_IO_MODE_WRITE; + else + ctl_val |= CTRL_IO_MODE_READ; + + if (addr_mode != addr_mode_backup) + writel(addr_mode, aspi->regs + CE_CTRL_REG); + writel(ctl_val, chip->ctl); + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (!op->addr.nbytes) + ret = aspeed_spi_read_reg(chip, op); + else + ret = aspeed_spi_read_user(chip, op, op->addr.val, + op->data.nbytes, op->data.buf.in); + } else { + if (!op->addr.nbytes) + ret = aspeed_spi_write_reg(chip, op); + else + ret = aspeed_spi_write_user(chip, op); + } + + /* Restore defaults */ + if (addr_mode != addr_mode_backup) + writel(addr_mode_backup, aspi->regs + CE_CTRL_REG); + writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); + return ret; +} + +static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + int ret; + + ret = do_aspeed_spi_exec_op(mem, op); + if (ret) + dev_err(&mem->spi->dev, "operation failed: %d\n", ret); + return ret; +} + +static const char *aspeed_spi_get_name(struct spi_mem *mem) +{ + struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master); + struct device *dev = aspi->dev; + + return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select); +} + +struct aspeed_spi_window { + u32 cs; + u32 offset; + u32 size; +}; + +static void aspeed_spi_get_windows(struct aspeed_spi *aspi, + struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS]) +{ + const struct aspeed_spi_data *data = aspi->data; + u32 reg_val; + u32 cs; + + for (cs = 0; cs < aspi->data->max_cs; cs++) { + reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4); + windows[cs].cs = cs; + windows[cs].size = data->segment_end(aspi, reg_val) - + data->segment_start(aspi, reg_val); + windows[cs].offset = cs ? 
windows[cs - 1].offset + windows[cs - 1].size : 0;
+		dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
+			 windows[cs].offset, windows[cs].size);
+	}
+}
+
+/*
+ * On the AST2600, some CE windows are closed by default at reset but
+ * U-Boot should open them all.
+ */
+static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
+{
+	struct aspeed_spi *aspi = chip->aspi;
+	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
+	struct aspeed_spi_window *win = &windows[chip->cs];
+
+	/* No segment registers for the AST2400 SPI controller */
+	if (aspi->data == &ast2400_spi_data) {
+		win->offset = 0;
+		win->size = aspi->ahb_window_size;
+	} else {
+		aspeed_spi_get_windows(aspi, windows);
+	}
+
+	chip->ahb_base = aspi->ahb_base + win->offset;
+	chip->ahb_window_size = win->size;
+
+	dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
+		chip->cs, aspi->ahb_base_phy + win->offset,
+		aspi->ahb_base_phy + win->offset + win->size - 1,
+		win->size >> 20);
+
+	return chip->ahb_window_size ? 0 : -1;
+}
+
+static int aspeed_spi_set_window(struct aspeed_spi *aspi,
+				 const struct aspeed_spi_window *win)
+{
+	u32 start = aspi->ahb_base_phy + win->offset;
+	u32 end = start + win->size;
+	void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
+	u32 seg_val_backup = readl(seg_reg);
+	u32 seg_val = aspi->data->segment_reg(aspi, start, end);
+
+	if (seg_val == seg_val_backup)
+		return 0;
+
+	writel(seg_val, seg_reg);
+
+	/*
+	 * Restore the initial value if something goes wrong, else we could
+	 * lose access to the chip.
+	 */
+	if (seg_val != readl(seg_reg)) {
+		dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
+			win->cs, start, end - 1, win->size >> 20);
+		writel(seg_val_backup, seg_reg);
+		return -EIO;
+	}
+
+	if (win->size)
+		dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
+			win->cs, start, end - 1, win->size >> 20);
+	else
+		dev_dbg(aspi->dev, "CE%d window closed", win->cs);
+
+	return 0;
+}
+
+/*
+ * Yet to be done when possible:
+ * - Align mappings on flash size (we don't have the info)
+ * - ioremap each window; not strictly necessary since the overall window
+ *   is correct.
+ */
+static const struct aspeed_spi_data ast2500_spi_data;
+static const struct aspeed_spi_data ast2600_spi_data;
+static const struct aspeed_spi_data ast2600_fmc_data;
+
+static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
+					 u32 local_offset, u32 size)
+{
+	struct aspeed_spi *aspi = chip->aspi;
+	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
+	struct aspeed_spi_window *win = &windows[chip->cs];
+	int ret;
+
+	/* No segment registers for the AST2400 SPI controller */
+	if (aspi->data == &ast2400_spi_data)
+		return 0;
+
+	/*
+	 * Due to a HW issue on the AST2500 SPI controller, the CE0
+	 * window size should be smaller than the maximum 128MB.
+	 */
+	if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
+		size = 120 << 20;
+		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
+			 chip->cs, size >> 20);
+	}
+
+	/*
+	 * The decoding size of the AST2600 SPI controller should be set to at
+	 * least 2MB.
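+	 *
+	 * For instance (illustrative values): a 1 MB dirmap request on an
+	 * AST2600 CE window is rounded up here to SZ_2M before being
+	 * programmed, just as a full 128 MB request on AST2500 CE0 is
+	 * first clamped to 120 MB by the quirk above.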
+ */ + if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) && + size < SZ_2M) { + size = SZ_2M; + dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)", + chip->cs, size >> 20); + } + + aspeed_spi_get_windows(aspi, windows); + + /* Adjust this chip window */ + win->offset += local_offset; + win->size = size; + + if (win->offset + win->size > aspi->ahb_window_size) { + win->size = aspi->ahb_window_size - win->offset; + dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20); + } + + ret = aspeed_spi_set_window(aspi, win); + if (ret) + return ret; + + /* Update chip mapping info */ + chip->ahb_base = aspi->ahb_base + win->offset; + chip->ahb_window_size = win->size; + + /* + * Also adjust next chip window to make sure that it does not + * overlap with the current window. + */ + if (chip->cs < aspi->data->max_cs - 1) { + struct aspeed_spi_window *next = &windows[chip->cs + 1]; + + /* Change offset and size to keep the same end address */ + if ((next->offset + next->size) > (win->offset + win->size)) + next->size = (next->offset + next->size) - (win->offset + win->size); + else + next->size = 0; + next->offset = win->offset + win->size; + + aspeed_spi_set_window(aspi, next); + } + return 0; +} + +static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip); + +static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc) +{ + struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master); + struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select]; + struct spi_mem_op *op = &desc->info.op_tmpl; + u32 ctl_val; + int ret = 0; + + dev_dbg(aspi->dev, + "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n", + chip->cs, op->data.dir == SPI_MEM_DATA_IN ? 
"read" : "write", + desc->info.offset, desc->info.offset + desc->info.length, + op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, + op->dummy.buswidth, op->data.buswidth, + op->addr.nbytes, op->dummy.nbytes); + + chip->clk_freq = desc->mem->spi->max_speed_hz; + + /* Only for reads */ + if (op->data.dir != SPI_MEM_DATA_IN) + return -EOPNOTSUPP; + + aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length); + + if (desc->info.length > chip->ahb_window_size) + dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping", + chip->cs, chip->ahb_window_size >> 20); + + /* Define the default IO read settings */ + ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK; + ctl_val |= aspeed_spi_get_io_mode(op) | + op->cmd.opcode << CTRL_COMMAND_SHIFT | + CTRL_IO_MODE_READ; + + if (op->dummy.nbytes) + ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); + + /* Tune 4BYTE address mode */ + if (op->addr.nbytes) { + u32 addr_mode = readl(aspi->regs + CE_CTRL_REG); + + if (op->addr.nbytes == 4) + addr_mode |= (0x11 << chip->cs); + else + addr_mode &= ~(0x11 << chip->cs); + writel(addr_mode, aspi->regs + CE_CTRL_REG); + + /* AST2400 SPI controller sets 4BYTE address mode in + * CE0 Control Register + */ + if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data) + ctl_val |= CTRL_IO_ADDRESS_4B; + } + + /* READ mode is the controller default setting */ + chip->ctl_val[ASPEED_SPI_READ] = ctl_val; + writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); + + ret = aspeed_spi_do_calibration(chip); + + dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n", + chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]); + + return ret; +} + +static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, + u64 offset, size_t len, void *buf) +{ + struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master); + struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select]; + + /* Switch to USER command mode if mapping window is too small */ + if (chip->ahb_window_size < offset + len) { + int ret; + + ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf); + if (ret < 0) + return ret; + } else { + memcpy_fromio(buf, chip->ahb_base + offset, len); + } + + return len; +} + +static const struct spi_controller_mem_ops aspeed_spi_mem_ops = { + .supports_op = aspeed_spi_supports_op, + .exec_op = aspeed_spi_exec_op, + .get_name = aspeed_spi_get_name, + .dirmap_create = aspeed_spi_dirmap_create, + .dirmap_read = aspeed_spi_dirmap_read, +}; + +static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type) +{ + u32 reg; + + reg = readl(aspi->regs + CONFIG_REG); + reg &= ~(0x3 << (cs * 2)); + reg |= type << (cs * 2); + writel(reg, aspi->regs + CONFIG_REG); +} + +static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable) +{ + u32 we_bit = BIT(aspi->data->we0 + cs); + u32 reg = readl(aspi->regs + CONFIG_REG); + + if (enable) + reg |= we_bit; + else + reg &= ~we_bit; + writel(reg, aspi->regs + CONFIG_REG); +} + +static int aspeed_spi_setup(struct spi_device *spi) +{ + struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master); + const struct aspeed_spi_data *data = aspi->data; + unsigned int cs = spi->chip_select; + struct aspeed_spi_chip *chip = &aspi->chips[cs]; + + chip->aspi = aspi; + chip->cs = cs; + chip->ctl = aspi->regs + data->ctl0 + cs * 4; + + /* The driver only supports SPI type flash */ + if (data->hastype) + aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI); + + if 
(aspeed_spi_chip_set_default_window(chip) < 0) { + dev_warn(aspi->dev, "CE%d window invalid", cs); + return -EINVAL; + } + + aspeed_spi_chip_enable(aspi, cs, true); + + chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER; + + dev_dbg(aspi->dev, "CE%d setup done\n", cs); + return 0; +} + +static void aspeed_spi_cleanup(struct spi_device *spi) +{ + struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master); + unsigned int cs = spi->chip_select; + + aspeed_spi_chip_enable(aspi, cs, false); + + dev_dbg(aspi->dev, "CE%d cleanup done\n", cs); +} + +static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable) +{ + int cs; + + for (cs = 0; cs < aspi->data->max_cs; cs++) + aspeed_spi_chip_enable(aspi, cs, enable); +} + +static int aspeed_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct aspeed_spi_data *data; + struct spi_controller *ctlr; + struct aspeed_spi *aspi; + struct resource *res; + int ret; + + data = of_device_get_match_data(&pdev->dev); + if (!data) + return -ENODEV; + + ctlr = devm_spi_alloc_master(dev, sizeof(*aspi)); + if (!ctlr) + return -ENOMEM; + + aspi = spi_controller_get_devdata(ctlr); + platform_set_drvdata(pdev, aspi); + aspi->data = data; + aspi->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + aspi->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(aspi->regs)) { + dev_err(dev, "missing AHB register window\n"); + return PTR_ERR(aspi->regs); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + aspi->ahb_base = devm_ioremap_resource(dev, res); + if (IS_ERR(aspi->ahb_base)) { + dev_err(dev, "missing AHB mapping window\n"); + return PTR_ERR(aspi->ahb_base); + } + + aspi->ahb_window_size = resource_size(res); + aspi->ahb_base_phy = res->start; + + aspi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(aspi->clk)) { + dev_err(dev, "missing clock\n"); + return PTR_ERR(aspi->clk); + } + + aspi->clk_freq = clk_get_rate(aspi->clk); + if (!aspi->clk_freq) { + dev_err(dev, "invalid clock\n"); + return -EINVAL; + } + + ret = clk_prepare_enable(aspi->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + /* IRQ is for DMA, which the driver doesn't support yet */ + + ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits; + ctlr->bus_num = pdev->id; + ctlr->mem_ops = &aspeed_spi_mem_ops; + ctlr->setup = aspeed_spi_setup; + ctlr->cleanup = aspeed_spi_cleanup; + ctlr->num_chipselect = data->max_cs; + ctlr->dev.of_node = dev->of_node; + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) { + dev_err(&pdev->dev, "spi_register_controller failed\n"); + goto disable_clk; + } + return 0; + +disable_clk: + clk_disable_unprepare(aspi->clk); + return ret; +} + +static int aspeed_spi_remove(struct platform_device *pdev) +{ + struct aspeed_spi *aspi = platform_get_drvdata(pdev); + + aspeed_spi_enable(aspi, false); + clk_disable_unprepare(aspi->clk); + return 0; +} + +/* + * AHB mappings + */ + +/* + * The Segment Registers of the AST2400 and AST2500 use a 8MB unit. + * The address range is encoded with absolute addresses in the overall + * mapping window. 
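+ *
+ * Example encoding (illustrative addresses): a window spanning
+ * 0x20000000-0x27ffffff is stored as ((0x20000000 >> 23) << 16) |
+ * ((0x28000000 >> 23) << 24) = 0x50400000, i.e. start unit 0x40 and
+ * end unit 0x50 in 8MB units.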
+ */ +static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg) +{ + return ((reg >> 16) & 0xFF) << 23; +} + +static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg) +{ + return ((reg >> 24) & 0xFF) << 23; +} + +static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end) +{ + return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24); +} + +/* + * The Segment Registers of the AST2600 use a 1MB unit. The address + * range is encoded with offsets in the overall mapping window. + */ + +#define AST2600_SEG_ADDR_MASK 0x0ff00000 + +static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi, + u32 reg) +{ + u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; + + return aspi->ahb_base_phy + start_offset; +} + +static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi, + u32 reg) +{ + u32 end_offset = reg & AST2600_SEG_ADDR_MASK; + + /* segment is disabled */ + if (!end_offset) + return aspi->ahb_base_phy; + + return aspi->ahb_base_phy + end_offset + 0x100000; +} + +static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi, + u32 start, u32 end) +{ + /* disable zero size segments */ + if (start == end) + return 0; + + return ((start & AST2600_SEG_ADDR_MASK) >> 16) | + ((end - 1) & AST2600_SEG_ADDR_MASK); +} + +/* + * Read timing compensation sequences + */ + +#define CALIBRATE_BUF_SIZE SZ_16K + +static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip, + const u8 *golden_buf, u8 *test_buf) +{ + int i; + + for (i = 0; i < 10; i++) { + memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE); + if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) { +#if defined(VERBOSE_DEBUG) + print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE, + test_buf, 0x100); +#endif + return false; + } + } + return true; +} + +#define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8)) + +/* + * The timing register is shared by all devices. Only update for CE0. + */ +static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv, + const u8 *golden_buf, u8 *test_buf) +{ + struct aspeed_spi *aspi = chip->aspi; + const struct aspeed_spi_data *data = aspi->data; + int i; + int good_pass = -1, pass_count = 0; + u32 shift = (hdiv - 1) << 2; + u32 mask = ~(0xfu << shift); + u32 fread_timing_val = 0; + + /* Try HCLK delay 0..5, each one with/without delay and look for a + * good pair. + */ + for (i = 0; i < 12; i++) { + bool pass; + + if (chip->cs == 0) { + fread_timing_val &= mask; + fread_timing_val |= FREAD_TPASS(i) << shift; + writel(fread_timing_val, aspi->regs + data->timing); + } + pass = aspeed_spi_check_reads(chip, golden_buf, test_buf); + dev_dbg(aspi->dev, + " * [%08x] %d HCLK delay, %dns DI delay : %s", + fread_timing_val, i / 2, (i & 1) ? 0 : 4, + pass ? 
"PASS" : "FAIL"); + if (pass) { + pass_count++; + if (pass_count == 3) { + good_pass = i - 1; + break; + } + } else { + pass_count = 0; + } + } + + /* No good setting for this frequency */ + if (good_pass < 0) + return -1; + + /* We have at least one pass of margin, let's use first pass */ + if (chip->cs == 0) { + fread_timing_val &= mask; + fread_timing_val |= FREAD_TPASS(good_pass) << shift; + writel(fread_timing_val, aspi->regs + data->timing); + } + dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]", + good_pass, fread_timing_val); + return 0; +} + +static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size) +{ + const u32 *tb32 = (const u32 *)test_buf; + u32 i, cnt = 0; + + /* We check if we have enough words that are neither all 0 + * nor all 1's so the calibration can be considered valid. + * + * I use an arbitrary threshold for now of 64 + */ + size >>= 2; + for (i = 0; i < size; i++) { + if (tb32[i] != 0 && tb32[i] != 0xffffffff) + cnt++; + } + return cnt >= 64; +} + +static const u32 aspeed_spi_hclk_divs[] = { + 0xf, /* HCLK */ + 0x7, /* HCLK/2 */ + 0xe, /* HCLK/3 */ + 0x6, /* HCLK/4 */ + 0xd, /* HCLK/5 */ +}; + +#define ASPEED_SPI_HCLK_DIV(i) \ + (aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT) + +static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip) +{ + struct aspeed_spi *aspi = chip->aspi; + const struct aspeed_spi_data *data = aspi->data; + u32 ahb_freq = aspi->clk_freq; + u32 max_freq = chip->clk_freq; + u32 ctl_val; + u8 *golden_buf = NULL; + u8 *test_buf = NULL; + int i, rc, best_div = -1; + + dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz", + ahb_freq / 1000000); + + /* + * use the related low frequency to get check calibration data + * and get golden data. + */ + ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask; + writel(ctl_val, chip->ctl); + + test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL); + if (!test_buf) + return -ENOMEM; + + golden_buf = test_buf + CALIBRATE_BUF_SIZE; + + memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE); + if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) { + dev_info(aspi->dev, "Calibration area too uniform, using low speed"); + goto no_calib; + } + +#if defined(VERBOSE_DEBUG) + print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE, + golden_buf, 0x100); +#endif + + /* Now we iterate the HCLK dividers until we find our breaking point */ + for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) { + u32 tv, freq; + + freq = ahb_freq / i; + if (freq > max_freq) + continue; + + /* Set the timing */ + tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i); + writel(tv, chip->ctl); + dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv); + rc = data->calibrate(chip, i, golden_buf, test_buf); + if (rc == 0) + best_div = i; + } + + /* Nothing found ? 
*/
+	if (best_div < 0) {
+		dev_warn(aspi->dev, "No good frequency, using dumb slow");
+	} else {
+		dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);
+
+		/* Record the freq */
+		for (i = 0; i < ASPEED_SPI_MAX; i++)
+			chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
+				ASPEED_SPI_HCLK_DIV(best_div);
+	}
+
+no_calib:
+	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+	kfree(test_buf);
+	return 0;
+}
+
+#define TIMING_DELAY_DI BIT(3)
+#define TIMING_DELAY_HCYCLE_MAX 5
+#define TIMING_REG_AST2600(chip) \
+	((chip)->aspi->regs + (chip)->aspi->data->timing + \
+	 (chip)->cs * 4)
+
+static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
+					const u8 *golden_buf, u8 *test_buf)
+{
+	struct aspeed_spi *aspi = chip->aspi;
+	int hcycle;
+	u32 shift = (hdiv - 2) << 3;
+	u32 mask = ~(0xfu << shift);
+	u32 fread_timing_val = 0;
+
+	for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
+		int delay_ns;
+		bool pass = false;
+
+		fread_timing_val &= mask;
+		fread_timing_val |= hcycle << shift;
+
+		/* no DI input delay first */
+		writel(fread_timing_val, TIMING_REG_AST2600(chip));
+		pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+		dev_dbg(aspi->dev,
+			" * [%08x] %d HCLK delay, DI delay none : %s",
+			fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
+		if (pass)
+			return 0;
+
+		/* Add DI input delays */
+		fread_timing_val &= mask;
+		fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;
+
+		for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
+			fread_timing_val &= ~(0xf << (4 + shift));
+			fread_timing_val |= delay_ns << (4 + shift);
+
+			writel(fread_timing_val, TIMING_REG_AST2600(chip));
+			pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+			dev_dbg(aspi->dev,
+				" * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
+				fread_timing_val, hcycle, (delay_ns + 1) / 2,
+				(delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
+			/*
+			 * TODO: This is optimistic. We should look
+			 * for a working interval and save the middle
+			 * value in the read timing register.
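+			 *
+			 * A possible refinement (sketch only, not implemented
+			 * here): keep scanning after the first PASS, record
+			 * the first and last passing delay_ns, and program
+			 * (first + last) / 2 so the chosen delay sits in the
+			 * middle of the working interval.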
+			 */
+			if (pass)
+				return 0;
+		}
+	}
+
+	/* No good setting for this frequency */
+	return -1;
+}
+
+/*
+ * Platform definitions
+ */
+static const struct aspeed_spi_data ast2400_fmc_data = {
+	.max_cs = 5,
+	.hastype = true,
+	.we0 = 16,
+	.ctl0 = CE0_CTRL_REG,
+	.timing = CE0_TIMING_COMPENSATION_REG,
+	.hclk_mask = 0xfffff0ff,
+	.hdiv_max = 1,
+	.calibrate = aspeed_spi_calibrate,
+	.segment_start = aspeed_spi_segment_start,
+	.segment_end = aspeed_spi_segment_end,
+	.segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2400_spi_data = {
+	.max_cs = 1,
+	.hastype = false,
+	.we0 = 0,
+	.ctl0 = 0x04,
+	.timing = 0x14,
+	.hclk_mask = 0xfffff0ff,
+	.hdiv_max = 1,
+	.calibrate = aspeed_spi_calibrate,
+	/* No segment registers */
+};
+
+static const struct aspeed_spi_data ast2500_fmc_data = {
+	.max_cs = 3,
+	.hastype = true,
+	.we0 = 16,
+	.ctl0 = CE0_CTRL_REG,
+	.timing = CE0_TIMING_COMPENSATION_REG,
+	.hclk_mask = 0xffffd0ff,
+	.hdiv_max = 1,
+	.calibrate = aspeed_spi_calibrate,
+	.segment_start = aspeed_spi_segment_start,
+	.segment_end = aspeed_spi_segment_end,
+	.segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2500_spi_data = {
+	.max_cs = 2,
+	.hastype = false,
+	.we0 = 16,
+	.ctl0 = CE0_CTRL_REG,
+	.timing = CE0_TIMING_COMPENSATION_REG,
+	.hclk_mask = 0xffffd0ff,
+	.hdiv_max = 1,
+	.calibrate = aspeed_spi_calibrate,
+	.segment_start = aspeed_spi_segment_start,
+	.segment_end = aspeed_spi_segment_end,
+	.segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2600_fmc_data = {
+	.max_cs = 3,
+	.hastype = false,
+	.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
+	.we0 = 16,
+	.ctl0 = CE0_CTRL_REG,
+	.timing = CE0_TIMING_COMPENSATION_REG,
+	.hclk_mask = 0xf0fff0ff,
+	.hdiv_max = 2,
+	.calibrate = aspeed_spi_ast2600_calibrate,
+	.segment_start = aspeed_spi_segment_ast2600_start,
+	.segment_end = aspeed_spi_segment_ast2600_end,
+	.segment_reg = aspeed_spi_segment_ast2600_reg,
+};
+
+static const struct aspeed_spi_data ast2600_spi_data = {
+	.max_cs = 2,
+	.hastype = false,
+	.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
+	.we0 = 16,
+	.ctl0 = CE0_CTRL_REG,
+	.timing = CE0_TIMING_COMPENSATION_REG,
+	.hclk_mask = 0xf0fff0ff,
+	.hdiv_max = 2,
+	.calibrate = aspeed_spi_ast2600_calibrate,
+	.segment_start = aspeed_spi_segment_ast2600_start,
+	.segment_end = aspeed_spi_segment_ast2600_end,
+	.segment_reg = aspeed_spi_segment_ast2600_reg,
+};
+
+static const struct of_device_id aspeed_spi_matches[] = {
+	{ .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
+	{ .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
+	{ .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
+	{ .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
+	{ .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
+	{ .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, aspeed_spi_matches);
+
+static struct platform_driver aspeed_spi_driver = {
+	.probe = aspeed_spi_probe,
+	.remove = aspeed_spi_remove,
+	.driver = {
+		.name = DEVICE_NAME,
+		.of_match_table = aspeed_spi_matches,
+	}
+};
+
+module_platform_driver(aspeed_spi_driver);
+
+MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
+MODULE_AUTHOR("Chin-Ting Kuo ");
+MODULE_AUTHOR("Cedric Le Goater ");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
new file mode 100644
index 0000000000..9ea355f7d6
--- /dev/null
+++ b/drivers/spi/spi-gxp.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. */
+
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define GXP_SPI0_MAX_CHIPSELECT 2
+#define GXP_SPI_SLEEP_TIME 1
+#define GXP_SPI_TIMEOUT (130 * 1000000 / GXP_SPI_SLEEP_TIME)
+
+#define MANUAL_MODE 0
+#define DIRECT_MODE 1
+#define SPILDAT_LEN 256
+
+#define OFFSET_SPIMCFG 0x0
+#define OFFSET_SPIMCTRL 0x4
+#define OFFSET_SPICMD 0x5
+#define OFFSET_SPIDCNT 0x6
+#define OFFSET_SPIADDR 0x8
+#define OFFSET_SPIINTSTS 0xc
+
+#define SPIMCTRL_START 0x01
+#define SPIMCTRL_BUSY 0x02
+#define SPIMCTRL_DIR 0x08
+
+struct gxp_spi;
+
+struct gxp_spi_chip {
+	struct gxp_spi *spifi;
+	u32 cs;
+};
+
+struct gxp_spi_data {
+	u32 max_cs;
+	u32 mode_bits;
+};
+
+struct gxp_spi {
+	const struct gxp_spi_data *data;
+	void __iomem *reg_base;
+	void __iomem *dat_base;
+	void __iomem *dir_base;
+	struct device *dev;
+	struct gxp_spi_chip chips[GXP_SPI0_MAX_CHIPSELECT];
+};
+
+static void gxp_spi_set_mode(struct gxp_spi *spifi, int mode)
+{
+	u8 value;
+	void __iomem *reg_base = spifi->reg_base;
+
+	value = readb(reg_base + OFFSET_SPIMCTRL);
+
+	if (mode == MANUAL_MODE) {
+		writeb(0x55, reg_base + OFFSET_SPICMD);
+		writeb(0xaa, reg_base + OFFSET_SPICMD);
+		value &= ~0x30;
+	} else {
+		value |= 0x30;
+	}
+	writeb(value, reg_base + OFFSET_SPIMCTRL);
+}
+
+static int gxp_spi_read_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+	int ret;
+	struct gxp_spi *spifi = chip->spifi;
+	void __iomem *reg_base = spifi->reg_base;
+	u32 value;
+
+	value = readl(reg_base + OFFSET_SPIMCFG);
+	value &= ~(1 << 24);
+	value |= (chip->cs << 24);
+	value &= ~(0x07 << 16);
+	value &= ~(0x1f << 19);
+	writel(value, reg_base + OFFSET_SPIMCFG);
+
+	writel(0, reg_base + OFFSET_SPIADDR);
+
+	writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+	writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
+
+	value = readb(reg_base + OFFSET_SPIMCTRL);
+	value &= ~SPIMCTRL_DIR;
+	value |= SPIMCTRL_START;
+
+	writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+	ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+				 !(value & SPIMCTRL_BUSY),
+				 GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+	if (ret) {
+		dev_warn(spifi->dev, "read reg busy time out\n");
+		return ret;
+	}
+
+	memcpy_fromio(op->data.buf.in, spifi->dat_base, op->data.nbytes);
+	return ret;
+}
+
+static int gxp_spi_write_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+	int ret;
+	struct gxp_spi *spifi = chip->spifi;
+	void __iomem *reg_base = spifi->reg_base;
+	u32 value;
+
+	value = readl(reg_base + OFFSET_SPIMCFG);
+	value &= ~(1 << 24);
+	value |= (chip->cs << 24);
+	value &= ~(0x07 << 16);
+	value &= ~(0x1f << 19);
+	writel(value, reg_base + OFFSET_SPIMCFG);
+
+	writel(0, reg_base + OFFSET_SPIADDR);
+
+	writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+	memcpy_toio(spifi->dat_base, op->data.buf.out, op->data.nbytes);
+
+	writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
+
+	value = readb(reg_base + OFFSET_SPIMCTRL);
+	value |= SPIMCTRL_DIR;
+	value |= SPIMCTRL_START;
+
+	writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+	ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+				 !(value & SPIMCTRL_BUSY),
+				 GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+	if (ret)
+		dev_warn(spifi->dev, "write reg busy time out\n");
+
+	return ret;
+}
+
+static ssize_t gxp_spi_read(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+	struct gxp_spi *spifi = chip->spifi;
+	u32 offset = op->addr.val;
+
+	if (chip->cs == 0)
+		offset += 0x4000000;
+
+	memcpy_fromio(op->data.buf.in, spifi->dir_base + offset, op->data.nbytes);
+
+	return 0;
+}
+
+static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+	struct gxp_spi *spifi = chip->spifi;
+	void __iomem *reg_base = spifi->reg_base;
+	u32 write_len;
+	u32 value;
+	int ret;
+
+	write_len = op->data.nbytes;
+	if (write_len > SPILDAT_LEN)
+		write_len = SPILDAT_LEN;
+
+	value = readl(reg_base + OFFSET_SPIMCFG);
+	value &= ~(1 << 24);
+	value |= (chip->cs << 24);
+	value &= ~(0x07 << 16);
+	value |= (op->addr.nbytes << 16);
+	value &= ~(0x1f << 19);
+	writel(value, reg_base + OFFSET_SPIMCFG);
+
+	writel(op->addr.val, reg_base + OFFSET_SPIADDR);
+
+	writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+	writew(write_len, reg_base + OFFSET_SPIDCNT);
+
+	memcpy_toio(spifi->dat_base, op->data.buf.out, write_len);
+
+	value = readb(reg_base + OFFSET_SPIMCTRL);
+	value |= SPIMCTRL_DIR;
+	value |= SPIMCTRL_START;
+
+	writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+	ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+				 !(value & SPIMCTRL_BUSY),
+				 GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+	if (ret) {
+		dev_warn(spifi->dev, "write busy time out\n");
+		return ret;
+	}
+
+	/* exec_op expects 0 on success, not the number of bytes written */
+	return 0;
+}
+
+static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->master);
+	struct gxp_spi_chip *chip = &spifi->chips[mem->spi->chip_select];
+	int ret;
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		if (!op->addr.nbytes)
+			ret = gxp_spi_read_reg(chip, op);
+		else
+			ret = gxp_spi_read(chip, op);
+	} else {
+		if (!op->addr.nbytes)
+			ret = gxp_spi_write_reg(chip, op);
+		else
+			ret = gxp_spi_write(chip, op);
+	}
+
+	return ret;
+}
+
+static int gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	int ret;
+
+	ret = do_gxp_exec_mem_op(mem, op);
+	if (ret)
+		dev_err(&mem->spi->dev, "operation failed: %d", ret);
+
+	return ret;
+}
+
+static const struct spi_controller_mem_ops gxp_spi_mem_ops = {
+	.exec_op = gxp_exec_mem_op,
+};
+
+static int gxp_spi_setup(struct spi_device *spi)
+{
+	struct gxp_spi *spifi = spi_controller_get_devdata(spi->master);
+	unsigned int cs = spi->chip_select;
+	struct gxp_spi_chip *chip = &spifi->chips[cs];
+
+	chip->spifi = spifi;
+	chip->cs = cs;
+
+	gxp_spi_set_mode(spifi, MANUAL_MODE);
+
+	return 0;
+}
+
+static int gxp_spifi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct gxp_spi_data *data;
+	struct spi_controller *ctlr;
+	struct gxp_spi *spifi;
+	struct resource *res;
+	int ret;
+
+	data = of_device_get_match_data(&pdev->dev);
+
+	ctlr = devm_spi_alloc_master(dev, sizeof(*spifi));
+	if (!ctlr)
+		return -ENOMEM;
+
+	spifi = spi_controller_get_devdata(ctlr);
+
+	platform_set_drvdata(pdev, spifi);
+	spifi->data = data;
+	spifi->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spifi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spifi->reg_base))
+		return PTR_ERR(spifi->reg_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	spifi->dat_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spifi->dat_base))
+		return PTR_ERR(spifi->dat_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	spifi->dir_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spifi->dir_base))
+		return PTR_ERR(spifi->dir_base);
+
+	ctlr->mode_bits = data->mode_bits;
+	ctlr->bus_num = pdev->id;
+	ctlr->mem_ops = &gxp_spi_mem_ops;
+	ctlr->setup =
gxp_spi_setup; + ctlr->num_chipselect = data->max_cs; + ctlr->dev.of_node = dev->of_node; + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) { + return dev_err_probe(&pdev->dev, ret, + "failed to register spi controller\n"); + } + + return 0; +} + +static const struct gxp_spi_data gxp_spifi_data = { + .max_cs = 2, + .mode_bits = 0, +}; + +static const struct of_device_id gxp_spifi_match[] = { + {.compatible = "hpe,gxp-spifi", .data = &gxp_spifi_data }, + { /* null */ } +}; +MODULE_DEVICE_TABLE(of, gxp_spifi_match); + +static struct platform_driver gxp_spifi_driver = { + .probe = gxp_spifi_probe, + .driver = { + .name = "gxp-spifi", + .of_match_table = gxp_spifi_match, + }, +}; +module_platform_driver(gxp_spifi_driver); + +MODULE_DESCRIPTION("HPE GXP SPI Flash Interface driver"); +MODULE_AUTHOR("Nick Hawkins "); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c new file mode 100644 index 0000000000..ce4385330b --- /dev/null +++ b/drivers/spi/spi-microchip-core.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: (GPL-2.0) +/* + * Microchip CoreSPI SPI controller driver + * + * Copyright (c) 2018-2022 Microchip Technology Inc. and its subsidiaries + * + * Author: Daire McNamara + * Author: Conor Dooley + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_LEN (0xffff) +#define MAX_CS (8) +#define DEFAULT_FRAMESIZE (8) +#define FIFO_DEPTH (32) +#define CLK_GEN_MODE1_MAX (255) +#define CLK_GEN_MODE0_MAX (15) +#define CLK_GEN_MIN (0) +#define MODE_X_MASK_SHIFT (24) + +#define CONTROL_ENABLE BIT(0) +#define CONTROL_MASTER BIT(1) +#define CONTROL_RX_DATA_INT BIT(4) +#define CONTROL_TX_DATA_INT BIT(5) +#define CONTROL_RX_OVER_INT BIT(6) +#define CONTROL_TX_UNDER_INT BIT(7) +#define CONTROL_SPO BIT(24) +#define CONTROL_SPH BIT(25) +#define CONTROL_SPS BIT(26) +#define CONTROL_FRAMEURUN BIT(27) +#define CONTROL_CLKMODE BIT(28) +#define CONTROL_BIGFIFO BIT(29) +#define CONTROL_OENOFF BIT(30) +#define CONTROL_RESET BIT(31) + +#define CONTROL_MODE_MASK GENMASK(3, 2) +#define MOTOROLA_MODE (0) +#define CONTROL_FRAMECNT_MASK GENMASK(23, 8) +#define CONTROL_FRAMECNT_SHIFT (8) + +#define STATUS_ACTIVE BIT(14) +#define STATUS_SSEL BIT(13) +#define STATUS_FRAMESTART BIT(12) +#define STATUS_TXFIFO_EMPTY_NEXT_READ BIT(11) +#define STATUS_TXFIFO_EMPTY BIT(10) +#define STATUS_TXFIFO_FULL_NEXT_WRITE BIT(9) +#define STATUS_TXFIFO_FULL BIT(8) +#define STATUS_RXFIFO_EMPTY_NEXT_READ BIT(7) +#define STATUS_RXFIFO_EMPTY BIT(6) +#define STATUS_RXFIFO_FULL_NEXT_WRITE BIT(5) +#define STATUS_RXFIFO_FULL BIT(4) +#define STATUS_TX_UNDERRUN BIT(3) +#define STATUS_RX_OVERFLOW BIT(2) +#define STATUS_RXDAT_RXED BIT(1) +#define STATUS_TXDAT_SENT BIT(0) + +#define INT_TXDONE BIT(0) +#define INT_RXRDY BIT(1) +#define INT_RX_CHANNEL_OVERFLOW BIT(2) +#define INT_TX_CHANNEL_UNDERRUN BIT(3) + +#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \ + CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) + +#define REG_CONTROL (0x00) +#define REG_FRAME_SIZE (0x04) +#define REG_STATUS (0x08) +#define REG_INT_CLEAR (0x0c) +#define REG_RX_DATA (0x10) +#define REG_TX_DATA (0x14) +#define REG_CLK_GEN (0x18) +#define REG_SLAVE_SELECT (0x1c) +#define SSEL_MASK GENMASK(7, 0) +#define SSEL_DIRECT BIT(8) +#define SSELOUT_SHIFT 9 +#define SSELOUT BIT(SSELOUT_SHIFT) +#define REG_MIS (0x20) +#define REG_RIS (0x24) +#define REG_CONTROL2 (0x28) +#define REG_COMMAND (0x2c) +#define REG_PKTSIZE (0x30) +#define 
REG_CMD_SIZE (0x34) +#define REG_HWSTATUS (0x38) +#define REG_STAT8 (0x3c) +#define REG_CTRL2 (0x48) +#define REG_FRAMESUP (0x50) + +struct mchp_corespi { + void __iomem *regs; + struct clk *clk; + const u8 *tx_buf; + u8 *rx_buf; + u32 clk_gen; /* divider for spi output clock generated by the controller */ + u32 clk_mode; + int irq; + int tx_len; + int rx_len; + int pending; +}; + +static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg) +{ + return readl(spi->regs + reg); +} + +static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg, u32 val) +{ + writel(val, spi->regs + reg); +} + +static inline void mchp_corespi_enable(struct mchp_corespi *spi) +{ + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control |= CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_disable(struct mchp_corespi *spi) +{ + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control &= ~CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi) +{ + u8 data; + int fifo_max, i = 0; + + fifo_max = min(spi->rx_len, FIFO_DEPTH); + + while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) { + data = mchp_corespi_read(spi, REG_RX_DATA); + + if (spi->rx_buf) + *spi->rx_buf++ = data; + i++; + } + spi->rx_len -= i; + spi->pending -= i; +} + +static void mchp_corespi_enable_ints(struct mchp_corespi *spi) +{ + u32 control, mask = INT_ENABLE_MASK; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + + control |= mask; + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static void mchp_corespi_disable_ints(struct mchp_corespi *spi) +{ + u32 control, mask = INT_ENABLE_MASK; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~mask; + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) +{ + u32 control; + u16 lenpart; + + /* + * Disable the SPI controller. Writes to transfer length have + * no effect when the controller is enabled. + */ + mchp_corespi_disable(spi); + + /* + * The lower 16 bits of the frame count are stored in the control reg + * for legacy reasons, but the upper 16 written to a different register: + * FRAMESUP. While both the upper and lower bits can be *READ* from the + * FRAMESUP register, writing to the lower 16 bits is a NOP + */ + lenpart = len & 0xffff; + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~CONTROL_FRAMECNT_MASK; + control |= lenpart << CONTROL_FRAMECNT_SHIFT; + mchp_corespi_write(spi, REG_CONTROL, control); + + lenpart = len & 0xffff0000; + mchp_corespi_write(spi, REG_FRAMESUP, lenpart); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) +{ + u8 byte; + int fifo_max, i = 0; + + fifo_max = min(spi->tx_len, FIFO_DEPTH); + mchp_corespi_set_xfer_size(spi, fifo_max); + + while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) { + byte = spi->tx_buf ? 
*spi->tx_buf++ : 0xaa; + mchp_corespi_write(spi, REG_TX_DATA, byte); + i++; + } + + spi->tx_len -= i; + spi->pending += i; +} + +static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt) +{ + u32 control; + + /* + * Disable the SPI controller. Writes to the frame size have + * no effect when the controller is enabled. + */ + mchp_corespi_disable(spi); + + mchp_corespi_write(spi, REG_FRAME_SIZE, bt); + + control = mchp_corespi_read(spi, REG_CONTROL); + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static void mchp_corespi_set_cs(struct spi_device *spi, bool disable) +{ + u32 reg; + struct mchp_corespi *corespi = spi_master_get_devdata(spi->master); + + reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); + reg &= ~BIT(spi->chip_select); + reg |= !disable << spi->chip_select; + + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); +} + +static int mchp_corespi_setup(struct spi_device *spi) +{ + struct mchp_corespi *corespi = spi_master_get_devdata(spi->master); + u32 reg; + + /* + * Active high slaves need to be specifically set to their inactive + * states during probe by adding them to the "control group" & thus + * driving their select line low. + */ + if (spi->mode & SPI_CS_HIGH) { + reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); + reg |= BIT(spi->chip_select); + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); + } + return 0; +} + +static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi) +{ + unsigned long clk_hz; + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control |= CONTROL_MASTER; + + control &= ~CONTROL_MODE_MASK; + control |= MOTOROLA_MODE; + + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + + /* max. possible spi clock rate is the apb clock rate */ + clk_hz = clk_get_rate(spi->clk); + master->max_speed_hz = clk_hz; + + /* + * The controller must be configured so that it doesn't remove Chip + * Select until the entire message has been transferred, even if at + * some points TX FIFO becomes empty. + * + * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames + * for the 8 bit transfers that this driver uses. + */ + control = mchp_corespi_read(spi, REG_CONTROL); + control |= CONTROL_SPS | CONTROL_BIGFIFO; + + mchp_corespi_write(spi, REG_CONTROL, control); + + mchp_corespi_enable_ints(spi); + + /* + * It is required to enable direct mode, otherwise control over the chip + * select is relinquished to the hardware. SSELOUT is enabled too so we + * can deal with active high slaves. 
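+	 *
+	 * With SSEL_DIRECT set, mchp_corespi_set_cs() then simply toggles
+	 * BIT(chip_select) in REG_SLAVE_SELECT to select or deselect a
+	 * device, instead of leaving chip select framing to the hardware.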
+	 */
+	mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT);
+
+	control = mchp_corespi_read(spi, REG_CONTROL);
+
+	control &= ~CONTROL_RESET;
+	control |= CONTROL_ENABLE;
+
+	mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
+{
+	u32 control;
+
+	mchp_corespi_disable(spi);
+
+	control = mchp_corespi_read(spi, REG_CONTROL);
+	if (spi->clk_mode)
+		control |= CONTROL_CLKMODE;
+	else
+		control &= ~CONTROL_CLKMODE;
+
+	mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
+	mchp_corespi_write(spi, REG_CONTROL, control);
+	mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE);
+}
+
+static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
+{
+	u32 control, mode_val;
+
+	switch (mode & SPI_MODE_X_MASK) {
+	case SPI_MODE_0:
+		mode_val = 0;
+		break;
+	case SPI_MODE_1:
+		mode_val = CONTROL_SPH;
+		break;
+	case SPI_MODE_2:
+		mode_val = CONTROL_SPO;
+		break;
+	case SPI_MODE_3:
+		mode_val = CONTROL_SPH | CONTROL_SPO;
+		break;
+	}
+
+	/*
+	 * Disable the SPI controller. Writes to the mode bits have
+	 * no effect when the controller is enabled.
+	 */
+	mchp_corespi_disable(spi);
+
+	control = mchp_corespi_read(spi, REG_CONTROL);
+	control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
+	control |= mode_val;
+
+	mchp_corespi_write(spi, REG_CONTROL, control);
+
+	control |= CONTROL_ENABLE;
+	mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+{
+	struct spi_master *master = dev_id;
+	struct mchp_corespi *spi = spi_master_get_devdata(master);
+	u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf;
+	bool finalise = false;
+
+	/* Interrupt line may be shared and not for us at all */
+	if (intfield == 0)
+		return IRQ_NONE;
+
+	if (intfield & INT_TXDONE) {
+		mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
+
+		if (spi->rx_len)
+			mchp_corespi_read_fifo(spi);
+
+		if (spi->tx_len)
+			mchp_corespi_write_fifo(spi);
+
+		if (!spi->rx_len)
+			finalise = true;
+	}
+
+	if (intfield & INT_RXRDY)
+		mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
+
+	if (intfield & INT_RX_CHANNEL_OVERFLOW) {
+		mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
+		finalise = true;
+		dev_err(&master->dev,
+			"%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__,
+			spi->rx_len, spi->tx_len);
+	}
+
+	if (intfield & INT_TX_CHANNEL_UNDERRUN) {
+		mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
+		finalise = true;
+		dev_err(&master->dev,
+			"%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__,
+			spi->rx_len, spi->tx_len);
+	}
+
+	if (finalise)
+		spi_finalize_current_transfer(master);
+
+	return IRQ_HANDLED;
+}
+
+static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
+					 unsigned long target_hz)
+{
+	unsigned long clk_hz, spi_hz, clk_gen;
+
+	clk_hz = clk_get_rate(spi->clk);
+	if (!clk_hz)
+		return -EINVAL;
+	spi_hz = min(target_hz, clk_hz);
+
+	/*
+	 * There are two possible clock modes for the controller generated
+	 * clock's division ratio:
+	 * CLK_MODE = 0: 1 / (2^(CLK_GEN + 1)) where CLK_GEN = 0 to 15.
+	 * CLK_MODE = 1: 1 / (2 * (CLK_GEN + 1)) where CLK_GEN = 0 to 255.
+	 * First try mode 1; fall back to mode 0, and if we have tried both
+	 * modes and /still/ can't get a good setting, we then throw the
+	 * toys out of the pram and give up.
+	 * clk_gen is the register name for the clock divider on MPFS.
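+	 *
+	 * Illustrative numbers: with a 150 MHz input clock and a 10 MHz
+	 * target, mode 1 gives clk_gen = DIV_ROUND_UP(150, 2 * 10) - 1 = 7,
+	 * i.e. 150 MHz / (2 * (7 + 1)) = 9.375 MHz, the fastest setting
+	 * that does not exceed the target.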
+ */ + clk_gen = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1; + if (clk_gen > CLK_GEN_MODE1_MAX || clk_gen <= CLK_GEN_MIN) { + clk_gen = DIV_ROUND_UP(clk_hz, spi_hz); + clk_gen = fls(clk_gen) - 1; + + if (clk_gen > CLK_GEN_MODE0_MAX) + return -EINVAL; + + spi->clk_mode = 0; + } else { + spi->clk_mode = 1; + } + + spi->clk_gen = clk_gen; + return 0; +} + +static int mchp_corespi_transfer_one(struct spi_master *master, + struct spi_device *spi_dev, + struct spi_transfer *xfer) +{ + struct mchp_corespi *spi = spi_master_get_devdata(master); + int ret; + + ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz); + if (ret) { + dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz); + return ret; + } + + mchp_corespi_set_clk_gen(spi); + + spi->tx_buf = xfer->tx_buf; + spi->rx_buf = xfer->rx_buf; + spi->tx_len = xfer->len; + spi->rx_len = xfer->len; + spi->pending = 0; + + mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH) + ? FIFO_DEPTH : spi->tx_len); + + if (spi->tx_len) + mchp_corespi_write_fifo(spi); + return 1; +} + +static int mchp_corespi_prepare_message(struct spi_master *master, + struct spi_message *msg) +{ + struct spi_device *spi_dev = msg->spi; + struct mchp_corespi *spi = spi_master_get_devdata(master); + + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + mchp_corespi_set_mode(spi, spi_dev->mode); + + return 0; +} + +static int mchp_corespi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct mchp_corespi *spi; + struct resource *res; + u32 num_cs; + int ret = 0; + + master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi)); + if (!master) + return dev_err_probe(&pdev->dev, -ENOMEM, + "unable to allocate master for SPI controller\n"); + + platform_set_drvdata(pdev, master); + + if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs)) + num_cs = MAX_CS; + + master->num_chipselect = num_cs; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->setup = mchp_corespi_setup; + master->bits_per_word_mask = SPI_BPW_MASK(8); + master->transfer_one = mchp_corespi_transfer_one; + master->prepare_message = mchp_corespi_prepare_message; + master->set_cs = mchp_corespi_set_cs; + master->dev.of_node = pdev->dev.of_node; + + spi = spi_master_get_devdata(master); + + spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(spi->regs)) + return PTR_ERR(spi->regs); + + spi->irq = platform_get_irq(pdev, 0); + if (spi->irq <= 0) + return dev_err_probe(&pdev->dev, -ENXIO, + "invalid IRQ %d for SPI controller\n", + spi->irq); + + ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt, + IRQF_SHARED, dev_name(&pdev->dev), master); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "could not request irq: %d\n", ret); + + spi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(spi->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk), + "could not get clk: %d\n", ret); + + ret = clk_prepare_enable(spi->clk); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to enable clock\n"); + + mchp_corespi_init(master, spi); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + mchp_corespi_disable(spi); + clk_disable_unprepare(spi->clk); + return dev_err_probe(&pdev->dev, ret, + "unable to register master for SPI controller\n"); + } + + dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num); + + return 0; +} + +static int mchp_corespi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + 
struct mchp_corespi *spi = spi_master_get_devdata(master);
+
+	mchp_corespi_disable_ints(spi);
+	clk_disable_unprepare(spi->clk);
+	mchp_corespi_disable(spi);
+
+	return 0;
+}
+
+#define MICROCHIP_SPI_PM_OPS (NULL)
+
+/*
+ * Platform driver data structure
+ */
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mchp_corespi_dt_ids[] = {
+	{ .compatible = "microchip,mpfs-spi" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mchp_corespi_dt_ids);
+#endif
+
+static struct platform_driver mchp_corespi_driver = {
+	.probe = mchp_corespi_probe,
+	.driver = {
+		.name = "microchip-corespi",
+		.pm = MICROCHIP_SPI_PM_OPS,
+		.of_match_table = of_match_ptr(mchp_corespi_dt_ids),
+	},
+	.remove = mchp_corespi_remove,
+};
+module_platform_driver(mchp_corespi_driver);
+MODULE_DESCRIPTION("Microchip coreSPI SPI controller driver");
+MODULE_AUTHOR("Daire McNamara ");
+MODULE_AUTHOR("Conor Dooley ");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
new file mode 100644
index 0000000000..d66bf97625
--- /dev/null
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -0,0 +1,1472 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the SPI-NAND mode of the MediaTek NAND Flash Interface
+//
+// Copyright (c) 2022 Chuanhong Guo 
+//
+// This driver is based on the SPI-NAND mtd driver from the MediaTek SDK:
+//
+// Copyright (C) 2020 MediaTek Inc.
+// Author: Weijie Gao 
+//
+// This controller organizes the page data as several interleaved sectors
+// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
+// +---------+------+------+---------+------+------+-----+
+// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
+// +---------+------+------+---------+------+------+-----+
+// With auto-format turned on, DMA only returns this part:
+// +---------+---------+-----+
+// | Sector1 | Sector2 | ... |
+// +---------+---------+-----+
+// The FDM data will be filled into the registers, and the ECC parity data
+// isn't accessible.
+// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
+// in its original order shown in the first table. ECC can't be turned on when
+// auto-format is off.
+//
+// However, the Linux SPI-NAND driver expects the data returned as:
+// +------+-----+
+// | Page | OOB |
+// +------+-----+
+// where the page data is stored contiguously instead of interleaved.
+// So we assume all instructions matching the page_op template between ECC
+// prepare_io_req and finish_io_req are for page cache r/w.
+// Here's how this spi-mem driver operates when reading:
+// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
+// 2. Perform page ops and let the controller fill the DMA bounce buffer with
+//    de-interleaved sector data and set the FDM registers.
+// 3. Return the data as:
+//    +---------+---------+-----+------+------+-----+
+//    | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
+//    +---------+---------+-----+------+------+-----+
+// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
+//    read the data with auto-format off into the bounce buffer and copy
+//    the needed data to the buffer specified in the request.
+//
+// Write requests operate in a similar manner.
+// As a limitation of this strategy, we won't be able to access any ECC parity
+// data at all in Linux.
+//
+// Here's the bad block mark situation on MTK chips:
+// In older chips like mt7622, MTK uses the first FDM byte in the first sector
+// as the bad block mark.
After de-interleaving, this byte appears at [pagesize] +// in the returned data, which is the BBM position expected by kernel. However, +// the conventional bad block mark is the first byte of the OOB, which is part +// of the last sector data in the interleaved layout. Instead of fixing their +// hardware, MTK decided to address this inconsistency in software. On these +// later chips, the BootROM expects the following: +// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at +// (page_size - (nsectors - 1) * spare_size) in the DMA buffer. +// 2. The original byte stored at that position in the DMA buffer will be stored +// as the first byte of the FDM section in the last sector. +// We can't disagree with the BootROM, so after de-interleaving, we need to +// perform the following swaps in read: +// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size], +// which is the expected BBM position by kernel. +// 2. Store the page data byte at [pagesize + (nsectors-1) * fdm] back to +// [page_size - (nsectors - 1) * spare_size] +// Similarly, when writing, we need to perform swaps in the other direction. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// NFI registers +#define NFI_CNFG 0x000 +#define CNFG_OP_MODE_S 12 +#define CNFG_OP_MODE_CUST 6 +#define CNFG_OP_MODE_PROGRAM 3 +#define CNFG_AUTO_FMT_EN BIT(9) +#define CNFG_HW_ECC_EN BIT(8) +#define CNFG_DMA_BURST_EN BIT(2) +#define CNFG_READ_MODE BIT(1) +#define CNFG_DMA_MODE BIT(0) + +#define NFI_PAGEFMT 0x0004 +#define NFI_SPARE_SIZE_LS_S 16 +#define NFI_FDM_ECC_NUM_S 12 +#define NFI_FDM_NUM_S 8 +#define NFI_SPARE_SIZE_S 4 +#define NFI_SEC_SEL_512 BIT(2) +#define NFI_PAGE_SIZE_S 0 +#define NFI_PAGE_SIZE_512_2K 0 +#define NFI_PAGE_SIZE_2K_4K 1 +#define NFI_PAGE_SIZE_4K_8K 2 +#define NFI_PAGE_SIZE_8K_16K 3 + +#define NFI_CON 0x008 +#define CON_SEC_NUM_S 12 +#define CON_BWR BIT(9) +#define CON_BRD BIT(8) +#define CON_NFI_RST BIT(1) +#define CON_FIFO_FLUSH BIT(0) + +#define NFI_INTR_EN 0x010 +#define NFI_INTR_STA 0x014 +#define NFI_IRQ_INTR_EN BIT(31) +#define NFI_IRQ_CUS_READ BIT(8) +#define NFI_IRQ_CUS_PG BIT(7) + +#define NFI_CMD 0x020 +#define NFI_CMD_DUMMY_READ 0x00 +#define NFI_CMD_DUMMY_WRITE 0x80 + +#define NFI_STRDATA 0x040 +#define STR_DATA BIT(0) + +#define NFI_STA 0x060 +#define NFI_NAND_FSM GENMASK(28, 24) +#define NFI_FSM GENMASK(19, 16) +#define READ_EMPTY BIT(12) + +#define NFI_FIFOSTA 0x064 +#define FIFO_WR_REMAIN_S 8 +#define FIFO_RD_REMAIN_S 0 + +#define NFI_ADDRCNTR 0x070 +#define SEC_CNTR GENMASK(16, 12) +#define SEC_CNTR_S 12 +#define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S) + +#define NFI_STRADDR 0x080 + +#define NFI_BYTELEN 0x084 +#define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S) + +#define NFI_FDM0L 0x0a0 +#define NFI_FDM0M 0x0a4 +#define NFI_FDML(n) (NFI_FDM0L + (n)*8) +#define NFI_FDMM(n) (NFI_FDM0M + (n)*8) + +#define NFI_DEBUG_CON1 0x220 +#define WBUF_EN BIT(2) + +#define NFI_MASTERSTA 0x224 +#define MAS_ADDR GENMASK(11, 9) +#define MAS_RD GENMASK(8, 6) +#define MAS_WR GENMASK(5, 3) +#define MAS_RDDLY GENMASK(2, 0) +#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY) + +// SNFI registers +#define SNF_MAC_CTL 0x500 +#define MAC_XIO_SEL BIT(4) +#define SF_MAC_EN BIT(3) +#define SF_TRIG BIT(2) +#define WIP_READY BIT(1) +#define WIP BIT(0) + +#define SNF_MAC_OUTL 0x504 +#define SNF_MAC_INL 0x508 + +#define SNF_RD_CTL2 0x510 +#define 
DATA_READ_DUMMY_S 8 +#define DATA_READ_MAX_DUMMY 0xf +#define DATA_READ_CMD_S 0 + +#define SNF_RD_CTL3 0x514 + +#define SNF_PG_CTL1 0x524 +#define PG_LOAD_CMD_S 8 + +#define SNF_PG_CTL2 0x528 + +#define SNF_MISC_CTL 0x538 +#define SW_RST BIT(28) +#define FIFO_RD_LTC_S 25 +#define PG_LOAD_X4_EN BIT(20) +#define DATA_READ_MODE_S 16 +#define DATA_READ_MODE GENMASK(18, 16) +#define DATA_READ_MODE_X1 0 +#define DATA_READ_MODE_X2 1 +#define DATA_READ_MODE_X4 2 +#define DATA_READ_MODE_DUAL 5 +#define DATA_READ_MODE_QUAD 6 +#define PG_LOAD_CUSTOM_EN BIT(7) +#define DATARD_CUSTOM_EN BIT(6) +#define CS_DESELECT_CYC_S 0 + +#define SNF_MISC_CTL2 0x53c +#define PROGRAM_LOAD_BYTE_NUM_S 16 +#define READ_DATA_BYTE_NUM_S 11 + +#define SNF_DLY_CTL3 0x548 +#define SFCK_SAM_DLY_S 0 + +#define SNF_STA_CTL1 0x550 +#define CUS_PG_DONE BIT(28) +#define CUS_READ_DONE BIT(27) +#define SPI_STATE_S 0 +#define SPI_STATE GENMASK(3, 0) + +#define SNF_CFG 0x55c +#define SPI_MODE BIT(0) + +#define SNF_GPRAM 0x800 +#define SNF_GPRAM_SIZE 0xa0 + +#define SNFI_POLL_INTERVAL 1000000 + +static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 }; + +struct mtk_snand_caps { + u16 sector_size; + u16 max_sectors; + u16 fdm_size; + u16 fdm_ecc_size; + u16 fifo_size; + + bool bbm_swap; + bool empty_page_check; + u32 mastersta_mask; + + const u8 *spare_sizes; + u32 num_spare_size; +}; + +static const struct mtk_snand_caps mt7622_snand_caps = { + .sector_size = 512, + .max_sectors = 8, + .fdm_size = 8, + .fdm_ecc_size = 1, + .fifo_size = 32, + .bbm_swap = false, + .empty_page_check = false, + .mastersta_mask = NFI_MASTERSTA_MASK_7622, + .spare_sizes = mt7622_spare_sizes, + .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes) +}; + +static const struct mtk_snand_caps mt7629_snand_caps = { + .sector_size = 512, + .max_sectors = 8, + .fdm_size = 8, + .fdm_ecc_size = 1, + .fifo_size = 32, + .bbm_swap = true, + .empty_page_check = false, + .mastersta_mask = NFI_MASTERSTA_MASK_7622, + .spare_sizes = mt7622_spare_sizes, + .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes) +}; + +struct mtk_snand_conf { + size_t page_size; + size_t oob_size; + u8 nsectors; + u8 spare_size; +}; + +struct mtk_snand { + struct spi_controller *ctlr; + struct device *dev; + struct clk *nfi_clk; + struct clk *pad_clk; + void __iomem *nfi_base; + int irq; + struct completion op_done; + const struct mtk_snand_caps *caps; + struct mtk_ecc_config *ecc_cfg; + struct mtk_ecc *ecc; + struct mtk_snand_conf nfi_cfg; + struct mtk_ecc_stats ecc_stats; + struct nand_ecc_engine ecc_eng; + bool autofmt; + u8 *buf; + size_t buf_len; +}; + +static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand) +{ + struct nand_ecc_engine *eng = nand->ecc.engine; + + return container_of(eng, struct mtk_snand, ecc_eng); +} + +static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size) +{ + if (snf->buf_len >= size) + return 0; + kfree(snf->buf); + snf->buf = kmalloc(size, GFP_KERNEL); + if (!snf->buf) + return -ENOMEM; + snf->buf_len = size; + memset(snf->buf, 0xff, snf->buf_len); + return 0; +} + +static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg) +{ + return readl(snf->nfi_base + reg); +} + +static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val) +{ + writel(val, snf->nfi_base + reg); +} + +static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val) +{ + writew(val, snf->nfi_base + reg); +} + +static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set) +{ + u32 val; + + val = readl(snf->nfi_base + reg); + val &= 
~clr; + val |= set; + writel(val, snf->nfi_base + reg); +} + +static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len) +{ + u32 i, val = 0, es = sizeof(u32); + + for (i = reg; i < reg + len; i++) { + if (i == reg || i % es == 0) + val = nfi_read32(snf, i & ~(es - 1)); + + *data++ = (u8)(val >> (8 * (i % es))); + } +} + +static int mtk_nfi_reset(struct mtk_snand *snf) +{ + u32 val, fifo_mask; + int ret; + + nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST); + + ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val, + !(val & snf->caps->mastersta_mask), 0, + SNFI_POLL_INTERVAL); + if (ret) { + dev_err(snf->dev, "NFI master is still busy after reset\n"); + return ret; + } + + ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val, + !(val & (NFI_FSM | NFI_NAND_FSM)), 0, + SNFI_POLL_INTERVAL); + if (ret) { + dev_err(snf->dev, "Failed to reset NFI\n"); + return ret; + } + + fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) | + ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S); + ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val, + !(val & fifo_mask), 0, SNFI_POLL_INTERVAL); + if (ret) { + dev_err(snf->dev, "NFI FIFOs are not empty\n"); + return ret; + } + + return 0; +} + +static int mtk_snand_mac_reset(struct mtk_snand *snf) +{ + int ret; + u32 val; + + nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST); + + ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val, + !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL); + if (ret) + dev_err(snf->dev, "Failed to reset SNFI MAC\n"); + + nfi_write32(snf, SNF_MISC_CTL, + (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S)); + + return ret; +} + +static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen) +{ + int ret; + u32 val; + + nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN); + nfi_write32(snf, SNF_MAC_OUTL, outlen); + nfi_write32(snf, SNF_MAC_INL, inlen); + + nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG); + + ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, + val & WIP_READY, 0, SNFI_POLL_INTERVAL); + if (ret) { + dev_err(snf->dev, "Timed out waiting for WIP_READY\n"); + goto cleanup; + } + + ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP), + 0, SNFI_POLL_INTERVAL); + if (ret) + dev_err(snf->dev, "Timed out waiting for WIP cleared\n"); + +cleanup: + nfi_write32(snf, SNF_MAC_CTL, 0); + + return ret; +} + +static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op) +{ + u32 rx_len = 0; + u32 reg_offs = 0; + u32 val = 0; + const u8 *tx_buf = NULL; + u8 *rx_buf = NULL; + int i, ret; + u8 b; + + if (op->data.dir == SPI_MEM_DATA_IN) { + rx_len = op->data.nbytes; + rx_buf = op->data.buf.in; + } else { + tx_buf = op->data.buf.out; + } + + mtk_snand_mac_reset(snf); + + for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) { + b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff; + val |= b << (8 * (reg_offs % 4)); + if (reg_offs % 4 == 3) { + nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); + val = 0; + } + } + + for (i = 0; i < op->addr.nbytes; i++, reg_offs++) { + b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff; + val |= b << (8 * (reg_offs % 4)); + if (reg_offs % 4 == 3) { + nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); + val = 0; + } + } + + for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) { + if (reg_offs % 4 == 3) { + nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); + val = 0; + } + } + + if (op->data.dir == SPI_MEM_DATA_OUT) { + for (i = 0; i < op->data.nbytes; i++, reg_offs++) { + val |= tx_buf[i] << (8 * (reg_offs % 4)); + 
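			// flush the packed 32-bit word to GPRAM once four bytes have accumulated +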
if (reg_offs % 4 == 3) { + nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); + val = 0; + } + } + } + + if (reg_offs % 4) + nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val); + + for (i = 0; i < reg_offs; i += 4) + dev_dbg(snf->dev, "%d: %08X", i, + nfi_read32(snf, SNF_GPRAM + i)); + + dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len); + + ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len); + if (ret) + return ret; + + if (!rx_len) + return 0; + + nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len); + return 0; +} + +static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size, + u32 oob_size) +{ + int spare_idx = -1; + u32 spare_size, spare_size_shift, pagesize_idx; + u32 sector_size_512; + u8 nsectors; + int i; + + // skip if it's already configured as required. + if (snf->nfi_cfg.page_size == page_size && + snf->nfi_cfg.oob_size == oob_size) + return 0; + + nsectors = page_size / snf->caps->sector_size; + if (nsectors > snf->caps->max_sectors) { + dev_err(snf->dev, "too many sectors required.\n"); + goto err; + } + + if (snf->caps->sector_size == 512) { + sector_size_512 = NFI_SEC_SEL_512; + spare_size_shift = NFI_SPARE_SIZE_S; + } else { + sector_size_512 = 0; + spare_size_shift = NFI_SPARE_SIZE_LS_S; + } + + switch (page_size) { + case SZ_512: + pagesize_idx = NFI_PAGE_SIZE_512_2K; + break; + case SZ_2K: + if (snf->caps->sector_size == 512) + pagesize_idx = NFI_PAGE_SIZE_2K_4K; + else + pagesize_idx = NFI_PAGE_SIZE_512_2K; + break; + case SZ_4K: + if (snf->caps->sector_size == 512) + pagesize_idx = NFI_PAGE_SIZE_4K_8K; + else + pagesize_idx = NFI_PAGE_SIZE_2K_4K; + break; + case SZ_8K: + if (snf->caps->sector_size == 512) + pagesize_idx = NFI_PAGE_SIZE_8K_16K; + else + pagesize_idx = NFI_PAGE_SIZE_4K_8K; + break; + case SZ_16K: + pagesize_idx = NFI_PAGE_SIZE_8K_16K; + break; + default: + dev_err(snf->dev, "unsupported page size.\n"); + goto err; + } + + spare_size = oob_size / nsectors; + // If we're using the 1KB sector size, HW will automatically double the + // spare size. We should only use half of the value in this case. 
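+	// e.g. (hypothetical numbers) a 1 KiB-sector chip with a 4 KiB page
+	// and 256 B OOB: nsectors = 4, spare_size = 64, halved to 32 for the
+	// table lookup below, then doubled again once an entry matches.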
+ if (snf->caps->sector_size == 1024) + spare_size /= 2; + + for (i = snf->caps->num_spare_size - 1; i >= 0; i--) { + if (snf->caps->spare_sizes[i] <= spare_size) { + spare_size = snf->caps->spare_sizes[i]; + if (snf->caps->sector_size == 1024) + spare_size *= 2; + spare_idx = i; + break; + } + } + + if (spare_idx < 0) { + dev_err(snf->dev, "unsupported spare size: %u\n", spare_size); + goto err; + } + + nfi_write32(snf, NFI_PAGEFMT, + (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) | + (snf->caps->fdm_size << NFI_FDM_NUM_S) | + (spare_idx << spare_size_shift) | + (pagesize_idx << NFI_PAGE_SIZE_S) | + sector_size_512); + + snf->nfi_cfg.page_size = page_size; + snf->nfi_cfg.oob_size = oob_size; + snf->nfi_cfg.nsectors = nsectors; + snf->nfi_cfg.spare_size = spare_size; + + dev_dbg(snf->dev, "page format: (%u + %u) * %u\n", + snf->caps->sector_size, spare_size, nsectors); + return snand_prepare_bouncebuf(snf, page_size + oob_size); +err: + dev_err(snf->dev, "page size %u + %u is not supported\n", page_size, + oob_size); + return -EOPNOTSUPP; +} + +static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc) +{ + // ECC area is not accessible + return -ERANGE; +} + +static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct mtk_snand *ms = nand_to_mtk_snand(nand); + + if (section >= ms->nfi_cfg.nsectors) + return -ERANGE; + + oobfree->length = ms->caps->fdm_size - 1; + oobfree->offset = section * ms->caps->fdm_size + 1; + return 0; +} + +static const struct mtd_ooblayout_ops mtk_snand_ooblayout = { + .ecc = mtk_snand_ooblayout_ecc, + .free = mtk_snand_ooblayout_free, +}; + +static int mtk_snand_ecc_init_ctx(struct nand_device *nand) +{ + struct mtk_snand *snf = nand_to_mtk_snand(nand); + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct nand_ecc_props *reqs = &nand->ecc.requirements; + struct nand_ecc_props *user = &nand->ecc.user_conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int step_size = 0, strength = 0, desired_correction = 0, steps; + bool ecc_user = false; + int ret; + u32 parity_bits, max_ecc_bytes; + struct mtk_ecc_config *ecc_cfg; + + ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize, + nand->memorg.oobsize); + if (ret) + return ret; + + ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); + if (!ecc_cfg) + return -ENOMEM; + + nand->ecc.ctx.priv = ecc_cfg; + + if (user->step_size && user->strength) { + step_size = user->step_size; + strength = user->strength; + ecc_user = true; + } else if (reqs->step_size && reqs->strength) { + step_size = reqs->step_size; + strength = reqs->strength; + } + + if (step_size && strength) { + steps = mtd->writesize / step_size; + desired_correction = steps * strength; + strength = desired_correction / snf->nfi_cfg.nsectors; + } + + ecc_cfg->mode = ECC_NFI_MODE; + ecc_cfg->sectors = snf->nfi_cfg.nsectors; + ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size; + + // calculate the max possible strength under current page format + parity_bits = mtk_ecc_get_parity_bits(snf->ecc); + max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size; + ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits; + mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength); + + // if there's a user requested strength, find the minimum strength that + // meets the requirement. Otherwise use the maximum strength which is + // expected by BootROM. 
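+	// e.g. (hypothetical numbers) spare_size = 27, fdm_size = 8 and 13
+	// parity bits per step give max_ecc_bytes = 19 and a starting
+	// strength of 19 * 8 / 13 = 11 bits, before adjustment rounding.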
+	if (ecc_user && strength) {
+		u32 s_next = ecc_cfg->strength - 1;
+
+		while (1) {
+			mtk_ecc_adjust_strength(snf->ecc, &s_next);
+			if (s_next >= ecc_cfg->strength)
+				break;
+			if (s_next < strength)
+				break;
+			// accept this valid lower strength, then try lower still
+			ecc_cfg->strength = s_next;
+			s_next = ecc_cfg->strength - 1;
+		}
+	}
+
+	mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);
+
+	conf->step_size = snf->caps->sector_size;
+	conf->strength = ecc_cfg->strength;
+
+	if (ecc_cfg->strength < strength)
+		dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
+			 strength);
+	dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
+		 ecc_cfg->strength, snf->caps->sector_size);
+
+	return 0;
+}
+
+static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
+{
+	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
+
+	kfree(ecc_cfg);
+}
+
+static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
+					struct nand_page_io_req *req)
+{
+	struct mtk_snand *snf = nand_to_mtk_snand(nand);
+	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
+	int ret;
+
+	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
+				      nand->memorg.oobsize);
+	if (ret)
+		return ret;
+	snf->autofmt = true;
+	snf->ecc_cfg = ecc_cfg;
+	return 0;
+}
+
+static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
+				       struct nand_page_io_req *req)
+{
+	struct mtk_snand *snf = nand_to_mtk_snand(nand);
+	struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+	snf->ecc_cfg = NULL;
+	snf->autofmt = false;
+	if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
+		return 0;
+
+	if (snf->ecc_stats.failed)
+		mtd->ecc_stats.failed += snf->ecc_stats.failed;
+	mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
+	return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
+}
+
+static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
+	.init_ctx = mtk_snand_ecc_init_ctx,
+	.cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
+	.prepare_io_req = mtk_snand_ecc_prepare_io_req,
+	.finish_io_req = mtk_snand_ecc_finish_io_req,
+};
+
+static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
+{
+	u32 vall, valm;
+	u8 *oobptr = buf;
+	int i, j;
+
+	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
+		vall = nfi_read32(snf, NFI_FDML(i));
+		valm = nfi_read32(snf, NFI_FDMM(i));
+
+		for (j = 0; j < snf->caps->fdm_size; j++)
+			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+
+		oobptr += snf->caps->fdm_size;
+	}
+}
+
+static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
+{
+	u32 fdm_size = snf->caps->fdm_size;
+	const u8 *oobptr = buf;
+	u32 vall, valm;
+	int i, j;
+
+	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
+		vall = 0;
+		valm = 0;
+
+		for (j = 0; j < 8; j++) {
+			if (j < 4)
+				vall |= (j < fdm_size ? oobptr[j] : 0xff)
+					<< (j * 8);
+			else
+				valm |= (j < fdm_size ? oobptr[j] : 0xff)
+					<< ((j - 4) * 8);
+		}
+
+		nfi_write32(snf, NFI_FDML(i), vall);
+		nfi_write32(snf, NFI_FDMM(i), valm);
+
+		oobptr += fdm_size;
+	}
+}
+
+static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
+{
+	u32 buf_bbm_pos, fdm_bbm_pos;
+
+	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
+		return;
+
+	// swap the [pagesize] byte on nand with the first fdm byte
+	// in the last sector.
+ buf_bbm_pos = snf->nfi_cfg.page_size - + (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size; + fdm_bbm_pos = snf->nfi_cfg.page_size + + (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size; + + swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]); +} + +static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf) +{ + u32 fdm_bbm_pos1, fdm_bbm_pos2; + + if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1) + return; + + // swap the first fdm byte in the first and the last sector. + fdm_bbm_pos1 = snf->nfi_cfg.page_size; + fdm_bbm_pos2 = snf->nfi_cfg.page_size + + (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size; + swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]); +} + +static int mtk_snand_read_page_cache(struct mtk_snand *snf, + const struct spi_mem_op *op) +{ + u8 *buf = snf->buf; + u8 *buf_fdm = buf + snf->nfi_cfg.page_size; + // the address part to be sent by the controller + u32 op_addr = op->addr.val; + // where to start copying data from bounce buffer + u32 rd_offset = 0; + u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth); + u32 op_mode = 0; + u32 dma_len = snf->buf_len; + int ret = 0; + u32 rd_mode, rd_bytes, val; + dma_addr_t buf_dma; + + if (snf->autofmt) { + u32 last_bit; + u32 mask; + + dma_len = snf->nfi_cfg.page_size; + op_mode = CNFG_AUTO_FMT_EN; + if (op->data.ecc) + op_mode |= CNFG_HW_ECC_EN; + // extract the plane bit: + // Find the highest bit set in (pagesize+oobsize). + // Bits higher than that in op->addr are kept and sent over SPI + // Lower bits are used as an offset for copying data from DMA + // bounce buffer. + last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size); + mask = (1 << last_bit) - 1; + rd_offset = op_addr & mask; + op_addr &= ~mask; + + // check if we can dma to the caller memory + if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size) + buf = op->data.buf.in; + } + mtk_snand_mac_reset(snf); + mtk_nfi_reset(snf); + + // command and dummy cycles + nfi_write32(snf, SNF_RD_CTL2, + (dummy_clk << DATA_READ_DUMMY_S) | + (op->cmd.opcode << DATA_READ_CMD_S)); + + // read address + nfi_write32(snf, SNF_RD_CTL3, op_addr); + + // Set read op_mode + if (op->data.buswidth == 4) + rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD : + DATA_READ_MODE_X4; + else if (op->data.buswidth == 2) + rd_mode = op->addr.buswidth == 2 ? 
DATA_READ_MODE_DUAL : + DATA_READ_MODE_X2; + else + rd_mode = DATA_READ_MODE_X1; + rd_mode <<= DATA_READ_MODE_S; + nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, + rd_mode | DATARD_CUSTOM_EN); + + // Set bytes to read + rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) * + snf->nfi_cfg.nsectors; + nfi_write32(snf, SNF_MISC_CTL2, + (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes); + + // NFI read prepare + nfi_write16(snf, NFI_CNFG, + (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN | + CNFG_READ_MODE | CNFG_DMA_MODE | op_mode); + + nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S)); + + buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE); + ret = dma_mapping_error(snf->dev, buf_dma); + if (ret) { + dev_err(snf->dev, "DMA mapping failed.\n"); + goto cleanup; + } + nfi_write32(snf, NFI_STRADDR, buf_dma); + if (op->data.ecc) { + snf->ecc_cfg->op = ECC_DECODE; + ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg); + if (ret) + goto cleanup_dma; + } + // Prepare for custom read interrupt + nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ); + reinit_completion(&snf->op_done); + + // Trigger NFI into custom mode + nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ); + + // Start DMA read + nfi_rmw32(snf, NFI_CON, 0, CON_BRD); + nfi_write16(snf, NFI_STRDATA, STR_DATA); + + if (!wait_for_completion_timeout( + &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) { + dev_err(snf->dev, "DMA timed out for reading from cache.\n"); + ret = -ETIMEDOUT; + goto cleanup; + } + + // Wait for BUS_SEC_CNTR returning expected value + ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val, + BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0, + SNFI_POLL_INTERVAL); + if (ret) { + dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n"); + goto cleanup2; + } + + // Wait for bus becoming idle + ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val, + !(val & snf->caps->mastersta_mask), 0, + SNFI_POLL_INTERVAL); + if (ret) { + dev_err(snf->dev, "Timed out waiting for bus becoming idle\n"); + goto cleanup2; + } + + if (op->data.ecc) { + ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE); + if (ret) { + dev_err(snf->dev, "wait ecc done timeout\n"); + goto cleanup2; + } + // save status before disabling ecc + mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats, + snf->nfi_cfg.nsectors); + } + + dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE); + + if (snf->autofmt) { + mtk_snand_read_fdm(snf, buf_fdm); + if (snf->caps->bbm_swap) { + mtk_snand_bm_swap(snf, buf); + mtk_snand_fdm_bm_swap(snf); + } + } + + // copy data back + if (nfi_read32(snf, NFI_STA) & READ_EMPTY) { + memset(op->data.buf.in, 0xff, op->data.nbytes); + snf->ecc_stats.bitflips = 0; + snf->ecc_stats.failed = 0; + snf->ecc_stats.corrected = 0; + } else { + if (buf == op->data.buf.in) { + u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size; + u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size; + + if (req_left) + memcpy(op->data.buf.in + snf->nfi_cfg.page_size, + buf_fdm, + cap_len < req_left ? cap_len : req_left); + } else if (rd_offset < snf->buf_len) { + u32 cap_len = snf->buf_len - rd_offset; + + if (op->data.nbytes < cap_len) + cap_len = op->data.nbytes; + memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len); + } + } +cleanup2: + if (op->data.ecc) + mtk_ecc_disable(snf->ecc); +cleanup_dma: + // unmap dma only if any error happens. 
(otherwise it's done before
+	// data copying)
+	if (ret)
+		dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
+cleanup:
+	// Stop read
+	nfi_write32(snf, NFI_CON, 0);
+	nfi_write16(snf, NFI_CNFG, 0);
+
+	// Clear SNF done flag
+	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
+	nfi_write32(snf, SNF_STA_CTL1, 0);
+
+	// Disable interrupt
+	nfi_read32(snf, NFI_INTR_STA);
+	nfi_write32(snf, NFI_INTR_EN, 0);
+
+	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
+	return ret;
+}
+
+static int mtk_snand_write_page_cache(struct mtk_snand *snf,
+				      const struct spi_mem_op *op)
+{
+	// the address part to be sent by the controller
+	u32 op_addr = op->addr.val;
+	// where to start copying data from bounce buffer
+	u32 wr_offset = 0;
+	u32 op_mode = 0;
+	int ret = 0;
+	u32 wr_mode = 0;
+	u32 dma_len = snf->buf_len;
+	u32 wr_bytes, val;
+	size_t cap_len;
+	dma_addr_t buf_dma;
+
+	if (snf->autofmt) {
+		u32 last_bit;
+		u32 mask;
+
+		dma_len = snf->nfi_cfg.page_size;
+		op_mode = CNFG_AUTO_FMT_EN;
+		if (op->data.ecc)
+			op_mode |= CNFG_HW_ECC_EN;
+
+		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
+		mask = (1 << last_bit) - 1;
+		wr_offset = op_addr & mask;
+		op_addr &= ~mask;
+	}
+	mtk_snand_mac_reset(snf);
+	mtk_nfi_reset(snf);
+
+	if (wr_offset)
+		memset(snf->buf, 0xff, wr_offset);
+
+	cap_len = snf->buf_len - wr_offset;
+	if (op->data.nbytes < cap_len)
+		cap_len = op->data.nbytes;
+	memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
+	if (snf->autofmt) {
+		if (snf->caps->bbm_swap) {
+			mtk_snand_fdm_bm_swap(snf);
+			mtk_snand_bm_swap(snf, snf->buf);
+		}
+		mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
+	}
+
+	// Command
+	nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
+
+	// Write address
+	nfi_write32(snf, SNF_PG_CTL2, op_addr);
+
+	// Set write op_mode
+	if (op->data.buswidth == 4)
+		wr_mode = PG_LOAD_X4_EN;
+
+	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
+		  wr_mode | PG_LOAD_CUSTOM_EN);
+
+	// Set bytes to write
+	wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
+		   snf->nfi_cfg.nsectors;
+	nfi_write32(snf, SNF_MISC_CTL2,
+		    (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);
+
+	// NFI write prepare
+	nfi_write16(snf, NFI_CNFG,
+		    (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
+		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);
+
+	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
+	buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
+	ret = dma_mapping_error(snf->dev, buf_dma);
+	if (ret) {
+		dev_err(snf->dev, "DMA mapping failed.\n");
+		goto cleanup;
+	}
+	nfi_write32(snf, NFI_STRADDR, buf_dma);
+	if (op->data.ecc) {
+		snf->ecc_cfg->op = ECC_ENCODE;
+		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
+		if (ret)
+			goto cleanup_dma;
+	}
+	// Prepare for custom write interrupt
+	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
+	reinit_completion(&snf->op_done);
+
+	// Trigger NFI into custom mode
+	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
+
+	// Start DMA write
+	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
+	nfi_write16(snf, NFI_STRDATA, STR_DATA);
+
+	if (!wait_for_completion_timeout(
+		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
+		dev_err(snf->dev, "DMA timed out for program load.\n");
+		ret = -ETIMEDOUT;
+		goto cleanup_ecc;
+	}
+
+	// Wait for NFI_SEC_CNTR returning expected value
+	ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
+				 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
+				 SNFI_POLL_INTERVAL);
+	if (ret)
+		dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
+
+cleanup_ecc:
+	if (op->data.ecc)
+		mtk_ecc_disable(snf->ecc);
+cleanup_dma:
+	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
+cleanup:
+	// Stop write
+	nfi_write32(snf, NFI_CON, 0);
+	nfi_write16(snf, NFI_CNFG, 0);
+
+	// Clear SNF done flag
+	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
+	nfi_write32(snf, SNF_STA_CTL1, 0);
+
+	// Disable interrupt
+	nfi_read32(snf, NFI_INTR_STA);
+	nfi_write32(snf, NFI_INTR_EN, 0);
+
+	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
+
+	return ret;
+}
+
+/**
+ * mtk_snand_is_page_ops() - check if the op is a controller-supported page op.
+ * @op: spi-mem op to check
+ *
+ * Check whether op can be executed with read_from_cache or program_load
+ * mode in the controller.
+ * This controller can execute the typical Read From Cache and Program Load
+ * instructions found on SPI-NAND with a 2-byte address.
+ * DTR and cmd buswidth & nbytes should be checked before calling this.
+ *
+ * Return: true if the op matches the instruction template
+ */
+static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
+{
+	if (op->addr.nbytes != 2)
+		return false;
+
+	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
+	    op->addr.buswidth != 4)
+		return false;
+
+	// match read from page instructions
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		// check dummy cycle first
+		if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
+		    DATA_READ_MAX_DUMMY)
+			return false;
+		// quad io / quad out
+		if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
+		    op->data.buswidth == 4)
+			return true;
+
+		// dual io / dual out
+		if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
+		    op->data.buswidth == 2)
+			return true;
+
+		// standard spi
+		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
+			return true;
+	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
+		// check dummy cycle first
+		if (op->dummy.nbytes)
+			return false;
+		// program load quad out
+		if (op->addr.buswidth == 1 && op->data.buswidth == 4)
+			return true;
+		// standard spi
+		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
+			return true;
+	}
+	return false;
+}
+
+static bool mtk_snand_supports_op(struct spi_mem *mem,
+				  const struct spi_mem_op *op)
+{
+	if (!spi_mem_default_supports_op(mem, op))
+		return false;
+	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
+		return false;
+	if (mtk_snand_is_page_ops(op))
+		return true;
+	return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
+		(op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
+		(op->data.nbytes == 0 || op->data.buswidth == 1));
+}
+
+static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
+
+	// page ops transfer size must be exactly ((sector_size + spare_size) *
+	// nsectors). Limit the op size if the caller requests more than that.
+	// exec_op will read more than needed and discard the leftover if the
+	// caller requests less data.
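+	// e.g. (hypothetical format) 512 B sectors, spare_size = 16 and
+	// nsectors = 4 cap a page op at (512 + 16) * 4 = 2112 bytes.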
+	if (mtk_snand_is_page_ops(op)) {
+		size_t l;
+		// skip adjust_op_size for page ops
+		if (ms->autofmt)
+			return 0;
+		l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
+		l *= ms->nfi_cfg.nsectors;
+		if (op->data.nbytes > l)
+			op->data.nbytes = l;
+	} else {
+		size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+
+		if (hl >= SNF_GPRAM_SIZE)
+			return -EOPNOTSUPP;
+		if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
+			op->data.nbytes = SNF_GPRAM_SIZE - hl;
+	}
+	return 0;
+}
+
+static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
+
+	dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
+		op->addr.val, op->addr.buswidth, op->addr.nbytes,
+		op->data.buswidth, op->data.nbytes);
+	if (mtk_snand_is_page_ops(op)) {
+		if (op->data.dir == SPI_MEM_DATA_IN)
+			return mtk_snand_read_page_cache(ms, op);
+		else
+			return mtk_snand_write_page_cache(ms, op);
+	} else {
+		return mtk_snand_mac_io(ms, op);
+	}
+}
+
+static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
+	.adjust_op_size = mtk_snand_adjust_op_size,
+	.supports_op = mtk_snand_supports_op,
+	.exec_op = mtk_snand_exec_op,
+};
+
+static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
+	.ecc = true,
+};
+
+static irqreturn_t mtk_snand_irq(int irq, void *id)
+{
+	struct mtk_snand *snf = id;
+	u32 sta, ien;
+
+	sta = nfi_read32(snf, NFI_INTR_STA);
+	ien = nfi_read32(snf, NFI_INTR_EN);
+
+	if (!(sta & ien))
+		return IRQ_NONE;
+
+	nfi_write32(snf, NFI_INTR_EN, 0);
+	complete(&snf->op_done);
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id mtk_snand_ids[] = {
+	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
+	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_snand_ids);
+
+static int mtk_snand_enable_clk(struct mtk_snand *ms)
+{
+	int ret;
+
+	ret = clk_prepare_enable(ms->nfi_clk);
+	if (ret) {
+		dev_err(ms->dev, "unable to enable nfi clk\n");
+		return ret;
+	}
+	ret = clk_prepare_enable(ms->pad_clk);
+	if (ret) {
+		dev_err(ms->dev, "unable to enable pad clk\n");
+		goto err1;
+	}
+	return 0;
+err1:
+	clk_disable_unprepare(ms->nfi_clk);
+	return ret;
+}
+
+static void mtk_snand_disable_clk(struct mtk_snand *ms)
+{
+	clk_disable_unprepare(ms->pad_clk);
+	clk_disable_unprepare(ms->nfi_clk);
+}
+
+static int mtk_snand_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *dev_id;
+	struct spi_controller *ctlr;
+	struct mtk_snand *ms;
+	int ret;
+
+	dev_id = of_match_node(mtk_snand_ids, np);
+	if (!dev_id)
+		return -EINVAL;
+
+	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
+	if (!ctlr)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, ctlr);
+
+	ms = spi_controller_get_devdata(ctlr);
+
+	ms->ctlr = ctlr;
+	ms->caps = dev_id->data;
+
+	ms->ecc = of_mtk_ecc_get(np);
+	if (IS_ERR(ms->ecc))
+		return PTR_ERR(ms->ecc);
+	else if (!ms->ecc)
+		return -ENODEV;
+
+	ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(ms->nfi_base)) {
+		ret = PTR_ERR(ms->nfi_base);
+		goto release_ecc;
+	}
+
+	ms->dev = &pdev->dev;
+
+	ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
+	if (IS_ERR(ms->nfi_clk)) {
+		ret = PTR_ERR(ms->nfi_clk);
+		dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
+		goto release_ecc;
+	}
+
+	ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
+	if (IS_ERR(ms->pad_clk)) {
+		ret = PTR_ERR(ms->pad_clk);
+		dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
+		goto release_ecc;
+	}
+
+	ret = mtk_snand_enable_clk(ms);
+	if (ret)
+		goto release_ecc;
+
+	init_completion(&ms->op_done);
+
+	ms->irq = platform_get_irq(pdev, 0);
+	if (ms->irq < 0) {
+		ret = ms->irq;
+		goto disable_clk;
+	}
+	ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
+			       "mtk-snand", ms);
+	if (ret) {
+		dev_err(ms->dev, "failed to request snfi irq\n");
+		goto disable_clk;
+	}
+
+	ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(ms->dev, "failed to set dma mask\n");
+		goto disable_clk;
+	}
+
+	// switch to SNFI mode
+	nfi_write32(ms, SNF_CFG, SPI_MODE);
+
+	// set up an initial page format for ops matching the page_cache_op
+	// template before the ECC engine is initialized.
+	ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
+				      ms->caps->spare_sizes[0]);
+	if (ret) {
+		dev_err(ms->dev, "failed to set initial page format\n");
+		goto disable_clk;
+	}
+
+	// setup ECC engine
+	ms->ecc_eng.dev = &pdev->dev;
+	ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+	ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
+	ms->ecc_eng.priv = ms;
+
+	ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register ecc engine.\n");
+		goto disable_clk;
+	}
+
+	ctlr->num_chipselect = 1;
+	ctlr->mem_ops = &mtk_snand_mem_ops;
+	ctlr->mem_caps = &mtk_snand_mem_caps;
+	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+	ctlr->dev.of_node = pdev->dev.of_node;
+	ret = spi_register_controller(ctlr);
+	if (ret) {
+		dev_err(&pdev->dev, "spi_register_controller failed.\n");
+		goto disable_clk;
+	}
+
+	return 0;
+disable_clk:
+	mtk_snand_disable_clk(ms);
+release_ecc:
+	mtk_ecc_release(ms->ecc);
+	return ret;
+}
+
+static int mtk_snand_remove(struct platform_device *pdev)
+{
+	struct spi_controller *ctlr = platform_get_drvdata(pdev);
+	struct mtk_snand *ms = spi_controller_get_devdata(ctlr);
+
+	spi_unregister_controller(ctlr);
+	mtk_snand_disable_clk(ms);
+	mtk_ecc_release(ms->ecc);
+	kfree(ms->buf);
+	return 0;
+}
+
+static struct platform_driver mtk_snand_driver = {
+	.probe = mtk_snand_probe,
+	.remove = mtk_snand_remove,
+	.driver = {
+		.name = "mtk-snand",
+		.of_match_table = mtk_snand_ids,
+	},
+};
+
+module_platform_driver(mtk_snand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chuanhong Guo ");
+MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");
diff --git a/drivers/staging/media/atomisp/notes.txt b/drivers/staging/media/atomisp/notes.txt
new file mode 100644
index 0000000000..d128b792e0
--- /dev/null
+++ b/drivers/staging/media/atomisp/notes.txt
@@ -0,0 +1,30 @@
+Some notes about the working of the atomisp drivers (learned while working
+on cleaning it up).
+
+The atomisp seems to be a generic DSP (ISP)-like processor without a fixed
+pipeline. It does not have its own memory, but instead uses main memory.
+The ISP has its own address space, and main memory needs to be mapped into
+that address space through the ISP's MMU.
+
+Memory is allocated by the hmm code. hmm_alloc() returns an ISP virtual
+address. The hmm code keeps a list of all allocations and, when necessary,
+finds the backing hmm buffer object (hmm_bo) by looking it up by its ISP
+virtual address.
+
+The actual processing pipeline is made by loading one or more programs,
+called binaries. The shisp_240??0_v21.bin firmware file contains many
+different binaries.
Binaries are picked by filling an ia_css_binary_descr
+struct with various input and output parameters and then calling
+ia_css_binary_find(). Some binaries support creating multiple outputs
+(preview + video frame?) at the same time.
+
+For example, for the /dev/video0 preview node, load_preview_binaries()
+from atomisp/pci/sh_css.c is called; this loads a preview and
+optionally a scaler binary. Note that when digital zoom is disabled
+(it is enabled by default) only the preview binary is loaded,
+so in that case a single binary handles the entire pipeline.
+
+Since getting a picture requires multiple processing steps,
+this means that, unlike in fixed pipelines, the soft pipelines
+on the ISP can do multiple processing steps in a single pipeline
+element (in a single binary).
diff --git a/drivers/staging/media/stkwebcam/Kconfig b/drivers/staging/media/stkwebcam/Kconfig
new file mode 100644
index 0000000000..4450403dff
--- /dev/null
+++ b/drivers/staging/media/stkwebcam/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STKWEBCAM
+	tristate "USB Syntek DC1125 Camera support (DEPRECATED)"
+	depends on VIDEO_DEV
+	depends on USB
+	help
+	  Say Y here if you want to use this type of camera.
+	  Supported devices are typically found in some Asus laptops,
+	  with USB ids 174f:a311 and 05e1:0501. Other Syntek cameras
+	  may be supported by the stk11xx driver, from which this is
+	  derived, see
+
+	  This driver is deprecated and is scheduled for removal by
+	  the end of 2022. See the TODO file for more information.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called stkwebcam.
+
diff --git a/drivers/staging/media/stkwebcam/Makefile b/drivers/staging/media/stkwebcam/Makefile
new file mode 100644
index 0000000000..17ad7b6f43
--- /dev/null
+++ b/drivers/staging/media/stkwebcam/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+stkwebcam-objs := stk-webcam.o stk-sensor.o
+
+obj-$(CONFIG_VIDEO_STKWEBCAM) += stkwebcam.o
+
diff --git a/drivers/staging/media/stkwebcam/TODO b/drivers/staging/media/stkwebcam/TODO
new file mode 100644
index 0000000000..735304a727
--- /dev/null
+++ b/drivers/staging/media/stkwebcam/TODO
@@ -0,0 +1,12 @@
+This is a very old driver for very old hardware (specifically
+laptops that use this sensor). In addition, according to reports,
+the picture quality is quite bad.
+
+This is also one of the few drivers still not using the vb2
+framework (or even the old videobuf framework!), so this driver
+is now deprecated with the intent of removing it altogether by
+the end of 2022.
+
+In order to keep this driver, it has to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/staging/media/stkwebcam/stk-sensor.c b/drivers/staging/media/stkwebcam/stk-sensor.c
new file mode 100644
index 0000000000..94aa6a27f9
--- /dev/null
+++ b/drivers/staging/media/stkwebcam/stk-sensor.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* stk-sensor.c: Driver for ov96xx sensor (used in some Syntek webcams)
+ *
+ * Copyright 2007-2008 Jaime Velasco Juan
+ *
+ * Some parts derived from ov7670.c:
+ * Copyright 2006 One Laptop Per Child Association, Inc. Written
+ * by Jonathan Corbet with substantial inspiration from Mark
+ * McClelland's ovcamchip code.
+ *
+ * Copyright 2006-7 Jonathan Corbet
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+/* Controlling the sensor via the STK1125 vendor specific control interface:
+ * The camera uses an OmniVision sensor and the stk1125 provides an
+ * SCCB(i2c)-USB bridge which lets us program the sensor.
+ * In my case the sensor id is 0x9652; it can be read from the sensor's
+ * registers 0x0A and 0x0B as follows:
+ * - read register #R:
+ *   output #R to index 0x0208
+ *   output 0x0070 to index 0x0200
+ *   input 1 byte from index 0x0201 (some kind of status register)
+ *   until its value is 0x01
+ *   input 1 byte from index 0x0209. This is the value of #R
+ * - write value V to register #R:
+ *   output #R to index 0x0204
+ *   output V to index 0x0205
+ *   output 0x0005 to index 0x0200
+ *   input 1 byte from index 0x0201 until its value becomes 0x04
+ */
+
+/* It seems the i2c bus is controlled with these registers */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "stk-webcam.h"
+
+#define STK_IIC_BASE (0x0200)
+# define STK_IIC_OP (STK_IIC_BASE)
+# define STK_IIC_OP_TX (0x05)
+# define STK_IIC_OP_RX (0x70)
+# define STK_IIC_STAT (STK_IIC_BASE+1)
+# define STK_IIC_STAT_TX_OK (0x04)
+# define STK_IIC_STAT_RX_OK (0x01)
+/* I don't know what this register does: when it is 0x00 or 0x01
+ * we cannot talk to the sensor; other values work */
+# define STK_IIC_ENABLE (STK_IIC_BASE+2)
+# define STK_IIC_ENABLE_NO (0x00)
+/* This is what the driver writes on Windows */
+# define STK_IIC_ENABLE_YES (0x1e)
+/*
+ * Address of the slave. Seems like the binary driver looks for the
+ * sensor in multiple places, attempting a reset sequence.
+ * We only know about the ov9650.
+ */
+# define STK_IIC_ADDR (STK_IIC_BASE+3)
+# define STK_IIC_TX_INDEX (STK_IIC_BASE+4)
+# define STK_IIC_TX_VALUE (STK_IIC_BASE+5)
+# define STK_IIC_RX_INDEX (STK_IIC_BASE+8)
+# define STK_IIC_RX_VALUE (STK_IIC_BASE+9)
+
+#define MAX_RETRIES (50)
+
+#define SENSOR_ADDRESS (0x60)
+
+/* From ov7670.c (These registers aren't fully accurate) */
+
+/* Registers */
+#define REG_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */
+#define REG_BLUE 0x01 /* blue gain */
+#define REG_RED 0x02 /* red gain */
+#define REG_VREF 0x03 /* Pieces of GAIN, VSTART, VSTOP */
+#define REG_COM1 0x04 /* Control 1 */
+#define COM1_CCIR656 0x40 /* CCIR656 enable */
+#define COM1_QFMT 0x20 /* QVGA/QCIF format */
+#define COM1_SKIP_0 0x00 /* Do not skip any row */
+#define COM1_SKIP_2 0x04 /* Skip 2 rows of 4 */
+#define COM1_SKIP_3 0x08 /* Skip 3 rows of 4 */
+#define REG_BAVE 0x05 /* U/B Average level */
+#define REG_GbAVE 0x06 /* Y/Gb Average level */
+#define REG_AECHH 0x07 /* AEC MS 5 bits */
+#define REG_RAVE 0x08 /* V/R Average level */
+#define REG_COM2 0x09 /* Control 2 */
+#define COM2_SSLEEP 0x10 /* Soft sleep mode */
+#define REG_PID 0x0a /* Product ID MSB */
+#define REG_VER 0x0b /* Product ID LSB */
+#define REG_COM3 0x0c /* Control 3 */
+#define COM3_SWAP 0x40 /* Byte swap */
+#define COM3_SCALEEN 0x08 /* Enable scaling */
+#define COM3_DCWEN 0x04 /* Enable downsamp/crop/window */
+#define REG_COM4 0x0d /* Control 4 */
+#define REG_COM5 0x0e /* All "reserved" */
+#define REG_COM6 0x0f /* Control 6 */
+#define REG_AECH 0x10 /* More bits of AEC value */
+#define REG_CLKRC 0x11 /* Clock control */
+#define CLK_PLL 0x80 /* Enable internal PLL */
+#define CLK_EXT 0x40 /* Use external clock directly */
+#define CLK_SCALE 0x3f /* Mask for internal clock scale */
+#define REG_COM7 0x12 /* Control 7 */
+#define COM7_RESET 0x80 /*
Register reset */ +#define COM7_FMT_MASK 0x38 +#define COM7_FMT_SXGA 0x00 +#define COM7_FMT_VGA 0x40 +#define COM7_FMT_CIF 0x20 /* CIF format */ +#define COM7_FMT_QVGA 0x10 /* QVGA format */ +#define COM7_FMT_QCIF 0x08 /* QCIF format */ +#define COM7_RGB 0x04 /* bits 0 and 2 - RGB format */ +#define COM7_YUV 0x00 /* YUV */ +#define COM7_BAYER 0x01 /* Bayer format */ +#define COM7_PBAYER 0x05 /* "Processed bayer" */ +#define REG_COM8 0x13 /* Control 8 */ +#define COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */ +#define COM8_AECSTEP 0x40 /* Unlimited AEC step size */ +#define COM8_BFILT 0x20 /* Band filter enable */ +#define COM8_AGC 0x04 /* Auto gain enable */ +#define COM8_AWB 0x02 /* White balance enable */ +#define COM8_AEC 0x01 /* Auto exposure enable */ +#define REG_COM9 0x14 /* Control 9 - gain ceiling */ +#define REG_COM10 0x15 /* Control 10 */ +#define COM10_HSYNC 0x40 /* HSYNC instead of HREF */ +#define COM10_PCLK_HB 0x20 /* Suppress PCLK on horiz blank */ +#define COM10_HREF_REV 0x08 /* Reverse HREF */ +#define COM10_VS_LEAD 0x04 /* VSYNC on clock leading edge */ +#define COM10_VS_NEG 0x02 /* VSYNC negative */ +#define COM10_HS_NEG 0x01 /* HSYNC negative */ +#define REG_HSTART 0x17 /* Horiz start high bits */ +#define REG_HSTOP 0x18 /* Horiz stop high bits */ +#define REG_VSTART 0x19 /* Vert start high bits */ +#define REG_VSTOP 0x1a /* Vert stop high bits */ +#define REG_PSHFT 0x1b /* Pixel delay after HREF */ +#define REG_MIDH 0x1c /* Manuf. ID high */ +#define REG_MIDL 0x1d /* Manuf. ID low */ +#define REG_MVFP 0x1e /* Mirror / vflip */ +#define MVFP_MIRROR 0x20 /* Mirror image */ +#define MVFP_FLIP 0x10 /* Vertical flip */ + +#define REG_AEW 0x24 /* AGC upper limit */ +#define REG_AEB 0x25 /* AGC lower limit */ +#define REG_VPT 0x26 /* AGC/AEC fast mode op region */ +#define REG_ADVFL 0x2d /* Insert dummy lines (LSB) */ +#define REG_ADVFH 0x2e /* Insert dummy lines (MSB) */ +#define REG_HSYST 0x30 /* HSYNC rising edge delay */ +#define REG_HSYEN 0x31 /* HSYNC falling edge delay */ +#define REG_HREF 0x32 /* HREF pieces */ +#define REG_TSLB 0x3a /* lots of stuff */ +#define TSLB_YLAST 0x04 /* UYVY or VYUY - see com13 */ +#define TSLB_BYTEORD 0x08 /* swap bytes in 16bit mode? 
*/
+#define REG_COM11 0x3b /* Control 11 */
+#define COM11_NIGHT 0x80 /* Night mode enable */
+#define COM11_NMFR 0x60 /* Two bit NM frame rate */
+#define COM11_HZAUTO 0x10 /* Auto detect 50/60 Hz */
+#define COM11_50HZ 0x08 /* Manual 50Hz select */
+#define COM11_EXP 0x02
+#define REG_COM12 0x3c /* Control 12 */
+#define COM12_HREF 0x80 /* HREF always */
+#define REG_COM13 0x3d /* Control 13 */
+#define COM13_GAMMA 0x80 /* Gamma enable */
+#define COM13_UVSAT 0x40 /* UV saturation auto adjustment */
+#define COM13_CMATRIX 0x10 /* Enable color matrix for RGB or YUV */
+#define COM13_UVSWAP 0x01 /* V before U - w/TSLB */
+#define REG_COM14 0x3e /* Control 14 */
+#define COM14_DCWEN 0x10 /* DCW/PCLK-scale enable */
+#define REG_EDGE 0x3f /* Edge enhancement factor */
+#define REG_COM15 0x40 /* Control 15 */
+#define COM15_R10F0 0x00 /* Data range 10 to F0 */
+#define COM15_R01FE 0x80 /* 01 to FE */
+#define COM15_R00FF 0xc0 /* 00 to FF */
+#define COM15_RGB565 0x10 /* RGB565 output */
+#define COM15_RGBFIXME 0x20 /* FIXME */
+#define COM15_RGB555 0x30 /* RGB555 output */
+#define REG_COM16 0x41 /* Control 16 */
+#define COM16_AWBGAIN 0x08 /* AWB gain enable */
+#define REG_COM17 0x42 /* Control 17 */
+#define COM17_AECWIN 0xc0 /* AEC window - must match COM4 */
+#define COM17_CBAR 0x08 /* DSP Color bar */
+
+/*
+ * This matrix defines how the colors are generated; it must be
+ * tweaked to adjust hue and saturation.
+ *
+ * Order: v-red, v-green, v-blue, u-red, u-green, u-blue
+ *
+ * They are nine-bit signed quantities, with the sign bit
+ * stored in 0x58. Sign for v-red is bit 0, and up from there.
+ */
+#define REG_CMATRIX_BASE 0x4f
+#define CMATRIX_LEN 6
+#define REG_CMATRIX_SIGN 0x58
+
+
+#define REG_BRIGHT 0x55 /* Brightness */
+#define REG_CONTRAS 0x56 /* Contrast control */
+
+#define REG_GFIX 0x69 /* Fix gain control */
+
+#define REG_RGB444 0x8c /* RGB 444 control */
+#define R444_ENABLE 0x02 /* Turn on RGB444, overrides 5x5 */
+#define R444_RGBX 0x01 /* Empty nibble at end */
+
+#define REG_HAECC1 0x9f /* Hist AEC/AGC control 1 */
+#define REG_HAECC2 0xa0 /* Hist AEC/AGC control 2 */
+
+#define REG_BD50MAX 0xa5 /* 50hz banding step limit */
+#define REG_HAECC3 0xa6 /* Hist AEC/AGC control 3 */
+#define REG_HAECC4 0xa7 /* Hist AEC/AGC control 4 */
+#define REG_HAECC5 0xa8 /* Hist AEC/AGC control 5 */
+#define REG_HAECC6 0xa9 /* Hist AEC/AGC control 6 */
+#define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
+#define REG_BD60MAX 0xab /* 60hz banding step limit */
+
+
+
+
+/* Returns 0 if OK */
+static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
+{
+	int i = 0;
+	u8 tmpval = 0;
+
+	if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
+		return 1;
+	if (stk_camera_write_reg(dev, STK_IIC_TX_VALUE, val))
+		return 1;
+	if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_TX))
+		return 1;
+	do {
+		if (stk_camera_read_reg(dev, STK_IIC_STAT, &tmpval))
+			return 1;
+		i++;
+	} while (tmpval == 0 && i < MAX_RETRIES);
+	if (tmpval != STK_IIC_STAT_TX_OK) {
+		if (tmpval)
+			pr_err("stk_sensor_outb failed, status=0x%02x\n",
+			       tmpval);
+		return 1;
+	} else
+		return 0;
+}
+
+static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
+{
+	int i = 0;
+	u8 tmpval = 0;
+
+	if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
+		return 1;
+	if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_RX))
+		return 1;
+	do {
+		if (stk_camera_read_reg(dev, STK_IIC_STAT, &tmpval))
+			return 1;
+		i++;
+	} while (tmpval == 0 && i < MAX_RETRIES);
+	if (tmpval != STK_IIC_STAT_RX_OK) {
+		if (tmpval)
pr_err("stk_sensor_inb failed, status=0x%02x\n", + tmpval); + return 1; + } + + if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval)) + return 1; + + *val = tmpval; + return 0; +} + +static int stk_sensor_write_regvals(struct stk_camera *dev, + struct regval *rv) +{ + int ret; + if (rv == NULL) + return 0; + while (rv->reg != 0xff || rv->val != 0xff) { + ret = stk_sensor_outb(dev, rv->reg, rv->val); + if (ret != 0) + return ret; + rv++; + } + return 0; +} + +int stk_sensor_sleep(struct stk_camera *dev) +{ + u8 tmp; + return stk_sensor_inb(dev, REG_COM2, &tmp) + || stk_sensor_outb(dev, REG_COM2, tmp|COM2_SSLEEP); +} + +int stk_sensor_wakeup(struct stk_camera *dev) +{ + u8 tmp; + return stk_sensor_inb(dev, REG_COM2, &tmp) + || stk_sensor_outb(dev, REG_COM2, tmp&~COM2_SSLEEP); +} + +static struct regval ov_initvals[] = { + {REG_CLKRC, CLK_PLL}, + {REG_COM11, 0x01}, + {0x6a, 0x7d}, + {REG_AECH, 0x40}, + {REG_GAIN, 0x00}, + {REG_BLUE, 0x80}, + {REG_RED, 0x80}, + /* Do not enable fast AEC for now */ + /*{REG_COM8, COM8_FASTAEC|COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC},*/ + {REG_COM8, COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC}, + {0x39, 0x50}, {0x38, 0x93}, + {0x37, 0x00}, {0x35, 0x81}, + {REG_COM5, 0x20}, + {REG_COM1, 0x00}, + {REG_COM3, 0x00}, + {REG_COM4, 0x00}, + {REG_PSHFT, 0x00}, + {0x16, 0x07}, + {0x33, 0xe2}, {0x34, 0xbf}, + {REG_COM16, 0x00}, + {0x96, 0x04}, + /* Gamma curve values */ +/* { 0x7a, 0x20 }, { 0x7b, 0x10 }, + { 0x7c, 0x1e }, { 0x7d, 0x35 }, + { 0x7e, 0x5a }, { 0x7f, 0x69 }, + { 0x80, 0x76 }, { 0x81, 0x80 }, + { 0x82, 0x88 }, { 0x83, 0x8f }, + { 0x84, 0x96 }, { 0x85, 0xa3 }, + { 0x86, 0xaf }, { 0x87, 0xc4 }, + { 0x88, 0xd7 }, { 0x89, 0xe8 }, +*/ + {REG_GFIX, 0x40}, + {0x8e, 0x00}, + {REG_COM12, 0x73}, + {0x8f, 0xdf}, {0x8b, 0x06}, + {0x8c, 0x20}, + {0x94, 0x88}, {0x95, 0x88}, +/* {REG_COM15, 0xc1}, TODO */ + {0x29, 0x3f}, + {REG_COM6, 0x42}, + {REG_BD50MAX, 0x80}, + {REG_HAECC6, 0xb8}, {REG_HAECC7, 0x92}, + {REG_BD60MAX, 0x0a}, + {0x90, 0x00}, {0x91, 0x00}, + {REG_HAECC1, 0x00}, {REG_HAECC2, 0x00}, + {REG_AEW, 0x68}, {REG_AEB, 0x5c}, + {REG_VPT, 0xc3}, + {REG_COM9, 0x2e}, + {0x2a, 0x00}, {0x2b, 0x00}, + + {0xff, 0xff}, /* END MARKER */ +}; + +/* Probe the I2C bus and initialise the sensor chip */ +int stk_sensor_init(struct stk_camera *dev) +{ + u8 idl = 0; + u8 idh = 0; + + if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES) + || stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS) + || stk_sensor_outb(dev, REG_COM7, COM7_RESET)) { + pr_err("Sensor resetting failed\n"); + return -ENODEV; + } + msleep(10); + /* Read the manufacturer ID: ov = 0x7FA2 */ + if (stk_sensor_inb(dev, REG_MIDH, &idh) + || stk_sensor_inb(dev, REG_MIDL, &idl)) { + pr_err("Strange error reading sensor ID\n"); + return -ENODEV; + } + if (idh != 0x7f || idl != 0xa2) { + pr_err("Huh? 
you don't have a sensor from ovt\n"); + return -ENODEV; + } + if (stk_sensor_inb(dev, REG_PID, &idh) + || stk_sensor_inb(dev, REG_VER, &idl)) { + pr_err("Could not read sensor model\n"); + return -ENODEV; + } + stk_sensor_write_regvals(dev, ov_initvals); + msleep(10); + pr_info("OmniVision sensor detected, id %02X%02X at address %x\n", + idh, idl, SENSOR_ADDRESS); + return 0; +} + +/* V4L2_PIX_FMT_UYVY */ +static struct regval ov_fmt_uyvy[] = { + {REG_TSLB, TSLB_YLAST|0x08 }, + { 0x4f, 0x80 }, /* "matrix coefficient 1" */ + { 0x50, 0x80 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x22 }, /* "matrix coefficient 4" */ + { 0x53, 0x5e }, /* "matrix coefficient 5" */ + { 0x54, 0x80 }, /* "matrix coefficient 6" */ + {REG_COM13, COM13_UVSAT|COM13_CMATRIX}, + {REG_COM15, COM15_R00FF }, + {0xff, 0xff}, /* END MARKER */ +}; +/* V4L2_PIX_FMT_YUYV */ +static struct regval ov_fmt_yuyv[] = { + {REG_TSLB, 0 }, + { 0x4f, 0x80 }, /* "matrix coefficient 1" */ + { 0x50, 0x80 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x22 }, /* "matrix coefficient 4" */ + { 0x53, 0x5e }, /* "matrix coefficient 5" */ + { 0x54, 0x80 }, /* "matrix coefficient 6" */ + {REG_COM13, COM13_UVSAT|COM13_CMATRIX}, + {REG_COM15, COM15_R00FF }, + {0xff, 0xff}, /* END MARKER */ +}; + +/* V4L2_PIX_FMT_RGB565X rrrrrggg gggbbbbb */ +static struct regval ov_fmt_rgbr[] = { + { REG_RGB444, 0 }, /* No RGB444 please */ + {REG_TSLB, 0x00}, + { REG_COM1, 0x0 }, + { REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */ + { 0x4f, 0xb3 }, /* "matrix coefficient 1" */ + { 0x50, 0xb3 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x3d }, /* "matrix coefficient 4" */ + { 0x53, 0xa7 }, /* "matrix coefficient 5" */ + { 0x54, 0xe4 }, /* "matrix coefficient 6" */ + { REG_COM13, COM13_GAMMA }, + { REG_COM15, COM15_RGB565|COM15_R00FF }, + { 0xff, 0xff }, +}; + +/* V4L2_PIX_FMT_RGB565 gggbbbbb rrrrrggg */ +static struct regval ov_fmt_rgbp[] = { + { REG_RGB444, 0 }, /* No RGB444 please */ + {REG_TSLB, TSLB_BYTEORD }, + { REG_COM1, 0x0 }, + { REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */ + { 0x4f, 0xb3 }, /* "matrix coefficient 1" */ + { 0x50, 0xb3 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x3d }, /* "matrix coefficient 4" */ + { 0x53, 0xa7 }, /* "matrix coefficient 5" */ + { 0x54, 0xe4 }, /* "matrix coefficient 6" */ + { REG_COM13, COM13_GAMMA }, + { REG_COM15, COM15_RGB565|COM15_R00FF }, + { 0xff, 0xff }, +}; + +/* V4L2_PIX_FMT_SRGGB8 */ +static struct regval ov_fmt_bayer[] = { + /* This changes color order */ + {REG_TSLB, 0x40}, /* BGGR */ + /* {REG_TSLB, 0x08}, */ /* BGGR with vertical image flipping */ + {REG_COM15, COM15_R00FF }, + {0xff, 0xff}, /* END MARKER */ +}; +/* + * Store a set of start/stop values into the camera. + */ +static int stk_sensor_set_hw(struct stk_camera *dev, + int hstart, int hstop, int vstart, int vstop) +{ + int ret; + unsigned char v; +/* + * Horizontal: 11 bits, top 8 live in hstart and hstop. Bottom 3 of + * hstart are in href[2:0], bottom 3 of hstop in href[5:3]. There is + * a mystery "edge offset" value in the top two bits of href. 
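+ * + * For example, the VGA window set up below through stk_sensor_set_hw(dev, + * 302, 1582, 6, 486) yields REG_HSTART = 302 >> 3 = 0x25 and REG_HSTOP = + * 1582 >> 3 = 0xc5, with href[2:0] = 302 & 7 = 6 and href[5:3] = 1582 & 7 = 6.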
+ */ + ret = stk_sensor_outb(dev, REG_HSTART, (hstart >> 3) & 0xff); + ret += stk_sensor_outb(dev, REG_HSTOP, (hstop >> 3) & 0xff); + ret += stk_sensor_inb(dev, REG_HREF, &v); + v = (v & 0xc0) | ((hstop & 0x7) << 3) | (hstart & 0x7); + msleep(10); + ret += stk_sensor_outb(dev, REG_HREF, v); +/* + * Vertical: similar arrangement (note: this is different from ov7670.c) + */ + ret += stk_sensor_outb(dev, REG_VSTART, (vstart >> 3) & 0xff); + ret += stk_sensor_outb(dev, REG_VSTOP, (vstop >> 3) & 0xff); + ret += stk_sensor_inb(dev, REG_VREF, &v); + v = (v & 0xc0) | ((vstop & 0x7) << 3) | (vstart & 0x7); + msleep(10); + ret += stk_sensor_outb(dev, REG_VREF, v); + return ret; +} + + +int stk_sensor_configure(struct stk_camera *dev) +{ + int com7; + /* + * We set up the sensor to output dummy lines in low-res modes, + * so we don't get absurdly high framerates. + */ + unsigned dummylines; + int flip; + struct regval *rv; + + switch (dev->vsettings.mode) { + case MODE_QCIF: com7 = COM7_FMT_QCIF; + dummylines = 604; + break; + case MODE_QVGA: com7 = COM7_FMT_QVGA; + dummylines = 267; + break; + case MODE_CIF: com7 = COM7_FMT_CIF; + dummylines = 412; + break; + case MODE_VGA: com7 = COM7_FMT_VGA; + dummylines = 11; + break; + case MODE_SXGA: com7 = COM7_FMT_SXGA; + dummylines = 0; + break; + default: + pr_err("Unsupported mode %d\n", dev->vsettings.mode); + return -EFAULT; + } + switch (dev->vsettings.palette) { + case V4L2_PIX_FMT_UYVY: + com7 |= COM7_YUV; + rv = ov_fmt_uyvy; + break; + case V4L2_PIX_FMT_YUYV: + com7 |= COM7_YUV; + rv = ov_fmt_yuyv; + break; + case V4L2_PIX_FMT_RGB565: + com7 |= COM7_RGB; + rv = ov_fmt_rgbp; + break; + case V4L2_PIX_FMT_RGB565X: + com7 |= COM7_RGB; + rv = ov_fmt_rgbr; + break; + case V4L2_PIX_FMT_SBGGR8: + com7 |= COM7_PBAYER; + rv = ov_fmt_bayer; + break; + default: + pr_err("Unsupported colorspace\n"); + return -EFAULT; + } + /*FIXME sometimes the sensor goes into a bad state + stk_sensor_write_regvals(dev, ov_initvals); */ + stk_sensor_outb(dev, REG_COM7, com7); + msleep(50); + stk_sensor_write_regvals(dev, rv); + flip = (dev->vsettings.vflip?MVFP_FLIP:0) + | (dev->vsettings.hflip?MVFP_MIRROR:0); + stk_sensor_outb(dev, REG_MVFP, flip); + if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8 + && !dev->vsettings.vflip) + stk_sensor_outb(dev, REG_TSLB, 0x08); + stk_sensor_outb(dev, REG_ADVFH, dummylines >> 8); + stk_sensor_outb(dev, REG_ADVFL, dummylines & 0xff); + msleep(50); + switch (dev->vsettings.mode) { + case MODE_VGA: + if (stk_sensor_set_hw(dev, 302, 1582, 6, 486)) + pr_err("stk_sensor_set_hw failed (VGA)\n"); + break; + case MODE_SXGA: + case MODE_CIF: + case MODE_QVGA: + case MODE_QCIF: + /*FIXME These settings seem ignored by the sensor + if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034)) + pr_err("stk_sensor_set_hw failed (SXGA)\n"); + */ + break; + } + msleep(10); + return 0; +} + +int stk_sensor_set_brightness(struct stk_camera *dev, int br) +{ + if (br < 0 || br > 0xff) + return -EINVAL; + stk_sensor_outb(dev, REG_AEB, max(0x00, br - 6)); + stk_sensor_outb(dev, REG_AEW, min(0xff, br + 6)); + return 0; +} + diff --git a/drivers/staging/media/stkwebcam/stk-webcam.c b/drivers/staging/media/stkwebcam/stk-webcam.c new file mode 100644 index 0000000000..787edb3d47 --- /dev/null +++ b/drivers/staging/media/stkwebcam/stk-webcam.c @@ -0,0 +1,1434 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * stk-webcam.c : Driver for Syntek 1125 USB webcam controller + * + * Copyright (C) 2006 Nicolas VIVIEN + * Copyright 2007-2008 Jaime Velasco Juan + * + * Some parts are 
inspired by cafe_ccic.c + * Copyright 2006-2007 Jonathan Corbet + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/slab.h> + +#include <linux/dmi.h> +#include <linux/usb.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/videodev2.h> +#include <media/v4l2-common.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-event.h> + +#include "stk-webcam.h" + + +static int hflip = -1; +module_param(hflip, int, 0444); +MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 0"); + +static int vflip = -1; +module_param(vflip, int, 0444); +MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 0"); + +static int debug; +module_param(debug, int, 0444); +MODULE_PARM_DESC(debug, "Debug v4l ioctls. Defaults to 0"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jaime Velasco Juan and Nicolas VIVIEN"); +MODULE_DESCRIPTION("Syntek DC1125 webcam driver"); + +/* Some cameras have audio interfaces, we aren't interested in those */ +static const struct usb_device_id stkwebcam_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x174f, 0xa311, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x05e1, 0x0501, 0xff, 0xff, 0xff) }, + { } +}; +MODULE_DEVICE_TABLE(usb, stkwebcam_table); + +/* + * The stk webcam laptop module is mounted upside down in some laptops :( + * + * Some background information (thanks to Hans de Goede for providing this): + * + * 1) Once upon a time the stkwebcam driver was written + * + * 2) The webcam in question was used mostly in Asus laptop models, including + * the laptop of the original author of the driver, and in these models, in + * typical Asus fashion (see the long long list for uvc cams inside v4l-utils), + * they mounted the webcam-module the wrong way up. So the hflip and vflip + * module options were given a default value of 1 (the correct value for + * upside down mounted models) + * + * 3) Years later I got a bug report from a user with a laptop with stkwebcam, + * where the module was actually mounted the right way up, and thus showed + * upside down under Linux. So now I was facing the choice of 2 options: + * + * a) Add a not-upside-down list to stkwebcam, which overrules the default. + * + * b) Do it like all the other drivers do, and make the default right for + * cams mounted the proper way and add an upside-down model list, with + * models where we need to flip-by-default. + * + * Despite knowing that going b) would cause a period of pain where we were + * building the table, I opted to go for option b), since a) is just too ugly + * and, worse, different from how every other driver does it, leading to + * confusion in the long run. This change was made in kernel 3.6. + * + * So for any user report about upside-down images since kernel 3.6 ask them + * to provide the output of 'sudo dmidecode' so the laptop can be added to + * the table below. 
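+ * + * (In the dmidecode output, the relevant fields are "Manufacturer" and + * "Product Name" in the "System Information" section; these correspond to + * the DMI_SYS_VENDOR and DMI_PRODUCT_NAME matches in the entries below.)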
+ */ +static const struct dmi_system_id stk_upside_down_dmi_table[] = { + { + .ident = "ASUS G1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "G1") + } + }, { + .ident = "ASUS F3JC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "F3JC") + } + }, + { + .ident = "T12Rg-H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"), + DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H") + } + }, + { + .ident = "ASUS A6VM", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "A6VM") + } + }, + { + .ident = "ASUS A6JC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "A6JC") + } + }, + {} +}; + + +/* + * Basic stuff + */ +int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value) +{ + struct usb_device *udev = dev->udev; + int ret; + + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + 0x01, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + value, + index, + NULL, + 0, + 500); + if (ret < 0) + return ret; + else + return 0; +} + +int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value) +{ + struct usb_device *udev = dev->udev; + int ret; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + 0x00, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x00, + index, + &dev->read_reg_scratch, + sizeof(u8), + 500); + if (ret >= 0) + *value = dev->read_reg_scratch; + + if (ret < 0) + return ret; + else + return 0; +} + +static int stk_start_stream(struct stk_camera *dev) +{ + u8 value; + int i, ret; + u8 value_116, value_117; + + + if (!is_present(dev)) + return -ENODEV; + if (!is_memallocd(dev) || !is_initialised(dev)) { + pr_err("FIXME: Buffers are not allocated\n"); + return -EFAULT; + } + ret = usb_set_interface(dev->udev, 0, 5); + + if (ret < 0) + pr_err("usb_set_interface failed !\n"); + if (stk_sensor_wakeup(dev)) + pr_err("error waking the sensor\n"); + + stk_camera_read_reg(dev, 0x0116, &value_116); + stk_camera_read_reg(dev, 0x0117, &value_117); + + stk_camera_write_reg(dev, 0x0116, 0x0000); + stk_camera_write_reg(dev, 0x0117, 0x0000); + + stk_camera_read_reg(dev, 0x0100, &value); + stk_camera_write_reg(dev, 0x0100, value | 0x80); + + stk_camera_write_reg(dev, 0x0116, value_116); + stk_camera_write_reg(dev, 0x0117, value_117); + for (i = 0; i < MAX_ISO_BUFS; i++) { + if (dev->isobufs[i].urb) { + ret = usb_submit_urb(dev->isobufs[i].urb, GFP_KERNEL); + atomic_inc(&dev->urbs_used); + if (ret) + return ret; + } + } + set_streaming(dev); + return 0; +} + +static int stk_stop_stream(struct stk_camera *dev) +{ + u8 value; + int i; + if (is_present(dev)) { + stk_camera_read_reg(dev, 0x0100, &value); + stk_camera_write_reg(dev, 0x0100, value & ~0x80); + if (dev->isobufs != NULL) { + for (i = 0; i < MAX_ISO_BUFS; i++) { + if (dev->isobufs[i].urb) + usb_kill_urb(dev->isobufs[i].urb); + } + } + unset_streaming(dev); + + if (usb_set_interface(dev->udev, 0, 0)) + pr_err("usb_set_interface failed !\n"); + if (stk_sensor_sleep(dev)) + pr_err("error suspending the sensor\n"); + } + return 0; +} + +/* + * This seems to be the shortest init sequence we + * must do in order to find the sensor. + * Bit 5 of reg. 0x0000 here is important: when reset to 0, the sensor + * is also reset. Maybe it powers the sensor down? + * The rest of the values don't make a difference. + */ + +static struct regval stk1125_initvals[] = { + /* TODO: what does this sequence mean? 
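+ * One hint: the first write keeps bit 5 of reg 0x0000 set (0x24 has bit 5 + * set), so the sensor is not reset while the rest of the sequence is + * programmed; this is consistent with the comment above. 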
*/ + {0x0000, 0x24}, + {0x0100, 0x21}, + {0x0002, 0x68}, + {0x0003, 0x80}, + {0x0005, 0x00}, + {0x0007, 0x03}, + {0x000d, 0x00}, + {0x000f, 0x02}, + {0x0300, 0x12}, + {0x0350, 0x41}, + {0x0351, 0x00}, + {0x0352, 0x00}, + {0x0353, 0x00}, + {0x0018, 0x10}, + {0x0019, 0x00}, + {0x001b, 0x0e}, + {0x001c, 0x46}, + {0x0300, 0x80}, + {0x001a, 0x04}, + {0x0110, 0x00}, + {0x0111, 0x00}, + {0x0112, 0x00}, + {0x0113, 0x00}, + + {0xffff, 0xff}, +}; + + +static int stk_initialise(struct stk_camera *dev) +{ + struct regval *rv; + int ret; + if (!is_present(dev)) + return -ENODEV; + if (is_initialised(dev)) + return 0; + rv = stk1125_initvals; + while (rv->reg != 0xffff) { + ret = stk_camera_write_reg(dev, rv->reg, rv->val); + if (ret) + return ret; + rv++; + } + if (stk_sensor_init(dev) == 0) { + set_initialised(dev); + return 0; + } else + return -1; +} + +/* *********************************************** */ +/* + * This function is called when an URB transfer completes (isochronous pipe). + * The processing is done in interrupt context, so it has to be fast, must not + * crash, and must not stall. Neat. + */ +static void stk_isoc_handler(struct urb *urb) +{ + int i; + int ret; + int framelen; + unsigned long flags; + + unsigned char *fill = NULL; + unsigned char *iso_buf = NULL; + + struct stk_camera *dev; + struct stk_sio_buffer *fb; + + dev = (struct stk_camera *) urb->context; + + if (dev == NULL) { + pr_err("isoc_handler called with NULL device !\n"); + return; + } + + if (urb->status == -ENOENT || urb->status == -ECONNRESET + || urb->status == -ESHUTDOWN) { + atomic_dec(&dev->urbs_used); + return; + } + + spin_lock_irqsave(&dev->spinlock, flags); + + if (urb->status != -EINPROGRESS && urb->status != 0) { + pr_err("isoc_handler: urb->status == %d\n", urb->status); + goto resubmit; + } + + if (list_empty(&dev->sio_avail)) { + /*FIXME Stop streaming after a while */ + pr_err_ratelimited("isoc_handler without available buffer!\n"); + goto resubmit; + } + fb = list_first_entry(&dev->sio_avail, + struct stk_sio_buffer, list); + fill = fb->buffer + fb->v4lbuf.bytesused; + + for (i = 0; i < urb->number_of_packets; i++) { + if (urb->iso_frame_desc[i].status != 0) { + if (urb->iso_frame_desc[i].status != -EXDEV) + pr_err("Frame %d has error %d\n", + i, urb->iso_frame_desc[i].status); + continue; + } + framelen = urb->iso_frame_desc[i].actual_length; + iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset; + + if (framelen <= 4) + continue; /* no data */ + + /* + * We found something informative in there: the isoc frames + * carry two types of headers: + * type1: 00 xx 00 00 or 20 xx 00 00 + * type2: 80 xx 00 00 00 00 00 00 or a0 xx 00 00 00 00 00 00 + * xx is a sequence counter which has never been seen above 0x3f. + * The payload looks like Bayer data; similarities appear + * after every 640 bytes. + */ + if (*iso_buf & 0x80) { + framelen -= 8; + iso_buf += 8; + /* This marks a new frame */ + if (fb->v4lbuf.bytesused != 0 + && fb->v4lbuf.bytesused != dev->frame_size) { + pr_err_ratelimited("frame %d, bytesused=%d, skipping\n", + i, fb->v4lbuf.bytesused); + fb->v4lbuf.bytesused = 0; + fill = fb->buffer; + } else if (fb->v4lbuf.bytesused == dev->frame_size) { + if (list_is_singular(&dev->sio_avail)) { + /* Always reuse the last buffer */ + fb->v4lbuf.bytesused = 0; + fill = fb->buffer; + } else { + list_move_tail(dev->sio_avail.next, + &dev->sio_full); + wake_up(&dev->wait_frame); + fb = list_first_entry(&dev->sio_avail, + struct stk_sio_buffer, list); + fb->v4lbuf.bytesused = 0; + fill = fb->buffer; + } + } + } 
else { + framelen -= 4; + iso_buf += 4; + } + + /* Our buffer is full !!! */ + if (framelen + fb->v4lbuf.bytesused > dev->frame_size) { + pr_err_ratelimited("Frame buffer overflow, lost sync\n"); + /*FIXME Do something here? */ + continue; + } + spin_unlock_irqrestore(&dev->spinlock, flags); + memcpy(fill, iso_buf, framelen); + spin_lock_irqsave(&dev->spinlock, flags); + fill += framelen; + + /* New size of our buffer */ + fb->v4lbuf.bytesused += framelen; + } + +resubmit: + spin_unlock_irqrestore(&dev->spinlock, flags); + urb->dev = dev->udev; + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret != 0) { + pr_err("Error (%d) re-submitting urb in stk_isoc_handler\n", + ret); + } +} + +/* -------------------------------------------- */ + +static int stk_prepare_iso(struct stk_camera *dev) +{ + void *kbuf; + int i, j; + struct urb *urb; + struct usb_device *udev; + + if (dev == NULL) + return -ENXIO; + udev = dev->udev; + + if (dev->isobufs) + pr_err("isobufs already allocated. Bad\n"); + else + dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs), + GFP_KERNEL); + if (dev->isobufs == NULL) { + pr_err("Unable to allocate iso buffers\n"); + return -ENOMEM; + } + for (i = 0; i < MAX_ISO_BUFS; i++) { + if (dev->isobufs[i].data == NULL) { + kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL); + if (kbuf == NULL) { + pr_err("Failed to allocate iso buffer %d\n", i); + goto isobufs_out; + } + dev->isobufs[i].data = kbuf; + } else + pr_err("isobuf data already allocated\n"); + if (dev->isobufs[i].urb == NULL) { + urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL); + if (urb == NULL) + goto isobufs_out; + dev->isobufs[i].urb = urb; + } else { + pr_err("Killing URB\n"); + usb_kill_urb(dev->isobufs[i].urb); + urb = dev->isobufs[i].urb; + } + urb->interval = 1; + urb->dev = udev; + urb->pipe = usb_rcvisocpipe(udev, dev->isoc_ep); + urb->transfer_flags = URB_ISO_ASAP; + urb->transfer_buffer = dev->isobufs[i].data; + urb->transfer_buffer_length = ISO_BUFFER_SIZE; + urb->complete = stk_isoc_handler; + urb->context = dev; + urb->start_frame = 0; + urb->number_of_packets = ISO_FRAMES_PER_DESC; + + for (j = 0; j < ISO_FRAMES_PER_DESC; j++) { + urb->iso_frame_desc[j].offset = j * ISO_MAX_FRAME_SIZE; + urb->iso_frame_desc[j].length = ISO_MAX_FRAME_SIZE; + } + } + set_memallocd(dev); + return 0; + +isobufs_out: + for (i = 0; i < MAX_ISO_BUFS && dev->isobufs[i].data; i++) + kfree(dev->isobufs[i].data); + for (i = 0; i < MAX_ISO_BUFS && dev->isobufs[i].urb; i++) + usb_free_urb(dev->isobufs[i].urb); + kfree(dev->isobufs); + dev->isobufs = NULL; + return -ENOMEM; +} + +static void stk_clean_iso(struct stk_camera *dev) +{ + int i; + + if (dev == NULL || dev->isobufs == NULL) + return; + + for (i = 0; i < MAX_ISO_BUFS; i++) { + struct urb *urb; + + urb = dev->isobufs[i].urb; + if (urb) { + if (atomic_read(&dev->urbs_used) && is_present(dev)) + usb_kill_urb(urb); + usb_free_urb(urb); + } + kfree(dev->isobufs[i].data); + } + kfree(dev->isobufs); + dev->isobufs = NULL; + unset_memallocd(dev); +} + +static int stk_setup_siobuf(struct stk_camera *dev, int index) +{ + struct stk_sio_buffer *buf = dev->sio_bufs + index; + INIT_LIST_HEAD(&buf->list); + buf->v4lbuf.length = PAGE_ALIGN(dev->frame_size); + buf->buffer = vmalloc_user(buf->v4lbuf.length); + if (buf->buffer == NULL) + return -ENOMEM; + buf->mapcount = 0; + buf->dev = dev; + buf->v4lbuf.index = index; + buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + buf->v4lbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + buf->v4lbuf.field = V4L2_FIELD_NONE; + buf->v4lbuf.memory = 
V4L2_MEMORY_MMAP; + buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length; + return 0; +} + +static int stk_free_sio_buffers(struct stk_camera *dev) +{ + int i; + int nbufs; + unsigned long flags; + if (dev->n_sbufs == 0 || dev->sio_bufs == NULL) + return 0; + /* + * If any buffers are mapped, we cannot free them at all. + */ + for (i = 0; i < dev->n_sbufs; i++) { + if (dev->sio_bufs[i].mapcount > 0) + return -EBUSY; + } + /* + * OK, let's do it. + */ + spin_lock_irqsave(&dev->spinlock, flags); + INIT_LIST_HEAD(&dev->sio_avail); + INIT_LIST_HEAD(&dev->sio_full); + nbufs = dev->n_sbufs; + dev->n_sbufs = 0; + spin_unlock_irqrestore(&dev->spinlock, flags); + for (i = 0; i < nbufs; i++) + vfree(dev->sio_bufs[i].buffer); + kfree(dev->sio_bufs); + dev->sio_bufs = NULL; + return 0; +} + +static int stk_prepare_sio_buffers(struct stk_camera *dev, unsigned n_sbufs) +{ + int i; + if (dev->sio_bufs != NULL) + pr_err("sio_bufs already allocated\n"); + else { + dev->sio_bufs = kcalloc(n_sbufs, + sizeof(struct stk_sio_buffer), + GFP_KERNEL); + if (dev->sio_bufs == NULL) + return -ENOMEM; + for (i = 0; i < n_sbufs; i++) { + if (stk_setup_siobuf(dev, i)) + return (dev->n_sbufs > 1 ? 0 : -ENOMEM); + dev->n_sbufs = i+1; + } + } + return 0; +} + +static int stk_allocate_buffers(struct stk_camera *dev, unsigned n_sbufs) +{ + int err; + err = stk_prepare_iso(dev); + if (err) { + stk_clean_iso(dev); + return err; + } + err = stk_prepare_sio_buffers(dev, n_sbufs); + if (err) { + stk_free_sio_buffers(dev); + return err; + } + return 0; +} + +static void stk_free_buffers(struct stk_camera *dev) +{ + stk_clean_iso(dev); + stk_free_sio_buffers(dev); +} +/* -------------------------------------------- */ + +/* v4l file operations */ + +static int v4l_stk_open(struct file *fp) +{ + struct stk_camera *dev = video_drvdata(fp); + int err; + + if (dev == NULL || !is_present(dev)) + return -ENXIO; + + if (mutex_lock_interruptible(&dev->lock)) + return -ERESTARTSYS; + if (!dev->first_init) + stk_camera_write_reg(dev, 0x0, 0x24); + else + dev->first_init = 0; + + err = v4l2_fh_open(fp); + if (!err) + usb_autopm_get_interface(dev->interface); + mutex_unlock(&dev->lock); + return err; +} + +static int v4l_stk_release(struct file *fp) +{ + struct stk_camera *dev = video_drvdata(fp); + + mutex_lock(&dev->lock); + if (dev->owner == fp) { + stk_stop_stream(dev); + stk_free_buffers(dev); + stk_camera_write_reg(dev, 0x0, 0x49); /* turn off the LED */ + unset_initialised(dev); + dev->owner = NULL; + } + + usb_autopm_put_interface(dev->interface); + mutex_unlock(&dev->lock); + return v4l2_fh_release(fp); +} + +static ssize_t stk_read(struct file *fp, char __user *buf, + size_t count, loff_t *f_pos) +{ + int i; + int ret; + unsigned long flags; + struct stk_sio_buffer *sbuf; + struct stk_camera *dev = video_drvdata(fp); + + if (!is_present(dev)) + return -EIO; + if (dev->owner && (!dev->reading || dev->owner != fp)) + return -EBUSY; + dev->owner = fp; + if (!is_streaming(dev)) { + if (stk_initialise(dev) + || stk_allocate_buffers(dev, 3) + || stk_start_stream(dev)) + return -ENOMEM; + dev->reading = 1; + spin_lock_irqsave(&dev->spinlock, flags); + for (i = 0; i < dev->n_sbufs; i++) { + list_add_tail(&dev->sio_bufs[i].list, &dev->sio_avail); + dev->sio_bufs[i].v4lbuf.flags = V4L2_BUF_FLAG_QUEUED; + } + spin_unlock_irqrestore(&dev->spinlock, flags); + } + if (*f_pos == 0) { + if (fp->f_flags & O_NONBLOCK && list_empty(&dev->sio_full)) + return -EWOULDBLOCK; + ret = wait_event_interruptible(dev->wait_frame, + !list_empty(&dev->sio_full) || 
!is_present(dev)); + if (ret) + return ret; + if (!is_present(dev)) + return -EIO; + } + if (count + *f_pos > dev->frame_size) + count = dev->frame_size - *f_pos; + spin_lock_irqsave(&dev->spinlock, flags); + if (list_empty(&dev->sio_full)) { + spin_unlock_irqrestore(&dev->spinlock, flags); + pr_err("BUG: No siobufs ready\n"); + return 0; + } + sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list); + spin_unlock_irqrestore(&dev->spinlock, flags); + + if (copy_to_user(buf, sbuf->buffer + *f_pos, count)) + return -EFAULT; + + *f_pos += count; + + if (*f_pos >= dev->frame_size) { + *f_pos = 0; + spin_lock_irqsave(&dev->spinlock, flags); + list_move_tail(&sbuf->list, &dev->sio_avail); + spin_unlock_irqrestore(&dev->spinlock, flags); + } + return count; +} + +static ssize_t v4l_stk_read(struct file *fp, char __user *buf, + size_t count, loff_t *f_pos) +{ + struct stk_camera *dev = video_drvdata(fp); + int ret; + + if (mutex_lock_interruptible(&dev->lock)) + return -ERESTARTSYS; + ret = stk_read(fp, buf, count, f_pos); + mutex_unlock(&dev->lock); + return ret; +} + +static __poll_t v4l_stk_poll(struct file *fp, poll_table *wait) +{ + struct stk_camera *dev = video_drvdata(fp); + __poll_t res = v4l2_ctrl_poll(fp, wait); + + poll_wait(fp, &dev->wait_frame, wait); + + if (!is_present(dev)) + return EPOLLERR; + + if (!list_empty(&dev->sio_full)) + return res | EPOLLIN | EPOLLRDNORM; + + return res; +} + + +static void stk_v4l_vm_open(struct vm_area_struct *vma) +{ + struct stk_sio_buffer *sbuf = vma->vm_private_data; + sbuf->mapcount++; +} +static void stk_v4l_vm_close(struct vm_area_struct *vma) +{ + struct stk_sio_buffer *sbuf = vma->vm_private_data; + sbuf->mapcount--; + if (sbuf->mapcount == 0) + sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED; +} +static const struct vm_operations_struct stk_v4l_vm_ops = { + .open = stk_v4l_vm_open, + .close = stk_v4l_vm_close +}; + +static int v4l_stk_mmap(struct file *fp, struct vm_area_struct *vma) +{ + unsigned int i; + int ret; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + struct stk_camera *dev = video_drvdata(fp); + struct stk_sio_buffer *sbuf = NULL; + + if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + for (i = 0; i < dev->n_sbufs; i++) { + if (dev->sio_bufs[i].v4lbuf.m.offset == offset) { + sbuf = dev->sio_bufs + i; + break; + } + } + if (sbuf == NULL) + return -EINVAL; + ret = remap_vmalloc_range(vma, sbuf->buffer, 0); + if (ret) + return ret; + vma->vm_flags |= VM_DONTEXPAND; + vma->vm_private_data = sbuf; + vma->vm_ops = &stk_v4l_vm_ops; + sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED; + stk_v4l_vm_open(vma); + return 0; +} + +/* v4l ioctl handlers */ + +static int stk_vidioc_querycap(struct file *filp, + void *priv, struct v4l2_capability *cap) +{ + struct stk_camera *dev = video_drvdata(filp); + + strscpy(cap->driver, "stk", sizeof(cap->driver)); + strscpy(cap->card, "stk", sizeof(cap->card)); + usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); + return 0; +} + +static int stk_vidioc_enum_input(struct file *filp, + void *priv, struct v4l2_input *input) +{ + if (input->index != 0) + return -EINVAL; + + strscpy(input->name, "Syntek USB Camera", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + return 0; +} + + +static int stk_vidioc_g_input(struct file *filp, void *priv, unsigned int *i) +{ + *i = 0; + return 0; +} + +static int stk_vidioc_s_input(struct file *filp, void *priv, unsigned int i) +{ + return i ? 
-EINVAL : 0; +} + +static int stk_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct stk_camera *dev = + container_of(ctrl->handler, struct stk_camera, hdl); + + switch (ctrl->id) { + case V4L2_CID_BRIGHTNESS: + return stk_sensor_set_brightness(dev, ctrl->val); + case V4L2_CID_HFLIP: + if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.hflip = !ctrl->val; + else + dev->vsettings.hflip = ctrl->val; + return 0; + case V4L2_CID_VFLIP: + if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.vflip = !ctrl->val; + else + dev->vsettings.vflip = ctrl->val; + return 0; + default: + return -EINVAL; + } + return 0; +} + + +static int stk_vidioc_enum_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_fmtdesc *fmtd) +{ + switch (fmtd->index) { + case 0: + fmtd->pixelformat = V4L2_PIX_FMT_RGB565; + break; + case 1: + fmtd->pixelformat = V4L2_PIX_FMT_RGB565X; + break; + case 2: + fmtd->pixelformat = V4L2_PIX_FMT_UYVY; + break; + case 3: + fmtd->pixelformat = V4L2_PIX_FMT_SBGGR8; + break; + case 4: + fmtd->pixelformat = V4L2_PIX_FMT_YUYV; + break; + default: + return -EINVAL; + } + return 0; +} + +static struct stk_size { + unsigned w; + unsigned h; + enum stk_mode m; +} stk_sizes[] = { + { .w = 1280, .h = 1024, .m = MODE_SXGA, }, + { .w = 640, .h = 480, .m = MODE_VGA, }, + { .w = 352, .h = 288, .m = MODE_CIF, }, + { .w = 320, .h = 240, .m = MODE_QVGA, }, + { .w = 176, .h = 144, .m = MODE_QCIF, }, +}; + +static int stk_vidioc_g_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_format *f) +{ + struct v4l2_pix_format *pix_format = &f->fmt.pix; + struct stk_camera *dev = video_drvdata(filp); + int i; + + for (i = 0; i < ARRAY_SIZE(stk_sizes) && + stk_sizes[i].m != dev->vsettings.mode; i++) + ; + if (i == ARRAY_SIZE(stk_sizes)) { + pr_err("ERROR: mode invalid\n"); + return -EINVAL; + } + pix_format->width = stk_sizes[i].w; + pix_format->height = stk_sizes[i].h; + pix_format->field = V4L2_FIELD_NONE; + pix_format->colorspace = V4L2_COLORSPACE_SRGB; + pix_format->pixelformat = dev->vsettings.palette; + if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8) + pix_format->bytesperline = pix_format->width; + else + pix_format->bytesperline = 2 * pix_format->width; + pix_format->sizeimage = pix_format->bytesperline + * pix_format->height; + return 0; +} + +static int stk_try_fmt_vid_cap(struct file *filp, + struct v4l2_format *fmtd, int *idx) +{ + int i; + switch (fmtd->fmt.pix.pixelformat) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_SBGGR8: + break; + default: + return -EINVAL; + } + for (i = 1; i < ARRAY_SIZE(stk_sizes); i++) { + if (fmtd->fmt.pix.width > stk_sizes[i].w) + break; + } + if (i == ARRAY_SIZE(stk_sizes) + || (abs(fmtd->fmt.pix.width - stk_sizes[i-1].w) + < abs(fmtd->fmt.pix.width - stk_sizes[i].w))) { + fmtd->fmt.pix.height = stk_sizes[i-1].h; + fmtd->fmt.pix.width = stk_sizes[i-1].w; + if (idx) + *idx = i - 1; + } else { + fmtd->fmt.pix.height = stk_sizes[i].h; + fmtd->fmt.pix.width = stk_sizes[i].w; + if (idx) + *idx = i; + } + + fmtd->fmt.pix.field = V4L2_FIELD_NONE; + fmtd->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + if (fmtd->fmt.pix.pixelformat == V4L2_PIX_FMT_SBGGR8) + fmtd->fmt.pix.bytesperline = fmtd->fmt.pix.width; + else + fmtd->fmt.pix.bytesperline = 2 * fmtd->fmt.pix.width; + fmtd->fmt.pix.sizeimage = fmtd->fmt.pix.bytesperline + * fmtd->fmt.pix.height; + return 0; +} + +static int stk_vidioc_try_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_format *fmtd) +{ + 
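/* Delegate to the shared helper: a NULL third argument means the caller + * has no use for the index of the matched stk_sizes entry. + */ +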
return stk_try_fmt_vid_cap(filp, fmtd, NULL); +} + +static int stk_setup_format(struct stk_camera *dev) +{ + int i = 0; + int depth; + if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8) + depth = 1; + else + depth = 2; + while (i < ARRAY_SIZE(stk_sizes) && + stk_sizes[i].m != dev->vsettings.mode) + i++; + if (i == ARRAY_SIZE(stk_sizes)) { + pr_err("Something is broken in %s\n", __func__); + return -EFAULT; + } + /* These registers control some timings, not sure exactly which. */ + stk_camera_write_reg(dev, 0x001b, 0x0e); + if (dev->vsettings.mode == MODE_SXGA) + stk_camera_write_reg(dev, 0x001c, 0x0e); + else + stk_camera_write_reg(dev, 0x001c, 0x46); + /* + * Registers 0x0115 0x0114 are the size of each line (bytes), + * regs 0x0117 0x0116 are the height of the image. + */ + stk_camera_write_reg(dev, 0x0115, + ((stk_sizes[i].w * depth) >> 8) & 0xff); + stk_camera_write_reg(dev, 0x0114, + (stk_sizes[i].w * depth) & 0xff); + stk_camera_write_reg(dev, 0x0117, + (stk_sizes[i].h >> 8) & 0xff); + stk_camera_write_reg(dev, 0x0116, + stk_sizes[i].h & 0xff); + return stk_sensor_configure(dev); +} + +static int stk_vidioc_s_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_format *fmtd) +{ + int ret; + int idx; + struct stk_camera *dev = video_drvdata(filp); + + if (dev == NULL) + return -ENODEV; + if (!is_present(dev)) + return -ENODEV; + if (is_streaming(dev)) + return -EBUSY; + if (dev->owner) + return -EBUSY; + ret = stk_try_fmt_vid_cap(filp, fmtd, &idx); + if (ret) + return ret; + + dev->vsettings.palette = fmtd->fmt.pix.pixelformat; + stk_free_buffers(dev); + dev->frame_size = fmtd->fmt.pix.sizeimage; + dev->vsettings.mode = stk_sizes[idx].m; + + stk_initialise(dev); + return stk_setup_format(dev); +} + +static int stk_vidioc_reqbufs(struct file *filp, + void *priv, struct v4l2_requestbuffers *rb) +{ + struct stk_camera *dev = video_drvdata(filp); + + if (dev == NULL) + return -ENODEV; + if (rb->memory != V4L2_MEMORY_MMAP) + return -EINVAL; + if (is_streaming(dev) + || (dev->owner && dev->owner != filp)) + return -EBUSY; + stk_free_buffers(dev); + if (rb->count == 0) { + stk_camera_write_reg(dev, 0x0, 0x49); /* turn off the LED */ + unset_initialised(dev); + dev->owner = NULL; + return 0; + } + dev->owner = filp; + + /*FIXME If they ask for zero, we must stop streaming and free */ + if (rb->count < 3) + rb->count = 3; + /* Arbitrary limit */ + else if (rb->count > 5) + rb->count = 5; + + stk_allocate_buffers(dev, rb->count); + rb->count = dev->n_sbufs; + return 0; +} + +static int stk_vidioc_querybuf(struct file *filp, + void *priv, struct v4l2_buffer *buf) +{ + struct stk_camera *dev = video_drvdata(filp); + struct stk_sio_buffer *sbuf; + + if (buf->index >= dev->n_sbufs) + return -EINVAL; + sbuf = dev->sio_bufs + buf->index; + *buf = sbuf->v4lbuf; + return 0; +} + +static int stk_vidioc_qbuf(struct file *filp, + void *priv, struct v4l2_buffer *buf) +{ + struct stk_camera *dev = video_drvdata(filp); + struct stk_sio_buffer *sbuf; + unsigned long flags; + + if (buf->memory != V4L2_MEMORY_MMAP) + return -EINVAL; + + if (buf->index >= dev->n_sbufs) + return -EINVAL; + sbuf = dev->sio_bufs + buf->index; + if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED) + return 0; + sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_QUEUED; + sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_DONE; + spin_lock_irqsave(&dev->spinlock, flags); + list_add_tail(&sbuf->list, &dev->sio_avail); + *buf = sbuf->v4lbuf; + spin_unlock_irqrestore(&dev->spinlock, flags); + return 0; +} + +static int stk_vidioc_dqbuf(struct file *filp, + void *priv, 
struct v4l2_buffer *buf) +{ + struct stk_camera *dev = video_drvdata(filp); + struct stk_sio_buffer *sbuf; + unsigned long flags; + int ret; + + if (!is_streaming(dev)) + return -EINVAL; + + if (filp->f_flags & O_NONBLOCK && list_empty(&dev->sio_full)) + return -EWOULDBLOCK; + ret = wait_event_interruptible(dev->wait_frame, + !list_empty(&dev->sio_full) || !is_present(dev)); + if (ret) + return ret; + if (!is_present(dev)) + return -EIO; + + spin_lock_irqsave(&dev->spinlock, flags); + sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list); + list_del_init(&sbuf->list); + spin_unlock_irqrestore(&dev->spinlock, flags); + sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED; + sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE; + sbuf->v4lbuf.sequence = ++dev->sequence; + v4l2_buffer_set_timestamp(&sbuf->v4lbuf, ktime_get_ns()); + + *buf = sbuf->v4lbuf; + return 0; +} + +static int stk_vidioc_streamon(struct file *filp, + void *priv, enum v4l2_buf_type type) +{ + struct stk_camera *dev = video_drvdata(filp); + if (is_streaming(dev)) + return 0; + if (dev->sio_bufs == NULL) + return -EINVAL; + dev->sequence = 0; + return stk_start_stream(dev); +} + +static int stk_vidioc_streamoff(struct file *filp, + void *priv, enum v4l2_buf_type type) +{ + struct stk_camera *dev = video_drvdata(filp); + unsigned long flags; + int i; + stk_stop_stream(dev); + spin_lock_irqsave(&dev->spinlock, flags); + INIT_LIST_HEAD(&dev->sio_avail); + INIT_LIST_HEAD(&dev->sio_full); + for (i = 0; i < dev->n_sbufs; i++) { + INIT_LIST_HEAD(&dev->sio_bufs[i].list); + dev->sio_bufs[i].v4lbuf.flags = 0; + } + spin_unlock_irqrestore(&dev->spinlock, flags); + return 0; +} + + +static int stk_vidioc_g_parm(struct file *filp, + void *priv, struct v4l2_streamparm *sp) +{ + /*FIXME This is not correct */ + sp->parm.capture.timeperframe.numerator = 1; + sp->parm.capture.timeperframe.denominator = 30; + sp->parm.capture.readbuffers = 2; + return 0; +} + +static int stk_vidioc_enum_framesizes(struct file *filp, + void *priv, struct v4l2_frmsizeenum *frms) +{ + if (frms->index >= ARRAY_SIZE(stk_sizes)) + return -EINVAL; + switch (frms->pixel_format) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_SBGGR8: + frms->type = V4L2_FRMSIZE_TYPE_DISCRETE; + frms->discrete.width = stk_sizes[frms->index].w; + frms->discrete.height = stk_sizes[frms->index].h; + return 0; + default: return -EINVAL; + } +} + +static const struct v4l2_ctrl_ops stk_ctrl_ops = { + .s_ctrl = stk_s_ctrl, +}; + +static const struct v4l2_file_operations v4l_stk_fops = { + .owner = THIS_MODULE, + .open = v4l_stk_open, + .release = v4l_stk_release, + .read = v4l_stk_read, + .poll = v4l_stk_poll, + .mmap = v4l_stk_mmap, + .unlocked_ioctl = video_ioctl2, +}; + +static const struct v4l2_ioctl_ops v4l_stk_ioctl_ops = { + .vidioc_querycap = stk_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = stk_vidioc_enum_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = stk_vidioc_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = stk_vidioc_s_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = stk_vidioc_g_fmt_vid_cap, + .vidioc_enum_input = stk_vidioc_enum_input, + .vidioc_s_input = stk_vidioc_s_input, + .vidioc_g_input = stk_vidioc_g_input, + .vidioc_reqbufs = stk_vidioc_reqbufs, + .vidioc_querybuf = stk_vidioc_querybuf, + .vidioc_qbuf = stk_vidioc_qbuf, + .vidioc_dqbuf = stk_vidioc_dqbuf, + .vidioc_streamon = stk_vidioc_streamon, + .vidioc_streamoff = stk_vidioc_streamoff, + .vidioc_g_parm = stk_vidioc_g_parm, + .vidioc_enum_framesizes = 
stk_vidioc_enum_framesizes, + .vidioc_log_status = v4l2_ctrl_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static void stk_v4l_dev_release(struct video_device *vd) +{ + struct stk_camera *dev = vdev_to_camera(vd); + + if (dev->sio_bufs != NULL || dev->isobufs != NULL) + pr_err("We are leaking memory\n"); + usb_put_intf(dev->interface); + usb_put_dev(dev->udev); + + v4l2_ctrl_handler_free(&dev->hdl); + v4l2_device_unregister(&dev->v4l2_dev); + kfree(dev); +} + +static const struct video_device stk_v4l_data = { + .name = "stkwebcam", + .fops = &v4l_stk_fops, + .ioctl_ops = &v4l_stk_ioctl_ops, + .release = stk_v4l_dev_release, +}; + + +static int stk_register_video_device(struct stk_camera *dev) +{ + int err; + + dev->vdev = stk_v4l_data; + dev->vdev.lock = &dev->lock; + dev->vdev.v4l2_dev = &dev->v4l2_dev; + dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | + V4L2_CAP_STREAMING; + video_set_drvdata(&dev->vdev, dev); + err = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1); + if (err) + pr_err("v4l registration failed\n"); + else + pr_info("Syntek USB2.0 Camera is now controlling device %s\n", + video_device_node_name(&dev->vdev)); + return err; +} + + +/* USB Stuff */ + +static int stk_camera_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct v4l2_ctrl_handler *hdl; + int err = 0; + int i; + + struct stk_camera *dev = NULL; + struct usb_device *udev = interface_to_usbdev(interface); + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + + dev = kzalloc(sizeof(struct stk_camera), GFP_KERNEL); + if (dev == NULL) { + pr_err("Out of memory !\n"); + return -ENOMEM; + } + err = v4l2_device_register(&interface->dev, &dev->v4l2_dev); + if (err < 0) { + dev_err(&udev->dev, "couldn't register v4l2_device\n"); + kfree(dev); + return err; + } + hdl = &dev->hdl; + v4l2_ctrl_handler_init(hdl, 3); + v4l2_ctrl_new_std(hdl, &stk_ctrl_ops, + V4L2_CID_BRIGHTNESS, 0, 0xff, 0x1, 0x60); + v4l2_ctrl_new_std(hdl, &stk_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 1); + v4l2_ctrl_new_std(hdl, &stk_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 1); + if (hdl->error) { + err = hdl->error; + dev_err(&udev->dev, "couldn't register control\n"); + goto error; + } + dev->v4l2_dev.ctrl_handler = hdl; + + spin_lock_init(&dev->spinlock); + mutex_init(&dev->lock); + init_waitqueue_head(&dev->wait_frame); + dev->first_init = 1; /* webcam LED management */ + + dev->udev = usb_get_dev(udev); + dev->interface = interface; + usb_get_intf(interface); + + if (hflip != -1) + dev->vsettings.hflip = hflip; + else if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.hflip = 1; + else + dev->vsettings.hflip = 0; + if (vflip != -1) + dev->vsettings.vflip = vflip; + else if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.vflip = 1; + else + dev->vsettings.vflip = 0; + dev->n_sbufs = 0; + set_present(dev); + + /* Set up the endpoint information + * use only the first isoc-in endpoint + * for the current alternate setting */ + iface_desc = interface->cur_altsetting; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (!dev->isoc_ep + && usb_endpoint_is_isoc_in(endpoint)) { + /* we found an isoc in endpoint */ + dev->isoc_ep = usb_endpoint_num(endpoint); + break; + } + } + if (!dev->isoc_ep) { + pr_err("Could not find isoc-in endpoint\n"); + err = -ENODEV; + goto error_put; + } + dev->vsettings.palette = 
V4L2_PIX_FMT_RGB565; + dev->vsettings.mode = MODE_VGA; + dev->frame_size = 640 * 480 * 2; + + INIT_LIST_HEAD(&dev->sio_avail); + INIT_LIST_HEAD(&dev->sio_full); + + usb_set_intfdata(interface, dev); + + err = stk_register_video_device(dev); + if (err) + goto error_put; + + return 0; + +error_put: + usb_put_intf(interface); + usb_put_dev(dev->udev); +error: + v4l2_ctrl_handler_free(hdl); + v4l2_device_unregister(&dev->v4l2_dev); + kfree(dev); + return err; +} + +static void stk_camera_disconnect(struct usb_interface *interface) +{ + struct stk_camera *dev = usb_get_intfdata(interface); + + usb_set_intfdata(interface, NULL); + unset_present(dev); + + wake_up_interruptible(&dev->wait_frame); + + pr_info("Syntek USB2.0 Camera releasing resources of device %s\n", + video_device_node_name(&dev->vdev)); + + video_unregister_device(&dev->vdev); +} + +#ifdef CONFIG_PM +static int stk_camera_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct stk_camera *dev = usb_get_intfdata(intf); + if (is_streaming(dev)) { + stk_stop_stream(dev); + /* yes, this is ugly */ + set_streaming(dev); + } + return 0; +} + +static int stk_camera_resume(struct usb_interface *intf) +{ + struct stk_camera *dev = usb_get_intfdata(intf); + if (!is_initialised(dev)) + return 0; + unset_initialised(dev); + stk_initialise(dev); + stk_camera_write_reg(dev, 0x0, 0x49); + stk_setup_format(dev); + if (is_streaming(dev)) + stk_start_stream(dev); + return 0; +} +#endif + +static struct usb_driver stk_camera_driver = { + .name = "stkwebcam", + .probe = stk_camera_probe, + .disconnect = stk_camera_disconnect, + .id_table = stkwebcam_table, +#ifdef CONFIG_PM + .suspend = stk_camera_suspend, + .resume = stk_camera_resume, +#endif +}; + +module_usb_driver(stk_camera_driver); diff --git a/drivers/staging/media/stkwebcam/stk-webcam.h b/drivers/staging/media/stkwebcam/stk-webcam.h new file mode 100644 index 0000000000..136decffe9 --- /dev/null +++ b/drivers/staging/media/stkwebcam/stk-webcam.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * stk-webcam.h : Driver for Syntek 1125 USB webcam controller + * + * Copyright (C) 2006 Nicolas VIVIEN + * Copyright 2007-2008 Jaime Velasco Juan + */ + +#ifndef STKWEBCAM_H +#define STKWEBCAM_H + +#include <linux/usb.h> +#include <media/v4l2-common.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> + +#define DRIVER_VERSION "v0.0.1" +#define DRIVER_VERSION_NUM 0x000001 + +#define MAX_ISO_BUFS 3 +#define ISO_FRAMES_PER_DESC 16 +#define ISO_MAX_FRAME_SIZE (3 * 1024) +#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE) + +struct stk_iso_buf { + void *data; + int length; + int read; + struct urb *urb; +}; + +/* Streaming IO buffers */ +struct stk_sio_buffer { + struct v4l2_buffer v4lbuf; + char *buffer; + int mapcount; + struct stk_camera *dev; + struct list_head list; +}; + +enum stk_mode {MODE_VGA, MODE_SXGA, MODE_CIF, MODE_QVGA, MODE_QCIF}; + +struct stk_video { + enum stk_mode mode; + __u32 palette; + int hflip; + int vflip; +}; + +enum stk_status { + S_PRESENT = 1, + S_INITIALISED = 2, + S_MEMALLOCD = 4, + S_STREAMING = 8, +}; +#define is_present(dev) ((dev)->status & S_PRESENT) +#define is_initialised(dev) ((dev)->status & S_INITIALISED) +#define is_streaming(dev) ((dev)->status & S_STREAMING) +#define is_memallocd(dev) ((dev)->status & S_MEMALLOCD) +#define set_present(dev) ((dev)->status = S_PRESENT) +#define unset_present(dev) ((dev)->status &= \ + ~(S_PRESENT|S_INITIALISED|S_STREAMING)) +#define set_initialised(dev) ((dev)->status |= S_INITIALISED) +#define unset_initialised(dev) ((dev)->status &= 
~S_INITIALISED) +#define set_memallocd(dev) ((dev)->status |= S_MEMALLOCD) +#define unset_memallocd(dev) ((dev)->status &= ~S_MEMALLOCD) +#define set_streaming(dev) ((dev)->status |= S_STREAMING) +#define unset_streaming(dev) ((dev)->status &= ~S_STREAMING) + +struct regval { + unsigned reg; + unsigned val; +}; + +struct stk_camera { + struct v4l2_device v4l2_dev; + struct v4l2_ctrl_handler hdl; + struct video_device vdev; + struct usb_device *udev; + struct usb_interface *interface; + int webcam_model; + struct file *owner; + struct mutex lock; + int first_init; + + u8 isoc_ep; + + /* Not sure if this is right */ + atomic_t urbs_used; + + struct stk_video vsettings; + + enum stk_status status; + + spinlock_t spinlock; + wait_queue_head_t wait_frame; + + struct stk_iso_buf *isobufs; + + int frame_size; + /* Streaming buffers */ + int reading; + unsigned int n_sbufs; + struct stk_sio_buffer *sio_bufs; + struct list_head sio_avail; + struct list_head sio_full; + unsigned sequence; + + u8 read_reg_scratch; +}; + +#define vdev_to_camera(d) container_of(d, struct stk_camera, vdev) + +int stk_camera_write_reg(struct stk_camera *, u16, u8); +int stk_camera_read_reg(struct stk_camera *, u16, u8 *); + +int stk_sensor_init(struct stk_camera *); +int stk_sensor_configure(struct stk_camera *); +int stk_sensor_sleep(struct stk_camera *dev); +int stk_sensor_wakeup(struct stk_camera *dev); +int stk_sensor_set_brightness(struct stk_camera *dev, int br); + +#endif diff --git a/drivers/staging/vc04_services/interface/TESTING b/drivers/staging/vc04_services/interface/TESTING new file mode 100644 index 0000000000..a6d63efcbc --- /dev/null +++ b/drivers/staging/vc04_services/interface/TESTING @@ -0,0 +1,82 @@ +This document contains some hints to test the functionality of the VCHIQ driver +without additional hardware beyond the Raspberry Pi itself. + +* Requirements & limitations + +Testing the VCHIQ driver requires a Raspberry Pi with one of the following SoCs: + - BCM2835 ( e.g. Raspberry Pi Zero W ) + - BCM2836 ( e.g. Raspberry Pi 2 ) + - BCM2837 ( e.g. Raspberry Pi 3 B+ ) + +The BCM2711 used in the Raspberry Pi 4 is currently not supported in the +mainline kernel. + +There are no specific requirements on the VideoCore firmware to get VCHIQ +working. + +The test scenarios described in this document are based on the tool vchiq_test. +Its source code is available here: https://github.com/raspberrypi/userland + +* Configuration + +Here are the most common kernel configurations: + + 1. BCM2835 target SoC (ARM 32 bit) + + Just use bcm2835_defconfig which already has VCHIQ enabled. + + 2. BCM2836/7 target SoC (ARM 32 bit) + + Use the multi_v7_defconfig as a base and then enable all VCHIQ options. + + 3. BCM2837 target SoC (ARM 64 bit) + + Use the defconfig as a base and then enable all VCHIQ options. + +* Scenarios + + * Initial test + + Check that the driver is probed and /dev/vchiq is created + + * Functional test + + Command: vchiq_test -f 10 + + Expected output: + Functional test - iters:10 + ======== iteration 1 ======== + Testing bulk transfer for alignment. + Testing bulk transfer at PAGE_SIZE. + ... 
+ + * Ping test + + Command: vchiq_test -p 1 + + Expected output: + Ping test - service:echo, iters:1, version 3 + vchi ping (size 0) -> 57.000000us + vchi ping (size 0, 0 async, 0 oneway) -> 122.000000us + vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us + vchi bulk (size 0, 0 oneway) -> 230.000000us + vchi ping (size 0) -> 49.000000us + vchi ping (size 0, 0 async, 0 oneway) -> 70.000000us + vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us + vchi bulk (size 0, 0 oneway) -> 266.000000us + vchi ping (size 0, 1 async, 0 oneway) -> 65.000000us + vchi bulk (size 0, 0 oneway) -> 456.000000us + vchi ping (size 0, 2 async, 0 oneway) -> 74.000000us + vchi bulk (size 0, 0 oneway) -> 640.000000us + vchi ping (size 0, 10 async, 0 oneway) -> 125.000000us + vchi bulk (size 0, 0 oneway) -> 2309.000000us + vchi ping (size 0, 0 async, 1 oneway) -> 70.000000us + vchi ping (size 0, 0 async, 2 oneway) -> 76.000000us + vchi ping (size 0, 0 async, 10 oneway) -> 105.000000us + vchi ping (size 0, 10 async, 10 oneway) -> 165.000000us + vchi ping (size 0, 100 async, 0 oneway) -> nanus + vchi bulk (size 0, 0 oneway) -> nanus + vchi ping (size 0, 0 async, 100 oneway) -> nanus + vchi ping (size 0, 100 async, 100 oneway) -> infus + vchi ping (size 0, 200 async, 0 oneway) -> infus + ... diff --git a/drivers/staging/vme_user/Kconfig b/drivers/staging/vme_user/Kconfig new file mode 100644 index 0000000000..c8eabf8f40 --- /dev/null +++ b/drivers/staging/vme_user/Kconfig @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig VME_BUS + bool "VME bridge support" + depends on STAGING && PCI + help + If you say Y here you get support for the VME bridge framework. + +if VME_BUS + +comment "VME Bridge Drivers" + +config VME_TSI148 + tristate "Tempe" + depends on HAS_DMA + help + If you say Y here you get support for the Tundra TSI148 VME bridge + chip. + +config VME_FAKE + tristate "Fake" + help + If you say Y here you get support for the fake VME bridge. This + provides a virtualised VME Bus for devices with no VME bridge. This + is mainly useful for VME development (in the absence of VME + hardware). + +comment "VME Device Drivers" + +config VME_USER + tristate "VME user space access driver" + depends on STAGING && VME_BUS + help + If you say Y here you want to be able to access a limited number of + VME windows in a manner at least semi-compatible with the interface + provided with the original driver at <http://www.vmelinux.org/>. + + To compile this driver as a module, choose M here. The module will + be called vme_user. If unsure, say N. + +endif diff --git a/drivers/staging/vme_user/Makefile b/drivers/staging/vme_user/Makefile new file mode 100644 index 0000000000..8dcc6938ce --- /dev/null +++ b/drivers/staging/vme_user/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the VME device drivers. +# + +obj-$(CONFIG_VME_BUS) += vme.o +obj-$(CONFIG_VME_USER) += vme_user.o +obj-$(CONFIG_VME_TSI148) += vme_tsi148.o +obj-$(CONFIG_VME_FAKE) += vme_fake.o diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c new file mode 100644 index 0000000000..b5555683a0 --- /dev/null +++ b/drivers/staging/vme_user/vme.c @@ -0,0 +1,2015 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * VME Bridge Framework + * + * Author: Martyn Welch <martyn.welch@ge.com> + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. + * + * Based on work by Tom Armistead and Ajit Prem + * Copyright 2004 Motorola Inc. 
+ */ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/pagemap.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/syscalls.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/slab.h> + +#include "vme.h" +#include "vme_bridge.h" + +/* Bitmask and list of registered buses both protected by common mutex */ +static unsigned int vme_bus_numbers; +static LIST_HEAD(vme_bus_list); +static DEFINE_MUTEX(vme_buses_lock); + +static int __init vme_init(void); + +static struct vme_dev *dev_to_vme_dev(struct device *dev) +{ + return container_of(dev, struct vme_dev, dev); +} + +/* + * Find the bridge that the resource is associated with. + */ +static struct vme_bridge *find_bridge(struct vme_resource *resource) +{ + /* Get list to search */ + switch (resource->type) { + case VME_MASTER: + return list_entry(resource->entry, struct vme_master_resource, + list)->parent; + case VME_SLAVE: + return list_entry(resource->entry, struct vme_slave_resource, + list)->parent; + case VME_DMA: + return list_entry(resource->entry, struct vme_dma_resource, + list)->parent; + case VME_LM: + return list_entry(resource->entry, struct vme_lm_resource, + list)->parent; + default: + printk(KERN_ERR "Unknown resource type\n"); + return NULL; + } +} + +/** + * vme_alloc_consistent - Allocate contiguous memory. + * @resource: Pointer to VME resource. + * @size: Size of allocation required. + * @dma: Pointer to variable to store physical address of allocation. + * + * Allocate a contiguous block of memory for use by the driver. This is used to + * create the buffers for the slave windows. + * + * Return: Virtual address of allocation on success, NULL on failure. + */ +void *vme_alloc_consistent(struct vme_resource *resource, size_t size, + dma_addr_t *dma) +{ + struct vme_bridge *bridge; + + if (!resource) { + printk(KERN_ERR "No resource\n"); + return NULL; + } + + bridge = find_bridge(resource); + if (!bridge) { + printk(KERN_ERR "Can't find bridge\n"); + return NULL; + } + + if (!bridge->parent) { + printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name); + return NULL; + } + + if (!bridge->alloc_consistent) { + printk(KERN_ERR "alloc_consistent not supported by bridge %s\n", + bridge->name); + return NULL; + } + + return bridge->alloc_consistent(bridge->parent, size, dma); +} +EXPORT_SYMBOL(vme_alloc_consistent); + +/** + * vme_free_consistent - Free previously allocated memory. + * @resource: Pointer to VME resource. + * @size: Size of allocation to free. + * @vaddr: Virtual address of allocation. + * @dma: Physical address of allocation. + * + * Free previously allocated block of contiguous memory. + */ +void vme_free_consistent(struct vme_resource *resource, size_t size, + void *vaddr, dma_addr_t dma) +{ + struct vme_bridge *bridge; + + if (!resource) { + printk(KERN_ERR "No resource\n"); + return; + } + + bridge = find_bridge(resource); + if (!bridge) { + printk(KERN_ERR "Can't find bridge\n"); + return; + } + + if (!bridge->parent) { + printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name); + return; + } + + if (!bridge->free_consistent) { + printk(KERN_ERR "free_consistent not supported by bridge %s\n", + bridge->name); + return; + } + + bridge->free_consistent(bridge->parent, size, vaddr, dma); +} +EXPORT_SYMBOL(vme_free_consistent); + +/** + * vme_get_size - Helper function returning size of a VME window + * @resource: Pointer to VME slave or master resource. + * + * Determine the size of the VME window provided. 
This is a helper + * function, wrapping the call to vme_master_get or vme_slave_get + * depending on the type of window resource handed to it. + * + * Return: Size of the window on success, zero on failure. + */ +size_t vme_get_size(struct vme_resource *resource) +{ + int enabled, retval; + unsigned long long base, size; + dma_addr_t buf_base; + u32 aspace, cycle, dwidth; + + switch (resource->type) { + case VME_MASTER: + retval = vme_master_get(resource, &enabled, &base, &size, + &aspace, &cycle, &dwidth); + if (retval) + return 0; + + return size; + case VME_SLAVE: + retval = vme_slave_get(resource, &enabled, &base, &size, + &buf_base, &aspace, &cycle); + if (retval) + return 0; + + return size; + case VME_DMA: + return 0; + default: + printk(KERN_ERR "Unknown resource type\n"); + return 0; + } +} +EXPORT_SYMBOL(vme_get_size); + +int vme_check_window(u32 aspace, unsigned long long vme_base, + unsigned long long size) +{ + int retval = 0; + + if (vme_base + size < size) + return -EINVAL; + + switch (aspace) { + case VME_A16: + if (vme_base + size > VME_A16_MAX) + retval = -EFAULT; + break; + case VME_A24: + if (vme_base + size > VME_A24_MAX) + retval = -EFAULT; + break; + case VME_A32: + if (vme_base + size > VME_A32_MAX) + retval = -EFAULT; + break; + case VME_A64: + /* The VME_A64_MAX limit is actually U64_MAX + 1 */ + break; + case VME_CRCSR: + if (vme_base + size > VME_CRCSR_MAX) + retval = -EFAULT; + break; + case VME_USER1: + case VME_USER2: + case VME_USER3: + case VME_USER4: + /* User Defined */ + break; + default: + printk(KERN_ERR "Invalid address space\n"); + retval = -EINVAL; + break; + } + + return retval; +} +EXPORT_SYMBOL(vme_check_window); + +static u32 vme_get_aspace(int am) +{ + switch (am) { + case 0x29: + case 0x2D: + return VME_A16; + case 0x38: + case 0x39: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: + return VME_A24; + case 0x8: + case 0x9: + case 0xA: + case 0xB: + case 0xC: + case 0xD: + case 0xE: + case 0xF: + return VME_A32; + case 0x0: + case 0x1: + case 0x3: + return VME_A64; + } + + return 0; +} + +/** + * vme_slave_request - Request a VME slave window resource. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @address: Required VME address space. + * @cycle: Required VME data transfer cycle type. + * + * Request use of a VME window resource capable of being set for the requested + * address space and data transfer cycle. + * + * Return: Pointer to VME resource on success, NULL on failure. 
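+ * + * A minimal usage sketch (error handling omitted; VME_SCT names one of the + * cycle attributes from vme.h and is used here purely as an example): + * + *	res = vme_slave_request(vdev, VME_A24, VME_SCT); + *	buf = vme_alloc_consistent(res, size, &dma); + *	vme_slave_set(res, 1, vme_base, size, dma, VME_A24, VME_SCT); + *	... + *	vme_free_consistent(res, size, buf, dma); + *	vme_slave_free(res);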
+ */
+struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
+				       u32 cycle)
+{
+	struct vme_bridge *bridge;
+	struct list_head *slave_pos = NULL;
+	struct vme_slave_resource *allocated_image = NULL;
+	struct vme_slave_resource *slave_image = NULL;
+	struct vme_resource *resource = NULL;
+
+	bridge = vdev->bridge;
+	if (!bridge) {
+		printk(KERN_ERR "Can't find VME bus\n");
+		goto err_bus;
+	}
+
+	/* Loop through slave resources */
+	list_for_each(slave_pos, &bridge->slave_resources) {
+		slave_image = list_entry(slave_pos,
+					 struct vme_slave_resource, list);
+
+		if (!slave_image) {
+			printk(KERN_ERR "Registered NULL Slave resource\n");
+			continue;
+		}
+
+		/* Find an unlocked and compatible image */
+		mutex_lock(&slave_image->mtx);
+		if (((slave_image->address_attr & address) == address) &&
+		    ((slave_image->cycle_attr & cycle) == cycle) &&
+		    (slave_image->locked == 0)) {
+
+			slave_image->locked = 1;
+			mutex_unlock(&slave_image->mtx);
+			allocated_image = slave_image;
+			break;
+		}
+		mutex_unlock(&slave_image->mtx);
+	}
+
+	/* No free image */
+	if (!allocated_image)
+		goto err_image;
+
+	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+	if (!resource)
+		goto err_alloc;
+
+	resource->type = VME_SLAVE;
+	resource->entry = &allocated_image->list;
+
+	return resource;
+
+err_alloc:
+	/* Unlock image */
+	mutex_lock(&slave_image->mtx);
+	slave_image->locked = 0;
+	mutex_unlock(&slave_image->mtx);
+err_image:
+err_bus:
+	return NULL;
+}
+EXPORT_SYMBOL(vme_slave_request);
+
+/**
+ * vme_slave_set - Set VME slave window configuration.
+ * @resource: Pointer to VME slave resource.
+ * @enabled: State to which the window should be configured.
+ * @vme_base: Base address for the window.
+ * @size: Size of the VME window.
+ * @buf_base: Base address of buffer used to provide VME slave window storage.
+ * @aspace: VME address space for the VME window.
+ * @cycle: VME data transfer cycle type for the VME window.
+ *
+ * Set configuration for provided VME slave window.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ *         device, if an invalid resource has been provided or invalid
+ *         attributes are provided. Hardware specific errors may also be
+ *         returned.
+ */
+int vme_slave_set(struct vme_resource *resource, int enabled,
+		  unsigned long long vme_base, unsigned long long size,
+		  dma_addr_t buf_base, u32 aspace, u32 cycle)
+{
+	struct vme_bridge *bridge = find_bridge(resource);
+	struct vme_slave_resource *image;
+	int retval;
+
+	if (resource->type != VME_SLAVE) {
+		printk(KERN_ERR "Not a slave resource\n");
+		return -EINVAL;
+	}
+
+	image = list_entry(resource->entry, struct vme_slave_resource, list);
+
+	if (!bridge->slave_set) {
+		printk(KERN_ERR "Function not supported\n");
+		return -EINVAL;
+	}
+
+	if (!(((image->address_attr & aspace) == aspace) &&
+	      ((image->cycle_attr & cycle) == cycle))) {
+		printk(KERN_ERR "Invalid attributes\n");
+		return -EINVAL;
+	}
+
+	retval = vme_check_window(aspace, vme_base, size);
+	if (retval)
+		return retval;
+
+	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
+				 aspace, cycle);
+}
+EXPORT_SYMBOL(vme_slave_set);
+
+/**
+ * vme_slave_get - Retrieve VME slave window configuration.
+ * @resource: Pointer to VME slave resource.
+ * @enabled: Pointer to variable for storing state.
+ * @vme_base: Pointer to variable for storing window base address.
+ * @size: Pointer to variable for storing window size.
+ * @buf_base: Pointer to variable for storing slave buffer base address.
+ * @aspace: Pointer to variable for storing VME address space. + * @cycle: Pointer to variable for storing VME data transfer cycle type. + * + * Return configuration for provided VME slave window. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device or if an invalid resource has been provided. + */ +int vme_slave_get(struct vme_resource *resource, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + dma_addr_t *buf_base, u32 *aspace, u32 *cycle) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_slave_resource *image; + + if (resource->type != VME_SLAVE) { + printk(KERN_ERR "Not a slave resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_slave_resource, list); + + if (!bridge->slave_get) { + printk(KERN_ERR "vme_slave_get not supported\n"); + return -EINVAL; + } + + return bridge->slave_get(image, enabled, vme_base, size, buf_base, + aspace, cycle); +} +EXPORT_SYMBOL(vme_slave_get); + +/** + * vme_slave_free - Free VME slave window + * @resource: Pointer to VME slave resource. + * + * Free the provided slave resource so that it may be reallocated. + */ +void vme_slave_free(struct vme_resource *resource) +{ + struct vme_slave_resource *slave_image; + + if (resource->type != VME_SLAVE) { + printk(KERN_ERR "Not a slave resource\n"); + return; + } + + slave_image = list_entry(resource->entry, struct vme_slave_resource, + list); + if (!slave_image) { + printk(KERN_ERR "Can't find slave resource\n"); + return; + } + + /* Unlock image */ + mutex_lock(&slave_image->mtx); + if (slave_image->locked == 0) + printk(KERN_ERR "Image is already free\n"); + + slave_image->locked = 0; + mutex_unlock(&slave_image->mtx); + + /* Free up resource memory */ + kfree(resource); +} +EXPORT_SYMBOL(vme_slave_free); + +/** + * vme_master_request - Request a VME master window resource. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @address: Required VME address space. + * @cycle: Required VME data transfer cycle type. + * @dwidth: Required VME data transfer width. + * + * Request use of a VME window resource capable of being set for the requested + * address space, data transfer cycle and width. + * + * Return: Pointer to VME resource on success, NULL on failure. 
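+ *
+ * A minimal usage sketch (illustrative only; the A32 window attributes and
+ * addresses are hypothetical):
+ *
+ *	struct vme_resource *res;
+ *	int err;
+ *
+ *	res = vme_master_request(vdev, VME_A32,
+ *				 VME_SCT | VME_USER | VME_DATA, VME_D32);
+ *	if (!res)
+ *		return -ENODEV;
+ *	err = vme_master_set(res, 1, 0x80000000ULL, 0x10000,
+ *			     VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);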
+ */ +struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address, + u32 cycle, u32 dwidth) +{ + struct vme_bridge *bridge; + struct list_head *master_pos = NULL; + struct vme_master_resource *allocated_image = NULL; + struct vme_master_resource *master_image = NULL; + struct vme_resource *resource = NULL; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + goto err_bus; + } + + /* Loop through master resources */ + list_for_each(master_pos, &bridge->master_resources) { + master_image = list_entry(master_pos, + struct vme_master_resource, list); + + if (!master_image) { + printk(KERN_WARNING "Registered NULL master resource\n"); + continue; + } + + /* Find an unlocked and compatible image */ + spin_lock(&master_image->lock); + if (((master_image->address_attr & address) == address) && + ((master_image->cycle_attr & cycle) == cycle) && + ((master_image->width_attr & dwidth) == dwidth) && + (master_image->locked == 0)) { + + master_image->locked = 1; + spin_unlock(&master_image->lock); + allocated_image = master_image; + break; + } + spin_unlock(&master_image->lock); + } + + /* Check to see if we found a resource */ + if (!allocated_image) { + printk(KERN_ERR "Can't find a suitable resource\n"); + goto err_image; + } + + resource = kmalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + goto err_alloc; + + resource->type = VME_MASTER; + resource->entry = &allocated_image->list; + + return resource; + +err_alloc: + /* Unlock image */ + spin_lock(&master_image->lock); + master_image->locked = 0; + spin_unlock(&master_image->lock); +err_image: +err_bus: + return NULL; +} +EXPORT_SYMBOL(vme_master_request); + +/** + * vme_master_set - Set VME master window configuration. + * @resource: Pointer to VME master resource. + * @enabled: State to which the window should be configured. + * @vme_base: Base address for the window. + * @size: Size of the VME window. + * @aspace: VME address space for the VME window. + * @cycle: VME data transfer cycle type for the VME window. + * @dwidth: VME data transfer width for the VME window. + * + * Set configuration for provided VME master window. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device, if an invalid resource has been provided or invalid + * attributes are provided. Hardware specific errors may also be + * returned. + */ +int vme_master_set(struct vme_resource *resource, int enabled, + unsigned long long vme_base, unsigned long long size, u32 aspace, + u32 cycle, u32 dwidth) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + int retval; + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + if (!bridge->master_set) { + printk(KERN_WARNING "vme_master_set not supported\n"); + return -EINVAL; + } + + if (!(((image->address_attr & aspace) == aspace) && + ((image->cycle_attr & cycle) == cycle) && + ((image->width_attr & dwidth) == dwidth))) { + printk(KERN_WARNING "Invalid attributes\n"); + return -EINVAL; + } + + retval = vme_check_window(aspace, vme_base, size); + if (retval) + return retval; + + return bridge->master_set(image, enabled, vme_base, size, aspace, + cycle, dwidth); +} +EXPORT_SYMBOL(vme_master_set); + +/** + * vme_master_get - Retrieve VME master window configuration. + * @resource: Pointer to VME master resource. + * @enabled: Pointer to variable for storing state. 
+ * @vme_base: Pointer to variable for storing window base address. + * @size: Pointer to variable for storing window size. + * @aspace: Pointer to variable for storing VME address space. + * @cycle: Pointer to variable for storing VME data transfer cycle type. + * @dwidth: Pointer to variable for storing VME data transfer width. + * + * Return configuration for provided VME master window. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device or if an invalid resource has been provided. + */ +int vme_master_get(struct vme_resource *resource, int *enabled, + unsigned long long *vme_base, unsigned long long *size, u32 *aspace, + u32 *cycle, u32 *dwidth) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + if (!bridge->master_get) { + printk(KERN_WARNING "%s not supported\n", __func__); + return -EINVAL; + } + + return bridge->master_get(image, enabled, vme_base, size, aspace, + cycle, dwidth); +} +EXPORT_SYMBOL(vme_master_get); + +/** + * vme_master_read - Read data from VME space into a buffer. + * @resource: Pointer to VME master resource. + * @buf: Pointer to buffer where data should be transferred. + * @count: Number of bytes to transfer. + * @offset: Offset into VME master window at which to start transfer. + * + * Perform read of count bytes of data from location on VME bus which maps into + * the VME master window at offset to buf. + * + * Return: Number of bytes read, -EINVAL if resource is not a VME master + * resource or read operation is not supported. -EFAULT returned if + * invalid offset is provided. Hardware specific errors may also be + * returned. + */ +ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count, + loff_t offset) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + size_t length; + + if (!bridge->master_read) { + printk(KERN_WARNING "Reading from resource not supported\n"); + return -EINVAL; + } + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + length = vme_get_size(resource); + + if (offset > length) { + printk(KERN_WARNING "Invalid Offset\n"); + return -EFAULT; + } + + if ((offset + count) > length) + count = length - offset; + + return bridge->master_read(image, buf, count, offset); + +} +EXPORT_SYMBOL(vme_master_read); + +/** + * vme_master_write - Write data out to VME space from a buffer. + * @resource: Pointer to VME master resource. + * @buf: Pointer to buffer holding data to transfer. + * @count: Number of bytes to transfer. + * @offset: Offset into VME master window at which to start transfer. + * + * Perform write of count bytes of data from buf to location on VME bus which + * maps into the VME master window at offset. + * + * Return: Number of bytes written, -EINVAL if resource is not a VME master + * resource or write operation is not supported. -EFAULT returned if + * invalid offset is provided. Hardware specific errors may also be + * returned. 
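+ *
+ * An illustrative sketch of accessing a device register through a window
+ * configured as above (the 0x100 register offset and __be32 layout are
+ * hypothetical):
+ *
+ *	__be32 val = cpu_to_be32(0x1);
+ *	ssize_t ret;
+ *
+ *	ret = vme_master_write(res, &val, sizeof(val), 0x100);
+ *	if (ret < 0)
+ *		return ret;
+ *	ret = vme_master_read(res, &val, sizeof(val), 0x100);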
+ */
+ssize_t vme_master_write(struct vme_resource *resource, void *buf,
+			 size_t count, loff_t offset)
+{
+	struct vme_bridge *bridge = find_bridge(resource);
+	struct vme_master_resource *image;
+	size_t length;
+
+	if (!bridge->master_write) {
+		printk(KERN_WARNING "Writing to resource not supported\n");
+		return -EINVAL;
+	}
+
+	if (resource->type != VME_MASTER) {
+		printk(KERN_ERR "Not a master resource\n");
+		return -EINVAL;
+	}
+
+	image = list_entry(resource->entry, struct vme_master_resource, list);
+
+	length = vme_get_size(resource);
+
+	if (offset > length) {
+		printk(KERN_WARNING "Invalid Offset\n");
+		return -EFAULT;
+	}
+
+	if ((offset + count) > length)
+		count = length - offset;
+
+	return bridge->master_write(image, buf, count, offset);
+}
+EXPORT_SYMBOL(vme_master_write);
+
+/**
+ * vme_master_rmw - Perform read-modify-write cycle.
+ * @resource: Pointer to VME master resource.
+ * @mask: Bits to be compared and swapped in operation.
+ * @compare: Bits to be compared with data read from offset.
+ * @swap: Bits to be swapped in data read from offset.
+ * @offset: Offset into VME master window at which to perform operation.
+ *
+ * Perform read-modify-write cycle on provided location:
+ * - Location on VME bus is read.
+ * - Bits selected by mask are compared with compare.
+ * - Where a selected bit matches that in compare and is selected in swap,
+ *   the bit is swapped.
+ * - Result written back to location on VME bus.
+ *
+ * Return: Bytes written on success, -EINVAL if resource is not a VME master
+ *         resource or RMW operation is not supported. Hardware specific
+ *         errors may also be returned.
+ */
+unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
+			    unsigned int compare, unsigned int swap, loff_t offset)
+{
+	struct vme_bridge *bridge = find_bridge(resource);
+	struct vme_master_resource *image;
+
+	if (!bridge->master_rmw) {
+		printk(KERN_WARNING "RMW operation not supported\n");
+		return -EINVAL;
+	}
+
+	if (resource->type != VME_MASTER) {
+		printk(KERN_ERR "Not a master resource\n");
+		return -EINVAL;
+	}
+
+	image = list_entry(resource->entry, struct vme_master_resource, list);
+
+	return bridge->master_rmw(image, mask, compare, swap, offset);
+}
+EXPORT_SYMBOL(vme_master_rmw);
+
+/**
+ * vme_master_mmap - Mmap region of VME master window.
+ * @resource: Pointer to VME master resource.
+ * @vma: Pointer to definition of user mapping.
+ *
+ * Memory map a region of the VME master window into user space.
+ *
+ * Return: Zero on success, -EINVAL if resource is not a VME master
+ *         resource or -EFAULT if map exceeds window size. Other generic mmap
+ *         errors may also be returned.
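+ *
+ * Typically called from a character driver's mmap handler; an illustrative
+ * sketch (the file_to_resource() helper is hypothetical):
+ *
+ *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		struct vme_resource *res = file_to_resource(file);
+ *
+ *		return vme_master_mmap(res, vma);
+ *	}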
+ */
+int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
+{
+	struct vme_master_resource *image;
+	phys_addr_t phys_addr;
+	unsigned long vma_size;
+
+	if (resource->type != VME_MASTER) {
+		pr_err("Not a master resource\n");
+		return -EINVAL;
+	}
+
+	image = list_entry(resource->entry, struct vme_master_resource, list);
+	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
+	vma_size = vma->vm_end - vma->vm_start;
+
+	if (phys_addr + vma_size > image->bus_resource.end + 1) {
+		pr_err("Map size cannot exceed the window size\n");
+		return -EFAULT;
+	}
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
+}
+EXPORT_SYMBOL(vme_master_mmap);
+
+/**
+ * vme_master_free - Free VME master window
+ * @resource: Pointer to VME master resource.
+ *
+ * Free the provided master resource so that it may be reallocated.
+ */
+void vme_master_free(struct vme_resource *resource)
+{
+	struct vme_master_resource *master_image;
+
+	if (resource->type != VME_MASTER) {
+		printk(KERN_ERR "Not a master resource\n");
+		return;
+	}
+
+	master_image = list_entry(resource->entry, struct vme_master_resource,
+				  list);
+	if (!master_image) {
+		printk(KERN_ERR "Can't find master resource\n");
+		return;
+	}
+
+	/* Unlock image */
+	spin_lock(&master_image->lock);
+	if (master_image->locked == 0)
+		printk(KERN_ERR "Image is already free\n");
+
+	master_image->locked = 0;
+	spin_unlock(&master_image->lock);
+
+	/* Free up resource memory */
+	kfree(resource);
+}
+EXPORT_SYMBOL(vme_master_free);
+
+/**
+ * vme_dma_request - Request a DMA controller.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @route: Required source/destination combination.
+ *
+ * Request a VME DMA controller with capability to perform transfers between
+ * requested source/destination combination.
+ *
+ * Return: Pointer to VME DMA resource on success, NULL on failure.
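+ *
+ * A minimal sketch (the VME-to-memory route is illustrative):
+ *
+ *	struct vme_resource *dma_res;
+ *
+ *	dma_res = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);
+ *	if (!dma_res)
+ *		return -ENODEV;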
+ */
+struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
+{
+	struct vme_bridge *bridge;
+	struct list_head *dma_pos = NULL;
+	struct vme_dma_resource *allocated_ctrlr = NULL;
+	struct vme_dma_resource *dma_ctrlr = NULL;
+	struct vme_resource *resource = NULL;
+
+	/* XXX Not checking resource attributes */
+	printk(KERN_ERR "No VME resource Attribute tests done\n");
+
+	bridge = vdev->bridge;
+	if (!bridge) {
+		printk(KERN_ERR "Can't find VME bus\n");
+		goto err_bus;
+	}
+
+	/* Loop through DMA resources */
+	list_for_each(dma_pos, &bridge->dma_resources) {
+		dma_ctrlr = list_entry(dma_pos,
+				       struct vme_dma_resource, list);
+		if (!dma_ctrlr) {
+			printk(KERN_ERR "Registered NULL DMA resource\n");
+			continue;
+		}
+
+		/* Find an unlocked and compatible controller */
+		mutex_lock(&dma_ctrlr->mtx);
+		if (((dma_ctrlr->route_attr & route) == route) &&
+		    (dma_ctrlr->locked == 0)) {
+
+			dma_ctrlr->locked = 1;
+			mutex_unlock(&dma_ctrlr->mtx);
+			allocated_ctrlr = dma_ctrlr;
+			break;
+		}
+		mutex_unlock(&dma_ctrlr->mtx);
+	}
+
+	/* Check to see if we found a resource */
+	if (!allocated_ctrlr)
+		goto err_ctrlr;
+
+	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+	if (!resource)
+		goto err_alloc;
+
+	resource->type = VME_DMA;
+	resource->entry = &allocated_ctrlr->list;
+
+	return resource;
+
+err_alloc:
+	/* Unlock image */
+	mutex_lock(&dma_ctrlr->mtx);
+	dma_ctrlr->locked = 0;
+	mutex_unlock(&dma_ctrlr->mtx);
+err_ctrlr:
+err_bus:
+	return NULL;
+}
+EXPORT_SYMBOL(vme_dma_request);
+
+/**
+ * vme_new_dma_list - Create new VME DMA list.
+ * @resource: Pointer to VME DMA resource.
+ *
+ * Create a new VME DMA list. It is the responsibility of the user to free
+ * the list once it is no longer required with vme_dma_list_free().
+ *
+ * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
+ *         VME DMA resource.
+ */
+struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
+{
+	struct vme_dma_list *dma_list;
+
+	if (resource->type != VME_DMA) {
+		printk(KERN_ERR "Not a DMA resource\n");
+		return NULL;
+	}
+
+	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+	if (!dma_list)
+		return NULL;
+
+	INIT_LIST_HEAD(&dma_list->entries);
+	dma_list->parent = list_entry(resource->entry,
+				      struct vme_dma_resource,
+				      list);
+	mutex_init(&dma_list->mtx);
+
+	return dma_list;
+}
+EXPORT_SYMBOL(vme_new_dma_list);
+
+/**
+ * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
+ * @pattern: Value to be used as the pattern.
+ * @type: Type of pattern to be written.
+ *
+ * Create VME DMA list attribute for pattern generation. It is the
+ * responsibility of the user to free used attributes using
+ * vme_dma_free_attribute().
+ *
+ * Return: Pointer to VME DMA attribute, NULL on failure.
+ */
+struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
+{
+	struct vme_dma_attr *attributes;
+	struct vme_dma_pattern *pattern_attr;
+
+	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+	if (!attributes)
+		goto err_attr;
+
+	pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
+	if (!pattern_attr)
+		goto err_pat;
+
+	attributes->type = VME_DMA_PATTERN;
+	attributes->private = (void *)pattern_attr;
+
+	pattern_attr->pattern = pattern;
+	pattern_attr->type = type;
+
+	return attributes;
+
+err_pat:
+	kfree(attributes);
+err_attr:
+	return NULL;
+}
+EXPORT_SYMBOL(vme_dma_pattern_attribute);
+
+/**
+ * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
+ * @address: PCI base address for DMA transfer.
+ *
+ * Create VME DMA list attribute pointing to a location on PCI for DMA
+ * transfers. It is the responsibility of the user to free used attributes
+ * using vme_dma_free_attribute().
+ *
+ * Return: Pointer to VME DMA attribute, NULL on failure.
+ */
+struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
+{
+	struct vme_dma_attr *attributes;
+	struct vme_dma_pci *pci_attr;
+
+	/* XXX Run some sanity checks here */
+
+	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+	if (!attributes)
+		goto err_attr;
+
+	pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
+	if (!pci_attr)
+		goto err_pci;
+
+	attributes->type = VME_DMA_PCI;
+	attributes->private = (void *)pci_attr;
+
+	pci_attr->address = address;
+
+	return attributes;
+
+err_pci:
+	kfree(attributes);
+err_attr:
+	return NULL;
+}
+EXPORT_SYMBOL(vme_dma_pci_attribute);
+
+/**
+ * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
+ * @address: VME base address for DMA transfer.
+ * @aspace: VME address space to use for DMA transfer.
+ * @cycle: VME bus cycle to use for DMA transfer.
+ * @dwidth: VME data width to use for DMA transfer.
+ *
+ * Create VME DMA list attribute pointing to a location on the VME bus for DMA
+ * transfers. It is the responsibility of the user to free used attributes
+ * using vme_dma_free_attribute().
+ *
+ * Return: Pointer to VME DMA attribute, NULL on failure.
+ */
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
+					   u32 aspace, u32 cycle, u32 dwidth)
+{
+	struct vme_dma_attr *attributes;
+	struct vme_dma_vme *vme_attr;
+
+	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+	if (!attributes)
+		goto err_attr;
+
+	vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
+	if (!vme_attr)
+		goto err_vme;
+
+	attributes->type = VME_DMA_VME;
+	attributes->private = (void *)vme_attr;
+
+	vme_attr->address = address;
+	vme_attr->aspace = aspace;
+	vme_attr->cycle = cycle;
+	vme_attr->dwidth = dwidth;
+
+	return attributes;
+
+err_vme:
+	kfree(attributes);
+err_attr:
+	return NULL;
+}
+EXPORT_SYMBOL(vme_dma_vme_attribute);
+
+/**
+ * vme_dma_free_attribute - Free DMA list attribute.
+ * @attributes: Pointer to DMA list attribute.
+ *
+ * Free VME DMA list attribute. VME DMA list attributes can be safely freed
+ * once vme_dma_list_add() has returned.
+ */
+void vme_dma_free_attribute(struct vme_dma_attr *attributes)
+{
+	kfree(attributes->private);
+	kfree(attributes);
+}
+EXPORT_SYMBOL(vme_dma_free_attribute);
+
+/**
+ * vme_dma_list_add - Add entry to a VME DMA list.
+ * @list: Pointer to VME list.
+ * @src: Pointer to DMA list attribute to use as source.
+ * @dest: Pointer to DMA list attribute to use as destination.
+ * @count: Number of bytes to transfer.
+ *
+ * Add an entry to the provided VME DMA list. Entry requires pointers to source
+ * and destination DMA attributes and a count.
+ *
+ * Please note that the attributes supported as sources and destinations for
+ * transfers are hardware dependent.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ *         device or if the link list has already been submitted for execution.
+ *         Hardware specific errors also possible.
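+ *
+ * An illustrative sketch of a single VME-to-memory transfer (the addresses,
+ * attributes and pre-allocated bus buffer buf_bus are hypothetical, and
+ * attribute NULL checks are omitted for brevity):
+ *
+ *	struct vme_dma_list *list = vme_new_dma_list(dma_res);
+ *	struct vme_dma_attr *src, *dest;
+ *	int err;
+ *
+ *	src = vme_dma_vme_attribute(0x80000000ULL, VME_A32,
+ *				    VME_SCT | VME_USER | VME_DATA, VME_D32);
+ *	dest = vme_dma_pci_attribute(buf_bus);
+ *	err = vme_dma_list_add(list, src, dest, 0x1000);
+ *	vme_dma_free_attribute(src);
+ *	vme_dma_free_attribute(dest);
+ *	if (!err)
+ *		err = vme_dma_list_exec(list);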
+ */ +int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src, + struct vme_dma_attr *dest, size_t count) +{ + struct vme_bridge *bridge = list->parent->parent; + int retval; + + if (!bridge->dma_list_add) { + printk(KERN_WARNING "Link List DMA generation not supported\n"); + return -EINVAL; + } + + if (!mutex_trylock(&list->mtx)) { + printk(KERN_ERR "Link List already submitted\n"); + return -EINVAL; + } + + retval = bridge->dma_list_add(list, src, dest, count); + + mutex_unlock(&list->mtx); + + return retval; +} +EXPORT_SYMBOL(vme_dma_list_add); + +/** + * vme_dma_list_exec - Queue a VME DMA list for execution. + * @list: Pointer to VME list. + * + * Queue the provided VME DMA list for execution. The call will return once the + * list has been executed. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device. Hardware specific errors also possible. + */ +int vme_dma_list_exec(struct vme_dma_list *list) +{ + struct vme_bridge *bridge = list->parent->parent; + int retval; + + if (!bridge->dma_list_exec) { + printk(KERN_ERR "Link List DMA execution not supported\n"); + return -EINVAL; + } + + mutex_lock(&list->mtx); + + retval = bridge->dma_list_exec(list); + + mutex_unlock(&list->mtx); + + return retval; +} +EXPORT_SYMBOL(vme_dma_list_exec); + +/** + * vme_dma_list_free - Free a VME DMA list. + * @list: Pointer to VME list. + * + * Free the provided DMA list and all its entries. + * + * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource + * is still in use. Hardware specific errors also possible. + */ +int vme_dma_list_free(struct vme_dma_list *list) +{ + struct vme_bridge *bridge = list->parent->parent; + int retval; + + if (!bridge->dma_list_empty) { + printk(KERN_WARNING "Emptying of Link Lists not supported\n"); + return -EINVAL; + } + + if (!mutex_trylock(&list->mtx)) { + printk(KERN_ERR "Link List in use\n"); + return -EBUSY; + } + + /* + * Empty out all of the entries from the DMA list. We need to go to the + * low level driver as DMA entries are driver specific. + */ + retval = bridge->dma_list_empty(list); + if (retval) { + printk(KERN_ERR "Unable to empty link-list entries\n"); + mutex_unlock(&list->mtx); + return retval; + } + mutex_unlock(&list->mtx); + kfree(list); + + return retval; +} +EXPORT_SYMBOL(vme_dma_list_free); + +/** + * vme_dma_free - Free a VME DMA resource. + * @resource: Pointer to VME DMA resource. + * + * Free the provided DMA resource so that it may be reallocated. + * + * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource + * is still active. 
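+ *
+ * An illustrative teardown sketch; a controller with transfers still queued
+ * or running reports -EBUSY:
+ *
+ *	vme_dma_list_free(list);
+ *	vme_dma_free(dma_res);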
+ */ +int vme_dma_free(struct vme_resource *resource) +{ + struct vme_dma_resource *ctrlr; + + if (resource->type != VME_DMA) { + printk(KERN_ERR "Not a DMA resource\n"); + return -EINVAL; + } + + ctrlr = list_entry(resource->entry, struct vme_dma_resource, list); + + if (!mutex_trylock(&ctrlr->mtx)) { + printk(KERN_ERR "Resource busy, can't free\n"); + return -EBUSY; + } + + if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) { + printk(KERN_WARNING "Resource still processing transfers\n"); + mutex_unlock(&ctrlr->mtx); + return -EBUSY; + } + + ctrlr->locked = 0; + + mutex_unlock(&ctrlr->mtx); + + kfree(resource); + + return 0; +} +EXPORT_SYMBOL(vme_dma_free); + +void vme_bus_error_handler(struct vme_bridge *bridge, + unsigned long long address, int am) +{ + struct list_head *handler_pos = NULL; + struct vme_error_handler *handler; + int handler_triggered = 0; + u32 aspace = vme_get_aspace(am); + + list_for_each(handler_pos, &bridge->vme_error_handlers) { + handler = list_entry(handler_pos, struct vme_error_handler, + list); + if ((aspace == handler->aspace) && + (address >= handler->start) && + (address < handler->end)) { + if (!handler->num_errors) + handler->first_error = address; + if (handler->num_errors != UINT_MAX) + handler->num_errors++; + handler_triggered = 1; + } + } + + if (!handler_triggered) + dev_err(bridge->parent, + "Unhandled VME access error at address 0x%llx\n", + address); +} +EXPORT_SYMBOL(vme_bus_error_handler); + +struct vme_error_handler *vme_register_error_handler( + struct vme_bridge *bridge, u32 aspace, + unsigned long long address, size_t len) +{ + struct vme_error_handler *handler; + + handler = kmalloc(sizeof(*handler), GFP_ATOMIC); + if (!handler) + return NULL; + + handler->aspace = aspace; + handler->start = address; + handler->end = address + len; + handler->num_errors = 0; + handler->first_error = 0; + list_add_tail(&handler->list, &bridge->vme_error_handlers); + + return handler; +} +EXPORT_SYMBOL(vme_register_error_handler); + +void vme_unregister_error_handler(struct vme_error_handler *handler) +{ + list_del(&handler->list); + kfree(handler); +} +EXPORT_SYMBOL(vme_unregister_error_handler); + +void vme_irq_handler(struct vme_bridge *bridge, int level, int statid) +{ + void (*call)(int, int, void *); + void *priv_data; + + call = bridge->irq[level - 1].callback[statid].func; + priv_data = bridge->irq[level - 1].callback[statid].priv_data; + if (call) + call(level, statid, priv_data); + else + printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n", + level, statid); +} +EXPORT_SYMBOL(vme_irq_handler); + +/** + * vme_irq_request - Request a specific VME interrupt. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @level: Interrupt priority being requested. + * @statid: Interrupt vector being requested. + * @callback: Pointer to callback function called when VME interrupt/vector + * received. + * @priv_data: Generic pointer that will be passed to the callback function. + * + * Request callback to be attached as a handler for VME interrupts with provided + * level and statid. + * + * Return: Zero on success, -EINVAL on invalid vme device, level or if the + * function is not supported, -EBUSY if the level/statid combination is + * already in use. Hardware specific errors also possible. 
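+ *
+ * An illustrative sketch (the level, vector and handler are hypothetical):
+ *
+ *	static void my_handler(int level, int statid, void *priv)
+ *	{
+ *		pr_info("VME IRQ level %d, vector %#x\n", level, statid);
+ *	}
+ *
+ *	err = vme_irq_request(vdev, 3, 0x20, my_handler, priv);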
+ */ +int vme_irq_request(struct vme_dev *vdev, int level, int statid, + void (*callback)(int, int, void *), + void *priv_data) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return -EINVAL; + } + + if ((level < 1) || (level > 7)) { + printk(KERN_ERR "Invalid interrupt level\n"); + return -EINVAL; + } + + if (!bridge->irq_set) { + printk(KERN_ERR "Configuring interrupts not supported\n"); + return -EINVAL; + } + + mutex_lock(&bridge->irq_mtx); + + if (bridge->irq[level - 1].callback[statid].func) { + mutex_unlock(&bridge->irq_mtx); + printk(KERN_WARNING "VME Interrupt already taken\n"); + return -EBUSY; + } + + bridge->irq[level - 1].count++; + bridge->irq[level - 1].callback[statid].priv_data = priv_data; + bridge->irq[level - 1].callback[statid].func = callback; + + /* Enable IRQ level */ + bridge->irq_set(bridge, level, 1, 1); + + mutex_unlock(&bridge->irq_mtx); + + return 0; +} +EXPORT_SYMBOL(vme_irq_request); + +/** + * vme_irq_free - Free a VME interrupt. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @level: Interrupt priority of interrupt being freed. + * @statid: Interrupt vector of interrupt being freed. + * + * Remove previously attached callback from VME interrupt priority/vector. + */ +void vme_irq_free(struct vme_dev *vdev, int level, int statid) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return; + } + + if ((level < 1) || (level > 7)) { + printk(KERN_ERR "Invalid interrupt level\n"); + return; + } + + if (!bridge->irq_set) { + printk(KERN_ERR "Configuring interrupts not supported\n"); + return; + } + + mutex_lock(&bridge->irq_mtx); + + bridge->irq[level - 1].count--; + + /* Disable IRQ level if no more interrupts attached at this level*/ + if (bridge->irq[level - 1].count == 0) + bridge->irq_set(bridge, level, 0, 1); + + bridge->irq[level - 1].callback[statid].func = NULL; + bridge->irq[level - 1].callback[statid].priv_data = NULL; + + mutex_unlock(&bridge->irq_mtx); +} +EXPORT_SYMBOL(vme_irq_free); + +/** + * vme_irq_generate - Generate VME interrupt. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @level: Interrupt priority at which to assert the interrupt. + * @statid: Interrupt vector to associate with the interrupt. + * + * Generate a VME interrupt of the provided level and with the provided + * statid. + * + * Return: Zero on success, -EINVAL on invalid vme device, level or if the + * function is not supported. Hardware specific errors also possible. + */ +int vme_irq_generate(struct vme_dev *vdev, int level, int statid) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return -EINVAL; + } + + if ((level < 1) || (level > 7)) { + printk(KERN_WARNING "Invalid interrupt level\n"); + return -EINVAL; + } + + if (!bridge->irq_generate) { + printk(KERN_WARNING "Interrupt generation not supported\n"); + return -EINVAL; + } + + return bridge->irq_generate(bridge, level, statid); +} +EXPORT_SYMBOL(vme_irq_generate); + +/** + * vme_lm_request - Request a VME location monitor + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * + * Allocate a location monitor resource to the driver. A location monitor + * allows the driver to monitor accesses to a contiguous number of + * addresses on the VME bus. + * + * Return: Pointer to a VME resource on success or NULL on failure. 
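+ *
+ * An illustrative sketch of monitoring an A24 location (the base address,
+ * my_lm_callback() and priv are hypothetical):
+ *
+ *	struct vme_resource *lm;
+ *	int err;
+ *
+ *	lm = vme_lm_request(vdev);
+ *	if (!lm)
+ *		return -ENODEV;
+ *	err = vme_lm_set(lm, 0x600000, VME_A24, VME_SCT | VME_USER | VME_DATA);
+ *	if (!err)
+ *		err = vme_lm_attach(lm, 0, my_lm_callback, priv);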
+ */ +struct vme_resource *vme_lm_request(struct vme_dev *vdev) +{ + struct vme_bridge *bridge; + struct list_head *lm_pos = NULL; + struct vme_lm_resource *allocated_lm = NULL; + struct vme_lm_resource *lm = NULL; + struct vme_resource *resource = NULL; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + goto err_bus; + } + + /* Loop through LM resources */ + list_for_each(lm_pos, &bridge->lm_resources) { + lm = list_entry(lm_pos, + struct vme_lm_resource, list); + if (!lm) { + printk(KERN_ERR "Registered NULL Location Monitor resource\n"); + continue; + } + + /* Find an unlocked controller */ + mutex_lock(&lm->mtx); + if (lm->locked == 0) { + lm->locked = 1; + mutex_unlock(&lm->mtx); + allocated_lm = lm; + break; + } + mutex_unlock(&lm->mtx); + } + + /* Check to see if we found a resource */ + if (!allocated_lm) + goto err_lm; + + resource = kmalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + goto err_alloc; + + resource->type = VME_LM; + resource->entry = &allocated_lm->list; + + return resource; + +err_alloc: + /* Unlock image */ + mutex_lock(&lm->mtx); + lm->locked = 0; + mutex_unlock(&lm->mtx); +err_lm: +err_bus: + return NULL; +} +EXPORT_SYMBOL(vme_lm_request); + +/** + * vme_lm_count - Determine number of VME Addresses monitored + * @resource: Pointer to VME location monitor resource. + * + * The number of contiguous addresses monitored is hardware dependent. + * Return the number of contiguous addresses monitored by the + * location monitor. + * + * Return: Count of addresses monitored or -EINVAL when provided with an + * invalid location monitor resource. + */ +int vme_lm_count(struct vme_resource *resource) +{ + struct vme_lm_resource *lm; + + if (resource->type != VME_LM) { + printk(KERN_ERR "Not a Location Monitor resource\n"); + return -EINVAL; + } + + lm = list_entry(resource->entry, struct vme_lm_resource, list); + + return lm->monitors; +} +EXPORT_SYMBOL(vme_lm_count); + +/** + * vme_lm_set - Configure location monitor + * @resource: Pointer to VME location monitor resource. + * @lm_base: Base address to monitor. + * @aspace: VME address space to monitor. + * @cycle: VME bus cycle type to monitor. + * + * Set the base address, address space and cycle type of accesses to be + * monitored by the location monitor. + * + * Return: Zero on success, -EINVAL when provided with an invalid location + * monitor resource or function is not supported. Hardware specific + * errors may also be returned. + */ +int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base, + u32 aspace, u32 cycle) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_lm_resource *lm; + + if (resource->type != VME_LM) { + printk(KERN_ERR "Not a Location Monitor resource\n"); + return -EINVAL; + } + + lm = list_entry(resource->entry, struct vme_lm_resource, list); + + if (!bridge->lm_set) { + printk(KERN_ERR "vme_lm_set not supported\n"); + return -EINVAL; + } + + return bridge->lm_set(lm, lm_base, aspace, cycle); +} +EXPORT_SYMBOL(vme_lm_set); + +/** + * vme_lm_get - Retrieve location monitor settings + * @resource: Pointer to VME location monitor resource. + * @lm_base: Pointer used to output the base address monitored. + * @aspace: Pointer used to output the address space monitored. + * @cycle: Pointer used to output the VME bus cycle type monitored. + * + * Retrieve the base address, address space and cycle type of accesses to + * be monitored by the location monitor. 
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ *         monitor resource or function is not supported. Hardware specific
+ *         errors may also be returned.
+ */
+int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
+	       u32 *aspace, u32 *cycle)
+{
+	struct vme_bridge *bridge = find_bridge(resource);
+	struct vme_lm_resource *lm;
+
+	if (resource->type != VME_LM) {
+		printk(KERN_ERR "Not a Location Monitor resource\n");
+		return -EINVAL;
+	}
+
+	lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+	if (!bridge->lm_get) {
+		printk(KERN_ERR "vme_lm_get not supported\n");
+		return -EINVAL;
+	}
+
+	return bridge->lm_get(lm, lm_base, aspace, cycle);
+}
+EXPORT_SYMBOL(vme_lm_get);
+
+/**
+ * vme_lm_attach - Provide callback for location monitor address
+ * @resource: Pointer to VME location monitor resource.
+ * @monitor: Offset to which callback should be attached.
+ * @callback: Pointer to callback function called when triggered.
+ * @data: Generic pointer that will be passed to the callback function.
+ *
+ * Attach a callback to the specified offset into the location monitor's
+ * monitored addresses. A generic pointer is provided to allow data to be
+ * passed to the callback when called.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ *         monitor resource or function is not supported. Hardware specific
+ *         errors may also be returned.
+ */
+int vme_lm_attach(struct vme_resource *resource, int monitor,
+		  void (*callback)(void *), void *data)
+{
+	struct vme_bridge *bridge = find_bridge(resource);
+	struct vme_lm_resource *lm;
+
+	if (resource->type != VME_LM) {
+		printk(KERN_ERR "Not a Location Monitor resource\n");
+		return -EINVAL;
+	}
+
+	lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+	if (!bridge->lm_attach) {
+		printk(KERN_ERR "vme_lm_attach not supported\n");
+		return -EINVAL;
+	}
+
+	return bridge->lm_attach(lm, monitor, callback, data);
+}
+EXPORT_SYMBOL(vme_lm_attach);
+
+/**
+ * vme_lm_detach - Remove callback for location monitor address
+ * @resource: Pointer to VME location monitor resource.
+ * @monitor: Offset from which the callback should be removed.
+ *
+ * Remove the callback associated with the specified offset into the
+ * location monitor's monitored addresses.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ *         monitor resource or function is not supported. Hardware specific
+ *         errors may also be returned.
+ */
+int vme_lm_detach(struct vme_resource *resource, int monitor)
+{
+	struct vme_bridge *bridge = find_bridge(resource);
+	struct vme_lm_resource *lm;
+
+	if (resource->type != VME_LM) {
+		printk(KERN_ERR "Not a Location Monitor resource\n");
+		return -EINVAL;
+	}
+
+	lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+	if (!bridge->lm_detach) {
+		printk(KERN_ERR "vme_lm_detach not supported\n");
+		return -EINVAL;
+	}
+
+	return bridge->lm_detach(lm, monitor);
+}
+EXPORT_SYMBOL(vme_lm_detach);
+
+/**
+ * vme_lm_free - Free allocated VME location monitor
+ * @resource: Pointer to VME location monitor resource.
+ *
+ * Free allocation of a VME location monitor.
+ *
+ * WARNING: This function currently expects that any callbacks that have
+ * been attached to the location monitor have been removed.
+ */ +void vme_lm_free(struct vme_resource *resource) +{ + struct vme_lm_resource *lm; + + if (resource->type != VME_LM) { + printk(KERN_ERR "Not a Location Monitor resource\n"); + return; + } + + lm = list_entry(resource->entry, struct vme_lm_resource, list); + + mutex_lock(&lm->mtx); + + /* XXX + * Check to see that there aren't any callbacks still attached, if + * there are we should probably be detaching them! + */ + + lm->locked = 0; + + mutex_unlock(&lm->mtx); + + kfree(resource); +} +EXPORT_SYMBOL(vme_lm_free); + +/** + * vme_slot_num - Retrieve slot ID + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * + * Retrieve the slot ID associated with the provided VME device. + * + * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined + * or the function is not supported. Hardware specific errors may also + * be returned. + */ +int vme_slot_num(struct vme_dev *vdev) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return -EINVAL; + } + + if (!bridge->slot_get) { + printk(KERN_WARNING "vme_slot_num not supported\n"); + return -EINVAL; + } + + return bridge->slot_get(bridge); +} +EXPORT_SYMBOL(vme_slot_num); + +/** + * vme_bus_num - Retrieve bus number + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * + * Retrieve the bus enumeration associated with the provided VME device. + * + * Return: The bus number on success, -EINVAL if VME bridge cannot be + * determined. + */ +int vme_bus_num(struct vme_dev *vdev) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + pr_err("Can't find VME bus\n"); + return -EINVAL; + } + + return bridge->num; +} +EXPORT_SYMBOL(vme_bus_num); + +/* - Bridge Registration --------------------------------------------------- */ + +static void vme_dev_release(struct device *dev) +{ + kfree(dev_to_vme_dev(dev)); +} + +/* Common bridge initialization */ +struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge) +{ + INIT_LIST_HEAD(&bridge->vme_error_handlers); + INIT_LIST_HEAD(&bridge->master_resources); + INIT_LIST_HEAD(&bridge->slave_resources); + INIT_LIST_HEAD(&bridge->dma_resources); + INIT_LIST_HEAD(&bridge->lm_resources); + mutex_init(&bridge->irq_mtx); + + return bridge; +} +EXPORT_SYMBOL(vme_init_bridge); + +int vme_register_bridge(struct vme_bridge *bridge) +{ + int i; + int ret = -1; + + mutex_lock(&vme_buses_lock); + for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) { + if ((vme_bus_numbers & (1 << i)) == 0) { + vme_bus_numbers |= (1 << i); + bridge->num = i; + INIT_LIST_HEAD(&bridge->devices); + list_add_tail(&bridge->bus_list, &vme_bus_list); + ret = 0; + break; + } + } + mutex_unlock(&vme_buses_lock); + + return ret; +} +EXPORT_SYMBOL(vme_register_bridge); + +void vme_unregister_bridge(struct vme_bridge *bridge) +{ + struct vme_dev *vdev; + struct vme_dev *tmp; + + mutex_lock(&vme_buses_lock); + vme_bus_numbers &= ~(1 << bridge->num); + list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) { + list_del(&vdev->drv_list); + list_del(&vdev->bridge_list); + device_unregister(&vdev->dev); + } + list_del(&bridge->bus_list); + mutex_unlock(&vme_buses_lock); +} +EXPORT_SYMBOL(vme_unregister_bridge); + +/* - Driver Registration --------------------------------------------------- */ + +static int __vme_register_driver_bus(struct vme_driver *drv, + struct vme_bridge *bridge, unsigned int ndevs) +{ + int err; + unsigned int i; + struct vme_dev *vdev; + struct vme_dev *tmp; + + 
for (i = 0; i < ndevs; i++) { + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + err = -ENOMEM; + goto err_devalloc; + } + vdev->num = i; + vdev->bridge = bridge; + vdev->dev.platform_data = drv; + vdev->dev.release = vme_dev_release; + vdev->dev.parent = bridge->parent; + vdev->dev.bus = &vme_bus_type; + dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num, + vdev->num); + + err = device_register(&vdev->dev); + if (err) + goto err_reg; + + if (vdev->dev.platform_data) { + list_add_tail(&vdev->drv_list, &drv->devices); + list_add_tail(&vdev->bridge_list, &bridge->devices); + } else + device_unregister(&vdev->dev); + } + return 0; + +err_reg: + put_device(&vdev->dev); +err_devalloc: + list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) { + list_del(&vdev->drv_list); + list_del(&vdev->bridge_list); + device_unregister(&vdev->dev); + } + return err; +} + +static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs) +{ + struct vme_bridge *bridge; + int err = 0; + + mutex_lock(&vme_buses_lock); + list_for_each_entry(bridge, &vme_bus_list, bus_list) { + /* + * This cannot cause trouble as we already have vme_buses_lock + * and if the bridge is removed, it will have to go through + * vme_unregister_bridge() to do it (which calls remove() on + * the bridge which in turn tries to acquire vme_buses_lock and + * will have to wait). + */ + err = __vme_register_driver_bus(drv, bridge, ndevs); + if (err) + break; + } + mutex_unlock(&vme_buses_lock); + return err; +} + +/** + * vme_register_driver - Register a VME driver + * @drv: Pointer to VME driver structure to register. + * @ndevs: Maximum number of devices to allow to be enumerated. + * + * Register a VME device driver with the VME subsystem. + * + * Return: Zero on success, error value on registration failure. + */ +int vme_register_driver(struct vme_driver *drv, unsigned int ndevs) +{ + int err; + + drv->driver.name = drv->name; + drv->driver.bus = &vme_bus_type; + INIT_LIST_HEAD(&drv->devices); + + err = driver_register(&drv->driver); + if (err) + return err; + + err = __vme_register_driver(drv, ndevs); + if (err) + driver_unregister(&drv->driver); + + return err; +} +EXPORT_SYMBOL(vme_register_driver); + +/** + * vme_unregister_driver - Unregister a VME driver + * @drv: Pointer to VME driver structure to unregister. + * + * Unregister a VME device driver from the VME subsystem. 
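+ *
+ * An illustrative module skeleton pairing vme_register_driver() with
+ * vme_unregister_driver() (all my_* names are hypothetical):
+ *
+ *	static struct vme_driver my_driver = {
+ *		.name = "my_vme_driver",
+ *		.match = my_match,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *
+ *	static int __init my_init(void)
+ *	{
+ *		return vme_register_driver(&my_driver, 1);
+ *	}
+ *
+ *	static void __exit my_exit(void)
+ *	{
+ *		vme_unregister_driver(&my_driver);
+ *	}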
+ */ +void vme_unregister_driver(struct vme_driver *drv) +{ + struct vme_dev *dev, *dev_tmp; + + mutex_lock(&vme_buses_lock); + list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) { + list_del(&dev->drv_list); + list_del(&dev->bridge_list); + device_unregister(&dev->dev); + } + mutex_unlock(&vme_buses_lock); + + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(vme_unregister_driver); + +/* - Bus Registration ------------------------------------------------------ */ + +static int vme_bus_match(struct device *dev, struct device_driver *drv) +{ + struct vme_driver *vme_drv; + + vme_drv = container_of(drv, struct vme_driver, driver); + + if (dev->platform_data == vme_drv) { + struct vme_dev *vdev = dev_to_vme_dev(dev); + + if (vme_drv->match && vme_drv->match(vdev)) + return 1; + + dev->platform_data = NULL; + } + return 0; +} + +static int vme_bus_probe(struct device *dev) +{ + struct vme_driver *driver; + struct vme_dev *vdev = dev_to_vme_dev(dev); + + driver = dev->platform_data; + if (driver->probe) + return driver->probe(vdev); + + return -ENODEV; +} + +static void vme_bus_remove(struct device *dev) +{ + struct vme_driver *driver; + struct vme_dev *vdev = dev_to_vme_dev(dev); + + driver = dev->platform_data; + if (driver->remove) + driver->remove(vdev); +} + +struct bus_type vme_bus_type = { + .name = "vme", + .match = vme_bus_match, + .probe = vme_bus_probe, + .remove = vme_bus_remove, +}; +EXPORT_SYMBOL(vme_bus_type); + +static int __init vme_init(void) +{ + return bus_register(&vme_bus_type); +} +subsys_initcall(vme_init); diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h new file mode 100644 index 0000000000..b204a9b4be --- /dev/null +++ b/drivers/staging/vme_user/vme.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VME_H_ +#define _VME_H_ + +/* Resource Type */ +enum vme_resource_type { + VME_MASTER, + VME_SLAVE, + VME_DMA, + VME_LM +}; + +/* VME Address Spaces */ +#define VME_A16 0x1 +#define VME_A24 0x2 +#define VME_A32 0x4 +#define VME_A64 0x8 +#define VME_CRCSR 0x10 +#define VME_USER1 0x20 +#define VME_USER2 0x40 +#define VME_USER3 0x80 +#define VME_USER4 0x100 + +#define VME_A16_MAX 0x10000ULL +#define VME_A24_MAX 0x1000000ULL +#define VME_A32_MAX 0x100000000ULL +#define VME_A64_MAX 0x10000000000000000ULL +#define VME_CRCSR_MAX 0x1000000ULL + + +/* VME Cycle Types */ +#define VME_SCT 0x1 +#define VME_BLT 0x2 +#define VME_MBLT 0x4 +#define VME_2eVME 0x8 +#define VME_2eSST 0x10 +#define VME_2eSSTB 0x20 + +#define VME_2eSST160 0x100 +#define VME_2eSST267 0x200 +#define VME_2eSST320 0x400 + +#define VME_SUPER 0x1000 +#define VME_USER 0x2000 +#define VME_PROG 0x4000 +#define VME_DATA 0x8000 + +/* VME Data Widths */ +#define VME_D8 0x1 +#define VME_D16 0x2 +#define VME_D32 0x4 +#define VME_D64 0x8 + +/* Arbitration Scheduling Modes */ +#define VME_R_ROBIN_MODE 0x1 +#define VME_PRIORITY_MODE 0x2 + +#define VME_DMA_PATTERN (1<<0) +#define VME_DMA_PCI (1<<1) +#define VME_DMA_VME (1<<2) + +#define VME_DMA_PATTERN_BYTE (1<<0) +#define VME_DMA_PATTERN_WORD (1<<1) +#define VME_DMA_PATTERN_INCREMENT (1<<2) + +#define VME_DMA_VME_TO_MEM (1<<0) +#define VME_DMA_MEM_TO_VME (1<<1) +#define VME_DMA_VME_TO_VME (1<<2) +#define VME_DMA_MEM_TO_MEM (1<<3) +#define VME_DMA_PATTERN_TO_VME (1<<4) +#define VME_DMA_PATTERN_TO_MEM (1<<5) + +struct vme_dma_attr { + u32 type; + void *private; +}; + +struct vme_resource { + enum vme_resource_type type; + struct list_head *entry; +}; + +extern struct bus_type vme_bus_type; + +/* Number of VME 
interrupt vectors */ +#define VME_NUM_STATUSID 256 + +/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ +#define VME_MAX_BRIDGES (sizeof(unsigned int)*8) +#define VME_MAX_SLOTS 32 + +#define VME_SLOT_CURRENT -1 +#define VME_SLOT_ALL -2 + +/** + * struct vme_dev - Structure representing a VME device + * @num: The device number + * @bridge: Pointer to the bridge device this device is on + * @dev: Internal device structure + * @drv_list: List of devices (per driver) + * @bridge_list: List of devices (per bridge) + */ +struct vme_dev { + int num; + struct vme_bridge *bridge; + struct device dev; + struct list_head drv_list; + struct list_head bridge_list; +}; + +/** + * struct vme_driver - Structure representing a VME driver + * @name: Driver name, should be unique among VME drivers and usually the same + * as the module name. + * @match: Callback used to determine whether probe should be run. + * @probe: Callback for device binding, called when new device is detected. + * @remove: Callback, called on device removal. + * @driver: Underlying generic device driver structure. + * @devices: List of VME devices (struct vme_dev) associated with this driver. + */ +struct vme_driver { + const char *name; + int (*match)(struct vme_dev *); + int (*probe)(struct vme_dev *); + void (*remove)(struct vme_dev *); + struct device_driver driver; + struct list_head devices; +}; + +void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *); +void vme_free_consistent(struct vme_resource *, size_t, void *, + dma_addr_t); + +size_t vme_get_size(struct vme_resource *); +int vme_check_window(u32 aspace, unsigned long long vme_base, + unsigned long long size); + +struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32); +int vme_slave_set(struct vme_resource *, int, unsigned long long, + unsigned long long, dma_addr_t, u32, u32); +int vme_slave_get(struct vme_resource *, int *, unsigned long long *, + unsigned long long *, dma_addr_t *, u32 *, u32 *); +void vme_slave_free(struct vme_resource *); + +struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32); +int vme_master_set(struct vme_resource *, int, unsigned long long, + unsigned long long, u32, u32, u32); +int vme_master_get(struct vme_resource *, int *, unsigned long long *, + unsigned long long *, u32 *, u32 *, u32 *); +ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t); +ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t); +unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int, + unsigned int, loff_t); +int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma); +void vme_master_free(struct vme_resource *); + +struct vme_resource *vme_dma_request(struct vme_dev *, u32); +struct vme_dma_list *vme_new_dma_list(struct vme_resource *); +struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32); +struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t); +struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32); +void vme_dma_free_attribute(struct vme_dma_attr *); +int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *, + struct vme_dma_attr *, size_t); +int vme_dma_list_exec(struct vme_dma_list *); +int vme_dma_list_free(struct vme_dma_list *); +int vme_dma_free(struct vme_resource *); + +int vme_irq_request(struct vme_dev *, int, int, + void (*callback)(int, int, void *), void *); +void vme_irq_free(struct vme_dev *, int, int); +int vme_irq_generate(struct vme_dev *, int, int); + +struct vme_resource 
*vme_lm_request(struct vme_dev *); +int vme_lm_count(struct vme_resource *); +int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32); +int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *); +int vme_lm_attach(struct vme_resource *, int, void (*callback)(void *), void *); +int vme_lm_detach(struct vme_resource *, int); +void vme_lm_free(struct vme_resource *); + +int vme_slot_num(struct vme_dev *); +int vme_bus_num(struct vme_dev *); + +int vme_register_driver(struct vme_driver *, unsigned int); +void vme_unregister_driver(struct vme_driver *); + + +#endif /* _VME_H_ */ + diff --git a/drivers/staging/vme_user/vme_bridge.h b/drivers/staging/vme_user/vme_bridge.h new file mode 100644 index 0000000000..0bbefe9851 --- /dev/null +++ b/drivers/staging/vme_user/vme_bridge.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VME_BRIDGE_H_ +#define _VME_BRIDGE_H_ + +#include "vme.h" + +#define VME_CRCSR_BUF_SIZE (508*1024) +/* + * Resource structures + */ +struct vme_master_resource { + struct list_head list; + struct vme_bridge *parent; + /* + * We are likely to need to access the VME bus in interrupt context, so + * protect master routines with a spinlock rather than a mutex. + */ + spinlock_t lock; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; + u32 width_attr; + struct resource bus_resource; + void __iomem *kern_base; +}; + +struct vme_slave_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; +}; + +struct vme_dma_pattern { + u32 pattern; + u32 type; +}; + +struct vme_dma_pci { + dma_addr_t address; +}; + +struct vme_dma_vme { + unsigned long long address; + u32 aspace; + u32 cycle; + u32 dwidth; +}; + +struct vme_dma_list { + struct list_head list; + struct vme_dma_resource *parent; + struct list_head entries; + struct mutex mtx; +}; + +struct vme_dma_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + struct list_head pending; + struct list_head running; + u32 route_attr; +}; + +struct vme_lm_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + int monitors; +}; + +struct vme_error_handler { + struct list_head list; + unsigned long long start; /* Beginning of error window */ + unsigned long long end; /* End of error window */ + unsigned long long first_error; /* Address of the first error */ + u32 aspace; /* Address space of error window*/ + unsigned num_errors; /* Number of errors */ +}; + +struct vme_callback { + void (*func)(int, int, void*); + void *priv_data; +}; + +struct vme_irq { + int count; + struct vme_callback callback[VME_NUM_STATUSID]; +}; + +/* Allow 16 characters for name (including null character) */ +#define VMENAMSIZ 16 + +/* This structure stores all the information about one bridge + * The structure should be dynamically allocated by the driver and one instance + * of the structure should be present for each VME chip present in the system. + */ +struct vme_bridge { + char name[VMENAMSIZ]; + int num; + struct list_head master_resources; + struct list_head slave_resources; + struct list_head dma_resources; + struct list_head lm_resources; + + /* List for registered errors handlers */ + struct list_head vme_error_handlers; + /* List of devices on this bridge */ + struct list_head devices; + + /* Bridge Info - XXX Move to private structure? 
 */
+	struct device *parent;	/* Parent device (e.g. pdev->dev for PCI) */
+	void *driver_priv;	/* Private pointer for the bridge driver */
+	struct list_head bus_list; /* list of VME buses */
+
+	/* Interrupt callbacks */
+	struct vme_irq irq[7];
+	/* Locking for VME irq callback configuration */
+	struct mutex irq_mtx;
+
+	/* Slave Functions */
+	int (*slave_get) (struct vme_slave_resource *, int *,
+		unsigned long long *, unsigned long long *, dma_addr_t *,
+		u32 *, u32 *);
+	int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
+		unsigned long long, dma_addr_t, u32, u32);
+
+	/* Master Functions */
+	int (*master_get) (struct vme_master_resource *, int *,
+		unsigned long long *, unsigned long long *, u32 *, u32 *,
+		u32 *);
+	int (*master_set) (struct vme_master_resource *, int,
+		unsigned long long, unsigned long long, u32, u32, u32);
+	ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
+		loff_t);
+	ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
+		loff_t);
+	unsigned int (*master_rmw) (struct vme_master_resource *, unsigned int,
+		unsigned int, unsigned int, loff_t);
+
+	/* DMA Functions */
+	int (*dma_list_add) (struct vme_dma_list *, struct vme_dma_attr *,
+		struct vme_dma_attr *, size_t);
+	int (*dma_list_exec) (struct vme_dma_list *);
+	int (*dma_list_empty) (struct vme_dma_list *);
+
+	/* Interrupt Functions */
+	void (*irq_set) (struct vme_bridge *, int, int, int);
+	int (*irq_generate) (struct vme_bridge *, int, int);
+
+	/* Location monitor functions */
+	int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
+	int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
+		u32 *);
+	int (*lm_attach)(struct vme_lm_resource *, int,
+		void (*callback)(void *), void *);
+	int (*lm_detach) (struct vme_lm_resource *, int);
+
+	/* CR/CSR space functions */
+	int (*slot_get) (struct vme_bridge *);
+
+	/* Bridge parent interface */
+	void *(*alloc_consistent)(struct device *dev, size_t size,
+		dma_addr_t *dma);
+	void (*free_consistent)(struct device *dev, size_t size,
+		void *vaddr, dma_addr_t dma);
+};
+
+void vme_bus_error_handler(struct vme_bridge *bridge,
+			   unsigned long long address, int am);
+void vme_irq_handler(struct vme_bridge *, int, int);
+
+struct vme_bridge *vme_init_bridge(struct vme_bridge *);
+int vme_register_bridge(struct vme_bridge *);
+void vme_unregister_bridge(struct vme_bridge *);
+struct vme_error_handler *vme_register_error_handler(
+	struct vme_bridge *bridge, u32 aspace,
+	unsigned long long address, size_t len);
+void vme_unregister_error_handler(struct vme_error_handler *handler);
+
+#endif /* _VME_BRIDGE_H_ */
diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
new file mode 100644
index 0000000000..dd646b0c53
--- /dev/null
+++ b/drivers/staging/vme_user/vme_fake.c
@@ -0,0 +1,1305 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Fake VME bridge support.
+ *
+ * This driver provides a fake VME bridge chip; this enables debugging of the
+ * VME framework in the absence of a VME system.
+ *
+ * This driver has to do a number of things in software that would be driven
+ * by hardware if it were available, and it will also result in extra overhead
+ * at times when compared with driving actual hardware.
+ *
+ * Author: Martyn Welch
+ * Copyright (c) 2014 Martyn Welch
+ *
+ * Based on vme_tsi148.c:
+ *
+ * Author: Martyn Welch
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ * + * Based on work by Tom Armistead and Ajit Prem + * Copyright 2004 Motorola Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vme.h" +#include "vme_bridge.h" + +/* + * Define the number of each that the fake driver supports. + */ +#define FAKE_MAX_MASTER 8 /* Max Master Windows */ +#define FAKE_MAX_SLAVE 8 /* Max Slave Windows */ + +/* Structures to hold information normally held in device registers */ +struct fake_slave_window { + int enabled; + unsigned long long vme_base; + unsigned long long size; + void *buf_base; + u32 aspace; + u32 cycle; +}; + +struct fake_master_window { + int enabled; + unsigned long long vme_base; + unsigned long long size; + u32 aspace; + u32 cycle; + u32 dwidth; +}; + +/* Structure used to hold driver specific information */ +struct fake_driver { + struct vme_bridge *parent; + struct fake_slave_window slaves[FAKE_MAX_SLAVE]; + struct fake_master_window masters[FAKE_MAX_MASTER]; + u32 lm_enabled; + unsigned long long lm_base; + u32 lm_aspace; + u32 lm_cycle; + void (*lm_callback[4])(void *); + void *lm_data[4]; + struct tasklet_struct int_tasklet; + int int_level; + int int_statid; + void *crcsr_kernel; + dma_addr_t crcsr_bus; + /* Only one VME interrupt can be generated at a time, provide locking */ + struct mutex vme_int; +}; + +/* Module parameter */ +static int geoid; + +static const char driver_name[] = "vme_fake"; + +static struct vme_bridge *exit_pointer; + +static struct device *vme_root; + +/* + * Calling VME bus interrupt callback if provided. + */ +static void fake_VIRQ_tasklet(unsigned long data) +{ + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = (struct vme_bridge *) data; + bridge = fake_bridge->driver_priv; + + vme_irq_handler(fake_bridge, bridge->int_level, bridge->int_statid); +} + +/* + * Configure VME interrupt + */ +static void fake_irq_set(struct vme_bridge *fake_bridge, int level, + int state, int sync) +{ + /* Nothing to do */ +} + +static void *fake_pci_to_ptr(dma_addr_t addr) +{ + return (void *)(uintptr_t)addr; +} + +static dma_addr_t fake_ptr_to_pci(void *addr) +{ + return (dma_addr_t)(uintptr_t)addr; +} + +/* + * Generate a VME bus interrupt at the requested level & vector. Wait for + * interrupt to be acked. + */ +static int fake_irq_generate(struct vme_bridge *fake_bridge, int level, + int statid) +{ + struct fake_driver *bridge; + + bridge = fake_bridge->driver_priv; + + mutex_lock(&bridge->vme_int); + + bridge->int_level = level; + + bridge->int_statid = statid; + + /* + * Schedule tasklet to run VME handler to emulate normal VME interrupt + * handler behaviour. + */ + tasklet_schedule(&bridge->int_tasklet); + + mutex_unlock(&bridge->vme_int); + + return 0; +} + +/* + * Initialize a slave window with the requested attributes. 
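+ *
+ * From the consumer side this is reached via vme_slave_set(); a rough
+ * usage sketch (hypothetical "vdev" device and window values, error
+ * handling elided) might look like:
+ *
+ *	struct vme_resource *res;
+ *	dma_addr_t bus;
+ *	void *buf;
+ *
+ *	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
+ *	buf = vme_alloc_consistent(res, 0x10000, &bus);
+ *	vme_slave_set(res, 1, 0x200000, 0x10000, bus, VME_A24,
+ *		      VME_SCT | VME_USER | VME_DATA);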
+ */ +static int fake_slave_set(struct vme_slave_resource *image, int enabled, + unsigned long long vme_base, unsigned long long size, + dma_addr_t buf_base, u32 aspace, u32 cycle) +{ + unsigned int i, granularity = 0; + unsigned long long vme_bound; + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = image->parent; + bridge = fake_bridge->driver_priv; + + i = image->number; + + switch (aspace) { + case VME_A16: + granularity = 0x10; + break; + case VME_A24: + granularity = 0x1000; + break; + case VME_A32: + granularity = 0x10000; + break; + case VME_A64: + granularity = 0x10000; + break; + case VME_CRCSR: + case VME_USER1: + case VME_USER2: + case VME_USER3: + case VME_USER4: + default: + pr_err("Invalid address space\n"); + return -EINVAL; + } + + /* + * Bound address is a valid address for the window, adjust + * accordingly + */ + vme_bound = vme_base + size - granularity; + + if (vme_base & (granularity - 1)) { + pr_err("Invalid VME base alignment\n"); + return -EINVAL; + } + if (vme_bound & (granularity - 1)) { + pr_err("Invalid VME bound alignment\n"); + return -EINVAL; + } + + mutex_lock(&image->mtx); + + bridge->slaves[i].enabled = enabled; + bridge->slaves[i].vme_base = vme_base; + bridge->slaves[i].size = size; + bridge->slaves[i].buf_base = fake_pci_to_ptr(buf_base); + bridge->slaves[i].aspace = aspace; + bridge->slaves[i].cycle = cycle; + + mutex_unlock(&image->mtx); + + return 0; +} + +/* + * Get slave window configuration. + */ +static int fake_slave_get(struct vme_slave_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + dma_addr_t *buf_base, u32 *aspace, u32 *cycle) +{ + unsigned int i; + struct fake_driver *bridge; + + bridge = image->parent->driver_priv; + + i = image->number; + + mutex_lock(&image->mtx); + + *enabled = bridge->slaves[i].enabled; + *vme_base = bridge->slaves[i].vme_base; + *size = bridge->slaves[i].size; + *buf_base = fake_ptr_to_pci(bridge->slaves[i].buf_base); + *aspace = bridge->slaves[i].aspace; + *cycle = bridge->slaves[i].cycle; + + mutex_unlock(&image->mtx); + + return 0; +} + +/* + * Set the attributes of an outbound window. 
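+ *
+ * Reached via vme_master_set(); a rough consumer sketch (hypothetical
+ * "vdev" device and window values, error handling elided):
+ *
+ *	struct vme_resource *res;
+ *	u32 data;
+ *
+ *	res = vme_master_request(vdev, VME_A32,
+ *				 VME_SCT | VME_USER | VME_DATA, VME_D32);
+ *	vme_master_set(res, 1, 0x10000, 0x10000, VME_A32,
+ *		       VME_SCT | VME_USER | VME_DATA, VME_D32);
+ *	vme_master_read(res, &data, 4, 0);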
+ */
+static int fake_master_set(struct vme_master_resource *image, int enabled,
+			   unsigned long long vme_base, unsigned long long size,
+			   u32 aspace, u32 cycle, u32 dwidth)
+{
+	int retval = 0;
+	unsigned int i;
+	struct vme_bridge *fake_bridge;
+	struct fake_driver *bridge;
+
+	fake_bridge = image->parent;
+
+	bridge = fake_bridge->driver_priv;
+
+	/* Verify input data */
+	if (vme_base & 0xFFFF) {
+		pr_err("Invalid VME Window alignment\n");
+		retval = -EINVAL;
+		goto err_window;
+	}
+
+	if (size & 0xFFFF) {
+		pr_err("Invalid size alignment\n");
+		retval = -EINVAL;
+		goto err_window;
+	}
+
+	if ((size == 0) && (enabled != 0)) {
+		pr_err("Size must be non-zero for enabled windows\n");
+		retval = -EINVAL;
+		goto err_window;
+	}
+
+	/* Setup data width */
+	switch (dwidth) {
+	case VME_D8:
+	case VME_D16:
+	case VME_D32:
+		break;
+	default:
+		pr_err("Invalid data width\n");
+		retval = -EINVAL;
+		goto err_dwidth;
+	}
+
+	/* Setup address space */
+	switch (aspace) {
+	case VME_A16:
+	case VME_A24:
+	case VME_A32:
+	case VME_A64:
+	case VME_CRCSR:
+	case VME_USER1:
+	case VME_USER2:
+	case VME_USER3:
+	case VME_USER4:
+		break;
+	default:
+		pr_err("Invalid address space\n");
+		retval = -EINVAL;
+		goto err_aspace;
+	}
+
+	spin_lock(&image->lock);
+
+	i = image->number;
+
+	bridge->masters[i].enabled = enabled;
+	bridge->masters[i].vme_base = vme_base;
+	bridge->masters[i].size = size;
+	bridge->masters[i].aspace = aspace;
+	bridge->masters[i].cycle = cycle;
+	bridge->masters[i].dwidth = dwidth;
+
+	spin_unlock(&image->lock);
+
+	return 0;
+
+err_aspace:
+err_dwidth:
+err_window:
+	return retval;
+}
+
+/*
+ * Get the attributes of an outbound window.
+ */
+static int __fake_master_get(struct vme_master_resource *image, int *enabled,
+			     unsigned long long *vme_base, unsigned long long *size,
+			     u32 *aspace, u32 *cycle, u32 *dwidth)
+{
+	unsigned int i;
+	struct fake_driver *bridge;
+
+	bridge = image->parent->driver_priv;
+
+	i = image->number;
+
+	*enabled = bridge->masters[i].enabled;
+	*vme_base = bridge->masters[i].vme_base;
+	*size = bridge->masters[i].size;
+	*aspace = bridge->masters[i].aspace;
+	*cycle = bridge->masters[i].cycle;
+	*dwidth = bridge->masters[i].dwidth;
+
+	return 0;
+}
+
+static int fake_master_get(struct vme_master_resource *image, int *enabled,
+			   unsigned long long *vme_base, unsigned long long *size,
+			   u32 *aspace, u32 *cycle, u32 *dwidth)
+{
+	int retval;
+
+	spin_lock(&image->lock);
+
+	retval = __fake_master_get(image, enabled, vme_base, size, aspace,
+				   cycle, dwidth);
+
+	spin_unlock(&image->lock);
+
+	return retval;
+}
+
+static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
+			  u32 aspace, u32 cycle)
+{
+	struct vme_bridge *fake_bridge;
+	unsigned long long lm_base;
+	u32 lm_aspace, lm_cycle;
+	int i;
+	struct vme_lm_resource *lm;
+	struct list_head *pos = NULL, *n;
+
+	/* Get vme_bridge */
+	fake_bridge = bridge->parent;
+
+	/* Loop through each location monitor resource */
+	list_for_each_safe(pos, n, &fake_bridge->lm_resources) {
+		lm = list_entry(pos, struct vme_lm_resource, list);
+
+		/* If disabled, we're done */
+		if (bridge->lm_enabled == 0)
+			return;
+
+		lm_base = bridge->lm_base;
+		lm_aspace = bridge->lm_aspace;
+		lm_cycle = bridge->lm_cycle;
+
+		/* First make sure that the cycle and address space match */
+		if ((lm_aspace == aspace) && (lm_cycle == cycle)) {
+			for (i = 0; i < lm->monitors; i++) {
+				/* Each location monitor covers 8 bytes */
+				if (((lm_base + (8 * i)) <= addr) &&
+				    ((lm_base + (8 * i) + 8) > addr)) {
+					if
(bridge->lm_callback[i]) + bridge->lm_callback[i]( + bridge->lm_data[i]); + } + } + } + } +} + +static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) +{ + u8 retval = 0xff; + int i; + unsigned long long start, end, offset; + u8 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + if ((addr >= start) && (addr < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u8 *)(bridge->slaves[i].buf_base + offset); + retval = *loc; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + + return retval; +} + +static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) +{ + u16 retval = 0xffff; + int i; + unsigned long long start, end, offset; + u16 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 1) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u16 *)(bridge->slaves[i].buf_base + offset); + retval = *loc; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + + return retval; +} + +static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) +{ + u32 retval = 0xffffffff; + int i; + unsigned long long start, end, offset; + u32 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 3) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u32 *)(bridge->slaves[i].buf_base + offset); + retval = *loc; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + + return retval; +} + +static ssize_t fake_master_read(struct vme_master_resource *image, void *buf, + size_t count, loff_t offset) +{ + int retval; + u32 aspace, cycle, dwidth; + struct vme_bridge *fake_bridge; + struct fake_driver *priv; + int i; + unsigned long long addr; + unsigned int done = 0; + unsigned int count32; + + fake_bridge = image->parent; + + priv = fake_bridge->driver_priv; + + i = image->number; + + addr = (unsigned long long)priv->masters[i].vme_base + offset; + aspace = priv->masters[i].aspace; + cycle = priv->masters[i].cycle; + dwidth = priv->masters[i].dwidth; + + spin_lock(&image->lock); + + /* The following code handles VME address alignment. We cannot use + * memcpy_xxx here because it may cut data transfers in to 8-bit + * cycles when D16 or D32 cycles are required on the VME bus. + * On the other hand, the bridge itself assures that the maximum data + * cycle configured for the transfer is used and splits it + * automatically for non-aligned addresses, so we don't want the + * overhead of needlessly forcing small transfers for the entire cycle. 
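+ *
+ * As a worked example (assuming a D32 window), a 10-byte read from VME
+ * address 0x10001 is split into an 8-bit cycle at 0x10001, a 16-bit
+ * cycle at 0x10002, a 32-bit cycle at 0x10004, a 16-bit cycle at
+ * 0x10008 and a final 8-bit cycle at 0x1000A.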
+ */ + if (addr & 0x1) { + *(u8 *)buf = fake_vmeread8(priv, addr, aspace, cycle); + done += 1; + if (done == count) + goto out; + } + if ((dwidth == VME_D16) || (dwidth == VME_D32)) { + if ((addr + done) & 0x2) { + if ((count - done) < 2) { + *(u8 *)(buf + done) = fake_vmeread8(priv, + addr + done, aspace, cycle); + done += 1; + goto out; + } else { + *(u16 *)(buf + done) = fake_vmeread16(priv, + addr + done, aspace, cycle); + done += 2; + } + } + } + + if (dwidth == VME_D32) { + count32 = (count - done) & ~0x3; + while (done < count32) { + *(u32 *)(buf + done) = fake_vmeread32(priv, addr + done, + aspace, cycle); + done += 4; + } + } else if (dwidth == VME_D16) { + count32 = (count - done) & ~0x3; + while (done < count32) { + *(u16 *)(buf + done) = fake_vmeread16(priv, addr + done, + aspace, cycle); + done += 2; + } + } else if (dwidth == VME_D8) { + count32 = (count - done); + while (done < count32) { + *(u8 *)(buf + done) = fake_vmeread8(priv, addr + done, + aspace, cycle); + done += 1; + } + + } + + if ((dwidth == VME_D16) || (dwidth == VME_D32)) { + if ((count - done) & 0x2) { + *(u16 *)(buf + done) = fake_vmeread16(priv, addr + done, + aspace, cycle); + done += 2; + } + } + if ((count - done) & 0x1) { + *(u8 *)(buf + done) = fake_vmeread8(priv, addr + done, aspace, + cycle); + done += 1; + } + +out: + retval = count; + + spin_unlock(&image->lock); + + return retval; +} + +static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge, + u8 *buf, unsigned long long addr, + u32 aspace, u32 cycle) +{ + int i; + unsigned long long start, end, offset; + u8 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && (addr < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u8 *)((void *)bridge->slaves[i].buf_base + offset); + *loc = *buf; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + +} + +static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge, + u16 *buf, unsigned long long addr, + u32 aspace, u32 cycle) +{ + int i; + unsigned long long start, end, offset; + u16 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 1) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u16 *)((void *)bridge->slaves[i].buf_base + offset); + *loc = *buf; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + +} + +static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge, + u32 *buf, unsigned long long addr, + u32 aspace, u32 cycle) +{ + int i; + unsigned long long start, end, offset; + u32 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 3) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u32 *)((void *)bridge->slaves[i].buf_base + offset); + *loc = *buf; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + +} + +static ssize_t fake_master_write(struct vme_master_resource 
*image, void *buf,
+				 size_t count, loff_t offset)
+{
+	int retval = 0;
+	u32 aspace, cycle, dwidth;
+	unsigned long long addr;
+	int i;
+	unsigned int done = 0;
+	unsigned int count32;
+
+	struct vme_bridge *fake_bridge;
+	struct fake_driver *bridge;
+
+	fake_bridge = image->parent;
+
+	bridge = fake_bridge->driver_priv;
+
+	i = image->number;
+
+	addr = bridge->masters[i].vme_base + offset;
+	aspace = bridge->masters[i].aspace;
+	cycle = bridge->masters[i].cycle;
+	dwidth = bridge->masters[i].dwidth;
+
+	spin_lock(&image->lock);
+
+	/* Here we apply the same strategy we use in the master_read
+	 * function in order to ensure the correct cycles.
+	 */
+	if (addr & 0x1) {
+		fake_vmewrite8(bridge, (u8 *)buf, addr, aspace, cycle);
+		done += 1;
+		if (done == count)
+			goto out;
+	}
+
+	if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
+		if ((addr + done) & 0x2) {
+			if ((count - done) < 2) {
+				fake_vmewrite8(bridge, (u8 *)(buf + done),
+					       addr + done, aspace, cycle);
+				done += 1;
+				goto out;
+			} else {
+				fake_vmewrite16(bridge, (u16 *)(buf + done),
+						addr + done, aspace, cycle);
+				done += 2;
+			}
+		}
+	}
+
+	if (dwidth == VME_D32) {
+		count32 = (count - done) & ~0x3;
+		while (done < count32) {
+			fake_vmewrite32(bridge, (u32 *)(buf + done),
+					addr + done, aspace, cycle);
+			done += 4;
+		}
+	} else if (dwidth == VME_D16) {
+		count32 = (count - done) & ~0x3;
+		while (done < count32) {
+			fake_vmewrite16(bridge, (u16 *)(buf + done),
+					addr + done, aspace, cycle);
+			done += 2;
+		}
+	} else if (dwidth == VME_D8) {
+		count32 = (count - done);
+		while (done < count32) {
+			fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done,
+				       aspace, cycle);
+			done += 1;
+		}
+	}
+
+	if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
+		if ((count - done) & 0x2) {
+			fake_vmewrite16(bridge, (u16 *)(buf + done),
+					addr + done, aspace, cycle);
+			done += 2;
+		}
+	}
+
+	if ((count - done) & 0x1) {
+		fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done, aspace,
+			       cycle);
+		done += 1;
+	}
+
+out:
+	retval = count;
+
+	spin_unlock(&image->lock);
+
+	return retval;
+}
+
+/*
+ * Perform an RMW cycle on the VME bus.
+ *
+ * Requires a previously configured master window, returns final value.
+ */
+static unsigned int fake_master_rmw(struct vme_master_resource *image,
+		unsigned int mask, unsigned int compare, unsigned int swap,
+		loff_t offset)
+{
+	u32 tmp, base;
+	u32 aspace, cycle;
+	int i;
+	struct fake_driver *bridge;
+
+	bridge = image->parent->driver_priv;
+
+	/* Find the PCI address that maps to the desired VME address */
+	i = image->number;
+
+	base = bridge->masters[i].vme_base;
+	aspace = bridge->masters[i].aspace;
+	cycle = bridge->masters[i].cycle;
+
+	/* Lock image */
+	spin_lock(&image->lock);
+
+	/* Read existing value */
+	tmp = fake_vmeread32(bridge, base + offset, aspace, cycle);
+
+	/* Perform check */
+	if ((tmp & mask) == (compare & mask)) {
+		tmp = tmp | (mask | swap);
+		tmp = tmp & (~mask | swap);
+
+		/* Write back */
+		fake_vmewrite32(bridge, &tmp, base + offset, aspace, cycle);
+	}
+
+	/* Unlock image */
+	spin_unlock(&image->lock);
+
+	return tmp;
+}
+
+/*
+ * All 4 location monitors reside at the same base - this is therefore a
+ * system wide configuration.
+ *
+ * This does not enable the LM monitor - that should be done when the first
+ * callback is attached and disabled when the last callback is removed.
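+ *
+ * A rough consumer sketch of the corresponding core API (hypothetical
+ * "vdev" device, callback and base address, error handling elided):
+ *
+ *	struct vme_resource *lm;
+ *
+ *	lm = vme_lm_request(vdev);
+ *	vme_lm_set(lm, 0x60000, VME_A24, VME_SCT | VME_USER | VME_DATA);
+ *	vme_lm_attach(lm, 0, my_lm_callback, my_data);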
+ */
+static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
+		       u32 aspace, u32 cycle)
+{
+	int i;
+	struct vme_bridge *fake_bridge;
+	struct fake_driver *bridge;
+
+	fake_bridge = lm->parent;
+
+	bridge = fake_bridge->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	/* If we already have a callback attached, we can't move it! */
+	for (i = 0; i < lm->monitors; i++) {
+		if (bridge->lm_callback[i]) {
+			mutex_unlock(&lm->mtx);
+			pr_err("Location monitor callback attached, can't reset\n");
+			return -EBUSY;
+		}
+	}
+
+	switch (aspace) {
+	case VME_A16:
+	case VME_A24:
+	case VME_A32:
+	case VME_A64:
+		break;
+	default:
+		mutex_unlock(&lm->mtx);
+		pr_err("Invalid address space\n");
+		return -EINVAL;
+	}
+
+	bridge->lm_base = lm_base;
+	bridge->lm_aspace = aspace;
+	bridge->lm_cycle = cycle;
+
+	mutex_unlock(&lm->mtx);
+
+	return 0;
+}
+
+/* Get configuration of the location monitor and return whether it is enabled
+ * or disabled.
+ */
+static int fake_lm_get(struct vme_lm_resource *lm,
+		       unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+{
+	struct fake_driver *bridge;
+
+	bridge = lm->parent->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	*lm_base = bridge->lm_base;
+	*aspace = bridge->lm_aspace;
+	*cycle = bridge->lm_cycle;
+
+	mutex_unlock(&lm->mtx);
+
+	return bridge->lm_enabled;
+}
+
+/*
+ * Attach a callback to a specific location monitor.
+ *
+ * Callback will be passed the monitor triggered.
+ */
+static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
+			  void (*callback)(void *), void *data)
+{
+	struct vme_bridge *fake_bridge;
+	struct fake_driver *bridge;
+
+	fake_bridge = lm->parent;
+
+	bridge = fake_bridge->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	/* Ensure that the location monitor is configured - need PGM or DATA */
+	if (bridge->lm_cycle == 0) {
+		mutex_unlock(&lm->mtx);
+		pr_err("Location monitor not properly configured\n");
+		return -EINVAL;
+	}
+
+	/* Check that a callback isn't already attached */
+	if (bridge->lm_callback[monitor]) {
+		mutex_unlock(&lm->mtx);
+		pr_err("Existing callback attached\n");
+		return -EBUSY;
+	}
+
+	/* Attach callback */
+	bridge->lm_callback[monitor] = callback;
+	bridge->lm_data[monitor] = data;
+
+	/* Ensure that the global Location Monitor Enable is set */
+	bridge->lm_enabled = 1;
+
+	mutex_unlock(&lm->mtx);
+
+	return 0;
+}
+
+/*
+ * Detach a callback function from a specific location monitor.
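+ *
+ * Pairs with fake_lm_attach() above; from the consumer side the matching
+ * teardown sketch (same hypothetical "lm" resource) is simply:
+ *
+ *	vme_lm_detach(lm, 0);
+ *	vme_lm_free(lm);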
+ */
+static int fake_lm_detach(struct vme_lm_resource *lm, int monitor)
+{
+	u32 tmp;
+	int i;
+	struct fake_driver *bridge;
+
+	bridge = lm->parent->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	/* Detach callback */
+	bridge->lm_callback[monitor] = NULL;
+	bridge->lm_data[monitor] = NULL;
+
+	/* If all location monitors disabled, disable global Location Monitor */
+	tmp = 0;
+	for (i = 0; i < lm->monitors; i++) {
+		if (bridge->lm_callback[i])
+			tmp = 1;
+	}
+
+	if (tmp == 0)
+		bridge->lm_enabled = 0;
+
+	mutex_unlock(&lm->mtx);
+
+	return 0;
+}
+
+/*
+ * Determine Geographical Addressing
+ */
+static int fake_slot_get(struct vme_bridge *fake_bridge)
+{
+	return geoid;
+}
+
+static void *fake_alloc_consistent(struct device *parent, size_t size,
+				   dma_addr_t *dma)
+{
+	void *alloc = kmalloc(size, GFP_KERNEL);
+
+	if (alloc)
+		*dma = fake_ptr_to_pci(alloc);
+
+	return alloc;
+}
+
+static void fake_free_consistent(struct device *parent, size_t size,
+				 void *vaddr, dma_addr_t dma)
+{
+	kfree(vaddr);
+/*
+	dma_free_coherent(parent, size, vaddr, dma);
+*/
+}
+
+/*
+ * Configure CR/CSR space
+ *
+ * Access to the CR/CSR space can be configured at power-up. The location of
+ * the CR/CSR registers in the CR/CSR address space is determined by the
+ * board's geographic address.
+ *
+ * Each board has a 512kB window, with the highest 4kB being used for the
+ * board's registers; this means there is a fixed-length 508kB window which
+ * must be mapped onto PCI memory.
+ */
+static int fake_crcsr_init(struct vme_bridge *fake_bridge)
+{
+	u32 vstat;
+	struct fake_driver *bridge;
+
+	bridge = fake_bridge->driver_priv;
+
+	/* Allocate mem for CR/CSR image */
+	bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL);
+	if (!bridge->crcsr_kernel)
+		return -ENOMEM;
+
+	bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel);
+
+	vstat = fake_slot_get(fake_bridge);
+
+	pr_info("CR/CSR Offset: %d\n", vstat);
+
+	return 0;
+}
+
+static void fake_crcsr_exit(struct vme_bridge *fake_bridge)
+{
+	struct fake_driver *bridge;
+
+	bridge = fake_bridge->driver_priv;
+
+	kfree(bridge->crcsr_kernel);
+}
+
+static int __init fake_init(void)
+{
+	int retval, i;
+	struct list_head *pos = NULL, *n;
+	struct vme_bridge *fake_bridge;
+	struct fake_driver *fake_device;
+	struct vme_master_resource *master_image;
+	struct vme_slave_resource *slave_image;
+	struct vme_lm_resource *lm;
+
+	/* We need a fake parent device */
+	vme_root = __root_device_register("vme", THIS_MODULE);
+	if (IS_ERR(vme_root))
+		return PTR_ERR(vme_root);
+
+	/* If we want to support more than one bridge at some point, we need to
+	 * dynamically allocate this so we get one per device.
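	 * A multi-instance variant might, for example, bundle both
	 * structures into a single per-device allocation:
	 *
	 *	struct fake_instance {
	 *		struct vme_bridge bridge;
	 *		struct fake_driver driver;
	 *	};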
+ */ + fake_bridge = kzalloc(sizeof(*fake_bridge), GFP_KERNEL); + if (!fake_bridge) { + retval = -ENOMEM; + goto err_struct; + } + + fake_device = kzalloc(sizeof(*fake_device), GFP_KERNEL); + if (!fake_device) { + retval = -ENOMEM; + goto err_driver; + } + + fake_bridge->driver_priv = fake_device; + + fake_bridge->parent = vme_root; + + fake_device->parent = fake_bridge; + + /* Initialize wait queues & mutual exclusion flags */ + mutex_init(&fake_device->vme_int); + mutex_init(&fake_bridge->irq_mtx); + tasklet_init(&fake_device->int_tasklet, fake_VIRQ_tasklet, + (unsigned long) fake_bridge); + + strcpy(fake_bridge->name, driver_name); + + /* Add master windows to list */ + INIT_LIST_HEAD(&fake_bridge->master_resources); + for (i = 0; i < FAKE_MAX_MASTER; i++) { + master_image = kmalloc(sizeof(*master_image), GFP_KERNEL); + if (!master_image) { + retval = -ENOMEM; + goto err_master; + } + master_image->parent = fake_bridge; + spin_lock_init(&master_image->lock); + master_image->locked = 0; + master_image->number = i; + master_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64; + master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + master_image->width_attr = VME_D16 | VME_D32; + memset(&master_image->bus_resource, 0, + sizeof(struct resource)); + master_image->kern_base = NULL; + list_add_tail(&master_image->list, + &fake_bridge->master_resources); + } + + /* Add slave windows to list */ + INIT_LIST_HEAD(&fake_bridge->slave_resources); + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL); + if (!slave_image) { + retval = -ENOMEM; + goto err_slave; + } + slave_image->parent = fake_bridge; + mutex_init(&slave_image->mtx); + slave_image->locked = 0; + slave_image->number = i; + slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 | + VME_USER3 | VME_USER4; + slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + list_add_tail(&slave_image->list, + &fake_bridge->slave_resources); + } + + /* Add location monitor to list */ + INIT_LIST_HEAD(&fake_bridge->lm_resources); + lm = kmalloc(sizeof(*lm), GFP_KERNEL); + if (!lm) { + retval = -ENOMEM; + goto err_lm; + } + lm->parent = fake_bridge; + mutex_init(&lm->mtx); + lm->locked = 0; + lm->number = 1; + lm->monitors = 4; + list_add_tail(&lm->list, &fake_bridge->lm_resources); + + fake_bridge->slave_get = fake_slave_get; + fake_bridge->slave_set = fake_slave_set; + fake_bridge->master_get = fake_master_get; + fake_bridge->master_set = fake_master_set; + fake_bridge->master_read = fake_master_read; + fake_bridge->master_write = fake_master_write; + fake_bridge->master_rmw = fake_master_rmw; + fake_bridge->irq_set = fake_irq_set; + fake_bridge->irq_generate = fake_irq_generate; + fake_bridge->lm_set = fake_lm_set; + fake_bridge->lm_get = fake_lm_get; + fake_bridge->lm_attach = fake_lm_attach; + fake_bridge->lm_detach = fake_lm_detach; + fake_bridge->slot_get = fake_slot_get; + fake_bridge->alloc_consistent = fake_alloc_consistent; + fake_bridge->free_consistent = fake_free_consistent; + + pr_info("Board is%s the VME system controller\n", + (geoid == 1) ? 
"" : " not"); + + pr_info("VME geographical address is set to %d\n", geoid); + + retval = fake_crcsr_init(fake_bridge); + if (retval) { + pr_err("CR/CSR configuration failed.\n"); + goto err_crcsr; + } + + retval = vme_register_bridge(fake_bridge); + if (retval != 0) { + pr_err("Chip Registration failed.\n"); + goto err_reg; + } + + exit_pointer = fake_bridge; + + return 0; + +err_reg: + fake_crcsr_exit(fake_bridge); +err_crcsr: +err_lm: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &fake_bridge->lm_resources) { + lm = list_entry(pos, struct vme_lm_resource, list); + list_del(pos); + kfree(lm); + } +err_slave: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &fake_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } +err_master: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &fake_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + kfree(fake_device); +err_driver: + kfree(fake_bridge); +err_struct: + return retval; + +} + + +static void __exit fake_exit(void) +{ + struct list_head *pos = NULL; + struct list_head *tmplist; + struct vme_master_resource *master_image; + struct vme_slave_resource *slave_image; + int i; + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = exit_pointer; + + bridge = fake_bridge->driver_priv; + + pr_debug("Driver is being unloaded.\n"); + + /* + * Shutdown all inbound and outbound windows. + */ + for (i = 0; i < FAKE_MAX_MASTER; i++) + bridge->masters[i].enabled = 0; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) + bridge->slaves[i].enabled = 0; + + /* + * Shutdown Location monitor. + */ + bridge->lm_enabled = 0; + + vme_unregister_bridge(fake_bridge); + + fake_crcsr_exit(fake_bridge); + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &fake_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &fake_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + kfree(fake_bridge->driver_priv); + + kfree(fake_bridge); + + root_device_unregister(vme_root); +} + + +MODULE_PARM_DESC(geoid, "Set geographical addressing"); +module_param(geoid, int, 0); + +MODULE_DESCRIPTION("Fake VME bridge driver"); +MODULE_LICENSE("GPL"); + +module_init(fake_init); +module_exit(fake_exit); diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c new file mode 100644 index 0000000000..9564762132 --- /dev/null +++ b/drivers/staging/vme_user/vme_tsi148.c @@ -0,0 +1,2661 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Support for the Tundra TSI148 VME-PCI Bridge Chip + * + * Author: Martyn Welch + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. + * + * Based on work by Tom Armistead and Ajit Prem + * Copyright 2004 Motorola Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vme.h" +#include "vme_bridge.h" +#include "vme_tsi148.h" + +static int tsi148_probe(struct pci_dev *, const struct pci_device_id *); +static void tsi148_remove(struct pci_dev *); + + +/* Module parameter */ +static bool err_chk; +static int geoid; + +static const char driver_name[] = "vme_tsi148"; + +static const struct pci_device_id tsi148_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, tsi148_ids); + +static struct pci_driver tsi148_driver = { + .name = driver_name, + .id_table = tsi148_ids, + .probe = tsi148_probe, + .remove = tsi148_remove, +}; + +static void reg_join(unsigned int high, unsigned int low, + unsigned long long *variable) +{ + *variable = (unsigned long long)high << 32; + *variable |= (unsigned long long)low; +} + +static void reg_split(unsigned long long variable, unsigned int *high, + unsigned int *low) +{ + *low = (unsigned int)variable & 0xFFFFFFFF; + *high = (unsigned int)(variable >> 32); +} + +/* + * Wakes up DMA queue. + */ +static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge, + int channel_mask) +{ + u32 serviced = 0; + + if (channel_mask & TSI148_LCSR_INTS_DMA0S) { + wake_up(&bridge->dma_queue[0]); + serviced |= TSI148_LCSR_INTC_DMA0C; + } + if (channel_mask & TSI148_LCSR_INTS_DMA1S) { + wake_up(&bridge->dma_queue[1]); + serviced |= TSI148_LCSR_INTC_DMA1C; + } + + return serviced; +} + +/* + * Wake up location monitor queue + */ +static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat) +{ + int i; + u32 serviced = 0; + + for (i = 0; i < 4; i++) { + if (stat & TSI148_LCSR_INTS_LMS[i]) { + /* We only enable interrupts if the callback is set */ + bridge->lm_callback[i](bridge->lm_data[i]); + serviced |= TSI148_LCSR_INTC_LMC[i]; + } + } + + return serviced; +} + +/* + * Wake up mail box queue. + * + * XXX This functionality is not exposed up though API. + */ +static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat) +{ + int i; + u32 val; + u32 serviced = 0; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + for (i = 0; i < 4; i++) { + if (stat & TSI148_LCSR_INTS_MBS[i]) { + val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]); + dev_err(tsi148_bridge->parent, "VME Mailbox %d received" + ": 0x%x\n", i, val); + serviced |= TSI148_LCSR_INTC_MBC[i]; + } + } + + return serviced; +} + +/* + * Display error & status message when PERR (PCI) exception interrupt occurs. + */ +static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge) +{ + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, " + "attributes: %08x\n", + ioread32be(bridge->base + TSI148_LCSR_EDPAU), + ioread32be(bridge->base + TSI148_LCSR_EDPAL), + ioread32be(bridge->base + TSI148_LCSR_EDPAT)); + + dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split " + "completion reg: %08x\n", + ioread32be(bridge->base + TSI148_LCSR_EDPXA), + ioread32be(bridge->base + TSI148_LCSR_EDPXS)); + + iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT); + + return TSI148_LCSR_INTC_PERRC; +} + +/* + * Save address and status when VME error interrupt occurs. 
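+ *
+ * When the err_chk module parameter is set, errors are recorded against
+ * error handler windows previously registered with the core; a rough
+ * registration sketch (hypothetical A32 window) would be:
+ *
+ *	struct vme_error_handler *handler;
+ *
+ *	handler = vme_register_error_handler(tsi148_bridge, VME_A32,
+ *					     0x10000, 0x10000);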
+ */ +static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge) +{ + unsigned int error_addr_high, error_addr_low; + unsigned long long error_addr; + u32 error_attrib; + int error_am; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU); + error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL); + error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT); + error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8; + + reg_join(error_addr_high, error_addr_low, &error_addr); + + /* Check for exception register overflow (we have lost error data) */ + if (error_attrib & TSI148_LCSR_VEAT_VEOF) { + dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow " + "Occurred\n"); + } + + if (err_chk) + vme_bus_error_handler(tsi148_bridge, error_addr, error_am); + else + dev_err(tsi148_bridge->parent, + "VME Bus Error at address: 0x%llx, attributes: %08x\n", + error_addr, error_attrib); + + /* Clear Status */ + iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT); + + return TSI148_LCSR_INTC_VERRC; +} + +/* + * Wake up IACK queue. + */ +static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge) +{ + wake_up(&bridge->iack_queue); + + return TSI148_LCSR_INTC_IACKC; +} + +/* + * Calling VME bus interrupt callback if provided. + */ +static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge, + u32 stat) +{ + int vec, i, serviced = 0; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + for (i = 7; i > 0; i--) { + if (stat & (1 << i)) { + /* + * Note: Even though the registers are defined as + * 32-bits in the spec, we only want to issue 8-bit + * IACK cycles on the bus, read from offset 3. + */ + vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3); + + vme_irq_handler(tsi148_bridge, i, vec); + + serviced |= (1 << i); + } + } + + return serviced; +} + +/* + * Top level interrupt handler. Clears appropriate interrupt status bits and + * then calls appropriate sub handler(s). 
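+ *
+ * VME bus interrupts dispatched from here end up in callbacks that a
+ * device driver attaches through the core, for instance (hypothetical
+ * level 3, vector 0x20 and handler, error handling elided):
+ *
+ *	static void my_isr(int level, int statid, void *priv);
+ *
+ *	vme_irq_request(vdev, 3, 0x20, my_isr, priv);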
+ */ +static irqreturn_t tsi148_irqhandler(int irq, void *ptr) +{ + u32 stat, enable, serviced = 0; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + tsi148_bridge = ptr; + + bridge = tsi148_bridge->driver_priv; + + /* Determine which interrupts are unmasked and set */ + enable = ioread32be(bridge->base + TSI148_LCSR_INTEO); + stat = ioread32be(bridge->base + TSI148_LCSR_INTS); + + /* Only look at unmasked interrupts */ + stat &= enable; + + if (unlikely(!stat)) + return IRQ_NONE; + + /* Call subhandlers as appropriate */ + /* DMA irqs */ + if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S)) + serviced |= tsi148_DMA_irqhandler(bridge, stat); + + /* Location monitor irqs */ + if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S | + TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S)) + serviced |= tsi148_LM_irqhandler(bridge, stat); + + /* Mail box irqs */ + if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S | + TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S)) + serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat); + + /* PCI bus error */ + if (stat & TSI148_LCSR_INTS_PERRS) + serviced |= tsi148_PERR_irqhandler(tsi148_bridge); + + /* VME bus error */ + if (stat & TSI148_LCSR_INTS_VERRS) + serviced |= tsi148_VERR_irqhandler(tsi148_bridge); + + /* IACK irq */ + if (stat & TSI148_LCSR_INTS_IACKS) + serviced |= tsi148_IACK_irqhandler(bridge); + + /* VME bus irqs */ + if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S | + TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S | + TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S | + TSI148_LCSR_INTS_IRQ1S)) + serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat); + + /* Clear serviced interrupts */ + iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC); + + return IRQ_HANDLED; +} + +static int tsi148_irq_init(struct vme_bridge *tsi148_bridge) +{ + int result; + unsigned int tmp; + struct pci_dev *pdev; + struct tsi148_driver *bridge; + + pdev = to_pci_dev(tsi148_bridge->parent); + + bridge = tsi148_bridge->driver_priv; + + result = request_irq(pdev->irq, + tsi148_irqhandler, + IRQF_SHARED, + driver_name, tsi148_bridge); + if (result) { + dev_err(tsi148_bridge->parent, "Can't get assigned pci irq " + "vector %02X\n", pdev->irq); + return result; + } + + /* Enable and unmask interrupts */ + tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO | + TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO | + TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO | + TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO | + TSI148_LCSR_INTEO_IACKEO; + + /* This leaves the following interrupts masked. + * TSI148_LCSR_INTEO_VIEEO + * TSI148_LCSR_INTEO_SYSFLEO + * TSI148_LCSR_INTEO_ACFLEO + */ + + /* Don't enable Location Monitor interrupts here - they will be + * enabled when the location monitors are properly configured and + * a callback has been attached. + * TSI148_LCSR_INTEO_LM0EO + * TSI148_LCSR_INTEO_LM1EO + * TSI148_LCSR_INTEO_LM2EO + * TSI148_LCSR_INTEO_LM3EO + */ + + /* Don't enable VME interrupts until we add a handler, else the board + * will respond to it and we don't want that unless it knows how to + * properly deal with it. 
+ * TSI148_LCSR_INTEO_IRQ7EO + * TSI148_LCSR_INTEO_IRQ6EO + * TSI148_LCSR_INTEO_IRQ5EO + * TSI148_LCSR_INTEO_IRQ4EO + * TSI148_LCSR_INTEO_IRQ3EO + * TSI148_LCSR_INTEO_IRQ2EO + * TSI148_LCSR_INTEO_IRQ1EO + */ + + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); + + return 0; +} + +static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge, + struct pci_dev *pdev) +{ + struct tsi148_driver *bridge = tsi148_bridge->driver_priv; + + /* Turn off interrupts */ + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO); + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN); + + /* Clear all interrupts */ + iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC); + + /* Detach interrupt handler */ + free_irq(pdev->irq, tsi148_bridge); +} + +/* + * Check to see if an IACk has been received, return true (1) or false (0). + */ +static int tsi148_iack_received(struct tsi148_driver *bridge) +{ + u32 tmp; + + tmp = ioread32be(bridge->base + TSI148_LCSR_VICR); + + if (tmp & TSI148_LCSR_VICR_IRQS) + return 0; + else + return 1; +} + +/* + * Configure VME interrupt + */ +static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level, + int state, int sync) +{ + struct pci_dev *pdev; + u32 tmp; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + /* We need to do the ordering differently for enabling and disabling */ + if (state == 0) { + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); + tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); + + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); + tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + + if (sync != 0) { + pdev = to_pci_dev(tsi148_bridge->parent); + synchronize_irq(pdev->irq); + } + } else { + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); + tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); + tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); + } +} + +/* + * Generate a VME bus interrupt at the requested level & vector. Wait for + * interrupt to be acked. + */ +static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, + int statid) +{ + u32 tmp; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + mutex_lock(&bridge->vme_int); + + /* Read VICR register */ + tmp = ioread32be(bridge->base + TSI148_LCSR_VICR); + + /* Set Status/ID */ + tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) | + (statid & TSI148_LCSR_VICR_STID_M); + iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR); + + /* Assert VMEbus IRQ */ + tmp = tmp | TSI148_LCSR_VICR_IRQL[level]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR); + + /* XXX Consider implementing a timeout? */ + wait_event_interruptible(bridge->iack_queue, + tsi148_iack_received(bridge)); + + mutex_unlock(&bridge->vme_int); + + return 0; +} + +/* + * Initialize a slave window with the requested attributes. 
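+ *
+ * The 64-bit base, bound and offset values are programmed as pairs of
+ * 32-bit registers via reg_split(); e.g. a VME base of 0x120000000
+ * splits into an upper word of 0x1 and a lower word of 0x20000000.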
+ */ +static int tsi148_slave_set(struct vme_slave_resource *image, int enabled, + unsigned long long vme_base, unsigned long long size, + dma_addr_t pci_base, u32 aspace, u32 cycle) +{ + unsigned int i, addr = 0, granularity = 0; + unsigned int temp_ctl = 0; + unsigned int vme_base_low, vme_base_high; + unsigned int vme_bound_low, vme_bound_high; + unsigned int pci_offset_low, pci_offset_high; + unsigned long long vme_bound, pci_offset; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + tsi148_bridge = image->parent; + bridge = tsi148_bridge->driver_priv; + + i = image->number; + + switch (aspace) { + case VME_A16: + granularity = 0x10; + addr |= TSI148_LCSR_ITAT_AS_A16; + break; + case VME_A24: + granularity = 0x1000; + addr |= TSI148_LCSR_ITAT_AS_A24; + break; + case VME_A32: + granularity = 0x10000; + addr |= TSI148_LCSR_ITAT_AS_A32; + break; + case VME_A64: + granularity = 0x10000; + addr |= TSI148_LCSR_ITAT_AS_A64; + break; + default: + dev_err(tsi148_bridge->parent, "Invalid address space\n"); + return -EINVAL; + } + + /* Convert 64-bit variables to 2x 32-bit variables */ + reg_split(vme_base, &vme_base_high, &vme_base_low); + + /* + * Bound address is a valid address for the window, adjust + * accordingly + */ + vme_bound = vme_base + size - granularity; + reg_split(vme_bound, &vme_bound_high, &vme_bound_low); + pci_offset = (unsigned long long)pci_base - vme_base; + reg_split(pci_offset, &pci_offset_high, &pci_offset_low); + + if (vme_base_low & (granularity - 1)) { + dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n"); + return -EINVAL; + } + if (vme_bound_low & (granularity - 1)) { + dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n"); + return -EINVAL; + } + if (pci_offset_low & (granularity - 1)) { + dev_err(tsi148_bridge->parent, "Invalid PCI Offset " + "alignment\n"); + return -EINVAL; + } + + /* Disable while we are mucking around */ + temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + temp_ctl &= ~TSI148_LCSR_ITAT_EN; + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + /* Setup mapping */ + iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAU); + iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAL); + iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAU); + iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAL); + iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFU); + iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFL); + + /* Setup 2eSST speeds */ + temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M; + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160; + break; + case VME_2eSST267: + temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267; + break; + case VME_2eSST320: + temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + temp_ctl &= ~(0x1F << 7); + if (cycle & VME_BLT) + temp_ctl |= TSI148_LCSR_ITAT_BLT; + if (cycle & VME_MBLT) + temp_ctl |= TSI148_LCSR_ITAT_MBLT; + if (cycle & VME_2eVME) + temp_ctl |= TSI148_LCSR_ITAT_2eVME; + if (cycle & VME_2eSST) + temp_ctl |= TSI148_LCSR_ITAT_2eSST; + if (cycle & VME_2eSSTB) + temp_ctl |= TSI148_LCSR_ITAT_2eSSTB; + + /* Setup address space */ + temp_ctl &= ~TSI148_LCSR_ITAT_AS_M; + temp_ctl |= addr; + + temp_ctl &= 
~0xF; + if (cycle & VME_SUPER) + temp_ctl |= TSI148_LCSR_ITAT_SUPR ; + if (cycle & VME_USER) + temp_ctl |= TSI148_LCSR_ITAT_NPRIV; + if (cycle & VME_PROG) + temp_ctl |= TSI148_LCSR_ITAT_PGM; + if (cycle & VME_DATA) + temp_ctl |= TSI148_LCSR_ITAT_DATA; + + /* Write ctl reg without enable */ + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + if (enabled) + temp_ctl |= TSI148_LCSR_ITAT_EN; + + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + return 0; +} + +/* + * Get slave window configuration. + */ +static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + dma_addr_t *pci_base, u32 *aspace, u32 *cycle) +{ + unsigned int i, granularity = 0, ctl = 0; + unsigned int vme_base_low, vme_base_high; + unsigned int vme_bound_low, vme_bound_high; + unsigned int pci_offset_low, pci_offset_high; + unsigned long long vme_bound, pci_offset; + struct tsi148_driver *bridge; + + bridge = image->parent->driver_priv; + + i = image->number; + + /* Read registers */ + ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAU); + vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAL); + vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAU); + vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAL); + pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFU); + pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFL); + + /* Convert 64-bit variables to 2x 32-bit variables */ + reg_join(vme_base_high, vme_base_low, vme_base); + reg_join(vme_bound_high, vme_bound_low, &vme_bound); + reg_join(pci_offset_high, pci_offset_low, &pci_offset); + + *pci_base = (dma_addr_t)(*vme_base + pci_offset); + + *enabled = 0; + *aspace = 0; + *cycle = 0; + + if (ctl & TSI148_LCSR_ITAT_EN) + *enabled = 1; + + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) { + granularity = 0x10; + *aspace |= VME_A16; + } + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) { + granularity = 0x1000; + *aspace |= VME_A24; + } + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) { + granularity = 0x10000; + *aspace |= VME_A32; + } + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) { + granularity = 0x10000; + *aspace |= VME_A64; + } + + /* Need granularity before we set the size */ + *size = (unsigned long long)((vme_bound - *vme_base) + granularity); + + + if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160) + *cycle |= VME_2eSST160; + if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267) + *cycle |= VME_2eSST267; + if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320) + *cycle |= VME_2eSST320; + + if (ctl & TSI148_LCSR_ITAT_BLT) + *cycle |= VME_BLT; + if (ctl & TSI148_LCSR_ITAT_MBLT) + *cycle |= VME_MBLT; + if (ctl & TSI148_LCSR_ITAT_2eVME) + *cycle |= VME_2eVME; + if (ctl & TSI148_LCSR_ITAT_2eSST) + *cycle |= VME_2eSST; + if (ctl & TSI148_LCSR_ITAT_2eSSTB) + *cycle |= VME_2eSSTB; + + if (ctl & TSI148_LCSR_ITAT_SUPR) + *cycle |= VME_SUPER; + if (ctl & TSI148_LCSR_ITAT_NPRIV) + *cycle |= VME_USER; + if (ctl & TSI148_LCSR_ITAT_PGM) + *cycle |= VME_PROG; + if (ctl & TSI148_LCSR_ITAT_DATA) + *cycle |= VME_DATA; + + 
return 0; +} + +/* + * Allocate and map PCI Resource + */ +static int tsi148_alloc_resource(struct vme_master_resource *image, + unsigned long long size) +{ + unsigned long long existing_size; + int retval = 0; + struct pci_dev *pdev; + struct vme_bridge *tsi148_bridge; + + tsi148_bridge = image->parent; + + pdev = to_pci_dev(tsi148_bridge->parent); + + existing_size = (unsigned long long)(image->bus_resource.end - + image->bus_resource.start); + + /* If the existing size is OK, return */ + if ((size != 0) && (existing_size == (size - 1))) + return 0; + + if (existing_size != 0) { + iounmap(image->kern_base); + image->kern_base = NULL; + kfree(image->bus_resource.name); + release_resource(&image->bus_resource); + memset(&image->bus_resource, 0, sizeof(image->bus_resource)); + } + + /* Exit here if size is zero */ + if (size == 0) + return 0; + + if (!image->bus_resource.name) { + image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC); + if (!image->bus_resource.name) { + retval = -ENOMEM; + goto err_name; + } + } + + sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name, + image->number); + + image->bus_resource.start = 0; + image->bus_resource.end = (unsigned long)size; + image->bus_resource.flags = IORESOURCE_MEM; + + retval = pci_bus_alloc_resource(pdev->bus, + &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM, + 0, NULL, NULL); + if (retval) { + dev_err(tsi148_bridge->parent, "Failed to allocate mem " + "resource for window %d size 0x%lx start 0x%lx\n", + image->number, (unsigned long)size, + (unsigned long)image->bus_resource.start); + goto err_resource; + } + + image->kern_base = ioremap( + image->bus_resource.start, size); + if (!image->kern_base) { + dev_err(tsi148_bridge->parent, "Failed to remap resource\n"); + retval = -ENOMEM; + goto err_remap; + } + + return 0; + +err_remap: + release_resource(&image->bus_resource); +err_resource: + kfree(image->bus_resource.name); + memset(&image->bus_resource, 0, sizeof(image->bus_resource)); +err_name: + return retval; +} + +/* + * Free and unmap PCI Resource + */ +static void tsi148_free_resource(struct vme_master_resource *image) +{ + iounmap(image->kern_base); + image->kern_base = NULL; + release_resource(&image->bus_resource); + kfree(image->bus_resource.name); + memset(&image->bus_resource, 0, sizeof(image->bus_resource)); +} + +/* + * Set the attributes of an outbound window. 
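+ *
+ * The hardware works on 64 KiB granularity, so vme_base and size (and
+ * the PCI base/bound/offset derived from them) must be 64 KiB aligned.
+ * A call sketch (hypothetical "res" window) that satisfies this:
+ *
+ *	vme_master_set(res, 1, 0x10000, 0x20000, VME_A32,
+ *		       VME_SCT | VME_DATA, VME_D32);
+ *
+ * A base of 0x18000, by contrast, would be rejected with -EINVAL.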
+ */ +static int tsi148_master_set(struct vme_master_resource *image, int enabled, + unsigned long long vme_base, unsigned long long size, u32 aspace, + u32 cycle, u32 dwidth) +{ + int retval = 0; + unsigned int i; + unsigned int temp_ctl = 0; + unsigned int pci_base_low, pci_base_high; + unsigned int pci_bound_low, pci_bound_high; + unsigned int vme_offset_low, vme_offset_high; + unsigned long long pci_bound, vme_offset, pci_base; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + struct pci_bus_region region; + struct pci_dev *pdev; + + tsi148_bridge = image->parent; + + bridge = tsi148_bridge->driver_priv; + + pdev = to_pci_dev(tsi148_bridge->parent); + + /* Verify input data */ + if (vme_base & 0xFFFF) { + dev_err(tsi148_bridge->parent, "Invalid VME Window " + "alignment\n"); + retval = -EINVAL; + goto err_window; + } + + if ((size == 0) && (enabled != 0)) { + dev_err(tsi148_bridge->parent, "Size must be non-zero for " + "enabled windows\n"); + retval = -EINVAL; + goto err_window; + } + + spin_lock(&image->lock); + + /* Let's allocate the resource here rather than further up the stack as + * it avoids pushing loads of bus dependent stuff up the stack. If size + * is zero, any existing resource will be freed. + */ + retval = tsi148_alloc_resource(image, size); + if (retval) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Unable to allocate memory for " + "resource\n"); + goto err_res; + } + + if (size == 0) { + pci_base = 0; + pci_bound = 0; + vme_offset = 0; + } else { + pcibios_resource_to_bus(pdev->bus, ®ion, + &image->bus_resource); + pci_base = region.start; + + /* + * Bound address is a valid address for the window, adjust + * according to window granularity. + */ + pci_bound = pci_base + (size - 0x10000); + vme_offset = vme_base - pci_base; + } + + /* Convert 64-bit variables to 2x 32-bit variables */ + reg_split(pci_base, &pci_base_high, &pci_base_low); + reg_split(pci_bound, &pci_bound_high, &pci_bound_low); + reg_split(vme_offset, &vme_offset_high, &vme_offset_low); + + if (pci_base_low & 0xFFFF) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n"); + retval = -EINVAL; + goto err_gran; + } + if (pci_bound_low & 0xFFFF) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n"); + retval = -EINVAL; + goto err_gran; + } + if (vme_offset_low & 0xFFFF) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid VME Offset " + "alignment\n"); + retval = -EINVAL; + goto err_gran; + } + + i = image->number; + + /* Disable while we are mucking around */ + temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + temp_ctl &= ~TSI148_LCSR_OTAT_EN; + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + + /* Setup 2eSST speeds */ + temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M; + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160; + break; + case VME_2eSST267: + temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267; + break; + case VME_2eSST320: + temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + if (cycle & VME_BLT) { + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_BLT; + } + if (cycle & VME_MBLT) { + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT; + } + if (cycle & VME_2eVME) { + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME; + } 
+	if (cycle & VME_2eSST) {
+		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
+		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
+	}
+	if (cycle & VME_2eSSTB) {
+		dev_warn(tsi148_bridge->parent,
+			 "Currently not setting Broadcast Select Registers\n");
+		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
+		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
+	}
+
+	/* Setup data width */
+	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
+	switch (dwidth) {
+	case VME_D16:
+		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
+		break;
+	case VME_D32:
+		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
+		break;
+	default:
+		spin_unlock(&image->lock);
+		dev_err(tsi148_bridge->parent, "Invalid data width\n");
+		retval = -EINVAL;
+		goto err_dwidth;
+	}
+
+	/* Setup address space */
+	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
+	switch (aspace) {
+	case VME_A16:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
+		break;
+	case VME_A24:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
+		break;
+	case VME_A32:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
+		break;
+	case VME_A64:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
+		break;
+	case VME_CRCSR:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
+		break;
+	case VME_USER1:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
+		break;
+	case VME_USER2:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
+		break;
+	case VME_USER3:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
+		break;
+	case VME_USER4:
+		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
+		break;
+	default:
+		spin_unlock(&image->lock);
+		dev_err(tsi148_bridge->parent, "Invalid address space\n");
+		retval = -EINVAL;
+		goto err_aspace;
+	}
+
+	/* Clear the supervisor and program bits before applying the cycle */
+	temp_ctl &= ~(TSI148_LCSR_OTAT_SUP | TSI148_LCSR_OTAT_PGM);
+	if (cycle & VME_SUPER)
+		temp_ctl |= TSI148_LCSR_OTAT_SUP;
+	if (cycle & VME_PROG)
+		temp_ctl |= TSI148_LCSR_OTAT_PGM;
+
+	/* Setup mapping */
+	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTSAU);
+	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTSAL);
+	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTEAU);
+	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTEAL);
+	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTOFU);
+	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTOFL);
+
+	/* Write ctl reg without enable */
+	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTAT);
+
+	if (enabled)
+		temp_ctl |= TSI148_LCSR_OTAT_EN;
+
+	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
+		    TSI148_LCSR_OFFSET_OTAT);
+
+	spin_unlock(&image->lock);
+	return 0;
+
+err_aspace:
+err_dwidth:
+err_gran:
+	tsi148_free_resource(image);
+err_res:
+err_window:
+	return retval;
+}
+
+/*
+ * Get the attributes of an outbound window.
+ *
+ * XXX Not parsing prefetch information.
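+ *
+ * The window geometry is read back from the OTSAx/OTEAx/OTOFx registers
+ * themselves, so this reports what the hardware is actually programmed
+ * to do rather than any cached driver state.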
+ */
+static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
+	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+	u32 *cycle, u32 *dwidth)
+{
+	unsigned int i, ctl;
+	unsigned int pci_base_low, pci_base_high;
+	unsigned int pci_bound_low, pci_bound_high;
+	unsigned int vme_offset_low, vme_offset_high;
+
+	unsigned long long pci_base, pci_bound, vme_offset;
+	struct tsi148_driver *bridge;
+
+	bridge = image->parent->driver_priv;
+
+	i = image->number;
+
+	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+			 TSI148_LCSR_OFFSET_OTAT);
+
+	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				   TSI148_LCSR_OFFSET_OTSAU);
+	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				  TSI148_LCSR_OFFSET_OTSAL);
+	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				    TSI148_LCSR_OFFSET_OTEAU);
+	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				   TSI148_LCSR_OFFSET_OTEAL);
+	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				     TSI148_LCSR_OFFSET_OTOFU);
+	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				    TSI148_LCSR_OFFSET_OTOFL);
+
+	/* Convert the 2x 32-bit register values back to 64-bit variables */
+	reg_join(pci_base_high, pci_base_low, &pci_base);
+	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
+	reg_join(vme_offset_high, vme_offset_low, &vme_offset);
+
+	*vme_base = pci_base + vme_offset;
+	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
+
+	*enabled = 0;
+	*aspace = 0;
+	*cycle = 0;
+	*dwidth = 0;
+
+	if (ctl & TSI148_LCSR_OTAT_EN)
+		*enabled = 1;
+
+	/* Decode address space */
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
+		*aspace |= VME_A16;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
+		*aspace |= VME_A24;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
+		*aspace |= VME_A32;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
+		*aspace |= VME_A64;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
+		*aspace |= VME_CRCSR;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
+		*aspace |= VME_USER1;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
+		*aspace |= VME_USER2;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
+		*aspace |= VME_USER3;
+	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
+		*aspace |= VME_USER4;
+
+	/* Decode 2eSST speeds */
+	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
+		*cycle |= VME_2eSST160;
+	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
+		*cycle |= VME_2eSST267;
+	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
+		*cycle |= VME_2eSST320;
+
+	/* Decode cycle types */
+	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
+		*cycle |= VME_SCT;
+	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
+		*cycle |= VME_BLT;
+	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
+		*cycle |= VME_MBLT;
+	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
+		*cycle |= VME_2eVME;
+	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
+		*cycle |= VME_2eSST;
+	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
+		*cycle |= VME_2eSSTB;
+
+	if (ctl & TSI148_LCSR_OTAT_SUP)
+		*cycle |= VME_SUPER;
+	else
+		*cycle |= VME_USER;
+
+	if (ctl & TSI148_LCSR_OTAT_PGM)
+		*cycle |= VME_PROG;
+	else
+		*cycle |= VME_DATA;
+
+	/* Decode data width */
+	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
+		*dwidth = VME_D16;
+	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
+		*dwidth = VME_D32;
+
+	return 0;
+}
+
+static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
+	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+	u32 *cycle, u32 *dwidth)
+{
+	int retval;
+
+	spin_lock(&image->lock);
+
+	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
+				     cycle, dwidth);
+
+	spin_unlock(&image->lock);
+
+	return retval;
+}
+
+static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
+	size_t count, loff_t offset)
+{
+	int retval, enabled;
+	unsigned long long vme_base, size;
+	u32 aspace, cycle, dwidth;
+	struct vme_error_handler *handler = NULL;
+	struct vme_bridge *tsi148_bridge;
+	void __iomem *addr = image->kern_base + offset;
+	unsigned int done = 0;
+	unsigned int count32;
+
+	tsi148_bridge = image->parent;
+
+	spin_lock(&image->lock);
+
+	if (err_chk) {
+		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
+				    &cycle, &dwidth);
+		handler = vme_register_error_handler(tsi148_bridge, aspace,
+						     vme_base + offset, count);
+		if (!handler) {
+			spin_unlock(&image->lock);
+			return -ENOMEM;
+		}
+	}
+
+	/* The following code handles VME address alignment. We cannot use
+	 * memcpy_xxx here because it may cut data transfers into 8-bit
+	 * cycles when D16 or D32 cycles are required on the VME bus.
+	 * On the other hand, the bridge itself ensures that the maximum data
+	 * cycle configured for the transfer is used and splits it
+	 * automatically for non-aligned addresses, so we don't want the
+	 * overhead of needlessly forcing small transfers for the entire cycle.
+	 */
+	if ((uintptr_t)addr & 0x1) {
+		*(u8 *)buf = ioread8(addr);
+		done += 1;
+		if (done == count)
+			goto out;
+	}
+	if ((uintptr_t)(addr + done) & 0x2) {
+		if ((count - done) < 2) {
+			*(u8 *)(buf + done) = ioread8(addr + done);
+			done += 1;
+			goto out;
+		} else {
+			*(u16 *)(buf + done) = ioread16(addr + done);
+			done += 2;
+		}
+	}
+
+	count32 = (count - done) & ~0x3;
+	while (done < count32) {
+		*(u32 *)(buf + done) = ioread32(addr + done);
+		done += 4;
+	}
+
+	if ((count - done) & 0x2) {
+		*(u16 *)(buf + done) = ioread16(addr + done);
+		done += 2;
+	}
+	if ((count - done) & 0x1) {
+		*(u8 *)(buf + done) = ioread8(addr + done);
+		done += 1;
+	}
+
+out:
+	retval = count;
+
+	if (err_chk) {
+		if (handler->num_errors) {
+			dev_err(image->parent->parent,
+				"First VME read error detected at address 0x%llx\n",
+				handler->first_error);
+			retval = handler->first_error - (vme_base + offset);
+		}
+		vme_unregister_error_handler(handler);
+	}
+
+	spin_unlock(&image->lock);
+
+	return retval;
+}
+
+static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
+	size_t count, loff_t offset)
+{
+	int retval = 0, enabled;
+	unsigned long long vme_base, size;
+	u32 aspace, cycle, dwidth;
+	void __iomem *addr = image->kern_base + offset;
+	unsigned int done = 0;
+	unsigned int count32;
+
+	struct vme_error_handler *handler = NULL;
+	struct vme_bridge *tsi148_bridge;
+	struct tsi148_driver *bridge;
+
+	tsi148_bridge = image->parent;
+
+	bridge = tsi148_bridge->driver_priv;
+
+	spin_lock(&image->lock);
+
+	if (err_chk) {
+		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
+				    &cycle, &dwidth);
+		handler = vme_register_error_handler(tsi148_bridge, aspace,
+						     vme_base + offset, count);
+		if (!handler) {
+			spin_unlock(&image->lock);
+			return -ENOMEM;
+		}
+	}
+
+	/* Here we apply the same strategy as in the master_read function in
+	 * order to ensure the correct cycles.
+	 */
+	if ((uintptr_t)addr & 0x1) {
+		iowrite8(*(u8 *)buf, addr);
+		done += 1;
+		if (done == count)
+			goto out;
+	}
+	if ((uintptr_t)(addr + done) & 0x2) {
+		if ((count - done) < 2) {
+			iowrite8(*(u8 *)(buf + done), addr + done);
+			done += 1;
+			goto out;
+		} else {
+			iowrite16(*(u16 *)(buf + done), addr + done);
+			done += 2;
+		}
+	}
+
+	count32 = (count - done) & ~0x3;
+	while (done < count32) {
+		iowrite32(*(u32 *)(buf + done), addr + done);
+		done += 4;
+	}
+
+	if ((count - done) & 0x2) {
+		iowrite16(*(u16 *)(buf + done), addr + done);
+		done += 2;
+	}
+	if ((count - done) & 0x1) {
+		iowrite8(*(u8 *)(buf + done), addr + done);
+		done += 1;
+	}
+
+out:
+	retval = count;
+
+	/*
+	 * Writes are posted. We need to do a read on the VME bus to flush out
+	 * all of the writes before we check for errors. We can't guarantee
+	 * that reading the data we have just written is safe. It is believed
+	 * that there isn't any read/write re-ordering, so we can read any
+	 * location in VME space, so let's read the Device ID from the tsi148's
+	 * own registers as mapped into CR/CSR space.
+	 *
+	 * We check for saved errors in the written address range/space.
+	 */
+
+	if (err_chk) {
+		ioread16(bridge->flush_image->kern_base + 0x7F000);
+
+		if (handler->num_errors) {
+			dev_warn(tsi148_bridge->parent,
+				 "First VME write error detected at address 0x%llx\n",
+				 handler->first_error);
+			retval = handler->first_error - (vme_base + offset);
+		}
+		vme_unregister_error_handler(handler);
+	}
+
+	spin_unlock(&image->lock);
+
+	return retval;
+}
+
+/*
+ * Perform an RMW cycle on the VME bus.
+ *
+ * Requires a previously configured master window, returns final value.
+ */
+static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
+	unsigned int mask, unsigned int compare, unsigned int swap,
+	loff_t offset)
+{
+	unsigned long long pci_addr;
+	unsigned int pci_addr_high, pci_addr_low;
+	u32 tmp, result;
+	int i;
+	struct tsi148_driver *bridge;
+
+	bridge = image->parent->driver_priv;
+
+	/* Find the PCI address that maps to the desired VME address */
+	i = image->number;
+
+	/* Locking as we can only do one of these at a time */
+	mutex_lock(&bridge->vme_rmw);
+
+	/* Lock image */
+	spin_lock(&image->lock);
+
+	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				   TSI148_LCSR_OFFSET_OTSAU);
+	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+				  TSI148_LCSR_OFFSET_OTSAL);
+
+	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
+	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
+
+	/* Configure registers */
+	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
+	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
+	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
+	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
+	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
+
+	/* Enable RMW */
+	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
+	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
+	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
+
+	/* Kick process off with a read to the required address.
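+	 * This single read both triggers the bridge's compare-and-swap
+	 * sequence on the VME bus and returns the data phase of that cycle
+	 * to the caller.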
*/ + result = ioread32be(image->kern_base + offset); + + /* Disable RMW */ + tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL); + tmp &= ~TSI148_LCSR_VMCTRL_RMWEN; + iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL); + + spin_unlock(&image->lock); + + mutex_unlock(&bridge->vme_rmw); + + return result; +} + +static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr, + u32 aspace, u32 cycle, u32 dwidth) +{ + u32 val; + + val = be32_to_cpu(*attr); + + /* Setup 2eSST speeds */ + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + val |= TSI148_LCSR_DSAT_2eSSTM_160; + break; + case VME_2eSST267: + val |= TSI148_LCSR_DSAT_2eSSTM_267; + break; + case VME_2eSST320: + val |= TSI148_LCSR_DSAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + if (cycle & VME_SCT) + val |= TSI148_LCSR_DSAT_TM_SCT; + + if (cycle & VME_BLT) + val |= TSI148_LCSR_DSAT_TM_BLT; + + if (cycle & VME_MBLT) + val |= TSI148_LCSR_DSAT_TM_MBLT; + + if (cycle & VME_2eVME) + val |= TSI148_LCSR_DSAT_TM_2eVME; + + if (cycle & VME_2eSST) + val |= TSI148_LCSR_DSAT_TM_2eSST; + + if (cycle & VME_2eSSTB) { + dev_err(dev, "Currently not setting Broadcast Select " + "Registers\n"); + val |= TSI148_LCSR_DSAT_TM_2eSSTB; + } + + /* Setup data width */ + switch (dwidth) { + case VME_D16: + val |= TSI148_LCSR_DSAT_DBW_16; + break; + case VME_D32: + val |= TSI148_LCSR_DSAT_DBW_32; + break; + default: + dev_err(dev, "Invalid data width\n"); + return -EINVAL; + } + + /* Setup address space */ + switch (aspace) { + case VME_A16: + val |= TSI148_LCSR_DSAT_AMODE_A16; + break; + case VME_A24: + val |= TSI148_LCSR_DSAT_AMODE_A24; + break; + case VME_A32: + val |= TSI148_LCSR_DSAT_AMODE_A32; + break; + case VME_A64: + val |= TSI148_LCSR_DSAT_AMODE_A64; + break; + case VME_CRCSR: + val |= TSI148_LCSR_DSAT_AMODE_CRCSR; + break; + case VME_USER1: + val |= TSI148_LCSR_DSAT_AMODE_USER1; + break; + case VME_USER2: + val |= TSI148_LCSR_DSAT_AMODE_USER2; + break; + case VME_USER3: + val |= TSI148_LCSR_DSAT_AMODE_USER3; + break; + case VME_USER4: + val |= TSI148_LCSR_DSAT_AMODE_USER4; + break; + default: + dev_err(dev, "Invalid address space\n"); + return -EINVAL; + } + + if (cycle & VME_SUPER) + val |= TSI148_LCSR_DSAT_SUP; + if (cycle & VME_PROG) + val |= TSI148_LCSR_DSAT_PGM; + + *attr = cpu_to_be32(val); + + return 0; +} + +static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr, + u32 aspace, u32 cycle, u32 dwidth) +{ + u32 val; + + val = be32_to_cpu(*attr); + + /* Setup 2eSST speeds */ + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + val |= TSI148_LCSR_DDAT_2eSSTM_160; + break; + case VME_2eSST267: + val |= TSI148_LCSR_DDAT_2eSSTM_267; + break; + case VME_2eSST320: + val |= TSI148_LCSR_DDAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + if (cycle & VME_SCT) + val |= TSI148_LCSR_DDAT_TM_SCT; + + if (cycle & VME_BLT) + val |= TSI148_LCSR_DDAT_TM_BLT; + + if (cycle & VME_MBLT) + val |= TSI148_LCSR_DDAT_TM_MBLT; + + if (cycle & VME_2eVME) + val |= TSI148_LCSR_DDAT_TM_2eVME; + + if (cycle & VME_2eSST) + val |= TSI148_LCSR_DDAT_TM_2eSST; + + if (cycle & VME_2eSSTB) { + dev_err(dev, "Currently not setting Broadcast Select " + "Registers\n"); + val |= TSI148_LCSR_DDAT_TM_2eSSTB; + } + + /* Setup data width */ + switch (dwidth) { + case VME_D16: + val |= TSI148_LCSR_DDAT_DBW_16; + break; + case VME_D32: + val |= TSI148_LCSR_DDAT_DBW_32; + break; + default: + dev_err(dev, "Invalid data width\n"); + return -EINVAL; + } + + /* Setup 
address space */ + switch (aspace) { + case VME_A16: + val |= TSI148_LCSR_DDAT_AMODE_A16; + break; + case VME_A24: + val |= TSI148_LCSR_DDAT_AMODE_A24; + break; + case VME_A32: + val |= TSI148_LCSR_DDAT_AMODE_A32; + break; + case VME_A64: + val |= TSI148_LCSR_DDAT_AMODE_A64; + break; + case VME_CRCSR: + val |= TSI148_LCSR_DDAT_AMODE_CRCSR; + break; + case VME_USER1: + val |= TSI148_LCSR_DDAT_AMODE_USER1; + break; + case VME_USER2: + val |= TSI148_LCSR_DDAT_AMODE_USER2; + break; + case VME_USER3: + val |= TSI148_LCSR_DDAT_AMODE_USER3; + break; + case VME_USER4: + val |= TSI148_LCSR_DDAT_AMODE_USER4; + break; + default: + dev_err(dev, "Invalid address space\n"); + return -EINVAL; + } + + if (cycle & VME_SUPER) + val |= TSI148_LCSR_DDAT_SUP; + if (cycle & VME_PROG) + val |= TSI148_LCSR_DDAT_PGM; + + *attr = cpu_to_be32(val); + + return 0; +} + +/* + * Add a link list descriptor to the list + * + * Note: DMA engine expects the DMA descriptor to be big endian. + */ +static int tsi148_dma_list_add(struct vme_dma_list *list, + struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count) +{ + struct tsi148_dma_entry *entry, *prev; + u32 address_high, address_low, val; + struct vme_dma_pattern *pattern_attr; + struct vme_dma_pci *pci_attr; + struct vme_dma_vme *vme_attr; + int retval = 0; + struct vme_bridge *tsi148_bridge; + + tsi148_bridge = list->parent->parent; + + /* Descriptor must be aligned on 64-bit boundaries */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + retval = -ENOMEM; + goto err_mem; + } + + /* Test descriptor alignment */ + if ((unsigned long)&entry->descriptor & 0x7) { + dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 " + "byte boundary as required: %p\n", + &entry->descriptor); + retval = -EINVAL; + goto err_align; + } + + /* Given we are going to fill out the structure, we probably don't + * need to zero it, but better safe than sorry for now. 
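+	 * (Note that the hardware reads every field of the descriptor,
+	 * including the 2eSST broadcast select, which this function never
+	 * writes.)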
+ */ + memset(&entry->descriptor, 0, sizeof(entry->descriptor)); + + /* Fill out source part */ + switch (src->type) { + case VME_DMA_PATTERN: + pattern_attr = src->private; + + entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern); + + val = TSI148_LCSR_DSAT_TYP_PAT; + + /* Default behaviour is 32 bit pattern */ + if (pattern_attr->type & VME_DMA_PATTERN_BYTE) + val |= TSI148_LCSR_DSAT_PSZ; + + /* It seems that the default behaviour is to increment */ + if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) + val |= TSI148_LCSR_DSAT_NIN; + entry->descriptor.dsat = cpu_to_be32(val); + break; + case VME_DMA_PCI: + pci_attr = src->private; + + reg_split((unsigned long long)pci_attr->address, &address_high, + &address_low); + entry->descriptor.dsau = cpu_to_be32(address_high); + entry->descriptor.dsal = cpu_to_be32(address_low); + entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI); + break; + case VME_DMA_VME: + vme_attr = src->private; + + reg_split((unsigned long long)vme_attr->address, &address_high, + &address_low); + entry->descriptor.dsau = cpu_to_be32(address_high); + entry->descriptor.dsal = cpu_to_be32(address_low); + entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME); + + retval = tsi148_dma_set_vme_src_attributes( + tsi148_bridge->parent, &entry->descriptor.dsat, + vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth); + if (retval < 0) + goto err_source; + break; + default: + dev_err(tsi148_bridge->parent, "Invalid source type\n"); + retval = -EINVAL; + goto err_source; + } + + /* Assume last link - this will be over-written by adding another */ + entry->descriptor.dnlau = cpu_to_be32(0); + entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA); + + /* Fill out destination part */ + switch (dest->type) { + case VME_DMA_PCI: + pci_attr = dest->private; + + reg_split((unsigned long long)pci_attr->address, &address_high, + &address_low); + entry->descriptor.ddau = cpu_to_be32(address_high); + entry->descriptor.ddal = cpu_to_be32(address_low); + entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI); + break; + case VME_DMA_VME: + vme_attr = dest->private; + + reg_split((unsigned long long)vme_attr->address, &address_high, + &address_low); + entry->descriptor.ddau = cpu_to_be32(address_high); + entry->descriptor.ddal = cpu_to_be32(address_low); + entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME); + + retval = tsi148_dma_set_vme_dest_attributes( + tsi148_bridge->parent, &entry->descriptor.ddat, + vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth); + if (retval < 0) + goto err_dest; + break; + default: + dev_err(tsi148_bridge->parent, "Invalid destination type\n"); + retval = -EINVAL; + goto err_dest; + } + + /* Fill out count */ + entry->descriptor.dcnt = cpu_to_be32((u32)count); + + /* Add to list */ + list_add_tail(&entry->list, &list->entries); + + entry->dma_handle = dma_map_single(tsi148_bridge->parent, + &entry->descriptor, + sizeof(entry->descriptor), + DMA_TO_DEVICE); + if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) { + dev_err(tsi148_bridge->parent, "DMA mapping error\n"); + retval = -EINVAL; + goto err_dma; + } + + /* Fill out previous descriptors "Next Address" */ + if (entry->list.prev != &list->entries) { + reg_split((unsigned long long)entry->dma_handle, &address_high, + &address_low); + prev = list_entry(entry->list.prev, struct tsi148_dma_entry, + list); + prev->descriptor.dnlau = cpu_to_be32(address_high); + prev->descriptor.dnlal = cpu_to_be32(address_low); + + } + + return 0; + +err_dma: 
+err_dest: +err_source: +err_align: + kfree(entry); +err_mem: + return retval; +} + +/* + * Check to see if the provided DMA channel is busy. + */ +static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel) +{ + u32 tmp; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + + TSI148_LCSR_OFFSET_DSTA); + + if (tmp & TSI148_LCSR_DSTA_BSY) + return 0; + else + return 1; + +} + +/* + * Execute a previously generated link list + * + * XXX Need to provide control register configuration. + */ +static int tsi148_dma_list_exec(struct vme_dma_list *list) +{ + struct vme_dma_resource *ctrlr; + int channel, retval; + struct tsi148_dma_entry *entry; + u32 bus_addr_high, bus_addr_low; + u32 val, dctlreg = 0; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + ctrlr = list->parent; + + tsi148_bridge = ctrlr->parent; + + bridge = tsi148_bridge->driver_priv; + + mutex_lock(&ctrlr->mtx); + + channel = ctrlr->number; + + if (!list_empty(&ctrlr->running)) { + /* + * XXX We have an active DMA transfer and currently haven't + * sorted out the mechanism for "pending" DMA transfers. + * Return busy. + */ + /* Need to add to pending here */ + mutex_unlock(&ctrlr->mtx); + return -EBUSY; + } else { + list_add(&list->list, &ctrlr->running); + } + + /* Get first bus address and write into registers */ + entry = list_first_entry(&list->entries, struct tsi148_dma_entry, + list); + + mutex_unlock(&ctrlr->mtx); + + reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low); + + iowrite32be(bus_addr_high, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU); + iowrite32be(bus_addr_low, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL); + + dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + + TSI148_LCSR_OFFSET_DCTL); + + /* Start the operation */ + iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); + + retval = wait_event_interruptible(bridge->dma_queue[channel], + tsi148_dma_busy(ctrlr->parent, channel)); + + if (retval) { + iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); + /* Wait for the operation to abort */ + wait_event(bridge->dma_queue[channel], + tsi148_dma_busy(ctrlr->parent, channel)); + retval = -EINTR; + goto exit; + } + + /* + * Read status register, this register is valid until we kick off a + * new transfer. + */ + val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + + TSI148_LCSR_OFFSET_DSTA); + + if (val & TSI148_LCSR_DSTA_VBE) { + dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val); + retval = -EIO; + } + +exit: + /* Remove list from running list */ + mutex_lock(&ctrlr->mtx); + list_del(&list->list); + mutex_unlock(&ctrlr->mtx); + + return retval; +} + +/* + * Clean up a previously generated link list + * + * We have a separate function, don't assume that the chain can't be reused. 
+ */
+static int tsi148_dma_list_empty(struct vme_dma_list *list)
+{
+	struct list_head *pos, *temp;
+	struct tsi148_dma_entry *entry;
+
+	struct vme_bridge *tsi148_bridge = list->parent->parent;
+
+	/* detach and free each entry */
+	list_for_each_safe(pos, temp, &list->entries) {
+		list_del(pos);
+		entry = list_entry(pos, struct tsi148_dma_entry, list);
+
+		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
+				 sizeof(struct tsi148_dma_descriptor),
+				 DMA_TO_DEVICE);
+		kfree(entry);
+	}
+
+	return 0;
+}
+
+/*
+ * All 4 location monitors reside at the same base - this is therefore a
+ * system-wide configuration.
+ *
+ * This does not enable the LM monitor - that should be done when the first
+ * callback is attached and disabled when the last callback is removed.
+ */
+static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
+	u32 aspace, u32 cycle)
+{
+	u32 lm_base_high, lm_base_low, lm_ctl = 0;
+	int i;
+	struct vme_bridge *tsi148_bridge;
+	struct tsi148_driver *bridge;
+
+	tsi148_bridge = lm->parent;
+
+	bridge = tsi148_bridge->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	/* If we already have a callback attached, we can't move it! */
+	for (i = 0; i < lm->monitors; i++) {
+		if (bridge->lm_callback[i]) {
+			mutex_unlock(&lm->mtx);
+			dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
+			return -EBUSY;
+		}
+	}
+
+	switch (aspace) {
+	case VME_A16:
+		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
+		break;
+	case VME_A24:
+		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
+		break;
+	case VME_A32:
+		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
+		break;
+	case VME_A64:
+		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
+		break;
+	default:
+		mutex_unlock(&lm->mtx);
+		dev_err(tsi148_bridge->parent, "Invalid address space\n");
+		return -EINVAL;
+	}
+
+	if (cycle & VME_SUPER)
+		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
+	if (cycle & VME_USER)
+		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
+	if (cycle & VME_PROG)
+		lm_ctl |= TSI148_LCSR_LMAT_PGM;
+	if (cycle & VME_DATA)
+		lm_ctl |= TSI148_LCSR_LMAT_DATA;
+
+	reg_split(lm_base, &lm_base_high, &lm_base_low);
+
+	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
+	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
+	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
+
+	mutex_unlock(&lm->mtx);
+
+	return 0;
+}
+
+/* Get the configuration of the location monitor and return whether it is
+ * enabled or disabled.
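+ * Note that the enabled state is passed back as the return value rather
+ * than through an output parameter.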
+ */
+static int tsi148_lm_get(struct vme_lm_resource *lm,
+	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+{
+	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
+	struct tsi148_driver *bridge;
+
+	bridge = lm->parent->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
+	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
+	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
+
+	reg_join(lm_base_high, lm_base_low, lm_base);
+
+	if (lm_ctl & TSI148_LCSR_LMAT_EN)
+		enabled = 1;
+
+	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
+		*aspace |= VME_A16;
+	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
+		*aspace |= VME_A24;
+	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
+		*aspace |= VME_A32;
+	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
+		*aspace |= VME_A64;
+
+	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
+		*cycle |= VME_SUPER;
+	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
+		*cycle |= VME_USER;
+	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
+		*cycle |= VME_PROG;
+	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
+		*cycle |= VME_DATA;
+
+	mutex_unlock(&lm->mtx);
+
+	return enabled;
+}
+
+/*
+ * Attach a callback to a specific location monitor.
+ *
+ * The data pointer given here is passed to the callback when the monitor
+ * triggers.
+ */
+static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
+	void (*callback)(void *), void *data)
+{
+	u32 lm_ctl, tmp;
+	struct vme_bridge *tsi148_bridge;
+	struct tsi148_driver *bridge;
+
+	tsi148_bridge = lm->parent;
+
+	bridge = tsi148_bridge->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	/* Ensure that the location monitor is configured - need PGM or DATA */
+	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
+	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
+		mutex_unlock(&lm->mtx);
+		dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
+		return -EINVAL;
+	}
+
+	/* Check that a callback isn't already attached */
+	if (bridge->lm_callback[monitor]) {
+		mutex_unlock(&lm->mtx);
+		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
+		return -EBUSY;
+	}
+
+	/* Attach callback */
+	bridge->lm_callback[monitor] = callback;
+	bridge->lm_data[monitor] = data;
+
+	/* Enable Location Monitor interrupt */
+	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
+	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
+	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
+
+	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
+	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
+
+	/* Ensure that the global Location Monitor Enable is set */
+	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
+		lm_ctl |= TSI148_LCSR_LMAT_EN;
+		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
+	}
+
+	mutex_unlock(&lm->mtx);
+
+	return 0;
+}
+
+/*
+ * Detach a callback function from a specific location monitor.
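+ *
+ * The monitor's interrupt sources are masked and any latched status is
+ * cleared before the callback pointer is dropped; the global Location
+ * Monitor Enable is removed once no monitors remain in use.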
+ */
+static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
+{
+	u32 lm_en, tmp;
+	struct tsi148_driver *bridge;
+
+	bridge = lm->parent->driver_priv;
+
+	mutex_lock(&lm->mtx);
+
+	/* Disable Location Monitor and ensure previous interrupts are clear */
+	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
+	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
+	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
+
+	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
+	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
+
+	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
+		    bridge->base + TSI148_LCSR_INTC);
+
+	/* Detach callback */
+	bridge->lm_callback[monitor] = NULL;
+	bridge->lm_data[monitor] = NULL;
+
+	/* If all location monitors are disabled, disable the global enable */
+	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
+		      TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
+		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
+		tmp &= ~TSI148_LCSR_LMAT_EN;
+		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
+	}
+
+	mutex_unlock(&lm->mtx);
+
+	return 0;
+}
+
+/*
+ * Determine Geographical Addressing
+ */
+static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
+{
+	u32 slot = 0;
+	struct tsi148_driver *bridge;
+
+	bridge = tsi148_bridge->driver_priv;
+
+	if (!geoid) {
+		slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
+		slot = slot & TSI148_LCSR_VSTAT_GA_M;
+	} else {
+		slot = geoid;
+	}
+
+	return (int)slot;
+}
+
+static void *tsi148_alloc_consistent(struct device *parent, size_t size,
+	dma_addr_t *dma)
+{
+	struct pci_dev *pdev;
+
+	/* Find pci_dev container of dev */
+	pdev = to_pci_dev(parent);
+
+	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
+}
+
+static void tsi148_free_consistent(struct device *parent, size_t size,
+	void *vaddr, dma_addr_t dma)
+{
+	struct pci_dev *pdev;
+
+	/* Find pci_dev container of dev */
+	pdev = to_pci_dev(parent);
+
+	dma_free_coherent(&pdev->dev, size, vaddr, dma);
+}
+
+/*
+ * Configure CR/CSR space
+ *
+ * Access to the CR/CSR can be configured at power-up. The location of the
+ * CR/CSR registers in the CR/CSR address space is determined by the board's
+ * Auto-ID or Geographic address. This function ensures that the window is
+ * enabled at an offset consistent with the board's geographic address.
+ *
+ * Each board has a 512kB window, with the highest 4kB being used for the
+ * board's registers; this means there is a fixed-length 508kB window which
+ * must be mapped onto PCI memory.
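+ *
+ * For example, a board with geographic address 3 answers CR/CSR accesses
+ * at 3 * 512kB = 0x180000 on the VME bus, which is why the flush window
+ * below is programmed at (vstat * 0x80000).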
+ */ +static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, + struct pci_dev *pdev) +{ + u32 cbar, crat, vstat; + u32 crcsr_bus_high, crcsr_bus_low; + int retval; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + /* Allocate mem for CR/CSR image */ + bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev, + VME_CRCSR_BUF_SIZE, + &bridge->crcsr_bus, GFP_KERNEL); + if (!bridge->crcsr_kernel) { + dev_err(tsi148_bridge->parent, "Failed to allocate memory for " + "CR/CSR image\n"); + return -ENOMEM; + } + + reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low); + + iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU); + iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL); + + /* Ensure that the CR/CSR is configured at the correct offset */ + cbar = ioread32be(bridge->base + TSI148_CBAR); + cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3; + + vstat = tsi148_slot_get(tsi148_bridge); + + if (cbar != vstat) { + cbar = vstat; + dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n"); + iowrite32be(cbar<<3, bridge->base + TSI148_CBAR); + } + dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar); + + crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); + if (crat & TSI148_LCSR_CRAT_EN) + dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n"); + else { + dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n"); + iowrite32be(crat | TSI148_LCSR_CRAT_EN, + bridge->base + TSI148_LCSR_CRAT); + } + + /* If we want flushed, error-checked writes, set up a window + * over the CR/CSR registers. We read from here to safely flush + * through VME writes. + */ + if (err_chk) { + retval = tsi148_master_set(bridge->flush_image, 1, + (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT, + VME_D16); + if (retval) + dev_err(tsi148_bridge->parent, "Configuring flush image" + " failed\n"); + } + + return 0; + +} + +static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge, + struct pci_dev *pdev) +{ + u32 crat; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + /* Turn off CR/CSR space */ + crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); + iowrite32be(crat & ~TSI148_LCSR_CRAT_EN, + bridge->base + TSI148_LCSR_CRAT); + + /* Free image */ + iowrite32be(0, bridge->base + TSI148_LCSR_CROU); + iowrite32be(0, bridge->base + TSI148_LCSR_CROL); + + dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE, + bridge->crcsr_kernel, bridge->crcsr_bus); +} + +static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int retval, i, master_num; + u32 data; + struct list_head *pos = NULL, *n; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *tsi148_device; + struct vme_master_resource *master_image; + struct vme_slave_resource *slave_image; + struct vme_dma_resource *dma_ctrlr; + struct vme_lm_resource *lm; + + /* If we want to support more than one of each bridge, we need to + * dynamically generate this so we get one per device + */ + tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL); + if (!tsi148_bridge) { + retval = -ENOMEM; + goto err_struct; + } + vme_init_bridge(tsi148_bridge); + + tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL); + if (!tsi148_device) { + retval = -ENOMEM; + goto err_driver; + } + + tsi148_bridge->driver_priv = tsi148_device; + + /* Enable the device */ + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "Unable to enable device\n"); + goto err_enable; + } + + /* Map Registers */ + retval = pci_request_regions(pdev, driver_name); + if (retval) { + 
dev_err(&pdev->dev, "Unable to reserve resources\n"); + goto err_resource; + } + + /* map registers in BAR 0 */ + tsi148_device->base = ioremap(pci_resource_start(pdev, 0), + 4096); + if (!tsi148_device->base) { + dev_err(&pdev->dev, "Unable to remap CRG region\n"); + retval = -EIO; + goto err_remap; + } + + /* Check to see if the mapping worked out */ + data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF; + if (data != PCI_VENDOR_ID_TUNDRA) { + dev_err(&pdev->dev, "CRG region check failed\n"); + retval = -EIO; + goto err_test; + } + + /* Initialize wait queues & mutual exclusion flags */ + init_waitqueue_head(&tsi148_device->dma_queue[0]); + init_waitqueue_head(&tsi148_device->dma_queue[1]); + init_waitqueue_head(&tsi148_device->iack_queue); + mutex_init(&tsi148_device->vme_int); + mutex_init(&tsi148_device->vme_rmw); + + tsi148_bridge->parent = &pdev->dev; + strcpy(tsi148_bridge->name, driver_name); + + /* Setup IRQ */ + retval = tsi148_irq_init(tsi148_bridge); + if (retval != 0) { + dev_err(&pdev->dev, "Chip Initialization failed.\n"); + goto err_irq; + } + + /* If we are going to flush writes, we need to read from the VME bus. + * We need to do this safely, thus we read the devices own CR/CSR + * register. To do this we must set up a window in CR/CSR space and + * hence have one less master window resource available. + */ + master_num = TSI148_MAX_MASTER; + if (err_chk) { + master_num--; + + tsi148_device->flush_image = + kmalloc(sizeof(*tsi148_device->flush_image), + GFP_KERNEL); + if (!tsi148_device->flush_image) { + retval = -ENOMEM; + goto err_master; + } + tsi148_device->flush_image->parent = tsi148_bridge; + spin_lock_init(&tsi148_device->flush_image->lock); + tsi148_device->flush_image->locked = 1; + tsi148_device->flush_image->number = master_num; + memset(&tsi148_device->flush_image->bus_resource, 0, + sizeof(tsi148_device->flush_image->bus_resource)); + tsi148_device->flush_image->kern_base = NULL; + } + + /* Add master windows to list */ + for (i = 0; i < master_num; i++) { + master_image = kmalloc(sizeof(*master_image), GFP_KERNEL); + if (!master_image) { + retval = -ENOMEM; + goto err_master; + } + master_image->parent = tsi148_bridge; + spin_lock_init(&master_image->lock); + master_image->locked = 0; + master_image->number = i; + master_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 | + VME_USER3 | VME_USER4; + master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + master_image->width_attr = VME_D16 | VME_D32; + memset(&master_image->bus_resource, 0, + sizeof(master_image->bus_resource)); + master_image->kern_base = NULL; + list_add_tail(&master_image->list, + &tsi148_bridge->master_resources); + } + + /* Add slave windows to list */ + for (i = 0; i < TSI148_MAX_SLAVE; i++) { + slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL); + if (!slave_image) { + retval = -ENOMEM; + goto err_slave; + } + slave_image->parent = tsi148_bridge; + mutex_init(&slave_image->mtx); + slave_image->locked = 0; + slave_image->number = i; + slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64; + slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + list_add_tail(&slave_image->list, + &tsi148_bridge->slave_resources); + } + + /* Add dma 
engines to list */ + for (i = 0; i < TSI148_MAX_DMA; i++) { + dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL); + if (!dma_ctrlr) { + retval = -ENOMEM; + goto err_dma; + } + dma_ctrlr->parent = tsi148_bridge; + mutex_init(&dma_ctrlr->mtx); + dma_ctrlr->locked = 0; + dma_ctrlr->number = i; + dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM | + VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME | + VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME | + VME_DMA_PATTERN_TO_MEM; + INIT_LIST_HEAD(&dma_ctrlr->pending); + INIT_LIST_HEAD(&dma_ctrlr->running); + list_add_tail(&dma_ctrlr->list, + &tsi148_bridge->dma_resources); + } + + /* Add location monitor to list */ + lm = kmalloc(sizeof(*lm), GFP_KERNEL); + if (!lm) { + retval = -ENOMEM; + goto err_lm; + } + lm->parent = tsi148_bridge; + mutex_init(&lm->mtx); + lm->locked = 0; + lm->number = 1; + lm->monitors = 4; + list_add_tail(&lm->list, &tsi148_bridge->lm_resources); + + tsi148_bridge->slave_get = tsi148_slave_get; + tsi148_bridge->slave_set = tsi148_slave_set; + tsi148_bridge->master_get = tsi148_master_get; + tsi148_bridge->master_set = tsi148_master_set; + tsi148_bridge->master_read = tsi148_master_read; + tsi148_bridge->master_write = tsi148_master_write; + tsi148_bridge->master_rmw = tsi148_master_rmw; + tsi148_bridge->dma_list_add = tsi148_dma_list_add; + tsi148_bridge->dma_list_exec = tsi148_dma_list_exec; + tsi148_bridge->dma_list_empty = tsi148_dma_list_empty; + tsi148_bridge->irq_set = tsi148_irq_set; + tsi148_bridge->irq_generate = tsi148_irq_generate; + tsi148_bridge->lm_set = tsi148_lm_set; + tsi148_bridge->lm_get = tsi148_lm_get; + tsi148_bridge->lm_attach = tsi148_lm_attach; + tsi148_bridge->lm_detach = tsi148_lm_detach; + tsi148_bridge->slot_get = tsi148_slot_get; + tsi148_bridge->alloc_consistent = tsi148_alloc_consistent; + tsi148_bridge->free_consistent = tsi148_free_consistent; + + data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); + dev_info(&pdev->dev, "Board is%s the VME system controller\n", + (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not"); + if (!geoid) + dev_info(&pdev->dev, "VME geographical address is %d\n", + data & TSI148_LCSR_VSTAT_GA_M); + else + dev_info(&pdev->dev, "VME geographical address is set to %d\n", + geoid); + + dev_info(&pdev->dev, "VME Write and flush and error check is %s\n", + err_chk ? 
"enabled" : "disabled"); + + retval = tsi148_crcsr_init(tsi148_bridge, pdev); + if (retval) { + dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); + goto err_crcsr; + } + + retval = vme_register_bridge(tsi148_bridge); + if (retval != 0) { + dev_err(&pdev->dev, "Chip Registration failed.\n"); + goto err_reg; + } + + pci_set_drvdata(pdev, tsi148_bridge); + + /* Clear VME bus "board fail", and "power-up reset" lines */ + data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); + data &= ~TSI148_LCSR_VSTAT_BRDFL; + data |= TSI148_LCSR_VSTAT_CPURST; + iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT); + + return 0; + +err_reg: + tsi148_crcsr_exit(tsi148_bridge, pdev); +err_crcsr: +err_lm: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) { + lm = list_entry(pos, struct vme_lm_resource, list); + list_del(pos); + kfree(lm); + } +err_dma: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) { + dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); + list_del(pos); + kfree(dma_ctrlr); + } +err_slave: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } +err_master: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + tsi148_irq_exit(tsi148_bridge, pdev); +err_irq: +err_test: + iounmap(tsi148_device->base); +err_remap: + pci_release_regions(pdev); +err_resource: + pci_disable_device(pdev); +err_enable: + kfree(tsi148_device); +err_driver: + kfree(tsi148_bridge); +err_struct: + return retval; + +} + +static void tsi148_remove(struct pci_dev *pdev) +{ + struct list_head *pos = NULL; + struct list_head *tmplist; + struct vme_master_resource *master_image; + struct vme_slave_resource *slave_image; + struct vme_dma_resource *dma_ctrlr; + int i; + struct tsi148_driver *bridge; + struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev); + + bridge = tsi148_bridge->driver_priv; + + + dev_dbg(&pdev->dev, "Driver is being unloaded.\n"); + + /* + * Shutdown all inbound and outbound windows. + */ + for (i = 0; i < 8; i++) { + iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + } + + /* + * Shutdown Location monitor. + */ + iowrite32be(0, bridge->base + TSI148_LCSR_LMAT); + + /* + * Shutdown CRG map. + */ + iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT); + + /* + * Clear error status. 
+ */ + iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT); + iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT); + iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT); + + /* + * Remove VIRQ interrupt (if any) + */ + if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800) + iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR); + + /* + * Map all Interrupts to PCI INTA + */ + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1); + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2); + + tsi148_irq_exit(tsi148_bridge, pdev); + + vme_unregister_bridge(tsi148_bridge); + + tsi148_crcsr_exit(tsi148_bridge, pdev); + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) { + dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); + list_del(pos); + kfree(dma_ctrlr); + } + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + iounmap(bridge->base); + + pci_release_regions(pdev); + + pci_disable_device(pdev); + + kfree(tsi148_bridge->driver_priv); + + kfree(tsi148_bridge); +} + +module_pci_driver(tsi148_driver); + +MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes"); +module_param(err_chk, bool, 0); + +MODULE_PARM_DESC(geoid, "Override geographical addressing"); +module_param(geoid, int, 0); + +MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/vme_user/vme_tsi148.h b/drivers/staging/vme_user/vme_tsi148.h new file mode 100644 index 0000000000..226fedc6f1 --- /dev/null +++ b/drivers/staging/vme_user/vme_tsi148.h @@ -0,0 +1,1407 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * tsi148.h + * + * Support for the Tundra TSI148 VME Bridge chip + * + * Author: Tom Armistead + * Updated and maintained by Ajit Prem + * Copyright 2004 Motorola Inc. + */ + +#ifndef TSI148_H +#define TSI148_H + +#ifndef PCI_VENDOR_ID_TUNDRA +#define PCI_VENDOR_ID_TUNDRA 0x10e3 +#endif + +#ifndef PCI_DEVICE_ID_TUNDRA_TSI148 +#define PCI_DEVICE_ID_TUNDRA_TSI148 0x148 +#endif + +/* + * Define the number of each that the Tsi148 supports. + */ +#define TSI148_MAX_MASTER 8 /* Max Master Windows */ +#define TSI148_MAX_SLAVE 8 /* Max Slave Windows */ +#define TSI148_MAX_DMA 2 /* Max DMA Controllers */ +#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */ +#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */ + +/* Structure used to hold driver specific information */ +struct tsi148_driver { + void __iomem *base; /* Base Address of device registers */ + wait_queue_head_t dma_queue[2]; + wait_queue_head_t iack_queue; + void (*lm_callback[4])(void *); /* Called in interrupt handler */ + void *lm_data[4]; + void *crcsr_kernel; + dma_addr_t crcsr_bus; + struct vme_master_resource *flush_image; + struct mutex vme_rmw; /* Only one RMW cycle at a time */ + struct mutex vme_int; /* + * Only one VME interrupt can be + * generated at a time, provide locking + */ +}; + +/* + * Layout of a DMAC Linked-List Descriptor + * + * Note: This structure is accessed via the chip and therefore must be + * correctly laid out - It must also be aligned on 64-bit boundaries. 
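+ * tsi148_dma_list_add() checks this alignment at run time, and
+ * struct tsi148_dma_entry below places the descriptor first in the
+ * structure to help satisfy the requirement.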
+ */ +struct tsi148_dma_descriptor { + __be32 dsau; /* Source Address */ + __be32 dsal; + __be32 ddau; /* Destination Address */ + __be32 ddal; + __be32 dsat; /* Source attributes */ + __be32 ddat; /* Destination attributes */ + __be32 dnlau; /* Next link address */ + __be32 dnlal; + __be32 dcnt; /* Byte count */ + __be32 ddbs; /* 2eSST Broadcast select */ +}; + +struct tsi148_dma_entry { + /* + * The descriptor needs to be aligned on a 64-bit boundary, we increase + * the chance of this by putting it first in the structure. + */ + struct tsi148_dma_descriptor descriptor; + struct list_head list; + dma_addr_t dma_handle; +}; + +/* + * TSI148 ASIC register structure overlays and bit field definitions. + * + * Note: Tsi148 Register Group (CRG) consists of the following + * combination of registers: + * PCFS - PCI Configuration Space Registers + * LCSR - Local Control and Status Registers + * GCSR - Global Control and Status Registers + * CR/CSR - Subset of Configuration ROM / + * Control and Status Registers + */ + + +/* + * Command/Status Registers (CRG + $004) + */ +#define TSI148_PCFS_ID 0x0 +#define TSI148_PCFS_CSR 0x4 +#define TSI148_PCFS_CLASS 0x8 +#define TSI148_PCFS_MISC0 0xC +#define TSI148_PCFS_MBARL 0x10 +#define TSI148_PCFS_MBARU 0x14 + +#define TSI148_PCFS_SUBID 0x28 + +#define TSI148_PCFS_CAPP 0x34 + +#define TSI148_PCFS_MISC1 0x3C + +#define TSI148_PCFS_XCAPP 0x40 +#define TSI148_PCFS_XSTAT 0x44 + +/* + * LCSR definitions + */ + +/* + * Outbound Translations + */ +#define TSI148_LCSR_OT0_OTSAU 0x100 +#define TSI148_LCSR_OT0_OTSAL 0x104 +#define TSI148_LCSR_OT0_OTEAU 0x108 +#define TSI148_LCSR_OT0_OTEAL 0x10C +#define TSI148_LCSR_OT0_OTOFU 0x110 +#define TSI148_LCSR_OT0_OTOFL 0x114 +#define TSI148_LCSR_OT0_OTBS 0x118 +#define TSI148_LCSR_OT0_OTAT 0x11C + +#define TSI148_LCSR_OT1_OTSAU 0x120 +#define TSI148_LCSR_OT1_OTSAL 0x124 +#define TSI148_LCSR_OT1_OTEAU 0x128 +#define TSI148_LCSR_OT1_OTEAL 0x12C +#define TSI148_LCSR_OT1_OTOFU 0x130 +#define TSI148_LCSR_OT1_OTOFL 0x134 +#define TSI148_LCSR_OT1_OTBS 0x138 +#define TSI148_LCSR_OT1_OTAT 0x13C + +#define TSI148_LCSR_OT2_OTSAU 0x140 +#define TSI148_LCSR_OT2_OTSAL 0x144 +#define TSI148_LCSR_OT2_OTEAU 0x148 +#define TSI148_LCSR_OT2_OTEAL 0x14C +#define TSI148_LCSR_OT2_OTOFU 0x150 +#define TSI148_LCSR_OT2_OTOFL 0x154 +#define TSI148_LCSR_OT2_OTBS 0x158 +#define TSI148_LCSR_OT2_OTAT 0x15C + +#define TSI148_LCSR_OT3_OTSAU 0x160 +#define TSI148_LCSR_OT3_OTSAL 0x164 +#define TSI148_LCSR_OT3_OTEAU 0x168 +#define TSI148_LCSR_OT3_OTEAL 0x16C +#define TSI148_LCSR_OT3_OTOFU 0x170 +#define TSI148_LCSR_OT3_OTOFL 0x174 +#define TSI148_LCSR_OT3_OTBS 0x178 +#define TSI148_LCSR_OT3_OTAT 0x17C + +#define TSI148_LCSR_OT4_OTSAU 0x180 +#define TSI148_LCSR_OT4_OTSAL 0x184 +#define TSI148_LCSR_OT4_OTEAU 0x188 +#define TSI148_LCSR_OT4_OTEAL 0x18C +#define TSI148_LCSR_OT4_OTOFU 0x190 +#define TSI148_LCSR_OT4_OTOFL 0x194 +#define TSI148_LCSR_OT4_OTBS 0x198 +#define TSI148_LCSR_OT4_OTAT 0x19C + +#define TSI148_LCSR_OT5_OTSAU 0x1A0 +#define TSI148_LCSR_OT5_OTSAL 0x1A4 +#define TSI148_LCSR_OT5_OTEAU 0x1A8 +#define TSI148_LCSR_OT5_OTEAL 0x1AC +#define TSI148_LCSR_OT5_OTOFU 0x1B0 +#define TSI148_LCSR_OT5_OTOFL 0x1B4 +#define TSI148_LCSR_OT5_OTBS 0x1B8 +#define TSI148_LCSR_OT5_OTAT 0x1BC + +#define TSI148_LCSR_OT6_OTSAU 0x1C0 +#define TSI148_LCSR_OT6_OTSAL 0x1C4 +#define TSI148_LCSR_OT6_OTEAU 0x1C8 +#define TSI148_LCSR_OT6_OTEAL 0x1CC +#define TSI148_LCSR_OT6_OTOFU 0x1D0 +#define TSI148_LCSR_OT6_OTOFL 0x1D4 +#define TSI148_LCSR_OT6_OTBS 0x1D8 +#define 
TSI148_LCSR_OT6_OTAT 0x1DC + +#define TSI148_LCSR_OT7_OTSAU 0x1E0 +#define TSI148_LCSR_OT7_OTSAL 0x1E4 +#define TSI148_LCSR_OT7_OTEAU 0x1E8 +#define TSI148_LCSR_OT7_OTEAL 0x1EC +#define TSI148_LCSR_OT7_OTOFU 0x1F0 +#define TSI148_LCSR_OT7_OTOFL 0x1F4 +#define TSI148_LCSR_OT7_OTBS 0x1F8 +#define TSI148_LCSR_OT7_OTAT 0x1FC + +#define TSI148_LCSR_OT0 0x100 +#define TSI148_LCSR_OT1 0x120 +#define TSI148_LCSR_OT2 0x140 +#define TSI148_LCSR_OT3 0x160 +#define TSI148_LCSR_OT4 0x180 +#define TSI148_LCSR_OT5 0x1A0 +#define TSI148_LCSR_OT6 0x1C0 +#define TSI148_LCSR_OT7 0x1E0 + +static const int TSI148_LCSR_OT[8] = { TSI148_LCSR_OT0, TSI148_LCSR_OT1, + TSI148_LCSR_OT2, TSI148_LCSR_OT3, + TSI148_LCSR_OT4, TSI148_LCSR_OT5, + TSI148_LCSR_OT6, TSI148_LCSR_OT7 }; + +#define TSI148_LCSR_OFFSET_OTSAU 0x0 +#define TSI148_LCSR_OFFSET_OTSAL 0x4 +#define TSI148_LCSR_OFFSET_OTEAU 0x8 +#define TSI148_LCSR_OFFSET_OTEAL 0xC +#define TSI148_LCSR_OFFSET_OTOFU 0x10 +#define TSI148_LCSR_OFFSET_OTOFL 0x14 +#define TSI148_LCSR_OFFSET_OTBS 0x18 +#define TSI148_LCSR_OFFSET_OTAT 0x1C + +/* + * VMEbus interrupt ack + * offset 200 + */ +#define TSI148_LCSR_VIACK1 0x204 +#define TSI148_LCSR_VIACK2 0x208 +#define TSI148_LCSR_VIACK3 0x20C +#define TSI148_LCSR_VIACK4 0x210 +#define TSI148_LCSR_VIACK5 0x214 +#define TSI148_LCSR_VIACK6 0x218 +#define TSI148_LCSR_VIACK7 0x21C + +static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1, + TSI148_LCSR_VIACK2, TSI148_LCSR_VIACK3, + TSI148_LCSR_VIACK4, TSI148_LCSR_VIACK5, + TSI148_LCSR_VIACK6, TSI148_LCSR_VIACK7 }; + +/* + * RMW + * offset 220 + */ +#define TSI148_LCSR_RMWAU 0x220 +#define TSI148_LCSR_RMWAL 0x224 +#define TSI148_LCSR_RMWEN 0x228 +#define TSI148_LCSR_RMWC 0x22C +#define TSI148_LCSR_RMWS 0x230 + +/* + * VMEbus control + * offset 234 + */ +#define TSI148_LCSR_VMCTRL 0x234 +#define TSI148_LCSR_VCTRL 0x238 +#define TSI148_LCSR_VSTAT 0x23C + +/* + * PCI status + * offset 240 + */ +#define TSI148_LCSR_PSTAT 0x240 + +/* + * VME filter. + * offset 250 + */ +#define TSI148_LCSR_VMEFL 0x250 + + /* + * VME exception. 
+ * offset 260 + */ +#define TSI148_LCSR_VEAU 0x260 +#define TSI148_LCSR_VEAL 0x264 +#define TSI148_LCSR_VEAT 0x268 + + /* + * PCI error + * offset 270 + */ +#define TSI148_LCSR_EDPAU 0x270 +#define TSI148_LCSR_EDPAL 0x274 +#define TSI148_LCSR_EDPXA 0x278 +#define TSI148_LCSR_EDPXS 0x27C +#define TSI148_LCSR_EDPAT 0x280 + + /* + * Inbound Translations + * offset 300 + */ +#define TSI148_LCSR_IT0_ITSAU 0x300 +#define TSI148_LCSR_IT0_ITSAL 0x304 +#define TSI148_LCSR_IT0_ITEAU 0x308 +#define TSI148_LCSR_IT0_ITEAL 0x30C +#define TSI148_LCSR_IT0_ITOFU 0x310 +#define TSI148_LCSR_IT0_ITOFL 0x314 +#define TSI148_LCSR_IT0_ITAT 0x318 + +#define TSI148_LCSR_IT1_ITSAU 0x320 +#define TSI148_LCSR_IT1_ITSAL 0x324 +#define TSI148_LCSR_IT1_ITEAU 0x328 +#define TSI148_LCSR_IT1_ITEAL 0x32C +#define TSI148_LCSR_IT1_ITOFU 0x330 +#define TSI148_LCSR_IT1_ITOFL 0x334 +#define TSI148_LCSR_IT1_ITAT 0x338 + +#define TSI148_LCSR_IT2_ITSAU 0x340 +#define TSI148_LCSR_IT2_ITSAL 0x344 +#define TSI148_LCSR_IT2_ITEAU 0x348 +#define TSI148_LCSR_IT2_ITEAL 0x34C +#define TSI148_LCSR_IT2_ITOFU 0x350 +#define TSI148_LCSR_IT2_ITOFL 0x354 +#define TSI148_LCSR_IT2_ITAT 0x358 + +#define TSI148_LCSR_IT3_ITSAU 0x360 +#define TSI148_LCSR_IT3_ITSAL 0x364 +#define TSI148_LCSR_IT3_ITEAU 0x368 +#define TSI148_LCSR_IT3_ITEAL 0x36C +#define TSI148_LCSR_IT3_ITOFU 0x370 +#define TSI148_LCSR_IT3_ITOFL 0x374 +#define TSI148_LCSR_IT3_ITAT 0x378 + +#define TSI148_LCSR_IT4_ITSAU 0x380 +#define TSI148_LCSR_IT4_ITSAL 0x384 +#define TSI148_LCSR_IT4_ITEAU 0x388 +#define TSI148_LCSR_IT4_ITEAL 0x38C +#define TSI148_LCSR_IT4_ITOFU 0x390 +#define TSI148_LCSR_IT4_ITOFL 0x394 +#define TSI148_LCSR_IT4_ITAT 0x398 + +#define TSI148_LCSR_IT5_ITSAU 0x3A0 +#define TSI148_LCSR_IT5_ITSAL 0x3A4 +#define TSI148_LCSR_IT5_ITEAU 0x3A8 +#define TSI148_LCSR_IT5_ITEAL 0x3AC +#define TSI148_LCSR_IT5_ITOFU 0x3B0 +#define TSI148_LCSR_IT5_ITOFL 0x3B4 +#define TSI148_LCSR_IT5_ITAT 0x3B8 + +#define TSI148_LCSR_IT6_ITSAU 0x3C0 +#define TSI148_LCSR_IT6_ITSAL 0x3C4 +#define TSI148_LCSR_IT6_ITEAU 0x3C8 +#define TSI148_LCSR_IT6_ITEAL 0x3CC +#define TSI148_LCSR_IT6_ITOFU 0x3D0 +#define TSI148_LCSR_IT6_ITOFL 0x3D4 +#define TSI148_LCSR_IT6_ITAT 0x3D8 + +#define TSI148_LCSR_IT7_ITSAU 0x3E0 +#define TSI148_LCSR_IT7_ITSAL 0x3E4 +#define TSI148_LCSR_IT7_ITEAU 0x3E8 +#define TSI148_LCSR_IT7_ITEAL 0x3EC +#define TSI148_LCSR_IT7_ITOFU 0x3F0 +#define TSI148_LCSR_IT7_ITOFL 0x3F4 +#define TSI148_LCSR_IT7_ITAT 0x3F8 + + +#define TSI148_LCSR_IT0 0x300 +#define TSI148_LCSR_IT1 0x320 +#define TSI148_LCSR_IT2 0x340 +#define TSI148_LCSR_IT3 0x360 +#define TSI148_LCSR_IT4 0x380 +#define TSI148_LCSR_IT5 0x3A0 +#define TSI148_LCSR_IT6 0x3C0 +#define TSI148_LCSR_IT7 0x3E0 + +static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1, + TSI148_LCSR_IT2, TSI148_LCSR_IT3, + TSI148_LCSR_IT4, TSI148_LCSR_IT5, + TSI148_LCSR_IT6, TSI148_LCSR_IT7 }; + +#define TSI148_LCSR_OFFSET_ITSAU 0x0 +#define TSI148_LCSR_OFFSET_ITSAL 0x4 +#define TSI148_LCSR_OFFSET_ITEAU 0x8 +#define TSI148_LCSR_OFFSET_ITEAL 0xC +#define TSI148_LCSR_OFFSET_ITOFU 0x10 +#define TSI148_LCSR_OFFSET_ITOFL 0x14 +#define TSI148_LCSR_OFFSET_ITAT 0x18 + + /* + * Inbound Translation GCSR + * offset 400 + */ +#define TSI148_LCSR_GBAU 0x400 +#define TSI148_LCSR_GBAL 0x404 +#define TSI148_LCSR_GCSRAT 0x408 + + /* + * Inbound Translation CRG + * offset 40C + */ +#define TSI148_LCSR_CBAU 0x40C +#define TSI148_LCSR_CBAL 0x410 +#define TSI148_LCSR_CSRAT 0x414 + + /* + * Inbound Translation CR/CSR + * CRG + * offset 418 + */ +#define 
TSI148_LCSR_CROU 0x418
+#define TSI148_LCSR_CROL 0x41C
+#define TSI148_LCSR_CRAT 0x420
+
+ /*
+ * Inbound Translation Location Monitor
+ * offset 424
+ */
+#define TSI148_LCSR_LMBAU 0x424
+#define TSI148_LCSR_LMBAL 0x428
+#define TSI148_LCSR_LMAT 0x42C
+
+ /*
+ * VMEbus Interrupt Control.
+ * offset 430
+ */
+#define TSI148_LCSR_BCU 0x430
+#define TSI148_LCSR_BCL 0x434
+#define TSI148_LCSR_BPGTR 0x438
+#define TSI148_LCSR_BPCTR 0x43C
+#define TSI148_LCSR_VICR 0x440
+
+ /*
+ * Local Bus Interrupt Control.
+ * offset 448
+ */
+#define TSI148_LCSR_INTEN 0x448
+#define TSI148_LCSR_INTEO 0x44C
+#define TSI148_LCSR_INTS 0x450
+#define TSI148_LCSR_INTC 0x454
+#define TSI148_LCSR_INTM1 0x458
+#define TSI148_LCSR_INTM2 0x45C
+
+ /*
+ * DMA Controllers
+ * offset 500
+ */
+#define TSI148_LCSR_DCTL0 0x500
+#define TSI148_LCSR_DSTA0 0x504
+#define TSI148_LCSR_DCSAU0 0x508
+#define TSI148_LCSR_DCSAL0 0x50C
+#define TSI148_LCSR_DCDAU0 0x510
+#define TSI148_LCSR_DCDAL0 0x514
+#define TSI148_LCSR_DCLAU0 0x518
+#define TSI148_LCSR_DCLAL0 0x51C
+#define TSI148_LCSR_DSAU0 0x520
+#define TSI148_LCSR_DSAL0 0x524
+#define TSI148_LCSR_DDAU0 0x528
+#define TSI148_LCSR_DDAL0 0x52C
+#define TSI148_LCSR_DSAT0 0x530
+#define TSI148_LCSR_DDAT0 0x534
+#define TSI148_LCSR_DNLAU0 0x538
+#define TSI148_LCSR_DNLAL0 0x53C
+#define TSI148_LCSR_DCNT0 0x540
+#define TSI148_LCSR_DDBS0 0x544
+
+#define TSI148_LCSR_DCTL1 0x580
+#define TSI148_LCSR_DSTA1 0x584
+#define TSI148_LCSR_DCSAU1 0x588
+#define TSI148_LCSR_DCSAL1 0x58C
+#define TSI148_LCSR_DCDAU1 0x590
+#define TSI148_LCSR_DCDAL1 0x594
+#define TSI148_LCSR_DCLAU1 0x598
+#define TSI148_LCSR_DCLAL1 0x59C
+#define TSI148_LCSR_DSAU1 0x5A0
+#define TSI148_LCSR_DSAL1 0x5A4
+#define TSI148_LCSR_DDAU1 0x5A8
+#define TSI148_LCSR_DDAL1 0x5AC
+#define TSI148_LCSR_DSAT1 0x5B0
+#define TSI148_LCSR_DDAT1 0x5B4
+#define TSI148_LCSR_DNLAU1 0x5B8
+#define TSI148_LCSR_DNLAL1 0x5BC
+#define TSI148_LCSR_DCNT1 0x5C0
+#define TSI148_LCSR_DDBS1 0x5C4
+
+#define TSI148_LCSR_DMA0 0x500
+#define TSI148_LCSR_DMA1 0x580
+
+
+static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0,
+ TSI148_LCSR_DMA1 };
+
+#define TSI148_LCSR_OFFSET_DCTL 0x0
+#define TSI148_LCSR_OFFSET_DSTA 0x4
+#define TSI148_LCSR_OFFSET_DCSAU 0x8
+#define TSI148_LCSR_OFFSET_DCSAL 0xC
+#define TSI148_LCSR_OFFSET_DCDAU 0x10
+#define TSI148_LCSR_OFFSET_DCDAL 0x14
+#define TSI148_LCSR_OFFSET_DCLAU 0x18
+#define TSI148_LCSR_OFFSET_DCLAL 0x1C
+#define TSI148_LCSR_OFFSET_DSAU 0x20
+#define TSI148_LCSR_OFFSET_DSAL 0x24
+#define TSI148_LCSR_OFFSET_DDAU 0x28
+#define TSI148_LCSR_OFFSET_DDAL 0x2C
+#define TSI148_LCSR_OFFSET_DSAT 0x30
+#define TSI148_LCSR_OFFSET_DDAT 0x34
+#define TSI148_LCSR_OFFSET_DNLAU 0x38
+#define TSI148_LCSR_OFFSET_DNLAL 0x3C
+#define TSI148_LCSR_OFFSET_DCNT 0x40
+#define TSI148_LCSR_OFFSET_DDBS 0x44
+
+ /*
+ * GCSR Register Group
+ */
+
+ /*
+ * GCSR CRG
+ * offset 00 600 - DEVI/VENI
+ * offset 04 604 - CTRL/GA/REVID
+ * offset 08 608 - Semaphore3/2/1/0
+ * offset 0C 60C - Semaphore7/6/5/4
+ */
+#define TSI148_GCSR_ID 0x600
+#define TSI148_GCSR_CSR 0x604
+#define TSI148_GCSR_SEMA0 0x608
+#define TSI148_GCSR_SEMA1 0x60C
+
+ /*
+ * Mail Box
+ * GCSR CRG
+ * offset 10 610 - Mailbox0
+ */
+#define TSI148_GCSR_MBOX0 0x610
+#define TSI148_GCSR_MBOX1 0x614
+#define TSI148_GCSR_MBOX2 0x618
+#define TSI148_GCSR_MBOX3 0x61C
+
+static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
+ TSI148_GCSR_MBOX1,
+ TSI148_GCSR_MBOX2,
+ TSI148_GCSR_MBOX3 };
+
+ /*
+ * CR/CSR
+ */
+
+ /*
+ * CR/CSR CRG
+ * offset 7FFF4 FF4 - CSRBCR
+ * offset 7FFF8 FF8 - CSRBSR
+ * offset 7FFFC FFC - CBAR
+ */
+#define TSI148_CSRBCR 0xFF4
+#define TSI148_CSRBSR 0xFF8
+#define TSI148_CBAR 0xFFC
+
+
+
+
+ /*
+ * TSI148 Register Bit Definitions
+ */
+
+ /*
+ * PCFS Register Set
+ */
+#define TSI148_PCFS_CMMD_SERR (1<<8) /* SERR_L out pin sys err */
+#define TSI148_PCFS_CMMD_PERR (1<<6) /* PERR_L out pin parity */
+#define TSI148_PCFS_CMMD_MSTR (1<<2) /* PCI bus master */
+#define TSI148_PCFS_CMMD_MEMSP (1<<1) /* PCI mem space access */
+#define TSI148_PCFS_CMMD_IOSP (1<<0) /* PCI I/O space enable */
+
+#define TSI148_PCFS_STAT_RCPVE (1<<15) /* Detected Parity Error */
+#define TSI148_PCFS_STAT_SIGSE (1<<14) /* Signalled System Error */
+#define TSI148_PCFS_STAT_RCVMA (1<<13) /* Received Master Abort */
+#define TSI148_PCFS_STAT_RCVTA (1<<12) /* Received Target Abort */
+#define TSI148_PCFS_STAT_SIGTA (1<<11) /* Signalled Target Abort */
+#define TSI148_PCFS_STAT_SELTIM (3<<9) /* DEVSEL Timing */
+#define TSI148_PCFS_STAT_DPAR (1<<8) /* Data Parity Err Reported */
+#define TSI148_PCFS_STAT_FAST (1<<7) /* Fast back-to-back Cap */
+#define TSI148_PCFS_STAT_P66M (1<<5) /* 66 MHz Capable */
+#define TSI148_PCFS_STAT_CAPL (1<<4) /* Capab List - address $34 */
+
+/*
+ * Revision ID/Class Code Registers (CRG +$008)
+ */
+#define TSI148_PCFS_CLAS_M (0xFF<<24) /* Class ID */
+#define TSI148_PCFS_SUBCLAS_M (0xFF<<16) /* Sub-Class ID */
+#define TSI148_PCFS_PROGIF_M (0xFF<<8) /* Programming Interface */
+#define TSI148_PCFS_REVID_M (0xFF<<0) /* Rev ID */
+
+/*
+ * Cache Line Size/ Master Latency Timer/ Header Type Registers (CRG + $00C)
+ */
+#define TSI148_PCFS_HEAD_M (0xFF<<16) /* Header Type */
+#define TSI148_PCFS_MLAT_M (0xFF<<8) /* Master Lat Timer */
+#define TSI148_PCFS_CLSZ_M (0xFF<<0) /* Cache Line Size */
+
+/*
+ * Memory Base Address Lower Reg (CRG + $010)
+ */
+#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
+#define TSI148_PCFS_MBARL_PRE (1<<3) /* Prefetch */
+#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */
+#define TSI148_PCFS_MBARL_IOMEM (1<<0) /* I/O Space Indicator */
+
+/*
+ * Message Signaled Interrupt Capabilities Register (CRG + $040)
+ */
+#define TSI148_PCFS_MSICAP_64BAC (1<<7) /* 64-bit Address Capable */
+#define TSI148_PCFS_MSICAP_MME_M (7<<4) /* Multiple Msg Enable Mask */
+#define TSI148_PCFS_MSICAP_MMC_M (7<<1) /* Multiple Msg Capable Mask */
+#define TSI148_PCFS_MSICAP_MSIEN (1<<0) /* Msg signaled INT Enable */
+
+/*
+ * Message Address Lower Register (CRG +$044)
+ */
+#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF<<2) /* Mask */
+
+/*
+ * Message Data Register (CRG + 4C)
+ */
+#define TSI148_PCFS_MSIMD_M (0xFFFF<<0) /* Mask */
+
+/*
+ * PCI-X Capabilities Register (CRG + $050)
+ */
+#define TSI148_PCFS_PCIXCAP_MOST_M (7<<4) /* Max outstanding Split Tran */
+#define TSI148_PCFS_PCIXCAP_MMRBC_M (3<<2) /* Max Mem Read byte cnt */
+#define TSI148_PCFS_PCIXCAP_ERO (1<<1) /* Enable Relaxed Ordering */
+#define TSI148_PCFS_PCIXCAP_DPERE (1<<0) /* Data Parity Recover Enable */
+
+/*
+ * PCI-X Status Register (CRG +$054)
+ */
+#define TSI148_PCFS_PCIXSTAT_RSCEM (1<<29) /* Received Split Comp Error */
+#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */
+#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans
+ */
+#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */
+#define TSI148_PCFS_PCIXSTAT_DC (1<<20) /* Device Complexity */
+#define TSI148_PCFS_PCIXSTAT_USC (1<<19) /* Unexpected Split comp */
+#define TSI148_PCFS_PCIXSTAT_SCD (1<<18) /* Split completion discard */
+#define TSI148_PCFS_PCIXSTAT_133C (1<<17) /* 133MHz capable */
+#define TSI148_PCFS_PCIXSTAT_64D (1<<16) /* 64 bit device */
+#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF<<8) /* Bus number */
+#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F<<3) /* Device number */
+#define TSI148_PCFS_PCIXSTAT_FN_M (7<<0) /* Function Number */
+
+/*
+ * LCSR Registers
+ */
+
+/*
+ * Outbound Translation Starting Address Lower
+ */
+#define TSI148_LCSR_OTSAL_M (0xFFFF<<16) /* Mask */
+
+/*
+ * Outbound Translation Ending Address Lower
+ */
+#define TSI148_LCSR_OTEAL_M (0xFFFF<<16) /* Mask */
+
+/*
+ * Outbound Translation Offset Lower
+ */
+#define TSI148_LCSR_OTOFFL_M (0xFFFF<<16) /* Mask */
+
+/*
+ * Outbound Translation 2eSST Broadcast Select
+ */
+#define TSI148_LCSR_OTBS_M (0xFFFFF<<0) /* Mask */
+
+/*
+ * Outbound Translation Attribute
+ */
+#define TSI148_LCSR_OTAT_EN (1<<31) /* Window Enable */
+#define TSI148_LCSR_OTAT_MRPFD (1<<18) /* Prefetch Disable */
+
+#define TSI148_LCSR_OTAT_PFS_M (3<<16) /* Prefetch Size Mask */
+#define TSI148_LCSR_OTAT_PFS_2 (0<<16) /* 2 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_4 (1<<16) /* 4 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_8 (2<<16) /* 8 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_16 (3<<16) /* 16 Cache Lines P Size */
+
+#define TSI148_LCSR_OTAT_2eSSTM_M (7<<11) /* 2eSST Xfer Rate Mask */
+#define TSI148_LCSR_OTAT_2eSSTM_160 (0<<11) /* 160MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_OTAT_2eSSTM_267 (1<<11) /* 267MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_OTAT_2eSSTM_320 (2<<11) /* 320MB/s 2eSST Xfer Rate */
+
+#define TSI148_LCSR_OTAT_TM_M (7<<8) /* Xfer Protocol Mask */
+#define TSI148_LCSR_OTAT_TM_SCT (0<<8) /* SCT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_BLT (1<<8) /* BLT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_MBLT (2<<8) /* MBLT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eVME (3<<8) /* 2eVME Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eSST (4<<8) /* 2eSST Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eSSTB (5<<8) /* 2eSST Bcast Xfer Protocol */
+
+#define TSI148_LCSR_OTAT_DBW_M (3<<6) /* Max Data Width */
+#define TSI148_LCSR_OTAT_DBW_16 (0<<6) /* 16-bit Data Width */
+#define TSI148_LCSR_OTAT_DBW_32 (1<<6) /* 32-bit Data Width */
+
+#define TSI148_LCSR_OTAT_SUP (1<<5) /* Supervisory Access */
+#define TSI148_LCSR_OTAT_PGM (1<<4) /* Program Access */
+
+#define TSI148_LCSR_OTAT_AMODE_M (0xf<<0) /* Address Mode Mask */
+#define TSI148_LCSR_OTAT_AMODE_A16 (0<<0) /* A16 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A24 (1<<0) /* A24 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A32 (2<<0) /* A32 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A64 (4<<0) /* A64 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_CRCSR (5<<0) /* CR/CSR Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER1 (8<<0) /* User1 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER2 (9<<0) /* User2 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER3 (10<<0) /* User3 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER4 (11<<0) /* User4 Address Space */
+
+/*
+ * VME Master Control Register CRG+$234
+ */
+#define TSI148_LCSR_VMCTRL_VSA (1<<27) /* VMEbus Stop Ack */
+#define TSI148_LCSR_VMCTRL_VS (1<<26) /* VMEbus Stop */
+#define TSI148_LCSR_VMCTRL_DHB (1<<25) /* Device Has Bus */
+#define TSI148_LCSR_VMCTRL_DWB (1<<24) /* Device Wants Bus */
+
+#define TSI148_LCSR_VMCTRL_RMWEN (1<<20) /* RMW Enable */
+
+#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask
+ */
+#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */
+#define TSI148_LCSR_VMCTRL_ATO_128 (1<<16) /* 128 us */
+#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */
+#define TSI148_LCSR_VMCTRL_ATO_2M (3<<16) /* 2 ms */
+#define TSI148_LCSR_VMCTRL_ATO_8M (4<<16) /* 8 ms */
+#define TSI148_LCSR_VMCTRL_ATO_32M (5<<16) /* 32 ms */
+#define TSI148_LCSR_VMCTRL_ATO_128M (6<<16) /* 128 ms */
+#define TSI148_LCSR_VMCTRL_ATO_DIS (7<<16) /* Disabled */
+
+#define TSI148_LCSR_VMCTRL_VTOFF_M (7<<12) /* VMEbus Master Time off */
+#define TSI148_LCSR_VMCTRL_VTOFF_0 (0<<12) /* 0us */
+#define TSI148_LCSR_VMCTRL_VTOFF_1 (1<<12) /* 1us */
+#define TSI148_LCSR_VMCTRL_VTOFF_2 (2<<12) /* 2us */
+#define TSI148_LCSR_VMCTRL_VTOFF_4 (3<<12) /* 4us */
+#define TSI148_LCSR_VMCTRL_VTOFF_8 (4<<12) /* 8us */
+#define TSI148_LCSR_VMCTRL_VTOFF_16 (5<<12) /* 16us */
+#define TSI148_LCSR_VMCTRL_VTOFF_32 (6<<12) /* 32us */
+#define TSI148_LCSR_VMCTRL_VTOFF_64 (7<<12) /* 64us */
+
+#define TSI148_LCSR_VMCTRL_VTON_M (7<<8) /* VMEbus Master Time On */
+#define TSI148_LCSR_VMCTRL_VTON_4 (0<<8) /* 4us */
+#define TSI148_LCSR_VMCTRL_VTON_8 (1<<8) /* 8us */
+#define TSI148_LCSR_VMCTRL_VTON_16 (2<<8) /* 16us */
+#define TSI148_LCSR_VMCTRL_VTON_32 (3<<8) /* 32us */
+#define TSI148_LCSR_VMCTRL_VTON_64 (4<<8) /* 64us */
+#define TSI148_LCSR_VMCTRL_VTON_128 (5<<8) /* 128us */
+#define TSI148_LCSR_VMCTRL_VTON_256 (6<<8) /* 256us */
+#define TSI148_LCSR_VMCTRL_VTON_512 (7<<8) /* 512us */
+
+#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask
+ */
+#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_R_D (1<<3) /* Time on and REQ or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */
+
+#define TSI148_LCSR_VMCTRL_VFAIR (1<<2) /* VMEbus Master Fair Mode */
+#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask
+ */
+
+/*
+ * VMEbus Control Register CRG+$238
+ */
+#define TSI148_LCSR_VCTRL_LRE (1<<31) /* Late Retry Enable */
+
+#define TSI148_LCSR_VCTRL_DLT_M (0xF<<24) /* Deadlock Timer */
+#define TSI148_LCSR_VCTRL_DLT_OFF (0<<24) /* Deadlock Timer Off */
+#define TSI148_LCSR_VCTRL_DLT_16 (1<<24) /* 16 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_32 (2<<24) /* 32 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_64 (3<<24) /* 64 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_128 (4<<24) /* 128 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_256 (5<<24) /* 256 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_512 (6<<24) /* 512 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_1024 (7<<24) /* 1024 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_2048 (8<<24) /* 2048 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_4096 (9<<24) /* 4096 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_8192 (0xA<<24) /* 8192 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */
+
+#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy
+ */
+
+#define TSI148_LCSR_VCTRL_SRESET (1<<17) /* System Reset */
+#define TSI148_LCSR_VCTRL_LRESET (1<<16) /* Local Reset */
+
+#define TSI148_LCSR_VCTRL_SFAILAI (1<<15) /* SYSFAIL Auto Slot ID */
+#define TSI148_LCSR_VCTRL_BID_M (0x1F<<8) /* Broadcast ID Mask */
+
+#define TSI148_LCSR_VCTRL_ATOEN (1<<7) /* Arbiter Time-out Enable */
+#define TSI148_LCSR_VCTRL_ROBIN (1<<6) /* VMEbus Round Robin */
+
+#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask
+ */
+#define TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */
+#define TSI148_LCSR_VCTRL_GTO_16 (1<<0) /* 16 us */
+#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */
+#define TSI148_LCSR_VCTRL_GTO_64 (3<<0) /* 64 us */
+#define TSI148_LCSR_VCTRL_GTO_128 (4<<0) /* 128 us */
+#define TSI148_LCSR_VCTRL_GTO_256 (5<<0) /* 256 us */
+#define TSI148_LCSR_VCTRL_GTO_512 (6<<0) /* 512 us */
+#define TSI148_LCSR_VCTRL_GTO_DIS (7<<0) /* Disabled */
+
+/*
+ * VMEbus Status Register CRG + $23C
+ */
+#define TSI148_LCSR_VSTAT_CPURST (1<<15) /* Clear power up reset */
+#define TSI148_LCSR_VSTAT_BRDFL (1<<14) /* Board fail */
+#define TSI148_LCSR_VSTAT_PURSTS (1<<12) /* Power up reset status */
+#define TSI148_LCSR_VSTAT_BDFAILS (1<<11) /* Board Fail Status */
+#define TSI148_LCSR_VSTAT_SYSFAILS (1<<10) /* System Fail Status */
+#define TSI148_LCSR_VSTAT_ACFAILS (1<<9) /* AC fail status */
+#define TSI148_LCSR_VSTAT_SCONS (1<<8) /* System Cont Status */
+#define TSI148_LCSR_VSTAT_GAP (1<<5) /* Geographic Addr Parity */
+#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
+
+/*
+ * PCI Configuration Status Register CRG+$240
+ */
+#define TSI148_LCSR_PSTAT_REQ64S (1<<6) /* Request 64 status set */
+#define TSI148_LCSR_PSTAT_M66ENS (1<<5) /* M66ENS 66MHz enable */
+#define TSI148_LCSR_PSTAT_FRAMES (1<<4) /* Frame Status */
+#define TSI148_LCSR_PSTAT_IRDYS (1<<3) /* IRDY status */
+#define TSI148_LCSR_PSTAT_DEVSELS (1<<2) /* DEVSEL status */
+#define TSI148_LCSR_PSTAT_STOPS (1<<1) /* STOP status */
+#define TSI148_LCSR_PSTAT_TRDYS (1<<0) /* TRDY status */
+
+/*
+ * VMEbus Exception Attributes Register CRG + $268
+ */
+#define TSI148_LCSR_VEAT_VES (1<<31) /* Status */
+#define TSI148_LCSR_VEAT_VEOF (1<<30) /* Overflow */
+#define TSI148_LCSR_VEAT_VESCL (1<<29) /* Status Clear */
+#define TSI148_LCSR_VEAT_2EOT (1<<21) /* 2e Odd Termination */
+#define TSI148_LCSR_VEAT_2EST (1<<20) /* 2e Slave terminated */
+#define TSI148_LCSR_VEAT_BERR (1<<19) /* Bus Error */
+#define TSI148_LCSR_VEAT_LWORD (1<<18) /* LWORD_ signal state */
+#define TSI148_LCSR_VEAT_WRITE (1<<17) /* WRITE_ signal state */
+#define TSI148_LCSR_VEAT_IACK (1<<16) /* IACK_ signal state */
+#define TSI148_LCSR_VEAT_DS1 (1<<15) /* DS1_ signal state */
+#define TSI148_LCSR_VEAT_DS0 (1<<14) /* DS0_ signal state */
+#define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */
+#define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */
+
+
+/*
+ * VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280
+ */
+#define TSI148_LCSR_EDPAT_EDPCL (1<<29)
+
+/*
+ * Inbound Translation Starting Address Lower
+ */
+#define TSI148_LCSR_ITSAL6432_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_ITSAL24_M (0x00FFF<<12) /* Mask */
+#define TSI148_LCSR_ITSAL16_M (0x0000FFF<<4) /* Mask */
+
+/*
+ * Inbound Translation Ending Address Lower
+ */
+#define TSI148_LCSR_ITEAL6432_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_ITEAL24_M (0x00FFF<<12) /* Mask */
+#define TSI148_LCSR_ITEAL16_M (0x0000FFF<<4) /* Mask */
+
+/*
+ * Inbound Translation Offset Lower
+ */
+#define TSI148_LCSR_ITOFFL6432_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_ITOFFL24_M (0xFFFFF<<12) /* Mask */
+#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF<<4) /* Mask */
+
+/*
+ * Inbound Translation Attribute
+ */
+#define TSI148_LCSR_ITAT_EN (1<<31) /* Window Enable */
+#define TSI148_LCSR_ITAT_TH (1<<18) /* Prefetch Threshold */
+
+#define TSI148_LCSR_ITAT_VFS_M (3<<16) /* Virtual FIFO Size Mask */
+#define TSI148_LCSR_ITAT_VFS_64 (0<<16) /* 64 bytes Virtual FIFO Size */
+#define TSI148_LCSR_ITAT_VFS_128 (1<<16) /* 128 bytes Virtual FIFO Sz */
+#define TSI148_LCSR_ITAT_VFS_256 (2<<16) /* 256 bytes Virtual FIFO Sz */
+#define TSI148_LCSR_ITAT_VFS_512 (3<<16) /* 512 bytes Virtual FIFO Sz */
+
+#define TSI148_LCSR_ITAT_2eSSTM_M (7<<12) /* 2eSST Xfer Rate Mask */
+#define TSI148_LCSR_ITAT_2eSSTM_160 (0<<12) /* 160MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_ITAT_2eSSTM_267 (1<<12) /* 267MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_ITAT_2eSSTM_320 (2<<12) /* 320MB/s 2eSST Xfer Rate */
+
+#define TSI148_LCSR_ITAT_2eSSTB (1<<11) /* 2eSST Bcast Xfer Protocol */
+#define TSI148_LCSR_ITAT_2eSST (1<<10) /* 2eSST Xfer Protocol */
+#define TSI148_LCSR_ITAT_2eVME (1<<9) /* 2eVME Xfer Protocol */
+#define TSI148_LCSR_ITAT_MBLT (1<<8) /* MBLT Xfer Protocol */
+#define TSI148_LCSR_ITAT_BLT (1<<7) /* BLT Xfer Protocol */
+
+#define TSI148_LCSR_ITAT_AS_M (7<<4) /* Address Space Mask */
+#define TSI148_LCSR_ITAT_AS_A16 (0<<4) /* A16 Address Space */
+#define TSI148_LCSR_ITAT_AS_A24 (1<<4) /* A24 Address Space */
+#define TSI148_LCSR_ITAT_AS_A32 (2<<4) /* A32 Address Space */
+#define TSI148_LCSR_ITAT_AS_A64 (4<<4) /* A64 Address Space */
+
+#define TSI148_LCSR_ITAT_SUPR (1<<3) /* Supervisor Access */
+#define TSI148_LCSR_ITAT_NPRIV (1<<2) /* Non-Priv (User) Access */
+#define TSI148_LCSR_ITAT_PGM (1<<1) /* Program Access */
+#define TSI148_LCSR_ITAT_DATA (1<<0) /* Data Access */
+
+/*
+ * GCSR Base Address Lower Address CRG +$404
+ */
+#define TSI148_LCSR_GBAL_M (0x7FFFFFF<<5) /* Mask */
+
+/*
+ * GCSR Attribute Register CRG + $408
+ */
+#define TSI148_LCSR_GCSRAT_EN (1<<7) /* Enable access to GCSR */
+
+#define TSI148_LCSR_GCSRAT_AS_M (7<<4) /* Address Space Mask */
+#define TSI148_LCSR_GCSRAT_AS_A16 (0<<4) /* Address Space 16 */
+#define TSI148_LCSR_GCSRAT_AS_A24 (1<<4) /* Address Space 24 */
+#define TSI148_LCSR_GCSRAT_AS_A32 (2<<4) /* Address Space 32 */
+#define TSI148_LCSR_GCSRAT_AS_A64 (4<<4) /* Address Space 64 */
+
+#define TSI148_LCSR_GCSRAT_SUPR (1<<3) /* Sup set - GCSR decoder */
+#define TSI148_LCSR_GCSRAT_NPRIV (1<<2) /* Non-Privileged set - GCSR */
+#define TSI148_LCSR_GCSRAT_PGM (1<<1) /* Program set - GCSR decoder */
+#define TSI148_LCSR_GCSRAT_DATA (1<<0) /* Data set - GCSR decoder */
+
+/*
+ * CRG Base Address Lower Address CRG + $410
+ */
+#define TSI148_LCSR_CBAL_M (0xFFFFF<<12)
+
+/*
+ * CRG Attribute Register CRG + $414
+ */
+#define TSI148_LCSR_CRGAT_EN (1<<7) /* Enable CRG Access */
+
+#define TSI148_LCSR_CRGAT_AS_M (7<<4) /* Address Space */
+#define TSI148_LCSR_CRGAT_AS_A16 (0<<4) /* Address Space 16 */
+#define TSI148_LCSR_CRGAT_AS_A24 (1<<4) /* Address Space 24 */
+#define TSI148_LCSR_CRGAT_AS_A32 (2<<4) /* Address Space 32 */
+#define TSI148_LCSR_CRGAT_AS_A64 (4<<4) /* Address Space 64 */
+
+#define TSI148_LCSR_CRGAT_SUPR (1<<3) /* Supervisor Access */
+#define TSI148_LCSR_CRGAT_NPRIV (1<<2) /* Non-Privileged (User) Access */
+#define TSI148_LCSR_CRGAT_PGM (1<<1) /* Program Access */
+#define TSI148_LCSR_CRGAT_DATA (1<<0) /* Data Access */
+
+/*
+ * CR/CSR Offset Lower Register CRG + $41C
+ */
+#define TSI148_LCSR_CROL_M (0x1FFF<<19) /* Mask */
+
+/*
+ * CR/CSR Attribute register CRG + $420
+ */
+#define TSI148_LCSR_CRAT_EN (1<<7) /* Enable access to CR/CSR */
+
+/*
+ * Location Monitor base address lower register CRG + $428
+ */
+#define TSI148_LCSR_LMBAL_M (0x7FFFFFF<<5) /* Mask */
+
+/*
+ * Location Monitor Attribute Register CRG + $42C
+ */
+#define TSI148_LCSR_LMAT_EN (1<<7) /* Enable Location Monitor */
+
+#define TSI148_LCSR_LMAT_AS_M (7<<4) /* Address Space MASK */
+#define TSI148_LCSR_LMAT_AS_A16 (0<<4) /* A16 */
+#define TSI148_LCSR_LMAT_AS_A24 (1<<4) /* A24 */
+#define TSI148_LCSR_LMAT_AS_A32 (2<<4) /* A32 */
+#define TSI148_LCSR_LMAT_AS_A64 (4<<4) /* A64 */
+
+#define TSI148_LCSR_LMAT_SUPR (1<<3) /* Supervisor Access */
+#define TSI148_LCSR_LMAT_NPRIV (1<<2) /* Non-Priv (User) Access */
+#define TSI148_LCSR_LMAT_PGM (1<<1) /* Program Access */
+#define TSI148_LCSR_LMAT_DATA (1<<0) /* Data Access */
+
+/*
+ * Broadcast Pulse Generator Timer Register CRG + $438
+ */
+#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF<<0) /* Mask */
+
+/*
+ * Broadcast Programmable Clock Timer Register CRG + $43C
+ */
+#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF<<0) /* Mask */
+
+/*
+ * VMEbus Interrupt Control Register CRG + $440
+ */
+#define TSI148_LCSR_VICR_CNTS_M (3<<22) /* Cntr Source MASK */
+#define TSI148_LCSR_VICR_CNTS_DIS (1<<22) /* Cntr Disable */
+#define TSI148_LCSR_VICR_CNTS_IRQ1 (2<<22) /* IRQ1 to Cntr */
+#define TSI148_LCSR_VICR_CNTS_IRQ2 (3<<22) /* IRQ2 to Cntr */
+
+#define TSI148_LCSR_VICR_EDGIS_M (3<<20) /* Edge interrupt MASK */
+#define TSI148_LCSR_VICR_EDGIS_DIS (1<<20) /* Edge interrupt Disable */
+#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2<<20) /* IRQ1 to Edge */
+#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3<<20) /* IRQ2 to Edge */
+
+#define TSI148_LCSR_VICR_IRQIF_M (3<<18) /* IRQ1* Function MASK */
+#define TSI148_LCSR_VICR_IRQIF_NORM (1<<18) /* Normal */
+#define TSI148_LCSR_VICR_IRQIF_PULSE (2<<18) /* Pulse Generator */
+#define TSI148_LCSR_VICR_IRQIF_PROG (3<<18) /* Programmable Clock */
+#define TSI148_LCSR_VICR_IRQIF_1U (4<<18) /* 1us Clock */
+
+#define TSI148_LCSR_VICR_IRQ2F_M (3<<16) /* IRQ2* Function MASK */
+#define TSI148_LCSR_VICR_IRQ2F_NORM (1<<16) /* Normal */
+#define TSI148_LCSR_VICR_IRQ2F_PULSE (2<<16) /* Pulse Generator */
+#define TSI148_LCSR_VICR_IRQ2F_PROG (3<<16) /* Programmable Clock */
+#define TSI148_LCSR_VICR_IRQ2F_1U (4<<16) /* 1us Clock */
+
+#define TSI148_LCSR_VICR_BIP (1<<15) /* Broadcast Interrupt Pulse */
+
+#define TSI148_LCSR_VICR_IRQC (1<<12) /* VMEbus IRQ Clear */
+#define TSI148_LCSR_VICR_IRQS (1<<11) /* VMEbus IRQ Status */
+
+#define TSI148_LCSR_VICR_IRQL_M (7<<8) /* VMEbus SW IRQ Level Mask */
+#define TSI148_LCSR_VICR_IRQL_1 (1<<8) /* VMEbus SW IRQ Level 1 */
+#define TSI148_LCSR_VICR_IRQL_2 (2<<8) /* VMEbus SW IRQ Level 2 */
+#define TSI148_LCSR_VICR_IRQL_3 (3<<8) /* VMEbus SW IRQ Level 3 */
+#define TSI148_LCSR_VICR_IRQL_4 (4<<8) /* VMEbus SW IRQ Level 4 */
+#define TSI148_LCSR_VICR_IRQL_5 (5<<8) /* VMEbus SW IRQ Level 5 */
+#define TSI148_LCSR_VICR_IRQL_6 (6<<8) /* VMEbus SW IRQ Level 6 */
+#define TSI148_LCSR_VICR_IRQL_7 (7<<8) /* VMEbus SW IRQ Level 7 */
+
+static const int TSI148_LCSR_VICR_IRQL[8] = { 0, TSI148_LCSR_VICR_IRQL_1,
+ TSI148_LCSR_VICR_IRQL_2, TSI148_LCSR_VICR_IRQL_3,
+ TSI148_LCSR_VICR_IRQL_4, TSI148_LCSR_VICR_IRQL_5,
+ TSI148_LCSR_VICR_IRQL_6, TSI148_LCSR_VICR_IRQL_7 };
+
+#define TSI148_LCSR_VICR_STID_M (0xFF<<0) /* Status/ID Mask */
+
+/*
+ * Interrupt Enable Register CRG + $448
+ */
+#define TSI148_LCSR_INTEN_DMA1EN (1<<25) /* DMAC 1 */
+#define TSI148_LCSR_INTEN_DMA0EN (1<<24) /* DMAC 0 */
+#define TSI148_LCSR_INTEN_LM3EN (1<<23) /* Location Monitor 3 */
+#define TSI148_LCSR_INTEN_LM2EN (1<<22) /* Location Monitor 2 */
+#define TSI148_LCSR_INTEN_LM1EN (1<<21) /* Location Monitor 1 */
+#define TSI148_LCSR_INTEN_LM0EN (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTEN_MB3EN (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTEN_MB2EN (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTEN_MB1EN (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTEN_MB0EN (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTEN_PERREN (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTEN_VERREN (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTEN_VIEEN (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTEN_IACKEN (1<<10) /* IACK */
+#define TSI148_LCSR_INTEN_SYSFLEN (1<<9) /* System Fail */
+#define TSI148_LCSR_INTEN_ACFLEN (1<<8) /* AC Fail */
+#define TSI148_LCSR_INTEN_IRQ7EN (1<<7) /* IRQ7 */
+#define TSI148_LCSR_INTEN_IRQ6EN (1<<6) /* IRQ6 */
+#define TSI148_LCSR_INTEN_IRQ5EN (1<<5) /* IRQ5 */
+#define TSI148_LCSR_INTEN_IRQ4EN (1<<4) /* IRQ4 */
+#define TSI148_LCSR_INTEN_IRQ3EN (1<<3) /* IRQ3 */
+#define TSI148_LCSR_INTEN_IRQ2EN (1<<2) /* IRQ2 */
+#define TSI148_LCSR_INTEN_IRQ1EN (1<<1) /* IRQ1 */
+
+static const int TSI148_LCSR_INTEN_LMEN[4] = { TSI148_LCSR_INTEN_LM0EN,
+ TSI148_LCSR_INTEN_LM1EN,
+ TSI148_LCSR_INTEN_LM2EN,
+ TSI148_LCSR_INTEN_LM3EN };
+
+static const int TSI148_LCSR_INTEN_IRQEN[7] = { TSI148_LCSR_INTEN_IRQ1EN,
+ TSI148_LCSR_INTEN_IRQ2EN,
+ TSI148_LCSR_INTEN_IRQ3EN,
+ TSI148_LCSR_INTEN_IRQ4EN,
+ TSI148_LCSR_INTEN_IRQ5EN,
+ TSI148_LCSR_INTEN_IRQ6EN,
+ TSI148_LCSR_INTEN_IRQ7EN };
+
+/*
+ * Interrupt Enable Out Register CRG + $44C
+ */
+#define TSI148_LCSR_INTEO_DMA1EO (1<<25) /* DMAC 1 */
+#define TSI148_LCSR_INTEO_DMA0EO (1<<24) /* DMAC 0 */
+#define TSI148_LCSR_INTEO_LM3EO (1<<23) /* Loc Monitor 3 */
+#define TSI148_LCSR_INTEO_LM2EO (1<<22) /* Loc Monitor 2 */
+#define TSI148_LCSR_INTEO_LM1EO (1<<21) /* Loc Monitor 1 */
+#define TSI148_LCSR_INTEO_LM0EO (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTEO_MB3EO (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTEO_MB2EO (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTEO_MB1EO (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTEO_MB0EO (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTEO_PERREO (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTEO_VERREO (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTEO_VIEEO (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTEO_IACKEO (1<<10) /* IACK */
+#define TSI148_LCSR_INTEO_SYSFLEO (1<<9) /* System Fail */
+#define TSI148_LCSR_INTEO_ACFLEO (1<<8) /* AC Fail */
+#define TSI148_LCSR_INTEO_IRQ7EO (1<<7) /* IRQ7 */
+#define TSI148_LCSR_INTEO_IRQ6EO (1<<6) /* IRQ6 */
+#define TSI148_LCSR_INTEO_IRQ5EO (1<<5) /* IRQ5 */
+#define TSI148_LCSR_INTEO_IRQ4EO (1<<4) /* IRQ4 */
+#define TSI148_LCSR_INTEO_IRQ3EO (1<<3) /* IRQ3 */
+#define TSI148_LCSR_INTEO_IRQ2EO (1<<2) /* IRQ2 */
+#define TSI148_LCSR_INTEO_IRQ1EO (1<<1) /* IRQ1 */
+
+static const int TSI148_LCSR_INTEO_LMEO[4] = { TSI148_LCSR_INTEO_LM0EO,
+ TSI148_LCSR_INTEO_LM1EO,
+ TSI148_LCSR_INTEO_LM2EO,
+ TSI148_LCSR_INTEO_LM3EO };
+
+static const int TSI148_LCSR_INTEO_IRQEO[7] = { TSI148_LCSR_INTEO_IRQ1EO,
+ TSI148_LCSR_INTEO_IRQ2EO,
+ TSI148_LCSR_INTEO_IRQ3EO,
+ TSI148_LCSR_INTEO_IRQ4EO,
+ TSI148_LCSR_INTEO_IRQ5EO,
+ TSI148_LCSR_INTEO_IRQ6EO,
+ TSI148_LCSR_INTEO_IRQ7EO };
+
+/*
+ * Interrupt Status Register CRG + $450
+ */
+#define TSI148_LCSR_INTS_DMA1S (1<<25) /* DMA 1 */
+#define TSI148_LCSR_INTS_DMA0S (1<<24) /* DMA 0 */
+#define TSI148_LCSR_INTS_LM3S (1<<23) /* Location Monitor 3 */
+#define TSI148_LCSR_INTS_LM2S (1<<22) /* Location Monitor 2 */
+#define TSI148_LCSR_INTS_LM1S (1<<21) /* Location Monitor 1 */
+#define TSI148_LCSR_INTS_LM0S (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTS_MB3S (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTS_MB2S (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTS_MB1S (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTS_MB0S (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTS_PERRS (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTS_VERRS (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTS_VIES (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTS_IACKS (1<<10) /* IACK */
+#define TSI148_LCSR_INTS_SYSFLS (1<<9) /* System Fail */
+#define TSI148_LCSR_INTS_ACFLS (1<<8) /* AC Fail */
+#define TSI148_LCSR_INTS_IRQ7S (1<<7) /* IRQ7 */
+#define TSI148_LCSR_INTS_IRQ6S (1<<6) /* IRQ6 */
+#define TSI148_LCSR_INTS_IRQ5S (1<<5) /* IRQ5 */
+#define TSI148_LCSR_INTS_IRQ4S (1<<4) /* IRQ4 */
+#define TSI148_LCSR_INTS_IRQ3S (1<<3) /* IRQ3 */
+#define TSI148_LCSR_INTS_IRQ2S (1<<2) /* IRQ2 */
+#define TSI148_LCSR_INTS_IRQ1S (1<<1) /* IRQ1 */
+
+static const int TSI148_LCSR_INTS_LMS[4] = { TSI148_LCSR_INTS_LM0S,
+ TSI148_LCSR_INTS_LM1S,
+ TSI148_LCSR_INTS_LM2S,
+ TSI148_LCSR_INTS_LM3S };
+
+static const int TSI148_LCSR_INTS_MBS[4] = { TSI148_LCSR_INTS_MB0S,
+ TSI148_LCSR_INTS_MB1S,
+ TSI148_LCSR_INTS_MB2S,
+ TSI148_LCSR_INTS_MB3S };
+
+/*
+ * Interrupt Clear Register CRG + $454
+ */
+#define TSI148_LCSR_INTC_DMA1C (1<<25) /* DMA 1 */
+#define TSI148_LCSR_INTC_DMA0C (1<<24) /* DMA 0 */
+#define TSI148_LCSR_INTC_LM3C (1<<23) /* Location Monitor 3 */
+#define TSI148_LCSR_INTC_LM2C (1<<22) /* Location Monitor 2 */
+#define TSI148_LCSR_INTC_LM1C (1<<21) /* Location Monitor 1 */
+#define TSI148_LCSR_INTC_LM0C (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTC_MB3C (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTC_MB2C (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTC_MB1C (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTC_MB0C (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTC_PERRC (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTC_VERRC (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTC_VIEC (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTC_IACKC (1<<10) /* IACK */
+#define TSI148_LCSR_INTC_SYSFLC (1<<9) /* System Fail */
+#define TSI148_LCSR_INTC_ACFLC (1<<8) /* AC Fail */
+
+static const int TSI148_LCSR_INTC_LMC[4] = { TSI148_LCSR_INTC_LM0C,
+ TSI148_LCSR_INTC_LM1C,
+ TSI148_LCSR_INTC_LM2C,
+ TSI148_LCSR_INTC_LM3C };
+
+static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
+ TSI148_LCSR_INTC_MB1C,
+ TSI148_LCSR_INTC_MB2C,
+ TSI148_LCSR_INTC_MB3C };
+
+/*
+ * Interrupt Map Register 1 CRG + $458
+ */
+#define TSI148_LCSR_INTM1_DMA1M_M (3<<18) /* DMA 1 */
+#define TSI148_LCSR_INTM1_DMA0M_M (3<<16) /* DMA 0 */
+#define TSI148_LCSR_INTM1_LM3M_M (3<<14) /* Location Monitor 3 */
+#define TSI148_LCSR_INTM1_LM2M_M (3<<12) /* Location Monitor 2 */
+#define TSI148_LCSR_INTM1_LM1M_M (3<<10) /* Location Monitor 1 */
+#define TSI148_LCSR_INTM1_LM0M_M (3<<8) /* Location Monitor 0 */
+#define TSI148_LCSR_INTM1_MB3M_M (3<<6) /* Mail Box 3 */
+#define TSI148_LCSR_INTM1_MB2M_M (3<<4) /* Mail Box 2 */
+#define TSI148_LCSR_INTM1_MB1M_M (3<<2) /* Mail Box 1 */
+#define TSI148_LCSR_INTM1_MB0M_M (3<<0) /* Mail Box 0 */
+
+/*
+ * Interrupt Map Register 2 CRG + $45C
+ */
+#define TSI148_LCSR_INTM2_PERRM_M (3<<26) /* PCI Bus Error */
+#define TSI148_LCSR_INTM2_VERRM_M (3<<24) /* VMEbus Error */
+#define TSI148_LCSR_INTM2_VIEM_M (3<<22) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTM2_IACKM_M (3<<20) /* IACK */
+#define TSI148_LCSR_INTM2_SYSFLM_M (3<<18) /* System Fail */
+#define TSI148_LCSR_INTM2_ACFLM_M (3<<16) /* AC Fail */
+#define TSI148_LCSR_INTM2_IRQ7M_M (3<<14) /* IRQ7 */
+#define TSI148_LCSR_INTM2_IRQ6M_M (3<<12) /* IRQ6 */
+#define TSI148_LCSR_INTM2_IRQ5M_M (3<<10) /* IRQ5 */
+#define TSI148_LCSR_INTM2_IRQ4M_M (3<<8) /* IRQ4 */
+#define TSI148_LCSR_INTM2_IRQ3M_M (3<<6) /* IRQ3 */
+#define TSI148_LCSR_INTM2_IRQ2M_M (3<<4) /* IRQ2 */
+#define TSI148_LCSR_INTM2_IRQ1M_M (3<<2) /* IRQ1 */
+
+/*
+ * DMA Control (0-1) Registers CRG + $500
+ */
+#define TSI148_LCSR_DCTL_ABT (1<<27) /* Abort */
+#define TSI148_LCSR_DCTL_PAU (1<<26) /* Pause */
+#define TSI148_LCSR_DCTL_DGO (1<<25) /* DMA Go */
+
+#define TSI148_LCSR_DCTL_MOD (1<<23) /* Mode */
+
+#define TSI148_LCSR_DCTL_VBKS_M (7<<12) /* VMEbus block Size MASK */
+#define TSI148_LCSR_DCTL_VBKS_32 (0<<12) /* VMEbus block Size 32 */
+#define TSI148_LCSR_DCTL_VBKS_64 (1<<12) /* VMEbus block Size 64 */
+#define TSI148_LCSR_DCTL_VBKS_128 (2<<12) /* VMEbus block Size 128 */
+#define TSI148_LCSR_DCTL_VBKS_256 (3<<12) /* VMEbus block Size 256 */
+#define TSI148_LCSR_DCTL_VBKS_512 (4<<12) /* VMEbus block Size 512 */
+#define TSI148_LCSR_DCTL_VBKS_1024 (5<<12) /* VMEbus block Size 1024 */
+#define TSI148_LCSR_DCTL_VBKS_2048 (6<<12) /* VMEbus block Size 2048 */
+#define TSI148_LCSR_DCTL_VBKS_4096 (7<<12) /* VMEbus block Size 4096 */
+
+#define TSI148_LCSR_DCTL_VBOT_M (7<<8) /* VMEbus back-off MASK */
+#define TSI148_LCSR_DCTL_VBOT_0 (0<<8) /* VMEbus back-off 0us */
+#define TSI148_LCSR_DCTL_VBOT_1 (1<<8) /* VMEbus back-off 1us */
+#define TSI148_LCSR_DCTL_VBOT_2 (2<<8) /* VMEbus back-off 2us */
+#define TSI148_LCSR_DCTL_VBOT_4 (3<<8) /* VMEbus back-off 4us */
+#define TSI148_LCSR_DCTL_VBOT_8 (4<<8) /* VMEbus back-off 8us */
+#define TSI148_LCSR_DCTL_VBOT_16 (5<<8) /* VMEbus back-off 16us */
+#define TSI148_LCSR_DCTL_VBOT_32 (6<<8) /* VMEbus back-off 32us */
+#define TSI148_LCSR_DCTL_VBOT_64 (7<<8) /* VMEbus back-off 64us */
+
+#define TSI148_LCSR_DCTL_PBKS_M (7<<4) /* PCI block size MASK */
+#define TSI148_LCSR_DCTL_PBKS_32 (0<<4) /* PCI block size 32 bytes */
+#define TSI148_LCSR_DCTL_PBKS_64 (1<<4) /* PCI block size 64 bytes */
+#define TSI148_LCSR_DCTL_PBKS_128 (2<<4) /* PCI block size 128 bytes */
+#define TSI148_LCSR_DCTL_PBKS_256 (3<<4) /* PCI block size 256 bytes */
+#define TSI148_LCSR_DCTL_PBKS_512 (4<<4) /* PCI block size 512 bytes */
+#define TSI148_LCSR_DCTL_PBKS_1024 (5<<4) /* PCI block size 1024 bytes */
+#define TSI148_LCSR_DCTL_PBKS_2048 (6<<4) /* PCI block size 2048 bytes */
+#define TSI148_LCSR_DCTL_PBKS_4096 (7<<4) /* PCI block size 4096 bytes */
+
+#define TSI148_LCSR_DCTL_PBOT_M (7<<0) /* PCI back off MASK */
+#define TSI148_LCSR_DCTL_PBOT_0 (0<<0) /* PCI back off 0us */
+#define TSI148_LCSR_DCTL_PBOT_1 (1<<0) /* PCI back off 1us */
+#define TSI148_LCSR_DCTL_PBOT_2 (2<<0) /* PCI back off 2us */
+#define TSI148_LCSR_DCTL_PBOT_4 (3<<0) /* PCI back off 4us */
+#define TSI148_LCSR_DCTL_PBOT_8 (4<<0) /* PCI back off 8us */
+#define TSI148_LCSR_DCTL_PBOT_16 (5<<0) /* PCI back off 16us */
+#define TSI148_LCSR_DCTL_PBOT_32 (6<<0) /* PCI back off 32us */
+#define TSI148_LCSR_DCTL_PBOT_64 (7<<0) /* PCI back off 64us */
+
+/*
+ * DMA Status Registers (0-1) CRG + $504
+ */
+#define TSI148_LCSR_DSTA_SMA (1<<31) /* PCI Signalled Master Abt */
+#define TSI148_LCSR_DSTA_RTA (1<<30) /* PCI Received Target Abt */
+#define TSI148_LCSR_DSTA_MRC (1<<29) /* PCI Max Retry Count */
+#define TSI148_LCSR_DSTA_VBE (1<<28) /* VMEbus error */
+#define TSI148_LCSR_DSTA_ABT (1<<27) /* Abort */
+#define TSI148_LCSR_DSTA_PAU (1<<26) /* Pause */
+#define TSI148_LCSR_DSTA_DON (1<<25) /* Done */
+#define TSI148_LCSR_DSTA_BSY (1<<24) /* Busy */
+
+/*
+ * DMA Current Link Address Lower (0-1)
+ */
+#define TSI148_LCSR_DCLAL_M (0x3FFFFFF<<6) /* Mask */
+
+/*
+ * DMA Source Attribute (0-1) Reg
+ */
+#define TSI148_LCSR_DSAT_TYP_M (3<<28) /* Source Bus Type */
+#define TSI148_LCSR_DSAT_TYP_PCI (0<<28) /* PCI Bus */
+#define TSI148_LCSR_DSAT_TYP_VME (1<<28) /* VMEbus */
+#define TSI148_LCSR_DSAT_TYP_PAT (2<<28) /* Data Pattern */
+
+#define TSI148_LCSR_DSAT_PSZ (1<<25) /* Pattern Size */
+#define TSI148_LCSR_DSAT_NIN (1<<24) /* No Increment */
+
+#define TSI148_LCSR_DSAT_2eSSTM_M (3<<11) /* 2eSST Trans Rate Mask */
+#define TSI148_LCSR_DSAT_2eSSTM_160 (0<<11) /* 160 MB/s */
+#define TSI148_LCSR_DSAT_2eSSTM_267 (1<<11) /* 267 MB/s */
+#define TSI148_LCSR_DSAT_2eSSTM_320 (2<<11) /* 320 MB/s */
+
+#define TSI148_LCSR_DSAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
+#define TSI148_LCSR_DSAT_TM_SCT (0<<8) /* SCT */
+#define TSI148_LCSR_DSAT_TM_BLT (1<<8) /* BLT */
+#define TSI148_LCSR_DSAT_TM_MBLT (2<<8) /* MBLT */
+#define TSI148_LCSR_DSAT_TM_2eVME (3<<8) /* 2eVME */
+#define TSI148_LCSR_DSAT_TM_2eSST (4<<8) /* 2eSST */
+#define TSI148_LCSR_DSAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
+
+#define TSI148_LCSR_DSAT_DBW_M (3<<6) /* Max Data Width MASK */
+#define TSI148_LCSR_DSAT_DBW_16 (0<<6) /* 16 Bits */
+#define TSI148_LCSR_DSAT_DBW_32 (1<<6) /* 32 Bits */
+
+#define TSI148_LCSR_DSAT_SUP (1<<5) /* Supervisory Mode */
+#define TSI148_LCSR_DSAT_PGM (1<<4) /* Program Mode */
+
+#define TSI148_LCSR_DSAT_AMODE_M (0xf<<0) /* Address Space Mask */
+#define TSI148_LCSR_DSAT_AMODE_A16 (0<<0) /* A16 */
+#define TSI148_LCSR_DSAT_AMODE_A24 (1<<0) /* A24 */
+#define TSI148_LCSR_DSAT_AMODE_A32 (2<<0) /* A32 */
+#define TSI148_LCSR_DSAT_AMODE_A64 (4<<0) /* A64 */
+#define TSI148_LCSR_DSAT_AMODE_CRCSR (5<<0) /* CR/CSR */
+#define TSI148_LCSR_DSAT_AMODE_USER1 (8<<0) /* User1 */
+#define TSI148_LCSR_DSAT_AMODE_USER2 (9<<0) /* User2 */
+#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa<<0) /* User3 */
+#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb<<0) /* User4 */
+
+/*
+ * DMA Destination Attribute Registers (0-1)
+ */
+#define TSI148_LCSR_DDAT_TYP_PCI (0<<28) /* Destination PCI Bus */
+#define TSI148_LCSR_DDAT_TYP_VME (1<<28) /* Destination VMEbus */
+
+#define TSI148_LCSR_DDAT_2eSSTM_M (3<<11) /* 2eSST Transfer Rate Mask */
+#define TSI148_LCSR_DDAT_2eSSTM_160 (0<<11) /* 160 MB/s */
+#define TSI148_LCSR_DDAT_2eSSTM_267 (1<<11) /* 267 MB/s */
+#define TSI148_LCSR_DDAT_2eSSTM_320 (2<<11) /* 320 MB/s */
+
+#define TSI148_LCSR_DDAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
+#define TSI148_LCSR_DDAT_TM_SCT (0<<8) /* SCT */
+#define TSI148_LCSR_DDAT_TM_BLT (1<<8) /* BLT */
+#define TSI148_LCSR_DDAT_TM_MBLT (2<<8) /* MBLT */
+#define TSI148_LCSR_DDAT_TM_2eVME (3<<8) /* 2eVME */
+#define TSI148_LCSR_DDAT_TM_2eSST (4<<8) /* 2eSST */
+#define TSI148_LCSR_DDAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
+
+#define TSI148_LCSR_DDAT_DBW_M (3<<6) /* Max Data Width MASK */
+#define TSI148_LCSR_DDAT_DBW_16 (0<<6) /* 16 Bits */
+#define TSI148_LCSR_DDAT_DBW_32 (1<<6) /* 32 Bits */
+
+#define TSI148_LCSR_DDAT_SUP (1<<5) /* Supervisory/User Access */
+#define TSI148_LCSR_DDAT_PGM (1<<4) /* Program/Data Access */
+
+#define TSI148_LCSR_DDAT_AMODE_M (0xf<<0) /* Address Space Mask */
+#define TSI148_LCSR_DDAT_AMODE_A16 (0<<0) /* A16 */
+#define TSI148_LCSR_DDAT_AMODE_A24 (1<<0) /* A24 */
+#define TSI148_LCSR_DDAT_AMODE_A32 (2<<0) /* A32 */
+#define TSI148_LCSR_DDAT_AMODE_A64 (4<<0) /* A64 */
+#define TSI148_LCSR_DDAT_AMODE_CRCSR (5<<0) /* CR/CSR */
+#define TSI148_LCSR_DDAT_AMODE_USER1 (8<<0) /* User1 */
+#define TSI148_LCSR_DDAT_AMODE_USER2 (9<<0) /* User2 */
+#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa<<0) /* User3 */
+#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb<<0) /* User4 */
+
+/*
+ * DMA Next Link Address Lower
+ */
+#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */
+#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */
+
+/*
+ * DMA 2eSST Broadcast Select
+ */
+#define TSI148_LCSR_DBS_M (0x1FFFFF<<0) /* Mask */
+
+/*
+ * GCSR Register Group
+ */
+
+/*
+ * GCSR Control and Status Register CRG + $604
+ */
+#define TSI148_GCSR_GCTRL_LRST (1<<15) /* Local Reset */
+#define TSI148_GCSR_GCTRL_SFAILEN (1<<14) /* System Fail enable */
+#define TSI148_GCSR_GCTRL_BDFAILS (1<<13) /* Board Fail Status */
+#define TSI148_GCSR_GCTRL_SCON (1<<12) /* System Controller */
+#define TSI148_GCSR_GCTRL_MEN (1<<11) /* Module Enable (READY) */
+
+#define TSI148_GCSR_GCTRL_LMI3S (1<<7) /* Loc Monitor 3 Int Status */
+#define TSI148_GCSR_GCTRL_LMI2S (1<<6) /* Loc Monitor 2 Int Status */
+#define TSI148_GCSR_GCTRL_LMI1S (1<<5) /* Loc Monitor 1 Int Status */
+#define TSI148_GCSR_GCTRL_LMI0S (1<<4) /* Loc Monitor 0 Int Status */
+#define TSI148_GCSR_GCTRL_MBI3S (1<<3) /* Mail box 3 Int Status */
+#define TSI148_GCSR_GCTRL_MBI2S (1<<2) /* Mail box 2 Int Status */
+#define TSI148_GCSR_GCTRL_MBI1S (1<<1) /* Mail box 1 Int Status */
+#define TSI148_GCSR_GCTRL_MBI0S (1<<0) /* Mail box 0 Int Status */
+
+#define TSI148_GCSR_GAP (1<<5) /* Geographic Addr Parity */
+#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
+
+/*
+ * CR/CSR Register Group
+ */
+
+/*
+ * CR/CSR Bit Clear Register CRG + $FF4
+ */
+#define TSI148_CRCSR_CSRBCR_LRSTC (1<<7) /* Local Reset Clear */
+#define TSI148_CRCSR_CSRBCR_SFAILC (1<<6) /* System Fail Enable Clear */
+#define TSI148_CRCSR_CSRBCR_BDFAILS (1<<5) /* Board Fail Status */
+#define TSI148_CRCSR_CSRBCR_MENC (1<<4) /* Module Enable Clear */
+#define TSI148_CRCSR_CSRBCR_BERRSC (1<<3) /* Bus Error Status Clear */
+
+/*
+ * CR/CSR Bit Set Register CRG + $FF8
+ */
+#define TSI148_CRCSR_CSRBSR_LISTS (1<<7) /* Local Reset Set */
+#define TSI148_CRCSR_CSRBSR_SFAILS (1<<6) /* System Fail Enable Set */
+#define TSI148_CRCSR_CSRBSR_BDFAILS (1<<5) /* Board Fail Status */
+#define TSI148_CRCSR_CSRBSR_MENS (1<<4) /* Module Enable Set */
+#define TSI148_CRCSR_CSRBSR_BERRS (1<<3) /* Bus Error Status Set */
+
+/*
+ * CR/CSR Base Address Register CRG + $FFC
+ */
+#define TSI148_CRCSR_CBAR_M (0x1F<<3) /* Mask */
+
+#endif /* TSI148_H */
diff --git a/drivers/staging/vme_user/vme_user.c b/drivers/staging/vme_user/vme_user.c
new file mode 100644
index 0000000000..4e533c0bfe
--- /dev/null
+++ b/drivers/staging/vme_user/vme_user.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VMEbus User access driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * Based on work by:
+ * Tom Armistead and Ajit Prem
+ * Copyright 2004 Motorola Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "vme.h"
+#include "vme_user.h"
+
+static const char driver_name[] = "vme_user";
+
+static int bus[VME_USER_BUS_MAX];
+static unsigned int bus_num;
+
+/* Currently Documentation/admin-guide/devices.rst defines the
+ * following for VME:
+ *
+ * 221 char VME bus
+ * 0 = /dev/bus/vme/m0 First master image
+ * 1 = /dev/bus/vme/m1 Second master image
+ * 2 = /dev/bus/vme/m2 Third master image
+ * 3 = /dev/bus/vme/m3 Fourth master image
+ * 4 = /dev/bus/vme/s0 First slave image
+ * 5 = /dev/bus/vme/s1 Second slave image
+ * 6 = /dev/bus/vme/s2 Third slave image
+ * 7 = /dev/bus/vme/s3 Fourth slave image
+ * 8 = /dev/bus/vme/ctl Control
+ *
+ * It is expected that all VME bus drivers will use the
+ * same interface. For interface documentation see
+ * http://www.vmelinux.org/.
+ *
+ * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
+ * even support the tsi148 chipset (which has 8 master and 8 slave windows).
+ * We'll run with this for now as far as possible, however it probably makes
+ * sense to get rid of the old mappings and just do everything dynamically.
+ *
+ * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
+ * defined above and try to support at least some of the interface from
+ * http://www.vmelinux.org/; as an alternative, a driver providing a saner
+ * interface can be written later.
+ *
+ * The vmelinux.org driver never supported slave images, the devices reserved
+ * for slaves were repurposed to support all 8 master images on the UniverseII!
+ * We shall support 4 masters and 4 slaves with this driver.
+ */
+#define VME_MAJOR 221 /* VME Major Device Number */
+#define VME_DEVS 9 /* Number of dev entries */
+
+#define MASTER_MINOR 0
+#define MASTER_MAX 3
+#define SLAVE_MINOR 4
+#define SLAVE_MAX 7
+#define CONTROL_MINOR 8
+
+#define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */
+
+/*
+ * Structure to handle image related parameters.
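+ *
+ * A descriptive note on how these descriptors are used (derived from the
+ * probe code below): one descriptor exists per minor; master images bounce
+ * user data through a kmalloc()ed kernel buffer, while slave images expose
+ * a coherent DMA buffer obtained from vme_alloc_consistent().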
+ */ +struct image_desc { + void *kern_buf; /* Buffer address in kernel space */ + dma_addr_t pci_buf; /* Buffer address in PCI address space */ + unsigned long long size_buf; /* Buffer size */ + struct mutex mutex; /* Mutex for locking image */ + struct device *device; /* Sysfs device */ + struct vme_resource *resource; /* VME resource */ + int mmap_count; /* Number of current mmap's */ +}; + +static struct image_desc image[VME_DEVS]; + +static struct cdev *vme_user_cdev; /* Character device */ +static struct class *vme_user_sysfs_class; /* Sysfs class */ +static struct vme_dev *vme_user_bridge; /* Pointer to user device */ + +static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR, + MASTER_MINOR, MASTER_MINOR, + SLAVE_MINOR, SLAVE_MINOR, + SLAVE_MINOR, SLAVE_MINOR, + CONTROL_MINOR + }; + +struct vme_user_vma_priv { + unsigned int minor; + refcount_t refcnt; +}; + +static ssize_t resource_to_user(int minor, char __user *buf, size_t count, + loff_t *ppos) +{ + ssize_t copied = 0; + + if (count > image[minor].size_buf) + count = image[minor].size_buf; + + copied = vme_master_read(image[minor].resource, image[minor].kern_buf, + count, *ppos); + if (copied < 0) + return (int)copied; + + if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied)) + return -EFAULT; + + return copied; +} + +static ssize_t resource_from_user(unsigned int minor, const char __user *buf, + size_t count, loff_t *ppos) +{ + if (count > image[minor].size_buf) + count = image[minor].size_buf; + + if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count)) + return -EFAULT; + + return vme_master_write(image[minor].resource, image[minor].kern_buf, + count, *ppos); +} + +static ssize_t buffer_to_user(unsigned int minor, char __user *buf, + size_t count, loff_t *ppos) +{ + void *image_ptr; + + image_ptr = image[minor].kern_buf + *ppos; + if (copy_to_user(buf, image_ptr, (unsigned long)count)) + return -EFAULT; + + return count; +} + +static ssize_t buffer_from_user(unsigned int minor, const char __user *buf, + size_t count, loff_t *ppos) +{ + void *image_ptr; + + image_ptr = image[minor].kern_buf + *ppos; + if (copy_from_user(image_ptr, buf, (unsigned long)count)) + return -EFAULT; + + return count; +} + +static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned int minor = iminor(file_inode(file)); + ssize_t retval; + size_t image_size; + + if (minor == CONTROL_MINOR) + return 0; + + mutex_lock(&image[minor].mutex); + + /* XXX Do we *really* want this helper - we can use vme_*_get ? 
*/
+ image_size = vme_get_size(image[minor].resource);
+
+ /* Ensure we are starting at a valid location */
+ if ((*ppos < 0) || (*ppos > (image_size - 1))) {
+ mutex_unlock(&image[minor].mutex);
+ return 0;
+ }
+
+ /* Ensure not reading past end of the image */
+ if (*ppos + count > image_size)
+ count = image_size - *ppos;
+
+ switch (type[minor]) {
+ case MASTER_MINOR:
+ retval = resource_to_user(minor, buf, count, ppos);
+ break;
+ case SLAVE_MINOR:
+ retval = buffer_to_user(minor, buf, count, ppos);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+
+ mutex_unlock(&image[minor].mutex);
+ if (retval > 0)
+ *ppos += retval;
+
+ return retval;
+}
+
+static ssize_t vme_user_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file_inode(file));
+ ssize_t retval;
+ size_t image_size;
+
+ if (minor == CONTROL_MINOR)
+ return 0;
+
+ mutex_lock(&image[minor].mutex);
+
+ image_size = vme_get_size(image[minor].resource);
+
+ /* Ensure we are starting at a valid location */
+ if ((*ppos < 0) || (*ppos > (image_size - 1))) {
+ mutex_unlock(&image[minor].mutex);
+ return 0;
+ }
+
+ /* Ensure not writing past end of the image */
+ if (*ppos + count > image_size)
+ count = image_size - *ppos;
+
+ switch (type[minor]) {
+ case MASTER_MINOR:
+ retval = resource_from_user(minor, buf, count, ppos);
+ break;
+ case SLAVE_MINOR:
+ retval = buffer_from_user(minor, buf, count, ppos);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+
+ mutex_unlock(&image[minor].mutex);
+
+ if (retval > 0)
+ *ppos += retval;
+
+ return retval;
+}
+
+static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
+{
+ unsigned int minor = iminor(file_inode(file));
+ size_t image_size;
+ loff_t res;
+
+ switch (type[minor]) {
+ case MASTER_MINOR:
+ case SLAVE_MINOR:
+ mutex_lock(&image[minor].mutex);
+ image_size = vme_get_size(image[minor].resource);
+ res = fixed_size_llseek(file, off, whence, image_size);
+ mutex_unlock(&image[minor].mutex);
+ return res;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * The ioctls provided by the old VME access method (the one at vmelinux.org)
+ * are most certainly wrong as they effectively push the register layout
+ * through to user space. Given that the VME core can handle multiple bridges,
+ * with different register layouts, this is most certainly not the way to go.
+ *
+ * We aren't using the structures defined in the Motorola driver either - these
+ * are also quite low level, however we should use the definitions that have
+ * already been defined.
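+ *
+ * For illustration only, a hypothetical userspace sketch of driving these
+ * ioctls (not part of this driver; VME_A32, VME_SCT and VME_D32 come from
+ * vme.h, VME_SET_MASTER from vme_user.h, and the /dev/bus/vme/m0 node from
+ * the table above):
+ *
+ *   struct vme_master cfg = {
+ *     .enable = 1, .vme_addr = 0x0, .size = 0x10000,
+ *     .aspace = VME_A32, .cycle = VME_SCT, .dwidth = VME_D32,
+ *   };
+ *   int fd = open("/dev/bus/vme/m0", O_RDWR);
+ *
+ *   if (fd >= 0 && ioctl(fd, VME_SET_MASTER, &cfg) == 0)
+ *     pread(fd, buf, len, 0);   read()/write() use window-relative offsets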
+ */ +static int vme_user_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct vme_master master; + struct vme_slave slave; + struct vme_irq_id irq_req; + unsigned long copied; + unsigned int minor = iminor(inode); + int retval; + dma_addr_t pci_addr; + void __user *argp = (void __user *)arg; + + switch (type[minor]) { + case CONTROL_MINOR: + switch (cmd) { + case VME_IRQ_GEN: + copied = copy_from_user(&irq_req, argp, + sizeof(irq_req)); + if (copied) { + pr_warn("Partial copy from userspace\n"); + return -EFAULT; + } + + return vme_irq_generate(vme_user_bridge, + irq_req.level, + irq_req.statid); + } + break; + case MASTER_MINOR: + switch (cmd) { + case VME_GET_MASTER: + memset(&master, 0, sizeof(master)); + + /* XXX We do not want to push aspace, cycle and width + * to userspace as they are + */ + retval = vme_master_get(image[minor].resource, + &master.enable, + &master.vme_addr, + &master.size, &master.aspace, + &master.cycle, &master.dwidth); + + copied = copy_to_user(argp, &master, + sizeof(master)); + if (copied) { + pr_warn("Partial copy to userspace\n"); + return -EFAULT; + } + + return retval; + + case VME_SET_MASTER: + + if (image[minor].mmap_count != 0) { + pr_warn("Can't adjust mapped window\n"); + return -EPERM; + } + + copied = copy_from_user(&master, argp, sizeof(master)); + if (copied) { + pr_warn("Partial copy from userspace\n"); + return -EFAULT; + } + + /* XXX We do not want to push aspace, cycle and width + * to userspace as they are + */ + return vme_master_set(image[minor].resource, + master.enable, master.vme_addr, master.size, + master.aspace, master.cycle, master.dwidth); + + break; + } + break; + case SLAVE_MINOR: + switch (cmd) { + case VME_GET_SLAVE: + memset(&slave, 0, sizeof(slave)); + + /* XXX We do not want to push aspace, cycle and width + * to userspace as they are + */ + retval = vme_slave_get(image[minor].resource, + &slave.enable, &slave.vme_addr, + &slave.size, &pci_addr, + &slave.aspace, &slave.cycle); + + copied = copy_to_user(argp, &slave, + sizeof(slave)); + if (copied) { + pr_warn("Partial copy to userspace\n"); + return -EFAULT; + } + + return retval; + + case VME_SET_SLAVE: + + copied = copy_from_user(&slave, argp, sizeof(slave)); + if (copied) { + pr_warn("Partial copy from userspace\n"); + return -EFAULT; + } + + /* XXX We do not want to push aspace, cycle and width + * to userspace as they are + */ + return vme_slave_set(image[minor].resource, + slave.enable, slave.vme_addr, slave.size, + image[minor].pci_buf, slave.aspace, + slave.cycle); + + break; + } + break; + } + + return -EINVAL; +} + +static long +vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + struct inode *inode = file_inode(file); + unsigned int minor = iminor(inode); + + mutex_lock(&image[minor].mutex); + ret = vme_user_ioctl(inode, file, cmd, arg); + mutex_unlock(&image[minor].mutex); + + return ret; +} + +static void vme_user_vm_open(struct vm_area_struct *vma) +{ + struct vme_user_vma_priv *vma_priv = vma->vm_private_data; + + refcount_inc(&vma_priv->refcnt); +} + +static void vme_user_vm_close(struct vm_area_struct *vma) +{ + struct vme_user_vma_priv *vma_priv = vma->vm_private_data; + unsigned int minor = vma_priv->minor; + + if (!refcount_dec_and_test(&vma_priv->refcnt)) + return; + + mutex_lock(&image[minor].mutex); + image[minor].mmap_count--; + mutex_unlock(&image[minor].mutex); + + kfree(vma_priv); +} + +static const struct vm_operations_struct vme_user_vm_ops = { + .open = 
vme_user_vm_open, + .close = vme_user_vm_close, +}; + +static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma) +{ + int err; + struct vme_user_vma_priv *vma_priv; + + mutex_lock(&image[minor].mutex); + + err = vme_master_mmap(image[minor].resource, vma); + if (err) { + mutex_unlock(&image[minor].mutex); + return err; + } + + vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL); + if (!vma_priv) { + mutex_unlock(&image[minor].mutex); + return -ENOMEM; + } + + vma_priv->minor = minor; + refcount_set(&vma_priv->refcnt, 1); + vma->vm_ops = &vme_user_vm_ops; + vma->vm_private_data = vma_priv; + + image[minor].mmap_count++; + + mutex_unlock(&image[minor].mutex); + + return 0; +} + +static int vme_user_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned int minor = iminor(file_inode(file)); + + if (type[minor] == MASTER_MINOR) + return vme_user_master_mmap(minor, vma); + + return -ENODEV; +} + +static const struct file_operations vme_user_fops = { + .read = vme_user_read, + .write = vme_user_write, + .llseek = vme_user_llseek, + .unlocked_ioctl = vme_user_unlocked_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .mmap = vme_user_mmap, +}; + +static int vme_user_match(struct vme_dev *vdev) +{ + int i; + + int cur_bus = vme_bus_num(vdev); + int cur_slot = vme_slot_num(vdev); + + for (i = 0; i < bus_num; i++) + if ((cur_bus == bus[i]) && (cur_slot == vdev->num)) + return 1; + + return 0; +} + +/* + * In this simple access driver, the old behaviour is being preserved as much + * as practical. We will therefore reserve the buffers and request the images + * here so that we don't have to do it later. + */ +static int vme_user_probe(struct vme_dev *vdev) +{ + int i, err; + char *name; + + /* Save pointer to the bridge device */ + if (vme_user_bridge) { + dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n"); + err = -EINVAL; + goto err_dev; + } + vme_user_bridge = vdev; + + /* Initialise descriptors */ + for (i = 0; i < VME_DEVS; i++) { + image[i].kern_buf = NULL; + image[i].pci_buf = 0; + mutex_init(&image[i].mutex); + image[i].device = NULL; + image[i].resource = NULL; + } + + /* Assign major and minor numbers for the driver */ + err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS, + driver_name); + if (err) { + dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n", + VME_MAJOR); + goto err_region; + } + + /* Register the driver as a char device */ + vme_user_cdev = cdev_alloc(); + if (!vme_user_cdev) { + err = -ENOMEM; + goto err_char; + } + vme_user_cdev->ops = &vme_user_fops; + vme_user_cdev->owner = THIS_MODULE; + err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS); + if (err) + goto err_class; + + /* Request slave resources and allocate buffers (128kB wide) */ + for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) { + /* XXX Need to properly request attributes */ + /* For ca91cx42 bridge there are only two slave windows + * supporting A16 addressing, so we request A24 supported + * by all windows. 
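+ * (The tsi148, by contrast, accepts A24 on any of its eight inbound
+ * windows, so this request should be satisfiable on either supported
+ * bridge.)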
+ */
+ image[i].resource = vme_slave_request(vme_user_bridge,
+ VME_A24, VME_SCT);
+ if (!image[i].resource) {
+ dev_warn(&vdev->dev,
+ "Unable to allocate slave resource\n");
+ err = -ENOMEM;
+ goto err_slave;
+ }
+ image[i].size_buf = PCI_BUF_SIZE;
+ image[i].kern_buf = vme_alloc_consistent(image[i].resource,
+ image[i].size_buf,
+ &image[i].pci_buf);
+ if (!image[i].kern_buf) {
+ dev_warn(&vdev->dev,
+ "Unable to allocate memory for buffer\n");
+ image[i].pci_buf = 0;
+ vme_slave_free(image[i].resource);
+ err = -ENOMEM;
+ goto err_slave;
+ }
+ }
+
+ /*
+ * Request master resources and allocate buffers used to bounce small
+ * reads and writes
+ */
+ for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
+ /* XXX Need to properly request attributes */
+ image[i].resource = vme_master_request(vme_user_bridge,
+ VME_A32, VME_SCT,
+ VME_D32);
+ if (!image[i].resource) {
+ dev_warn(&vdev->dev,
+ "Unable to allocate master resource\n");
+ err = -ENOMEM;
+ goto err_master;
+ }
+ image[i].size_buf = PCI_BUF_SIZE;
+ image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
+ if (!image[i].kern_buf) {
+ err = -ENOMEM;
+ vme_master_free(image[i].resource);
+ goto err_master;
+ }
+ }
+
+ /* Create sysfs entries - on udev systems this creates the dev files */
+ vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
+ if (IS_ERR(vme_user_sysfs_class)) {
+ dev_err(&vdev->dev, "Error creating vme_user class.\n");
+ err = PTR_ERR(vme_user_sysfs_class);
+ goto err_master;
+ }
+
+ /* Add sysfs Entries */
+ for (i = 0; i < VME_DEVS; i++) {
+ int num;
+
+ switch (type[i]) {
+ case MASTER_MINOR:
+ name = "bus/vme/m%d";
+ break;
+ case CONTROL_MINOR:
+ name = "bus/vme/ctl";
+ break;
+ case SLAVE_MINOR:
+ name = "bus/vme/s%d";
+ break;
+ default:
+ err = -EINVAL;
+ goto err_sysfs;
+ }
+
+ num = (type[i] == SLAVE_MINOR) ? 
i - (MASTER_MAX + 1) : i; + image[i].device = device_create(vme_user_sysfs_class, NULL, + MKDEV(VME_MAJOR, i), NULL, + name, num); + if (IS_ERR(image[i].device)) { + dev_info(&vdev->dev, "Error creating sysfs device\n"); + err = PTR_ERR(image[i].device); + goto err_sysfs; + } + } + + return 0; + +err_sysfs: + while (i > 0) { + i--; + device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i)); + } + class_destroy(vme_user_sysfs_class); + + /* Ensure counter set correctly to unalloc all master windows */ + i = MASTER_MAX + 1; +err_master: + while (i > MASTER_MINOR) { + i--; + kfree(image[i].kern_buf); + vme_master_free(image[i].resource); + } + + /* + * Ensure counter set correctly to unalloc all slave windows and buffers + */ + i = SLAVE_MAX + 1; +err_slave: + while (i > SLAVE_MINOR) { + i--; + vme_free_consistent(image[i].resource, image[i].size_buf, + image[i].kern_buf, image[i].pci_buf); + vme_slave_free(image[i].resource); + } +err_class: + cdev_del(vme_user_cdev); +err_char: + unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); +err_region: +err_dev: + return err; +} + +static void vme_user_remove(struct vme_dev *dev) +{ + int i; + + /* Remove sysfs Entries */ + for (i = 0; i < VME_DEVS; i++) { + mutex_destroy(&image[i].mutex); + device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i)); + } + class_destroy(vme_user_sysfs_class); + + for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) { + kfree(image[i].kern_buf); + vme_master_free(image[i].resource); + } + + for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) { + vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0); + vme_free_consistent(image[i].resource, image[i].size_buf, + image[i].kern_buf, image[i].pci_buf); + vme_slave_free(image[i].resource); + } + + /* Unregister device driver */ + cdev_del(vme_user_cdev); + + /* Unregister the major and minor device numbers */ + unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); +} + +static struct vme_driver vme_user_driver = { + .name = driver_name, + .match = vme_user_match, + .probe = vme_user_probe, + .remove = vme_user_remove, +}; + +static int __init vme_user_init(void) +{ + int retval = 0; + + pr_info("VME User Space Access Driver\n"); + + if (bus_num == 0) { + pr_err("No cards, skipping registration\n"); + retval = -ENODEV; + goto err_nocard; + } + + /* Let's start by supporting one bus, we can support more than one + * in future revisions if that ever becomes necessary. + */ + if (bus_num > VME_USER_BUS_MAX) { + pr_err("Driver only able to handle %d buses\n", + VME_USER_BUS_MAX); + bus_num = VME_USER_BUS_MAX; + } + + /* + * Here we just register the maximum number of devices we can and + * leave vme_user_match() to allow only 1 to go through to probe(). + * This way, if we later want to allow multiple user access devices, + * we just change the code in vme_user_match(). 
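+ * + * For example, with the "bus" module parameter declared below, + * loading the module as "modprobe vme_user bus=0" registers + * VME_MAX_SLOTS devices, and vme_user_match() then only lets the + * devices on VME bus 0 through to probe().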
+ */ + retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS); + if (retval) + goto err_reg; + + return retval; + +err_reg: +err_nocard: + return retval; +} + +static void __exit vme_user_exit(void) +{ + vme_unregister_driver(&vme_user_driver); +} + +MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected"); +module_param_array(bus, int, &bus_num, 0000); + +MODULE_DESCRIPTION("VME User Space Access Driver"); +MODULE_AUTHOR("Martyn Welch "); +MODULE_LICENSE("GPL"); + +module_init(vme_user_init); +module_exit(vme_user_exit); diff --git a/drivers/staging/vme_user/vme_user.h b/drivers/staging/vme_user/vme_user.h new file mode 100644 index 0000000000..19ecb05781 --- /dev/null +++ b/drivers/staging/vme_user/vme_user.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VME_USER_H_ +#define _VME_USER_H_ + +#define VME_USER_BUS_MAX 1 + +/* + * VMEbus Master Window Configuration Structure + */ +struct vme_master { + __u32 enable; /* State of Window */ + __u64 vme_addr; /* Starting Address on the VMEbus */ + __u64 size; /* Window Size */ + __u32 aspace; /* Address Space */ + __u32 cycle; /* Cycle properties */ + __u32 dwidth; /* Maximum Data Width */ +#if 0 + char prefetchenable; /* Prefetch Read Enable State */ + int prefetchsize; /* Prefetch Read Size (Cache Lines) */ + char wrpostenable; /* Write Post State */ +#endif +} __packed; + +/* + * IOCTL Commands and structures + */ + +/* Magic number for use in ioctls */ +#define VME_IOC_MAGIC 0xAE + +/* VMEbus Slave Window Configuration Structure */ +struct vme_slave { + __u32 enable; /* State of Window */ + __u64 vme_addr; /* Starting Address on the VMEbus */ + __u64 size; /* Window Size */ + __u32 aspace; /* Address Space */ + __u32 cycle; /* Cycle properties */ +#if 0 + char wrpostenable; /* Write Post State */ + char rmwlock; /* Lock PCI during RMW Cycles */ + char data64bitcapable; /* non-VMEbus capable of 64-bit Data */ +#endif +} __packed; + +struct vme_irq_id { + __u8 level; + __u8 statid; +}; + +#define VME_GET_SLAVE _IOR(VME_IOC_MAGIC, 1, struct vme_slave) +#define VME_SET_SLAVE _IOW(VME_IOC_MAGIC, 2, struct vme_slave) +#define VME_GET_MASTER _IOR(VME_IOC_MAGIC, 3, struct vme_master) +#define VME_SET_MASTER _IOW(VME_IOC_MAGIC, 4, struct vme_master) +#define VME_IRQ_GEN _IOW(VME_IOC_MAGIC, 5, struct vme_irq_id) + +#endif /* _VME_USER_H_ */ + diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c new file mode 100644 index 0000000000..115a44eb4f --- /dev/null +++ b/drivers/thermal/k3_j72xx_bandgap.c @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI Bandgap temperature sensor driver for J72XX SoC Family + * + * Copyright (C) 2021 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define K3_VTM_DEVINFO_PWR0_OFFSET 0x4 +#define K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK 0xf0 +#define K3_VTM_TMPSENS0_CTRL_OFFSET 0x300 +#define K3_VTM_MISC_CTRL_OFFSET 0xc +#define K3_VTM_TMPSENS_STAT_OFFSET 0x8 +#define K3_VTM_ANYMAXT_OUTRG_ALERT_EN 0x1 +#define K3_VTM_MISC_CTRL2_OFFSET 0x10 +#define K3_VTM_TS_STAT_DTEMP_MASK 0x3ff +#define K3_VTM_MAX_NUM_TS 8 +#define K3_VTM_TMPSENS_CTRL_SOC BIT(5) +#define K3_VTM_TMPSENS_CTRL_CLRZ BIT(6) +#define K3_VTM_TMPSENS_CTRL_CLKON_REQ BIT(7) +#define K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN BIT(11) + +#define K3_VTM_CORRECTION_TEMP_CNT 3 + +#define MINUS40CREF 5 +#define PLUS30CREF 253 +#define 
PLUS125CREF 730 +#define PLUS150CREF 940 + +#define TABLE_SIZE 1024 +#define MAX_TEMP 123000 +#define COOL_DOWN_TEMP 105000 + +#define FACTORS_REDUCTION 13 +static int *derived_table; + +static int compute_value(int index, const s64 *factors, int nr_factors, + int reduction) +{ + s64 value = 0; + int i; + + for (i = 0; i < nr_factors; i++) + value += factors[i] * int_pow(index, i); + + return (int)div64_s64(value, int_pow(10, reduction)); +} + +static void init_table(int factors_size, int *table, const s64 *factors) +{ + int i; + + for (i = 0; i < TABLE_SIZE; i++) + table[i] = compute_value(i, factors, factors_size, + FACTORS_REDUCTION); +} + +/** + * struct err_values - structure containing error/reference values + * @refs: reference error values for -40C, 30C, 125C & 150C + * @errs: Actual error values for -40C, 30C, 125C & 150C read from the efuse + */ +struct err_values { + int refs[4]; + int errs[4]; +}; + +static void create_table_segments(struct err_values *err_vals, int seg, + int *ref_table) +{ + int m = 0, c, num, den, i, err, idx1, idx2, err1, err2, ref1, ref2; + + if (seg == 0) + idx1 = 0; + else + idx1 = err_vals->refs[seg]; + + idx2 = err_vals->refs[seg + 1]; + err1 = err_vals->errs[seg]; + err2 = err_vals->errs[seg + 1]; + ref1 = err_vals->refs[seg]; + ref2 = err_vals->refs[seg + 1]; + + /* + * Calculate the slope with adc values read from the register + * as the y-axis param and err in adc value as x-axis param + */ + num = ref2 - ref1; + den = err2 - err1; + if (den) + m = num / den; + c = ref2 - m * err2; + + /* + * Take care of divide by zero error if error values are same + * Or when the slope is 0 + */ + if (den != 0 && m != 0) { + for (i = idx1; i <= idx2; i++) { + err = (i - c) / m; + if (((i + err) < 0) || ((i + err) >= TABLE_SIZE)) + continue; + derived_table[i] = ref_table[i + err]; + } + } else { /* Constant error take care of divide by zero */ + for (i = idx1; i <= idx2; i++) { + if (((i + err1) < 0) || ((i + err1) >= TABLE_SIZE)) + continue; + derived_table[i] = ref_table[i + err1]; + } + } +} + +static int prep_lookup_table(struct err_values *err_vals, int *ref_table) +{ + int inc, i, seg; + + /* + * Fill up the lookup table under 3 segments + * region -40C to +30C + * region +30C to +125C + * region +125C to +150C + */ + for (seg = 0; seg < 3; seg++) + create_table_segments(err_vals, seg, ref_table); + + /* Get to the first valid temperature */ + i = 0; + while (!derived_table[i]) + i++; + + /* + * Get to the last zero index and back fill the temperature for + * sake of continuity + */ + if (i) { + /* 300 milli celsius steps */ + while (i--) + derived_table[i] = derived_table[i + 1] - 300; + } + + /* + * Fill the last trailing 0s which are unfilled with increments of + * 100 milli celsius till 1023 code + */ + i = TABLE_SIZE - 1; + while (!derived_table[i]) + i--; + + i++; + inc = 1; + while (i < TABLE_SIZE) { + derived_table[i] = derived_table[i - 1] + inc * 100; + i++; + } + + return 0; +} + +struct k3_thermal_data; + +struct k3_j72xx_bandgap { + struct device *dev; + void __iomem *base; + void __iomem *cfg2_base; + void __iomem *fuse_base; + struct k3_thermal_data *ts_data[K3_VTM_MAX_NUM_TS]; +}; + +/* common data structures */ +struct k3_thermal_data { + struct k3_j72xx_bandgap *bgp; + u32 ctrl_offset; + u32 stat_offset; +}; + +static int two_cmp(int tmp, int mask) +{ + tmp = ~(tmp); + tmp &= mask; + tmp += 1; + + /* Return negative value */ + return (0 - tmp); +} + +static unsigned int vtm_get_best_value(unsigned int s0, unsigned int s1, + unsigned int 
s2) +{ + int d01 = abs(s0 - s1); + int d02 = abs(s0 - s2); + int d12 = abs(s1 - s2); + + if (d01 <= d02 && d01 <= d12) + return (s0 + s1) / 2; + + if (d02 <= d01 && d02 <= d12) + return (s0 + s2) / 2; + + return (s1 + s2) / 2; +} + +static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata, + int *temp) +{ + struct k3_j72xx_bandgap *bgp; + unsigned int s0, s1, s2; + int dtemp; + + bgp = devdata->bgp; + /* + * The errata is applicable to am654 pg 1.0 silicon/J7ES. There + * is a variation of the read order for certain degrees centigrade + * on AM654. Work around that by getting the average of the two + * closest readings out of three readings every time we want to + * report a temperature. + */ + s0 = readl(bgp->base + devdata->stat_offset) & + K3_VTM_TS_STAT_DTEMP_MASK; + s1 = readl(bgp->base + devdata->stat_offset) & + K3_VTM_TS_STAT_DTEMP_MASK; + s2 = readl(bgp->base + devdata->stat_offset) & + K3_VTM_TS_STAT_DTEMP_MASK; + dtemp = vtm_get_best_value(s0, s1, s2); + + if (dtemp < 0 || dtemp >= TABLE_SIZE) + return -EINVAL; + + *temp = derived_table[dtemp]; + + return 0; +} + +/* Get temperature callback function for thermal zone */ +static int k3_thermal_get_temp(void *devdata, int *temp) +{ + struct k3_thermal_data *data = devdata; + + return k3_bgp_read_temp(data, temp); +} + +static const struct thermal_zone_of_device_ops k3_of_thermal_ops = { + .get_temp = k3_thermal_get_temp, +}; + +static int k3_j72xx_bandgap_temp_to_adc_code(int temp) +{ + int low = 0, high = TABLE_SIZE - 1, mid; + + if (temp > 160000 || temp < -50000) + return -EINVAL; + + /* Binary search to find the adc code */ + while (low < (high - 1)) { + mid = (low + high) / 2; + if (temp <= derived_table[mid]) + high = mid; + else + low = mid; + } + + return mid; +} + +static void get_efuse_values(int id, struct k3_thermal_data *data, int *err, + struct k3_j72xx_bandgap *bgp) +{ + int i, tmp, pow; + int ct_offsets[5][K3_VTM_CORRECTION_TEMP_CNT] = { + { 0x0, 0x8, 0x4 }, + { 0x0, 0x8, 0x4 }, + { 0x0, -1, 0x4 }, + { 0x0, 0xC, -1 }, + { 0x0, 0xc, 0x8 } + }; + int ct_bm[5][K3_VTM_CORRECTION_TEMP_CNT] = { + { 0x3f, 0x1fe000, 0x1ff }, + { 0xfc0, 0x1fe000, 0x3fe00 }, + { 0x3f000, 0x7f800000, 0x7fc0000 }, + { 0xfc0000, 0x1fe0, 0x1f800000 }, + { 0x3f000000, 0x1fe000, 0x1ff0 } + }; + + for (i = 0; i < 3; i++) { + /* Extract the offset value using bit-mask */ + if (ct_offsets[id][i] == -1 && i == 1) { + /* 25C offset of sensor 2 is split between 2 regs */ + tmp = (readl(bgp->fuse_base + 0x8) & 0xE0000000) >> (29); + tmp |= ((readl(bgp->fuse_base + 0xC) & 0x1F) << 3); + pow = tmp & 0x80; + } else if (ct_offsets[id][i] == -1 && i == 2) { + /* 125C offset of sensor 3 is split between 2 regs */ + tmp = (readl(bgp->fuse_base + 0x4) & 0xF8000000) >> (27); + tmp |= ((readl(bgp->fuse_base + 0x8) & 0xF) << 5); + pow = tmp & 0x100; + } else { + tmp = readl(bgp->fuse_base + ct_offsets[id][i]); + tmp &= ct_bm[id][i]; + tmp = tmp >> __ffs(ct_bm[id][i]); + + /* Obtain the sign bit pow */ + pow = ct_bm[id][i] >> __ffs(ct_bm[id][i]); + pow += 1; + pow /= 2; + } + + /* Check for negative value */ + if (tmp & pow) { + /* 2's complement value */ + tmp = two_cmp(tmp, ct_bm[id][i] >> __ffs(ct_bm[id][i])); + } + err[i] = tmp; + } + + /* Err value for 150C is set to 0 */ + err[i] = 0; +} + +static void print_look_up_table(struct device *dev, int *ref_table) +{ + int i; + + dev_dbg(dev, "The contents of derived array\n"); + dev_dbg(dev, "Code Temperature\n"); + for (i = 0; i < TABLE_SIZE; i++) + 
dev_dbg(dev, "%d %d %d\n", i, derived_table[i], ref_table[i]); +} + +struct k3_j72xx_bandgap_data { + unsigned int has_errata_i2128; +}; + +static int k3_j72xx_bandgap_probe(struct platform_device *pdev) +{ + int ret = 0, cnt, val, id; + int high_max, low_temp; + struct resource *res; + struct device *dev = &pdev->dev; + struct k3_j72xx_bandgap *bgp; + struct k3_thermal_data *data; + int workaround_needed = 0; + const struct k3_j72xx_bandgap_data *driver_data; + struct thermal_zone_device *ti_thermal; + int *ref_table; + struct err_values err_vals; + + const s64 golden_factors[] = { + -490019999999999936, + 3251200000000000, + -1705800000000, + 603730000, + -92627, + }; + + const s64 pvt_wa_factors[] = { + -415230000000000000, + 3126600000000000, + -1157800000000, + }; + + bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); + if (!bgp) + return -ENOMEM; + + bgp->dev = dev; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bgp->base = devm_ioremap_resource(dev, res); + if (IS_ERR(bgp->base)) + return PTR_ERR(bgp->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + bgp->cfg2_base = devm_ioremap_resource(dev, res); + if (IS_ERR(bgp->cfg2_base)) + return PTR_ERR(bgp->cfg2_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + bgp->fuse_base = devm_ioremap_resource(dev, res); + if (IS_ERR(bgp->fuse_base)) + return PTR_ERR(bgp->fuse_base); + + driver_data = of_device_get_match_data(dev); + if (driver_data) + workaround_needed = driver_data->has_errata_i2128; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + return ret; + } + + /* Get the sensor count in the VTM */ + val = readl(bgp->base + K3_VTM_DEVINFO_PWR0_OFFSET); + cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK; + cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK); + + data = devm_kcalloc(bgp->dev, cnt, sizeof(*data), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err_alloc; + } + + ref_table = kzalloc(sizeof(*ref_table) * TABLE_SIZE, GFP_KERNEL); + if (!ref_table) { + ret = -ENOMEM; + goto err_alloc; + } + + derived_table = devm_kzalloc(bgp->dev, sizeof(*derived_table) * TABLE_SIZE, + GFP_KERNEL); + if (!derived_table) { + ret = -ENOMEM; + goto err_free_ref_table; + } + + /* Workaround not needed if bit30/bit31 is set even for J721e */ + if (workaround_needed && (readl(bgp->fuse_base + 0x0) & 0xc0000000) == 0xc0000000) + workaround_needed = false; + + dev_dbg(bgp->dev, "Work around %sneeded\n", + workaround_needed ? 
"not " : ""); + + if (!workaround_needed) + init_table(5, ref_table, golden_factors); + else + init_table(3, ref_table, pvt_wa_factors); + + /* Register the thermal sensors */ + for (id = 0; id < cnt; id++) { + data[id].bgp = bgp; + data[id].ctrl_offset = K3_VTM_TMPSENS0_CTRL_OFFSET + id * 0x20; + data[id].stat_offset = data[id].ctrl_offset + + K3_VTM_TMPSENS_STAT_OFFSET; + + if (workaround_needed) { + /* ref adc values for -40C, 30C & 125C respectively */ + err_vals.refs[0] = MINUS40CREF; + err_vals.refs[1] = PLUS30CREF; + err_vals.refs[2] = PLUS125CREF; + err_vals.refs[3] = PLUS150CREF; + get_efuse_values(id, &data[id], err_vals.errs, bgp); + } + + if (id == 0 && workaround_needed) + prep_lookup_table(&err_vals, ref_table); + else if (id == 0 && !workaround_needed) + memcpy(derived_table, ref_table, TABLE_SIZE * 4); + + val = readl(data[id].bgp->cfg2_base + data[id].ctrl_offset); + val |= (K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN | + K3_VTM_TMPSENS_CTRL_SOC | + K3_VTM_TMPSENS_CTRL_CLRZ | BIT(4)); + writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset); + + bgp->ts_data[id] = &data[id]; + ti_thermal = + devm_thermal_zone_of_sensor_register(bgp->dev, id, + &data[id], + &k3_of_thermal_ops); + if (IS_ERR(ti_thermal)) { + dev_err(bgp->dev, "thermal zone device is NULL\n"); + ret = PTR_ERR(ti_thermal); + goto err_free_ref_table; + } + } + + /* + * Program TSHUT thresholds + * Step 1: set the thresholds to ~123C and 105C WKUP_VTM_MISC_CTRL2 + * Step 2: WKUP_VTM_TMPSENS_CTRL_j set the MAXT_OUTRG_EN bit + * This is already taken care as per of init + * Step 3: WKUP_VTM_MISC_CTRL set the ANYMAXT_OUTRG_ALERT_EN bit + */ + high_max = k3_j72xx_bandgap_temp_to_adc_code(MAX_TEMP); + low_temp = k3_j72xx_bandgap_temp_to_adc_code(COOL_DOWN_TEMP); + + writel((low_temp << 16) | high_max, data[0].bgp->cfg2_base + + K3_VTM_MISC_CTRL2_OFFSET); + mdelay(100); + writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, data[0].bgp->cfg2_base + + K3_VTM_MISC_CTRL_OFFSET); + + platform_set_drvdata(pdev, bgp); + + print_look_up_table(dev, ref_table); + /* + * Now that the derived_table has the appropriate look up values + * Free up the ref_table + */ + kfree(ref_table); + + return 0; + +err_free_ref_table: + kfree(ref_table); + +err_alloc: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int k3_j72xx_bandgap_remove(struct platform_device *pdev) +{ + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = { + .has_errata_i2128 = 1, +}; + +static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = { + .has_errata_i2128 = 0, +}; + +static const struct of_device_id of_k3_j72xx_bandgap_match[] = { + { + .compatible = "ti,j721e-vtm", + .data = &k3_j72xx_bandgap_j721e_data, + }, + { + .compatible = "ti,j7200-vtm", + .data = &k3_j72xx_bandgap_j7200_data, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, of_k3_j72xx_bandgap_match); + +static struct platform_driver k3_j72xx_bandgap_sensor_driver = { + .probe = k3_j72xx_bandgap_probe, + .remove = k3_j72xx_bandgap_remove, + .driver = { + .name = "k3-j72xx-soc-thermal", + .of_match_table = of_k3_j72xx_bandgap_match, + }, +}; + +module_platform_driver(k3_j72xx_bandgap_sensor_driver); + +MODULE_DESCRIPTION("K3 bandgap temperature sensor driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("J Keerthy "); diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig new file mode 100644 index 0000000000..90226f72c1 --- /dev/null +++ 
b/drivers/ufs/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# UFS subsystem configuration +# + +menuconfig SCSI_UFSHCD + tristate "Universal Flash Storage Controller" + depends on SCSI && SCSI_DMA + select PM_DEVFREQ + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select NLS + help + Enables support for UFS (Universal Flash Storage) host controllers. + A UFS host controller is an electronic component that is able to + communicate with a UFS card. UFS host controllers occur in + smartphones, laptops, digital cameras and also in cars. + The kernel module will be called ufshcd. + + To compile this driver as a module, choose M here and read + <file:Documentation/scsi/ufs.rst>. + However, do not compile this as a module if your root file system + (the one containing the directory /) is located on a UFS device. + +if SCSI_UFSHCD + +source "drivers/ufs/core/Kconfig" + +source "drivers/ufs/host/Kconfig" + +endif diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile new file mode 100644 index 0000000000..5a199ef18d --- /dev/null +++ b/drivers/ufs/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 + +# The link order is important here. ufshcd-core must initialize +# before vendor drivers. +obj-$(CONFIG_SCSI_UFSHCD) += core/ host/ diff --git a/drivers/ufs/core/Kconfig b/drivers/ufs/core/Kconfig new file mode 100644 index 0000000000..e119781714 --- /dev/null +++ b/drivers/ufs/core/Kconfig @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Kernel configuration file for the UFS Host Controller core. +# +# Copyright (C) 2011-2013 Samsung India Software Operations +# +# Authors: +# Santosh Yaraganavi <santosh.sy@samsung.com> +# Vinayak Holikatti <h.vinayak@samsung.com> + +config SCSI_UFS_BSG + bool "Universal Flash Storage BSG device node" + select BLK_DEV_BSGLIB + help + Universal Flash Storage (UFS) is a SCSI transport specification for + accessing flash storage on digital cameras, mobile phones and + consumer electronic devices. + A UFS controller communicates with a UFS device by exchanging + UFS Protocol Information Units (UPIUs). + UPIUs can not only be used as a transport layer for the SCSI protocol + but are also used by the UFS native command set. + This transport driver supports exchanging UFS protocol information units + with a UFS device. See also the ufshcd driver, which is a SCSI driver + that supports UFS devices. + + Select this if you need a bsg device node for your UFS controller. + If unsure, say N. + +config SCSI_UFS_CRYPTO + bool "UFS Crypto Engine Support" + depends on BLK_INLINE_ENCRYPTION + help + Enable Crypto Engine Support in UFS. + Enabling this makes it possible for the kernel to use the crypto + capabilities of the UFS device (if present) to perform crypto + operations on data being transferred to/from the device. + +config SCSI_UFS_HPB + bool "Support UFS Host Performance Booster" + help + The UFS HPB feature improves random read performance. It caches + L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB + read command by piggybacking physical page number for bypassing FTL (flash + translation layer)'s L2P address translation. + +config SCSI_UFS_FAULT_INJECTION + bool "UFS Fault Injection Support" + depends on FAULT_INJECTION + help + Enable fault injection support in the UFS driver. This makes it easier + to test the UFS error handler and abort handler. + +config SCSI_UFS_HWMON + bool "UFS Temperature Notification" + depends on SCSI_UFSHCD=m && HWMON || HWMON=y + help + This provides support for UFS hardware monitoring. If enabled, + a hardware monitoring device will be created for the UFS device. 
+ + If unsure, say N. diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile new file mode 100644 index 0000000000..62f38c5bf8 --- /dev/null +++ b/drivers/ufs/core/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o +ufshcd-core-y += ufshcd.o ufs-sysfs.o +ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o +ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o +ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o +ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o +ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o +ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o diff --git a/drivers/ufs/core/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c new file mode 100644 index 0000000000..e3baed6c70 --- /dev/null +++ b/drivers/ufs/core/ufs-debugfs.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2020 Intel Corporation + +#include + +#include "ufs-debugfs.h" +#include +#include "ufshcd-priv.h" + +static struct dentry *ufs_debugfs_root; + +struct ufs_debugfs_attr { + const char *name; + mode_t mode; + const struct file_operations *fops; +}; + +/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */ +static inline struct ufs_hba *hba_from_file(const struct file *file) +{ + return d_inode(file->f_path.dentry->d_parent)->i_private; +} + +void __init ufs_debugfs_init(void) +{ + ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL); +} + +void ufs_debugfs_exit(void) +{ + debugfs_remove_recursive(ufs_debugfs_root); +} + +static int ufs_debugfs_stats_show(struct seq_file *s, void *data) +{ + struct ufs_hba *hba = hba_from_file(s->file); + struct ufs_event_hist *e = hba->ufs_stats.event; + +#define PRT(fmt, typ) \ + seq_printf(s, fmt, e[UFS_EVT_ ## typ].cnt) + + PRT("PHY Adapter Layer errors (except LINERESET): %llu\n", PA_ERR); + PRT("Data Link Layer errors: %llu\n", DL_ERR); + PRT("Network Layer errors: %llu\n", NL_ERR); + PRT("Transport Layer errors: %llu\n", TL_ERR); + PRT("Generic DME errors: %llu\n", DME_ERR); + PRT("Auto-hibernate errors: %llu\n", AUTO_HIBERN8_ERR); + PRT("IS Fatal errors (CEFES, SBFES, HCFES, DFES): %llu\n", FATAL_ERR); + PRT("DME Link Startup errors: %llu\n", LINK_STARTUP_FAIL); + PRT("PM Resume errors: %llu\n", RESUME_ERR); + PRT("PM Suspend errors : %llu\n", SUSPEND_ERR); + PRT("Logical Unit Resets: %llu\n", DEV_RESET); + PRT("Host Resets: %llu\n", HOST_RESET); + PRT("SCSI command aborts: %llu\n", ABORT); +#undef PRT + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats); + +static int ee_usr_mask_get(void *data, u64 *val) +{ + struct ufs_hba *hba = data; + + *val = hba->ee_usr_mask; + return 0; +} + +static int ufs_debugfs_get_user_access(struct ufs_hba *hba) +__acquires(&hba->host_sem) +{ + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + ufshcd_rpm_get_sync(hba); + return 0; +} + +static void ufs_debugfs_put_user_access(struct ufs_hba *hba) +__releases(&hba->host_sem) +{ + ufshcd_rpm_put_sync(hba); + up(&hba->host_sem); +} + +static int ee_usr_mask_set(void *data, u64 val) +{ + struct ufs_hba *hba = data; + int err; + + if (val & ~(u64)MASK_EE_STATUS) + return -EINVAL; + err = ufs_debugfs_get_user_access(hba); + if (err) + return err; + err = ufshcd_update_ee_usr_mask(hba, val, MASK_EE_STATUS); + ufs_debugfs_put_user_access(hba); + return err; +} + +DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n"); + +void ufs_debugfs_exception_event(struct ufs_hba 
*hba, u16 status) +{ + bool chgd = false; + u16 ee_ctrl_mask; + int err = 0; + + if (!hba->debugfs_ee_rate_limit_ms || !status) + return; + + mutex_lock(&hba->ee_ctrl_mutex); + ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status); + chgd = ee_ctrl_mask != hba->ee_ctrl_mask; + if (chgd) { + err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); + if (err) + dev_err(hba->dev, "%s: failed to write ee control %d\n", + __func__, err); + } + mutex_unlock(&hba->ee_ctrl_mutex); + + if (chgd && !err) { + unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms); + + queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay); + } +} + +static void ufs_debugfs_restart_ee(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work); + + if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) || + ufs_debugfs_get_user_access(hba)) + return; + ufshcd_write_ee_control(hba); + ufs_debugfs_put_user_access(hba); +} + +static int ufs_saved_err_show(struct seq_file *s, void *data) +{ + struct ufs_debugfs_attr *attr = s->private; + struct ufs_hba *hba = hba_from_file(s->file); + const int *p; + + if (strcmp(attr->name, "saved_err") == 0) { + p = &hba->saved_err; + } else if (strcmp(attr->name, "saved_uic_err") == 0) { + p = &hba->saved_uic_err; + } else { + return -ENOENT; + } + + seq_printf(s, "%d\n", *p); + return 0; +} + +static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ufs_debugfs_attr *attr = file->f_inode->i_private; + struct ufs_hba *hba = hba_from_file(file); + char val_str[16] = { }; + int val, ret; + + /* Reserve room for the terminating NUL, kstrtoint() needs it */ + if (count >= sizeof(val_str)) + return -EINVAL; + if (copy_from_user(val_str, buf, count)) + return -EFAULT; + ret = kstrtoint(val_str, 0, &val); + if (ret < 0) + return ret; + + spin_lock_irq(hba->host->host_lock); + if (strcmp(attr->name, "saved_err") == 0) { + hba->saved_err = val; + } else if (strcmp(attr->name, "saved_uic_err") == 0) { + hba->saved_uic_err = val; + } else { + ret = -ENOENT; + } + if (ret == 0) + ufshcd_schedule_eh_work(hba); + spin_unlock_irq(hba->host->host_lock); + + return ret < 0 ? 
ret : count; +} + +static int ufs_saved_err_open(struct inode *inode, struct file *file) +{ + return single_open(file, ufs_saved_err_show, inode->i_private); +} + +static const struct file_operations ufs_saved_err_fops = { + .owner = THIS_MODULE, + .open = ufs_saved_err_open, + .read = seq_read, + .write = ufs_saved_err_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct ufs_debugfs_attr ufs_attrs[] = { + { "stats", 0400, &ufs_debugfs_stats_fops }, + { "saved_err", 0600, &ufs_saved_err_fops }, + { "saved_uic_err", 0600, &ufs_saved_err_fops }, + { } +}; + +void ufs_debugfs_hba_init(struct ufs_hba *hba) +{ + const struct ufs_debugfs_attr *attr; + struct dentry *root; + + /* Set default exception event rate limit period to 20ms */ + hba->debugfs_ee_rate_limit_ms = 20; + INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee); + + root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root); + if (IS_ERR_OR_NULL(root)) + return; + hba->debugfs_root = root; + d_inode(root)->i_private = hba; + for (attr = ufs_attrs; attr->name; attr++) + debugfs_create_file(attr->name, attr->mode, root, (void *)attr, + attr->fops); + debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root, + hba, &ee_usr_mask_fops); + debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root, + &hba->debugfs_ee_rate_limit_ms); +} + +void ufs_debugfs_hba_exit(struct ufs_hba *hba) +{ + debugfs_remove_recursive(hba->debugfs_root); + cancel_delayed_work_sync(&hba->debugfs_ee_work); +} diff --git a/drivers/ufs/core/ufs-debugfs.h b/drivers/ufs/core/ufs-debugfs.h new file mode 100644 index 0000000000..97548a3f90 --- /dev/null +++ b/drivers/ufs/core/ufs-debugfs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020 Intel Corporation + */ + +#ifndef __UFS_DEBUGFS_H__ +#define __UFS_DEBUGFS_H__ + +struct ufs_hba; + +#ifdef CONFIG_DEBUG_FS +void __init ufs_debugfs_init(void); +void ufs_debugfs_exit(void); +void ufs_debugfs_hba_init(struct ufs_hba *hba); +void ufs_debugfs_hba_exit(struct ufs_hba *hba); +void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status); +#else +static inline void ufs_debugfs_init(void) {} +static inline void ufs_debugfs_exit(void) {} +static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {} +static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {} +static inline void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status) {} +#endif + +#endif diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c new file mode 100644 index 0000000000..7ac7c4e7ff --- /dev/null +++ b/drivers/ufs/core/ufs-fault-injection.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include +#include "ufs-fault-injection.h" + +static int ufs_fault_get(char *buffer, const struct kernel_param *kp); +static int ufs_fault_set(const char *val, const struct kernel_param *kp); + +static const struct kernel_param_ops ufs_fault_ops = { + .get = ufs_fault_get, + .set = ufs_fault_set, +}; + +enum { FAULT_INJ_STR_SIZE = 80 }; + +/* + * For more details about fault injection, please refer to + * Documentation/fault-injection/fault-injection.rst. + */ +static char g_trigger_eh_str[FAULT_INJ_STR_SIZE]; +module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644); +MODULE_PARM_DESC(trigger_eh, + "Fault injection. 
trigger_eh=<interval>,<probability>,<space>,<times>"); +static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr); + +static char g_timeout_str[FAULT_INJ_STR_SIZE]; +module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644); +MODULE_PARM_DESC(timeout, + "Fault injection. timeout=<interval>,<probability>,<space>,<times>"); +static DECLARE_FAULT_ATTR(ufs_timeout_attr); + +static int ufs_fault_get(char *buffer, const struct kernel_param *kp) +{ + const char *fault_str = kp->arg; + + return sysfs_emit(buffer, "%s\n", fault_str); +} + +static int ufs_fault_set(const char *val, const struct kernel_param *kp) +{ + struct fault_attr *attr = NULL; + + if (kp->arg == g_trigger_eh_str) + attr = &ufs_trigger_eh_attr; + else if (kp->arg == g_timeout_str) + attr = &ufs_timeout_attr; + + if (WARN_ON_ONCE(!attr)) + return -EINVAL; + + if (!setup_fault_attr(attr, (char *)val)) + return -EINVAL; + + strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE); + + return 0; +} + +bool ufs_trigger_eh(void) +{ + return should_fail(&ufs_trigger_eh_attr, 1); +} + +bool ufs_fail_completion(void) +{ + return should_fail(&ufs_timeout_attr, 1); +} diff --git a/drivers/ufs/core/ufs-fault-injection.h b/drivers/ufs/core/ufs-fault-injection.h new file mode 100644 index 0000000000..6d0cd8e10c --- /dev/null +++ b/drivers/ufs/core/ufs-fault-injection.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _UFS_FAULT_INJECTION_H +#define _UFS_FAULT_INJECTION_H + +#include +#include + +#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION +bool ufs_trigger_eh(void); +bool ufs_fail_completion(void); +#else +static inline bool ufs_trigger_eh(void) +{ + return false; +} + +static inline bool ufs_fail_completion(void) +{ + return false; +} +#endif + +#endif /* _UFS_FAULT_INJECTION_H */ diff --git a/drivers/ufs/core/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c new file mode 100644 index 0000000000..4c6a872b7a --- /dev/null +++ b/drivers/ufs/core/ufs-hwmon.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * UFS hardware monitoring support + * Copyright (c) 2021, Western Digital Corporation + */ + +#include +#include + +#include +#include "ufshcd-priv.h" + +struct ufs_hwmon_data { + struct ufs_hba *hba; + u8 mask; +}; + +static int ufs_read_temp_enable(struct ufs_hba *hba, u8 mask, long *val) +{ + u32 ee_mask; + int err; + + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0, + &ee_mask); + if (err) + return err; + + *val = (mask & ee_mask & MASK_EE_TOO_HIGH_TEMP) || (mask & ee_mask & MASK_EE_TOO_LOW_TEMP); + + return 0; +} + +static int ufs_get_temp(struct ufs_hba *hba, enum attr_idn idn, long *val) +{ + u32 value; + int err; + + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, 0, 0, &value); + if (err) + return err; + + if (value == 0) + return -ENODATA; + + *val = ((long)value - 80) * MILLIDEGREE_PER_DEGREE; + + return 0; +} + +static int ufs_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, + long *val) +{ + struct ufs_hwmon_data *data = dev_get_drvdata(dev); + struct ufs_hba *hba = data->hba; + int err; + + down(&hba->host_sem); + + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + + switch (attr) { + case hwmon_temp_enable: + err = ufs_read_temp_enable(hba, data->mask, val); + + break; + case hwmon_temp_crit: + err = ufs_get_temp(hba, QUERY_ATTR_IDN_HIGH_TEMP_BOUND, val); + + break; + case hwmon_temp_lcrit: + err = ufs_get_temp(hba, QUERY_ATTR_IDN_LOW_TEMP_BOUND, val); + + break; + case hwmon_temp_input: + err = ufs_get_temp(hba, QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 
val); + + break; + default: + err = -EOPNOTSUPP; + + break; + } + + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + + return err; +} + +static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, + long val) +{ + struct ufs_hwmon_data *data = dev_get_drvdata(dev); + struct ufs_hba *hba = data->hba; + int err; + + if (attr != hwmon_temp_enable) + return -EINVAL; + + if (val != 0 && val != 1) + return -EINVAL; + + down(&hba->host_sem); + + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + + if (val == 1) + err = ufshcd_update_ee_usr_mask(hba, MASK_EE_URGENT_TEMP, 0); + else + err = ufshcd_update_ee_usr_mask(hba, 0, MASK_EE_URGENT_TEMP); + + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + + return err; +} + +static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr, + int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_enable: + return 0644; + case hwmon_temp_crit: + case hwmon_temp_lcrit: + case hwmon_temp_input: + return 0444; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *ufs_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT), + NULL +}; + +static const struct hwmon_ops ufs_hwmon_ops = { + .is_visible = ufs_hwmon_is_visible, + .read = ufs_hwmon_read, + .write = ufs_hwmon_write, +}; + +static const struct hwmon_chip_info ufs_hwmon_hba_info = { + .ops = &ufs_hwmon_ops, + .info = ufs_hwmon_info, +}; + +void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) +{ + struct device *dev = hba->dev; + struct ufs_hwmon_data *data; + struct device *hwmon; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + data->hba = hba; + data->mask = mask; + + hwmon = hwmon_device_register_with_info(dev, "ufs", data, &ufs_hwmon_hba_info, NULL); + if (IS_ERR(hwmon)) { + dev_warn(dev, "Failed to instantiate hwmon device\n"); + kfree(data); + return; + } + + hba->hwmon_device = hwmon; +} + +void ufs_hwmon_remove(struct ufs_hba *hba) +{ + struct ufs_hwmon_data *data; + + if (!hba->hwmon_device) + return; + + data = dev_get_drvdata(hba->hwmon_device); + hwmon_device_unregister(hba->hwmon_device); + hba->hwmon_device = NULL; + kfree(data); +} + +void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) +{ + if (!hba->hwmon_device) + return; + + if (ee_mask & MASK_EE_TOO_HIGH_TEMP) + hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_max_alarm, 0); + + if (ee_mask & MASK_EE_TOO_LOW_TEMP) + hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_min_alarm, 0); +} diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c new file mode 100644 index 0000000000..0a088b47d5 --- /dev/null +++ b/drivers/ufs/core/ufs-sysfs.c @@ -0,0 +1,1268 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Western Digital Corporation + +#include +#include +#include +#include + +#include +#include "ufs-sysfs.h" +#include "ufshcd-priv.h" + +static const char *ufshcd_uic_link_state_to_string( + enum uic_link_state state) +{ + switch (state) { + case UIC_LINK_OFF_STATE: return "OFF"; + case UIC_LINK_ACTIVE_STATE: return "ACTIVE"; + case UIC_LINK_HIBERN8_STATE: return "HIBERN8"; + case UIC_LINK_BROKEN_STATE: return "BROKEN"; + default: return "UNKNOWN"; + } +} + +static const char *ufshcd_ufs_dev_pwr_mode_to_string( + enum ufs_dev_pwr_mode state) +{ + switch (state) { + case UFS_ACTIVE_PWR_MODE: return "ACTIVE"; 
+ case UFS_SLEEP_PWR_MODE: return "SLEEP"; + case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN"; + case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP"; + default: return "UNKNOWN"; + } +} + +static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count, + bool rpm) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_dev_info *dev_info = &hba->dev_info; + unsigned long flags, value; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + if (value >= UFS_PM_LVL_MAX) + return -EINVAL; + + if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE && + (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) || + !(dev_info->wspecversion >= 0x310))) + return -EINVAL; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (rpm) + hba->rpm_lvl = value; + else + hba->spm_lvl = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static ssize_t rpm_lvl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->rpm_lvl); +} + +static ssize_t rpm_lvl_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true); +} + +static ssize_t rpm_target_dev_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[hba->rpm_lvl].dev_state)); +} + +static ssize_t rpm_target_link_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string( + ufs_pm_lvl_states[hba->rpm_lvl].link_state)); +} + +static ssize_t spm_lvl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->spm_lvl); +} + +static ssize_t spm_lvl_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false); +} + +static ssize_t spm_target_dev_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[hba->spm_lvl].dev_state)); +} + +static ssize_t spm_target_link_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string( + ufs_pm_lvl_states[hba->spm_lvl].link_state)); +} + +/* Convert Auto-Hibernate Idle Timer register value to microseconds */ +static int ufshcd_ahit_to_us(u32 ahit) +{ + int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit); + int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit); + + for (; scale > 0; --scale) + timer *= UFSHCI_AHIBERN8_SCALE_FACTOR; + + return timer; +} + +/* Convert microseconds to Auto-Hibernate Idle Timer register value */ +static u32 ufshcd_us_to_ahit(unsigned int timer) +{ + unsigned int scale; + + for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale) + timer /= UFSHCI_AHIBERN8_SCALE_FACTOR; + + return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) | + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale); +} + +static ssize_t auto_hibern8_show(struct device *dev, + struct 
device_attribute *attr, char *buf) +{ + u32 ahit; + int ret; + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!ufshcd_is_auto_hibern8_supported(hba)) + return -EOPNOTSUPP; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + pm_runtime_get_sync(hba->dev); + ufshcd_hold(hba, false); + ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); + ufshcd_release(hba); + pm_runtime_put_sync(hba->dev); + + ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit)); + +out: + up(&hba->host_sem); + return ret; +} + +static ssize_t auto_hibern8_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned int timer; + int ret = 0; + + if (!ufshcd_is_auto_hibern8_supported(hba)) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &timer)) + return -EINVAL; + + if (timer > UFSHCI_AHIBERN8_MAX) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer)); + +out: + up(&hba->host_sem); + return ret ? ret : count; +} + +static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled); +} + +static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned int wb_enable; + ssize_t res; + + if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) { + /* + * If the platform supports UFSHCD_CAP_CLK_SCALING, turn WB + * on/off will be done while clock scaling up/down. + */ + dev_warn(dev, "To control WB through wb_on is not allowed!\n"); + return -EOPNOTSUPP; + } + + if (kstrtouint(buf, 0, &wb_enable)) + return -EINVAL; + + if (wb_enable != 0 && wb_enable != 1) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + res = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + res = ufshcd_wb_toggle(hba, wb_enable); + ufshcd_rpm_put_sync(hba); +out: + up(&hba->host_sem); + return res < 0 ? 
res : count; +} + +static DEVICE_ATTR_RW(rpm_lvl); +static DEVICE_ATTR_RO(rpm_target_dev_state); +static DEVICE_ATTR_RO(rpm_target_link_state); +static DEVICE_ATTR_RW(spm_lvl); +static DEVICE_ATTR_RO(spm_target_dev_state); +static DEVICE_ATTR_RO(spm_target_link_state); +static DEVICE_ATTR_RW(auto_hibern8); +static DEVICE_ATTR_RW(wb_on); + +static struct attribute *ufs_sysfs_ufshcd_attrs[] = { + &dev_attr_rpm_lvl.attr, + &dev_attr_rpm_target_dev_state.attr, + &dev_attr_rpm_target_link_state.attr, + &dev_attr_spm_lvl.attr, + &dev_attr_spm_target_dev_state.attr, + &dev_attr_spm_target_link_state.attr, + &dev_attr_auto_hibern8.attr, + &dev_attr_wb_on.attr, + NULL +}; + +static const struct attribute_group ufs_sysfs_default_group = { + .attrs = ufs_sysfs_ufshcd_attrs, +}; + +static ssize_t monitor_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->monitor.enabled); +} + +static ssize_t monitor_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long value, flags; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + value = !!value; + spin_lock_irqsave(hba->host->host_lock, flags); + if (value == hba->monitor.enabled) + goto out_unlock; + + if (!value) { + memset(&hba->monitor, 0, sizeof(hba->monitor)); + } else { + hba->monitor.enabled = true; + hba->monitor.enabled_ts = ktime_get(); + } + +out_unlock: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static ssize_t monitor_chunk_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size); +} + +static ssize_t monitor_chunk_size_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long value, flags; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* Only allow chunk size change when monitor is disabled */ + if (!hba->monitor.enabled) + hba->monitor.chunk_size = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static ssize_t read_total_sectors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]); +} + +static ssize_t read_total_busy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.total_busy[READ])); +} + +static ssize_t read_nr_requests_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]); +} + +static ssize_t read_req_latency_avg_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_hba_monitor *m = &hba->monitor; + + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]), + m->nr_req[READ])); +} + +static ssize_t read_req_latency_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + 
ktime_to_us(hba->monitor.lat_max[READ])); +} + +static ssize_t read_req_latency_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_min[READ])); +} + +static ssize_t read_req_latency_sum_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_sum[READ])); +} + +static ssize_t write_total_sectors_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]); +} + +static ssize_t write_total_busy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.total_busy[WRITE])); +} + +static ssize_t write_nr_requests_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]); +} + +static ssize_t write_req_latency_avg_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_hba_monitor *m = &hba->monitor; + + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]), + m->nr_req[WRITE])); +} + +static ssize_t write_req_latency_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_max[WRITE])); +} + +static ssize_t write_req_latency_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_min[WRITE])); +} + +static ssize_t write_req_latency_sum_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_sum[WRITE])); +} + +static DEVICE_ATTR_RW(monitor_enable); +static DEVICE_ATTR_RW(monitor_chunk_size); +static DEVICE_ATTR_RO(read_total_sectors); +static DEVICE_ATTR_RO(read_total_busy); +static DEVICE_ATTR_RO(read_nr_requests); +static DEVICE_ATTR_RO(read_req_latency_avg); +static DEVICE_ATTR_RO(read_req_latency_max); +static DEVICE_ATTR_RO(read_req_latency_min); +static DEVICE_ATTR_RO(read_req_latency_sum); +static DEVICE_ATTR_RO(write_total_sectors); +static DEVICE_ATTR_RO(write_total_busy); +static DEVICE_ATTR_RO(write_nr_requests); +static DEVICE_ATTR_RO(write_req_latency_avg); +static DEVICE_ATTR_RO(write_req_latency_max); +static DEVICE_ATTR_RO(write_req_latency_min); +static DEVICE_ATTR_RO(write_req_latency_sum); + +static struct attribute *ufs_sysfs_monitor_attrs[] = { + &dev_attr_monitor_enable.attr, + &dev_attr_monitor_chunk_size.attr, + &dev_attr_read_total_sectors.attr, + &dev_attr_read_total_busy.attr, + &dev_attr_read_nr_requests.attr, + &dev_attr_read_req_latency_avg.attr, + &dev_attr_read_req_latency_max.attr, + &dev_attr_read_req_latency_min.attr, + &dev_attr_read_req_latency_sum.attr, + &dev_attr_write_total_sectors.attr, + &dev_attr_write_total_busy.attr, + &dev_attr_write_nr_requests.attr, + &dev_attr_write_req_latency_avg.attr, + 
&dev_attr_write_req_latency_max.attr, + &dev_attr_write_req_latency_min.attr, + &dev_attr_write_req_latency_sum.attr, + NULL +}; + +static const struct attribute_group ufs_sysfs_monitor_group = { + .name = "monitor", + .attrs = ufs_sysfs_monitor_attrs, +}; + +static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + u8 desc_index, + u8 param_offset, + u8 *sysfs_buf, + u8 param_size) +{ + u8 desc_buf[8] = {0}; + int ret; + + if (param_size > 8) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_read_desc_param(hba, desc_id, desc_index, + param_offset, desc_buf, param_size); + ufshcd_rpm_put_sync(hba); + if (ret) { + ret = -EINVAL; + goto out; + } + + switch (param_size) { + case 1: + ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf); + break; + case 2: + ret = sysfs_emit(sysfs_buf, "0x%04X\n", + get_unaligned_be16(desc_buf)); + break; + case 4: + ret = sysfs_emit(sysfs_buf, "0x%08X\n", + get_unaligned_be32(desc_buf)); + break; + case 8: + ret = sysfs_emit(sysfs_buf, "0x%016llX\n", + get_unaligned_be64(desc_buf)); + break; + } + +out: + up(&hba->host_sem); + return ret; +} + +#define UFS_DESC_PARAM(_name, _puname, _duname, _size) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ + 0, _duname##_DESC_PARAM##_puname, buf, _size); \ +} \ +static DEVICE_ATTR_RO(_name) + +#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, DEVICE, _size) + +UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1); +UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1); +UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1); +UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1); +UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1); +UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1); +UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1); +UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1); +UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1); +UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1); +UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1); +UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1); +UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1); +UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1); +UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2); +UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2); +UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2); +UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1); +UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2); +UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1); +UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1); +UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1); +UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2); +UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1); +UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4); +UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1); +UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2); +UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1); +UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4); +UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1); +UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1); +UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4); + 
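+/* + * Each UFS_DEVICE_DESC_PARAM() entry above expands, through UFS_DESC_PARAM(), + * into a read-only sysfs attribute backed by ufs_sysfs_read_desc_param(). + * For example, UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1) becomes + * roughly: + * + * static ssize_t device_type_show(struct device *dev, + * struct device_attribute *attr, char *buf) + * { + * struct ufs_hba *hba = dev_get_drvdata(dev); + * return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, + * 0, DEVICE_DESC_PARAM_DEVICE_TYPE, buf, 1); + * } + * static DEVICE_ATTR_RO(device_type); + */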
+static struct attribute *ufs_sysfs_device_descriptor[] = { + &dev_attr_device_type.attr, + &dev_attr_device_class.attr, + &dev_attr_device_sub_class.attr, + &dev_attr_protocol.attr, + &dev_attr_number_of_luns.attr, + &dev_attr_number_of_wluns.attr, + &dev_attr_boot_enable.attr, + &dev_attr_descriptor_access_enable.attr, + &dev_attr_initial_power_mode.attr, + &dev_attr_high_priority_lun.attr, + &dev_attr_secure_removal_type.attr, + &dev_attr_support_security_lun.attr, + &dev_attr_bkops_termination_latency.attr, + &dev_attr_initial_active_icc_level.attr, + &dev_attr_specification_version.attr, + &dev_attr_manufacturing_date.attr, + &dev_attr_manufacturer_id.attr, + &dev_attr_rtt_capability.attr, + &dev_attr_rtc_update.attr, + &dev_attr_ufs_features.attr, + &dev_attr_ffu_timeout.attr, + &dev_attr_queue_depth.attr, + &dev_attr_device_version.attr, + &dev_attr_number_of_secure_wpa.attr, + &dev_attr_psa_max_data_size.attr, + &dev_attr_psa_state_timeout.attr, + &dev_attr_hpb_version.attr, + &dev_attr_hpb_control.attr, + &dev_attr_ext_feature_sup.attr, + &dev_attr_wb_presv_us_en.attr, + &dev_attr_wb_type.attr, + &dev_attr_wb_shared_alloc_units.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_device_descriptor_group = { + .name = "device_descriptor", + .attrs = ufs_sysfs_device_descriptor, +}; + +#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size) + +UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2); +UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2); + +static struct attribute *ufs_sysfs_interconnect_descriptor[] = { + &dev_attr_unipro_version.attr, + &dev_attr_mphy_version.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = { + .name = "interconnect_descriptor", + .attrs = ufs_sysfs_interconnect_descriptor, +}; + +#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size) + +UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8); +UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1); +UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4); +UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1); +UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1); +UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1); +UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1); +UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2); +UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units, + _SCM_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor, + _SCM_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units, + _NPM_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor, + _NPM_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units, + _ENM1_MAX_NUM_UNITS, 4); 
+UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor, + _ENM1_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units, + _ENM2_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor, + _ENM2_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units, + _ENM3_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor, + _ENM3_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units, + _ENM4_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor, + _ENM4_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2); +UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1); +UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1); +UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1); +UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1); + + +static struct attribute *ufs_sysfs_geometry_descriptor[] = { + &dev_attr_raw_device_capacity.attr, + &dev_attr_max_number_of_luns.attr, + &dev_attr_segment_size.attr, + &dev_attr_allocation_unit_size.attr, + &dev_attr_min_addressable_block_size.attr, + &dev_attr_optimal_read_block_size.attr, + &dev_attr_optimal_write_block_size.attr, + &dev_attr_max_in_buffer_size.attr, + &dev_attr_max_out_buffer_size.attr, + &dev_attr_rpmb_rw_size.attr, + &dev_attr_dyn_capacity_resource_policy.attr, + &dev_attr_data_ordering.attr, + &dev_attr_max_number_of_contexts.attr, + &dev_attr_sys_data_tag_unit_size.attr, + &dev_attr_sys_data_tag_resource_size.attr, + &dev_attr_secure_removal_types.attr, + &dev_attr_memory_types.attr, + &dev_attr_sys_code_memory_max_alloc_units.attr, + &dev_attr_sys_code_memory_capacity_adjustment_factor.attr, + &dev_attr_non_persist_memory_max_alloc_units.attr, + &dev_attr_non_persist_memory_capacity_adjustment_factor.attr, + &dev_attr_enh1_memory_max_alloc_units.attr, + &dev_attr_enh1_memory_capacity_adjustment_factor.attr, + &dev_attr_enh2_memory_max_alloc_units.attr, + &dev_attr_enh2_memory_capacity_adjustment_factor.attr, + &dev_attr_enh3_memory_max_alloc_units.attr, + &dev_attr_enh3_memory_capacity_adjustment_factor.attr, + &dev_attr_enh4_memory_max_alloc_units.attr, + &dev_attr_enh4_memory_capacity_adjustment_factor.attr, + &dev_attr_hpb_region_size.attr, + &dev_attr_hpb_number_lu.attr, + &dev_attr_hpb_subregion_size.attr, + &dev_attr_hpb_max_active_regions.attr, + &dev_attr_wb_max_alloc_units.attr, + &dev_attr_wb_max_wb_luns.attr, + &dev_attr_wb_buff_cap_adj.attr, + &dev_attr_wb_sup_red_type.attr, + &dev_attr_wb_sup_wb_type.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_geometry_descriptor_group = { + .name = "geometry_descriptor", + .attrs = ufs_sysfs_geometry_descriptor, +}; + +#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, HEALTH, _size) + +UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1); +UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1); +UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1); + +static struct attribute *ufs_sysfs_health_descriptor[] = { + &dev_attr_eol_info.attr, + &dev_attr_life_time_estimation_a.attr, + &dev_attr_life_time_estimation_b.attr, + NULL, +}; + +static const struct 
attribute_group ufs_sysfs_health_descriptor_group = { + .name = "health_descriptor", + .attrs = ufs_sysfs_health_descriptor, +}; + +#define UFS_POWER_DESC_PARAM(_name, _uname, _index) \ +static ssize_t _name##_index##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \ + PWR_DESC##_uname##_0 + _index * 2, buf, 2); \ +} \ +static DEVICE_ATTR_RO(_name##_index) + +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11); 
+UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15); + +static struct attribute *ufs_sysfs_power_descriptor[] = { + &dev_attr_active_icc_levels_vcc0.attr, + &dev_attr_active_icc_levels_vcc1.attr, + &dev_attr_active_icc_levels_vcc2.attr, + &dev_attr_active_icc_levels_vcc3.attr, + &dev_attr_active_icc_levels_vcc4.attr, + &dev_attr_active_icc_levels_vcc5.attr, + &dev_attr_active_icc_levels_vcc6.attr, + &dev_attr_active_icc_levels_vcc7.attr, + &dev_attr_active_icc_levels_vcc8.attr, + &dev_attr_active_icc_levels_vcc9.attr, + &dev_attr_active_icc_levels_vcc10.attr, + &dev_attr_active_icc_levels_vcc11.attr, + &dev_attr_active_icc_levels_vcc12.attr, + &dev_attr_active_icc_levels_vcc13.attr, + &dev_attr_active_icc_levels_vcc14.attr, + &dev_attr_active_icc_levels_vcc15.attr, + &dev_attr_active_icc_levels_vccq0.attr, + &dev_attr_active_icc_levels_vccq1.attr, + &dev_attr_active_icc_levels_vccq2.attr, + &dev_attr_active_icc_levels_vccq3.attr, + &dev_attr_active_icc_levels_vccq4.attr, + &dev_attr_active_icc_levels_vccq5.attr, + &dev_attr_active_icc_levels_vccq6.attr, + &dev_attr_active_icc_levels_vccq7.attr, + &dev_attr_active_icc_levels_vccq8.attr, + &dev_attr_active_icc_levels_vccq9.attr, + &dev_attr_active_icc_levels_vccq10.attr, + &dev_attr_active_icc_levels_vccq11.attr, + &dev_attr_active_icc_levels_vccq12.attr, + &dev_attr_active_icc_levels_vccq13.attr, + &dev_attr_active_icc_levels_vccq14.attr, + &dev_attr_active_icc_levels_vccq15.attr, + &dev_attr_active_icc_levels_vccq20.attr, + &dev_attr_active_icc_levels_vccq21.attr, + &dev_attr_active_icc_levels_vccq22.attr, + &dev_attr_active_icc_levels_vccq23.attr, + &dev_attr_active_icc_levels_vccq24.attr, + &dev_attr_active_icc_levels_vccq25.attr, + &dev_attr_active_icc_levels_vccq26.attr, + &dev_attr_active_icc_levels_vccq27.attr, + &dev_attr_active_icc_levels_vccq28.attr, + &dev_attr_active_icc_levels_vccq29.attr, + &dev_attr_active_icc_levels_vccq210.attr, + &dev_attr_active_icc_levels_vccq211.attr, + &dev_attr_active_icc_levels_vccq212.attr, + &dev_attr_active_icc_levels_vccq213.attr, + &dev_attr_active_icc_levels_vccq214.attr, + &dev_attr_active_icc_levels_vccq215.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_power_descriptor_group = { + .name = "power_descriptor", + .attrs = ufs_sysfs_power_descriptor, +}; + +#define UFS_STRING_DESCRIPTOR(_name, _pname) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + u8 index; \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + int ret; \ + int desc_len = QUERY_DESC_MAX_SIZE; \ + u8 *desc_buf; \ + \ + down(&hba->host_sem); \ + if (!ufshcd_is_user_access_allowed(hba)) { \ + up(&hba->host_sem); \ + return -EBUSY; \ + } \ + desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ + if (!desc_buf) { \ + up(&hba->host_sem); \ + return -ENOMEM; \ + } \ + ufshcd_rpm_get_sync(hba); \ + ret = ufshcd_query_descriptor_retry(hba, \ + UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \ + 0, 0, desc_buf, &desc_len); \ + if (ret) { \ + ret = -EINVAL; \ + goto out; \ + } \ + index = desc_buf[DEVICE_DESC_PARAM##_pname]; \ + kfree(desc_buf); \ + desc_buf = NULL; \ + ret = ufshcd_read_string_desc(hba, index, &desc_buf, \ + SD_ASCII_STD); \ + if (ret < 0) \ + goto out; \ + ret = sysfs_emit(buf, "%s\n", desc_buf); \ +out: \ + 
ufshcd_rpm_put_sync(hba); \ + kfree(desc_buf); \ + up(&hba->host_sem); \ + return ret; \ +} \ +static DEVICE_ATTR_RO(_name) + +UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME); +UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME); +UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID); +UFS_STRING_DESCRIPTOR(serial_number, _SN); +UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV); + +static struct attribute *ufs_sysfs_string_descriptors[] = { + &dev_attr_manufacturer_name.attr, + &dev_attr_product_name.attr, + &dev_attr_oem_id.attr, + &dev_attr_serial_number.attr, + &dev_attr_product_revision.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_string_descriptors_group = { + .name = "string_descriptors", + .attrs = ufs_sysfs_string_descriptors, +}; + +static inline bool ufshcd_is_wb_flags(enum flag_idn idn) +{ + return idn >= QUERY_FLAG_IDN_WB_EN && + idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8; +} + +#define UFS_FLAG(_name, _uname) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + bool flag; \ + u8 index = 0; \ + int ret; \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + \ + down(&hba->host_sem); \ + if (!ufshcd_is_user_access_allowed(hba)) { \ + up(&hba->host_sem); \ + return -EBUSY; \ + } \ + if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \ + index = ufshcd_wb_get_query_index(hba); \ + ufshcd_rpm_get_sync(hba); \ + ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \ + QUERY_FLAG_IDN##_uname, index, &flag); \ + ufshcd_rpm_put_sync(hba); \ + if (ret) { \ + ret = -EINVAL; \ + goto out; \ + } \ + ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \ +out: \ + up(&hba->host_sem); \ + return ret; \ +} \ +static DEVICE_ATTR_RO(_name) + +UFS_FLAG(device_init, _FDEVICEINIT); +UFS_FLAG(permanent_wpe, _PERMANENT_WPE); +UFS_FLAG(power_on_wpe, _PWR_ON_WPE); +UFS_FLAG(bkops_enable, _BKOPS_EN); +UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE); +UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL); +UFS_FLAG(busy_rtc, _BUSY_RTC); +UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE); +UFS_FLAG(wb_enable, _WB_EN); +UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN); +UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8); +UFS_FLAG(hpb_enable, _HPB_EN); + +static struct attribute *ufs_sysfs_device_flags[] = { + &dev_attr_device_init.attr, + &dev_attr_permanent_wpe.attr, + &dev_attr_power_on_wpe.attr, + &dev_attr_bkops_enable.attr, + &dev_attr_life_span_mode_enable.attr, + &dev_attr_phy_resource_removal.attr, + &dev_attr_busy_rtc.attr, + &dev_attr_disable_fw_update.attr, + &dev_attr_wb_enable.attr, + &dev_attr_wb_flush_en.attr, + &dev_attr_wb_flush_during_h8.attr, + &dev_attr_hpb_enable.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_flags_group = { + .name = "flags", + .attrs = ufs_sysfs_device_flags, +}; + +static inline bool ufshcd_is_wb_attrs(enum attr_idn idn) +{ + return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS && + idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE; +} + +#define UFS_ATTRIBUTE(_name, _uname) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + u32 value; \ + int ret; \ + u8 index = 0; \ + \ + down(&hba->host_sem); \ + if (!ufshcd_is_user_access_allowed(hba)) { \ + up(&hba->host_sem); \ + return -EBUSY; \ + } \ + if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \ + index = ufshcd_wb_get_query_index(hba); \ + ufshcd_rpm_get_sync(hba); \ + ret = ufshcd_query_attr(hba, 
UPIU_QUERY_OPCODE_READ_ATTR, \ + QUERY_ATTR_IDN##_uname, index, 0, &value); \ + ufshcd_rpm_put_sync(hba); \ + if (ret) { \ + ret = -EINVAL; \ + goto out; \ + } \ + ret = sysfs_emit(buf, "0x%08X\n", value); \ +out: \ + up(&hba->host_sem); \ + return ret; \ +} \ +static DEVICE_ATTR_RO(_name) + +UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN); +UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD); +UFS_ATTRIBUTE(current_power_mode, _POWER_MODE); +UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL); +UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN); +UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS); +UFS_ATTRIBUTE(purge_status, _PURGE_STATUS); +UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN); +UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT); +UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ); +UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK); +UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT); +UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL); +UFS_ATTRIBUTE(exception_event_status, _EE_STATUS); +UFS_ATTRIBUTE(ffu_status, _FFU_STATUS); +UFS_ATTRIBUTE(psa_state, _PSA_STATE); +UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE); +UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS); +UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE); +UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST); +UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE); + + +static struct attribute *ufs_sysfs_attributes[] = { + &dev_attr_boot_lun_enabled.attr, + &dev_attr_max_data_size_hpb_single_cmd.attr, + &dev_attr_current_power_mode.attr, + &dev_attr_active_icc_level.attr, + &dev_attr_ooo_data_enabled.attr, + &dev_attr_bkops_status.attr, + &dev_attr_purge_status.attr, + &dev_attr_max_data_in_size.attr, + &dev_attr_max_data_out_size.attr, + &dev_attr_reference_clock_frequency.attr, + &dev_attr_configuration_descriptor_lock.attr, + &dev_attr_max_number_of_rtt.attr, + &dev_attr_exception_event_control.attr, + &dev_attr_exception_event_status.attr, + &dev_attr_ffu_status.attr, + &dev_attr_psa_state.attr, + &dev_attr_psa_data_size.attr, + &dev_attr_wb_flush_status.attr, + &dev_attr_wb_avail_buf.attr, + &dev_attr_wb_life_time_est.attr, + &dev_attr_wb_cur_buf.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_attributes_group = { + .name = "attributes", + .attrs = ufs_sysfs_attributes, +}; + +static const struct attribute_group *ufs_sysfs_groups[] = { + &ufs_sysfs_default_group, + &ufs_sysfs_monitor_group, + &ufs_sysfs_device_descriptor_group, + &ufs_sysfs_interconnect_descriptor_group, + &ufs_sysfs_geometry_descriptor_group, + &ufs_sysfs_health_descriptor_group, + &ufs_sysfs_power_descriptor_group, + &ufs_sysfs_string_descriptors_group, + &ufs_sysfs_flags_group, + &ufs_sysfs_attributes_group, + NULL, +}; + +#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size) \ +static ssize_t _pname##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufs_hba *hba = shost_priv(sdev->host); \ + u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, \ + _duname##_DESC_PARAM##_puname)) \ + return -EINVAL; \ + return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ + lun, _duname##_DESC_PARAM##_puname, buf, _size); \ +} \ +static DEVICE_ATTR_RO(_pname) + +#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \ + UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size) + +UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1); +UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1); 
+UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1); +UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1); +UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1); +UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1); +UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1); +UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); +UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); +UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); +UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); +UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); +UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); +UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); +UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2); +UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2); +UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2); +UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); + +static struct attribute *ufs_sysfs_unit_descriptor[] = { + &dev_attr_lu_enable.attr, + &dev_attr_boot_lun_id.attr, + &dev_attr_lun_write_protect.attr, + &dev_attr_lun_queue_depth.attr, + &dev_attr_psa_sensitive.attr, + &dev_attr_lun_memory_type.attr, + &dev_attr_data_reliability.attr, + &dev_attr_logical_block_size.attr, + &dev_attr_logical_block_count.attr, + &dev_attr_erase_block_size.attr, + &dev_attr_provisioning_type.attr, + &dev_attr_physical_memory_resourse_count.attr, + &dev_attr_context_capabilities.attr, + &dev_attr_large_unit_granularity.attr, + &dev_attr_hpb_lu_max_active_regions.attr, + &dev_attr_hpb_pinned_region_start_offset.attr, + &dev_attr_hpb_number_pinned_regions.attr, + &dev_attr_wb_buf_alloc_units.attr, + NULL, +}; + +const struct attribute_group ufs_sysfs_unit_descriptor_group = { + .name = "unit_descriptor", + .attrs = ufs_sysfs_unit_descriptor, +}; + +static ssize_t dyn_cap_needed_attribute_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 value; + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba = shost_priv(sdev->host); + u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); + int ret; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value); + ufshcd_rpm_put_sync(hba); + if (ret) { + ret = -EINVAL; + goto out; + } + + ret = sysfs_emit(buf, "0x%08X\n", value); + +out: + up(&hba->host_sem); + return ret; +} +static DEVICE_ATTR_RO(dyn_cap_needed_attribute); + +static struct attribute *ufs_sysfs_lun_attributes[] = { + &dev_attr_dyn_cap_needed_attribute.attr, + NULL, +}; + +const struct attribute_group ufs_sysfs_lun_attributes_group = { + .attrs = ufs_sysfs_lun_attributes, +}; + +void ufs_sysfs_add_nodes(struct device *dev) +{ + int ret; + + ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups); + if (ret) + dev_err(dev, + "%s: sysfs groups creation failed (err = %d)\n", + __func__, ret); +} + +void ufs_sysfs_remove_nodes(struct device *dev) +{ + sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups); +} diff --git a/drivers/ufs/core/ufs-sysfs.h b/drivers/ufs/core/ufs-sysfs.h new file mode 100644 index 0000000000..8d94af3b80 --- /dev/null +++ b/drivers/ufs/core/ufs-sysfs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Western Digital Corporation + */ + +#ifndef __UFS_SYSFS_H__ +#define 
__UFS_SYSFS_H__
+
+#include <linux/sysfs.h>
+
+struct device;
+
+void ufs_sysfs_add_nodes(struct device *dev);
+void ufs_sysfs_remove_nodes(struct device *dev);
+
+extern const struct attribute_group ufs_sysfs_unit_descriptor_group;
+extern const struct attribute_group ufs_sysfs_lun_attributes_group;
+
+#endif
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
new file mode 100644
index 0000000000..b99e3f3dc4
--- /dev/null
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bsg endpoint that supports UPIUs
+ *
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include "ufs_bsg.h"
+#include <ufs/ufshcd.h>
+#include "ufshcd-priv.h"
+
+static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
+				       struct utp_upiu_query *qr)
+{
+	int desc_size = be16_to_cpu(qr->length);
+	int desc_id = qr->idn;
+
+	if (desc_size <= 0)
+		return -EINVAL;
+
+	ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
+	if (!*desc_len)
+		return -EINVAL;
+
+	*desc_len = min_t(int, *desc_len, desc_size);
+
+	return 0;
+}
+
+static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
+				     unsigned int request_len,
+				     unsigned int reply_len)
+{
+	int min_req_len = sizeof(struct ufs_bsg_request);
+	int min_rsp_len = sizeof(struct ufs_bsg_reply);
+
+	if (min_req_len > request_len || min_rsp_len > reply_len) {
+		dev_err(hba->dev, "not enough space assigned\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
+				     uint8_t **desc_buff, int *desc_len,
+				     enum query_opcode desc_op)
+{
+	struct ufs_bsg_request *bsg_request = job->request;
+	struct utp_upiu_query *qr;
+	u8 *descp;
+
+	if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
+	    desc_op != UPIU_QUERY_OPCODE_READ_DESC)
+		goto out;
+
+	qr = &bsg_request->upiu_req.qr;
+	if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
+		dev_err(hba->dev, "Illegal desc size\n");
+		return -EINVAL;
+	}
+
+	if (*desc_len > job->request_payload.payload_len) {
+		dev_err(hba->dev, "Illegal desc size\n");
+		return -EINVAL;
+	}
+
+	descp = kzalloc(*desc_len, GFP_KERNEL);
+	if (!descp)
+		return -ENOMEM;
+
+	/* for a write descriptor, copy the user payload into the bounce buffer */
+	if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
+		sg_copy_to_buffer(job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, descp,
+				  *desc_len);
+
+	*desc_buff = descp;
+
+out:
+	return 0;
+}
+
+static int ufs_bsg_request(struct bsg_job *job)
+{
+	struct ufs_bsg_request *bsg_request = job->request;
+	struct ufs_bsg_reply *bsg_reply = job->reply;
+	struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
+	unsigned int req_len = job->request_len;
+	unsigned int reply_len = job->reply_len;
+	struct uic_command uc = {};
+	int msgcode;
+	uint8_t *desc_buff = NULL;
+	int desc_len = 0;
+	enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
+	int ret;
+
+	ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
+	if (ret)
+		goto out;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	ufshcd_rpm_get_sync(hba);
+
+	msgcode = bsg_request->msgcode;
+	switch (msgcode) {
+	case UPIU_TRANSACTION_QUERY_REQ:
+		desc_op = bsg_request->upiu_req.qr.opcode;
+		ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
+						&desc_len, desc_op);
+		if (ret) {
+			ufshcd_rpm_put_sync(hba);
+			goto out;
+		}
+
+		fallthrough;
+	case UPIU_TRANSACTION_NOP_OUT:
+	case UPIU_TRANSACTION_TASK_REQ:
+		ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
+					       &bsg_reply->upiu_rsp, msgcode,
+					       desc_buff, &desc_len, desc_op);
+		if (ret)
+			dev_err(hba->dev,
+				"exe raw upiu: error code %d\n", ret);
+
+		break;
+	case UPIU_TRANSACTION_UIC_CMD:
+		memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
+		ret = ufshcd_send_uic_cmd(hba, &uc);
+		if (ret)
+			dev_err(hba->dev,
+				"send uic cmd: error code %d\n", ret);
+
+		memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
+
+		break;
+	default:
+		ret = -ENOTSUPP;
+		dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
+
+		break;
+	}
+
+	ufshcd_rpm_put_sync(hba);
+
+	if (!desc_buff)
+		goto out;
+
+	/* for a read descriptor, copy the result back to the user payload */
+	if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->request_payload.sg_list,
+					    job->request_payload.sg_cnt,
+					    desc_buff, desc_len);
+
+	kfree(desc_buff);
+
+out:
+	bsg_reply->result = ret;
+	job->reply_len = sizeof(struct ufs_bsg_reply);
+	/* complete the job here only if no error */
+	if (ret == 0)
+		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+
+	return ret;
+}
+
+/**
+ * ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
+ *
+ * Should be called when unloading the driver.
+ */
+void ufs_bsg_remove(struct ufs_hba *hba)
+{
+	struct device *bsg_dev = &hba->bsg_dev;
+
+	if (!hba->bsg_queue)
+		return;
+
+	bsg_remove_queue(hba->bsg_queue);
+
+	device_del(bsg_dev);
+	put_device(bsg_dev);
+}
+
+static inline void ufs_bsg_node_release(struct device *dev)
+{
+	put_device(dev->parent);
+}
+
+/**
+ * ufs_bsg_probe - Add ufs bsg device node
+ * @hba: per adapter object
+ *
+ * Called during initial loading of the driver, and before scsi_scan_host.
+ */
+int ufs_bsg_probe(struct ufs_hba *hba)
+{
+	struct device *bsg_dev = &hba->bsg_dev;
+	struct Scsi_Host *shost = hba->host;
+	struct device *parent = &shost->shost_gendev;
+	struct request_queue *q;
+	int ret;
+
+	device_initialize(bsg_dev);
+
+	bsg_dev->parent = get_device(parent);
+	bsg_dev->release = ufs_bsg_node_release;
+
+	dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
+
+	ret = device_add(bsg_dev);
+	if (ret)
+		goto out;
+
+	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
+	if (IS_ERR(q)) {
+		ret = PTR_ERR(q);
+		goto out;
+	}
+
+	hba->bsg_queue = q;
+
+	return 0;
+
+out:
+	dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
+	put_device(bsg_dev);
+	return ret;
+}
diff --git a/drivers/ufs/core/ufs_bsg.h b/drivers/ufs/core/ufs_bsg.h
new file mode 100644
index 0000000000..57712d2656
--- /dev/null
+++ b/drivers/ufs/core/ufs_bsg.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef UFS_BSG_H
+#define UFS_BSG_H
+
+struct ufs_hba;
+
+#ifdef CONFIG_SCSI_UFS_BSG
+void ufs_bsg_remove(struct ufs_hba *hba);
+int ufs_bsg_probe(struct ufs_hba *hba);
+#else
+static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
+static inline int ufs_bsg_probe(struct ufs_hba *hba) { return 0; }
+#endif
+
+#endif /* UFS_BSG_H */
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
new file mode 100644
index 0000000000..198360fe5e
--- /dev/null
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-crypto.h"
+
+/* Blk-crypto modes supported by UFS crypto */
+static const struct ufs_crypto_alg_entry {
+	enum ufs_crypto_alg ufs_alg;
+	enum ufs_crypto_key_size ufs_key_size;
+} ufs_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = {
+	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
+		.ufs_alg = UFS_CRYPTO_ALG_AES_XTS,
+		.ufs_key_size = UFS_CRYPTO_KEY_SIZE_256,
+	},
+};
+
+static int 
ufshcd_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot) +{ + int i; + u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg); + int err = 0; + + ufshcd_hold(hba, false); + + if (hba->vops && hba->vops->program_key) { + err = hba->vops->program_key(hba, cfg, slot); + goto out; + } + + /* Ensure that CFGE is cleared before programming the key */ + ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); + for (i = 0; i < 16; i++) { + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]), + slot_offset + i * sizeof(cfg->reg_val[0])); + } + /* Write dword 17 */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]), + slot_offset + 17 * sizeof(cfg->reg_val[0])); + /* Dword 16 must be written last */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]), + slot_offset + 16 * sizeof(cfg->reg_val[0])); +out: + ufshcd_release(hba); + return err; +} + +static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = + container_of(profile, struct ufs_hba, crypto_profile); + const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array; + const struct ufs_crypto_alg_entry *alg = + &ufs_crypto_algs[key->crypto_cfg.crypto_mode]; + u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512; + int i; + int cap_idx = -1; + union ufs_crypto_cfg_entry cfg = {}; + int err; + + BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); + for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) { + if (ccap_array[i].algorithm_id == alg->ufs_alg && + ccap_array[i].key_size == alg->ufs_key_size && + (ccap_array[i].sdus_mask & data_unit_mask)) { + cap_idx = i; + break; + } + } + + if (WARN_ON(cap_idx < 0)) + return -EOPNOTSUPP; + + cfg.data_unit_size = data_unit_mask; + cfg.crypto_cap_idx = cap_idx; + cfg.config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE; + + if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) { + /* In XTS mode, the blk_crypto_key's size is already doubled */ + memcpy(cfg.crypto_key, key->raw, key->size/2); + memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2, + key->raw + key->size/2, key->size/2); + } else { + memcpy(cfg.crypto_key, key->raw, key->size); + } + + err = ufshcd_program_key(hba, &cfg, slot); + + memzero_explicit(&cfg, sizeof(cfg)); + return err; +} + +static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) +{ + /* + * Clear the crypto cfg on the device. Clearing CFGE + * might not be sufficient, so just clear the entire cfg. + */ + union ufs_crypto_cfg_entry cfg = {}; + + return ufshcd_program_key(hba, &cfg, slot); +} + +static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = + container_of(profile, struct ufs_hba, crypto_profile); + + return ufshcd_clear_keyslot(hba, slot); +} + +bool ufshcd_crypto_enable(struct ufs_hba *hba) +{ + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) + return false; + + /* Reset might clear all keys, so reprogram all the keys. 
*/ + blk_crypto_reprogram_all_keys(&hba->crypto_profile); + return true; +} + +static const struct blk_crypto_ll_ops ufshcd_crypto_ops = { + .keyslot_program = ufshcd_crypto_keyslot_program, + .keyslot_evict = ufshcd_crypto_keyslot_evict, +}; + +static enum blk_crypto_mode_num +ufshcd_find_blk_crypto_mode(union ufs_crypto_cap_entry cap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ufs_crypto_algs); i++) { + BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); + if (ufs_crypto_algs[i].ufs_alg == cap.algorithm_id && + ufs_crypto_algs[i].ufs_key_size == cap.key_size) { + return i; + } + } + return BLK_ENCRYPTION_MODE_INVALID; +} + +/** + * ufshcd_hba_init_crypto_capabilities - Read crypto capabilities, init crypto + * fields in hba + * @hba: Per adapter instance + * + * Return: 0 if crypto was initialized or is not supported, else a -errno value. + */ +int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) +{ + int cap_idx; + int err = 0; + enum blk_crypto_mode_num blk_mode_num; + + /* + * Don't use crypto if either the hardware doesn't advertise the + * standard crypto capability bit *or* if the vendor specific driver + * hasn't advertised that crypto is supported. + */ + if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) || + !(hba->caps & UFSHCD_CAP_CRYPTO)) + goto out; + + hba->crypto_capabilities.reg_val = + cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); + hba->crypto_cfg_register = + (u32)hba->crypto_capabilities.config_array_ptr * 0x100; + hba->crypto_cap_array = + devm_kcalloc(hba->dev, hba->crypto_capabilities.num_crypto_cap, + sizeof(hba->crypto_cap_array[0]), GFP_KERNEL); + if (!hba->crypto_cap_array) { + err = -ENOMEM; + goto out; + } + + /* The actual number of configurations supported is (CFGC+1) */ + err = devm_blk_crypto_profile_init( + hba->dev, &hba->crypto_profile, + hba->crypto_capabilities.config_count + 1); + if (err) + goto out; + + hba->crypto_profile.ll_ops = ufshcd_crypto_ops; + /* UFS only supports 8 bytes for any DUN */ + hba->crypto_profile.max_dun_bytes_supported = 8; + hba->crypto_profile.dev = hba->dev; + + /* + * Cache all the UFS crypto capabilities and advertise the supported + * crypto modes and data unit sizes to the block layer. 
+ */
+	for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap;
+	     cap_idx++) {
+		hba->crypto_cap_array[cap_idx].reg_val =
+			cpu_to_le32(ufshcd_readl(hba,
+						 REG_UFS_CRYPTOCAP +
+						 cap_idx * sizeof(__le32)));
+		blk_mode_num = ufshcd_find_blk_crypto_mode(
+				hba->crypto_cap_array[cap_idx]);
+		if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID)
+			hba->crypto_profile.modes_supported[blk_mode_num] |=
+				hba->crypto_cap_array[cap_idx].sdus_mask * 512;
+	}
+
+	return 0;
+
+out:
+	/* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */
+	hba->caps &= ~UFSHCD_CAP_CRYPTO;
+	return err;
+}
+
+/**
+ * ufshcd_init_crypto - Initialize crypto hardware
+ * @hba: Per adapter instance
+ */
+void ufshcd_init_crypto(struct ufs_hba *hba)
+{
+	int slot;
+
+	if (!(hba->caps & UFSHCD_CAP_CRYPTO))
+		return;
+
+	/* Clear all keyslots - the number of keyslots is (CFGC + 1) */
+	for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
+		ufshcd_clear_keyslot(hba, slot);
+}
+
+void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
+{
+	if (hba->caps & UFSHCD_CAP_CRYPTO)
+		blk_crypto_register(&hba->crypto_profile, q);
+}
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
new file mode 100644
index 0000000000..504cc84154
--- /dev/null
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _UFSHCD_CRYPTO_H
+#define _UFSHCD_CRYPTO_H
+
+#include <scsi/scsi_cmnd.h>
+#include <ufs/ufshcd.h>
+#include "ufshcd-priv.h"
+#include <ufs/ufshci.h>
+
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+
+static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
+					      struct ufshcd_lrb *lrbp)
+{
+	if (!rq || !rq->crypt_keyslot) {
+		lrbp->crypto_key_slot = -1;
+		return;
+	}
+
+	lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot);
+	lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0];
+}
+
+static inline void
+ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
+				   u32 *dword_1, u32 *dword_3)
+{
+	if (lrbp->crypto_key_slot >= 0) {
+		*dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
+		*dword_0 |= lrbp->crypto_key_slot;
+		*dword_1 = lower_32_bits(lrbp->data_unit_num);
+		*dword_3 = upper_32_bits(lrbp->data_unit_num);
+	}
+}
+
+bool ufshcd_crypto_enable(struct ufs_hba *hba);
+
+int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
+
+void ufshcd_init_crypto(struct ufs_hba *hba);
+
+void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q);
+
+#else /* CONFIG_SCSI_UFS_CRYPTO */
+
+static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
+					      struct ufshcd_lrb *lrbp) { }
+
+static inline void
+ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
+				   u32 *dword_1, u32 *dword_3) { }
+
+static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
+{
+	return false;
+}
+
+static inline int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
+{
+	return 0;
+}
+
+static inline void ufshcd_init_crypto(struct ufs_hba *hba) { }
+
+static inline void ufshcd_crypto_register(struct ufs_hba *hba,
+					  struct request_queue *q) { }
+
+#endif /* CONFIG_SCSI_UFS_CRYPTO */
+
+#endif /* _UFSHCD_CRYPTO_H */
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
new file mode 100644
index 0000000000..8f67db202d
--- /dev/null
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _UFSHCD_PRIV_H_
+#define _UFSHCD_PRIV_H_
+
+#include <linux/pm_runtime.h>
+#include <ufs/ufshcd.h>
+
+static inline bool ufshcd_is_user_access_allowed(struct ufs_hba 
*hba) +{ + return !hba->shutting_down; +} + +void ufshcd_schedule_eh_work(struct ufs_hba *hba); + +static inline bool ufshcd_keep_autobkops_enabled_except_suspend( + struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND; +} + +static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba) +{ + if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED) + return hba->dev_info.wb_dedicated_lu; + return 0; +} + +#ifdef CONFIG_SCSI_UFS_HWMON +void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask); +void ufs_hwmon_remove(struct ufs_hba *hba); +void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask); +#else +static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {} +static inline void ufs_hwmon_remove(struct ufs_hba *hba) {} +static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {} +#endif + +int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 param_offset, + u8 *param_read_buf, + u8 param_size); +int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, + u32 *attr_val); +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, u8 index, bool *flag_res); +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); + +#define SD_ASCII_STD true +#define SD_RAW false +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii); + +int ufshcd_hold(struct ufs_hba *hba, bool async); +void ufshcd_release(struct ufs_hba *hba); + +void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_length); + +int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); + +int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, + struct utp_upiu_req *req_upiu, + struct utp_upiu_req *rsp_upiu, + int msgcode, + u8 *desc_buff, int *buff_len, + enum query_opcode desc_op); + +int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); + +/* Wrapper functions for safely calling variant operations */ +static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) +{ + if (hba->vops) + return hba->vops->name; + return ""; +} + +static inline void ufshcd_vops_exit(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->exit) + return hba->vops->exit(hba); +} + +static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->get_ufs_hci_version) + return hba->vops->get_ufs_hci_version(hba); + + return ufshcd_readl(hba, REG_UFS_VERSION); +} + +static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, + bool up, enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->clk_scale_notify) + return hba->vops->clk_scale_notify(hba, up, status); + return 0; +} + +static inline void ufshcd_vops_event_notify(struct ufs_hba *hba, + enum ufs_event_type evt, + void *data) +{ + if (hba->vops && hba->vops->event_notify) + hba->vops->event_notify(hba, evt, data); +} + +static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->setup_clocks) + return hba->vops->setup_clocks(hba, on, status); + return 0; +} + +static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba, + bool status) +{ + if (hba->vops && hba->vops->hce_enable_notify) + return hba->vops->hce_enable_notify(hba, 
status); + + return 0; +} +static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba, + bool status) +{ + if (hba->vops && hba->vops->link_startup_notify) + return hba->vops->link_startup_notify(hba, status); + + return 0; +} + +static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + if (hba->vops && hba->vops->pwr_change_notify) + return hba->vops->pwr_change_notify(hba, status, + dev_max_params, dev_req_params); + + return -ENOTSUPP; +} + +static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, + int tag, u8 tm_function) +{ + if (hba->vops && hba->vops->setup_task_mgmt) + return hba->vops->setup_task_mgmt(hba, tag, tm_function); +} + +static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba, + enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->hibern8_notify) + return hba->vops->hibern8_notify(hba, cmd, status); +} + +static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->apply_dev_quirks) + return hba->vops->apply_dev_quirks(hba); + return 0; +} + +static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->fixup_dev_quirks) + hba->vops->fixup_dev_quirks(hba); +} + +static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op, + enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->suspend) + return hba->vops->suspend(hba, op, status); + + return 0; +} + +static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op) +{ + if (hba->vops && hba->vops->resume) + return hba->vops->resume(hba, op); + + return 0; +} + +static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->dbg_register_dump) + hba->vops->dbg_register_dump(hba); +} + +static inline int ufshcd_vops_device_reset(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->device_reset) + return hba->vops->device_reset(hba); + + return -EOPNOTSUPP; +} + +static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba, + struct devfreq_dev_profile *p, + struct devfreq_simple_ondemand_data *data) +{ + if (hba->vops && hba->vops->config_scaling_param) + hba->vops->config_scaling_param(hba, p, data); +} + +extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; + +/** + * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN + * @scsi_lun: scsi LUN id + * + * Returns UPIU LUN id + */ +static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) +{ + if (scsi_is_wlun(scsi_lun)) + return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) + | UFS_UPIU_WLUN_ID; + else + return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; +} + +int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask); +int ufshcd_write_ee_control(struct ufs_hba *hba); +int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, + const u16 *other_mask, u16 set, u16 clr); + +static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba, + u16 set, u16 clr) +{ + return ufshcd_update_ee_control(hba, &hba->ee_drv_mask, + &hba->ee_usr_mask, set, clr); +} + +static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba, + u16 set, u16 clr) +{ + return ufshcd_update_ee_control(hba, &hba->ee_usr_mask, + &hba->ee_drv_mask, set, clr); +} + +static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba) +{ + return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev); +} + +static 
inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
+{
+	return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba)
+{
+	pm_runtime_get_noresume(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_resume(struct ufs_hba *hba)
+{
+	return pm_runtime_resume(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put(struct ufs_hba *hba)
+{
+	return pm_runtime_put(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+/**
+ * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @dev_info: pointer to the ufs_dev_info instance
+ * @lun: LU number to check
+ * @param_offset: offset of the unit descriptor parameter to check
+ * @return: true if the lun has a matching unit descriptor, false otherwise
+ */
+static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
+					      u8 lun, u8 param_offset)
+{
+	if (!dev_info || !dev_info->max_lu_supported) {
+		pr_err("Max General LU supported by UFS isn't initialized\n");
+		return false;
+	}
+	/* WB is available only for the logical unit from 0 to 7 */
+	if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS)
+		return lun < UFS_UPIU_MAX_WB_LUN_ID;
+	return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
+}
+
+#endif /* _UFSHCD_PRIV_H_ */
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
new file mode 100644
index 0000000000..a202d7d524
--- /dev/null
+++ b/drivers/ufs/core/ufshcd.c
@@ -0,0 +1,10011 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Universal Flash Storage Host controller driver Core
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * Authors:
+ *	Santosh Yaraganavi <santosh.sy@samsung.com>
+ *	Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#include <linux/async.h>
+#include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/blk-pm.h>
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sched/clock.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include "ufshcd-priv.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/ufshci.h>
+#include "ufs-sysfs.h"
+#include "ufs-debugfs.h"
+#include "ufs-fault-injection.h"
+#include "ufs_bsg.h"
+#include "ufshcd-crypto.h"
+#include "ufshpb.h"
+#include <asm/unaligned.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
+				 UTP_TASK_REQ_COMPL |\
+				 UFSHCD_ERROR_MASK)
+/* UIC command timeout, unit: ms */
+#define UIC_CMD_TIMEOUT	500
+
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES    10
+/* Timeout after 50 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT    50 /* msecs */
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 3
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT	100 /* msecs */
+
+/* maximum number of retries for a general UIC command  */
+#define UFS_UIC_COMMAND_RETRIES 3
+
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
+/* Maximum number of error handler retries before giving up */
+#define MAX_ERR_HANDLER_RETRIES 5
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO	0x02
+
+/* default delay of autosuspend: 2000 ms */
+#define RPM_AUTOSUSPEND_DELAY_MS 2000
+
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
+
+/* Default value of wait time before gating 
device ref clock */ +#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */ + +/* Polling time to wait for fDeviceInit */ +#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ + +#define ufshcd_toggle_vreg(_dev, _vreg, _on) \ + ({ \ + int _ret; \ + if (_on) \ + _ret = ufshcd_enable_vreg(_dev, _vreg); \ + else \ + _ret = ufshcd_disable_vreg(_dev, _vreg); \ + _ret; \ + }) + +#define ufshcd_hex_dump(prefix_str, buf, len) do { \ + size_t __len = (len); \ + print_hex_dump(KERN_ERR, prefix_str, \ + __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ + 16, 4, buf, __len, false); \ +} while (0) + +int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix) +{ + u32 *regs; + size_t pos; + + if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ + return -EINVAL; + + regs = kzalloc(len, GFP_ATOMIC); + if (!regs) + return -ENOMEM; + + for (pos = 0; pos < len; pos += 4) { + if (offset == 0 && + pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER && + pos <= REG_UIC_ERROR_CODE_DME) + continue; + regs[pos / 4] = ufshcd_readl(hba, offset + pos); + } + + ufshcd_hex_dump(prefix, regs, len); + kfree(regs); + + return 0; +} +EXPORT_SYMBOL_GPL(ufshcd_dump_regs); + +enum { + UFSHCD_MAX_CHANNEL = 0, + UFSHCD_MAX_ID = 1, + UFSHCD_NUM_RESERVED = 1, + UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, + UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, +}; + +static const char *const ufshcd_state_name[] = { + [UFSHCD_STATE_RESET] = "reset", + [UFSHCD_STATE_OPERATIONAL] = "operational", + [UFSHCD_STATE_ERROR] = "error", + [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal", + [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal", +}; + +/* UFSHCD error handling flags */ +enum { + UFSHCD_EH_IN_PROGRESS = (1 << 0), +}; + +/* UFSHCD UIC layer error flags */ +enum { + UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ + UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ + UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ + UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ + UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ + UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */ +}; + +#define ufshcd_set_eh_in_progress(h) \ + ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS) +#define ufshcd_eh_in_progress(h) \ + ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS) +#define ufshcd_clear_eh_in_progress(h) \ + ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) + +const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { + [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, + /* + * For DeepSleep, the link is first put in hibern8 and then off. + * Leaving the link in hibern8 is not supported. 
+ */ + [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE}, +}; + +static inline enum ufs_dev_pwr_mode +ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].dev_state; +} + +static inline enum uic_link_state +ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].link_state; +} + +static inline enum ufs_pm_level +ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, + enum uic_link_state link_state) +{ + enum ufs_pm_level lvl; + + for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) { + if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) && + (ufs_pm_lvl_states[lvl].link_state == link_state)) + return lvl; + } + + /* if no match found, return the level 0 */ + return UFS_PM_LVL_0; +} + +static const struct ufs_dev_quirk ufs_fixups[] = { + /* UFS cards deviations table */ + { .wmanufacturerid = UFS_VENDOR_MICRON, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ }, + { .wmanufacturerid = UFS_VENDOR_SAMSUNG, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS }, + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME }, + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = "hB8aL1" /*H28U62301AMR*/, + .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = "THGLF2G9C8KBADG", + .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = "THGLF2G9D8KBADG", + .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, + {} +}; + +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba); +static void ufshcd_async_scan(void *data, async_cookie_t cookie); +static int ufshcd_reset_and_restore(struct ufs_hba *hba); +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); +static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); +static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); +static void ufshcd_resume_clkscaling(struct ufs_hba *hba); +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); +static irqreturn_t ufshcd_intr(int irq, void *__hba); +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode); +static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); +static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); +static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, + struct ufs_vreg *vreg); +static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag); +static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set); +static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable); +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba); +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba); + +static inline void ufshcd_enable_irq(struct ufs_hba *hba) +{ + if 
(!hba->is_irq_enabled) { + enable_irq(hba->irq); + hba->is_irq_enabled = true; + } +} + +static inline void ufshcd_disable_irq(struct ufs_hba *hba) +{ + if (hba->is_irq_enabled) { + disable_irq(hba->irq); + hba->is_irq_enabled = false; + } +} + +static inline void ufshcd_wb_config(struct ufs_hba *hba) +{ + if (!ufshcd_is_wb_allowed(hba)) + return; + + ufshcd_wb_toggle(hba, true); + + ufshcd_wb_toggle_flush_during_h8(hba, true); + if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)) + ufshcd_wb_toggle_flush(hba, true); +} + +static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) +{ + if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) + scsi_unblock_requests(hba->host); +} + +static void ufshcd_scsi_block_requests(struct ufs_hba *hba) +{ + if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) + scsi_block_requests(hba->host); +} + +static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, + enum ufs_trace_str_t str_t) +{ + struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; + struct utp_upiu_header *header; + + if (!trace_ufshcd_upiu_enabled()) + return; + + if (str_t == UFS_CMD_SEND) + header = &rq->header; + else + header = &hba->lrb[tag].ucd_rsp_ptr->header; + + trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb, + UFS_TSF_CDB); +} + +static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, + enum ufs_trace_str_t str_t, + struct utp_upiu_req *rq_rsp) +{ + if (!trace_ufshcd_upiu_enabled()) + return; + + trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header, + &rq_rsp->qr, UFS_TSF_OSF); +} + +static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, + enum ufs_trace_str_t str_t) +{ + struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; + + if (!trace_ufshcd_upiu_enabled()) + return; + + if (str_t == UFS_TM_SEND) + trace_ufshcd_upiu(dev_name(hba->dev), str_t, + &descp->upiu_req.req_header, + &descp->upiu_req.input_param1, + UFS_TSF_TM_INPUT); + else + trace_ufshcd_upiu(dev_name(hba->dev), str_t, + &descp->upiu_rsp.rsp_header, + &descp->upiu_rsp.output_param1, + UFS_TSF_TM_OUTPUT); +} + +static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, + const struct uic_command *ucmd, + enum ufs_trace_str_t str_t) +{ + u32 cmd; + + if (!trace_ufshcd_uic_command_enabled()) + return; + + if (str_t == UFS_CMD_SEND) + cmd = ucmd->command; + else + cmd = ufshcd_readl(hba, REG_UIC_COMMAND); + + trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd, + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); +} + +static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, + enum ufs_trace_str_t str_t) +{ + u64 lba = 0; + u8 opcode = 0, group_id = 0; + u32 intr, doorbell; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; + struct request *rq = scsi_cmd_to_rq(cmd); + int transfer_len = -1; + + if (!cmd) + return; + + /* trace UPIU also */ + ufshcd_add_cmd_upiu_trace(hba, tag, str_t); + if (!trace_ufshcd_command_enabled()) + return; + + opcode = cmd->cmnd[0]; + + if (opcode == READ_10 || opcode == WRITE_10) { + /* + * Currently we only fully trace read(10) and write(10) commands + */ + transfer_len = + be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len); + lba = scsi_get_lba(cmd); + if (opcode == WRITE_10) + group_id = lrbp->cmd->cmnd[6]; + } else if (opcode == UNMAP) { + /* + * The number of Bytes to be unmapped beginning with the lba. 
+ */ + transfer_len = blk_rq_bytes(rq); + lba = scsi_get_lba(cmd); + } + + intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + trace_ufshcd_command(dev_name(hba->dev), str_t, tag, + doorbell, transfer_len, intr, lba, opcode, group_id); +} + +static void ufshcd_print_clk_freqs(struct ufs_hba *hba) +{ + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + + if (list_empty(head)) + return; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq && + clki->max_freq) + dev_err(hba->dev, "clk: %s, rate: %u\n", + clki->name, clki->curr_freq); + } +} + +static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, + const char *err_name) +{ + int i; + bool found = false; + const struct ufs_event_hist *e; + + if (id >= UFS_EVT_CNT) + return; + + e = &hba->ufs_stats.event[id]; + + for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) { + int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH; + + if (e->tstamp[p] == 0) + continue; + dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, + e->val[p], ktime_to_us(e->tstamp[p])); + found = true; + } + + if (!found) + dev_err(hba->dev, "No record of %s\n", err_name); + else + dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt); +} + +static void ufshcd_print_evt_hist(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); + + ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); + ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); + ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); + ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); + ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); + ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, + "auto_hibern8_err"); + ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); + ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, + "link_startup_fail"); + ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); + ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, + "suspend_fail"); + ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); + ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); + ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); + + ufshcd_vops_dbg_register_dump(hba); +} + +static +void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) +{ + const struct ufshcd_lrb *lrbp; + int prdt_length; + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutrs) { + lrbp = &hba->lrb[tag]; + + dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", + tag, ktime_to_us(lrbp->issue_time_stamp)); + dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", + tag, ktime_to_us(lrbp->compl_time_stamp)); + dev_err(hba->dev, + "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", + tag, (u64)lrbp->utrd_dma_addr); + + ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr, + sizeof(struct utp_transfer_req_desc)); + dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, + (u64)lrbp->ucd_req_dma_addr); + ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr, + sizeof(struct utp_upiu_req)); + dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, + (u64)lrbp->ucd_rsp_dma_addr); + ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, + sizeof(struct utp_upiu_rsp)); + + prdt_length = le16_to_cpu( + lrbp->utr_descriptor_ptr->prd_table_length); + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) + prdt_length /= sizeof(struct ufshcd_sg_entry); + + dev_err(hba->dev, + "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", + tag, prdt_length, + (u64)lrbp->ucd_prdt_dma_addr); + + if 
(pr_prdt) + ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, + sizeof(struct ufshcd_sg_entry) * prdt_length); + } +} + +static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) +{ + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutmrs) { + struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; + + dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); + ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp)); + } +} + +static void ufshcd_print_host_state(struct ufs_hba *hba) +{ + const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; + + dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); + dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", + hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", + hba->saved_err, hba->saved_uic_err); + dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", + hba->curr_dev_pwr_mode, hba->uic_link_state); + dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", + hba->pm_op_in_progress, hba->is_sys_suspended); + dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", + hba->auto_bkops_enabled, hba->host->host_self_blocked); + dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); + dev_err(hba->dev, + "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n", + ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), + hba->ufs_stats.hibern8_exit_cnt); + dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", + ktime_to_us(hba->ufs_stats.last_intr_ts), + hba->ufs_stats.last_intr_status); + dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", + hba->eh_flags, hba->req_abort_count); + dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", + hba->ufs_version, hba->capabilities, hba->caps); + dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, + hba->dev_quirks); + if (sdev_ufs) + dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", + sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev); + + ufshcd_print_clk_freqs(hba); +} + +/** + * ufshcd_print_pwr_info - print power params as saved in hba + * power info + * @hba: per-adapter instance + */ +static void ufshcd_print_pwr_info(struct ufs_hba *hba) +{ + static const char * const names[] = { + "INVALID MODE", + "FAST MODE", + "SLOW_MODE", + "INVALID MODE", + "FASTAUTO_MODE", + "SLOWAUTO_MODE", + "INVALID MODE", + }; + + /* + * Using dev_dbg to avoid messages during runtime PM to avoid + * never-ending cycles of messages written back to storage by user space + * causing runtime resume, causing more messages and so on. 
+ */ + dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", + __func__, + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, + names[hba->pwr_info.pwr_rx], + names[hba->pwr_info.pwr_tx], + hba->pwr_info.hs_rate); +} + +static void ufshcd_device_reset(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_vops_device_reset(hba); + + if (!err) { + ufshcd_set_ufs_dev_active(hba); + if (ufshcd_is_wb_allowed(hba)) { + hba->dev_info.wb_enabled = false; + hba->dev_info.wb_buf_flush_enabled = false; + } + } + if (err != -EOPNOTSUPP) + ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); +} + +void ufshcd_delay_us(unsigned long us, unsigned long tolerance) +{ + if (!us) + return; + + if (us < 10) + udelay(us); + else + usleep_range(us, us + tolerance); +} +EXPORT_SYMBOL_GPL(ufshcd_delay_us); + +/** + * ufshcd_wait_for_register - wait for register value to change + * @hba: per-adapter interface + * @reg: mmio register offset + * @mask: mask to apply to the read register value + * @val: value to wait for + * @interval_us: polling interval in microseconds + * @timeout_ms: timeout in milliseconds + * + * Return: + * -ETIMEDOUT on error, zero on success. + */ +static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, + unsigned long timeout_ms) +{ + int err = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + /* ignore bits that we don't intend to wait on */ + val = val & mask; + + while ((ufshcd_readl(hba, reg) & mask) != val) { + usleep_range(interval_us, interval_us + 50); + if (time_after(jiffies, timeout)) { + if ((ufshcd_readl(hba, reg) & mask) != val) + err = -ETIMEDOUT; + break; + } + } + + return err; +} + +/** + * ufshcd_get_intr_mask - Get the interrupt bit mask + * @hba: Pointer to adapter instance + * + * Returns interrupt bit mask per version + */ +static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) +{ + if (hba->ufs_version == ufshci_version(1, 0)) + return INTERRUPT_MASK_ALL_VER_10; + if (hba->ufs_version <= ufshci_version(2, 0)) + return INTERRUPT_MASK_ALL_VER_11; + + return INTERRUPT_MASK_ALL_VER_21; +} + +/** + * ufshcd_get_ufs_version - Get the UFS version supported by the HBA + * @hba: Pointer to adapter instance + * + * Returns UFSHCI version supported by the controller + */ +static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) +{ + u32 ufshci_ver; + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) + ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); + else + ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); + + /* + * UFSHCI v1.x uses a different version scheme, in order + * to allow the use of comparisons with the ufshci_version + * function, we convert it to the same scheme as ufs 2.0+. 
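+	 * For example, a controller reporting the 1.x scheme sets bit 16 of
+	 * REG_UFS_VERSION, which is exactly what the check below detects
+	 * before re-encoding the value with ufshci_version().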
+ */ + if (ufshci_ver & 0x00010000) + return ufshci_version(1, ufshci_ver & 0x00000100); + + return ufshci_ver; +} + +/** + * ufshcd_is_device_present - Check if any device connected to + * the host controller + * @hba: pointer to adapter instance + * + * Returns true if device present, false if no device detected + */ +static inline bool ufshcd_is_device_present(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT; +} + +/** + * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status + * @lrbp: pointer to local command reference block + * + * This function is used to get the OCS field from UTRD + * Returns the OCS field in the UTRD + */ +static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) +{ + return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; +} + +/** + * ufshcd_utrl_clear() - Clear requests from the controller request list. + * @hba: per adapter instance + * @mask: mask with one bit set for each request to be cleared + */ +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) +{ + if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) + mask = ~mask; + /* + * From the UFSHCI specification: "UTP Transfer Request List CLear + * Register (UTRLCLR): This field is bit significant. Each bit + * corresponds to a slot in the UTP Transfer Request List, where bit 0 + * corresponds to request slot 0. A bit in this field is set to ‘0’ + * by host software to indicate to the host controller that a transfer + * request slot is cleared. The host controller + * shall free up any resources associated to the request slot + * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The + * host software indicates no change to request slots by setting the + * associated bits in this field to ‘1’. Bits in this field shall only + * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’." 
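+	 *
+	 * Worked example: to clear request slot 3, @mask is BIT(3) and the
+	 * write below is ~BIT(3) = 0xfffffff7, i.e. only bit 3 is 0. With
+	 * UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR the polarity is inverted first,
+	 * so BIT(3) itself gets written instead.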
+	 */
+	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_utmrl_clear - Clear a bit in the UTMRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
+{
+	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+	else
+		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns 0 on success and a positive value on failure.
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion.
+ * Returns 0 on success, a non-zero value on error.
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+	       MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function reads UIC command argument3.
+ * Returns the attribute value carried in UIC command argument3.
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
+ */
+static inline int
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ *				from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+		MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
+/**
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of the response UPIU.
+ *
+ * Returns true if an exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+			MASK_RSP_EXCEPTION_EVENT;
+}
+
+/**
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
+ * @hba: per adapter instance
+ */
+static inline void
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
+{
+	ufshcd_writel(hba, INT_AGGR_ENABLE |
+		      INT_AGGR_COUNTER_AND_TIMER_RESET,
+		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
+		      INT_AGGR_TIMEOUT_VAL(tmout),
+		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+{
+	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers.
+ *			Setting the run-stop registers to 1 indicates to the
+ *			host controller that it can process requests.
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
+	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+	u32 val = CONTROLLER_ENABLE;
+
+	if (ufshcd_crypto_enable(hba))
+		val |= CRYPTO_GENERAL_ENABLE;
+
+	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns true if and only if the controller is active.
+ */
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
+}
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
+	if (hba->ufs_version <= ufshci_version(1, 1))
+		return UFS_UNIPRO_VER_1_41;
+	else
+		return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+	/*
+	 * If both host and device support UniPro ver 1.6 or later, PA layer
+	 * parameters tuning happens during link startup itself.
+	 *
+	 * We can manually tune PA layer parameters if either host or device
+	 * doesn't support UniPro ver 1.6 or later. But to keep the manual
+	 * tuning logic simple, we only do manual tuning if the local UniPro
+	 * version is below ver 1.6.
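+	 *
+	 * For example, a host whose local UniPro version is 1.41 takes the
+	 * manual tuning path even if the attached device supports ver 1.6,
+	 * because only the local version is consulted here.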
+	 */
+	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
+}
+
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: If true, set max possible frequency, otherwise set low frequency
+ *
+ * Returns 0 on success,
+ * Returns < 0 on error
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (list_empty(head))
+		goto out;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (scale_up && clki->max_freq) {
+				if (clki->curr_freq == clki->max_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->max_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->max_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled up", clki->name,
+						clki->curr_freq,
+						clki->max_freq);
+
+				clki->curr_freq = clki->max_freq;
+
+			} else if (!scale_up && clki->min_freq) {
+				if (clki->curr_freq == clki->min_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->min_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->min_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled down", clki->name,
+						clki->curr_freq,
+						clki->min_freq);
+				clki->curr_freq = clki->min_freq;
+			}
+		}
+		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 on success,
+ * Returns < 0 on error
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	ktime_t start = ktime_get();
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_set_clk_freq(hba, scale_up);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+	if (ret)
+		ufshcd_set_clk_freq(hba, !scale_up);
+
+out:
+	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+			(scale_up ? "up" : "down"),
+			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+	return ret;
+}
+
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+					       bool scale_up)
+{
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (list_empty(head))
+		return false;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (scale_up && clki->max_freq) {
+				if (clki->curr_freq == clki->max_freq)
+					continue;
+				return true;
+			} else if (!scale_up && clki->min_freq) {
+				if (clki->curr_freq == clki->min_freq)
+					continue;
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Determine the number of pending commands by counting the bits in the SCSI
+ * device budget maps. This approach has been selected because a bit is set in
+ * the budget map before scsi_host_queue_ready() checks the host_self_blocked
+ * flag. The host_self_blocked flag can be modified by calling
+ * scsi_block_requests() or scsi_unblock_requests().
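+ *
+ * Because a bit is already set in the budget map at that point, counting
+ * the set bits here cannot miss a command that is about to be checked
+ * against host_self_blocked; this is what makes the count safe to use
+ * from ufshcd_wait_for_doorbell_clr().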
+ */
+static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
+{
+	const struct scsi_device *sdev;
+	u32 pending = 0;
+
+	lockdep_assert_held(hba->host->host_lock);
+	__shost_for_each_device(sdev, hba->host)
+		pending += sbitmap_weight(&sdev->budget_map);
+
+	return pending;
+}
+
+static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+					u64 wait_timeout_us)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 tm_doorbell;
+	u32 tr_pending;
+	bool timeout = false, do_last_check = false;
+	ktime_t start;
+
+	ufshcd_hold(hba, false);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * Wait for all the outstanding tasks/transfer requests.
+	 * Verify by checking the doorbell registers are clear.
+	 */
+	start = ktime_get();
+	do {
+		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+			ret = -EBUSY;
+			goto out;
+		}
+
+		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+		tr_pending = ufshcd_pending_cmds(hba);
+		if (!tm_doorbell && !tr_pending) {
+			timeout = false;
+			break;
+		} else if (do_last_check) {
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		schedule();
+		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+		    wait_timeout_us) {
+			timeout = true;
+			/*
+			 * We might have scheduled out for a long time, so
+			 * make sure to check whether the doorbells have been
+			 * cleared by now.
+			 */
+			do_last_check = true;
+		}
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	} while (tm_doorbell || tr_pending);
+
+	if (timeout) {
+		dev_err(hba->dev,
+			"%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+			__func__, tm_doorbell, tr_pending);
+		ret = -EBUSY;
+	}
+out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_release(hba);
+	return ret;
+}
+
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_pa_layer_attr new_pwr_info;
+
+	if (scale_up) {
+		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+		       sizeof(struct ufs_pa_layer_attr));
+	} else {
+		memcpy(&new_pwr_info, &hba->pwr_info,
+		       sizeof(struct ufs_pa_layer_attr));
+
+		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
+		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
+			/* save the current power mode */
+			memcpy(&hba->clk_scaling.saved_pwr_info.info,
+			       &hba->pwr_info,
+			       sizeof(struct ufs_pa_layer_attr));
+
+			/* scale down gear */
+			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
+			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
+		}
+	}
+
+	/*
+	 * Check whether the power mode needs to be changed.
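+	 * For example, scaling down from a higher gear with min_gear set to
+	 * UFS_HS_G1 saves the current mode in saved_pwr_info and requests
+	 * G1 in both directions; scaling back up simply restores the saved
+	 * mode.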
*/ + ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); + if (ret) + dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", + __func__, ret, + hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, + new_pwr_info.gear_tx, new_pwr_info.gear_rx); + + return ret; +} + +static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) +{ + #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */ + int ret = 0; + /* + * make sure that there are no outstanding requests when + * clock scaling is in progress + */ + ufshcd_scsi_block_requests(hba); + down_write(&hba->clk_scaling_lock); + + if (!hba->clk_scaling.is_allowed || + ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { + ret = -EBUSY; + up_write(&hba->clk_scaling_lock); + ufshcd_scsi_unblock_requests(hba); + goto out; + } + + /* let's not get into low power until clock scaling is completed */ + ufshcd_hold(hba, false); + +out: + return ret; +} + +static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock) +{ + if (writelock) + up_write(&hba->clk_scaling_lock); + else + up_read(&hba->clk_scaling_lock); + ufshcd_scsi_unblock_requests(hba); + ufshcd_release(hba); +} + +/** + * ufshcd_devfreq_scale - scale up/down UFS clocks and gear + * @hba: per adapter instance + * @scale_up: True for scaling up and false for scalin down + * + * Returns 0 for success, + * Returns -EBUSY if scaling can't happen at this time + * Returns non-zero for any other errors + */ +static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) +{ + int ret = 0; + bool is_writelock = true; + + ret = ufshcd_clock_scaling_prepare(hba); + if (ret) + return ret; + + /* scale down the gear before scaling down clocks */ + if (!scale_up) { + ret = ufshcd_scale_gear(hba, false); + if (ret) + goto out_unprepare; + } + + ret = ufshcd_scale_clks(hba, scale_up); + if (ret) { + if (!scale_up) + ufshcd_scale_gear(hba, true); + goto out_unprepare; + } + + /* scale up the gear after scaling up clocks */ + if (scale_up) { + ret = ufshcd_scale_gear(hba, true); + if (ret) { + ufshcd_scale_clks(hba, false); + goto out_unprepare; + } + } + + /* Enable Write Booster if we have scaled up else disable it */ + downgrade_write(&hba->clk_scaling_lock); + is_writelock = false; + ufshcd_wb_toggle(hba, scale_up); + +out_unprepare: + ufshcd_clock_scaling_unprepare(hba, is_writelock); + return ret; +} + +static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_scaling.suspend_work); + unsigned long irq_flags; + + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return; + } + hba->clk_scaling.is_suspended = true; + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + + __ufshcd_suspend_clkscaling(hba); +} + +static void ufshcd_clk_scaling_resume_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_scaling.resume_work); + unsigned long irq_flags; + + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (!hba->clk_scaling.is_suspended) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return; + } + hba->clk_scaling.is_suspended = false; + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + + devfreq_resume_device(hba->devfreq); +} + +static int ufshcd_devfreq_target(struct device *dev, + unsigned long *freq, u32 flags) +{ + int ret = 0; + struct ufs_hba *hba = 
dev_get_drvdata(dev); + ktime_t start; + bool scale_up, sched_clk_scaling_suspend_work = false; + struct list_head *clk_list = &hba->clk_list_head; + struct ufs_clk_info *clki; + unsigned long irq_flags; + + if (!ufshcd_is_clkscaling_supported(hba)) + return -EINVAL; + + clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); + /* Override with the closest supported frequency */ + *freq = (unsigned long) clk_round_rate(clki->clk, *freq); + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (ufshcd_eh_in_progress(hba)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return 0; + } + + if (!hba->clk_scaling.active_reqs) + sched_clk_scaling_suspend_work = true; + + if (list_empty(clk_list)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + goto out; + } + + /* Decide based on the rounded-off frequency and update */ + scale_up = *freq == clki->max_freq; + if (!scale_up) + *freq = clki->min_freq; + /* Update the frequency */ + if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + ret = 0; + goto out; /* no state change required */ + } + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + + start = ktime_get(); + ret = ufshcd_devfreq_scale(hba, scale_up); + + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), + (scale_up ? "up" : "down"), + ktime_to_us(ktime_sub(ktime_get(), start)), ret); + +out: + if (sched_clk_scaling_suspend_work) + queue_work(hba->clk_scaling.workq, + &hba->clk_scaling.suspend_work); + + return ret; +} + +static int ufshcd_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; + struct list_head *clk_list = &hba->clk_list_head; + struct ufs_clk_info *clki; + ktime_t curr_t; + + if (!ufshcd_is_clkscaling_supported(hba)) + return -EINVAL; + + memset(stat, 0, sizeof(*stat)); + + spin_lock_irqsave(hba->host->host_lock, flags); + curr_t = ktime_get(); + if (!scaling->window_start_t) + goto start_window; + + clki = list_first_entry(clk_list, struct ufs_clk_info, list); + /* + * If current frequency is 0, then the ondemand governor considers + * there's no initial frequency set. And it always requests to set + * to max. frequency. 
+ */ + stat->current_frequency = clki->curr_freq; + if (scaling->is_busy_started) + scaling->tot_busy_t += ktime_us_delta(curr_t, + scaling->busy_start_t); + + stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); + stat->busy_time = scaling->tot_busy_t; +start_window: + scaling->window_start_t = curr_t; + scaling->tot_busy_t = 0; + + if (hba->outstanding_reqs) { + scaling->busy_start_t = curr_t; + scaling->is_busy_started = true; + } else { + scaling->busy_start_t = 0; + scaling->is_busy_started = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; +} + +static int ufshcd_devfreq_init(struct ufs_hba *hba) +{ + struct list_head *clk_list = &hba->clk_list_head; + struct ufs_clk_info *clki; + struct devfreq *devfreq; + int ret; + + /* Skip devfreq if we don't have any clocks in the list */ + if (list_empty(clk_list)) + return 0; + + clki = list_first_entry(clk_list, struct ufs_clk_info, list); + dev_pm_opp_add(hba->dev, clki->min_freq, 0); + dev_pm_opp_add(hba->dev, clki->max_freq, 0); + + ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, + &hba->vps->ondemand_data); + devfreq = devfreq_add_device(hba->dev, + &hba->vps->devfreq_profile, + DEVFREQ_GOV_SIMPLE_ONDEMAND, + &hba->vps->ondemand_data); + if (IS_ERR(devfreq)) { + ret = PTR_ERR(devfreq); + dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); + + dev_pm_opp_remove(hba->dev, clki->min_freq); + dev_pm_opp_remove(hba->dev, clki->max_freq); + return ret; + } + + hba->devfreq = devfreq; + + return 0; +} + +static void ufshcd_devfreq_remove(struct ufs_hba *hba) +{ + struct list_head *clk_list = &hba->clk_list_head; + struct ufs_clk_info *clki; + + if (!hba->devfreq) + return; + + devfreq_remove_device(hba->devfreq); + hba->devfreq = NULL; + + clki = list_first_entry(clk_list, struct ufs_clk_info, list); + dev_pm_opp_remove(hba->dev, clki->min_freq); + dev_pm_opp_remove(hba->dev, clki->max_freq); +} + +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) +{ + unsigned long flags; + + devfreq_suspend_device(hba->devfreq); + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_scaling.window_start_t = 0; + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) +{ + unsigned long flags; + bool suspend = false; + + cancel_work_sync(&hba->clk_scaling.suspend_work); + cancel_work_sync(&hba->clk_scaling.resume_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!hba->clk_scaling.is_suspended) { + suspend = true; + hba->clk_scaling.is_suspended = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (suspend) + __ufshcd_suspend_clkscaling(hba); +} + +static void ufshcd_resume_clkscaling(struct ufs_hba *hba) +{ + unsigned long flags; + bool resume = false; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_scaling.is_suspended) { + resume = true; + hba->clk_scaling.is_suspended = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (resume) + devfreq_resume_device(hba->devfreq); +} + +static ssize_t ufshcd_clkscale_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); +} + +static ssize_t ufshcd_clkscale_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int err = 0; + + if (kstrtou32(buf, 0, &value)) 
+ return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + err = -EBUSY; + goto out; + } + + value = !!value; + if (value == hba->clk_scaling.is_enabled) + goto out; + + ufshcd_rpm_get_sync(hba); + ufshcd_hold(hba, false); + + hba->clk_scaling.is_enabled = value; + + if (value) { + ufshcd_resume_clkscaling(hba); + } else { + ufshcd_suspend_clkscaling(hba); + err = ufshcd_devfreq_scale(hba, true); + if (err) + dev_err(hba->dev, "%s: failed to scale clocks up %d\n", + __func__, err); + } + + ufshcd_release(hba); + ufshcd_rpm_put_sync(hba); +out: + up(&hba->host_sem); + return err ? err : count; +} + +static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) +{ + hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; + hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; + sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); + hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; + hba->clk_scaling.enable_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); +} + +static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) +{ + if (hba->clk_scaling.enable_attr.attr.name) + device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); +} + +static void ufshcd_init_clk_scaling(struct ufs_hba *hba) +{ + char wq_name[sizeof("ufs_clkscaling_00")]; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + if (!hba->clk_scaling.min_gear) + hba->clk_scaling.min_gear = UFS_HS_G1; + + INIT_WORK(&hba->clk_scaling.suspend_work, + ufshcd_clk_scaling_suspend_work); + INIT_WORK(&hba->clk_scaling.resume_work, + ufshcd_clk_scaling_resume_work); + + snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", + hba->host->host_no); + hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); + + hba->clk_scaling.is_initialized = true; +} + +static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) +{ + if (!hba->clk_scaling.is_initialized) + return; + + ufshcd_remove_clk_scaling_sysfs(hba); + destroy_workqueue(hba->clk_scaling.workq); + ufshcd_devfreq_remove(hba); + hba->clk_scaling.is_initialized = false; +} + +static void ufshcd_ungate_work(struct work_struct *work) +{ + int ret; + unsigned long flags; + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_gating.ungate_work); + + cancel_delayed_work_sync(&hba->clk_gating.gate_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == CLKS_ON) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + goto unblock_reqs; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_hba_vreg_set_hpm(hba); + ufshcd_setup_clocks(hba, true); + + ufshcd_enable_irq(hba); + + /* Exit from hibern8 */ + if (ufshcd_can_hibern8_during_gating(hba)) { + /* Prevent gating in this path */ + hba->clk_gating.is_suspended = true; + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (ret) + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", + __func__, ret); + else + ufshcd_set_link_active(hba); + } + hba->clk_gating.is_suspended = false; + } +unblock_reqs: + ufshcd_scsi_unblock_requests(hba); +} + +/** + * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. + * Also, exit from hibern8 mode and set the link as active. + * @hba: per adapter instance + * @async: This indicates whether caller should ungate clocks asynchronously. 
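+ *
+ * Callers in atomic context pass @async as true and must be prepared to
+ * see -EAGAIN while the ungate work is still pending; blocking callers
+ * pass false and only return once the clocks are ON.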
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+	int rc = 0;
+	bool flush_result;
+	unsigned long flags;
+
+	if (!ufshcd_is_clkgating_allowed(hba) ||
+	    !hba->clk_gating.is_initialized)
+		goto out;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.active_reqs++;
+
+start:
+	switch (hba->clk_gating.state) {
+	case CLKS_ON:
+		/*
+		 * Wait for the ungate work to complete if in progress.
+		 * Though the clocks may be in the ON state, the link could
+		 * still be in hibern8 state if hibern8 is allowed
+		 * during clock gating.
+		 * Make sure we exit hibern8 state also in addition to
+		 * clocks being ON.
+		 */
+		if (ufshcd_can_hibern8_during_gating(hba) &&
+		    ufshcd_is_link_hibern8(hba)) {
+			if (async) {
+				rc = -EAGAIN;
+				hba->clk_gating.active_reqs--;
+				break;
+			}
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_result = flush_work(&hba->clk_gating.ungate_work);
+			if (hba->clk_gating.is_suspended && !flush_result)
+				goto out;
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
+		break;
+	case REQ_CLKS_OFF:
+		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			break;
+		}
+		/*
+		 * If we are here, it means gating work is either done or
+		 * currently running. Hence, fall through to cancel gating
+		 * work and to enable clocks.
+		 */
+		fallthrough;
+	case CLKS_OFF:
+		hba->clk_gating.state = REQ_CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+		if (queue_work(hba->clk_gating.clk_gating_workq,
+			       &hba->clk_gating.ungate_work))
+			ufshcd_scsi_block_requests(hba);
+		/*
+		 * fall through to check if we should wait for this
+		 * work to be done or not.
+		 */
+		fallthrough;
+	case REQ_CLKS_ON:
+		if (async) {
+			rc = -EAGAIN;
+			hba->clk_gating.active_reqs--;
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		flush_work(&hba->clk_gating.ungate_work);
+		/* Make sure state is CLKS_ON before returning */
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		goto start;
+	default:
+		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+			__func__, hba->clk_gating.state);
+		break;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ufshcd_hold);
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_gating.gate_work.work);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case save time by
+	 * skipping the gating work and exit after changing the clock
+	 * state to CLKS_ON.
+	 */
+	if (hba->clk_gating.is_suspended ||
+	    (hba->clk_gating.state != REQ_CLKS_OFF)) {
+		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+		goto rel_lock;
+	}
+
+	if (hba->clk_gating.active_reqs
+	    || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+	    || hba->outstanding_reqs || hba->outstanding_tasks
+	    || hba->active_uic_cmd || hba->uic_async_done)
+		goto rel_lock;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* put the link into hibern8 mode before turning off clocks */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret) {
+			hba->clk_gating.state = CLKS_ON;
+			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+				__func__, ret);
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			goto out;
+		}
+		ufshcd_set_link_hibern8(hba);
+	}
+
+	ufshcd_disable_irq(hba);
+
+	ufshcd_setup_clocks(hba, false);
+
+	/* Put the host controller in low power mode if possible */
+	ufshcd_hba_vreg_set_lpm(hba);
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case keep the state
+	 * as REQ_CLKS_ON, which anyway implies that clocks are off
+	 * and a request to turn them on is pending. This keeps the state
+	 * machine intact and ultimately prevents the gating work from
+	 * being cancelled multiple times when new requests arrive before
+	 * the current cancel work is done.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == REQ_CLKS_OFF) {
+		hba->clk_gating.state = CLKS_OFF;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+	}
+rel_lock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.active_reqs--;
+
+	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
+	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
+	    hba->active_uic_cmd || hba->uic_async_done ||
+	    hba->clk_gating.state == CLKS_OFF)
+		return;
+
+	hba->clk_gating.state = REQ_CLKS_OFF;
+	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+	queue_delayed_work(hba->clk_gating.clk_gating_workq,
+			   &hba->clk_gating.gate_work,
+			   msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_release(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_release);
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.delay_ms = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	
ufshcd_clkgate_delay_set(dev, value); + return count; +} + +static ssize_t ufshcd_clkgate_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); +} + +static ssize_t ufshcd_clkgate_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long flags; + u32 value; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + value = !!value; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (value == hba->clk_gating.is_enabled) + goto out; + + if (value) + __ufshcd_release(hba); + else + hba->clk_gating.active_reqs++; + + hba->clk_gating.is_enabled = value; +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) +{ + hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; + hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; + sysfs_attr_init(&hba->clk_gating.delay_attr.attr); + hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; + hba->clk_gating.delay_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); + + hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; + hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; + sysfs_attr_init(&hba->clk_gating.enable_attr.attr); + hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; + hba->clk_gating.enable_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); +} + +static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) +{ + if (hba->clk_gating.delay_attr.attr.name) + device_remove_file(hba->dev, &hba->clk_gating.delay_attr); + if (hba->clk_gating.enable_attr.attr.name) + device_remove_file(hba->dev, &hba->clk_gating.enable_attr); +} + +static void ufshcd_init_clk_gating(struct ufs_hba *hba) +{ + char wq_name[sizeof("ufs_clk_gating_00")]; + + if (!ufshcd_is_clkgating_allowed(hba)) + return; + + hba->clk_gating.state = CLKS_ON; + + hba->clk_gating.delay_ms = 150; + INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); + INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); + + snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", + hba->host->host_no); + hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, + WQ_MEM_RECLAIM | WQ_HIGHPRI); + + ufshcd_init_clk_gating_sysfs(hba); + + hba->clk_gating.is_enabled = true; + hba->clk_gating.is_initialized = true; +} + +static void ufshcd_exit_clk_gating(struct ufs_hba *hba) +{ + if (!hba->clk_gating.is_initialized) + return; + + ufshcd_remove_clk_gating_sysfs(hba); + + /* Ungate the clock if necessary. 
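+	 * The hold/release pair below forces an ungate and, with
+	 * is_initialized cleared in between, guarantees that
+	 * __ufshcd_release() cannot re-arm the gate work while the
+	 * workqueue is being destroyed.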
*/ + ufshcd_hold(hba, false); + hba->clk_gating.is_initialized = false; + ufshcd_release(hba); + + destroy_workqueue(hba->clk_gating.clk_gating_workq); +} + +/* Must be called with host lock acquired */ +static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) +{ + bool queue_resume_work = false; + ktime_t curr_t = ktime_get(); + unsigned long flags; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!hba->clk_scaling.active_reqs++) + queue_resume_work = true; + + if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + return; + } + + if (queue_resume_work) + queue_work(hba->clk_scaling.workq, + &hba->clk_scaling.resume_work); + + if (!hba->clk_scaling.window_start_t) { + hba->clk_scaling.window_start_t = curr_t; + hba->clk_scaling.tot_busy_t = 0; + hba->clk_scaling.is_busy_started = false; + } + + if (!hba->clk_scaling.is_busy_started) { + hba->clk_scaling.busy_start_t = curr_t; + hba->clk_scaling.is_busy_started = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) +{ + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_scaling.active_reqs--; + if (!hba->outstanding_reqs && scaling->is_busy_started) { + scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), + scaling->busy_start_t)); + scaling->busy_start_t = 0; + scaling->is_busy_started = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static inline int ufshcd_monitor_opcode2dir(u8 opcode) +{ + if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16) + return READ; + else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16) + return WRITE; + else + return -EINVAL; +} + +static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + const struct ufs_hba_monitor *m = &hba->monitor; + + return (m->enabled && lrbp && lrbp->cmd && + (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) && + ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); +} + +static void ufshcd_start_monitor(struct ufs_hba *hba, + const struct ufshcd_lrb *lrbp) +{ + int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) + hba->monitor.busy_start_ts[dir] = ktime_get(); + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp) +{ + int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { + const struct request *req = scsi_cmd_to_rq(lrbp->cmd); + struct ufs_hba_monitor *m = &hba->monitor; + ktime_t now, inc, lat; + + now = lrbp->compl_time_stamp; + inc = ktime_sub(now, m->busy_start_ts[dir]); + m->total_busy[dir] = ktime_add(m->total_busy[dir], inc); + m->nr_sec_rw[dir] += blk_rq_sectors(req); + + /* Update latencies */ + m->nr_req[dir]++; + lat = ktime_sub(now, lrbp->issue_time_stamp); + m->lat_sum[dir] += lat; + if (m->lat_max[dir] < lat || !m->lat_max[dir]) + m->lat_max[dir] = lat; + if (m->lat_min[dir] > lat || !m->lat_min[dir]) + m->lat_min[dir] = lat; + + m->nr_queued[dir]--; 
+ /* Push forward the busy start of monitor */ + m->busy_start_ts[dir] = now; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +/** + * ufshcd_send_command - Send SCSI or device management commands + * @hba: per adapter instance + * @task_tag: Task tag of the command + */ +static inline +void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) +{ + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + unsigned long flags; + + lrbp->issue_time_stamp = ktime_get(); + lrbp->compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); + ufshcd_clk_scaling_start_busy(hba); + if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) + ufshcd_start_monitor(hba, lrbp); + + spin_lock_irqsave(&hba->outstanding_lock, flags); + if (hba->vops && hba->vops->setup_xfer_req) + hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); + __set_bit(task_tag, &hba->outstanding_reqs); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); +} + +/** + * ufshcd_copy_sense_data - Copy sense data in case of check condition + * @lrbp: pointer to local reference block + */ +static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) +{ + u8 *const sense_buffer = lrbp->cmd->sense_buffer; + int len; + + if (sense_buffer && + ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { + int len_to_copy; + + len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); + len_to_copy = min_t(int, UFS_SENSE_SIZE, len); + + memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, + len_to_copy); + } +} + +/** + * ufshcd_copy_query_response() - Copy the Query Response and the data + * descriptor + * @hba: per adapter instance + * @lrbp: pointer to local reference block + */ +static +int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); + + /* Get the descriptor */ + if (hba->dev_cmd.query.descriptor && + lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + + GENERAL_UPIU_REQUEST_SIZE; + u16 resp_len; + u16 buf_len; + + /* data segment length */ + resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + buf_len = be16_to_cpu( + hba->dev_cmd.query.request.upiu_req.length); + if (likely(buf_len >= resp_len)) { + memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); + } else { + dev_warn(hba->dev, + "%s: rsp size %d is bigger than buffer size %d", + __func__, resp_len, buf_len); + return -EINVAL; + } + } + + return 0; +} + +/** + * ufshcd_hba_capabilities - Read controller capabilities + * @hba: per adapter instance + * + * Return: 0 on success, negative on error. 
+ */ +static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) +{ + int err; + + hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); + if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) + hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; + + /* nutrs and nutmrs are 0 based values */ + hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; + hba->nutmrs = + ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; + hba->reserved_slot = hba->nutrs - 1; + + /* Read crypto capabilities */ + err = ufshcd_hba_init_crypto_capabilities(hba); + if (err) + dev_err(hba->dev, "crypto setup failed\n"); + + return err; +} + +/** + * ufshcd_ready_for_uic_cmd - Check if controller is ready + * to accept UIC commands + * @hba: per adapter instance + * Return true on success, else false + */ +static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY; +} + +/** + * ufshcd_get_upmcrs - Get the power mode change request status + * @hba: Pointer to adapter instance + * + * This function gets the UPMCRS field of HCS register + * Returns value of UPMCRS field + */ +static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; +} + +/** + * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer + * @hba: per adapter instance + * @uic_cmd: UIC command + */ +static inline void +ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + lockdep_assert_held(&hba->uic_cmd_mutex); + + WARN_ON(hba->active_uic_cmd); + + hba->active_uic_cmd = uic_cmd; + + /* Write Args */ + ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); + ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); + ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); + + ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); + + /* Write UIC Cmd */ + ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, + REG_UIC_COMMAND); +} + +/** + * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Returns 0 only if success. + */ +static int +ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + unsigned long flags; + + lockdep_assert_held(&hba->uic_cmd_mutex); + + if (wait_for_completion_timeout(&uic_cmd->done, + msecs_to_jiffies(UIC_CMD_TIMEOUT))) { + ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; + } else { + ret = -ETIMEDOUT; + dev_err(hba->dev, + "uic cmd 0x%x with arg3 0x%x completion timeout\n", + uic_cmd->command, uic_cmd->argument3); + + if (!uic_cmd->cmd_active) { + dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", + __func__); + ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; + } + } + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->active_uic_cmd = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return ret; +} + +/** + * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result + * @hba: per adapter instance + * @uic_cmd: UIC command + * @completion: initialize the completion only if this is set to true + * + * Returns 0 only if success. 
+ */ +static int +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, + bool completion) +{ + lockdep_assert_held(&hba->uic_cmd_mutex); + lockdep_assert_held(hba->host->host_lock); + + if (!ufshcd_ready_for_uic_cmd(hba)) { + dev_err(hba->dev, + "Controller not ready to accept UIC commands\n"); + return -EIO; + } + + if (completion) + init_completion(&uic_cmd->done); + + uic_cmd->cmd_active = 1; + ufshcd_dispatch_uic_cmd(hba, uic_cmd); + + return 0; +} + +/** + * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Returns 0 only if success. + */ +int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + unsigned long flags; + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) + return 0; + + ufshcd_hold(hba, false); + mutex_lock(&hba->uic_cmd_mutex); + ufshcd_add_delay_before_dme_cmd(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!ret) + ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); + + mutex_unlock(&hba->uic_cmd_mutex); + + ufshcd_release(hba); + return ret; +} + +/** + * ufshcd_map_sg - Map scatter-gather list to prdt + * @hba: per adapter instance + * @lrbp: pointer to local reference block + * + * Returns 0 in case of success, non-zero value in case of failure + */ +static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshcd_sg_entry *prd_table; + struct scatterlist *sg; + struct scsi_cmnd *cmd; + int sg_segments; + int i; + + cmd = lrbp->cmd; + sg_segments = scsi_dma_map(cmd); + if (sg_segments < 0) + return sg_segments; + + if (sg_segments) { + + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16((sg_segments * + sizeof(struct ufshcd_sg_entry))); + else + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16(sg_segments); + + prd_table = lrbp->ucd_prdt_ptr; + + scsi_for_each_sg(cmd, sg, sg_segments, i) { + const unsigned int len = sg_dma_len(sg); + + /* + * From the UFSHCI spec: "Data Byte Count (DBC): A '0' + * based value that indicates the length, in bytes, of + * the data block. A maximum of length of 256KB may + * exist for any entry. Bits 1:0 of this field shall be + * 11b to indicate Dword granularity. A value of '3' + * indicates 4 bytes, '7' indicates 8 bytes, etc." 
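+			 *
+			 * Worked example: a 4 KiB segment has len = 0x1000,
+			 * so the size field below is programmed as
+			 * len - 1 = 0xfff, whose low two bits are 11b as the
+			 * spec requires.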
+ */ + WARN_ONCE(len > 256 * 1024, "len = %#x\n", len); + prd_table[i].size = cpu_to_le32(len - 1); + prd_table[i].addr = cpu_to_le64(sg->dma_address); + prd_table[i].reserved = 0; + } + } else { + lrbp->utr_descriptor_ptr->prd_table_length = 0; + } + + return 0; +} + +/** + * ufshcd_enable_intr - enable interrupts + * @hba: per adapter instance + * @intrs: interrupt bits + */ +static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + + if (hba->ufs_version == ufshci_version(1, 0)) { + u32 rw; + rw = set & INTERRUPT_MASK_RW_VER_10; + set = rw | ((set ^ intrs) & intrs); + } else { + set |= intrs; + } + + ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); +} + +/** + * ufshcd_disable_intr - disable interrupts + * @hba: per adapter instance + * @intrs: interrupt bits + */ +static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + + if (hba->ufs_version == ufshci_version(1, 0)) { + u32 rw; + rw = (set & INTERRUPT_MASK_RW_VER_10) & + ~(intrs & INTERRUPT_MASK_RW_VER_10); + set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10); + + } else { + set &= ~intrs; + } + + ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); +} + +/** + * ufshcd_prepare_req_desc_hdr() - Fills the requests header + * descriptor according to request + * @lrbp: pointer to local reference block + * @upiu_flags: flags required in the header + * @cmd_dir: requests data direction + */ +static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, + u8 *upiu_flags, enum dma_data_direction cmd_dir) +{ + struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; + u32 data_direction; + u32 dword_0; + u32 dword_1 = 0; + u32 dword_3 = 0; + + if (cmd_dir == DMA_FROM_DEVICE) { + data_direction = UTP_DEVICE_TO_HOST; + *upiu_flags = UPIU_CMD_FLAGS_READ; + } else if (cmd_dir == DMA_TO_DEVICE) { + data_direction = UTP_HOST_TO_DEVICE; + *upiu_flags = UPIU_CMD_FLAGS_WRITE; + } else { + data_direction = UTP_NO_DATA_TRANSFER; + *upiu_flags = UPIU_CMD_FLAGS_NONE; + } + + dword_0 = data_direction | (lrbp->command_type + << UPIU_COMMAND_TYPE_OFFSET); + if (lrbp->intr_cmd) + dword_0 |= UTP_REQ_DESC_INT_CMD; + + /* Prepare crypto related dwords */ + ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3); + + /* Transfer request descriptor header fields */ + req_desc->header.dword_0 = cpu_to_le32(dword_0); + req_desc->header.dword_1 = cpu_to_le32(dword_1); + /* + * assigning invalid value for command status. 
Controller
+	 * updates OCS on command completion, with the command
+	 * status
+	 */
+	req_desc->header.dword_2 =
+			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+	req_desc->header.dword_3 = cpu_to_le32(dword_3);
+
+	req_desc->prd_table_length = 0;
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
+ * for scsi commands
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
+{
+	struct scsi_cmnd *cmd = lrbp->cmd;
+	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+	unsigned short cdb_len;
+
+	/* command descriptor fields */
+	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+				UPIU_TRANSACTION_COMMAND, upiu_flags,
+				lrbp->lun, lrbp->task_tag);
+	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+	/* Total EHS length and Data segment length will be zero */
+	ucd_req_ptr->header.dword_2 = 0;
+
+	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
+
+	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
+	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
+	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
+ * for query requests
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+				struct ufshcd_lrb *lrbp, u8 upiu_flags)
+{
+	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+	struct ufs_query *query = &hba->dev_cmd.query;
+	u16 len = be16_to_cpu(query->request.upiu_req.length);
+
+	/* Query request header */
+	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+			lrbp->lun, lrbp->task_tag);
+	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+			0, query->request.query_func, 0, 0);
+
+	/* Data segment length is only needed for WRITE_DESC */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+		ucd_req_ptr->header.dword_2 =
+			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+	else
+		ucd_req_ptr->header.dword_2 = 0;
+
+	/* Copy the Query Request buffer as is */
+	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+			QUERY_OSF_SIZE);
+
+	/* Copy the Descriptor */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+		memcpy(ucd_req_ptr + 1, query->descriptor, len);
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+	/* command descriptor fields */
+	ucd_req_ptr->header.dword_0 =
+		UPIU_HEADER_DWORD(
+			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+	/* clear rest of the fields of basic header */
+	ucd_req_ptr->header.dword_1 = 0;
+	ucd_req_ptr->header.dword_2 = 0;
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
+ *			     for Device Management Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
+				      struct ufshcd_lrb *lrbp)
+{
+	u8 upiu_flags;
+	int ret = 0;
+
+	if (hba->ufs_version <= ufshci_version(1, 1))
+		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+	else
+		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
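+	/*
+	 * Device management UPIUs carry no SCSI data phase, hence DMA_NONE
+	 * below; a WRITE_DESC payload, if any, is placed in the UPIU data
+	 * segment by the query helper.
+	 */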
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); + if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) + ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); + else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) + ufshcd_prepare_utp_nop_upiu(lrbp); + else + ret = -EINVAL; + + return ret; +} + +/** + * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU) + * for SCSI Purposes + * @hba: per adapter instance + * @lrbp: pointer to local reference block + */ +static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + u8 upiu_flags; + int ret = 0; + + if (hba->ufs_version <= ufshci_version(1, 1)) + lrbp->command_type = UTP_CMD_TYPE_SCSI; + else + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + + if (likely(lrbp->cmd)) { + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, + lrbp->cmd->sc_data_direction); + ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); + } else { + ret = -EINVAL; + } + + return ret; +} + +/** + * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID + * @upiu_wlun_id: UPIU W-LUN id + * + * Returns SCSI W-LUN id + */ +static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) +{ + return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; +} + +static inline bool is_device_wlun(struct scsi_device *sdev) +{ + return sdev->lun == + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN); +} + +/* + * Associate the UFS controller queue with the default and poll HCTX types. + * Initialize the mq_map[] arrays. + */ +static int ufshcd_map_queues(struct Scsi_Host *shost) +{ + int i, ret; + + for (i = 0; i < shost->nr_maps; i++) { + struct blk_mq_queue_map *map = &shost->tag_set.map[i]; + + switch (i) { + case HCTX_TYPE_DEFAULT: + case HCTX_TYPE_POLL: + map->nr_queues = 1; + break; + case HCTX_TYPE_READ: + map->nr_queues = 0; + continue; + default: + WARN_ON_ONCE(true); + } + map->queue_offset = 0; + ret = blk_mq_map_queues(map); + WARN_ON_ONCE(ret); + } + + return 0; +} + +static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) +{ + struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr; + struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; + dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + + i * sizeof(struct utp_transfer_cmd_desc); + u16 response_offset = offsetof(struct utp_transfer_cmd_desc, + response_upiu); + u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); + + lrb->utr_descriptor_ptr = utrdlp + i; + lrb->utrd_dma_addr = hba->utrdl_dma_addr + + i * sizeof(struct utp_transfer_req_desc); + lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i); + lrb->ucd_req_dma_addr = cmd_desc_element_addr; + lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; + lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; + lrb->ucd_prdt_ptr = cmd_descp[i].prd_table; + lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; +} + +/** + * ufshcd_queuecommand - main entry point for SCSI requests + * @host: SCSI host pointer + * @cmd: command from SCSI Midlayer + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + struct ufs_hba *hba = shost_priv(host); + int tag = scsi_cmd_to_rq(cmd)->tag; + struct ufshcd_lrb *lrbp; + int err = 0; + + WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); + + /* + * Allows the UFS error handler to wait for prior ufshcd_queuecommand() + * calls. 
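+	 * (This read-side section is assumed to pair with a
+	 * synchronize_rcu() on the error-handler side before it inspects
+	 * outstanding requests.)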
+ */ + rcu_read_lock(); + + switch (hba->ufshcd_state) { + case UFSHCD_STATE_OPERATIONAL: + break; + case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: + /* + * SCSI error handler can call ->queuecommand() while UFS error + * handler is in progress. Error interrupts could change the + * state from UFSHCD_STATE_RESET to + * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests + * being issued in that case. + */ + if (ufshcd_eh_in_progress(hba)) { + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + break; + case UFSHCD_STATE_EH_SCHEDULED_FATAL: + /* + * pm_runtime_get_sync() is used at error handling preparation + * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's + * PM ops, it can never be finished if we let SCSI layer keep + * retrying it, which gets err handler stuck forever. Neither + * can we let the scsi cmd pass through, because UFS is in bad + * state, the scsi cmd may eventually time out, which will get + * err handler blocked for too long. So, just fail the scsi cmd + * sent from PM ops, err handler can recover PM error anyways. + */ + if (hba->pm_op_in_progress) { + hba->force_reset = true; + set_host_byte(cmd, DID_BAD_TARGET); + scsi_done(cmd); + goto out; + } + fallthrough; + case UFSHCD_STATE_RESET: + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + case UFSHCD_STATE_ERROR: + set_host_byte(cmd, DID_ERROR); + scsi_done(cmd); + goto out; + } + + hba->req_abort_count = 0; + + err = ufshcd_hold(hba, true); + if (err) { + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + WARN_ON(ufshcd_is_clkgating_allowed(hba) && + (hba->clk_gating.state != CLKS_ON)); + + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); + lrbp->cmd = cmd; + lrbp->task_tag = tag; + lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); + + ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); + + lrbp->req_abort_skip = false; + + ufshpb_prep(hba, lrbp); + + ufshcd_comp_scsi_upiu(hba, lrbp); + + err = ufshcd_map_sg(hba, lrbp); + if (err) { + lrbp->cmd = NULL; + ufshcd_release(hba); + goto out; + } + + ufshcd_send_command(hba, tag); + +out: + rcu_read_unlock(); + + if (ufs_trigger_eh()) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_schedule_eh_work(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + + return err; +} + +static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) +{ + lrbp->cmd = NULL; + lrbp->task_tag = tag; + lrbp->lun = 0; /* device management cmd is not specific to any LUN */ + lrbp->intr_cmd = true; /* No interrupt aggregation */ + ufshcd_prepare_lrbp_crypto(NULL, lrbp); + hba->dev_cmd.type = cmd_type; + + return ufshcd_compose_devman_upiu(hba, lrbp); +} + +/* + * Clear all the requests from the controller for which a bit has been set in + * @mask and wait until the controller confirms that these requests have been + * cleared. + */ +static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask) +{ + unsigned long flags; + + /* clear outstanding transaction before retry */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_utrl_clear(hba, mask); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* + * wait for h/w to clear corresponding bit in door-bell. + * max. wait is 1 sec. 
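+	 * ufshcd_wait_for_register() polls until the masked register value
+	 * matches the expected one, so passing ~mask waits for the selected
+	 * doorbell bits to read back as zero.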
+ */ + return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, + mask, ~mask, 1000, 1000); +} + +static int +ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + /* Get the UPIU response */ + query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> + UPIU_RSP_CODE_OFFSET; + return query_res->response; +} + +/** + * ufshcd_dev_cmd_completion() - handles device management command responses + * @hba: per adapter instance + * @lrbp: pointer to local reference block + */ +static int +ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int resp; + int err = 0; + + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); + resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); + + switch (resp) { + case UPIU_TRANSACTION_NOP_IN: + if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { + err = -EINVAL; + dev_err(hba->dev, "%s: unexpected response %x\n", + __func__, resp); + } + break; + case UPIU_TRANSACTION_QUERY_RSP: + err = ufshcd_check_query_response(hba, lrbp); + if (!err) + err = ufshcd_copy_query_response(hba, lrbp); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + err = -EPERM; + dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", + __func__); + break; + default: + err = -EINVAL; + dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", + __func__, resp); + break; + } + + return err; +} + +static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, int max_timeout) +{ + unsigned long time_left = msecs_to_jiffies(max_timeout); + unsigned long flags; + bool pending; + int err; + +retry: + time_left = wait_for_completion_timeout(hba->dev_cmd.complete, + time_left); + + if (likely(time_left)) { + /* + * The completion handler called complete() and the caller of + * this function still owns the @lrbp tag so the code below does + * not trigger any race conditions. + */ + hba->dev_cmd.complete = NULL; + err = ufshcd_get_tr_ocs(lrbp); + if (!err) + err = ufshcd_dev_cmd_completion(hba, lrbp); + } else { + err = -ETIMEDOUT; + dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", + __func__, lrbp->task_tag); + if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) { + /* successfully cleared the command, retry if needed */ + err = -EAGAIN; + /* + * Since clearing the command succeeded we also need to + * clear the task tag bit from the outstanding_reqs + * variable. + */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + pending = test_bit(lrbp->task_tag, + &hba->outstanding_reqs); + if (pending) { + hba->dev_cmd.complete = NULL; + __clear_bit(lrbp->task_tag, + &hba->outstanding_reqs); + } + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (!pending) { + /* + * The completion handler ran while we tried to + * clear the command. + */ + time_left = 1; + goto retry; + } + } else { + dev_err(hba->dev, "%s: failed to clear tag %d\n", + __func__, lrbp->task_tag); + } + } + + return err; +} + +/** + * ufshcd_exec_dev_cmd - API for sending device management requests + * @hba: UFS hba + * @cmd_type: specifies the type (NOP, Query...) + * @timeout: timeout in milliseconds + * + * NOTE: Since there is only one available tag for device management commands, + * it is expected you hold the hba->dev_cmd.lock mutex. 
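+ *
+ * Typical usage (a sketch, abbreviated from the query helpers below):
+ *
+ *	mutex_lock(&hba->dev_cmd.lock);
+ *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, QUERY_REQ_TIMEOUT);
+ *	mutex_unlock(&hba->dev_cmd.lock);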
+ */ +static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, + enum dev_cmd_type cmd_type, int timeout) +{ + DECLARE_COMPLETION_ONSTACK(wait); + const u32 tag = hba->reserved_slot; + struct ufshcd_lrb *lrbp; + int err; + + /* Protects use of hba->reserved_slot. */ + lockdep_assert_held(&hba->dev_cmd.lock); + + down_read(&hba->clk_scaling_lock); + + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); + err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); + if (unlikely(err)) + goto out; + + hba->dev_cmd.complete = &wait; + + ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); + + ufshcd_send_command(hba, tag); + err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); + ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); + +out: + up_read(&hba->clk_scaling_lock); + return err; +} + +/** + * ufshcd_init_query() - init the query response and request parameters + * @hba: per-adapter instance + * @request: address of the request pointer to be initialized + * @response: address of the response pointer to be initialized + * @opcode: operation to perform + * @idn: flag idn to access + * @index: LU number to access + * @selector: query/flag/descriptor further identification + */ +static inline void ufshcd_init_query(struct ufs_hba *hba, + struct ufs_query_req **request, struct ufs_query_res **response, + enum query_opcode opcode, u8 idn, u8 index, u8 selector) +{ + *request = &hba->dev_cmd.query.request; + *response = &hba->dev_cmd.query.response; + memset(*request, 0, sizeof(struct ufs_query_req)); + memset(*response, 0, sizeof(struct ufs_query_res)); + (*request)->upiu_req.opcode = opcode; + (*request)->upiu_req.idn = idn; + (*request)->upiu_req.index = index; + (*request)->upiu_req.selector = selector; +} + +static int ufshcd_query_flag_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) +{ + int ret; + int retries; + + for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { + ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); + if (ret) + dev_dbg(hba->dev, + "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + + if (ret) + dev_err(hba->dev, + "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n", + __func__, opcode, idn, ret, retries); + return ret; +} + +/** + * ufshcd_query_flag() - API function for sending flag query requests + * @hba: per-adapter instance + * @opcode: flag query to perform + * @idn: flag idn to access + * @index: flag index to access + * @flag_res: the flag value after the query request completes + * + * Returns 0 for success, non-zero in case of failure + */ +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, u8 index, bool *flag_res) +{ + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + int err, selector = 0; + int timeout = QUERY_REQ_TIMEOUT; + + BUG_ON(!hba); + + ufshcd_hold(hba, false); + mutex_lock(&hba->dev_cmd.lock); + ufshcd_init_query(hba, &request, &response, opcode, idn, index, + selector); + + switch (opcode) { + case UPIU_QUERY_OPCODE_SET_FLAG: + case UPIU_QUERY_OPCODE_CLEAR_FLAG: + case UPIU_QUERY_OPCODE_TOGGLE_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + break; + case UPIU_QUERY_OPCODE_READ_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + if (!flag_res) { + /* No dummy reads */ + dev_err(hba->dev, "%s: Invalid argument for read request\n", + 
__func__); + err = -EINVAL; + goto out_unlock; + } + break; + default: + dev_err(hba->dev, + "%s: Expected query flag opcode but got = %d\n", + __func__, opcode); + err = -EINVAL; + goto out_unlock; + } + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); + + if (err) { + dev_err(hba->dev, + "%s: Sending flag query for idn %d failed, err = %d\n", + __func__, idn, err); + goto out_unlock; + } + + if (flag_res) + *flag_res = (be32_to_cpu(response->upiu_res.value) & + MASK_QUERY_UPIU_FLAG_LOC) & 0x1; + +out_unlock: + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + return err; +} + +/** + * ufshcd_query_attr - API function for sending attribute requests + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @attr_val: the attribute value after the query request completes + * + * Returns 0 for success, non-zero in case of failure +*/ +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) +{ + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + int err; + + BUG_ON(!hba); + + if (!attr_val) { + dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", + __func__, opcode); + return -EINVAL; + } + + ufshcd_hold(hba, false); + + mutex_lock(&hba->dev_cmd.lock); + ufshcd_init_query(hba, &request, &response, opcode, idn, index, + selector); + + switch (opcode) { + case UPIU_QUERY_OPCODE_WRITE_ATTR: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + request->upiu_req.value = cpu_to_be32(*attr_val); + break; + case UPIU_QUERY_OPCODE_READ_ATTR: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + break; + default: + dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", + __func__, opcode); + err = -EINVAL; + goto out_unlock; + } + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + + if (err) { + dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", + __func__, opcode, idn, index, err); + goto out_unlock; + } + + *attr_val = be32_to_cpu(response->upiu_res.value); + +out_unlock: + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + return err; +} + +/** + * ufshcd_query_attr_retry() - API function for sending query + * attribute with retries + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @attr_val: the attribute value after the query request + * completes + * + * Returns 0 for success, non-zero in case of failure +*/ +int ufshcd_query_attr_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, + u32 *attr_val) +{ + int ret = 0; + u32 retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + ret = ufshcd_query_attr(hba, opcode, idn, index, + selector, attr_val); + if (ret) + dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + + if (ret) + dev_err(hba->dev, + "%s: query attribute, idn %d, failed with error %d after %d retries\n", + __func__, idn, ret, QUERY_REQ_RETRIES); + return ret; +} + +static int __ufshcd_query_descriptor(struct ufs_hba *hba, + enum query_opcode opcode, enum desc_idn idn, u8 index, + u8 selector, u8 *desc_buf, int *buf_len) +{ + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + int err; + + BUG_ON(!hba); + + if 
(!desc_buf) { + dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", + __func__, opcode); + return -EINVAL; + } + + if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { + dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", + __func__, *buf_len); + return -EINVAL; + } + + ufshcd_hold(hba, false); + + mutex_lock(&hba->dev_cmd.lock); + ufshcd_init_query(hba, &request, &response, opcode, idn, index, + selector); + hba->dev_cmd.query.descriptor = desc_buf; + request->upiu_req.length = cpu_to_be16(*buf_len); + + switch (opcode) { + case UPIU_QUERY_OPCODE_WRITE_DESC: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + break; + case UPIU_QUERY_OPCODE_READ_DESC: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + break; + default: + dev_err(hba->dev, + "%s: Expected query descriptor opcode but got = 0x%.2x\n", + __func__, opcode); + err = -EINVAL; + goto out_unlock; + } + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + + if (err) { + dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", + __func__, opcode, idn, index, err); + goto out_unlock; + } + + *buf_len = be16_to_cpu(response->upiu_res.length); + +out_unlock: + hba->dev_cmd.query.descriptor = NULL; + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + return err; +} + +/** + * ufshcd_query_descriptor_retry - API function for sending descriptor requests + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @desc_buf: the buffer that contains the descriptor + * @buf_len: length parameter passed to the device + * + * Returns 0 for success, non-zero in case of failure. + * The buf_len parameter will contain, on return, the length parameter + * received on the response. + */ +int ufshcd_query_descriptor_retry(struct ufs_hba *hba, + enum query_opcode opcode, + enum desc_idn idn, u8 index, + u8 selector, + u8 *desc_buf, int *buf_len) +{ + int err; + int retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + err = __ufshcd_query_descriptor(hba, opcode, idn, index, + selector, desc_buf, buf_len); + if (!err || err == -EINVAL) + break; + } + + return err; +} + +/** + * ufshcd_map_desc_id_to_length - map descriptor IDN to its length + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_len: mapped desc length (out) + */ +void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_len) +{ + if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 || + desc_id == QUERY_DESC_IDN_RFU_1) + *desc_len = 0; + else + *desc_len = hba->desc_size[desc_id]; +} +EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); + +static void ufshcd_update_desc_length(struct ufs_hba *hba, + enum desc_idn desc_id, int desc_index, + unsigned char desc_len) +{ + if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && + desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT) + /* For UFS 3.1, the normal unit descriptor is 10 bytes larger + * than the RPMB unit, however, both descriptors share the same + * desc_idn, to cover both unit descriptors with one length, we + * choose the normal unit descriptor length by desc_index. 
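+	 * (In effect, the first read of a normal unit descriptor records
+	 * the device-reported length, while RPMB unit and string descriptor
+	 * reads leave the cached length untouched.)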
+ */ + hba->desc_size[desc_id] = desc_len; +} + +/** + * ufshcd_read_desc_param - read the specified descriptor parameter + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_index: descriptor index + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 param_offset, + u8 *param_read_buf, + u8 param_size) +{ + int ret; + u8 *desc_buf; + int buff_len; + bool is_kmalloc = true; + + /* Safety check */ + if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) + return -EINVAL; + + /* Get the length of descriptor */ + ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); + if (!buff_len) { + dev_err(hba->dev, "%s: Failed to get desc length\n", __func__); + return -EINVAL; + } + + if (param_offset >= buff_len) { + dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", + __func__, param_offset, desc_id, buff_len); + return -EINVAL; + } + + /* Check whether we need temp memory */ + if (param_offset != 0 || param_size < buff_len) { + desc_buf = kzalloc(buff_len, GFP_KERNEL); + if (!desc_buf) + return -ENOMEM; + } else { + desc_buf = param_read_buf; + is_kmalloc = false; + } + + /* Request for full descriptor */ + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, + desc_buf, &buff_len); + + if (ret) { + dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", + __func__, desc_id, desc_index, param_offset, ret); + goto out; + } + + /* Sanity check */ + if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { + dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", + __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); + ret = -EINVAL; + goto out; + } + + /* Update descriptor length */ + buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET]; + ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len); + + if (is_kmalloc) { + /* Make sure we don't copy more data than available */ + if (param_offset >= buff_len) + ret = -EINVAL; + else + memcpy(param_read_buf, &desc_buf[param_offset], + min_t(u32, param_size, buff_len - param_offset)); + } +out: + if (is_kmalloc) + kfree(desc_buf); + return ret; +} + +/** + * struct uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char ufshcd_remove_non_printable(u8 ch) +{ + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; +} + +/** + * ufshcd_read_string_desc - read string descriptor + * @hba: pointer to adapter instance + * @desc_index: descriptor index + * @buf: pointer to buffer where descriptor would be read, + * the caller should free the memory. + * @ascii: if true convert from unicode to ascii characters + * null terminated string. + * + * Return: + * * string size on success. 
+ * * -ENOMEM: on allocation failure + * * -EINVAL: on a wrong parameter + */ +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii) +{ + struct uc_string_id *uc_str; + u8 *str; + int ret; + + if (!buf) + return -EINVAL; + + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, + (u8 *)uc_str, QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; + goto out; + } + + if (ascii) { + ssize_t ascii_len; + int i; + /* remove header and divide by 2 to move from UTF16 to UTF8 */ + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + + /* + * the descriptor contains string in UTF16 format + * we need to convert to utf-8 so it can be displayed + */ + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ret; i++) + str[i] = ufshcd_remove_non_printable(str[i]); + + str[ret++] = '\0'; + + } else { + str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + ret = uc_str->len; + } +out: + *buf = str; + kfree(uc_str); + return ret; +} + +/** + * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter + * @hba: Pointer to adapter instance + * @lun: lun id + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, + int lun, + enum unit_desc_param param_offset, + u8 *param_read_buf, + u32 param_size) +{ + /* + * Unit descriptors are only available for general purpose LUs (LUN id + * from 0 to 7) and RPMB Well known LU. + */ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset)) + return -EOPNOTSUPP; + + return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, + param_offset, param_read_buf, param_size); +} + +static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) +{ + int err = 0; + u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; + + if (hba->dev_info.wspecversion >= 0x300) { + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0, + &gating_wait); + if (err) + dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", + err, gating_wait); + + if (gating_wait == 0) { + gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; + dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", + gating_wait); + } + + hba->dev_info.clk_gating_wait_us = gating_wait; + } + + return err; +} + +/** + * ufshcd_memory_alloc - allocate memory for host memory space data structures + * @hba: per adapter instance + * + * 1. Allocate DMA memory for Command Descriptor array + * Each command descriptor consist of Command UPIU, Response UPIU and PRDT + * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). + * 3. Allocate DMA memory for UTP Task Management Request Descriptor List + * (UTMRDL) + * 4. 
Allocate memory for local reference block(lrb). + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_memory_alloc(struct ufs_hba *hba) +{ + size_t utmrdl_size, utrdl_size, ucdl_size; + + /* Allocate memory for UTP command descriptors */ + ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); + hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, + ucdl_size, + &hba->ucdl_dma_addr, + GFP_KERNEL); + + /* + * UFSHCI requires UTP command descriptor to be 128 byte aligned. + * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE + * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will + * be aligned to 128 bytes as well + */ + if (!hba->ucdl_base_addr || + WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(hba->dev, + "Command Descriptor Memory allocation failed\n"); + goto out; + } + + /* + * Allocate memory for UTP Transfer descriptors + * UFSHCI requires 1024 byte alignment of UTRD + */ + utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); + hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, + utrdl_size, + &hba->utrdl_dma_addr, + GFP_KERNEL); + if (!hba->utrdl_base_addr || + WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(hba->dev, + "Transfer Descriptor Memory allocation failed\n"); + goto out; + } + + /* + * Allocate memory for UTP Task Management descriptors + * UFSHCI requires 1024 byte alignment of UTMRD + */ + utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; + hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, + utmrdl_size, + &hba->utmrdl_dma_addr, + GFP_KERNEL); + if (!hba->utmrdl_base_addr || + WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(hba->dev, + "Task Management Descriptor Memory allocation failed\n"); + goto out; + } + + /* Allocate memory for local reference block */ + hba->lrb = devm_kcalloc(hba->dev, + hba->nutrs, sizeof(struct ufshcd_lrb), + GFP_KERNEL); + if (!hba->lrb) { + dev_err(hba->dev, "LRB Memory allocation failed\n"); + goto out; + } + return 0; +out: + return -ENOMEM; +} + +/** + * ufshcd_host_memory_configure - configure local reference block with + * memory offsets + * @hba: per adapter instance + * + * Configure Host memory space + * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA + * address. + * 2. Update each UTRD with Response UPIU offset, Response UPIU length + * and PRDT offset. + * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT + * into local reference block. 
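+ *
+ * Resulting layout per tag i (illustrative):
+ *
+ *	UTRD[i].UCDBA/UCDBAU ---> UCD[i]
+ *				  +- command UPIU  (offset 0)
+ *				  +- response UPIU (response_offset)
+ *				  +- PRDT          (prdt_offset)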
+ */ +static void ufshcd_host_memory_configure(struct ufs_hba *hba) +{ + struct utp_transfer_req_desc *utrdlp; + dma_addr_t cmd_desc_dma_addr; + dma_addr_t cmd_desc_element_addr; + u16 response_offset; + u16 prdt_offset; + int cmd_desc_size; + int i; + + utrdlp = hba->utrdl_base_addr; + + response_offset = + offsetof(struct utp_transfer_cmd_desc, response_upiu); + prdt_offset = + offsetof(struct utp_transfer_cmd_desc, prd_table); + + cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); + cmd_desc_dma_addr = hba->ucdl_dma_addr; + + for (i = 0; i < hba->nutrs; i++) { + /* Configure UTRD with command descriptor base address */ + cmd_desc_element_addr = + (cmd_desc_dma_addr + (cmd_desc_size * i)); + utrdlp[i].command_desc_base_addr_lo = + cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); + utrdlp[i].command_desc_base_addr_hi = + cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); + + /* Response upiu and prdt offset should be in double words */ + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { + utrdlp[i].response_upiu_offset = + cpu_to_le16(response_offset); + utrdlp[i].prd_table_offset = + cpu_to_le16(prdt_offset); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE); + } else { + utrdlp[i].response_upiu_offset = + cpu_to_le16(response_offset >> 2); + utrdlp[i].prd_table_offset = + cpu_to_le16(prdt_offset >> 2); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); + } + + ufshcd_init_lrb(hba, &hba->lrb[i], i); + } +} + +/** + * ufshcd_dme_link_startup - Notify Unipro to perform link startup + * @hba: per adapter instance + * + * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, + * in order to initialize the Unipro link startup procedure. + * Once the Unipro links are up, the device connected to the controller + * is detected. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_link_startup(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-link-startup: error code %d\n", ret); + return ret; +} +/** + * ufshcd_dme_reset - UIC command for DME_RESET + * @hba: per adapter instance + * + * DME_RESET command is issued in order to reset UniPro stack. + * This function now deals with cold reset. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_reset(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_RESET; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, + "dme-reset: error code %d\n", ret); + + return ret; +} + +int ufshcd_dme_configure_adapt(struct ufs_hba *hba, + int agreed_gear, + int adapt_val) +{ + int ret; + + if (agreed_gear < UFS_HS_G4) + adapt_val = PA_NO_ADAPT; + + ret = ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_TXHSADAPTTYPE), + adapt_val); + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt); + +/** + * ufshcd_dme_enable - UIC command for DME_ENABLE + * @hba: per adapter instance + * + * DME_ENABLE command is issued in order to enable UniPro stack. 
+ * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_enable(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_ENABLE; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, + "dme-enable: error code %d\n", ret); + + return ret; +} + +static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) +{ + #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 + unsigned long min_sleep_time_us; + + if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) + return; + + /* + * last_dme_cmd_tstamp will be 0 only for 1st call to + * this function + */ + if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { + min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; + } else { + unsigned long delta = + (unsigned long) ktime_to_us( + ktime_sub(ktime_get(), + hba->last_dme_cmd_tstamp)); + + if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) + min_sleep_time_us = + MIN_DELAY_BEFORE_DME_CMDS_US - delta; + else + return; /* no more delay required */ + } + + /* allow sleep for extra 50us if needed */ + usleep_range(min_sleep_time_us, min_sleep_time_us + 50); +} + +/** + * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET + * @hba: per adapter instance + * @attr_sel: uic command argument1 + * @attr_set: attribute set type as uic command argument2 + * @mib_val: setting value as uic command argument3 + * @peer: indicate whether peer or local + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, + u8 attr_set, u32 mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-set", + "dme-peer-set" + }; + const char *set = action[!!peer]; + int ret; + int retries = UFS_UIC_COMMAND_RETRIES; + + uic_cmd.command = peer ? 
+			UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+	uic_cmd.argument1 = attr_sel;
+	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+	uic_cmd.argument3 = mib_val;
+
+	do {
+		/* for peer attributes we retry upon failure */
+		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+		if (ret)
+			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+	} while (ret && peer && --retries);
+
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+			UFS_UIC_COMMAND_RETRIES - retries);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+			u32 *mib_val, u8 peer)
+{
+	struct uic_command uic_cmd = {0};
+	static const char *const action[] = {
+		"dme-get",
+		"dme-peer-get"
+	};
+	const char *get = action[!!peer];
+	int ret;
+	int retries = UFS_UIC_COMMAND_RETRIES;
+	struct ufs_pa_layer_attr orig_pwr_info;
+	struct ufs_pa_layer_attr temp_pwr_info;
+	bool pwr_mode_change = false;
+
+	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
+		orig_pwr_info = hba->pwr_info;
+		temp_pwr_info = orig_pwr_info;
+
+		if (orig_pwr_info.pwr_tx == FAST_MODE ||
+		    orig_pwr_info.pwr_rx == FAST_MODE) {
+			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
+			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
+			pwr_mode_change = true;
+		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
+			   orig_pwr_info.pwr_rx == SLOW_MODE) {
+			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
+			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
+			pwr_mode_change = true;
+		}
+		if (pwr_mode_change) {
+			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+			if (ret)
+				goto out;
+		}
+	}
+
+	uic_cmd.command = peer ?
+		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+	uic_cmd.argument1 = attr_sel;
+
+	do {
+		/* for peer attributes we retry upon failure */
+		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+		if (ret)
+			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+				get, UIC_GET_ATTR_ID(attr_sel), ret);
+	} while (ret && peer && --retries);
+
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+			get, UIC_GET_ATTR_ID(attr_sel),
+			UFS_UIC_COMMAND_RETRIES - retries);
+
+	if (mib_val && !ret)
+		*mib_val = uic_cmd.argument3;
+
+	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
+	    && pwr_mode_change)
+		ufshcd_change_power_mode(hba, &orig_pwr_info);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
+ * state) and waits for them to take effect.
+ *
+ * @hba: per adapter instance
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
+ * DME_HIBERNATE_EXIT take some time to take effect on both the host and
+ * device UniPro link, hence their final completion is indicated by dedicated
+ * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
+ * addition to the normal UIC command completion status (UCCS). This function
+ * only returns after the relevant status bits indicate completion.
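+ * While the command is outstanding, UIC_COMMAND_COMPL is masked so that
+ * completion is reported through hba->uic_async_done rather than the
+ * regular UIC command completion path.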
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+	DECLARE_COMPLETION_ONSTACK(uic_async_done);
+	unsigned long flags;
+	u8 status;
+	int ret;
+	bool reenable_intr = false;
+
+	mutex_lock(&hba->uic_cmd_mutex);
+	ufshcd_add_delay_before_dme_cmd(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (ufshcd_is_link_broken(hba)) {
+		ret = -ENOLINK;
+		goto out_unlock;
+	}
+	hba->uic_async_done = &uic_async_done;
+	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+		/*
+		 * Make sure UIC command completion interrupt is disabled before
+		 * issuing UIC command.
+		 */
+		wmb();
+		reenable_intr = true;
+	}
+	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
+		goto out;
+	}
+
+	if (!wait_for_completion_timeout(hba->uic_async_done,
+					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+			cmd->command, cmd->argument3);
+
+		if (!cmd->cmd_active) {
+			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
+				__func__);
+			goto check_upmcrs;
+		}
+
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+check_upmcrs:
+	status = ufshcd_get_upmcrs(hba);
+	if (status != PWR_LOCAL) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
+			cmd->command, status);
+		ret = (status != PWR_OK) ? status : -1;
+	}
+out:
+	if (ret) {
+		ufshcd_print_host_state(hba);
+		ufshcd_print_pwr_info(hba);
+		ufshcd_print_evt_hist(hba);
+	}
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->active_uic_cmd = NULL;
+	hba->uic_async_done = NULL;
+	if (reenable_intr)
+		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+	if (ret) {
+		ufshcd_set_link_broken(hba);
+		ufshcd_schedule_eh_work(hba);
+	}
+out_unlock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	mutex_unlock(&hba->uic_cmd_mutex);
+
+	return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+	struct uic_command uic_cmd = {0};
+	int ret;
+
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
+		ret = ufshcd_dme_set(hba,
+				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
+		if (ret) {
+			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
+				__func__, ret);
+			goto out;
+		}
+	}
+
+	uic_cmd.command = UIC_CMD_DME_SET;
+	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+	uic_cmd.argument3 = mode;
+	ufshcd_hold(hba, false);
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	ufshcd_release(hba);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
+
+int ufshcd_link_recovery(struct ufs_hba *hba)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	ufshcd_set_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* Reset the attached device */
+	ufshcd_device_reset(hba);
+
+	ret = ufshcd_host_reset_and_restore(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (ret)
+		hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	ufshcd_clear_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (ret)
+		dev_err(hba->dev, "%s: link recovery failed, err %d",
+			__func__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
+
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+	int ret;
+	struct uic_command uic_cmd = {0};
+	ktime_t start = ktime_get();
+
+	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
+	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+	if (ret)
+		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+			__func__, ret);
+	else
+		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+								POST_CHANGE);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
+
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+	struct uic_command uic_cmd = {0};
+	int ret;
+	ktime_t start = ktime_get();
+
+	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
+	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: hibern8 exit failed.
ret = %d\n", + __func__, ret); + } else { + ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, + POST_CHANGE); + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); + hba->ufs_stats.hibern8_exit_cnt++; + } + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); + +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) +{ + unsigned long flags; + bool update = false; + + if (!ufshcd_is_auto_hibern8_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ahit != ahit) { + hba->ahit = ahit; + update = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (update && + !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { + ufshcd_rpm_get_sync(hba); + ufshcd_hold(hba, false); + ufshcd_auto_hibern8_enable(hba); + ufshcd_release(hba); + ufshcd_rpm_put_sync(hba); + } +} +EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); + +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) +{ + if (!ufshcd_is_auto_hibern8_supported(hba)) + return; + + ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); +} + + /** + * ufshcd_init_pwr_info - setting the POR (power on reset) + * values in hba power info + * @hba: per-adapter instance + */ +static void ufshcd_init_pwr_info(struct ufs_hba *hba) +{ + hba->pwr_info.gear_rx = UFS_PWM_G1; + hba->pwr_info.gear_tx = UFS_PWM_G1; + hba->pwr_info.lane_rx = 1; + hba->pwr_info.lane_tx = 1; + hba->pwr_info.pwr_rx = SLOWAUTO_MODE; + hba->pwr_info.pwr_tx = SLOWAUTO_MODE; + hba->pwr_info.hs_rate = 0; +} + +/** + * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device + * @hba: per-adapter instance + */ +static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + + if (hba->max_pwr_info.is_valid) + return 0; + + if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { + pwr_info->pwr_tx = FASTAUTO_MODE; + pwr_info->pwr_rx = FASTAUTO_MODE; + } else { + pwr_info->pwr_tx = FAST_MODE; + pwr_info->pwr_rx = FAST_MODE; + } + pwr_info->hs_rate = PA_HS_MODE_B; + + /* Get the connected lane count */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), + &pwr_info->lane_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &pwr_info->lane_tx); + + if (!pwr_info->lane_rx || !pwr_info->lane_tx) { + dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } + + /* + * First, get the maximum gears of HS speed. + * If a zero value, it means there is no HSGEAR capability. + * Then, get the maximum gears of PWM speed. 
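+	 * Note that the peer's maximum RX gear bounds our TX gear, which is
+	 * why PA_MAXRXHSGEAR is also read from the peer to fill gear_tx.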
+ */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", + __func__, pwr_info->gear_rx); + return -EINVAL; + } + pwr_info->pwr_rx = SLOW_MODE; + } + + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", + __func__, pwr_info->gear_tx); + return -EINVAL; + } + pwr_info->pwr_tx = SLOW_MODE; + } + + hba->max_pwr_info.is_valid = true; + return 0; +} + +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int ret; + + /* if already configured to the requested pwr_mode */ + if (!hba->force_pmc && + pwr_mode->gear_rx == hba->pwr_info.gear_rx && + pwr_mode->gear_tx == hba->pwr_info.gear_tx && + pwr_mode->lane_rx == hba->pwr_info.lane_rx && + pwr_mode->lane_tx == hba->pwr_info.lane_tx && + pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && + pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && + pwr_mode->hs_rate == hba->pwr_info.hs_rate) { + dev_dbg(hba->dev, "%s: power already configured\n", __func__); + return 0; + } + + /* + * Configure attributes for power mode change with below. + * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, + * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, + * - PA_HSSERIES + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + pwr_mode->lane_rx); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + pwr_mode->lane_tx); + if (pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); + + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + pwr_mode->hs_rate); + + if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), + DL_AFC0ReqTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), + DL_FC1ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), + DL_TC1ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), + DL_AFC1ReqTimeOutVal_Default); + + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), + DL_AFC0ReqTimeOutVal_Default); + } + + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 + | pwr_mode->pwr_tx); + + if (ret) { + 
dev_err(hba->dev, + "%s: power mode change failed %d\n", __func__, ret); + } else { + ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, + pwr_mode); + + memcpy(&hba->pwr_info, pwr_mode, + sizeof(struct ufs_pa_layer_attr)); + } + + return ret; +} + +/** + * ufshcd_config_pwr_mode - configure a new power mode + * @hba: per-adapter instance + * @desired_pwr_mode: desired power configuration + */ +int ufshcd_config_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *desired_pwr_mode) +{ + struct ufs_pa_layer_attr final_params = { 0 }; + int ret; + + ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, + desired_pwr_mode, &final_params); + + if (ret) + memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); + + ret = ufshcd_change_power_mode(hba, &final_params); + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); + +/** + * ufshcd_complete_dev_init() - checks device readiness + * @hba: per-adapter instance + * + * Set fDeviceInit flag and poll until device toggles it. + */ +static int ufshcd_complete_dev_init(struct ufs_hba *hba) +{ + int err; + bool flag_res = true; + ktime_t timeout; + + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL); + if (err) { + dev_err(hba->dev, + "%s setting fDeviceInit flag failed with error %d\n", + __func__, err); + goto out; + } + + /* Poll fDeviceInit flag to be cleared */ + timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT); + do { + err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res); + if (!flag_res) + break; + usleep_range(500, 1000); + } while (ktime_before(ktime_get(), timeout)); + + if (err) { + dev_err(hba->dev, + "%s reading fDeviceInit flag failed with error %d\n", + __func__, err); + } else if (flag_res) { + dev_err(hba->dev, + "%s fDeviceInit was not cleared by the device\n", + __func__); + err = -EBUSY; + } +out: + return err; +} + +/** + * ufshcd_make_hba_operational - Make UFS controller operational + * @hba: per adapter instance + * + * To bring UFS host controller to operational state, + * 1. Enable required interrupts + * 2. Configure interrupt aggregation + * 3. Program UTRL and UTMRL base address + * 4. Configure run-stop-registers + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_make_hba_operational(struct ufs_hba *hba) +{ + int err = 0; + u32 reg; + + /* Enable required interrupts */ + ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); + + /* Configure interrupt aggregation */ + if (ufshcd_is_intr_aggr_allowed(hba)) + ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); + else + ufshcd_disable_intr_aggr(hba); + + /* Configure UTRL and UTMRL base address registers */ + ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), + REG_UTP_TRANSFER_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), + REG_UTP_TRANSFER_REQ_LIST_BASE_H); + ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), + REG_UTP_TASK_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), + REG_UTP_TASK_REQ_LIST_BASE_H); + + /* + * Make sure base address and interrupt setup are updated before + * enabling the run/stop registers below. 
+ */ + wmb(); + + /* + * UCRDY, UTMRLDY and UTRLRDY bits must be 1 + */ + reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); + if (!(ufshcd_get_lists_status(reg))) { + ufshcd_enable_run_stop_reg(hba); + } else { + dev_err(hba->dev, + "Host controller not ready to process requests"); + err = -EIO; + } + + return err; +} +EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); + +/** + * ufshcd_hba_stop - Send controller to reset state + * @hba: per adapter instance + */ +void ufshcd_hba_stop(struct ufs_hba *hba) +{ + unsigned long flags; + int err; + + /* + * Obtain the host lock to prevent that the controller is disabled + * while the UFS interrupt handler is active on another CPU. + */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, + CONTROLLER_ENABLE, CONTROLLER_DISABLE, + 10, 1); + if (err) + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); +} +EXPORT_SYMBOL_GPL(ufshcd_hba_stop); + +/** + * ufshcd_hba_execute_hce - initialize the controller + * @hba: per adapter instance + * + * The controller resets itself and controller firmware initialization + * sequence kicks off. When controller is ready it will set + * the Host Controller Enable bit to 1. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_hba_execute_hce(struct ufs_hba *hba) +{ + int retry_outer = 3; + int retry_inner; + +start: + if (ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); + + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); + + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + + /* start controller initialization sequence */ + ufshcd_hba_start(hba); + + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. 
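+	 * The per-controller value used below is supplied through
+	 * hba->vps->hba_enable_delay_us.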
+ */ + ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); + + /* wait for the host controller to complete initialization */ + retry_inner = 50; + while (!ufshcd_is_hba_active(hba)) { + if (retry_inner) { + retry_inner--; + } else { + dev_err(hba->dev, + "Controller enable failed\n"); + if (retry_outer) { + retry_outer--; + goto start; + } + return -EIO; + } + usleep_range(1000, 1100); + } + + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); + + ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); + + return 0; +} + +int ufshcd_hba_enable(struct ufs_hba *hba) +{ + int ret; + + if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { + ufshcd_set_link_off(hba); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); + ret = ufshcd_dme_reset(hba); + if (!ret) { + ret = ufshcd_dme_enable(hba); + if (!ret) + ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); + if (ret) + dev_err(hba->dev, + "Host controller enable failed with non-hce\n"); + } + } else { + ret = ufshcd_hba_execute_hce(hba); + } + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_hba_enable); + +static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) +{ + int tx_lanes = 0, i, err = 0; + + if (!peer) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + else + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + for (i = 0; i < tx_lanes; i++) { + if (!peer) + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), + 0); + else + err = ufshcd_dme_peer_set(hba, + UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), + 0); + if (err) { + dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", + __func__, peer, i, err); + break; + } + } + + return err; +} + +static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) +{ + return ufshcd_disable_tx_lcc(hba, true); +} + +void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) +{ + struct ufs_event_hist *e; + + if (id >= UFS_EVT_CNT) + return; + + e = &hba->ufs_stats.event[id]; + e->val[e->pos] = val; + e->tstamp[e->pos] = ktime_get(); + e->cnt += 1; + e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH; + + ufshcd_vops_event_notify(hba, id, &val); +} +EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist); + +/** + * ufshcd_link_startup - Initialize unipro link startup + * @hba: per adapter instance + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_link_startup(struct ufs_hba *hba) +{ + int ret; + int retries = DME_LINKSTARTUP_RETRIES; + bool link_startup_again = false; + + /* + * If UFS device isn't active then we will have to issue link startup + * 2 times to make sure the device state move to active. + */ + if (!ufshcd_is_ufs_dev_active(hba)) + link_startup_again = true; + +link_startup: + do { + ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); + + ret = ufshcd_dme_link_startup(hba); + + /* check if device is detected by inter-connect layer */ + if (!ret && !ufshcd_is_device_present(hba)) { + ufshcd_update_evt_hist(hba, + UFS_EVT_LINK_STARTUP_FAIL, + 0); + dev_err(hba->dev, "%s: Device not present\n", __func__); + ret = -ENXIO; + goto out; + } + + /* + * DME link lost indication is only received when link is up, + * but we can't be sure if the link is up until link startup + * succeeds. So reset the local Uni-Pro and try again. 
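+		 * If that re-enable itself fails, record the event and
+		 * give up instead of retrying.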
+ */
+		if (ret && retries && ufshcd_hba_enable(hba)) {
+			ufshcd_update_evt_hist(hba,
+					       UFS_EVT_LINK_STARTUP_FAIL,
+					       (u32)ret);
+			goto out;
+		}
+	} while (ret && retries--);
+
+	if (ret) {
+		/* failed to get the link up... give up */
+		ufshcd_update_evt_hist(hba,
+				       UFS_EVT_LINK_STARTUP_FAIL,
+				       (u32)ret);
+		goto out;
+	}
+
+	if (link_startup_again) {
+		link_startup_again = false;
+		retries = DME_LINKSTARTUP_RETRIES;
+		goto link_startup;
+	}
+
+	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+	ufshcd_init_pwr_info(hba);
+	ufshcd_print_pwr_info(hba);
+
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
+		ret = ufshcd_disable_device_tx_lcc(hba);
+		if (ret)
+			goto out;
+	}
+
+	/* Include any host controller configuration via UIC commands */
+	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
+	if (ret)
+		goto out;
+
+	/* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
+	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+	ret = ufshcd_make_hba_operational(hba);
+out:
+	if (ret) {
+		dev_err(hba->dev, "link startup failed %d\n", ret);
+		ufshcd_print_host_state(hba);
+		ufshcd_print_pwr_info(hba);
+		ufshcd_print_evt_hist(hba);
+	}
+	return ret;
+}
+
+/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device's UFS Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+	int err = 0;
+	int retries;
+
+	ufshcd_hold(hba, false);
+	mutex_lock(&hba->dev_cmd.lock);
+	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+					  hba->nop_out_timeout);
+
+		if (!err || err == -ETIMEDOUT)
+			break;
+
+		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+	}
+	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
+
+	if (err)
+		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+	return err;
+}
+
+/**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
+ * value that the host can queue.
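+ * In every case the resulting depth is clamped to the controller
+ * limit, hba->nutrs.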
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+	int ret = 0;
+	u8 lun_qdepth;
+	struct ufs_hba *hba;
+
+	hba = shost_priv(sdev->host);
+
+	lun_qdepth = hba->nutrs;
+	ret = ufshcd_read_unit_desc_param(hba,
+					  ufshcd_scsi_to_upiu_lun(sdev->lun),
+					  UNIT_DESC_PARAM_LU_Q_DEPTH,
+					  &lun_qdepth,
+					  sizeof(lun_qdepth));
+
+	/* Some WLUNs don't support the unit descriptor */
+	if (ret == -EOPNOTSUPP)
+		lun_qdepth = 1;
+	else if (!lun_qdepth)
+		/* eventually, we can figure out the real queue depth */
+		lun_qdepth = hba->nutrs;
+	else
+		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+			__func__, lun_qdepth);
+	scsi_change_queue_depth(sdev, lun_qdepth);
+}
+
+/*
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success, with the write protect status returned in
+ * the @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+			    u8 lun,
+			    u8 *b_lu_write_protect)
+{
+	int ret;
+
+	if (!b_lu_write_protect)
+		ret = -EINVAL;
+	/*
+	 * According to UFS device spec, RPMB LU can't be write
+	 * protected so skip reading bLUWriteProtect parameter for
+	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+	 */
+	else if (lun >= hba->dev_info.max_lu_supported)
+		ret = -ENOTSUPP;
+	else
+		ret = ufshcd_read_unit_desc_param(hba,
+					  lun,
+					  UNIT_DESC_PARAM_LU_WR_PROTECT,
+					  b_lu_write_protect,
+					  sizeof(*b_lu_write_protect));
+	return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+						    const struct scsi_device *sdev)
+{
+	if (hba->dev_info.f_power_on_wp_en &&
+	    !hba->dev_info.is_lu_power_on_wp) {
+		u8 b_lu_write_protect;
+
+		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+				      &b_lu_write_protect) &&
+		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+			hba->dev_info.is_lu_power_on_wp = true;
+	}
+}
+
+/**
+ * ufshcd_setup_links - associate links between device wlun and other luns
+ * @sdev: pointer to SCSI device
+ * @hba: pointer to ufs hba
+ */
+static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+	struct device_link *link;
+
+	/*
+	 * Device wlun is the supplier & rest of the luns are consumers.
+	 * This ensures that device wlun suspends after all other luns.
+	 */
+	if (hba->ufs_device_wlun) {
+		link = device_link_add(&sdev->sdev_gendev,
+				       &hba->ufs_device_wlun->sdev_gendev,
+				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+		if (!link) {
+			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
+				dev_name(&hba->ufs_device_wlun->sdev_gendev));
+			return;
+		}
+		hba->luns_avail--;
+		/* Ignore REPORT_LUN wlun probing */
+		if (hba->luns_avail == 1) {
+			ufshcd_rpm_put(hba);
+			return;
+		}
+	} else {
+		/*
+		 * Device wlun is probed. The assumption is that WLUNs are
+		 * scanned before other LUNs.
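+		 * The WLUN itself needs no link to a supplier, so only
+		 * the available-LUN count is updated here.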
+ */ + hba->luns_avail--; + } +} + +/** + * ufshcd_slave_alloc - handle initial SCSI device configurations + * @sdev: pointer to SCSI device + * + * Returns success + */ +static int ufshcd_slave_alloc(struct scsi_device *sdev) +{ + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + + /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ + sdev->use_10_for_ms = 1; + + /* DBD field should be set to 1 in mode sense(10) */ + sdev->set_dbd_for_ms = 1; + + /* allow SCSI layer to restart the device in case of errors */ + sdev->allow_restart = 1; + + /* REPORT SUPPORTED OPERATION CODES is not supported */ + sdev->no_report_opcodes = 1; + + /* WRITE_SAME command is not supported */ + sdev->no_write_same = 1; + + ufshcd_set_queue_depth(sdev); + + ufshcd_get_lu_power_on_wp_status(hba, sdev); + + ufshcd_setup_links(hba, sdev); + + return 0; +} + +/** + * ufshcd_change_queue_depth - change queue depth + * @sdev: pointer to SCSI device + * @depth: required depth to set + * + * Change queue depth and make sure the max. limits are not crossed. + */ +static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) +{ + return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue)); +} + +static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_destroy_lu(hba, sdev); +} + +static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_init_hpb_lu(hba, sdev); +} + +/** + * ufshcd_slave_configure - adjust SCSI device configurations + * @sdev: pointer to SCSI device + */ +static int ufshcd_slave_configure(struct scsi_device *sdev) +{ + struct ufs_hba *hba = shost_priv(sdev->host); + struct request_queue *q = sdev->request_queue; + + ufshcd_hpb_configure(hba, sdev); + + blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); + if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) + blk_queue_update_dma_alignment(q, PAGE_SIZE - 1); + /* + * Block runtime-pm until all consumers are added. + * Refer ufshcd_setup_links(). + */ + if (is_device_wlun(sdev)) + pm_runtime_get_noresume(&sdev->sdev_gendev); + else if (ufshcd_is_rpm_autosuspend_allowed(hba)) + sdev->rpm_autosuspend = 1; + /* + * Do not print messages during runtime PM to avoid never-ending cycles + * of messages written back to storage by user space causing runtime + * resume, causing more messages and so on. 
+ */ + sdev->silence_suspend = 1; + + ufshcd_crypto_register(hba, q); + + return 0; +} + +/** + * ufshcd_slave_destroy - remove SCSI device configurations + * @sdev: pointer to SCSI device + */ +static void ufshcd_slave_destroy(struct scsi_device *sdev) +{ + struct ufs_hba *hba; + unsigned long flags; + + hba = shost_priv(sdev->host); + + ufshcd_hpb_destroy(hba, sdev); + + /* Drop the reference as it won't be needed anymore */ + if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { + spin_lock_irqsave(hba->host->host_lock, flags); + hba->ufs_device_wlun = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } else if (hba->ufs_device_wlun) { + struct device *supplier = NULL; + + /* Ensure UFS Device WLUN exists and does not disappear */ + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ufs_device_wlun) { + supplier = &hba->ufs_device_wlun->sdev_gendev; + get_device(supplier); + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (supplier) { + /* + * If a LUN fails to probe (e.g. absent BOOT WLUN), the + * device will not have been registered but can still + * have a device link holding a reference to the device. + */ + device_link_remove(&sdev->sdev_gendev, supplier); + put_device(supplier); + } + } +} + +/** + * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status + * @lrbp: pointer to local reference block of completed command + * @scsi_status: SCSI command status + * + * Returns value base on SCSI command status + */ +static inline int +ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) +{ + int result = 0; + + switch (scsi_status) { + case SAM_STAT_CHECK_CONDITION: + ufshcd_copy_sense_data(lrbp); + fallthrough; + case SAM_STAT_GOOD: + result |= DID_OK << 16 | scsi_status; + break; + case SAM_STAT_TASK_SET_FULL: + case SAM_STAT_BUSY: + case SAM_STAT_TASK_ABORTED: + ufshcd_copy_sense_data(lrbp); + result |= scsi_status; + break; + default: + result |= DID_ERROR << 16; + break; + } /* end of switch */ + + return result; +} + +/** + * ufshcd_transfer_rsp_status - Get overall status of the response + * @hba: per adapter instance + * @lrbp: pointer to local reference block of completed command + * + * Returns result of the command to notify SCSI midlayer + */ +static inline int +ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int result = 0; + int scsi_status; + enum utp_ocs ocs; + + /* overall command status of utrd */ + ocs = ufshcd_get_tr_ocs(lrbp); + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { + if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) & + MASK_RSP_UPIU_RESULT) + ocs = OCS_SUCCESS; + } + + switch (ocs) { + case OCS_SUCCESS: + result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); + switch (result) { + case UPIU_TRANSACTION_RESPONSE: + /* + * get the response UPIU result to extract + * the SCSI command status + */ + result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); + + /* + * get the result based on SCSI status response + * to notify the SCSI midlayer of the command status + */ + scsi_status = result & MASK_SCSI_STATUS; + result = ufshcd_scsi_cmd_status(lrbp, scsi_status); + + /* + * Currently we are only supporting BKOPs exception + * events hence we can ignore BKOPs exception event + * during power management callbacks. BKOPs exception + * event is not expected to be raised in runtime suspend + * callback as it allows the urgent bkops. 
+ * During system suspend, we are anyway forcefully + * disabling the bkops and if urgent bkops is needed + * it will be enabled on system resume. Long term + * solution could be to abort the system suspend if + * UFS device needs urgent BKOPs. + */ + if (!hba->pm_op_in_progress && + !ufshcd_eh_in_progress(hba) && + ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) + /* Flushed in suspend */ + schedule_work(&hba->eeh_work); + + if (scsi_status == SAM_STAT_GOOD) + ufshpb_rsp_upiu(hba, lrbp); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + result = DID_ERROR << 16; + dev_err(hba->dev, + "Reject UPIU not fully implemented\n"); + break; + default: + dev_err(hba->dev, + "Unexpected request response code = %x\n", + result); + result = DID_ERROR << 16; + break; + } + break; + case OCS_ABORTED: + result |= DID_ABORT << 16; + break; + case OCS_INVALID_COMMAND_STATUS: + result |= DID_REQUEUE << 16; + break; + case OCS_INVALID_CMD_TABLE_ATTR: + case OCS_INVALID_PRDT_ATTR: + case OCS_MISMATCH_DATA_BUF_SIZE: + case OCS_MISMATCH_RESP_UPIU_SIZE: + case OCS_PEER_COMM_FAILURE: + case OCS_FATAL_ERROR: + case OCS_DEVICE_FATAL_ERROR: + case OCS_INVALID_CRYPTO_CONFIG: + case OCS_GENERAL_CRYPTO_ERROR: + default: + result |= DID_ERROR << 16; + dev_err(hba->dev, + "OCS error from controller = %x for tag %d\n", + ocs, lrbp->task_tag); + ufshcd_print_evt_hist(hba); + ufshcd_print_host_state(hba); + break; + } /* end of switch */ + + if ((host_byte(result) != DID_OK) && + (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) + ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); + return result; +} + +static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, + u32 intr_mask) +{ + if (!ufshcd_is_auto_hibern8_supported(hba) || + !ufshcd_is_auto_hibern8_enabled(hba)) + return false; + + if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) + return false; + + if (hba->active_uic_cmd && + (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || + hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) + return false; + + return true; +} + +/** + * ufshcd_uic_cmd_compl - handle completion of uic command + * @hba: per adapter instance + * @intr_status: interrupt status generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) +{ + irqreturn_t retval = IRQ_NONE; + + spin_lock(hba->host->host_lock); + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) + hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); + + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { + hba->active_uic_cmd->argument2 |= + ufshcd_get_uic_cmd_result(hba); + hba->active_uic_cmd->argument3 = + ufshcd_get_dme_attr_val(hba); + if (!hba->uic_async_done) + hba->active_uic_cmd->cmd_active = 0; + complete(&hba->active_uic_cmd->done); + retval = IRQ_HANDLED; + } + + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { + hba->active_uic_cmd->cmd_active = 0; + complete(hba->uic_async_done); + retval = IRQ_HANDLED; + } + + if (retval == IRQ_HANDLED) + ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, + UFS_CMD_COMP); + spin_unlock(hba->host->host_lock); + return retval; +} + +/* Release the resources allocated for processing a SCSI command. */ +static void ufshcd_release_scsi_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + struct scsi_cmnd *cmd = lrbp->cmd; + + scsi_dma_unmap(cmd); + lrbp->cmd = NULL; /* Mark the command as completed. 
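+			 * A NULL ->cmd is how __ufshcd_transfer_req_compl()
+			 * tells SCSI completions from device-management ones.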
*/ + ufshcd_release(hba); + ufshcd_clk_scaling_update_busy(hba); +} + +/** + * __ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + * @completed_reqs: bitmask that indicates which requests to complete + */ +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, + unsigned long completed_reqs) +{ + struct ufshcd_lrb *lrbp; + struct scsi_cmnd *cmd; + int index; + + for_each_set_bit(index, &completed_reqs, hba->nutrs) { + lrbp = &hba->lrb[index]; + lrbp->compl_time_stamp = ktime_get(); + cmd = lrbp->cmd; + if (cmd) { + if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) + ufshcd_update_monitor(hba, lrbp); + ufshcd_add_command_trace(hba, index, UFS_CMD_COMP); + cmd->result = ufshcd_transfer_rsp_status(hba, lrbp); + ufshcd_release_scsi_cmd(hba, lrbp); + /* Do not touch lrbp after scsi done */ + scsi_done(cmd); + } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || + lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { + if (hba->dev_cmd.complete) { + ufshcd_add_command_trace(hba, index, + UFS_DEV_COMP); + complete(hba->dev_cmd.complete); + ufshcd_clk_scaling_update_busy(hba); + } + } + } +} + +/* + * Returns > 0 if one or more commands have been completed or 0 if no + * requests have been completed. + */ +static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct ufs_hba *hba = shost_priv(shost); + unsigned long completed_reqs, flags; + u32 tr_doorbell; + + spin_lock_irqsave(&hba->outstanding_lock, flags); + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = ~tr_doorbell & hba->outstanding_reqs; + WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, + "completed: %#lx; outstanding: %#lx\n", completed_reqs, + hba->outstanding_reqs); + hba->outstanding_reqs &= ~completed_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (completed_reqs) + __ufshcd_transfer_req_compl(hba, completed_reqs); + + return completed_reqs; +} + +/** + * ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) +{ + /* Resetting interrupt aggregation counters first and reading the + * DOOR_BELL afterward allows us to handle all the completed requests. + * In order to prevent other interrupts starvation the DB is read once + * after reset. The down side of this solution is the possibility of + * false interrupt if device completes another request after resetting + * aggregation and before reading the DB. + */ + if (ufshcd_is_intr_aggr_allowed(hba) && + !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) + ufshcd_reset_intr_aggr(hba); + + if (ufs_fail_completion()) + return IRQ_HANDLED; + + /* + * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we + * do not want polling to trigger spurious interrupt complaints. 
+ */
+	ufshcd_poll(hba->host, 0);
+
+	return IRQ_HANDLED;
+}
+
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
+{
+	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
+				       &ee_ctrl_mask);
+}
+
+int ufshcd_write_ee_control(struct ufs_hba *hba)
+{
+	int err;
+
+	mutex_lock(&hba->ee_ctrl_mutex);
+	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
+	mutex_unlock(&hba->ee_ctrl_mutex);
+	if (err)
+		dev_err(hba->dev, "%s: failed to write ee control %d\n",
+			__func__, err);
+	return err;
+}
+
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+			     const u16 *other_mask, u16 set, u16 clr)
+{
+	u16 new_mask, ee_ctrl_mask;
+	int err = 0;
+
+	mutex_lock(&hba->ee_ctrl_mutex);
+	new_mask = (*mask & ~clr) | set;
+	ee_ctrl_mask = new_mask | *other_mask;
+	if (ee_ctrl_mask != hba->ee_ctrl_mask)
+		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
+	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
+	if (!err) {
+		hba->ee_ctrl_mask = ee_ctrl_mask;
+		*mask = new_mask;
+	}
+	mutex_unlock(&hba->ee_ctrl_mutex);
+	return err;
+}
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+	return ufshcd_update_ee_drv_mask(hba, 0, mask);
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+	return ufshcd_update_ee_drv_mask(hba, mask, 0);
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers
+ * as the device is allowed to manage its own way of handling background
+ * operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (hba->auto_bkops_enabled)
+		goto out;
+
+	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	hba->auto_bkops_enabled = true;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+
+	/* No need of URGENT_BKOPS exception from the device */
+	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+	if (err)
+		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+			__func__, err);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - prevent the device from doing background
+ * operations on its own
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback that the device may move into a critical state in which
+ * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever
+ * the host is idle so that BKOPS are managed effectively without any
+ * negative impacts.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (!hba->auto_bkops_enabled)
+		goto out;
+
+	/*
+	 * If host assisted BKOPs is to be enabled, make sure
+	 * urgent bkops exception is allowed.
+	 */
+	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+			__func__, err);
+		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+		goto out;
+	}
+
+	hba->auto_bkops_enabled = false;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+	hba->is_urgent_bkops_lvl_checked = false;
+out:
+	return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force reset auto bkops state
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to default value. The s/w tracking variables should be updated
+ * as well. This function would change the auto-bkops state based on
+ * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
+		hba->auto_bkops_enabled = false;
+		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+		ufshcd_enable_auto_bkops(hba);
+	} else {
+		hba->auto_bkops_enabled = true;
+		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
+		ufshcd_disable_auto_bkops(hba);
+	}
+	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+	hba->is_urgent_bkops_lvl_checked = false;
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
+ * @hba: per-adapter instance
+ * @status: bkops_status value
+ *
+ * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to the "status" argument passed to
+ * this function; disable it otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
+ */
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+			     enum bkops_status status)
+{
+	int err;
+	u32 curr_status = 0;
+
+	err = ufshcd_get_bkops_status(hba, &curr_status);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+			__func__, err);
+		goto out;
+	} else if (curr_status > BKOPS_STATUS_MAX) {
+		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+			__func__, curr_status);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (curr_status >= status)
+		err = ufshcd_enable_auto_bkops(hba);
+	else
+		err = ufshcd_disable_auto_bkops(hba);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * If BKOPs is enabled, this function returns 0, 1 if the bkops is not
+ * enabled, and a negative error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+	int err;
+	u32 curr_status = 0;
+
+	if (hba->is_urgent_bkops_lvl_checked)
+		goto enable_auto_bkops;
+
+	err = ufshcd_get_bkops_status(hba, &curr_status);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	/*
+	 * We are seeing that some devices are raising the urgent bkops
+	 * exception events even when BKOPS status doesn't indicate performance
+	 * impacted or critical. Handle these devices by determining their
+	 * urgent bkops status at runtime.
+	 */
+	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+			__func__, curr_status);
+		/* update the current status as the urgent bkops level */
+		hba->urgent_bkops_lvl = curr_status;
+		hba->is_urgent_bkops_lvl_checked = true;
+	}
+
+enable_auto_bkops:
+	err = ufshcd_enable_auto_bkops(hba);
+out:
+	if (err < 0)
+		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+			__func__, err);
+}
+
+static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
+{
+	u32 value;
+
+	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+				QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
+		return;
+
+	dev_info(hba->dev, "exception Tcase %d\n", value - 80);
+
+	ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+
+	/*
+	 * A placeholder for the platform vendors to add whatever additional
+	 * steps are required
+	 */
+}
+
+static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
+{
+	u8 index;
+	enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
+				   UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+	index = ufshcd_wb_get_query_index(hba);
+	return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
+}
+
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
+{
+	int ret;
+
+	if (!ufshcd_is_wb_allowed(hba))
+		return 0;
+
+	if (!(enable ^ hba->dev_info.wb_enabled))
+		return 0;
+
+	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
+	if (ret) {
+		dev_err(hba->dev, "%s Write Booster %s failed %d\n",
+			__func__, enable ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	hba->dev_info.wb_enabled = enable;
+	dev_dbg(hba->dev, "%s Write Booster %s\n",
+			__func__, enable ? "enabled" : "disabled");
+
+	return ret;
+}
+
+static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+	int ret;
+
+	ret = __ufshcd_wb_toggle(hba, set,
+			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
+	if (ret) {
+		dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
+			__func__, set ? "enable" : "disable", ret);
+		return;
+	}
+	dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
+			__func__, set ? "enabled" : "disabled");
+}
+
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+{
+	int ret;
+
+	if (!ufshcd_is_wb_allowed(hba) ||
+	    hba->dev_info.wb_buf_flush_enabled == enable)
+		return;
+
+	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
+	if (ret) {
+		dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
+			enable ?
"enable" : "disable", ret); + return; + } + + hba->dev_info.wb_buf_flush_enabled = enable; + + dev_dbg(hba->dev, "%s WB-Buf Flush %s\n", + __func__, enable ? "enabled" : "disabled"); +} + +static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, + u32 avail_buf) +{ + u32 cur_buf; + int ret; + u8 index; + + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE, + index, 0, &cur_buf); + if (ret) { + dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n", + __func__, ret); + return false; + } + + if (!cur_buf) { + dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", + cur_buf); + return false; + } + /* Let it continue to flush when available buffer exceeds threshold */ + return avail_buf < hba->vps->wb_flush_threshold; +} + +static void ufshcd_wb_force_disable(struct ufs_hba *hba) +{ + if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)) + ufshcd_wb_toggle_flush(hba, false); + + ufshcd_wb_toggle_flush_during_h8(hba, false); + ufshcd_wb_toggle(hba, false); + hba->caps &= ~UFSHCD_CAP_WB_EN; + + dev_info(hba->dev, "%s: WB force disabled\n", __func__); +} + +static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba) +{ + u32 lifetime; + int ret; + u8 index; + + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST, + index, 0, &lifetime); + if (ret) { + dev_err(hba->dev, + "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n", + __func__, ret); + return false; + } + + if (lifetime == UFS_WB_EXCEED_LIFETIME) { + dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n", + __func__, lifetime); + return false; + } + + dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n", + __func__, lifetime); + + return true; +} + +static bool ufshcd_wb_need_flush(struct ufs_hba *hba) +{ + int ret; + u32 avail_buf; + u8 index; + + if (!ufshcd_is_wb_allowed(hba)) + return false; + + if (!ufshcd_is_wb_buf_lifetime_available(hba)) { + ufshcd_wb_force_disable(hba); + return false; + } + + /* + * The ufs device needs the vcc to be ON to flush. + * With user-space reduction enabled, it's enough to enable flush + * by checking only the available buffer. The threshold + * defined here is > 90% full. + * With user-space preserved enabled, the current-buffer + * should be checked too because the wb buffer size can reduce + * when disk tends to be full. This info is provided by current + * buffer (dCurrentWriteBoosterBufferSize). There's no point in + * keeping vcc on when current buffer is empty. + */ + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE, + index, 0, &avail_buf); + if (ret) { + dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n", + __func__, ret); + return false; + } + + if (!hba->dev_info.b_presrv_uspc_en) + return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10); + + return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); +} + +static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(to_delayed_work(work), + struct ufs_hba, + rpm_dev_flush_recheck_work); + /* + * To prevent unnecessary VCC power drain after device finishes + * WriteBooster buffer flush or Auto BKOPs, force runtime resume + * after a certain delay to recheck the threshold by next runtime + * suspend. 
+ */ + ufshcd_rpm_get_sync(hba); + ufshcd_rpm_put_sync(hba); +} + +/** + * ufshcd_exception_event_handler - handle exceptions raised by device + * @work: pointer to work data + * + * Read bExceptionEventStatus attribute from the device and handle the + * exception event accordingly. + */ +static void ufshcd_exception_event_handler(struct work_struct *work) +{ + struct ufs_hba *hba; + int err; + u32 status = 0; + hba = container_of(work, struct ufs_hba, eeh_work); + + ufshcd_scsi_block_requests(hba); + err = ufshcd_get_ee_status(hba, &status); + if (err) { + dev_err(hba->dev, "%s: failed to get exception status %d\n", + __func__, err); + goto out; + } + + trace_ufshcd_exception_event(dev_name(hba->dev), status); + + if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) + ufshcd_bkops_exception_event_handler(hba); + + if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) + ufshcd_temp_exception_event_handler(hba, status); + + ufs_debugfs_exception_event(hba, status); +out: + ufshcd_scsi_unblock_requests(hba); +} + +/* Complete requests that have door-bell cleared */ +static void ufshcd_complete_requests(struct ufs_hba *hba) +{ + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); +} + +/** + * ufshcd_quirk_dl_nac_errors - This function checks if error handling is + * to recover from the DL NAC errors or not. + * @hba: per-adapter instance + * + * Returns true if error handling is required, false otherwise + */ +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) +{ + unsigned long flags; + bool err_handling = true; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* + * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the + * device fatal error and/or DL NAC & REPLAY timeout errors. + */ + if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) + goto out; + + if ((hba->saved_err & DEVICE_FATAL_ERROR) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) + goto out; + + if ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { + int err; + /* + * wait for 50ms to see if we can get any other errors or not. + */ + spin_unlock_irqrestore(hba->host->host_lock, flags); + msleep(50); + spin_lock_irqsave(hba->host->host_lock, flags); + + /* + * now check if we have got any other severe errors other than + * DL NAC error? + */ + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) + goto out; + + /* + * As DL NAC is the only error received so far, send out NOP + * command to confirm if link is still active or not. + * - If we don't get any response then do error recovery. + * - If we get response then clear the DL NAC error bit. 
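+		 * The host lock is dropped across the NOP below because
+		 * ufshcd_verify_dev_init() sleeps.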
+ */ + + spin_unlock_irqrestore(hba->host->host_lock, flags); + err = ufshcd_verify_dev_init(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + + if (err) + goto out; + + /* Link seems to be alive hence ignore the DL NAC errors */ + if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) + hba->saved_err &= ~UIC_ERROR; + /* clear NAC error */ + hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + if (!hba->saved_uic_err) + err_handling = false; + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err_handling; +} + +/* host lock must be held before calling this func */ +static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) +{ + return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || + (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); +} + +void ufshcd_schedule_eh_work(struct ufs_hba *hba) +{ + lockdep_assert_held(hba->host->host_lock); + + /* handle fatal errors only when link is not in error state */ + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { + if (hba->force_reset || ufshcd_is_link_broken(hba) || + ufshcd_is_saved_err_fatal(hba)) + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; + else + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; + queue_work(hba->eh_wq, &hba->eh_work); + } +} + +static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) +{ + down_write(&hba->clk_scaling_lock); + hba->clk_scaling.is_allowed = allow; + up_write(&hba->clk_scaling_lock); +} + +static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) +{ + if (suspend) { + if (hba->clk_scaling.is_enabled) + ufshcd_suspend_clkscaling(hba); + ufshcd_clk_scaling_allow(hba, false); + } else { + ufshcd_clk_scaling_allow(hba, true); + if (hba->clk_scaling.is_enabled) + ufshcd_resume_clkscaling(hba); + } +} + +static void ufshcd_err_handling_prepare(struct ufs_hba *hba) +{ + ufshcd_rpm_get_sync(hba); + if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || + hba->is_sys_suspended) { + enum ufs_pm_op pm_op; + + /* + * Don't assume anything of resume, if + * resume fails, irq and clocks can be OFF, and powers + * can be OFF or in LPM. + */ + ufshcd_setup_hba_vreg(hba, true); + ufshcd_enable_irq(hba); + ufshcd_setup_vreg(hba, true); + ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); + ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); + ufshcd_hold(hba, false); + if (!ufshcd_is_clkgating_allowed(hba)) + ufshcd_setup_clocks(hba, true); + ufshcd_release(hba); + pm_op = hba->is_sys_suspended ? 
UFS_SYSTEM_PM : UFS_RUNTIME_PM; + ufshcd_vops_resume(hba, pm_op); + } else { + ufshcd_hold(hba, false); + if (ufshcd_is_clkscaling_supported(hba) && + hba->clk_scaling.is_enabled) + ufshcd_suspend_clkscaling(hba); + ufshcd_clk_scaling_allow(hba, false); + } + ufshcd_scsi_block_requests(hba); + /* Drain ufshcd_queuecommand() */ + synchronize_rcu(); + cancel_work_sync(&hba->eeh_work); +} + +static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) +{ + ufshcd_scsi_unblock_requests(hba); + ufshcd_release(hba); + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, false); + ufshcd_rpm_put(hba); +} + +static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) +{ + return (!hba->is_powered || hba->shutting_down || + !hba->ufs_device_wlun || + hba->ufshcd_state == UFSHCD_STATE_ERROR || + (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || + ufshcd_is_link_broken(hba)))); +} + +#ifdef CONFIG_PM +static void ufshcd_recover_pm_error(struct ufs_hba *hba) +{ + struct Scsi_Host *shost = hba->host; + struct scsi_device *sdev; + struct request_queue *q; + int ret; + + hba->is_sys_suspended = false; + /* + * Set RPM status of wlun device to RPM_ACTIVE, + * this also clears its runtime error. + */ + ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); + + /* hba device might have a runtime error otherwise */ + if (ret) + ret = pm_runtime_set_active(hba->dev); + /* + * If wlun device had runtime error, we also need to resume those + * consumer scsi devices in case any of them has failed to be + * resumed due to supplier runtime resume failure. This is to unblock + * blk_queue_enter in case there are bios waiting inside it. + */ + if (!ret) { + shost_for_each_device(sdev, shost) { + q = sdev->request_queue; + if (q->dev && (q->rpm_status == RPM_SUSPENDED || + q->rpm_status == RPM_SUSPENDING)) + pm_request_resume(q->dev); + } + } +} +#else +static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) +{ +} +#endif + +static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; + u32 mode; + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); + + if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK)) + return true; + + if (pwr_info->pwr_tx != (mode & PWRMODE_MASK)) + return true; + + return false; +} + +/** + * ufshcd_err_handler - handle UFS errors that require s/w attention + * @work: pointer to work structure + */ +static void ufshcd_err_handler(struct work_struct *work) +{ + int retries = MAX_ERR_HANDLER_RETRIES; + struct ufs_hba *hba; + unsigned long flags; + bool needs_restore; + bool needs_reset; + bool err_xfer; + bool err_tm; + int pmc_err; + int tag; + + hba = container_of(work, struct ufs_hba, eh_work); + + dev_info(hba->dev, + "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", + __func__, ufshcd_state_name[hba->ufshcd_state], + hba->is_powered, hba->shutting_down, hba->saved_err, + hba->saved_uic_err, hba->force_reset, + ufshcd_is_link_broken(hba) ? 
"; link is broken" : ""); + + down(&hba->host_sem); + spin_lock_irqsave(hba->host->host_lock, flags); + if (ufshcd_err_handling_should_stop(hba)) { + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + up(&hba->host_sem); + return; + } + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_err_handling_prepare(hba); + /* Complete requests that have door-bell cleared by h/w */ + ufshcd_complete_requests(hba); + spin_lock_irqsave(hba->host->host_lock, flags); +again: + needs_restore = false; + needs_reset = false; + err_xfer = false; + err_tm = false; + + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) + hba->ufshcd_state = UFSHCD_STATE_RESET; + /* + * A full reset and restore might have happened after preparation + * is finished, double check whether we should stop. + */ + if (ufshcd_err_handling_should_stop(hba)) + goto skip_err_handling; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + bool ret; + + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ + ret = ufshcd_quirk_dl_nac_errors(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + if (!ret && ufshcd_err_handling_should_stop(hba)) + goto skip_err_handling; + } + + if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || + (hba->saved_uic_err && + (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { + bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); + + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_print_host_state(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_evt_hist(hba); + ufshcd_print_tmrs(hba, hba->outstanding_tasks); + ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt); + spin_lock_irqsave(hba->host->host_lock, flags); + } + + /* + * if host reset is required then skip clearing the pending + * transfers forcefully because they will get cleared during + * host reset and restore + */ + if (hba->force_reset || ufshcd_is_link_broken(hba) || + ufshcd_is_saved_err_fatal(hba) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | + UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) { + needs_reset = true; + goto do_reset; + } + + /* + * If LINERESET was caught, UFS might have been put to PWM mode, + * check if power mode restore is needed. + */ + if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { + hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; + if (!hba->saved_uic_err) + hba->saved_err &= ~UIC_ERROR; + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ufshcd_is_pwr_mode_restore_needed(hba)) + needs_restore = true; + spin_lock_irqsave(hba->host->host_lock, flags); + if (!hba->saved_err && !needs_restore) + goto skip_err_handling; + } + + hba->silence_err_logs = true; + /* release lock as clear command might sleep */ + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Clear pending transfer requests */ + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { + if (ufshcd_try_to_abort_task(hba, tag)) { + err_xfer = true; + goto lock_skip_pending_xfer_clear; + } + dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag, + hba->lrb[tag].cmd ? 
hba->lrb[tag].cmd->cmnd[0] : -1);
+	}
+
+	/* Clear pending task management requests */
+	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+		if (ufshcd_clear_tm_cmd(hba, tag)) {
+			err_tm = true;
+			goto lock_skip_pending_xfer_clear;
+		}
+	}
+
+lock_skip_pending_xfer_clear:
+	/* Complete the requests that are cleared by s/w */
+	ufshcd_complete_requests(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->silence_err_logs = false;
+	if (err_xfer || err_tm) {
+		needs_reset = true;
+		goto do_reset;
+	}
+
+	/*
+	 * After all reqs and tasks are cleared from doorbell,
+	 * now it is safe to restore power mode.
+	 */
+	if (needs_restore) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		/*
+		 * Hold the scaling lock just in case dev cmds
+		 * are sent via bsg and/or sysfs.
+		 */
+		down_write(&hba->clk_scaling_lock);
+		hba->force_pmc = true;
+		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+		if (pmc_err) {
+			needs_reset = true;
+			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
+					__func__, pmc_err);
+		}
+		hba->force_pmc = false;
+		ufshcd_print_pwr_info(hba);
+		up_write(&hba->clk_scaling_lock);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	}
+
+do_reset:
+	/* Fatal errors need reset */
+	if (needs_reset) {
+		int err;
+
+		hba->force_reset = false;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		err = ufshcd_reset_and_restore(hba);
+		if (err)
+			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
+					__func__, err);
+		else
+			ufshcd_recover_pm_error(hba);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	}
+
+skip_err_handling:
+	if (!needs_reset) {
+		if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+		if (hba->saved_err || hba->saved_uic_err)
+			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+			    __func__, hba->saved_err, hba->saved_uic_err);
+	}
+	/* Exit in an operational state or dead */
+	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
+	    hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+		if (--retries)
+			goto again;
+		hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	}
+	ufshcd_clear_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_err_handling_unprepare(hba);
+	up(&hba->host_sem);
+
+	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
+		 ufshcd_state_name[hba->ufshcd_state]);
+}
+
+/**
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
+ */
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
+{
+	u32 reg;
+	irqreturn_t retval = IRQ_NONE;
+
+	/* PHY layer error */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+		ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
+		/*
+		 * To know whether this error is fatal or not, DB timeout
+		 * must be checked but this error is handled separately.
+		 */
+		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
+			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
+					__func__);
+
+		/* Got a LINERESET indication. */
+		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+			struct uic_command *cmd = NULL;
+
+			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
+			if (hba->uic_async_done && hba->active_uic_cmd)
+				cmd = hba->active_uic_cmd;
+			/*
+			 * Ignore the LINERESET during power mode change
+			 * operation via DME_SET command.
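+			 * The power mode change itself is carried out with
+			 * a DME_SET of PA_PWRMODE, and a LINERESET may occur
+			 * as part of that sequence.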
+ */
+			if (cmd && (cmd->command == UIC_CMD_DME_SET))
+				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
+		}
+		retval |= IRQ_HANDLED;
+	}
+
+	/* PA_INIT_ERROR is fatal and needs UIC reset */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
+		ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
+
+		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+		else if (hba->dev_quirks &
+			   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+				hba->uic_error |=
+					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+		}
+		retval |= IRQ_HANDLED;
+	}
+
+	/* UIC NL/TL/DME errors need software retry */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
+		ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
+		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+		retval |= IRQ_HANDLED;
+	}
+
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
+		ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
+		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+		retval |= IRQ_HANDLED;
+	}
+
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+	if ((reg & UIC_DME_ERROR) &&
+	    (reg & UIC_DME_ERROR_CODE_MASK)) {
+		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
+		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+		retval |= IRQ_HANDLED;
+	}
+
+	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+			__func__, hba->uic_error);
+	return retval;
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ * @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
+ */
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
+{
+	bool queue_eh_work = false;
+	irqreturn_t retval = IRQ_NONE;
+
+	spin_lock(hba->host->host_lock);
+	hba->errors |= UFSHCD_ERROR_MASK & intr_status;
+
+	if (hba->errors & INT_FATAL_ERRORS) {
+		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
+				       hba->errors);
+		queue_eh_work = true;
+	}
+
+	if (hba->errors & UIC_ERROR) {
+		hba->uic_error = 0;
+		retval = ufshcd_update_uic_error(hba);
+		if (hba->uic_error)
+			queue_eh_work = true;
+	}
+
+	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
+		dev_err(hba->dev,
+			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
+			"Enter" : "Exit",
+			hba->errors, ufshcd_get_upmcrs(hba));
+		ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
+				       hba->errors);
+		ufshcd_set_link_broken(hba);
+		queue_eh_work = true;
+	}
+
+	if (queue_eh_work) {
+		/*
+		 * Update the transfer error masks to sticky bits; do this
+		 * irrespective of the current ufshcd_state.
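+		 * saved_err and saved_uic_err accumulate across interrupts
+		 * and are only cleared once the error handler has acted on
+		 * them.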
+ */ + hba->saved_err |= hba->errors; + hba->saved_uic_err |= hba->uic_error; + + /* dump controller state before resetting */ + if ((hba->saved_err & + (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || + (hba->saved_uic_err && + (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { + dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", + __func__, hba->saved_err, + hba->saved_uic_err); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, + "host_regs: "); + ufshcd_print_pwr_info(hba); + } + ufshcd_schedule_eh_work(hba); + retval |= IRQ_HANDLED; + } + /* + * if (!queue_eh_work) - + * Other errors are either non-fatal where host recovers + * itself without s/w intervention or errors that will be + * handled by the SCSI core layer. + */ + hba->errors = 0; + hba->uic_error = 0; + spin_unlock(hba->host->host_lock); + return retval; +} + +/** + * ufshcd_tmc_handler - handle task management function completion + * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) +{ + unsigned long flags, pending, issued; + irqreturn_t ret = IRQ_NONE; + int tag; + + spin_lock_irqsave(hba->host->host_lock, flags); + pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); + issued = hba->outstanding_tasks & ~pending; + for_each_set_bit(tag, &issued, hba->nutmrs) { + struct request *req = hba->tmf_rqs[tag]; + struct completion *c = req->end_io_data; + + complete(c); + ret = IRQ_HANDLED; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return ret; +} + +/** + * ufshcd_sl_intr - Interrupt service routine + * @hba: per adapter instance + * @intr_status: contains interrupts generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) +{ + irqreturn_t retval = IRQ_NONE; + + if (intr_status & UFSHCD_UIC_MASK) + retval |= ufshcd_uic_cmd_compl(hba, intr_status); + + if (intr_status & UFSHCD_ERROR_MASK || hba->errors) + retval |= ufshcd_check_errors(hba, intr_status); + + if (intr_status & UTP_TASK_REQ_COMPL) + retval |= ufshcd_tmc_handler(hba); + + if (intr_status & UTP_TRANSFER_REQ_COMPL) + retval |= ufshcd_transfer_req_compl(hba); + + return retval; +} + +/** + * ufshcd_intr - Main interrupt service routine + * @irq: irq number + * @__hba: pointer to adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_intr(int irq, void *__hba) +{ + u32 intr_status, enabled_intr_status = 0; + irqreturn_t retval = IRQ_NONE; + struct ufs_hba *hba = __hba; + int retries = hba->nutrs; + + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + hba->ufs_stats.last_intr_status = intr_status; + hba->ufs_stats.last_intr_ts = ktime_get(); + + /* + * There could be max of hba->nutrs reqs in flight and in worst case + * if the reqs get finished 1 by 1 after the interrupt status is + * read, make sure we handle them by checking the interrupt status + * again in a loop until we process all of the reqs before returning. 
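+	 * The loop is bounded by hba->nutrs iterations so a misbehaving
+	 * controller cannot keep us in the handler indefinitely.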
+ */
+ while (intr_status && retries--) {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ }
+
+ if (enabled_intr_status && retval == IRQ_NONE &&
+ (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
+ hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+ __func__,
+ intr_status,
+ hba->ufs_stats.last_intr_status,
+ enabled_intr_status);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+ }
+
+ return retval;
+}
+
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ u32 mask = 1 << tag;
+ unsigned long flags;
+
+ if (!test_bit(tag, &hba->outstanding_tasks))
+ goto out;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utmrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* poll for max. 1 sec to clear door bell register by h/w */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TASK_REQ_DOOR_BELL,
+ mask, 0, 1000, 1000);
+
+ dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
+ tag, err ? "failed" : "succeeded");
+
+out:
+ return err;
+}
+
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ struct utp_task_req_desc *treq, u8 tm_function)
+{
+ struct request_queue *q = hba->tmf_queue;
+ struct Scsi_Host *host = hba->host;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request *req;
+ unsigned long flags;
+ int task_tag, err;
+
+ /*
+ * blk_mq_alloc_request() is used here only to get a free tag.
+ */
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ req->end_io_data = &wait;
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ task_tag = req->tag;
+ WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
+ task_tag);
+ hba->tmf_rqs[req->tag] = req;
+ treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
+
+ memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
+ ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
+
+ /* send command to the controller */
+ __set_bit(task_tag, &hba->outstanding_tasks);
+
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
+
+ /* wait until the task management command is completed */
+ err = wait_for_completion_io_timeout(&wait,
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
+ if (!err) {
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed out\n",
+ __func__, tm_function);
+ if (ufshcd_clear_tm_cmd(hba, task_tag))
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+ __func__, task_tag);
+ err = -ETIMEDOUT;
+ } else {
+ err = 0;
+ memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
+
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->tmf_rqs[req->tag] = NULL;
+ __clear_bit(task_tag, &hba->outstanding_tasks);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ufshcd_release(hba);
+ blk_mq_free_request(req);
+
+ return err;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller 
+ * @hba: per adapter instance + * @lun_id: LUN ID to which TM command is sent + * @task_id: task ID to which the TM command is applicable + * @tm_function: task management function opcode + * @tm_response: task management service response return value + * + * Returns non-zero value on error, zero on success. + */ +static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, + u8 tm_function, u8 *tm_response) +{ + struct utp_task_req_desc treq = { { 0 }, }; + enum utp_ocs ocs_value; + int err; + + /* Configure task request descriptor */ + treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); + treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + + /* Configure task request UPIU */ + treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) | + cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24); + treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16); + + /* + * The host shall provide the same value for LUN field in the basic + * header and for Input Parameter. + */ + treq.upiu_req.input_param1 = cpu_to_be32(lun_id); + treq.upiu_req.input_param2 = cpu_to_be32(task_id); + + err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); + if (err == -ETIMEDOUT) + return err; + + ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS; + if (ocs_value != OCS_SUCCESS) + dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", + __func__, ocs_value); + else if (tm_response) + *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) & + MASK_TM_SERVICE_RESP; + return err; +} + +/** + * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests + * @hba: per-adapter instance + * @req_upiu: upiu request + * @rsp_upiu: upiu reply + * @desc_buff: pointer to descriptor buffer, NULL if NA + * @buff_len: descriptor size, 0 if NA + * @cmd_type: specifies the type (NOP, Query...) + * @desc_op: descriptor operation + * + * Those type of requests uses UTP Transfer Request Descriptor - utrd. + * Therefore, it "rides" the device management infrastructure: uses its tag and + * tasks work queues. + * + * Since there is only one available tag for device management commands, + * the caller is expected to hold the hba->dev_cmd.lock mutex. + */ +static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, + struct utp_upiu_req *req_upiu, + struct utp_upiu_req *rsp_upiu, + u8 *desc_buff, int *buff_len, + enum dev_cmd_type cmd_type, + enum query_opcode desc_op) +{ + DECLARE_COMPLETION_ONSTACK(wait); + const u32 tag = hba->reserved_slot; + struct ufshcd_lrb *lrbp; + int err = 0; + u8 upiu_flags; + + /* Protects use of hba->reserved_slot. */ + lockdep_assert_held(&hba->dev_cmd.lock); + + down_read(&hba->clk_scaling_lock); + + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); + lrbp->cmd = NULL; + lrbp->task_tag = tag; + lrbp->lun = 0; + lrbp->intr_cmd = true; + ufshcd_prepare_lrbp_crypto(NULL, lrbp); + hba->dev_cmd.type = cmd_type; + + if (hba->ufs_version <= ufshci_version(1, 1)) + lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; + else + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + + /* update the task tag in the request upiu */ + req_upiu->header.dword_0 |= cpu_to_be32(tag); + + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); + + /* just copy the upiu request as it is */ + memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); + if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) { + /* The Data Segment Area is optional depending upon the query + * function value. for WRITE DESCRIPTOR, the data segment + * follows right after the tsf. 
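+ * Note: lrbp->ucd_req_ptr is a struct utp_upiu_req *, so the
+ * "ucd_req_ptr + 1" below is pointer arithmetic that lands exactly
+ * sizeof(struct utp_upiu_req) bytes past the request header, i.e. at
+ * the start of the data segment being copied out.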
+ */
+ memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
+ *buff_len = 0;
+ }
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+
+ ufshcd_send_command(hba, tag);
+ /*
+ * Ignore the return value here - ufshcd_check_query_response is
+ * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
+ * Read the response directly, ignoring all errors.
+ */
+ ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+
+ /* just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
+ u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+
+ if (*buff_len >= resp_len) {
+ memcpy(desc_buff, descp, resp_len);
+ *buff_len = resp_len;
+ } else {
+ dev_warn(hba->dev,
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, *buff_len);
+ *buff_len = 0;
+ err = -EINVAL;
+ }
+ }
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+/**
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
+ * @hba: per-adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len: descriptor size, 0 if NA
+ * @desc_op: descriptor operation
+ *
+ * Supports UTP Transfer requests (nop and query) and UTP Task
+ * Management requests.
+ * It is up to the caller to fill the upiu content properly, as it will
+ * be copied without any further input validation.
+ */
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op)
+{
+ int err;
+ enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
+ struct utp_task_req_desc treq = { { 0 }, };
+ enum utp_ocs ocs_value;
+ u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+
+ switch (msgcode) {
+ case UPIU_TRANSACTION_NOP_OUT:
+ cmd_type = DEV_CMD_TYPE_NOP;
+ fallthrough;
+ case UPIU_TRANSACTION_QUERY_REQ:
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
+ desc_buff, buff_len,
+ cmd_type, desc_op);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ break;
+ case UPIU_TRANSACTION_TASK_REQ:
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
+
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
+ if (err == -ETIMEDOUT)
+ break;
+
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ if (ocs_value != OCS_SUCCESS) {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
+ ocs_value);
+ break;
+ }
+
+ memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
+
+ break;
+ default:
+ err = -EINVAL;
+
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_eh_device_reset_handler() - Reset a single logical unit. 
+ * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + unsigned long flags, pending_reqs = 0, not_cleared = 0; + struct Scsi_Host *host; + struct ufs_hba *hba; + u32 pos; + int err; + u8 resp = 0xF, lun; + + host = cmd->device->host; + hba = shost_priv(host); + + lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); + err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) + err = resp; + goto out; + } + + /* clear the commands that were pending for corresponding LUN */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) + if (hba->lrb[pos].lun == lun) + __set_bit(pos, &pending_reqs); + hba->outstanding_reqs &= ~pending_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (ufshcd_clear_cmds(hba, pending_reqs) < 0) { + spin_lock_irqsave(&hba->outstanding_lock, flags); + not_cleared = pending_reqs & + ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + hba->outstanding_reqs |= not_cleared; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + dev_err(hba->dev, "%s: failed to clear requests %#lx\n", + __func__, not_cleared); + } + __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared); + +out: + hba->req_abort_count = 0; + ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); + if (!err) { + err = SUCCESS; + } else { + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + err = FAILED; + } + return err; +} + +static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) +{ + struct ufshcd_lrb *lrbp; + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutrs) { + lrbp = &hba->lrb[tag]; + lrbp->req_abort_skip = true; + } +} + +/** + * ufshcd_try_to_abort_task - abort a specific task + * @hba: Pointer to adapter instance + * @tag: Task tag/index to be aborted + * + * Abort the pending command in device by sending UFS_ABORT_TASK task management + * command, and in host controller by clearing the door-bell register. There can + * be race between controller sending the command to the device while abort is + * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is + * really issued and then try to abort it. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) +{ + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + int err = 0; + int poll_cnt; + u8 resp = 0xF; + u32 reg; + + for (poll_cnt = 100; poll_cnt; poll_cnt--) { + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_QUERY_TASK, &resp); + if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { + /* cmd pending in the device */ + dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", + __func__, tag); + break; + } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + /* + * cmd not pending in the device, check if it is + * in transition. + */ + dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + __func__, tag); + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + if (reg & (1 << tag)) { + /* sleep for max. 200us to stabilize */ + usleep_range(100, 200); + continue; + } + /* command completed already */ + dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", + __func__, tag); + goto out; + } else { + dev_err(hba->dev, + "%s: no response from device. 
tag = %d, err %d\n", + __func__, tag, err); + if (!err) + err = resp; /* service response error */ + goto out; + } + } + + if (!poll_cnt) { + err = -EBUSY; + goto out; + } + + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_ABORT_TASK, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) { + err = resp; /* service response error */ + dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", + __func__, tag, err); + } + goto out; + } + + err = ufshcd_clear_cmds(hba, 1U << tag); + if (err) + dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", + __func__, tag, err); + +out: + return err; +} + +/** + * ufshcd_abort - scsi host template eh_abort_handler callback + * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct ufs_hba *hba = shost_priv(host); + int tag = scsi_cmd_to_rq(cmd)->tag; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + unsigned long flags; + int err = FAILED; + bool outstanding; + u32 reg; + + WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); + + ufshcd_hold(hba, false); + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + /* If command is already aborted/completed, return FAILED. */ + if (!(test_bit(tag, &hba->outstanding_reqs))) { + dev_err(hba->dev, + "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", + __func__, tag, hba->outstanding_reqs, reg); + goto release; + } + + /* Print Transfer Request of aborted task */ + dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); + + /* + * Print detailed info about aborted request. + * As more than one request might get aborted at the same time, + * print full information only for the first aborted request in order + * to reduce repeated printouts. For other aborted requests only print + * basic details. + */ + scsi_print_command(cmd); + if (!hba->req_abort_count) { + ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); + ufshcd_print_evt_hist(hba); + ufshcd_print_host_state(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_trs(hba, 1 << tag, true); + } else { + ufshcd_print_trs(hba, 1 << tag, false); + } + hba->req_abort_count++; + + if (!(reg & (1 << tag))) { + dev_err(hba->dev, + "%s: cmd was completed, but without a notifying intr, tag = %d", + __func__, tag); + __ufshcd_transfer_req_compl(hba, 1UL << tag); + goto release; + } + + /* + * Task abort to the device W-LUN is illegal. When this command + * will fail, due to spec violation, scsi err handling next step + * will be to send LU reset which, again, is a spec violation. + * To avoid these unnecessary/illegal steps, first we clean up + * the lrb taken by this cmd and re-set it in outstanding_reqs, + * then queue the eh_work and bail. 
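+ * (The request is deliberately left in outstanding_reqs here; the
+ * scheduled error handler performs the actual clean-up, completing
+ * and releasing the pending lrbs, this one included, as part of the
+ * full reset.)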
+ */ + if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { + ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); + + spin_lock_irqsave(host->host_lock, flags); + hba->force_reset = true; + ufshcd_schedule_eh_work(hba); + spin_unlock_irqrestore(host->host_lock, flags); + goto release; + } + + /* Skip task abort in case previous aborts failed and report failure */ + if (lrbp->req_abort_skip) { + dev_err(hba->dev, "%s: skipping abort\n", __func__); + ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); + goto release; + } + + err = ufshcd_try_to_abort_task(hba, tag); + if (err) { + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); + err = FAILED; + goto release; + } + + /* + * Clear the corresponding bit from outstanding_reqs since the command + * has been aborted successfully. + */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (outstanding) + ufshcd_release_scsi_cmd(hba, lrbp); + + err = SUCCESS; + +release: + /* Matches the ufshcd_hold() call at the start of this function. */ + ufshcd_release(hba); + return err; +} + +/** + * ufshcd_host_reset_and_restore - reset and restore host controller + * @hba: per-adapter instance + * + * Note that host controller reset may issue DME_RESET to + * local and remote (device) Uni-Pro stack and the attributes + * are reset to default state. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) +{ + int err; + + /* + * Stop the host controller and complete the requests + * cleared by h/w + */ + ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET); + ufshcd_hba_stop(hba); + hba->silence_err_logs = true; + ufshcd_complete_requests(hba); + hba->silence_err_logs = false; + + /* scale up clocks to max frequency before full reinitialization */ + ufshcd_scale_clks(hba, true); + + err = ufshcd_hba_enable(hba); + + /* Establish the link again and restore the device */ + if (!err) + err = ufshcd_probe_hba(hba, false); + + if (err) + dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); + ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); + return err; +} + +/** + * ufshcd_reset_and_restore - reset and re-initialize host/device + * @hba: per-adapter instance + * + * Reset and recover device, host and re-establish link. This + * is helpful to recover the communication in fatal error conditions. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_reset_and_restore(struct ufs_hba *hba) +{ + u32 saved_err = 0; + u32 saved_uic_err = 0; + int err = 0; + unsigned long flags; + int retries = MAX_HOST_RESET_RETRIES; + + spin_lock_irqsave(hba->host->host_lock, flags); + do { + /* + * This is a fresh start, cache and clear saved error first, + * in case new error generated during reset and restore. 
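+ * The local saved_err/saved_uic_err copies OR-accumulate across the
+ * retry loop (up to MAX_HOST_RESET_RETRIES passes) and are written
+ * back into hba->saved_* at the end only if recovery ultimately
+ * failed.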
+ */ + saved_err |= hba->saved_err; + saved_uic_err |= hba->saved_uic_err; + hba->saved_err = 0; + hba->saved_uic_err = 0; + hba->force_reset = false; + hba->ufshcd_state = UFSHCD_STATE_RESET; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* Reset the attached device */ + ufshcd_device_reset(hba); + + err = ufshcd_host_reset_and_restore(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (err) + continue; + /* Do not exit unless operational or dead */ + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && + hba->ufshcd_state != UFSHCD_STATE_ERROR && + hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) + err = -EAGAIN; + } while (err && --retries); + + /* + * Inform scsi mid-layer that we did reset and allow to handle + * Unit Attention properly. + */ + scsi_report_bus_reset(hba->host, 0); + if (err) { + hba->ufshcd_state = UFSHCD_STATE_ERROR; + hba->saved_err |= saved_err; + hba->saved_uic_err |= saved_uic_err; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return err; +} + +/** + * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer + * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + int err = SUCCESS; + unsigned long flags; + struct ufs_hba *hba; + + hba = shost_priv(cmd->device->host); + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->force_reset = true; + ufshcd_schedule_eh_work(hba); + dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + flush_work(&hba->eh_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ufshcd_state == UFSHCD_STATE_ERROR) + err = FAILED; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return err; +} + +/** + * ufshcd_get_max_icc_level - calculate the ICC level + * @sup_curr_uA: max. current supported by the regulator + * @start_scan: row at the desc table to start scan from + * @buff: power descriptor buffer + * + * Returns calculated max ICC level for specific regulator + */ +static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, + const char *buff) +{ + int i; + int curr_uA; + u16 data; + u16 unit; + + for (i = start_scan; i >= 0; i--) { + data = get_unaligned_be16(&buff[2 * i]); + unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> + ATTR_ICC_LVL_UNIT_OFFSET; + curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; + switch (unit) { + case UFSHCD_NANO_AMP: + curr_uA = curr_uA / 1000; + break; + case UFSHCD_MILI_AMP: + curr_uA = curr_uA * 1000; + break; + case UFSHCD_AMP: + curr_uA = curr_uA * 1000 * 1000; + break; + case UFSHCD_MICRO_AMP: + default: + break; + } + if (sup_curr_uA >= curr_uA) + break; + } + if (i < 0) { + i = 0; + pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); + } + + return (u32)i; +} + +/** + * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level + * In case regulators are not initialized we'll return 0 + * @hba: per-adapter instance + * @desc_buf: power descriptor buffer to extract ICC levels from. 
+ * @len: length of desc_buff + * + * Returns calculated ICC level + */ +static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, + const u8 *desc_buf, int len) +{ + u32 icc_level = 0; + + if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || + !hba->vreg_info.vccq2) { + /* + * Using dev_dbg to avoid messages during runtime PM to avoid + * never-ending cycles of messages written back to storage by + * user space causing runtime resume, causing more messages and + * so on. + */ + dev_dbg(hba->dev, + "%s: Regulator capability was not set, actvIccLevel=%d", + __func__, icc_level); + goto out; + } + + if (hba->vreg_info.vcc->max_uA) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vcc->max_uA, + POWER_DESC_MAX_ACTV_ICC_LVLS - 1, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); + + if (hba->vreg_info.vccq->max_uA) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); + + if (hba->vreg_info.vccq2->max_uA) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq2->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); +out: + return icc_level; +} + +static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) +{ + int ret; + int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; + u8 *desc_buf; + u32 icc_level; + + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) + return; + + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, + desc_buf, buff_len); + if (ret) { + dev_err(hba->dev, + "%s: Failed reading power descriptor.len = %d ret = %d", + __func__, buff_len, ret); + goto out; + } + + icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf, + buff_len); + dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); + + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level); + + if (ret) + dev_err(hba->dev, + "%s: Failed configuring bActiveICCLevel = %d ret = %d", + __func__, icc_level, ret); + +out: + kfree(desc_buf); +} + +static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev) +{ + scsi_autopm_get_device(sdev); + blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev); + if (sdev->rpm_autosuspend) + pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev, + RPM_AUTOSUSPEND_DELAY_MS); + scsi_autopm_put_device(sdev); +} + +/** + * ufshcd_scsi_add_wlus - Adds required W-LUs + * @hba: per-adapter instance + * + * UFS device specification requires the UFS devices to support 4 well known + * logical units: + * "REPORT_LUNS" (address: 01h) + * "UFS Device" (address: 50h) + * "RPMB" (address: 44h) + * "BOOT" (address: 30h) + * UFS device's power management needs to be controlled by "POWER CONDITION" + * field of SSU (START STOP UNIT) command. But this "power condition" field + * will take effect only when its sent to "UFS device" well known logical unit + * hence we require the scsi_device instance to represent this logical unit in + * order for the UFS host driver to send the SSU command for power management. + * + * We also require the scsi_device instance for "RPMB" (Replay Protected Memory + * Block) LU so user space process can control this LU. User space may also + * want to have access to BOOT LU. + * + * This function adds scsi device instances for each of all well known LUs + * (except "REPORT LUNS" LU). + * + * Returns zero on success (all required W-LUs are added successfully), + * non-zero error value on failure (if failed to add any of the required W-LU). 
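+ *
+ * The UPIU W-LUN addresses above are mapped through
+ * ufshcd_upiu_wlun_to_scsi_wlun() before being handed to
+ * __scsi_add_device(), since the SCSI midlayer expects the SAM
+ * well-known-LUN encoding rather than raw UFS W-LUN numbers.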
+ */ +static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) +{ + int ret = 0; + struct scsi_device *sdev_boot, *sdev_rpmb; + + hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); + if (IS_ERR(hba->ufs_device_wlun)) { + ret = PTR_ERR(hba->ufs_device_wlun); + hba->ufs_device_wlun = NULL; + goto out; + } + scsi_device_put(hba->ufs_device_wlun); + + sdev_rpmb = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); + if (IS_ERR(sdev_rpmb)) { + ret = PTR_ERR(sdev_rpmb); + goto remove_ufs_device_wlun; + } + ufshcd_blk_pm_runtime_init(sdev_rpmb); + scsi_device_put(sdev_rpmb); + + sdev_boot = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); + if (IS_ERR(sdev_boot)) { + dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); + } else { + ufshcd_blk_pm_runtime_init(sdev_boot); + scsi_device_put(sdev_boot); + } + goto out; + +remove_ufs_device_wlun: + scsi_remove_device(hba->ufs_device_wlun); +out: + return ret; +} + +static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + u8 lun; + u32 d_lu_wb_buf_alloc; + u32 ext_ufs_feature; + + if (!ufshcd_is_wb_allowed(hba)) + return; + + /* + * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or + * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES + * enabled + */ + if (!(dev_info->wspecversion >= 0x310 || + dev_info->wspecversion == 0x220 || + (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) + goto wb_disabled; + + if (hba->desc_size[QUERY_DESC_IDN_DEVICE] < + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4) + goto wb_disabled; + + ext_ufs_feature = get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); + + if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP)) + goto wb_disabled; + + /* + * WB may be supported but not configured while provisioning. The spec + * says, in dedicated wb buffer mode, a max of 1 lun would have wb + * buffer configured. 
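+ * In dedicated mode the LU scan below therefore stops at the first
+ * unit descriptor whose WB buffer allocation units field is non-zero
+ * and records that LU as dev_info->wb_dedicated_lu.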
+ */ + dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; + + dev_info->b_presrv_uspc_en = + desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; + + if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) { + if (!get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS)) + goto wb_disabled; + } else { + for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) { + d_lu_wb_buf_alloc = 0; + ufshcd_read_unit_desc_param(hba, + lun, + UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS, + (u8 *)&d_lu_wb_buf_alloc, + sizeof(d_lu_wb_buf_alloc)); + if (d_lu_wb_buf_alloc) { + dev_info->wb_dedicated_lu = lun; + break; + } + } + + if (!d_lu_wb_buf_alloc) + goto wb_disabled; + } + + if (!ufshcd_is_wb_buf_lifetime_available(hba)) + goto wb_disabled; + + return; + +wb_disabled: + hba->caps &= ~UFSHCD_CAP_WB_EN; +} + +static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + u32 ext_ufs_feature; + u8 mask = 0; + + if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) + return; + + ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); + + if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF) + mask |= MASK_EE_TOO_LOW_TEMP; + + if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF) + mask |= MASK_EE_TOO_HIGH_TEMP; + + if (mask) { + ufshcd_enable_ee(hba, mask); + ufs_hwmon_probe(hba, mask); + } +} + +void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, + const struct ufs_dev_quirk *fixups) +{ + const struct ufs_dev_quirk *f; + struct ufs_dev_info *dev_info = &hba->dev_info; + + if (!fixups) + return; + + for (f = fixups; f->quirk; f++) { + if ((f->wmanufacturerid == dev_info->wmanufacturerid || + f->wmanufacturerid == UFS_ANY_VENDOR) && + ((dev_info->model && + STR_PRFX_EQUAL(f->model, dev_info->model)) || + !strcmp(f->model, UFS_ANY_MODEL))) + hba->dev_quirks |= f->quirk; + } +} +EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks); + +static void ufs_fixup_device_setup(struct ufs_hba *hba) +{ + /* fix by general quirk table */ + ufshcd_fixup_dev_quirks(hba, ufs_fixups); + + /* allow vendors to fix quirks */ + ufshcd_vops_fixup_dev_quirks(hba); +} + +static int ufs_get_device_desc(struct ufs_hba *hba) +{ + int err; + u8 model_index; + u8 b_ufs_feature_sup; + u8 *desc_buf; + struct ufs_dev_info *dev_info = &hba->dev_info; + + desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, + hba->desc_size[QUERY_DESC_IDN_DEVICE]); + if (err) { + dev_err(hba->dev, "%s: Failed reading Device Desc. 
err = %d\n", + __func__, err); + goto out; + } + + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format + */ + dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + + /* getting Specification Version in big endian format */ + dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | + desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; + b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT]; + + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + + if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && + (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) { + bool hpb_en = false; + + ufshpb_get_dev_info(hba, desc_buf); + + if (!ufshpb_is_legacy(hba)) + err = ufshcd_query_flag_retry(hba, + UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_EN, 0, + &hpb_en); + + if (ufshpb_is_legacy(hba) || (!err && hpb_en)) + dev_info->hpb_enabled = true; + } + + err = ufshcd_read_string_desc(hba, model_index, + &dev_info->model, SD_ASCII_STD); + if (err < 0) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", + __func__, err); + goto out; + } + + hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + + desc_buf[DEVICE_DESC_PARAM_NUM_WLU]; + + ufs_fixup_device_setup(hba); + + ufshcd_wb_probe(hba, desc_buf); + + ufshcd_temp_notif_probe(hba, desc_buf); + + /* + * ufshcd_read_string_desc returns size of the string + * reset the error value + */ + err = 0; + +out: + kfree(desc_buf); + return err; +} + +static void ufs_put_device_desc(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + + kfree(dev_info->model); + dev_info->model = NULL; +} + +/** + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro + * @hba: per-adapter instance + * + * PA_TActivate parameter can be tuned manually if UniPro version is less than + * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce + * the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL( + RX_MIN_ACTIVATETIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_min_activatetime); + if (ret) + goto out; + + /* make sure proper unit conversion is applied */ + tuned_pa_tactivate = + ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) + / PA_TACTIVATE_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + tuned_pa_tactivate); + +out: + return ret; +} + +/** + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro + * @hba: per-adapter instance + * + * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than + * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's + * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. + * This optimal value can help reduce the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure. 
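+ *
+ * Roughly, the code below computes:
+ *
+ *   max_hibern8_time = max(local TX cap, peer RX cap);
+ *   PA_Hibern8Time   = max_hibern8_time * HIBERN8TIME_UNIT_US
+ *                        / PA_HIBERN8_TIME_UNIT_US;
+ *
+ * i.e. the larger capability is rescaled from the M-PHY capability
+ * unit into the PA-layer unit before being written back.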
+ */ +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) +{ + int ret = 0; + u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; + u32 max_hibern8_time, tuned_pa_hibern8time; + + ret = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &local_tx_hibern8_time_cap); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_hibern8_time_cap); + if (ret) + goto out; + + max_hibern8_time = max(local_tx_hibern8_time_cap, + peer_rx_hibern8_time_cap); + /* make sure proper unit conversion is applied */ + tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) + / PA_HIBERN8_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + tuned_pa_hibern8time); +out: + return ret; +} + +/** + * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is + * less than device PA_TACTIVATE time. + * @hba: per-adapter instance + * + * Some UFS devices require host PA_TACTIVATE to be lower than device + * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk + * for such devices. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 granularity, peer_granularity; + u32 pa_tactivate, peer_pa_tactivate; + u32 pa_tactivate_us, peer_pa_tactivate_us; + static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &granularity); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &peer_granularity); + if (ret) + goto out; + + if ((granularity < PA_GRANULARITY_MIN_VAL) || + (granularity > PA_GRANULARITY_MAX_VAL)) { + dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", + __func__, granularity); + return -EINVAL; + } + + if ((peer_granularity < PA_GRANULARITY_MIN_VAL) || + (peer_granularity > PA_GRANULARITY_MAX_VAL)) { + dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", + __func__, peer_granularity); + return -EINVAL; + } + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), + &peer_pa_tactivate); + if (ret) + goto out; + + pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; + peer_pa_tactivate_us = peer_pa_tactivate * + gran_to_us_table[peer_granularity - 1]; + + if (pa_tactivate_us >= peer_pa_tactivate_us) { + u32 new_peer_pa_tactivate; + + new_peer_pa_tactivate = pa_tactivate_us / + gran_to_us_table[peer_granularity - 1]; + new_peer_pa_tactivate++; + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + new_peer_pa_tactivate); + } + +out: + return ret; +} + +static void ufshcd_tune_unipro_params(struct ufs_hba *hba) +{ + if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { + ufshcd_tune_pa_tactivate(hba); + ufshcd_tune_pa_hibern8time(hba); + } + + ufshcd_vops_apply_dev_quirks(hba); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) + /* set 1ms timeout for PA_TACTIVATE */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) + ufshcd_quirk_tune_host_pa_tactivate(hba); +} + +static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) +{ + hba->ufs_stats.hibern8_exit_cnt = 0; + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); + hba->req_abort_count = 0; +} + +static int 
ufshcd_device_geo_params_init(struct ufs_hba *hba) +{ + int err; + size_t buff_len; + u8 *desc_buf; + + buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY]; + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, + desc_buf, buff_len); + if (err) { + dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", + __func__, err); + goto out; + } + + if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) + hba->dev_info.max_lu_supported = 32; + else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) + hba->dev_info.max_lu_supported = 8; + + if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS) + ufshpb_get_geo_info(hba, desc_buf); + +out: + kfree(desc_buf); + return err; +} + +struct ufs_ref_clk { + unsigned long freq_hz; + enum ufs_ref_clk_freq val; +}; + +static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { + {19200000, REF_CLK_FREQ_19_2_MHZ}, + {26000000, REF_CLK_FREQ_26_MHZ}, + {38400000, REF_CLK_FREQ_38_4_MHZ}, + {52000000, REF_CLK_FREQ_52_MHZ}, + {0, REF_CLK_FREQ_INVAL}, +}; + +static enum ufs_ref_clk_freq +ufs_get_bref_clk_from_hz(unsigned long freq) +{ + int i; + + for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) + if (ufs_ref_clk_freqs[i].freq_hz == freq) + return ufs_ref_clk_freqs[i].val; + + return REF_CLK_FREQ_INVAL; +} + +void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) +{ + unsigned long freq; + + freq = clk_get_rate(refclk); + + hba->dev_ref_clk_freq = + ufs_get_bref_clk_from_hz(freq); + + if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) + dev_err(hba->dev, + "invalid ref_clk setting = %ld\n", freq); +} + +static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) +{ + int err; + u32 ref_clk; + u32 freq = hba->dev_ref_clk_freq; + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk); + + if (err) { + dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", + err); + goto out; + } + + if (ref_clk == freq) + goto out; /* nothing to update */ + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq); + + if (err) { + dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", + ufs_ref_clk_freqs[freq].freq_hz); + goto out; + } + + dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", + ufs_ref_clk_freqs[freq].freq_hz); + +out: + return err; +} + +static int ufshcd_device_params_init(struct ufs_hba *hba) +{ + bool flag; + int ret, i; + + /* Init device descriptor sizes */ + for (i = 0; i < QUERY_DESC_IDN_MAX; i++) + hba->desc_size[i] = QUERY_DESC_MAX_SIZE; + + /* Init UFS geometry descriptor related parameters */ + ret = ufshcd_device_geo_params_init(hba); + if (ret) + goto out; + + /* Check and apply UFS device quirks */ + ret = ufs_get_device_desc(hba); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", + __func__, ret); + goto out; + } + + ufshcd_get_ref_clk_gating_wait(hba); + + if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag)) + hba->dev_info.f_power_on_wp_en = flag; + + /* Probe maximum power mode co-supported by both UFS host and device */ + if (ufshcd_get_max_pwr_mode(hba)) + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); +out: + return ret; +} + +/** + * ufshcd_add_lus - probe and add UFS logical units + * @hba: per-adapter instance + */ +static int ufshcd_add_lus(struct ufs_hba *hba) +{ + int ret; + + /* Add required well known logical units to scsi mid layer */ + ret = ufshcd_scsi_add_wlus(hba); + if (ret) + goto out; + + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + hba->clk_scaling.saved_pwr_info.is_valid = true; + hba->clk_scaling.is_allowed = true; + + ret = ufshcd_devfreq_init(hba); + if (ret) + goto out; + + hba->clk_scaling.is_enabled = true; + ufshcd_init_clk_scaling_sysfs(hba); + } + + ufs_bsg_probe(hba); + ufshpb_init(hba); + scsi_scan_host(hba->host); + pm_runtime_put_sync(hba->dev); + +out: + return ret; +} + +/** + * ufshcd_probe_hba - probe hba to detect device and initialize it + * @hba: per-adapter instance + * @init_dev_params: whether or not to call ufshcd_device_params_init(). + * + * Execute link-startup and verify device initialization + */ +static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) +{ + int ret; + unsigned long flags; + ktime_t start = ktime_get(); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + + ret = ufshcd_link_startup(hba); + if (ret) + goto out; + + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto out; + + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + + /* UniPro link is active now */ + ufshcd_set_link_active(hba); + + /* Verify device initialization by sending NOP OUT UPIU */ + ret = ufshcd_verify_dev_init(hba); + if (ret) + goto out; + + /* Initiate UFS initialization, and waiting until completion */ + ret = ufshcd_complete_dev_init(hba); + if (ret) + goto out; + + /* + * Initialize UFS device parameters used by driver, these + * parameters are associated with UFS descriptors. + */ + if (init_dev_params) { + ret = ufshcd_device_params_init(hba); + if (ret) + goto out; + } + + ufshcd_tune_unipro_params(hba); + + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); + + /* Gear up to HS gear if supported */ + if (hba->max_pwr_info.is_valid) { + /* + * Set the right value to bRefClkFreq before attempting to + * switch to HS gears. + */ + if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) + ufshcd_set_dev_ref_clk(hba); + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + goto out; + } + ufshcd_print_pwr_info(hba); + } + + /* + * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) + * and for removable UFS card as well, hence always set the parameter. + * Note: Error handler may issue the device reset hence resetting + * bActiveICCLevel as well so it is always safe to set this here. 
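+ * (ufshcd_set_active_icc_lvl() below re-reads the power descriptor
+ * and rewrites bActiveICCLevel on every probe and reset path, which
+ * is what makes this unconditional call safe.)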
+ */ + ufshcd_set_active_icc_lvl(hba); + + ufshcd_wb_config(hba); + if (hba->ee_usr_mask) + ufshcd_write_ee_control(hba); + /* Enable Auto-Hibernate if configured */ + ufshcd_auto_hibern8_enable(hba); + + ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT); +out: + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + else if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} + +/** + * ufshcd_async_scan - asynchronous execution for probing hba + * @data: data pointer to pass to this function + * @cookie: cookie data + */ +static void ufshcd_async_scan(void *data, async_cookie_t cookie) +{ + struct ufs_hba *hba = (struct ufs_hba *)data; + int ret; + + down(&hba->host_sem); + /* Initialize hba, detect and initialize UFS device */ + ret = ufshcd_probe_hba(hba, true); + up(&hba->host_sem); + if (ret) + goto out; + + /* Probe and add UFS logical units */ + ret = ufshcd_add_lus(hba); +out: + /* + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. + */ + if (ret) { + pm_runtime_put_sync(hba->dev); + ufshcd_hba_exit(hba); + } +} + +static const struct attribute_group *ufshcd_driver_groups[] = { + &ufs_sysfs_unit_descriptor_group, + &ufs_sysfs_lun_attributes_group, +#ifdef CONFIG_SCSI_UFS_HPB + &ufs_sysfs_hpb_stat_group, + &ufs_sysfs_hpb_param_group, +#endif + NULL, +}; + +static struct ufs_hba_variant_params ufs_hba_vps = { + .hba_enable_delay_us = 1000, + .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40), + .devfreq_profile.polling_ms = 100, + .devfreq_profile.target = ufshcd_devfreq_target, + .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status, + .ondemand_data.upthreshold = 70, + .ondemand_data.downdifferential = 5, +}; + +static struct scsi_host_template ufshcd_driver_template = { + .module = THIS_MODULE, + .name = UFSHCD, + .proc_name = UFSHCD, + .map_queues = ufshcd_map_queues, + .queuecommand = ufshcd_queuecommand, + .mq_poll = ufshcd_poll, + .slave_alloc = ufshcd_slave_alloc, + .slave_configure = ufshcd_slave_configure, + .slave_destroy = ufshcd_slave_destroy, + .change_queue_depth = ufshcd_change_queue_depth, + .eh_abort_handler = ufshcd_abort, + .eh_device_reset_handler = ufshcd_eh_device_reset_handler, + .eh_host_reset_handler = ufshcd_eh_host_reset_handler, + .this_id = -1, + .sg_tablesize = SG_ALL, + .cmd_per_lun = UFSHCD_CMD_PER_LUN, + .can_queue = UFSHCD_CAN_QUEUE, + .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, + .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */ + .max_host_blocked = 1, + .track_queue_depth = 1, + .sdev_groups = ufshcd_driver_groups, + .dma_boundary = PAGE_SIZE - 1, + .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, +}; + +static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, + int ua) +{ + int ret; + + if (!vreg) + return 0; + + /* + * "set_load" operation shall be required on those regulators + * which specifically configured current limitation. Otherwise + * zero max_uA may cause unexpected behavior when regulator is + * enabled or set as high power mode. 
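+ * (E.g. a supply described without a max_uA constraint returns early
+ * below rather than calling regulator_set_load() with a 0 uA load,
+ * which could drop the regulator into an unexpected operating mode.)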
+ */ + if (!vreg->max_uA) + return 0; + + ret = regulator_set_load(vreg->reg, ua); + if (ret < 0) { + dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", + __func__, vreg->name, ua, ret); + } + + return ret; +} + +static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); +} + +static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + if (!vreg) + return 0; + + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); +} + +static int ufshcd_config_vreg(struct device *dev, + struct ufs_vreg *vreg, bool on) +{ + if (regulator_count_voltages(vreg->reg) <= 0) + return 0; + + return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0); +} + +static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || vreg->enabled) + goto out; + + ret = ufshcd_config_vreg(dev, vreg, true); + if (!ret) + ret = regulator_enable(vreg->reg); + + if (!ret) + vreg->enabled = true; + else + dev_err(dev, "%s: %s enable failed, err=%d\n", + __func__, vreg->name, ret); +out: + return ret; +} + +static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || !vreg->enabled || vreg->always_on) + goto out; + + ret = regulator_disable(vreg->reg); + + if (!ret) { + /* ignore errors on applying disable config */ + ufshcd_config_vreg(dev, vreg, false); + vreg->enabled = false; + } else { + dev_err(dev, "%s: %s disable failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} + +static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + ret = ufshcd_toggle_vreg(dev, info->vcc, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq2, on); + +out: + if (ret) { + ufshcd_toggle_vreg(dev, info->vccq2, false); + ufshcd_toggle_vreg(dev, info->vccq, false); + ufshcd_toggle_vreg(dev, info->vcc, false); + } + return ret; +} + +static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); +} + +int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg) + goto out; + + vreg->reg = devm_regulator_get(dev, vreg->name); + if (IS_ERR(vreg->reg)) { + ret = PTR_ERR(vreg->reg); + dev_err(dev, "%s: %s get failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_get_vreg); + +static int ufshcd_init_vreg(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + ret = ufshcd_get_vreg(dev, info->vcc); + if (ret) + goto out; + + ret = ufshcd_get_vreg(dev, info->vccq); + if (!ret) + ret = ufshcd_get_vreg(dev, info->vccq2); +out: + return ret; +} + +static int ufshcd_init_hba_vreg(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + return ufshcd_get_vreg(hba->dev, info->vdd_hba); +} + +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + unsigned long flags; + ktime_t start = ktime_get(); + bool clk_state_changed = false; + + if (list_empty(head)) + goto out; + + ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); + if 
(ret) + return ret; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + /* + * Don't disable clocks which are needed + * to keep the link active. + */ + if (ufshcd_is_link_active(hba) && + clki->keep_link_active) + continue; + + clk_state_changed = on ^ clki->enabled; + if (on && !clki->enabled) { + ret = clk_prepare_enable(clki->clk); + if (ret) { + dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", + __func__, clki->name, ret); + goto out; + } + } else if (!on && clki->enabled) { + clk_disable_unprepare(clki->clk); + } + clki->enabled = on; + dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, + clki->name, on ? "en" : "dis"); + } + } + + ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); + if (ret) + return ret; + +out: + if (ret) { + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) + clk_disable_unprepare(clki->clk); + } + } else if (!ret && on) { + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + + if (clk_state_changed) + trace_ufshcd_profile_clk_gating(dev_name(hba->dev), + (on ? "on" : "off"), + ktime_to_us(ktime_sub(ktime_get(), start)), ret); + return ret; +} + +static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba) +{ + u32 freq; + int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq); + + if (ret) { + dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret); + return REF_CLK_FREQ_INVAL; + } + + return ufs_get_bref_clk_from_hz(freq); +} + +static int ufshcd_init_clocks(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct device *dev = hba->dev; + struct list_head *head = &hba->clk_list_head; + + if (list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!clki->name) + continue; + + clki->clk = devm_clk_get(dev, clki->name); + if (IS_ERR(clki->clk)) { + ret = PTR_ERR(clki->clk); + dev_err(dev, "%s: %s clk get failed, %d\n", + __func__, clki->name, ret); + goto out; + } + + /* + * Parse device ref clk freq as per device tree "ref_clk". + * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL + * in ufshcd_alloc_host(). + */ + if (!strcmp(clki->name, "ref_clk")) + ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); + + if (clki->max_freq) { + ret = clk_set_rate(clki->clk, clki->max_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->max_freq, ret); + goto out; + } + clki->curr_freq = clki->max_freq; + } + dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } +out: + return ret; +} + +static int ufshcd_variant_hba_init(struct ufs_hba *hba) +{ + int err = 0; + + if (!hba->vops) + goto out; + + err = ufshcd_vops_init(hba); + if (err) + dev_err(hba->dev, "%s: variant %s init failed err %d\n", + __func__, ufshcd_get_var_name(hba), err); +out: + return err; +} + +static void ufshcd_variant_hba_exit(struct ufs_hba *hba) +{ + if (!hba->vops) + return; + + ufshcd_vops_exit(hba); +} + +static int ufshcd_hba_init(struct ufs_hba *hba) +{ + int err; + + /* + * Handle host controller power separately from the UFS device power + * rails as it will help controlling the UFS host controller power + * collapse easily which is different than UFS device power collapse. 
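+ * (The error labels at the bottom of this function unwind in the
+ * reverse order: variant init, then device vregs, then clocks, then
+ * the hba vreg, so a failure at any step powers everything back
+ * down.)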
+ * Also, enable the host controller power before we go ahead with rest + * of the initialization here. + */ + err = ufshcd_init_hba_vreg(hba); + if (err) + goto out; + + err = ufshcd_setup_hba_vreg(hba, true); + if (err) + goto out; + + err = ufshcd_init_clocks(hba); + if (err) + goto out_disable_hba_vreg; + + if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) + hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba); + + err = ufshcd_setup_clocks(hba, true); + if (err) + goto out_disable_hba_vreg; + + err = ufshcd_init_vreg(hba); + if (err) + goto out_disable_clks; + + err = ufshcd_setup_vreg(hba, true); + if (err) + goto out_disable_clks; + + err = ufshcd_variant_hba_init(hba); + if (err) + goto out_disable_vreg; + + ufs_debugfs_hba_init(hba); + + hba->is_powered = true; + goto out; + +out_disable_vreg: + ufshcd_setup_vreg(hba, false); +out_disable_clks: + ufshcd_setup_clocks(hba, false); +out_disable_hba_vreg: + ufshcd_setup_hba_vreg(hba, false); +out: + return err; +} + +static void ufshcd_hba_exit(struct ufs_hba *hba) +{ + if (hba->is_powered) { + ufshcd_exit_clk_scaling(hba); + ufshcd_exit_clk_gating(hba); + if (hba->eh_wq) + destroy_workqueue(hba->eh_wq); + ufs_debugfs_hba_exit(hba); + ufshcd_variant_hba_exit(hba); + ufshcd_setup_vreg(hba, false); + ufshcd_setup_clocks(hba, false); + ufshcd_setup_hba_vreg(hba, false); + hba->is_powered = false; + ufs_put_device_desc(hba); + } +} + +/** + * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device + * power mode + * @hba: per adapter instance + * @pwr_mode: device power mode to set + * + * Returns 0 if requested power mode is set successfully + * Returns < 0 if failed to set the requested power mode + */ +static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, + enum ufs_dev_pwr_mode pwr_mode) +{ + unsigned char cmd[6] = { START_STOP }; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdp; + unsigned long flags; + int ret, retries; + unsigned long deadline; + int32_t remaining; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdp = hba->ufs_device_wlun; + if (sdp) { + ret = scsi_device_get(sdp); + if (!ret && !scsi_device_online(sdp)) { + ret = -ENODEV; + scsi_device_put(sdp); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (ret) + return ret; + + /* + * If scsi commands fail, the scsi mid-layer schedules scsi error- + * handling, which would wait for host to be resumed. Since we know + * we are functional while we are here, skip host resume in error + * handling context. + */ + hba->host->eh_noresume = 1; + + cmd[4] = pwr_mode << 4; + + /* + * Current function would be generally called from the power management + * callbacks hence set the RQF_PM flag so that it doesn't resume the + * already suspended childs. 
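+ * (RQF_PM requests are still executed while the queue is runtime
+ * suspended. Note also that the loop below retries the SSU up to
+ * three times within a 10 second deadline whenever the device
+ * answers with UNIT ATTENTION sense.)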
+ */
+ deadline = jiffies + 10 * HZ;
+ for (retries = 3; retries > 0; --retries) {
+ ret = -ETIMEDOUT;
+ remaining = deadline - jiffies;
+ if (remaining <= 0)
+ break;
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ remaining / HZ, 0, 0, RQF_PM, NULL);
+ if (!scsi_status_is_check_condition(ret) ||
+ !scsi_sense_valid(&sshdr) ||
+ sshdr.sense_key != UNIT_ATTENTION)
+ break;
+ }
+ if (ret) {
+ sdev_printk(KERN_WARNING, sdp,
+ "START_STOP failed for power mode: %d, result %x\n",
+ pwr_mode, ret);
+ if (ret > 0) {
+ if (scsi_sense_valid(&sshdr))
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ ret = -EIO;
+ }
+ }
+
+ if (!ret)
+ hba->curr_dev_pwr_mode = pwr_mode;
+
+ scsi_device_put(sdp);
+ hba->host->eh_noresume = 0;
+ return ret;
+}
+
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+ enum uic_link_state req_link_state,
+ int check_for_bkops)
+{
+ int ret = 0;
+
+ if (req_link_state == hba->uic_link_state)
+ return 0;
+
+ if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (!ret) {
+ ufshcd_set_link_hibern8(hba);
+ } else {
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+ /*
+ * If autobkops is enabled, the link can't be turned off because
+ * turning off the link would also turn off the device, except in the
+ * case of DeepSleep where the device is expected to remain powered.
+ */
+ else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+ (!check_for_bkops || !hba->auto_bkops_enabled)) {
+ /*
+ * Let's make sure that the link is in low power mode; we
+ * currently do this by putting it in Hibern8. Another way to
+ * put the link in low power mode is to send a DME end point
+ * reset to the device and then a DME reset to the local
+ * UniPro, but putting the link in Hibern8 is much faster.
+ *
+ * Note also that putting the link in Hibern8 is a requirement
+ * for entering DeepSleep.
+ */
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
+ goto out;
+ }
+ /*
+ * Change controller state to "reset state" which
+ * should also put the link in off/reset state
+ */
+ ufshcd_hba_stop(hba);
+ /*
+ * TODO: Check if we need any delay to make sure that
+ * controller is reset
+ */
+ ufshcd_set_link_off(hba);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+ bool vcc_off = false;
+
+ /*
+ * It seems some UFS devices may keep drawing more than sleep current
+ * (at least for 500us) from UFS rails (especially from VCCQ rail).
+ * To avoid this situation, add a 2ms delay before putting these UFS
+ * rails in LPM mode.
+ */
+ if (!ufshcd_is_link_active(hba) &&
+ hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+ usleep_range(2000, 2100);
+
+ /*
+ * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
+ * save some power.
+ *
+ * If the UFS device and link are in OFF state, all power supplies
+ * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is
+ * not required. If the UFS link is inactive (Hibern8 or OFF state) and
+ * the device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
+ *
+ * Ignore the error returned by ufshcd_toggle_vreg() as the device is
+ * anyway in a low power state which would save some power.
+ *
+ * If Write Booster is enabled and the device needs to flush the WB
+ * buffer OR if bkops status is urgent for WB, keep Vcc on.
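+ *
+ * As a rough summary of the decision below (a sketch, not normative):
+ * device powered off + link off + no power-on WP: vcc, vccq and
+ * vccq2 are all switched off; device merely not active: vcc goes
+ * off, and vccq/vccq2 drop to LPM when the link is in Hibern8 or off.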
+ */ + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ufshcd_setup_vreg(hba, false); + vcc_off = true; + } else if (!ufshcd_is_ufs_dev_active(hba)) { + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); + vcc_off = true; + if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); + } + } + + /* + * Some UFS devices require delay after VCC power rail is turned-off. + */ + if (vcc_off && hba->vreg_info.vcc && + hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) + usleep_range(5000, 5100); +} + +#ifdef CONFIG_PM +static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) +{ + int ret = 0; + + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ret = ufshcd_setup_vreg(hba, true); + } else if (!ufshcd_is_ufs_dev_active(hba)) { + if (!ufshcd_is_link_active(hba)) { + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); + if (ret) + goto vcc_disable; + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); + if (ret) + goto vccq_lpm; + } + ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); + } + goto out; + +vccq_lpm: + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); +vcc_disable: + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); +out: + return ret; +} +#endif /* CONFIG_PM */ + +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) + ufshcd_setup_hba_vreg(hba, false); +} + +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) + ufshcd_setup_hba_vreg(hba, true); +} + +static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + int ret = 0; + int check_for_bkops; + enum ufs_pm_level pm_lvl; + enum ufs_dev_pwr_mode req_dev_pwr_mode; + enum uic_link_state req_link_state; + + hba->pm_op_in_progress = true; + if (pm_op != UFS_SHUTDOWN_PM) { + pm_lvl = pm_op == UFS_RUNTIME_PM ? + hba->rpm_lvl : hba->spm_lvl; + req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); + req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); + } else { + req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; + req_link_state = UIC_LINK_OFF_STATE; + } + + ufshpb_suspend(hba); + + /* + * If we can't transition into any of the low power modes + * just gate the clocks. + */ + ufshcd_hold(hba, false); + hba->clk_gating.is_suspended = true; + + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, true); + + if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && + req_link_state == UIC_LINK_ACTIVE_STATE) { + goto vops_suspend; + } + + if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && + (req_link_state == hba->uic_link_state)) + goto enable_scaling; + + /* UFS device & link must be active before we enter in this function */ + if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { + ret = -EINVAL; + goto enable_scaling; + } + + if (pm_op == UFS_RUNTIME_PM) { + if (ufshcd_can_autobkops_during_suspend(hba)) { + /* + * The device is idle with no requests in the queue, + * allow background operations if bkops status shows + * that performance might be impacted. 
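+ * (ufshcd_urgent_bkops() below keeps auto-bkops enabled only when the
+ * device reports a status at or above the configured urgency level,
+ * typically BKOPS_STATUS_PERF_IMPACT.)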
+ */ + ret = ufshcd_urgent_bkops(hba); + if (ret) + goto enable_scaling; + } else { + /* make sure that auto bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + } + /* + * If device needs to do BKOP or WB buffer flush during + * Hibern8, keep device power mode as "active power mode" + * and VCC supply. + */ + hba->dev_info.b_rpm_dev_flush_capable = + hba->auto_bkops_enabled || + (((req_link_state == UIC_LINK_HIBERN8_STATE) || + ((req_link_state == UIC_LINK_ACTIVE_STATE) && + ufshcd_is_auto_hibern8_enabled(hba))) && + ufshcd_wb_need_flush(hba)); + } + + flush_work(&hba->eeh_work); + + ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); + if (ret) + goto enable_scaling; + + if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { + if (pm_op != UFS_RUNTIME_PM) + /* ensure that bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + + if (!hba->dev_info.b_rpm_dev_flush_capable) { + ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); + if (ret) + goto enable_scaling; + } + } + + /* + * In the case of DeepSleep, the device is expected to remain powered + * with the link off, so do not check for bkops. + */ + check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); + ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); + if (ret) + goto set_dev_active; + +vops_suspend: + /* + * Call vendor specific suspend callback. As these callbacks may access + * vendor specific host controller register space call them before the + * host clocks are ON. + */ + ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); + if (ret) + goto set_link_active; + goto out; + +set_link_active: + /* + * Device hardware reset is required to exit DeepSleep. Also, for + * DeepSleep, the link is off so host reset and restore will be done + * further below. + */ + if (ufshcd_is_ufs_dev_deepsleep(hba)) { + ufshcd_device_reset(hba); + WARN_ON(!ufshcd_is_link_off(hba)); + } + if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) + ufshcd_set_link_active(hba); + else if (ufshcd_is_link_off(hba)) + ufshcd_host_reset_and_restore(hba); +set_dev_active: + /* Can also get here needing to exit DeepSleep */ + if (ufshcd_is_ufs_dev_deepsleep(hba)) { + ufshcd_device_reset(hba); + ufshcd_host_reset_and_restore(hba); + } + if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) + ufshcd_disable_auto_bkops(hba); +enable_scaling: + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, false); + + hba->dev_info.b_rpm_dev_flush_capable = false; +out: + if (hba->dev_info.b_rpm_dev_flush_capable) { + schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, + msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS)); + } + + if (ret) { + ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); + hba->clk_gating.is_suspended = false; + ufshcd_release(hba); + ufshpb_resume(hba); + } + hba->pm_op_in_progress = false; + return ret; +} + +#ifdef CONFIG_PM +static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + int ret; + enum uic_link_state old_link_state = hba->uic_link_state; + + hba->pm_op_in_progress = true; + + /* + * Call vendor specific resume callback. As these callbacks may access + * vendor specific host controller register space call them when the + * host clocks are ON. 
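+ * (By this point ufshcd_resume() has already run for the parent hba
+ * device earlier in the resume chain, so clocks and regulators are up.)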
+ */ + ret = ufshcd_vops_resume(hba, pm_op); + if (ret) + goto out; + + /* For DeepSleep, the only supported option is to have the link off */ + WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); + + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (!ret) { + ufshcd_set_link_active(hba); + } else { + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", + __func__, ret); + goto vendor_suspend; + } + } else if (ufshcd_is_link_off(hba)) { + /* + * A full initialization of the host and the device is + * required since the link was put to off during suspend. + * Note, in the case of DeepSleep, the device will exit + * DeepSleep due to device reset. + */ + ret = ufshcd_reset_and_restore(hba); + /* + * ufshcd_reset_and_restore() should have already + * set the link state as active + */ + if (ret || !ufshcd_is_link_active(hba)) + goto vendor_suspend; + } + + if (!ufshcd_is_ufs_dev_active(hba)) { + ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); + if (ret) + goto set_old_link_state; + } + + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) + ufshcd_enable_auto_bkops(hba); + else + /* + * If BKOPs operations are urgently needed at this moment then + * keep auto-bkops enabled or else disable it. + */ + ufshcd_urgent_bkops(hba); + + if (hba->ee_usr_mask) + ufshcd_write_ee_control(hba); + + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, false); + + if (hba->dev_info.b_rpm_dev_flush_capable) { + hba->dev_info.b_rpm_dev_flush_capable = false; + cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); + } + + /* Enable Auto-Hibernate if configured */ + ufshcd_auto_hibern8_enable(hba); + + ufshpb_resume(hba); + goto out; + +set_old_link_state: + ufshcd_link_state_transition(hba, old_link_state, 0); +vendor_suspend: + ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); + ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); +out: + if (ret) + ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); + hba->clk_gating.is_suspended = false; + ufshcd_release(hba); + hba->pm_op_in_progress = false; + return ret; +} + +static int ufshcd_wl_runtime_suspend(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + int ret; + ktime_t start = ktime_get(); + + hba = shost_priv(sdev->host); + + ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); + if (ret) + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); + + trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + + return ret; +} + +static int ufshcd_wl_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + int ret = 0; + ktime_t start = ktime_get(); + + hba = shost_priv(sdev->host); + + ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); + if (ret) + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); + + trace_ufshcd_wl_runtime_resume(dev_name(dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + + return ret; +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int ufshcd_wl_suspend(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + int ret = 0; + ktime_t start = ktime_get(); + + hba = shost_priv(sdev->host); + down(&hba->host_sem); + + if (pm_runtime_suspended(dev)) + goto out; + + ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); + if (ret) { + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, 
ret);
+ up(&hba->host_sem);
+ }
+
+out:
+ if (!ret)
+ hba->is_sys_suspended = true;
+ trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+static int ufshcd_wl_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+
+ ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+out:
+ trace_ufshcd_wl_resume(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
+ up(&hba->host_sem);
+ return ret;
+}
+#endif
+
+static void ufshcd_wl_shutdown(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
+ /* Turn on everything while shutting down */
+ ufshcd_rpm_get_sync(hba);
+ scsi_device_quiesce(sdev);
+ shost_for_each_device(sdev, hba->host) {
+ if (sdev == hba->ufs_device_wlun)
+ continue;
+ scsi_device_quiesce(sdev);
+ }
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ *
+ * This function disables irqs, turns off clocks and
+ * puts vreg and hba-vreg in LPM mode.
+ */
+static int ufshcd_suspend(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+ /*
+ * Disable the host irq as there won't be any host controller
+ * transactions expected till resume.
+ */
+ ufshcd_disable_irq(hba);
+ ret = ufshcd_setup_clocks(hba, false);
+ if (ret) {
+ ufshcd_enable_irq(hba);
+ return ret;
+ }
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
+
+ ufshcd_vreg_set_lpm(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ *
+ * This function basically turns on the regulators, clocks and
+ * irqs of the hba.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto out;
+
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto disable_vreg;
+
+ /* enable the host irq as host controller would be active soon */
+ ufshcd_enable_irq(hba);
+ goto out;
+
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
+out:
+ if (ret)
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * ufshcd_system_suspend - system suspend callback
+ * @dev: Device associated with the UFS controller.
+ *
+ * Executed before putting the system into a sleep state in which the contents
+ * of main memory are preserved.
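+ *
+ * Bus glue drivers typically wire this up through their dev_pm_ops,
+ * along the lines of the following sketch (names illustrative):
+ *
+ * static const struct dev_pm_ops my_ufs_pm_ops = {
+ * SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend,
+ * ufshcd_system_resume)
+ * SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend,
+ * ufshcd_runtime_resume, NULL)
+ * };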
+ * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_system_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret = 0; + ktime_t start = ktime_get(); + + if (pm_runtime_suspended(hba->dev)) + goto out; + + ret = ufshcd_suspend(hba); +out: + trace_ufshcd_system_suspend(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} +EXPORT_SYMBOL(ufshcd_system_suspend); + +/** + * ufshcd_system_resume - system resume callback + * @dev: Device associated with the UFS controller. + * + * Executed after waking the system up from a sleep state in which the contents + * of main memory were preserved. + * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_system_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + ktime_t start = ktime_get(); + int ret = 0; + + if (pm_runtime_suspended(hba->dev)) + goto out; + + ret = ufshcd_resume(hba); + +out: + trace_ufshcd_system_resume(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + + return ret; +} +EXPORT_SYMBOL(ufshcd_system_resume); +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +/** + * ufshcd_runtime_suspend - runtime suspend callback + * @dev: Device associated with the UFS controller. + * + * Check the description of ufshcd_suspend() function for more details. + * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_runtime_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + ktime_t start = ktime_get(); + + ret = ufshcd_suspend(hba); + + trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} +EXPORT_SYMBOL(ufshcd_runtime_suspend); + +/** + * ufshcd_runtime_resume - runtime resume routine + * @dev: Device associated with the UFS controller. + * + * This function basically brings controller + * to active state. Following operations are done in this function: + * + * 1. Turn on all the controller related clocks + * 2. Turn ON VCC rail + */ +int ufshcd_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + ktime_t start = ktime_get(); + + ret = ufshcd_resume(hba); + + trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} +EXPORT_SYMBOL(ufshcd_runtime_resume); +#endif /* CONFIG_PM */ + +/** + * ufshcd_shutdown - shutdown routine + * @hba: per adapter instance + * + * This function would turn off both UFS device and UFS hba + * regulators. It would also disable clocks. + * + * Returns 0 always to allow force shutdown even in case of errors. 
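+ * (Bus glue drivers call this from their ->shutdown hooks, where a
+ * failure could not be acted upon anyway.)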
+ */ +int ufshcd_shutdown(struct ufs_hba *hba) +{ + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) + ufshcd_suspend(hba); + + hba->is_powered = false; + /* allow force shutdown even in case of errors */ + return 0; +} +EXPORT_SYMBOL(ufshcd_shutdown); + +/** + * ufshcd_remove - de-allocate SCSI host and host memory space + * data structure memory + * @hba: per adapter instance + */ +void ufshcd_remove(struct ufs_hba *hba) +{ + if (hba->ufs_device_wlun) + ufshcd_rpm_get_sync(hba); + ufs_hwmon_remove(hba); + ufs_bsg_remove(hba); + ufshpb_remove(hba); + ufs_sysfs_remove_nodes(hba->dev); + blk_mq_destroy_queue(hba->tmf_queue); + blk_mq_free_tag_set(&hba->tmf_tag_set); + scsi_remove_host(hba->host); + /* disable interrupts */ + ufshcd_disable_intr(hba, hba->intr_mask); + ufshcd_hba_stop(hba); + ufshcd_hba_exit(hba); +} +EXPORT_SYMBOL_GPL(ufshcd_remove); + +/** + * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) + * @hba: pointer to Host Bus Adapter (HBA) + */ +void ufshcd_dealloc_host(struct ufs_hba *hba) +{ + scsi_host_put(hba->host); +} +EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); + +/** + * ufshcd_set_dma_mask - Set dma mask based on the controller + * addressing capability + * @hba: per adapter instance + * + * Returns 0 for success, non-zero for failure + */ +static int ufshcd_set_dma_mask(struct ufs_hba *hba) +{ + if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { + if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) + return 0; + } + return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); +} + +/** + * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) + * @dev: pointer to device handle + * @hba_handle: driver private handle + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) +{ + struct Scsi_Host *host; + struct ufs_hba *hba; + int err = 0; + + if (!dev) { + dev_err(dev, + "Invalid memory reference for dev is NULL\n"); + err = -ENODEV; + goto out_error; + } + + host = scsi_host_alloc(&ufshcd_driver_template, + sizeof(struct ufs_hba)); + if (!host) { + dev_err(dev, "scsi_host_alloc failed\n"); + err = -ENOMEM; + goto out_error; + } + host->nr_maps = HCTX_TYPE_POLL + 1; + hba = shost_priv(host); + hba->host = host; + hba->dev = dev; + hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; + hba->nop_out_timeout = NOP_OUT_TIMEOUT; + INIT_LIST_HEAD(&hba->clk_list_head); + spin_lock_init(&hba->outstanding_lock); + + *hba_handle = hba; + +out_error: + return err; +} +EXPORT_SYMBOL(ufshcd_alloc_host); + +/* This function exists because blk_mq_alloc_tag_set() requires this. */ +static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) +{ + WARN_ON_ONCE(true); + return BLK_STS_NOTSUPP; +} + +static const struct blk_mq_ops ufshcd_tmf_ops = { + .queue_rq = ufshcd_queue_tmf, +}; + +/** + * ufshcd_init - Driver initialization routine + * @hba: per-adapter instance + * @mmio_base: base register address + * @irq: Interrupt line of device + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) +{ + int err; + struct Scsi_Host *host = hba->host; + struct device *dev = hba->dev; + char eh_wq_name[sizeof("ufs_eh_wq_00")]; + + /* + * dev_set_drvdata() must be called before any callbacks are registered + * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, + * sysfs). 
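+ * A devfreq callback, for instance, begins with
+ * struct ufs_hba *hba = dev_get_drvdata(dev);
+ * and would dereference a NULL pointer if drvdata were not set yet.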
+ */ + dev_set_drvdata(dev, hba); + + if (!mmio_base) { + dev_err(hba->dev, + "Invalid memory reference for mmio_base is NULL\n"); + err = -ENODEV; + goto out_error; + } + + hba->mmio_base = mmio_base; + hba->irq = irq; + hba->vps = &ufs_hba_vps; + + err = ufshcd_hba_init(hba); + if (err) + goto out_error; + + /* Read capabilities registers */ + err = ufshcd_hba_capabilities(hba); + if (err) + goto out_disable; + + /* Get UFS version supported by the controller */ + hba->ufs_version = ufshcd_get_ufs_version(hba); + + /* Get Interrupt bit mask per version */ + hba->intr_mask = ufshcd_get_intr_mask(hba); + + err = ufshcd_set_dma_mask(hba); + if (err) { + dev_err(hba->dev, "set dma mask failed\n"); + goto out_disable; + } + + /* Allocate memory for host memory space */ + err = ufshcd_memory_alloc(hba); + if (err) { + dev_err(hba->dev, "Memory allocation failed\n"); + goto out_disable; + } + + /* Configure LRB */ + ufshcd_host_memory_configure(hba); + + host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; + host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; + host->max_id = UFSHCD_MAX_ID; + host->max_lun = UFS_MAX_LUNS; + host->max_channel = UFSHCD_MAX_CHANNEL; + host->unique_id = host->host_no; + host->max_cmd_len = UFS_CDB_SIZE; + + hba->max_pwr_info.is_valid = false; + + /* Initialize work queues */ + snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", + hba->host->host_no); + hba->eh_wq = create_singlethread_workqueue(eh_wq_name); + if (!hba->eh_wq) { + dev_err(hba->dev, "%s: failed to create eh workqueue\n", + __func__); + err = -ENOMEM; + goto out_disable; + } + INIT_WORK(&hba->eh_work, ufshcd_err_handler); + INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); + + sema_init(&hba->host_sem, 1); + + /* Initialize UIC command mutex */ + mutex_init(&hba->uic_cmd_mutex); + + /* Initialize mutex for device management commands */ + mutex_init(&hba->dev_cmd.lock); + + /* Initialize mutex for exception event control */ + mutex_init(&hba->ee_ctrl_mutex); + + init_rwsem(&hba->clk_scaling_lock); + + ufshcd_init_clk_gating(hba); + + ufshcd_init_clk_scaling(hba); + + /* + * In order to avoid any spurious interrupt immediately after + * registering UFS controller interrupt handler, clear any pending UFS + * interrupt status and disable all the UFS interrupts. + */ + ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), + REG_INTERRUPT_STATUS); + ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); + /* + * Make sure that UFS interrupts are disabled and any pending interrupt + * status is cleared before registering UFS interrupt handler. 
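+ * (The mb() below ensures the two MMIO writes above have taken effect
+ * before devm_request_irq() can let a shared-IRQ handler run.)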
+ */
+ mb();
+
+ /* IRQ registration */
+ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ if (err) {
+ dev_err(hba->dev, "request irq failed\n");
+ goto out_disable;
+ } else {
+ hba->is_irq_enabled = true;
+ }
+
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto out_disable;
+ }
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+ .nr_hw_queues = 1,
+ .queue_depth = hba->nutmrs,
+ .ops = &ufshcd_tmf_ops,
+ .flags = BLK_MQ_F_NO_SCHED,
+ };
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+ if (err < 0)
+ goto out_remove_scsi_host;
+ hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+ if (IS_ERR(hba->tmf_queue)) {
+ err = PTR_ERR(hba->tmf_queue);
+ goto free_tmf_tag_set;
+ }
+ hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+ sizeof(*hba->tmf_rqs), GFP_KERNEL);
+ if (!hba->tmf_rqs) {
+ err = -ENOMEM;
+ goto free_tmf_queue;
+ }
+
+ /* Reset the attached device */
+ ufshcd_device_reset(hba);
+
+ ufshcd_init_crypto(hba);
+
+ /* Host controller enable */
+ err = ufshcd_hba_enable(hba);
+ if (err) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_evt_hist(hba);
+ ufshcd_print_host_state(hba);
+ goto free_tmf_queue;
+ }
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+ ufshcd_rpm_dev_flush_recheck_work);
+
+ /* Set the default auto-hibernate idle timer value to 150 ms */
+ if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
+ }
+
+ /* Hold auto suspend until async scan completes */
+ pm_runtime_get_sync(dev);
+ atomic_set(&hba->scsi_block_reqs_cnt, 0);
+ /*
+ * We are assuming that the device wasn't put in a sleep/power-down
+ * state exclusively during the boot stage before the kernel.
+ * This assumption helps avoid doing link startup twice during
+ * ufshcd_probe_hba().
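+ * (If a bootloader did leave the device in a low power state, the
+ * ufshcd_device_reset() issued above brings it back to its power-on
+ * defaults.)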
+ */ + ufshcd_set_ufs_dev_active(hba); + + async_schedule(ufshcd_async_scan, hba); + ufs_sysfs_add_nodes(hba->dev); + + device_enable_async_suspend(dev); + return 0; + +free_tmf_queue: + blk_mq_destroy_queue(hba->tmf_queue); +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); +out_remove_scsi_host: + scsi_remove_host(hba->host); +out_disable: + hba->is_irq_enabled = false; + ufshcd_hba_exit(hba); +out_error: + return err; +} +EXPORT_SYMBOL_GPL(ufshcd_init); + +void ufshcd_resume_complete(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (hba->complete_put) { + ufshcd_rpm_put(hba); + hba->complete_put = false; + } +} +EXPORT_SYMBOL_GPL(ufshcd_resume_complete); + +static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) +{ + struct device *dev = &hba->ufs_device_wlun->sdev_gendev; + enum ufs_dev_pwr_mode dev_pwr_mode; + enum uic_link_state link_state; + unsigned long flags; + bool res; + + spin_lock_irqsave(&dev->power.lock, flags); + dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); + link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); + res = pm_runtime_suspended(dev) && + hba->curr_dev_pwr_mode == dev_pwr_mode && + hba->uic_link_state == link_state && + !hba->dev_info.b_rpm_dev_flush_capable; + spin_unlock_irqrestore(&dev->power.lock, flags); + + return res; +} + +int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + + /* + * SCSI assumes that runtime-pm and system-pm for scsi drivers + * are same. And it doesn't wake up the device for system-suspend + * if it's runtime suspended. But ufs doesn't follow that. + * Refer ufshcd_resume_complete() + */ + if (hba->ufs_device_wlun) { + /* Prevent runtime suspend */ + ufshcd_rpm_get_noresume(hba); + /* + * Check if already runtime suspended in same state as system + * suspend would be. + */ + if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { + /* RPM state is not ok for SPM, so runtime resume */ + ret = ufshcd_rpm_resume(hba); + if (ret < 0 && ret != -EACCES) { + ufshcd_rpm_put(hba); + return ret; + } + } + hba->complete_put = true; + } + return 0; +} +EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare); + +int ufshcd_suspend_prepare(struct device *dev) +{ + return __ufshcd_suspend_prepare(dev, true); +} +EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare); + +#ifdef CONFIG_PM_SLEEP +static int ufshcd_wl_poweroff(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba = shost_priv(sdev->host); + + __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); + return 0; +} +#endif + +static int ufshcd_wl_probe(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (!is_device_wlun(sdev)) + return -ENODEV; + + blk_pm_runtime_init(sdev->request_queue, dev); + pm_runtime_set_autosuspend_delay(dev, 0); + pm_runtime_allow(dev); + + return 0; +} + +static int ufshcd_wl_remove(struct device *dev) +{ + pm_runtime_forbid(dev); + return 0; +} + +static const struct dev_pm_ops ufshcd_wl_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = ufshcd_wl_suspend, + .resume = ufshcd_wl_resume, + .freeze = ufshcd_wl_suspend, + .thaw = ufshcd_wl_resume, + .poweroff = ufshcd_wl_poweroff, + .restore = ufshcd_wl_resume, +#endif + SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL) +}; + +/* + * ufs_dev_wlun_template - describes ufs device wlun + * ufs-device wlun - used to send pm commands + * All luns are consumers of ufs-device wlun. + * + * Currently, no sd driver is present for wluns. 
+ * Hence no specific PM operations are performed.
+ * With the ufs design, SSU should be sent to the ufs-device wlun.
+ * Hence register a scsi driver for ufs wluns only.
+ */
+static struct scsi_driver ufs_dev_wlun_template = {
+ .gendrv = {
+ .name = "ufs_device_wlun",
+ .owner = THIS_MODULE,
+ .probe = ufshcd_wl_probe,
+ .remove = ufshcd_wl_remove,
+ .pm = &ufshcd_wl_pm_ops,
+ .shutdown = ufshcd_wl_shutdown,
+ },
+};
+
+static int __init ufshcd_core_init(void)
+{
+ int ret;
+
+ /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
+ static_assert(sizeof(struct utp_transfer_cmd_desc) ==
+ 2 * ALIGNED_UPIU_SIZE +
+ SG_ALL * sizeof(struct ufshcd_sg_entry));
+
+ ufs_debugfs_init();
+
+ ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
+ if (ret)
+ ufs_debugfs_exit();
+ return ret;
+}
+
+static void __exit ufshcd_core_exit(void)
+{
+ ufs_debugfs_exit();
+ scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
+}
+
+module_init(ufshcd_core_init);
+module_exit(ufshcd_core_exit);
+
+MODULE_AUTHOR("Santosh Yaragnavi ");
+MODULE_AUTHOR("Vinayak Holikatti ");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
new file mode 100644
index 0000000000..a1a7a1175a
--- /dev/null
+++ b/drivers/ufs/core/ufshpb.c
@@ -0,0 +1,2666 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Yongmyung Lee 
+ * Jinyoung Choi 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ufshcd-priv.h"
+#include "ufshpb.h"
+#include "../../scsi/sd.h"
+
+#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
+#define READ_TO_MS 1000
+#define READ_TO_EXPIRIES 100
+#define POLLING_INTERVAL_MS 200
+#define THROTTLE_MAP_REQ_DEFAULT 1
+
+/* memory management */
+static struct kmem_cache *ufshpb_mctx_cache;
+static mempool_t *ufshpb_mctx_pool;
+static mempool_t *ufshpb_page_pool;
+/* A cache size of 2MB can cache ppn in the 1GB range. */
+static unsigned int ufshpb_host_map_kbytes = 2048;
+static int tot_active_srgn_pages;
+
+static struct workqueue_struct *ufshpb_wq;
+
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx);
+
+bool ufshpb_is_allowed(struct ufs_hba *hba)
+{
+ return !(hba->ufshpb_dev.hpb_disabled);
+}
+
+/* HPB version 1.0 is called the legacy version.
*/ +bool ufshpb_is_legacy(struct ufs_hba *hba) +{ + return hba->ufshpb_dev.is_legacy; +} + +static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev) +{ + return sdev->hostdata; +} + +static int ufshpb_get_state(struct ufshpb_lu *hpb) +{ + return atomic_read(&hpb->hpb_state); +} + +static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) +{ + atomic_set(&hpb->hpb_state, state); +} + +static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + return rgn->rgn_state != HPB_RGN_INACTIVE && + srgn->srgn_state == HPB_SRGN_VALID; +} + +static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd) +{ + return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ; +} + +static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd) +{ + return op_is_write(req_op(scsi_cmd_to_rq(cmd))) || + op_is_discard(req_op(scsi_cmd_to_rq(cmd))); +} + +static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len) +{ + return transfer_len <= hpb->pre_req_max_tr_len; +} + +static bool ufshpb_is_general_lun(int lun) +{ + return lun < UFS_UPIU_MAX_UNIT_NUM_ID; +} + +static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) +{ + return hpb->lu_pinned_end != PINNED_NOT_SET && + rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end; +} + +static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) +{ + bool ret = false; + unsigned long flags; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + return; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) + ret = true; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + if (ret) + queue_work(ufshpb_wq, &hpb->map_work); +} + +static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, + struct utp_hpb_rsp *rsp_field) +{ + /* Check HPB_UPDATE_ALERT */ + if (!(lrbp->ucd_rsp_ptr->header.dword_2 & + UPIU_HEADER_DWORD(0, 2, 0, 0))) + return false; + + if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN || + rsp_field->desc_type != DEV_DES_TYPE || + rsp_field->additional_len != DEV_ADDITIONAL_LEN || + rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM || + rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM || + rsp_field->hpb_op == HPB_RSP_NONE || + (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE && + !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt)) + return false; + + if (!ufshpb_is_general_lun(rsp_field->lun)) { + dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n", + lrbp->lun); + return false; + } + + return true; +} + +static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, + int srgn_offset, int cnt, bool set_dirty) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn, *prev_srgn = NULL; + int set_bit_len; + int bitmap_len; + unsigned long flags; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + + if ((srgn_offset + cnt) > bitmap_len) + set_bit_len = bitmap_len - srgn_offset; + else + set_bit_len = cnt; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + if (set_dirty) { + if (srgn->srgn_state == HPB_SRGN_VALID) + bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, + set_bit_len); + } else if (hpb->is_hcm) { + /* rewind the read timer for lru regions */ + rgn->read_timeout = ktime_add_ms(ktime_get(), + rgn->hpb->params.read_timeout_ms); + rgn->read_timeout_expiries = + 
rgn->hpb->params.read_timeout_expiries; + } + } + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (hpb->is_hcm && prev_srgn != srgn) { + bool activate = false; + + spin_lock(&rgn->rgn_lock); + if (set_dirty) { + rgn->reads -= srgn->reads; + srgn->reads = 0; + set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + } else { + srgn->reads++; + rgn->reads++; + if (srgn->reads == hpb->params.activation_thld) + activate = true; + } + spin_unlock(&rgn->rgn_lock); + + if (activate || + test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate region %d-%d\n", rgn_idx, srgn_idx); + } + + prev_srgn = srgn; + } + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= set_bit_len; + if (cnt > 0) + goto next_srgn; +} + +static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx, int srgn_offset, int cnt) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int bitmap_len; + int bit_len; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + + if (!ufshpb_is_valid_srgn(rgn, srgn)) + return true; + + /* + * If the region state is active, mctx must be allocated. + * In this case, check whether the region is evicted or + * mctx allocation fail. + */ + if (unlikely(!srgn->mctx)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + return true; + } + + if ((srgn_offset + cnt) > bitmap_len) + bit_len = bitmap_len - srgn_offset; + else + bit_len = cnt; + + if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset, + srgn_offset) < bit_len + srgn_offset) + return true; + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= bit_len; + if (cnt > 0) + goto next_srgn; + + return false; +} + +static inline bool is_rgn_dirty(struct ufshpb_region *rgn) +{ + return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); +} + +static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx, int pos, + int len, __be64 *ppn_buf) +{ + struct page *page; + int index, offset; + int copied; + + index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE); + offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE); + + if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE)) + copied = len; + else + copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset; + + page = mctx->m_page[index]; + if (unlikely(!page)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "error. 
cannot find page in mctx\n");
+ return -ENOMEM;
+ }
+
+ memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
+ copied * HPB_ENTRY_SIZE);
+
+ return copied;
+}
+
+static void
+ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
+ int *srgn_idx, int *offset)
+{
+ int rgn_offset;
+
+ *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
+ rgn_offset = lpn & hpb->entries_per_rgn_mask;
+ *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
+ *offset = rgn_offset & hpb->entries_per_srgn_mask;
+}
+
+static void
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ __be64 ppn, u8 transfer_len)
+{
+ unsigned char *cdb = lrbp->cmd->cmnd;
+ __be64 ppn_tmp = ppn;
+
+ cdb[0] = UFSHPB_READ;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
+ ppn_tmp = (__force __be64)swab64((__force u64)ppn);
+
+ /* ppn value is stored as big-endian in the host memory */
+ memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
+ cdb[14] = transfer_len;
+ cdb[15] = 0;
+
+ lrbp->cmd->cmd_len = UFS_CDB_SIZE;
+}
+
+/*
+ * This function will set up the HPB read command using host-side L2P map
+ * data.
+ */
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshpb_lu *hpb;
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ u32 lpn;
+ __be64 ppn;
+ unsigned long flags;
+ int transfer_len, rgn_idx, srgn_idx, srgn_offset;
+ int err = 0;
+
+ hpb = ufshpb_get_hpb_data(cmd->device);
+ if (!hpb)
+ return -ENODEV;
+
+ if (ufshpb_get_state(hpb) == HPB_INIT)
+ return -ENODEV;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT", __func__);
+ return -ENODEV;
+ }
+
+ if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
+ (!ufshpb_is_write_or_discard(cmd) &&
+ !ufshpb_is_read_cmd(cmd)))
+ return 0;
+
+ transfer_len = sectors_to_logical(cmd->device,
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)));
+ if (unlikely(!transfer_len))
+ return 0;
+
+ lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
+ ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ /* If the command type is WRITE or DISCARD, set the bitmap as dirty */
+ if (ufshpb_is_write_or_discard(cmd)) {
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, true);
+ return 0;
+ }
+
+ if (!ufshpb_is_supported_chunk(hpb, transfer_len))
+ return 0;
+
+ if (hpb->is_hcm) {
+ /*
+ * In host control mode, reads are the main source for
+ * activation trials.
+ */
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, false);
+
+ /* keep those counters normalized */
+ if (rgn->reads > hpb->entries_per_srgn)
+ schedule_work(&hpb->ufshpb_normalization_work);
+ }
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len)) {
+ hpb->stats.miss_cnt++;
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return 0;
+ }
+
+ err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ if (unlikely(err < 0)) {
+ /*
+ * In this case, the region state is active,
+ * but the ppn table is not allocated.
+ * Make sure that the ppn table is allocated in the
+ * active state.
+ */
+ dev_err(hba->dev, "get ppn failed.
err %d\n", err); + return err; + } + + ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len); + + hpb->stats.hit_cnt++; + return 0; +} + +static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx, + enum req_op op, bool atomic) +{ + struct ufshpb_req *rq; + struct request *req; + int retries = HPB_MAP_REQ_RETRIES; + + rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); + if (!rq) + return NULL; + +retry: + req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op, + BLK_MQ_REQ_NOWAIT); + + if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { + usleep_range(3000, 3100); + goto retry; + } + + if (IS_ERR(req)) + goto free_rq; + + rq->hpb = hpb; + rq->req = req; + rq->rb.rgn_idx = rgn_idx; + + return rq; + +free_rq: + kmem_cache_free(hpb->map_req_cache, rq); + return NULL; +} + +static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) +{ + blk_mq_free_request(rq->req); + kmem_cache_free(hpb->map_req_cache, rq); +} + +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + struct bio *bio; + unsigned long flags; + + if (hpb->is_hcm && + hpb->num_inflight_map_req >= hpb->params.inflight_map_req) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, + "map_req throttle. inflight %d throttle %d", + hpb->num_inflight_map_req, + hpb->params.inflight_map_req); + return NULL; + } + + map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false); + if (!map_req) + return NULL; + + bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL); + if (!bio) { + ufshpb_put_req(hpb, map_req); + return NULL; + } + + map_req->bio = bio; + + map_req->rb.srgn_idx = srgn->srgn_idx; + map_req->rb.mctx = srgn->mctx; + + spin_lock_irqsave(&hpb->param_lock, flags); + hpb->num_inflight_map_req++; + spin_unlock_irqrestore(&hpb->param_lock, flags); + + return map_req; +} + +static void ufshpb_put_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + unsigned long flags; + + bio_put(map_req->bio); + ufshpb_put_req(hpb, map_req); + + spin_lock_irqsave(&hpb->param_lock, flags); + hpb->num_inflight_map_req--; + spin_unlock_irqrestore(&hpb->param_lock, flags); +} + +static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + u32 num_entries = hpb->entries_per_srgn; + + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + return -1; + } + + if (unlikely(srgn->is_last)) + num_entries = hpb->last_srgn_entries; + + bitmap_zero(srgn->mctx->ppn_dirty, num_entries); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + + return 0; +} + +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + list_del_init(&rgn->list_inact_rgn); + + if (list_empty(&srgn->list_act_srgn)) + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + + hpb->stats.rcmd_active_cnt++; +} + +static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int srgn_idx; + + rgn = hpb->rgn_tbl + rgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + list_del_init(&srgn->list_act_srgn); + + if (list_empty(&rgn->list_inact_rgn)) + list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); + + 
hpb->stats.rcmd_inactive_cnt++; +} + +static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + /* + * If there is no mctx in subregion + * after I/O progress for HPB_READ_BUFFER, the region to which the + * subregion belongs was evicted. + * Make sure the region must not evict in I/O progress + */ + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "region %d subregion %d evicted\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + srgn->srgn_state = HPB_SRGN_VALID; +} + +static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data; + + ufshpb_put_req(umap_req->hpb, umap_req); +} + +static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; + struct ufshpb_lu *hpb = map_req->hpb; + struct ufshpb_subregion *srgn; + unsigned long flags; + + srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl + + map_req->rb.srgn_idx; + + ufshpb_clear_dirty_bitmap(hpb, srgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_activate_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + ufshpb_put_map_req(map_req->hpb, map_req); +} + +static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn) +{ + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID : + UFSHPB_WRITE_BUFFER_INACT_ALL_ID; + if (rgn) + put_unaligned_be16(rgn->rgn_idx, &cdb[2]); + cdb[9] = 0x00; +} + +static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, + int srgn_idx, int srgn_mem_size) +{ + cdb[0] = UFSHPB_READ_BUFFER; + cdb[1] = UFSHPB_READ_BUFFER_ID; + + put_unaligned_be16(rgn_idx, &cdb[2]); + put_unaligned_be16(srgn_idx, &cdb[4]); + put_unaligned_be24(srgn_mem_size, &cdb[6]); + + cdb[9] = 0x00; +} + +static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_req *umap_req, + struct ufshpb_region *rgn) +{ + struct request *req = umap_req->req; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); + + req->timeout = 0; + req->end_io_data = umap_req; + req->end_io = ufshpb_umap_req_compl_fn; + + ufshpb_set_unmap_cmd(scmd->cmnd, rgn); + scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(req, true); + + hpb->stats.umap_req_cnt++; +} + +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req, bool last) +{ + struct request_queue *q; + struct request *req; + struct scsi_cmnd *scmd; + int mem_size = hpb->srgn_mem_size; + int ret = 0; + int i; + + q = hpb->sdev_ufs_lu->request_queue; + for (i = 0; i < hpb->pages_per_srgn; i++) { + ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i], + PAGE_SIZE, 0); + if (ret != PAGE_SIZE) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "bio_add_pc_page fail %d - %d\n", + map_req->rb.rgn_idx, map_req->rb.srgn_idx); + return ret; + } + } + + req = map_req->req; + + blk_rq_append_bio(req, map_req->bio); + + req->end_io_data = map_req; + req->end_io = ufshpb_map_req_compl_fn; + + if (unlikely(last)) + mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE; + + scmd = blk_mq_rq_to_pdu(req); + 
ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx, + map_req->rb.srgn_idx, mem_size); + scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(req, true); + + hpb->stats.map_req_cnt++; + return 0; +} + +static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb, + bool last) +{ + struct ufshpb_map_ctx *mctx; + u32 num_entries = hpb->entries_per_srgn; + int i, j; + + mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL); + if (!mctx) + return NULL; + + mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL); + if (!mctx->m_page) + goto release_mctx; + + if (unlikely(last)) + num_entries = hpb->last_srgn_entries; + + mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL); + if (!mctx->ppn_dirty) + goto release_m_page; + + for (i = 0; i < hpb->pages_per_srgn; i++) { + mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL); + if (!mctx->m_page[i]) { + for (j = 0; j < i; j++) + mempool_free(mctx->m_page[j], ufshpb_page_pool); + goto release_ppn_dirty; + } + clear_page(page_address(mctx->m_page[i])); + } + + return mctx; + +release_ppn_dirty: + bitmap_free(mctx->ppn_dirty); +release_m_page: + kmem_cache_free(hpb->m_page_cache, mctx->m_page); +release_mctx: + mempool_free(mctx, ufshpb_mctx_pool); + return NULL; +} + +static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx) +{ + int i; + + for (i = 0; i < hpb->pages_per_srgn; i++) + mempool_free(mctx->m_page[i], ufshpb_page_pool); + + bitmap_free(mctx->ppn_dirty); + kmem_cache_free(hpb->m_page_cache, mctx->m_page); + mempool_free(mctx, ufshpb_mctx_pool); +} + +static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state == HPB_SRGN_ISSUED) + return -EPERM; + + return 0; +} + +static void ufshpb_read_to_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_read_to_work.work); + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *next_rgn; + unsigned long flags; + unsigned int poll; + LIST_HEAD(expired_list); + + if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits)) + return; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn, + list_lru_rgn) { + bool timedout = ktime_after(ktime_get(), rgn->read_timeout); + + if (timedout) { + rgn->read_timeout_expiries--; + if (is_rgn_dirty(rgn) || + rgn->read_timeout_expiries == 0) + list_add(&rgn->list_expired_rgn, &expired_list); + else + rgn->read_timeout = ktime_add_ms(ktime_get(), + hpb->params.read_timeout_ms); + } + } + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &expired_list, + list_expired_rgn) { + list_del_init(&rgn->list_expired_rgn); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + ufshpb_kick_map_work(hpb); + + clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits); + + poll = hpb->params.timeout_polling_interval_ms; + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); +} + +static void ufshpb_add_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + rgn->rgn_state = HPB_RGN_ACTIVE; + list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); + atomic_inc(&lru_info->active_cnt); + if (rgn->hpb->is_hcm) 
{ + rgn->read_timeout = + ktime_add_ms(ktime_get(), + rgn->hpb->params.read_timeout_ms); + rgn->read_timeout_expiries = + rgn->hpb->params.read_timeout_expiries; + } +} + +static void ufshpb_hit_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); +} + +static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *victim_rgn = NULL; + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { + if (ufshpb_check_srgns_issue_state(hpb, rgn)) + continue; + + /* + * in host control mode, verify that the exiting region + * has fewer reads + */ + if (hpb->is_hcm && + rgn->reads > hpb->params.eviction_thld_exit) + continue; + + victim_rgn = rgn; + break; + } + + if (!victim_rgn) + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: no region allocated\n", + __func__); + + return victim_rgn; +} + +static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_del_init(&rgn->list_lru_rgn); + rgn->rgn_state = HPB_RGN_INACTIVE; + atomic_dec(&lru_info->active_cnt); +} + +static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + ufshpb_put_map_ctx(hpb, srgn->mctx); + srgn->srgn_state = HPB_SRGN_UNUSED; + srgn->mctx = NULL; + } +} + +static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + bool atomic) +{ + struct ufshpb_req *umap_req; + int rgn_idx = rgn ? rgn->rgn_idx : 0; + + umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic); + if (!umap_req) + return -ENOMEM; + + ufshpb_execute_umap_req(hpb, umap_req, rgn); + + return 0; +} + +static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + return ufshpb_issue_umap_req(hpb, rgn, true); +} + +static void __ufshpb_evict_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct victim_select_info *lru_info; + struct ufshpb_subregion *srgn; + int srgn_idx; + + lru_info = &hpb->lru_info; + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); + + ufshpb_cleanup_lru_info(lru_info, rgn); + + for_each_sub_region(rgn, srgn_idx, srgn) + ufshpb_purge_active_subregion(hpb, srgn); +} + +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (rgn->rgn_state == HPB_RGN_PINNED) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "pinned region cannot drop-out. 
region %d\n", + rgn->rgn_idx); + goto out; + } + + if (!list_empty(&rgn->list_lru_rgn)) { + if (ufshpb_check_srgns_issue_state(hpb, rgn)) { + ret = -EBUSY; + goto out; + } + + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + ret = ufshpb_issue_umap_single_req(hpb, rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (ret) + goto out; + } + + __ufshpb_evict_region(hpb, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + unsigned long flags; + int ret; + int err = -EAGAIN; + bool alloc_required = false; + enum HPB_SRGN_STATE state = HPB_SRGN_INVALID; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + goto unlock_out; + } + + if ((rgn->rgn_state == HPB_RGN_INACTIVE) && + (srgn->srgn_state == HPB_SRGN_INVALID)) { + err = 0; + goto unlock_out; + } + + if (srgn->srgn_state == HPB_SRGN_UNUSED) + alloc_required = true; + + /* + * If the subregion is already in ISSUED state, a specific event + * (e.g. GC or wear-leveling) occurred in the device and an HPB + * response for map loading was received. In this case, after the + * current HPB_READ_BUFFER finishes, the next HPB_READ_BUFFER is + * performed again to obtain the latest map data. + */ + if (srgn->srgn_state == HPB_SRGN_ISSUED) + goto unlock_out; + + srgn->srgn_state = HPB_SRGN_ISSUED; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (alloc_required) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "get map_ctx failed. region %d - %d\n", + rgn->rgn_idx, srgn->srgn_idx); + state = HPB_SRGN_UNUSED; + goto change_srgn_state; + } + } + + map_req = ufshpb_get_map_req(hpb, srgn); + if (!map_req) + goto change_srgn_state; + + ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: issue map_req failed: %d, region %d - %d\n", + __func__, ret, srgn->rgn_idx, srgn->srgn_idx); + goto free_map_req; + } + return 0; + +free_map_req: + ufshpb_put_map_req(hpb, map_req); +change_srgn_state: + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + srgn->srgn_state = state; +unlock_out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return err; +} + +static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + struct ufshpb_region *victim_rgn = NULL; + struct victim_select_info *lru_info = &hpb->lru_info; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + /* + * If the region already belongs to the lru_list, just move it to the + * most-recently-used end (tail) of the list, because the region is + * already in the active state. + */ + if (!list_empty(&rgn->list_lru_rgn)) { + ufshpb_hit_lru_info(lru_info, rgn); + goto out; + } + + if (rgn->rgn_state == HPB_RGN_INACTIVE) { + if (atomic_read(&lru_info->active_cnt) == + lru_info->max_lru_active_cnt) { + /* + * If the maximum number of active regions + * is exceeded, evict the least recently used region. + * This case may occur when the device responds + * to the eviction information late. 
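+ * (i.e. the device may still recommend activating new regions + * before it has processed the host's earlier inactivation requests). 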
+ * It is okay to evict the least recently used region, + * because the device can detect that this region is no + * longer active once the host stops issuing HPB_READ to it. + * + * In host control mode, also verify that the entering + * region has enough reads. + */ + if (hpb->is_hcm && + rgn->reads < hpb->params.eviction_thld_enter) { + ret = -EACCES; + goto out; + } + + victim_rgn = ufshpb_victim_lru_info(hpb); + if (!victim_rgn) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "cannot get victim region %s\n", + hpb->is_hcm ? "" : "error"); + ret = -ENOMEM; + goto out; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "LRU full (%d), choose victim %d\n", + atomic_read(&lru_info->active_cnt), + victim_rgn->rgn_idx); + + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, + flags); + ret = ufshpb_issue_umap_single_req(hpb, + victim_rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, + flags); + if (ret) + goto out; + } + + __ufshpb_evict_region(hpb, victim_rgn); + } + + /* + * When a region is added to the lru_info list_head, it is + * guaranteed that all of its subregions have been assigned an + * mctx. If that failed, try to receive the mctx again without + * adding the region to the lru_info list_head. + */ + ufshpb_add_lru_info(lru_info, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} +/** + * ufshpb_submit_region_inactive() - submit a region to be inactivated later + * @hpb: per-LU HPB instance + * @region_index: the index of the region that will be inactivated later + */ +static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index) +{ + int subregion_index; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + /* + * Remove this region from the active region list and add it to the + * inactive list. + */ + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, region_index); + spin_unlock(&hpb->rsp_list_lock); + + rgn = hpb->rgn_tbl + region_index; + + /* + * Set the subregion states to HPB_SRGN_INVALID; there will be no HPB + * read on those subregions. + */ + spin_lock(&hpb->rgn_state_lock); + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) { + srgn = rgn->srgn_tbl + subregion_index; + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + } + } + spin_unlock(&hpb->rgn_state_lock); +} + +static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, + struct utp_hpb_rsp *rsp_field) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int i, rgn_i, srgn_i; + + BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE); + /* + * If the active region and the inactive region are the same, + * we will inactivate this region. The device can check this + * (the region was inactivated) and will respond with the proper + * active region information. + */ + for (i = 0; i < rsp_field->active_rgn_cnt; i++) { + rgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn); + srgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn); + + rgn = hpb->rgn_tbl + rgn_i; + if (hpb->is_hcm && + (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) { + /* + * In host control mode, subregion activation + * recommendations are only allowed for active regions. 
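+ * Inactive regions become active again only through the host's + * own read statistics, not through a device recommendation. 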
+ * Also, ignore recommendations for dirty regions - the + * host will make decisions concerning those by itself. + */ + continue; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate(%d) region %d - %d\n", i, rgn_i, srgn_i); + + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_active_info(hpb, rgn_i, srgn_i); + spin_unlock(&hpb->rsp_list_lock); + + srgn = rgn->srgn_tbl + srgn_i; + + /* blocking HPB_READ */ + spin_lock(&hpb->rgn_state_lock); + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + spin_unlock(&hpb->rgn_state_lock); + } + + if (hpb->is_hcm) { + /* + * In host control mode the device is not allowed to + * inactivate regions. + */ + goto out; + } + + for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) { + rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i); + ufshpb_submit_region_inactive(hpb, rgn_i); + } + +out: + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", + rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); + + if (ufshpb_get_state(hpb) == HPB_PRESENT) + queue_work(ufshpb_wq, &hpb->map_work); +} + +/* + * Set the RGN_FLAG_UPDATE flag on all active regions so that the host side + * reloads their L2P entries later. + */ +static void ufshpb_set_regions_update(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn; + unsigned long flags; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) + set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags); + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); +} + +static void ufshpb_dev_reset_handler(struct ufs_hba *hba) +{ + struct scsi_device *sdev; + struct ufshpb_lu *hpb; + + __shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (hpb->is_hcm) { + /* + * In HPB host control mode, if the device powered up and + * lost its HPB information, set the RGN_FLAG_UPDATE flag on + * each active region; this makes the host reload its L2P + * entries (reactivating the regions in the UFS device). + */ + ufshpb_set_regions_update(hpb); + } else { + /* + * In HPB device control mode, receiving 02h:HPB Operation + * in the response UPIU means the device recommends that the + * host inactivate all active regions. Add all active regions + * to the inactive list here; they will be inactivated later + * in ufshpb_map_work_handler(). + */ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn; + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) + ufshpb_submit_region_inactive(hpb, rgn->rgn_idx); + + if (ufshpb_get_state(hpb) == HPB_PRESENT) + queue_work(ufshpb_wq, &hpb->map_work); + } + } +} + +/* + * This function parses the recommended active subregion information carried + * in the sense data field of a response UPIU with SAM_STAT_GOOD status. 
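+ * The HPB operation code in that response selects the handling below: + * a region activation/inactivation update or a device-reset notification. 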
+ */ +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device); + struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr; + int data_seg_len; + + data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) + & MASK_RSP_UPIU_DATA_SEG_LEN; + + /* If data segment length is zero, rsp_field is not valid */ + if (!data_seg_len) + return; + + if (unlikely(lrbp->lun != rsp_field->lun)) { + struct scsi_device *sdev; + bool found = false; + + __shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb) + continue; + + if (rsp_field->lun == hpb->lun) { + found = true; + break; + } + } + + if (!found) + return; + } + + if (!hpb) + return; + + if (ufshpb_get_state(hpb) == HPB_INIT) + return; + + if ((ufshpb_get_state(hpb) != HPB_PRESENT) && + (ufshpb_get_state(hpb) != HPB_SUSPEND)) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT/SUSPEND\n", + __func__); + return; + } + + BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE); + + if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field)) + return; + + hpb->stats.rcmd_noti_cnt++; + + switch (rsp_field->hpb_op) { + case HPB_RSP_REQ_REGION_UPDATE: + if (data_seg_len != DEV_DATA_SEG_LEN) + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "%s: data seg length is not same.\n", + __func__); + ufshpb_rsp_req_region_update(hpb, rsp_field); + break; + case HPB_RSP_DEV_RESET: + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "UFS device lost HPB information during PM.\n"); + ufshpb_dev_reset_handler(hba); + + break; + default: + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "hpb_op is not available: %d\n", + rsp_field->hpb_op); + break; + } +} + +static void ufshpb_add_active_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + if (!list_empty(&rgn->list_inact_rgn)) + return; + + if (!list_empty(&srgn->list_act_srgn)) { + list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); + return; + } + + list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct list_head *pending_list) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + if (!list_empty(&rgn->list_inact_rgn)) + return; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (!list_empty(&srgn->list_act_srgn)) + return; + + list_add_tail(&rgn->list_inact_rgn, pending_list); +} + +static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, + struct ufshpb_subregion, + list_act_srgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + ret = ufshpb_add_region(hpb, rgn); + if (ret) + goto active_failed; + + ret = ufshpb_issue_map_req(hpb, rgn, srgn); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "issue map_req failed. 
ret %d, region %d - %d\n", + ret, rgn->rgn_idx, srgn->srgn_idx); + goto active_failed; + } + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + return; + +active_failed: + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n", + rgn->rgn_idx, srgn->srgn_idx); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_active_list(hpb, rgn, srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + unsigned long flags; + int ret; + LIST_HEAD(pending_list); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, + struct ufshpb_region, + list_inact_rgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&rgn->list_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + ret = ufshpb_evict_region(hpb, rgn); + if (ret) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + list_splice(&pending_list, &hpb->lh_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_normalization_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_normalization_work); + int rgn_idx; + u8 factor = hpb->params.normalization_factor; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; + int srgn_idx; + + spin_lock(&rgn->rgn_lock); + rgn->reads = 0; + for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) { + struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx; + + srgn->reads >>= factor; + rgn->reads += srgn->reads; + } + spin_unlock(&rgn->rgn_lock); + + if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads) + continue; + + /* if a region is active but has no reads - inactivate it */ + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock(&hpb->rsp_list_lock); + } +} + +static void ufshpb_map_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + return; + } + + ufshpb_run_inactive_region_list(hpb); + ufshpb_run_active_subregion_list(hpb); +} + +/* + * This function does not need to hold any locks (rgn_state_lock, + * rsp_list_lock, etc.) because it is only called during initialization. 
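+ * (Nothing else can touch this LU's HPB structures at that point.) 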
+ */ +static int ufshpb_init_pinned_active_region(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx, i; + int err = 0; + + for_each_sub_region(rgn, srgn_idx, srgn) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + srgn->srgn_state = HPB_SRGN_INVALID; + if (!srgn->mctx) { + err = -ENOMEM; + dev_err(hba->dev, + "alloc mctx for pinned region failed\n"); + goto release; + } + + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + } + + rgn->rgn_state = HPB_RGN_PINNED; + return 0; + +release: + for (i = 0; i < srgn_idx; i++) { + srgn = rgn->srgn_tbl + i; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + return err; +} + +static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, bool last) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) { + INIT_LIST_HEAD(&srgn->list_act_srgn); + + srgn->rgn_idx = rgn->rgn_idx; + srgn->srgn_idx = srgn_idx; + srgn->srgn_state = HPB_SRGN_UNUSED; + } + + if (unlikely(last && hpb->last_srgn_entries)) + srgn->is_last = true; +} + +static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, int srgn_cnt) +{ + rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion), + GFP_KERNEL); + if (!rgn->srgn_tbl) + return -ENOMEM; + + rgn->srgn_cnt = srgn_cnt; + return 0; +} + +static void ufshpb_lu_parameter_init(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + u32 entries_per_rgn; + u64 rgn_mem_size, tmp; + + if (ufshpb_is_legacy(hba)) + hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; + else + hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd; + + hpb->lu_pinned_start = hpb_lu_info->pinned_start; + hpb->lu_pinned_end = hpb_lu_info->num_pinned ? 
+ (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) + : PINNED_NOT_SET; + hpb->lru_info.max_lru_active_cnt = + hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned; + + rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT + * HPB_ENTRY_SIZE; + do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE); + hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size) + * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE; + + tmp = rgn_mem_size; + do_div(tmp, HPB_ENTRY_SIZE); + entries_per_rgn = (u32)tmp; + hpb->entries_per_rgn_shift = ilog2(entries_per_rgn); + hpb->entries_per_rgn_mask = entries_per_rgn - 1; + + hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; + hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn); + hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; + + tmp = rgn_mem_size; + do_div(tmp, hpb->srgn_mem_size); + hpb->srgns_per_rgn = (int)tmp; + + hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + entries_per_rgn); + hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); + hpb->last_srgn_entries = hpb_lu_info->num_blocks + % (hpb->srgn_mem_size / HPB_ENTRY_SIZE); + + hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); + + if (hpb_dev_info->control_mode == HPB_HOST_CONTROL) + hpb->is_hcm = true; +} + +static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn_table, *rgn; + int rgn_idx, i; + int ret = 0; + + rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region), + GFP_KERNEL); + if (!rgn_table) + return -ENOMEM; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + int srgn_cnt = hpb->srgns_per_rgn; + bool last_srgn = false; + + rgn = rgn_table + rgn_idx; + rgn->rgn_idx = rgn_idx; + + spin_lock_init(&rgn->rgn_lock); + + INIT_LIST_HEAD(&rgn->list_inact_rgn); + INIT_LIST_HEAD(&rgn->list_lru_rgn); + INIT_LIST_HEAD(&rgn->list_expired_rgn); + + if (rgn_idx == hpb->rgns_per_lu - 1) { + srgn_cnt = ((hpb->srgns_per_lu - 1) % + hpb->srgns_per_rgn) + 1; + last_srgn = true; + } + + ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); + if (ret) + goto release_srgn_table; + ufshpb_init_subregion_tbl(hpb, rgn, last_srgn); + + if (ufshpb_is_pinned_region(hpb, rgn_idx)) { + ret = ufshpb_init_pinned_active_region(hba, hpb, rgn); + if (ret) + goto release_srgn_table; + } else { + rgn->rgn_state = HPB_RGN_INACTIVE; + } + + rgn->rgn_flags = 0; + rgn->hpb = hpb; + } + + hpb->rgn_tbl = rgn_table; + + return 0; + +release_srgn_table: + for (i = 0; i <= rgn_idx; i++) + kvfree(rgn_table[i].srgn_tbl); + + kvfree(rgn_table); + return ret; +} + +static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + srgn->srgn_state = HPB_SRGN_UNUSED; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } +} + +static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) +{ + int rgn_idx; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn; + + rgn = hpb->rgn_tbl + rgn_idx; + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + rgn->rgn_state = HPB_RGN_INACTIVE; + + ufshpb_destroy_subregion_tbl(hpb, rgn); + } + + kvfree(rgn->srgn_tbl); + } + + kvfree(hpb->rgn_tbl); +} + +/* SYSFS functions */ +#define ufshpb_sysfs_attr_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct 
scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \ +} \ +\ +static DEVICE_ATTR_RO(__name) + +ufshpb_sysfs_attr_show_func(hit_cnt); +ufshpb_sysfs_attr_show_func(miss_cnt); +ufshpb_sysfs_attr_show_func(rcmd_noti_cnt); +ufshpb_sysfs_attr_show_func(rcmd_active_cnt); +ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt); +ufshpb_sysfs_attr_show_func(map_req_cnt); +ufshpb_sysfs_attr_show_func(umap_req_cnt); + +static struct attribute *hpb_dev_stat_attrs[] = { + &dev_attr_hit_cnt.attr, + &dev_attr_miss_cnt.attr, + &dev_attr_rcmd_noti_cnt.attr, + &dev_attr_rcmd_active_cnt.attr, + &dev_attr_rcmd_inactive_cnt.attr, + &dev_attr_map_req_cnt.attr, + &dev_attr_umap_req_cnt.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_stat_group = { + .name = "hpb_stats", + .attrs = hpb_dev_stat_attrs, +}; + +/* SYSFS functions */ +#define ufshpb_sysfs_param_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%d\n", hpb->params.__name); \ +} + +ufshpb_sysfs_param_show_func(requeue_timeout_ms); +static ssize_t +requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val < 0) + return -EINVAL; + + hpb->params.requeue_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(requeue_timeout_ms); + +ufshpb_sysfs_param_show_func(activation_thld); +static ssize_t +activation_thld_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.activation_thld = val; + + return count; +} +static DEVICE_ATTR_RW(activation_thld); + +ufshpb_sysfs_param_show_func(normalization_factor); +static ssize_t +normalization_factor_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > ilog2(hpb->entries_per_srgn)) + return -EINVAL; + + hpb->params.normalization_factor = val; + + return count; +} +static DEVICE_ATTR_RW(normalization_factor); + +ufshpb_sysfs_param_show_func(eviction_thld_enter); +static ssize_t +eviction_thld_enter_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.eviction_thld_exit) + return -EINVAL; + + hpb->params.eviction_thld_enter = val; + + 
return count; +} +static DEVICE_ATTR_RW(eviction_thld_enter); + +ufshpb_sysfs_param_show_func(eviction_thld_exit); +static ssize_t +eviction_thld_exit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.activation_thld) + return -EINVAL; + + hpb->params.eviction_thld_exit = val; + + return count; +} +static DEVICE_ATTR_RW(eviction_thld_exit); + +ufshpb_sysfs_param_show_func(read_timeout_ms); +static ssize_t +read_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* read_timeout >> timeout_polling_interval */ + if (val < hpb->params.timeout_polling_interval_ms * 2) + return -EINVAL; + + hpb->params.read_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_ms); + +ufshpb_sysfs_param_show_func(read_timeout_expiries); +static ssize_t +read_timeout_expiries_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.read_timeout_expiries = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_expiries); + +ufshpb_sysfs_param_show_func(timeout_polling_interval_ms); +static ssize_t +timeout_polling_interval_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* timeout_polling_interval << read_timeout */ + if (val <= 0 || val > hpb->params.read_timeout_ms / 2) + return -EINVAL; + + hpb->params.timeout_polling_interval_ms = val; + + return count; +} +static DEVICE_ATTR_RW(timeout_polling_interval_ms); + +ufshpb_sysfs_param_show_func(inflight_map_req); +static ssize_t inflight_map_req_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1) + return -EINVAL; + + hpb->params.inflight_map_req = val; + + return count; +} +static DEVICE_ATTR_RW(inflight_map_req); + +static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb) +{ + hpb->params.activation_thld = ACTIVATION_THRESHOLD; + hpb->params.normalization_factor = 1; + hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5); + hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4); + hpb->params.read_timeout_ms = READ_TO_MS; + hpb->params.read_timeout_expiries = 
READ_TO_EXPIRIES; + hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS; + hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT; +} + +static struct attribute *hpb_dev_param_attrs[] = { + &dev_attr_requeue_timeout_ms.attr, + &dev_attr_activation_thld.attr, + &dev_attr_normalization_factor.attr, + &dev_attr_eviction_thld_enter.attr, + &dev_attr_eviction_thld_exit.attr, + &dev_attr_read_timeout_ms.attr, + &dev_attr_read_timeout_expiries.attr, + &dev_attr_timeout_polling_interval_ms.attr, + &dev_attr_inflight_map_req.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_param_group = { + .name = "hpb_params", + .attrs = hpb_dev_param_attrs, +}; + +static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req = NULL, *t; + int qd = hpb->sdev_ufs_lu->queue_depth / 2; + int i; + + INIT_LIST_HEAD(&hpb->lh_pre_req_free); + + hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL); + hpb->throttle_pre_req = qd; + hpb->num_inflight_pre_req = 0; + + if (!hpb->pre_req) + goto release_mem; + + for (i = 0; i < qd; i++) { + pre_req = hpb->pre_req + i; + INIT_LIST_HEAD(&pre_req->list_req); + pre_req->req = NULL; + + pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL); + if (!pre_req->bio) + goto release_mem; + + pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pre_req->wb.m_page) { + bio_put(pre_req->bio); + goto release_mem; + } + + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); + } + + return 0; +release_mem: + list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) { + list_del_init(&pre_req->list_req); + bio_put(pre_req->bio); + __free_page(pre_req->wb.m_page); + } + + kfree(hpb->pre_req); + return -ENOMEM; +} + +static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req = NULL; + int i; + + for (i = 0; i < hpb->throttle_pre_req; i++) { + pre_req = hpb->pre_req + i; + bio_put(hpb->pre_req[i].bio); + if (pre_req->wb.m_page) + __free_page(hpb->pre_req[i].wb.m_page); + list_del_init(&pre_req->list_req); + } + + kfree(hpb->pre_req); +} + +static void ufshpb_stat_init(struct ufshpb_lu *hpb) +{ + hpb->stats.hit_cnt = 0; + hpb->stats.miss_cnt = 0; + hpb->stats.rcmd_noti_cnt = 0; + hpb->stats.rcmd_active_cnt = 0; + hpb->stats.rcmd_inactive_cnt = 0; + hpb->stats.map_req_cnt = 0; + hpb->stats.umap_req_cnt = 0; +} + +static void ufshpb_param_init(struct ufshpb_lu *hpb) +{ + hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS; + if (hpb->is_hcm) + ufshpb_hcm_param_init(hpb); +} + +static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + int ret; + + spin_lock_init(&hpb->rgn_state_lock); + spin_lock_init(&hpb->rsp_list_lock); + spin_lock_init(&hpb->param_lock); + + INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn); + INIT_LIST_HEAD(&hpb->lh_act_srgn); + INIT_LIST_HEAD(&hpb->lh_inact_rgn); + INIT_LIST_HEAD(&hpb->list_hpb_lu); + + INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); + if (hpb->is_hcm) { + INIT_WORK(&hpb->ufshpb_normalization_work, + ufshpb_normalization_work_handler); + INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work, + ufshpb_read_to_handler); + } + + hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", + sizeof(struct ufshpb_req), 0, 0, NULL); + if (!hpb->map_req_cache) { + dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail", + hpb->lun); + return -ENOMEM; + } + + hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache", + sizeof(struct page *) * hpb->pages_per_srgn, + 0, 0, NULL); + if (!hpb->m_page_cache) { + dev_err(hba->dev, 
"ufshpb(%d) ufshpb_m_page_cache create fail", + hpb->lun); + ret = -ENOMEM; + goto release_req_cache; + } + + ret = ufshpb_pre_req_mempool_init(hpb); + if (ret) { + dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail", + hpb->lun); + goto release_m_page_cache; + } + + ret = ufshpb_alloc_region_tbl(hba, hpb); + if (ret) + goto release_pre_req_mempool; + + ufshpb_stat_init(hpb); + ufshpb_param_init(hpb); + + if (hpb->is_hcm) { + unsigned int poll; + + poll = hpb->params.timeout_polling_interval_ms; + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); + } + + return 0; + +release_pre_req_mempool: + ufshpb_pre_req_mempool_destroy(hpb); +release_m_page_cache: + kmem_cache_destroy(hpb->m_page_cache); +release_req_cache: + kmem_cache_destroy(hpb->map_req_cache); + return ret; +} + +static struct ufshpb_lu * +ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + struct ufshpb_lu *hpb; + int ret; + + hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL); + if (!hpb) + return NULL; + + hpb->lun = sdev->lun; + hpb->sdev_ufs_lu = sdev; + + ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info); + + ret = ufshpb_lu_hpb_init(hba, hpb); + if (ret) { + dev_err(hba->dev, "hpb lu init failed. ret %d", ret); + goto release_hpb; + } + + sdev->hostdata = hpb; + return hpb; + +release_hpb: + kfree(hpb); + return NULL; +} + +static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn, *next_rgn; + struct ufshpb_subregion *srgn, *next_srgn; + unsigned long flags; + + /* + * If the device reset occurred, the remaining HPB region information + * may be stale. Therefore, by discarding the lists of HPB response + * that remained after reset, we prevent unnecessary work. 
+ */ + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, + list_inact_rgn) + list_del_init(&rgn->list_inact_rgn); + + list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn, + list_act_srgn) + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) +{ + if (hpb->is_hcm) { + cancel_delayed_work_sync(&hpb->ufshpb_read_to_work); + cancel_work_sync(&hpb->ufshpb_normalization_work); + } + cancel_work_sync(&hpb->map_work); +} + +static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) +{ + int err = 0; + bool flag_res = true; + int try; + + /* wait for the device to complete HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + dev_dbg(hba->dev, + "%s start flag reset polling %d times\n", + __func__, try); + + /* Poll fHpbReset flag to be cleared */ + err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res); + + if (err) { + dev_err(hba->dev, + "%s reading fHpbReset flag failed with error %d\n", + __func__, err); + return flag_res; + } + + if (!flag_res) + goto out; + + usleep_range(1000, 1100); + } + if (flag_res) { + dev_err(hba->dev, + "%s fHpbReset was not cleared by the device\n", + __func__); + } +out: + return flag_res; +} + +/** + * ufshpb_toggle_state - switch HPB state of all LUs + * @hba: per-adapter instance + * @src: expected current HPB state + * @dest: target HPB state to switch to + */ +void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb || ufshpb_get_state(hpb) != src) + continue; + ufshpb_set_state(hpb, dest); + + if (dest == HPB_RESET) { + ufshpb_cancel_jobs(hpb); + ufshpb_discard_rsp_lists(hpb); + } + } +} + +void ufshpb_suspend(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT) + continue; + + ufshpb_set_state(hpb, HPB_SUSPEND); + ufshpb_cancel_jobs(hpb); + } +} + +void ufshpb_resume(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND) + continue; + + ufshpb_set_state(hpb, HPB_PRESENT); + ufshpb_kick_map_work(hpb); + if (hpb->is_hcm) { + unsigned int poll = hpb->params.timeout_polling_interval_ms; + + schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll)); + } + } +} + +static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun, + struct ufshpb_lu_info *hpb_lu_info) +{ + u16 max_active_rgns; + u8 lu_enable; + int size; + int ret; + char desc_buf[QUERY_DESC_MAX_SIZE]; + + ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size); + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + QUERY_DESC_IDN_UNIT, lun, 0, + desc_buf, &size); + ufshcd_rpm_put_sync(hba); + + if (ret) { + dev_err(hba->dev, + "%s: idn: %d lun: %d query request failed", + __func__, QUERY_DESC_IDN_UNIT, lun); + return ret; + } + + lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE]; + if (lu_enable != LU_ENABLED_HPB_FUNC) + return -ENODEV; + + max_active_rgns = get_unaligned_be16( + desc_buf + 
UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS); + if (!max_active_rgns) { + dev_err(hba->dev, + "lun %d wrong number of max active regions\n", lun); + return -ENODEV; + } + + hpb_lu_info->num_blocks = get_unaligned_be64( + desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT); + hpb_lu_info->pinned_start = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF); + hpb_lu_info->num_pinned = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS); + hpb_lu_info->max_active_rgns = max_active_rgns; + + return 0; +} + +void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb) + return; + + ufshpb_set_state(hpb, HPB_FAILED); + + sdev = hpb->sdev_ufs_lu; + sdev->hostdata = NULL; + + ufshpb_cancel_jobs(hpb); + + ufshpb_pre_req_mempool_destroy(hpb); + ufshpb_destroy_region_tbl(hpb); + + kmem_cache_destroy(hpb->map_req_cache); + kmem_cache_destroy(hpb->m_page_cache); + + list_del_init(&hpb->list_hpb_lu); + + kfree(hpb); +} + +static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) +{ + int pool_size; + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + bool init_success; + + if (tot_active_srgn_pages == 0) { + ufshpb_remove(hba); + return; + } + + init_success = !ufshpb_check_hpb_reset_query(hba); + + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + if (pool_size > tot_active_srgn_pages) { + mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages); + mempool_resize(ufshpb_page_pool, tot_active_srgn_pages); + } + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (init_success) { + ufshpb_set_state(hpb, HPB_PRESENT); + if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) + queue_work(ufshpb_wq, &hpb->map_work); + } else { + dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); + ufshpb_destroy_lu(hba, sdev); + } + } + + if (!init_success) + ufshpb_remove(hba); +} + +void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb; + int ret; + struct ufshpb_lu_info hpb_lu_info = { 0 }; + int lun = sdev->lun; + + if (lun >= hba->dev_info.max_lu_supported) + goto out; + + ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info); + if (ret) + goto out; + + hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev, + &hpb_lu_info); + if (!hpb) + goto out; + + tot_active_srgn_pages += hpb_lu_info.max_active_rgns * + hpb->srgns_per_rgn * hpb->pages_per_srgn; + +out: + /* All LUs are initialized */ + if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt)) + ufshpb_hpb_lu_prepared(hba); +} + +static int ufshpb_init_mem_wq(struct ufs_hba *hba) +{ + int ret; + unsigned int pool_size; + + ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache", + sizeof(struct ufshpb_map_ctx), + 0, 0, NULL); + if (!ufshpb_mctx_cache) { + dev_err(hba->dev, "ufshpb: cannot init mctx cache\n"); + return -ENOMEM; + } + + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n", + __func__, __LINE__, ufshpb_host_map_kbytes, pool_size); + + ufshpb_mctx_pool = mempool_create_slab_pool(pool_size, + ufshpb_mctx_cache); + if (!ufshpb_mctx_pool) { + dev_err(hba->dev, "ufshpb: cannot init mctx pool\n"); + ret = -ENOMEM; + goto release_mctx_cache; + } + + ufshpb_page_pool = mempool_create_page_pool(pool_size, 0); + if (!ufshpb_page_pool) { + dev_err(hba->dev, "ufshpb: cannot init page pool\n"); + ret = -ENOMEM; + goto release_mctx_pool; + } + + ufshpb_wq = 
alloc_workqueue("ufshpb-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + if (!ufshpb_wq) { + dev_err(hba->dev, "ufshpb: alloc workqueue failed\n"); + ret = -ENOMEM; + goto release_page_pool; + } + + return 0; + +release_page_pool: + mempool_destroy(ufshpb_page_pool); +release_mctx_pool: + mempool_destroy(ufshpb_mctx_pool); +release_mctx_cache: + kmem_cache_destroy(ufshpb_mctx_cache); + return ret; +} + +void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) +{ + struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev; + int max_active_rgns = 0; + int hpb_num_lu; + + hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU]; + if (hpb_num_lu == 0) { + dev_err(hba->dev, "No HPB LU supported\n"); + hpb_info->hpb_disabled = true; + return; + } + + hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE]; + hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE]; + max_active_rgns = get_unaligned_be16(geo_buf + + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS); + + if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 || + max_active_rgns == 0) { + dev_err(hba->dev, "No HPB supported device\n"); + hpb_info->hpb_disabled = true; + return; + } +} + +void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int version, ret; + int max_single_cmd; + + hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; + + version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER); + if ((version != HPB_SUPPORT_VERSION) && + (version != HPB_SUPPORT_LEGACY_VERSION)) { + dev_err(hba->dev, "%s: HPB %x version is not supported.\n", + __func__, version); + hpb_dev_info->hpb_disabled = true; + return; + } + + if (version == HPB_SUPPORT_LEGACY_VERSION) + hpb_dev_info->is_legacy = true; + + /* + * Get the number of user logical unit to check whether all + * scsi_device finish initialization + */ + hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU]; + + if (hpb_dev_info->is_legacy) + return; + + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd); + + if (ret) + hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH; + else + hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH); +} + +void ufshpb_init(struct ufs_hba *hba) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int try; + int ret; + + if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled) + return; + + if (ufshpb_init_mem_wq(hba)) { + hpb_dev_info->hpb_disabled = true; + return; + } + + atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu); + tot_active_srgn_pages = 0; + /* issue HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, NULL); + if (!ret) + break; + } +} + +void ufshpb_remove(struct ufs_hba *hba) +{ + mempool_destroy(ufshpb_page_pool); + mempool_destroy(ufshpb_mctx_pool); + kmem_cache_destroy(ufshpb_mctx_cache); + + destroy_workqueue(ufshpb_wq); +} + +module_param(ufshpb_host_map_kbytes, uint, 0644); +MODULE_PARM_DESC(ufshpb_host_map_kbytes, + "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool"); diff --git a/drivers/ufs/core/ufshpb.h b/drivers/ufs/core/ufshpb.h new file mode 100644 index 0000000000..0d6e6004d7 --- /dev/null +++ b/drivers/ufs/core/ufshpb.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Universal Flash Storage Host Performance Booster + * + * Copyright (C) 2017-2021 
Samsung Electronics Co., Ltd. + * + * Authors: + * Yongmyung Lee + * Jinyoung Choi + */ + +#ifndef _UFSHPB_H_ +#define _UFSHPB_H_ + +/* hpb response UPIU macro */ +#define HPB_RSP_NONE 0x0 +#define HPB_RSP_REQ_REGION_UPDATE 0x1 +#define HPB_RSP_DEV_RESET 0x2 +#define MAX_ACTIVE_NUM 2 +#define MAX_INACTIVE_NUM 2 +#define DEV_DATA_SEG_LEN 0x14 +#define DEV_SENSE_SEG_LEN 0x12 +#define DEV_DES_TYPE 0x80 +#define DEV_ADDITIONAL_LEN 0x10 + +/* hpb map & entries macro */ +#define HPB_RGN_SIZE_UNIT 512 +#define HPB_ENTRY_BLOCK_SIZE 4096 +#define HPB_ENTRY_SIZE 0x8 +#define PINNED_NOT_SET U32_MAX + +/* hpb support chunk size */ +#define HPB_LEGACY_CHUNK_HIGH 1 +#define HPB_MULTI_CHUNK_HIGH 255 + +/* hpb vendor-defined opcodes */ +#define UFSHPB_READ 0xF8 +#define UFSHPB_READ_BUFFER 0xF9 +#define UFSHPB_READ_BUFFER_ID 0x01 +#define UFSHPB_WRITE_BUFFER 0xFA +#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01 +#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02 +#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03 +#define HPB_WRITE_BUFFER_CMD_LENGTH 10 +#define MAX_HPB_READ_ID 0x7F +#define HPB_READ_BUFFER_CMD_LENGTH 10 +#define LU_ENABLED_HPB_FUNC 0x02 + +#define HPB_RESET_REQ_RETRIES 10 +#define HPB_MAP_REQ_RETRIES 5 +#define HPB_REQUEUE_TIME_MS 0 + +#define HPB_SUPPORT_VERSION 0x200 +#define HPB_SUPPORT_LEGACY_VERSION 0x100 + +enum UFSHPB_MODE { + HPB_HOST_CONTROL, + HPB_DEVICE_CONTROL, +}; + +enum UFSHPB_STATE { + HPB_INIT, + HPB_PRESENT, + HPB_SUSPEND, + HPB_FAILED, + HPB_RESET, +}; + +enum HPB_RGN_STATE { + HPB_RGN_INACTIVE, + HPB_RGN_ACTIVE, + /* pinned regions are always active */ + HPB_RGN_PINNED, +}; + +enum HPB_SRGN_STATE { + HPB_SRGN_UNUSED, + HPB_SRGN_INVALID, + HPB_SRGN_VALID, + HPB_SRGN_ISSUED, +}; + +/** + * struct ufshpb_lu_info - UFSHPB logical unit related info + * @num_blocks: the number of logical blocks + * @pinned_start: the start region number of the pinned region + * @num_pinned: the number of pinned regions + * @max_active_rgns: maximum number of active regions + */ +struct ufshpb_lu_info { + int num_blocks; + int pinned_start; + int num_pinned; + int max_active_rgns; +}; + +struct ufshpb_map_ctx { + struct page **m_page; + unsigned long *ppn_dirty; +}; + +struct ufshpb_subregion { + struct ufshpb_map_ctx *mctx; + enum HPB_SRGN_STATE srgn_state; + int rgn_idx; + int srgn_idx; + bool is_last; + + /* subregion reads - for host mode */ + unsigned int reads; + + /* below information is used by rsp_list */ + struct list_head list_act_srgn; +}; + +struct ufshpb_region { + struct ufshpb_lu *hpb; + struct ufshpb_subregion *srgn_tbl; + enum HPB_RGN_STATE rgn_state; + int rgn_idx; + int srgn_cnt; + + /* below information is used by rsp_list */ + struct list_head list_inact_rgn; + + /* below information is used by lru */ + struct list_head list_lru_rgn; + unsigned long rgn_flags; +#define RGN_FLAG_DIRTY 0 +#define RGN_FLAG_UPDATE 1 + + /* region reads - for host mode */ + spinlock_t rgn_lock; + unsigned int reads; + /* region "cold" timer - for host mode */ + ktime_t read_timeout; + unsigned int read_timeout_expiries; + struct list_head list_expired_rgn; +}; + +#define for_each_sub_region(rgn, i, srgn) \ + for ((i) = 0; \ + ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \ + (i)++) + +/** + * struct ufshpb_req - HPB related request structure (write/read buffer) + * @req: block layer request structure + * @bio: bio for this request + * @hpb: ufshpb_lu structure that this request relates to + * @list_req: ufshpb_req mempool list + * @mctx: L2P map information + * @rgn_idx: 
target region index + * @srgn_idx: target sub-region index + * @lun: target logical unit number + * @m_page: L2P map information data for pre-request + * @len: length of host-side cached L2P map in m_page + * @lpn: start LPN of L2P map in m_page + */ +struct ufshpb_req { + struct request *req; + struct bio *bio; + struct ufshpb_lu *hpb; + struct list_head list_req; + union { + struct { + struct ufshpb_map_ctx *mctx; + unsigned int rgn_idx; + unsigned int srgn_idx; + unsigned int lun; + } rb; + struct { + struct page *m_page; + unsigned int len; + unsigned long lpn; + } wb; + }; +}; + +struct victim_select_info { + struct list_head lh_lru_rgn; /* LRU list of regions */ + int max_lru_active_cnt; /* supported hpb #region - pinned #region */ + atomic_t active_cnt; +}; + +/** + * ufshpb_params - ufs hpb parameters + * @requeue_timeout_ms - requeue threshold of wb command (0x2) + * @activation_thld - min reads [IOs] to activate/update a region + * @normalization_factor - shift right the region's reads + * @eviction_thld_enter - min reads [IOs] for the entering region in eviction + * @eviction_thld_exit - max reads [IOs] for the exiting region in eviction + * @read_timeout_ms - timeout [ms] from the last read IO to the region + * @read_timeout_expiries - number of allowed timeout expiries + * @timeout_polling_interval_ms - frequency at which timeouts are checked + * @inflight_map_req - number of inflight map requests + */ +struct ufshpb_params { + unsigned int requeue_timeout_ms; + unsigned int activation_thld; + unsigned int normalization_factor; + unsigned int eviction_thld_enter; + unsigned int eviction_thld_exit; + unsigned int read_timeout_ms; + unsigned int read_timeout_expiries; + unsigned int timeout_polling_interval_ms; + unsigned int inflight_map_req; +}; + +struct ufshpb_stats { + u64 hit_cnt; + u64 miss_cnt; + u64 rcmd_noti_cnt; + u64 rcmd_active_cnt; + u64 rcmd_inactive_cnt; + u64 map_req_cnt; + u64 pre_req_cnt; + u64 umap_req_cnt; +}; + +struct ufshpb_lu { + int lun; + struct scsi_device *sdev_ufs_lu; + + spinlock_t rgn_state_lock; /* protects rgn/srgn state */ + struct ufshpb_region *rgn_tbl; + + atomic_t hpb_state; + + spinlock_t rsp_list_lock; + struct list_head lh_act_srgn; /* hold rsp_list_lock */ + struct list_head lh_inact_rgn; /* hold rsp_list_lock */ + + /* pre request information */ + struct ufshpb_req *pre_req; + int num_inflight_pre_req; + int throttle_pre_req; + int num_inflight_map_req; /* hold param_lock */ + spinlock_t param_lock; + + struct list_head lh_pre_req_free; + int pre_req_max_tr_len; + + /* cached L2P map management worker */ + struct work_struct map_work; + + /* for selecting victim */ + struct victim_select_info lru_info; + struct work_struct ufshpb_normalization_work; + struct delayed_work ufshpb_read_to_work; + unsigned long work_data_bits; +#define TIMEOUT_WORK_RUNNING 0 + + /* pinned region information */ + u32 lu_pinned_start; + u32 lu_pinned_end; + + /* HPB related configuration */ + u32 rgns_per_lu; + u32 srgns_per_lu; + u32 last_srgn_entries; + int srgns_per_rgn; + u32 srgn_mem_size; + u32 entries_per_rgn_mask; + u32 entries_per_rgn_shift; + u32 entries_per_srgn; + u32 entries_per_srgn_mask; + u32 entries_per_srgn_shift; + u32 pages_per_srgn; + + bool is_hcm; + + struct ufshpb_stats stats; + struct ufshpb_params params; + + struct kmem_cache *map_req_cache; + struct kmem_cache *m_page_cache; + + struct list_head list_hpb_lu; +}; + +struct ufs_hba; +struct ufshcd_lrb; + +#ifndef CONFIG_SCSI_UFS_HPB +static int ufshpb_prep(struct ufs_hba *hba, 
struct ufshcd_lrb *lrbp) { return 0; } +static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {} +static void ufshpb_resume(struct ufs_hba *hba) {} +static void ufshpb_suspend(struct ufs_hba *hba) {} +static void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest) {} +static void ufshpb_init(struct ufs_hba *hba) {} +static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} +static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} +static void ufshpb_remove(struct ufs_hba *hba) {} +static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; } +static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {} +static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {} +static bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; } +#else +int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +void ufshpb_resume(struct ufs_hba *hba); +void ufshpb_suspend(struct ufs_hba *hba); +void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest); +void ufshpb_init(struct ufs_hba *hba); +void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev); +void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev); +void ufshpb_remove(struct ufs_hba *hba); +bool ufshpb_is_allowed(struct ufs_hba *hba); +void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf); +void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf); +bool ufshpb_is_legacy(struct ufs_hba *hba); +extern struct attribute_group ufs_sysfs_hpb_stat_group; +extern struct attribute_group ufs_sysfs_hpb_param_group; +#endif + +#endif /* End of Header */ diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig new file mode 100644 index 0000000000..4cc2dbd79e --- /dev/null +++ b/drivers/ufs/host/Kconfig @@ -0,0 +1,126 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Kernel configuration file for the UFS host controller drivers. +# +# Copyright (C) 2011-2013 Samsung India Software Operations +# +# Authors: +# Santosh Yaraganavi +# Vinayak Holikatti + +config SCSI_UFSHCD_PCI + tristate "PCI bus based UFS Controller support" + depends on PCI + help + This selects the PCI UFS Host Controller Interface. Select this if + you have UFS Host Controller with PCI Interface. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config SCSI_UFS_DWC_TC_PCI + tristate "DesignWare pci support using a G210 Test Chip" + depends on SCSI_UFSHCD_PCI + help + Synopsys Test Chip is a PHY for prototyping purposes. + + If unsure, say N. + +config SCSI_UFSHCD_PLATFORM + tristate "Platform bus based UFS Controller support" + depends on HAS_IOMEM + help + This selects the UFS host controller support. Select this if + you have an UFS controller on Platform bus. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config SCSI_UFS_CDNS_PLATFORM + tristate "Cadence UFS Controller platform driver" + depends on SCSI_UFSHCD_PLATFORM + help + This selects the Cadence-specific additions to UFSHCD platform driver. + + If unsure, say N. + +config SCSI_UFS_DWC_TC_PLATFORM + tristate "DesignWare platform support using a G210 Test Chip" + depends on SCSI_UFSHCD_PLATFORM + help + Synopsys Test Chip is a PHY for prototyping purposes. + + If unsure, say N. 
+ +config SCSI_UFS_QCOM + tristate "QCOM specific hooks to UFS controller platform driver" + depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM + select QCOM_SCM if SCSI_UFS_CRYPTO + select RESET_CONTROLLER + help + This selects the QCOM specific additions to the UFSHCD platform driver. + A UFS host on a QCOM chipset needs some vendor specific configuration + before accessing the hardware, which includes PHY configuration and + vendor specific registers. + + Select this if you have a UFS controller on a QCOM chipset. + If unsure, say N. + +config SCSI_UFS_MEDIATEK + tristate "Mediatek specific hooks to UFS controller platform driver" + depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK + select PHY_MTK_UFS + select RESET_TI_SYSCON + help + This selects the Mediatek specific additions to the UFSHCD platform + driver. A UFS host on a Mediatek chipset needs some vendor specific + configuration before accessing the hardware, which includes PHY + configuration and vendor specific registers. + + Select this if you have a UFS controller on a Mediatek chipset. + + If unsure, say N. + +config SCSI_UFS_HISI + tristate "Hisilicon specific hooks to UFS controller platform driver" + depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM + help + This selects the Hisilicon specific additions to the UFSHCD platform + driver. + + Select this if you have a UFS controller on a Hisilicon chipset. + If unsure, say N. + +config SCSI_UFS_RENESAS + tristate "Renesas specific hooks to UFS controller platform driver" + depends on (ARCH_RENESAS || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM + help + This selects the Renesas specific additions to the UFSHCD platform + driver. A UFS host on a Renesas chipset needs some vendor specific + configuration before accessing the hardware. + + Select this if you have a UFS controller on a Renesas chipset. + + If unsure, say N. + +config SCSI_UFS_TI_J721E + tristate "TI glue layer for Cadence UFS Controller" + depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST) + help + This selects the driver for the TI glue layer for the Cadence UFS + Host Controller IP. + + Select this if you have a TI platform with a UFS controller. + If unsure, say N. + +config SCSI_UFS_EXYNOS + tristate "Exynos specific hooks to UFS controller platform driver" + depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST) + help + This selects the Samsung Exynos SoC specific additions to the UFSHCD + platform driver. A UFS host on a Samsung Exynos SoC includes the HCI + and UNIPRO layer, and associates with the UFS-PHY driver. + + Select this if you have a UFS host controller on a Samsung Exynos SoC. + If unsure, say N. 
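+ +# Example: a board using the Cadence UFS controller behind the TI J721E glue +# would typically enable, in addition to the core SCSI_UFSHCD option: +# CONFIG_SCSI_UFSHCD_PLATFORM=y +# CONFIG_SCSI_UFS_CDNS_PLATFORM=y +# CONFIG_SCSI_UFS_TI_J721E=y 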
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile new file mode 100644 index 0000000000..7717ca93e7 --- /dev/null +++ b/drivers/ufs/host/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o +obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o +obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o +obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o +ufs_qcom-y += ufs-qcom.o +ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o +obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o +obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o +obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o +obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o +obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o +obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o +obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c new file mode 100644 index 0000000000..e05c0ae64e --- /dev/null +++ b/drivers/ufs/host/cdns-pltfrm.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Platform UFS Host driver for Cadence controller + * + * Copyright (C) 2018 Cadence Design Systems, Inc. + * + * Authors: + * Jan Kotas + * + */ + +#include +#include +#include +#include +#include +#include + +#include "ufshcd-pltfrm.h" + +#define CDNS_UFS_REG_HCLKDIV 0xFC +#define CDNS_UFS_REG_PHY_XCFGD1 0x113C +#define CDNS_UFS_MAX_L4_ATTRS 12 + +struct cdns_ufs_host { + /** + * cdns_ufs_dme_attr_val - for storing L4 attributes + */ + u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS]; +}; + +/** + * cdns_ufs_get_l4_attr - get L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_get_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID), + &host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID), + &host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + &host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID), + &host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS), + &host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + &host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE), + &host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + &host->cdns_ufs_dme_attr_val[11]); +} + +/** + * cdns_ufs_set_l4_attr - set L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_set_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), + host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), + host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID), + host->cdns_ufs_dme_attr_val[3]); + 
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), + host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE), + host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + host->cdns_ufs_dme_attr_val[11]); +} + +/** + * cdns_ufs_set_hclkdiv() + * Sets HCLKDIV register value based on the core_clk + * @hba: host controller instance + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) +{ + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + unsigned long core_clk_rate = 0; + u32 core_clk_div = 0; + + if (list_empty(head)) + return 0; + + list_for_each_entry(clki, head, list) { + if (IS_ERR_OR_NULL(clki->clk)) + continue; + if (!strcmp(clki->name, "core_clk")) + core_clk_rate = clk_get_rate(clki->clk); + } + + if (!core_clk_rate) { + dev_err(hba->dev, "%s: unable to find core_clk rate\n", + __func__); + return -EINVAL; + } + + core_clk_div = core_clk_rate / USEC_PER_SEC; + + ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV); + /** + * Make sure the register was updated, + * UniPro layer will not work with an incorrect value. + */ + mb(); + + return 0; +} + +/** + * cdns_ufs_hce_enable_notify() + * Called before and after HCE enable bit is set. + * @hba: host controller instance + * @status: notify stage (pre, post change) + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status != PRE_CHANGE) + return 0; + + return cdns_ufs_set_hclkdiv(hba); +} + +/** + * cdns_ufs_hibern8_notify() + * Called around hibern8 enter/exit. + * @hba: host controller instance + * @cmd: UIC Command + * @status: notify stage (pre, post change) + * + */ +static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER) + cdns_ufs_get_l4_attr(hba); + if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT) + cdns_ufs_set_l4_attr(hba); +} + +/** + * cdns_ufs_link_startup_notify() + * Called before and after Link startup is carried out. + * @hba: host controller instance + * @status: notify stage (pre, post change) + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status != PRE_CHANGE) + return 0; + + /* + * Some UFS devices have issues if LCC is enabled. + * So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. + */ + ufshcd_disable_host_tx_lcc(hba); + + /* + * Disabling Autohibern8 feature in cadence UFS + * to mask unexpected interrupt trigger. 
+ */ + hba->ahit = 0; + + return 0; +} + +/** + * cdns_ufs_init - performs additional ufs initialization + * @hba: host controller instance + * + * Returns status of initialization + */ +static int cdns_ufs_init(struct ufs_hba *hba) +{ + int status = 0; + struct cdns_ufs_host *host; + struct device *dev = hba->dev; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + + if (!host) + return -ENOMEM; + ufshcd_set_variant(hba, host); + + status = ufshcd_vops_phy_initialization(hba); + + return status; +} + +/** + * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization + * @hba: host controller instance + * + * Always returns 0 + */ +static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba) +{ + u32 data; + + /* Increase RX_Advanced_Min_ActivateTime_Capability */ + data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1); + data |= BIT(24); + ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1); + + return 0; +} + +static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = { + .name = "cdns-ufs-pltfm", + .init = cdns_ufs_init, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, + .hibern8_notify = cdns_ufs_hibern8_notify, +}; + +static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = { + .name = "cdns-ufs-pltfm", + .init = cdns_ufs_init, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, + .phy_initialization = cdns_ufs_m31_16nm_phy_initialization, + .hibern8_notify = cdns_ufs_hibern8_notify, +}; + +static const struct of_device_id cdns_ufs_of_match[] = { + { + .compatible = "cdns,ufshc", + .data = &cdns_ufs_pltfm_hba_vops, + }, + { + .compatible = "cdns,ufshc-m31-16nm", + .data = &cdns_ufs_m31_16nm_pltfm_hba_vops, + }, + { }, +}; + +MODULE_DEVICE_TABLE(of, cdns_ufs_of_match); + +/** + * cdns_ufs_pltfrm_probe - probe routine of the driver + * @pdev: pointer to platform device handle + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_pltfrm_probe(struct platform_device *pdev) +{ + int err; + const struct of_device_id *of_id; + struct ufs_hba_variant_ops *vops; + struct device *dev = &pdev->dev; + + of_id = of_match_node(cdns_ufs_of_match, dev->of_node); + vops = (struct ufs_hba_variant_ops *)of_id->data; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * cdns_ufs_pltfrm_remove - removes the ufs driver + * @pdev: pointer to platform device handle + * + * Always returns 0 + */ +static int cdns_ufs_pltfrm_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + ufshcd_remove(hba); + return 0; +} + +static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver cdns_ufs_pltfrm_driver = { + .probe = cdns_ufs_pltfrm_probe, + .remove = cdns_ufs_pltfrm_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "cdns-ufshcd", + .pm = &cdns_ufs_dev_pm_ops, + .of_match_table = cdns_ufs_of_match, + }, +}; + +module_platform_driver(cdns_ufs_pltfrm_driver); + +MODULE_AUTHOR("Jan Kotas "); +MODULE_DESCRIPTION("Cadence UFS host controller platform driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c new file mode 100644 index 0000000000..92b8ad4b58 --- /dev/null +++ b/drivers/ufs/host/tc-dwc-g210-pci.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto + */ + +#include +#include "ufshcd-dwc.h" +#include "tc-dwc-g210.h" + +#include +#include +#include + +/* Test Chip type expected values */ +#define TC_G210_20BIT 20 +#define TC_G210_40BIT 40 +#define TC_G210_INV 0 + +static int tc_type = TC_G210_INV; +module_param(tc_type, int, 0); +MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)"); + +/* + * struct ufs_hba_dwc_vops - UFS DWC specific variant operations + */ +static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = { + .name = "tc-dwc-g210-pci", + .link_startup_notify = ufshcd_dwc_link_startup_notify, +}; + +/** + * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state + * @pdev: pointer to PCI device handle + */ +static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev) +{ + ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); +} + +/** + * tc_dwc_g210_pci_remove - de-allocate PCI/SCSI host and host memory space + * data structure memory + * @pdev: pointer to PCI handle + */ +static void tc_dwc_g210_pci_remove(struct pci_dev *pdev) +{ + struct ufs_hba *hba = pci_get_drvdata(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + ufshcd_remove(hba); +} + +/** + * tc_dwc_g210_pci_probe - probe routine of the driver + * @pdev: pointer to PCI device handle + * @id: PCI device id + * + * Returns 0 on success, non-zero value on failure + */ +static int +tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + int err; + + /* Check Test Chip type and set the specific setup routine */ + if (tc_type == TC_G210_20BIT) { + tc_dwc_g210_pci_hba_vops.phy_initialization = + tc_dwc_g210_config_20_bit; + } else if (tc_type == TC_G210_40BIT) { + tc_dwc_g210_pci_hba_vops.phy_initialization = + tc_dwc_g210_config_40_bit; + } else { + dev_err(&pdev->dev, "test chip version not specified\n"); + return -EPERM; + } + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return err; + } + + pci_set_master(pdev); + + err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); + if (err < 0) { + dev_err(&pdev->dev, "request and iomap failed\n"); + return err; + } + + mmio_base = pcim_iomap_table(pdev)[0]; + + err = ufshcd_alloc_host(&pdev->dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + return err; + } + + hba->vops = &tc_dwc_g210_pci_hba_vops; + + err = ufshcd_init(hba, mmio_base, pdev->irq); + if (err) { + dev_err(&pdev->dev, "Initialization failed\n"); + return err; + } + + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); + + return 0; +} + +static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static const struct pci_device_id tc_dwc_g210_pci_tbl[] = { + { PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } /* terminate list */ +}; + 
+MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl); + +static struct pci_driver tc_dwc_g210_pci_driver = { + .name = "tc-dwc-g210-pci", + .id_table = tc_dwc_g210_pci_tbl, + .probe = tc_dwc_g210_pci_probe, + .remove = tc_dwc_g210_pci_remove, + .shutdown = tc_dwc_g210_pci_shutdown, + .driver = { + .pm = &tc_dwc_g210_pci_pm_ops + }, +}; + +module_pci_driver(tc_dwc_g210_pci_driver); + +MODULE_AUTHOR("Joao Pinto "); +MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c new file mode 100644 index 0000000000..f15a84d0c1 --- /dev/null +++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto + */ + +#include +#include +#include +#include +#include +#include + +#include "ufshcd-pltfrm.h" +#include "ufshcd-dwc.h" +#include "tc-dwc-g210.h" + +/* + * UFS DWC specific variant operations + */ +static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = { + .name = "tc-dwc-g210-pltfm", + .link_startup_notify = ufshcd_dwc_link_startup_notify, + .phy_initialization = tc_dwc_g210_config_20_bit, +}; + +static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = { + .name = "tc-dwc-g210-pltfm", + .link_startup_notify = ufshcd_dwc_link_startup_notify, + .phy_initialization = tc_dwc_g210_config_40_bit, +}; + +static const struct of_device_id tc_dwc_g210_pltfm_match[] = { + { + .compatible = "snps,g210-tc-6.00-20bit", + .data = &tc_dwc_g210_20bit_pltfm_hba_vops, + }, + { + .compatible = "snps,g210-tc-6.00-40bit", + .data = &tc_dwc_g210_40bit_pltfm_hba_vops, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match); + +/** + * tc_dwc_g210_pltfm_probe() + * @pdev: pointer to platform device structure + * + */ +static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev) +{ + int err; + const struct of_device_id *of_id; + struct ufs_hba_variant_ops *vops; + struct device *dev = &pdev->dev; + + of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node); + vops = (struct ufs_hba_variant_ops *)of_id->data; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * tc_dwc_g210_pltfm_remove() + * @pdev: pointer to platform device structure + * + */ +static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + + return 0; +} + +static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) +}; + +static struct platform_driver tc_dwc_g210_pltfm_driver = { + .probe = tc_dwc_g210_pltfm_probe, + .remove = tc_dwc_g210_pltfm_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "tc-dwc-g210-pltfm", + .pm = &tc_dwc_g210_pltfm_pm_ops, + .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match), + }, +}; + +module_platform_driver(tc_dwc_g210_pltfm_driver); + +MODULE_ALIAS("platform:tc-dwc-g210-pltfm"); +MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver"); +MODULE_AUTHOR("Joao Pinto "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/ufs/host/tc-dwc-g210.c b/drivers/ufs/host/tc-dwc-g210.c new 
file mode 100644
index 0000000000..deb93dbd83
--- /dev/null
+++ b/drivers/ufs/host/tc-dwc-g210.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ */
+
+#include
+
+#include
+#include
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+#include "tc-dwc-g210.h"
+
+/**
+ * tc_dwc_g210_setup_40bit_rmmi()
+ * This function configures Synopsys TC specific attributes (40-bit RMMI)
+ * @hba: Pointer to driver's structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
+{
+	static const struct ufshcd_dme_attr_val setup_attrs[] = {
+		{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+		{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+		{ UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+		{ UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+	};
+
+	return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+					ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane0()
+ * This function configures Synopsys TC 20-bit RMMI Lane 0
+ * @hba: Pointer to driver's structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
+{
+	static const struct ufshcd_dme_attr_val setup_attrs[] = {
+		{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+	};
+
+	return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+					ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane1()
+ * This function configures Synopsys TC 20-bit RMMI Lane 1
+ * @hba: Pointer to driver's structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
+{
+	int connected_rx_lanes = 0;
+	int connected_tx_lanes = 0;
+	int ret = 0;
+
+	static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
+		{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+		  DME_LOCAL },
+	};
+
+	static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
+		{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E,
+		  DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f,
+		  DME_LOCAL },
+	};
+
+	/* Get the available lane count */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
+		       &connected_rx_lanes);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
+		       &connected_tx_lanes);
+
+	if (connected_tx_lanes == 2) {
+		ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs,
+					       ARRAY_SIZE(setup_tx_attrs));
+		if (ret)
+			goto out;
+	}
+
+	if (connected_rx_lanes == 2) {
+		ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs,
+					       ARRAY_SIZE(setup_rx_attrs));
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi()
+ * This function configures Synopsys TC specific attributes (20-bit RMMI)
+ * @hba: Pointer to driver's structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	static const struct ufshcd_dme_attr_val setup_attrs[] = {
+		{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+		{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+		{ UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+		{ UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+	};
+
+	ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+				       ARRAY_SIZE(setup_attrs));
+	if (ret)
+		goto out;
+
+	/* Lane 0 configuration */
+	ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba);
+	if (ret)
+		goto out;
+
+	/* Lane 1 configuration */
+	ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba);
+	if (ret)
+		goto out;
+
+out:
+	return ret;
+}
+
+/**
+ * tc_dwc_g210_config_40_bit()
+ * This function configures Local (host) Synopsys 40-bit TC specific attributes
+ *
+ * @hba: Pointer to driver's structure
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n");
+	ret = tc_dwc_g210_setup_40bit_rmmi(hba);
+	if (ret) {
+		dev_err(hba->dev, "Configuration failed\n");
+		goto out;
+	}
+
+	/* To write Shadow register bank to effective configuration block */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+	if (ret)
+		goto out;
+
+	/* To configure Debug OMC */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
+
+/**
+ * tc_dwc_g210_config_20_bit()
+ * This function configures Local (host) Synopsys 20-bit TC specific attributes
+ *
+ * @hba: Pointer to driver's structure
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n");
+	ret = tc_dwc_g210_setup_20bit_rmmi(hba);
+	if (ret) {
+		dev_err(hba->dev, "Configuration failed\n");
+		goto out;
+	}
+
+	/* To write Shadow register bank to effective configuration block */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+	if (ret)
+		goto out;
+
+	/* To configure Debug OMC */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_20_bit);
+
+MODULE_AUTHOR("Joao Pinto ");
+MODULE_DESCRIPTION("Synopsys G210 Test Chip driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/ufs/host/tc-dwc-g210.h b/drivers/ufs/host/tc-dwc-g210.h
new file mode 100644
index 0000000000..f7154012f5
--- /dev/null
+++ b/drivers/ufs/host/tc-dwc-g210.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc.
(www.synopsys.com) + * + * Authors: Joao Pinto + */ + +#ifndef _TC_DWC_G210_H +#define _TC_DWC_G210_H + +struct ufs_hba; + +int tc_dwc_g210_config_40_bit(struct ufs_hba *hba); +int tc_dwc_g210_config_20_bit(struct ufs_hba *hba); + +#endif /* End of Header */ diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c new file mode 100644 index 0000000000..122d650d08 --- /dev/null +++ b/drivers/ufs/host/ti-j721e-ufs.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// + +#include +#include +#include +#include +#include +#include +#include + +#define TI_UFS_SS_CTRL 0x4 +#define TI_UFS_SS_RST_N_PCS BIT(0) +#define TI_UFS_SS_CLK_26MHZ BIT(4) + +static int ti_j721e_ufs_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned long clk_rate; + void __iomem *regbase; + struct clk *clk; + u32 reg = 0; + int ret; + + regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regbase)) + return PTR_ERR(regbase); + + pm_runtime_enable(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + goto disable_pm; + + /* Select MPHY refclk frequency */ + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "Cannot claim MPHY clock.\n"); + goto clk_err; + } + clk_rate = clk_get_rate(clk); + if (clk_rate == 26000000) + reg |= TI_UFS_SS_CLK_26MHZ; + devm_clk_put(dev, clk); + + /* Take UFS slave device out of reset */ + reg |= TI_UFS_SS_RST_N_PCS; + writel(reg, regbase + TI_UFS_SS_CTRL); + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, + dev); + if (ret) { + dev_err(dev, "failed to populate child nodes %d\n", ret); + goto clk_err; + } + + return ret; + +clk_err: + pm_runtime_put_sync(dev); +disable_pm: + pm_runtime_disable(dev); + return ret; +} + +static int ti_j721e_ufs_remove(struct platform_device *pdev) +{ + of_platform_depopulate(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id ti_j721e_ufs_of_match[] = { + { + .compatible = "ti,j721e-ufs", + }, + { }, +}; + +static struct platform_driver ti_j721e_ufs_driver = { + .probe = ti_j721e_ufs_probe, + .remove = ti_j721e_ufs_remove, + .driver = { + .name = "ti-j721e-ufs", + .of_match_table = ti_j721e_ufs_of_match, + }, +}; +module_platform_driver(ti_j721e_ufs_driver); + +MODULE_AUTHOR("Vignesh Raghavendra "); +MODULE_DESCRIPTION("TI UFS host controller glue driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c new file mode 100644 index 0000000000..c3628a8645 --- /dev/null +++ b/drivers/ufs/host/ufs-exynos.c @@ -0,0 +1,1764 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UFS Host Controller driver for Exynos specific extensions + * + * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd. 
+ * Author: Seungwon Jeon + * Author: Alim Akhtar + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "ufshcd-pltfrm.h" +#include +#include + +#include "ufs-exynos.h" + +/* + * Exynos's Vendor specific registers for UFSHCI + */ +#define HCI_TXPRDT_ENTRY_SIZE 0x00 +#define PRDT_PREFECT_EN BIT(31) +#define PRDT_SET_SIZE(x) ((x) & 0x1F) +#define HCI_RXPRDT_ENTRY_SIZE 0x04 +#define HCI_1US_TO_CNT_VAL 0x0C +#define CNT_VAL_1US_MASK 0x3FF +#define HCI_UTRL_NEXUS_TYPE 0x40 +#define HCI_UTMRL_NEXUS_TYPE 0x44 +#define HCI_SW_RST 0x50 +#define UFS_LINK_SW_RST BIT(0) +#define UFS_UNIPRO_SW_RST BIT(1) +#define UFS_SW_RST_MASK (UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST) +#define HCI_DATA_REORDER 0x60 +#define HCI_UNIPRO_APB_CLK_CTRL 0x68 +#define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF)) +#define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C +#define HCI_GPIO_OUT 0x70 +#define HCI_ERR_EN_PA_LAYER 0x78 +#define HCI_ERR_EN_DL_LAYER 0x7C +#define HCI_ERR_EN_N_LAYER 0x80 +#define HCI_ERR_EN_T_LAYER 0x84 +#define HCI_ERR_EN_DME_LAYER 0x88 +#define HCI_CLKSTOP_CTRL 0xB0 +#define REFCLKOUT_STOP BIT(4) +#define MPHY_APBCLK_STOP BIT(3) +#define REFCLK_STOP BIT(2) +#define UNIPRO_MCLK_STOP BIT(1) +#define UNIPRO_PCLK_STOP BIT(0) +#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\ + UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\ + UNIPRO_PCLK_STOP) +#define HCI_MISC 0xB4 +#define REFCLK_CTRL_EN BIT(7) +#define UNIPRO_PCLK_CTRL_EN BIT(6) +#define UNIPRO_MCLK_CTRL_EN BIT(5) +#define HCI_CORECLK_CTRL_EN BIT(4) +#define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\ + UNIPRO_PCLK_CTRL_EN |\ + UNIPRO_MCLK_CTRL_EN) +/* Device fatal error */ +#define DFES_ERR_EN BIT(31) +#define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\ + UIC_DATA_LINK_LAYER_ERROR_PA_INIT) +#define DFES_DEF_L3_ERRS (UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\ + UIC_NETWORK_BAD_DEVICEID_ENC |\ + UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING) +#define DFES_DEF_L4_ERRS (UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\ + UIC_TRANSPORT_UNKNOWN_CPORTID |\ + UIC_TRANSPORT_NO_CONNECTION_RX |\ + UIC_TRANSPORT_BAD_TC) + +/* FSYS UFS Shareability */ +#define UFS_WR_SHARABLE BIT(2) +#define UFS_RD_SHARABLE BIT(1) +#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE) +#define UFS_SHAREABILITY_OFFSET 0x710 + +/* Multi-host registers */ +#define MHCTRL 0xC4 +#define MHCTRL_EN_VH_MASK (0xE) +#define MHCTRL_EN_VH(vh) (vh << 1) +#define PH2VH_MBOX 0xD8 + +#define MH_MSG_MASK (0xFF) + +#define MH_MSG(id, msg) ((id << 8) | (msg & 0xFF)) +#define MH_MSG_PH_READY 0x1 +#define MH_MSG_VH_READY 0x2 + +#define ALLOW_INQUIRY BIT(25) +#define ALLOW_MODE_SELECT BIT(24) +#define ALLOW_MODE_SENSE BIT(23) +#define ALLOW_PRE_FETCH GENMASK(22, 21) +#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */ +#define ALLOW_READ_BUFFER BIT(17) +#define ALLOW_READ_CAPACITY GENMASK(16, 15) +#define ALLOW_REPORT_LUNS BIT(14) +#define ALLOW_REQUEST_SENSE BIT(13) +#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7) +#define ALLOW_TEST_UNIT_READY BIT(6) +#define ALLOW_UNMAP BIT(5) +#define ALLOW_VERIFY BIT(4) +#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */ + +#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \ + ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \ + ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \ + ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \ + ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \ + ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \ + ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL) + +#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C +#define 
HCI_MH_IID_IN_TASK_TAG 0X308 + +#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC) + +enum { + UNIPRO_L1_5 = 0,/* PHY Adapter */ + UNIPRO_L2, /* Data Link */ + UNIPRO_L3, /* Network */ + UNIPRO_L4, /* Transport */ + UNIPRO_DME, /* DME */ +}; + +/* + * UNIPRO registers + */ +#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8 +#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC +#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0 + +/* + * UFS Protector registers + */ +#define UFSPRSECURITY 0x010 +#define NSSMU BIT(14) +#define UFSPSBEGIN0 0x200 +#define UFSPSEND0 0x204 +#define UFSPSLUN0 0x208 +#define UFSPSCTRL0 0x20C + +#define CNTR_DIV_VAL 40 + +static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en); +static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en); + +static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs) +{ + exynos_ufs_auto_ctrl_hcc(ufs, true); +} + +static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs) +{ + exynos_ufs_auto_ctrl_hcc(ufs, false); +} + +static inline void exynos_ufs_disable_auto_ctrl_hcc_save( + struct exynos_ufs *ufs, u32 *val) +{ + *val = hci_readl(ufs, HCI_MISC); + exynos_ufs_auto_ctrl_hcc(ufs, false); +} + +static inline void exynos_ufs_auto_ctrl_hcc_restore( + struct exynos_ufs *ufs, u32 *val) +{ + hci_writel(ufs, *val, HCI_MISC); +} + +static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs) +{ + exynos_ufs_ctrl_clkstop(ufs, true); +} + +static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs) +{ + exynos_ufs_ctrl_clkstop(ufs, false); +} + +static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +{ + return 0; +} + +static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +{ + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + /* IO Coherency setting */ + if (ufs->sysreg) { + return regmap_update_bits(ufs->sysreg, + ufs->shareability_reg_offset, + UFS_SHARABLE, UFS_SHARABLE); + } + + attr->tx_dif_p_nsec = 3200000; + + return 0; +} + +static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + + /* Enable Virtual Host #1 */ + ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL); + /* Default VH Transfer permissions */ + hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH); + /* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */ + hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG); + + return 0; +} + +static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + int i; + u32 tx_line_reset_period, rx_line_reset_period; + + rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC; + tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC; + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i), + (rx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i), + (rx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i), + (rx_line_reset_period) & 0xFF); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 
0xf6); + } + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + /* Not to affect VND_TX_LINERESET_PVALUE to VND_TX_CLK_PRD */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i), + 0x02); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i), + (tx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i), + (tx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i), + (tx_line_reset_period) & 0xFF); + + /* TX PWM Gear Capability / PWM_G1_ONLY */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000); + + return 0; +} + +static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + + /* PACP_PWR_req and delivered to the remote DME */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000); + + return 0; +} + +static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + u32 enabled_vh; + + enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK; + + /* Send physical host ready message to virtual hosts */ + ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX); + + return 0; +} + +static int exynos7_ufs_pre_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite; + int i; + + exynos_ufs_enable_ov_tm(hba); + for_each_ufs_tx_lane(ufs, i) + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17); + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00); + } + exynos_ufs_disable_ov_tm(hba); + + for_each_ufs_tx_lane(ufs, i) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1); + udelay(1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12)); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1); + udelay(1600); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val); + + return 0; +} + +static int exynos7_ufs_post_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + int i; + + exynos_ufs_enable_ov_tm(hba); + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i), + TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000))); + } + exynos_ufs_disable_ov_tm(hba); + + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8); + exynos_ufs_disable_dbg_mode(hba); + + return 0; +} + +static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE); + + return 0; +} + +static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba 
*hba = ufs->hba; + int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1); + + if (lanes == 1) { + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1); + exynos_ufs_disable_dbg_mode(hba); + } + + return 0; +} + +/* + * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w + * Control should be disabled in the below cases + * - Before host controller S/W reset + * - Access to UFS protector's register + */ +static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en) +{ + u32 misc = hci_readl(ufs, HCI_MISC); + + if (en) + hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC); + else + hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC); +} + +static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en) +{ + u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL); + u32 misc = hci_readl(ufs, HCI_MISC); + + if (en) { + hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC); + hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL); + } else { + hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL); + hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC); + } +} + +static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct list_head *head = &hba->clk_list_head; + struct ufs_clk_info *clki; + unsigned long pclk_rate; + u32 f_min, f_max; + u8 div = 0; + int ret = 0; + + if (list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR(clki->clk)) { + if (!strcmp(clki->name, "core_clk")) + ufs->clk_hci_core = clki->clk; + else if (!strcmp(clki->name, "sclk_unipro_main")) + ufs->clk_unipro_main = clki->clk; + } + } + + if (!ufs->clk_hci_core || !ufs->clk_unipro_main) { + dev_err(hba->dev, "failed to get clk info\n"); + ret = -EINVAL; + goto out; + } + + ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main); + pclk_rate = clk_get_rate(ufs->clk_hci_core); + f_min = ufs->pclk_avail_min; + f_max = ufs->pclk_avail_max; + + if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) { + do { + pclk_rate /= (div + 1); + + if (pclk_rate <= f_max) + break; + div++; + } while (pclk_rate >= f_min); + } + + if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) { + dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate); + ret = -EINVAL; + goto out; + } + + ufs->pclk_rate = pclk_rate; + ufs->pclk_div = div; + +out: + return ret; +} + +static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs) +{ + if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) { + u32 val; + + val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL); + hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div), + HCI_UNIPRO_APB_CLK_CTRL); + } +} + +static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + ufshcd_dme_set(hba, + UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl); +} + +static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + const unsigned int div = 30, mult = 20; + const unsigned long pwm_min = 3 * 1000 * 1000; + const unsigned long pwm_max = 9 * 1000 * 1000; + const int divs[] = {32, 16, 8, 4}; + unsigned long clk = 0, _clk, clk_period; + int i = 0, clk_idx = -1; + + clk_period = UNIPRO_PCLK_PERIOD(ufs); + for (i = 0; i < ARRAY_SIZE(divs); i++) { + _clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div); + if (_clk >= pwm_min && _clk <= 
pwm_max) { + if (_clk > clk) { + clk_idx = i; + clk = _clk; + } + } + } + + if (clk_idx == -1) { + ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx); + dev_err(hba->dev, + "failed to decide pwm clock divider, will not change\n"); + } + + attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK; +} + +long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period) +{ + const int precise = 10; + long pclk_rate = ufs->pclk_rate; + long clk_period, fraction; + + clk_period = UNIPRO_PCLK_PERIOD(ufs); + fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate; + + return (period * precise) / ((clk_period * precise) + fraction); +} + +static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs) +{ + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + + t_cfg->tx_linereset_p = + exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec); + t_cfg->tx_linereset_n = + exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec); + t_cfg->tx_high_z_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec); + t_cfg->tx_base_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec); + t_cfg->tx_gran_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec); + t_cfg->tx_sleep_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt); + + t_cfg->rx_linereset = + exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec); + t_cfg->rx_hibern8_wait = + exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec); + t_cfg->rx_base_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec); + t_cfg->rx_gran_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec); + t_cfg->rx_sleep_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt); + t_cfg->rx_stall_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt); +} + +static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + int i; + + exynos_ufs_set_pwm_clk_div(ufs); + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i), + ufs->drv_data->uic_attr->rx_filler_enable); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i), + RX_LINERESET(t_cfg->rx_linereset)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i), + RX_BASE_NVAL_L(t_cfg->rx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i), + RX_BASE_NVAL_H(t_cfg->rx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i), + RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i), + RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i), + RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i), + RX_OV_STALL_CNT(t_cfg->rx_stall_cnt)); + } + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i), + TX_LINERESET_P(t_cfg->tx_linereset_p)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i), + TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i), + TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i), + TX_BASE_NVAL_L(t_cfg->tx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i), + TX_BASE_NVAL_H(t_cfg->tx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i), + 
TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i), + TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i), + TX_OV_H8_ENTER_EN | + TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i), + ufs->drv_data->uic_attr->tx_min_activatetime); + } + + exynos_ufs_disable_ov_tm(hba); +} + +static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + int i; + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i), + attr->rx_hs_g1_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i), + attr->rx_hs_g2_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i), + attr->rx_hs_g3_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i), + attr->rx_hs_g1_prep_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i), + attr->rx_hs_g2_prep_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i), + attr->rx_hs_g3_prep_sync_len_cap); + } + + if (attr->rx_adv_fine_gran_sup_en == 0) { + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0); + + if (attr->rx_min_actv_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL( + RX_MIN_ACTIVATETIME_CAPABILITY, i), + attr->rx_min_actv_time_cap); + + if (attr->rx_hibern8_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i), + attr->rx_hibern8_time_cap); + } + } else if (attr->rx_adv_fine_gran_sup_en == 1) { + for_each_ufs_rx_lane(ufs, i) { + if (attr->rx_adv_fine_gran_step) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, + i), RX_ADV_FINE_GRAN_STEP( + attr->rx_adv_fine_gran_step)); + + if (attr->rx_adv_min_actv_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL( + RX_ADV_MIN_ACTIVATETIME_CAP, i), + attr->rx_adv_min_actv_time_cap); + + if (attr->rx_adv_hibern8_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP, + i), + attr->rx_adv_hibern8_time_cap); + } + } + + exynos_ufs_disable_ov_tm(hba); +} + +static void exynos_ufs_establish_connt(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + enum { + DEV_ID = 0x00, + PEER_DEV_ID = 0x01, + PEER_CPORT_ID = 0x00, + TRAFFIC_CLASS = 0x00, + }; + + /* allow cport attributes to be set */ + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE); + + /* local unipro attributes */ + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED); +} + +static void exynos_ufs_config_smu(struct exynos_ufs *ufs) +{ + u32 reg, val; + + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); + + /* make encryption disabled by default */ + reg = ufsp_readl(ufs, UFSPRSECURITY); + ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY); + ufsp_writel(ufs, 0x0, UFSPSBEGIN0); + ufsp_writel(ufs, 0xffffffff, UFSPSEND0); + ufsp_writel(ufs, 0xff, UFSPSLUN0); + ufsp_writel(ufs, 0xf1, UFSPSCTRL0); + + 
exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
+}
+
+static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
+					struct ufs_pa_layer_attr *pwr)
+{
+	struct ufs_hba *hba = ufs->hba;
+	u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
+	u32 mask, sync_len;
+	enum {
+		SYNC_LEN_G1 = 80 * 1000, /* 80us */
+		SYNC_LEN_G2 = 40 * 1000, /* 40us */
+		SYNC_LEN_G3 = 20 * 1000, /* 20us */
+	};
+	int i;
+
+	if (g == 1)
+		sync_len = SYNC_LEN_G1;
+	else if (g == 2)
+		sync_len = SYNC_LEN_G2;
+	else if (g == 3)
+		sync_len = SYNC_LEN_G3;
+	else
+		return;
+
+	mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
+	mask = (mask >> 8) & 0xff;
+
+	exynos_ufs_enable_ov_tm(hba);
+
+	for_each_ufs_rx_lane(ufs, i)
+		ufshcd_dme_set(hba,
+			UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);
+
+	exynos_ufs_disable_ov_tm(hba);
+}
+
+static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
+				struct ufs_pa_layer_attr *dev_max_params,
+				struct ufs_pa_layer_attr *dev_req_params)
+{
+	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+	struct phy *generic_phy = ufs->phy;
+	struct ufs_dev_params ufs_exynos_cap;
+	int ret;
+
+	if (!dev_req_params) {
+		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ufshcd_init_pwr_dev_param(&ufs_exynos_cap);
+
+	ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
+				       dev_max_params, dev_req_params);
+	if (ret) {
+		pr_err("%s: failed to determine capabilities\n", __func__);
+		goto out;
+	}
+
+	if (ufs->drv_data->pre_pwr_change)
+		ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
+
+	if (ufshcd_is_hs_mode(dev_req_params)) {
+		exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);
+
+		switch (dev_req_params->hs_rate) {
+		case PA_HS_MODE_A:
+		case PA_HS_MODE_B:
+			phy_calibrate(generic_phy);
+			break;
+		}
+	}
+
+	/* setting for three timeout values for traffic class #0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
+
+	return 0;
+out:
+	return ret;
+}
+
+#define PWR_MODE_STR_LEN	64
+static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
+				struct ufs_pa_layer_attr *pwr_req)
+{
+	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+	struct phy *generic_phy = ufs->phy;
+	int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
+	int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
+	char pwr_str[PWR_MODE_STR_LEN] = "";
+
+	/* let default be PWM Gear 1, Lane 1 */
+	if (!gear)
+		gear = 1;
+
+	if (!lanes)
+		lanes = 1;
+
+	if (ufs->drv_data->post_pwr_change)
+		ufs->drv_data->post_pwr_change(ufs, pwr_req);
+
+	if (ufshcd_is_hs_mode(pwr_req)) {
+		switch (pwr_req->hs_rate) {
+		case PA_HS_MODE_A:
+		case PA_HS_MODE_B:
+			phy_calibrate(generic_phy);
+			break;
+		}
+
+		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
+			 "FAST", pwr_req->hs_rate == PA_HS_MODE_A ?
"A" : "B", + gear, lanes); + } else { + snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d", + "SLOW", gear, lanes); + } + + dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str); + + return 0; +} + +static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba, + int tag, bool is_scsi_cmd) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + u32 type; + + type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE); + + if (is_scsi_cmd) + hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE); + else + hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE); +} + +static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba, + int tag, u8 func) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + u32 type; + + type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE); + + switch (func) { + case UFS_ABORT_TASK: + case UFS_QUERY_TASK: + hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE); + break; + case UFS_ABORT_TASK_SET: + case UFS_CLEAR_TASK_SET: + case UFS_LOGICAL_RESET: + case UFS_QUERY_TASK_SET: + hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE); + break; + } +} + +static int exynos_ufs_phy_init(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct phy *generic_phy = ufs->phy; + int ret = 0; + + if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES), + &ufs->avail_ln_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES), + &ufs->avail_ln_tx); + WARN(ufs->avail_ln_rx != ufs->avail_ln_tx, + "available data lane is not equal(rx:%d, tx:%d)\n", + ufs->avail_ln_rx, ufs->avail_ln_tx); + } + + phy_set_bus_width(generic_phy, ufs->avail_ln_rx); + ret = phy_init(generic_phy); + if (ret) { + dev_err(hba->dev, "%s: phy init failed, ret = %d\n", + __func__, ret); + return ret; + } + + ret = phy_power_on(generic_phy); + if (ret) + goto out_exit_phy; + + return 0; + +out_exit_phy: + phy_exit(generic_phy); + + return ret; +} + +static void exynos_ufs_config_unipro(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS), + ufs->drv_data->uic_attr->tx_trailingclks); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), + ufs->drv_data->uic_attr->pa_dbg_option_suite); +} + +static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index) +{ + switch (index) { + case UNIPRO_L1_5: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER); + break; + case UNIPRO_L2: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER); + break; + case UNIPRO_L3: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER); + break; + case UNIPRO_L4: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER); + break; + case UNIPRO_DME: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER); + break; + } +} + +static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufs) + return 0; + + if (on && status == PRE_CHANGE) { + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_disable_auto_ctrl_hcc(ufs); + exynos_ufs_ungate_clks(ufs); + } else if (!on && status == POST_CHANGE) { + exynos_ufs_gate_clks(ufs); + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + } + + return 0; +} + +static int exynos_ufs_pre_link(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + /* 
hci */ + exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2); + exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3); + exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4); + exynos_ufs_set_unipro_pclk_div(ufs); + + /* unipro */ + exynos_ufs_config_unipro(ufs); + + /* m-phy */ + exynos_ufs_phy_init(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) { + exynos_ufs_config_phy_time_attr(ufs); + exynos_ufs_config_phy_cap_attr(ufs); + } + + exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); + + if (ufs->drv_data->pre_link) + ufs->drv_data->pre_link(ufs); + + return 0; +} + +static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs) +{ + u32 val; + + val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL); + hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL); +} + +static int exynos_ufs_post_link(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct phy *generic_phy = ufs->phy; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + exynos_ufs_establish_connt(ufs); + exynos_ufs_fit_aggr_timeout(ufs); + + hci_writel(ufs, 0xa, HCI_DATA_REORDER); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); + hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); + hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); + + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) + ufshcd_dme_set(hba, + UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true); + + if (attr->pa_granularity) { + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY), + attr->pa_granularity); + exynos_ufs_disable_dbg_mode(hba); + + if (attr->pa_tactivate) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + attr->pa_tactivate); + if (attr->pa_hibern8time && + !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER)) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + attr->pa_hibern8time); + } + + if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) { + if (!attr->pa_granularity) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &attr->pa_granularity); + if (!attr->pa_hibern8time) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + &attr->pa_hibern8time); + /* + * not wait for HIBERN8 time to exit hibernation + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0); + + if (attr->pa_granularity < 1 || attr->pa_granularity > 6) { + /* Valid range for granularity: 1 ~ 6 */ + dev_warn(hba->dev, + "%s: pa_granularity %d is invalid, assuming backwards compatibility\n", + __func__, + attr->pa_granularity); + attr->pa_granularity = 6; + } + } + + phy_calibrate(generic_phy); + + if (ufs->drv_data->post_link) + ufs->drv_data->post_link(ufs); + + return 0; +} + +static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) +{ + struct device_node *np = dev->of_node; + struct exynos_ufs_uic_attr *attr; + int ret = 0; + + ufs->drv_data = device_get_match_data(dev); + + if (ufs->drv_data && ufs->drv_data->uic_attr) { + attr = ufs->drv_data->uic_attr; + } else { + dev_err(dev, "failed to get uic attributes\n"); + ret = -EINVAL; + goto out; + } + + ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg"); + if (IS_ERR(ufs->sysreg)) + ufs->sysreg = NULL; + else { + if (of_property_read_u32_index(np, "samsung,sysreg", 1, + &ufs->shareability_reg_offset)) { + dev_warn(dev, "can't get an offset from sysreg. 
Set to default value\n"); + ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET; + } + } + + ufs->pclk_avail_min = PCLK_AVAIL_MIN; + ufs->pclk_avail_max = PCLK_AVAIL_MAX; + + attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN; + attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL; + attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP; + attr->pa_granularity = PA_GRANULARITY_VAL; + attr->pa_tactivate = PA_TACTIVATE_VAL; + attr->pa_hibern8time = PA_HIBERN8TIME_VAL; + +out: + return ret; +} + +static inline void exynos_ufs_priv_init(struct ufs_hba *hba, + struct exynos_ufs *ufs) +{ + ufs->hba = hba; + ufs->opts = ufs->drv_data->opts; + ufs->rx_sel_idx = PA_MAXDATALANES; + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX) + ufs->rx_sel_idx = 0; + hba->priv = (void *)ufs; + hba->quirks = ufs->drv_data->quirks; +} + +static int exynos_ufs_init(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct exynos_ufs *ufs; + int ret; + + ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL); + if (!ufs) + return -ENOMEM; + + /* exynos-specific hci */ + ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci"); + if (IS_ERR(ufs->reg_hci)) { + dev_err(dev, "cannot ioremap for hci vendor register\n"); + return PTR_ERR(ufs->reg_hci); + } + + /* unipro */ + ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro"); + if (IS_ERR(ufs->reg_unipro)) { + dev_err(dev, "cannot ioremap for unipro register\n"); + return PTR_ERR(ufs->reg_unipro); + } + + /* ufs protector */ + ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp"); + if (IS_ERR(ufs->reg_ufsp)) { + dev_err(dev, "cannot ioremap for ufs protector register\n"); + return PTR_ERR(ufs->reg_ufsp); + } + + ret = exynos_ufs_parse_dt(dev, ufs); + if (ret) { + dev_err(dev, "failed to get dt info.\n"); + goto out; + } + + ufs->phy = devm_phy_get(dev, "ufs-phy"); + if (IS_ERR(ufs->phy)) { + ret = PTR_ERR(ufs->phy); + dev_err(dev, "failed to get ufs-phy\n"); + goto out; + } + + exynos_ufs_priv_init(hba, ufs); + + if (ufs->drv_data->drv_init) { + ret = ufs->drv_data->drv_init(dev, ufs); + if (ret) { + dev_err(dev, "failed to init drv-data\n"); + goto out; + } + } + + ret = exynos_ufs_get_clk_info(ufs); + if (ret) + goto out; + exynos_ufs_specify_phy_time_attr(ufs); + exynos_ufs_config_smu(ufs); + return 0; + +out: + hba->priv = NULL; + return ret; +} + +static int exynos_ufs_host_reset(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + unsigned long timeout = jiffies + msecs_to_jiffies(1); + u32 val; + int ret = 0; + + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); + + hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST); + + do { + if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK)) + goto out; + } while (time_before(jiffies, timeout)); + + dev_err(hba->dev, "timeout host sw-reset\n"); + ret = -ETIMEDOUT; + +out: + exynos_ufs_auto_ctrl_hcc_restore(ufs, &val); + return ret; +} + +static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + hci_writel(ufs, 0 << 0, HCI_GPIO_OUT); + udelay(5); + hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); +} + +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + if (!enter) { + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_disable_auto_ctrl_hcc(ufs); + exynos_ufs_ungate_clks(ufs); + + if 
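+		/*
+		 * With the SW hibern8 timer option, PA_HIBERN8TIME was forced
+		 * to 0 in exynos_ufs_post_link(), so the driver itself must
+		 * enforce the minimum hibern8 residency before exiting;
+		 * hence the busy-wait below.
+		 */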
(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) { + static const unsigned int granularity_tbl[] = { + 1, 4, 8, 16, 32, 100 + }; + int h8_time = attr->pa_hibern8time * + granularity_tbl[attr->pa_granularity - 1]; + unsigned long us; + s64 delta; + + do { + delta = h8_time - ktime_us_delta(ktime_get(), + ufs->entry_hibern8_t); + if (delta <= 0) + break; + + us = min_t(s64, delta, USEC_PER_MSEC); + if (us >= 10) + usleep_range(us, us + 10); + } while (1); + } + } +} + +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!enter) { + u32 cur_mode = 0; + u32 pwrmode; + + if (ufshcd_is_hs_mode(&ufs->dev_req_params)) + pwrmode = FAST_MODE; + else + pwrmode = SLOW_MODE; + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode); + if (cur_mode != (pwrmode << 4 | pwrmode)) { + dev_warn(hba->dev, "%s: power mode change\n", __func__); + hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf; + hba->pwr_info.pwr_tx = cur_mode & 0xf; + ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + } + + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) + exynos_ufs_establish_connt(ufs); + } else { + ufs->entry_hibern8_t = ktime_get(); + exynos_ufs_gate_clks(ufs); + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + } +} + +static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + int ret = 0; + + switch (status) { + case PRE_CHANGE: + if (ufs->drv_data->pre_hce_enable) { + ret = ufs->drv_data->pre_hce_enable(ufs); + if (ret) + return ret; + } + + ret = exynos_ufs_host_reset(hba); + if (ret) + return ret; + exynos_ufs_dev_hw_reset(hba); + break; + case POST_CHANGE: + exynos_ufs_calc_pwm_clk_div(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + + if (ufs->drv_data->post_hce_enable) + ret = ufs->drv_data->post_hce_enable(ufs); + + break; + } + + return ret; +} + +static int exynos_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = exynos_ufs_pre_link(hba); + break; + case POST_CHANGE: + ret = exynos_ufs_post_link(hba); + break; + } + + return ret; +} + +static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params, + dev_req_params); + break; + case POST_CHANGE: + ret = exynos_ufs_post_pwr_mode(hba, dev_req_params); + break; + } + + return ret; +} + +static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, + enum uic_cmd_dme enter, + enum ufs_notify_change_status notify) +{ + switch ((u8)notify) { + case PRE_CHANGE: + exynos_ufs_pre_hibern8(hba, enter); + break; + case POST_CHANGE: + exynos_ufs_post_hibern8(hba, enter); + break; + } +} + +static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (status == PRE_CHANGE) + return 0; + + if (!ufshcd_is_link_active(hba)) + phy_power_off(ufs->phy); + + return 0; +} + +static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufshcd_is_link_active(hba)) + 
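+		/*
+		 * Mirrors exynos_ufs_suspend(): the PHY was powered off there
+		 * only when the link went inactive, so power it back on under
+		 * the same condition.
+		 */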
phy_power_on(ufs->phy); + + exynos_ufs_config_smu(ufs); + + return 0; +} + +static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status == POST_CHANGE) { + ufshcd_set_link_active(hba); + ufshcd_set_ufs_dev_active(hba); + } + + return 0; +} + +static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba) +{ + u32 mbox; + ktime_t start, stop; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS)); + + do { + mbox = ufshcd_readl(hba, PH2VH_MBOX); + /* TODO: Mailbox message protocols between the PH and VHs are + * not implemented yet. This will be supported later + */ + if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY) + return 0; + + usleep_range(40, 50); + } while (ktime_before(ktime_get(), stop)); + + return -ETIME; +} + +static int exynosauto_ufs_vh_init(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct exynos_ufs *ufs; + int ret; + + ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL); + if (!ufs) + return -ENOMEM; + + /* exynos-specific hci */ + ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci"); + if (IS_ERR(ufs->reg_hci)) { + dev_err(dev, "cannot ioremap for hci vendor register\n"); + return PTR_ERR(ufs->reg_hci); + } + + ret = exynosauto_ufs_vh_wait_ph_ready(hba); + if (ret) + return ret; + + ufs->drv_data = device_get_match_data(dev); + if (!ufs->drv_data) + return -ENODEV; + + exynos_ufs_priv_init(hba, ufs); + + return 0; +} + +static int fsd_ufs_pre_link(struct exynos_ufs *ufs) +{ + int i; + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F); + } + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0); + + exynos_ufs_establish_connt(ufs); + + return 0; +} + +static int fsd_ufs_post_link(struct exynos_ufs *ufs) +{ + int i; + struct ufs_hba *hba = ufs->hba; + u32 hw_cap_min_tactivate; + u32 peer_rx_min_actv_time_cap; + u32 max_rx_hibern8_time_cap; + + ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4), + &hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), + &peer_rx_min_actv_time_cap); /* PA_TActivate */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + &max_rx_hibern8_time_cap); /* PA_Hibern8Time */ + + if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate) + ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + peer_rx_min_actv_time_cap + 1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1); + + ufshcd_dme_set(hba, 
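+		       /*
+			* The PA_SAVECONFIGTIME write below is bracketed by
+			* PA_DBG_MODE 1/0; the debug mode appears to gate
+			* writes to that attribute.
+			*/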
UIC_ARG_MIB(PA_DBG_MODE), 0x01); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00); + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + + return 0; +} + +static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000); + + unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0); + unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1); + unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2); + + return 0; +} + +static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = { + .name = "exynos_ufs", + .init = exynos_ufs_init, + .hce_enable_notify = exynos_ufs_hce_enable_notify, + .link_startup_notify = exynos_ufs_link_startup_notify, + .pwr_change_notify = exynos_ufs_pwr_change_notify, + .setup_clocks = exynos_ufs_setup_clocks, + .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req, + .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req, + .hibern8_notify = exynos_ufs_hibern8_notify, + .suspend = exynos_ufs_suspend, + .resume = exynos_ufs_resume, +}; + +static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = { + .name = "exynosauto_ufs_vh", + .init = exynosauto_ufs_vh_init, + .link_startup_notify = exynosauto_ufs_vh_link_startup_notify, +}; + +static int exynos_ufs_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops; + const struct exynos_ufs_drv_data *drv_data = + device_get_match_data(dev); + + if (drv_data && drv_data->vops) + vops = drv_data->vops; + + err = ufshcd_pltfrm_init(pdev, vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +static int exynos_ufs_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + + phy_power_off(ufs->phy); + phy_exit(ufs->phy); + + return 0; +} + +static struct exynos_ufs_uic_attr exynos7_uic_attr = { + .tx_trailingclks = 0x10, + .tx_dif_p_nsec = 3000000, /* unit: ns */ + .tx_dif_n_nsec = 1000000, /* unit: ns */ + .tx_high_z_cnt_nsec = 20000, /* unit: ns */ + .tx_base_unit_nsec = 100000, /* unit: ns */ + .tx_gran_unit_nsec = 4000, /* unit: ns */ + .tx_sleep_cnt = 1000, /* unit: ns */ + .tx_min_activatetime = 0xa, + .rx_filler_enable = 0x2, + .rx_dif_p_nsec = 1000000, /* unit: ns */ + .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ + .rx_base_unit_nsec = 100000, /* unit: ns */ + .rx_gran_unit_nsec = 4000, /* unit: ns */ + .rx_sleep_cnt = 1280, /* unit: ns */ + .rx_stall_cnt = 320, /* unit: ns */ + .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), + 
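+	/* the same coarse SYNC/PREP length capability is used for all HS gears */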
.rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), + .pa_dbg_option_suite = 0x30103, +}; + +static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = { + .uic_attr = &exynos7_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, + .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, + .drv_init = exynosauto_ufs_drv_init, + .post_hce_enable = exynosauto_ufs_post_hce_enable, + .pre_link = exynosauto_ufs_pre_link, + .pre_pwr_change = exynosauto_ufs_pre_pwr_change, + .post_pwr_change = exynosauto_ufs_post_pwr_change, +}; + +static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = { + .vops = &ufs_hba_exynosauto_vh_ops, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCI_QUIRK_BROKEN_HCE | + UFSHCD_QUIRK_BROKEN_UIC_CMD | + UFSHCD_QUIRK_SKIP_PH_CONFIGURATION | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, + .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, +}; + +static const struct exynos_ufs_drv_data exynos_ufs_drvs = { + .uic_attr = &exynos7_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | + UFSHCI_QUIRK_BROKEN_HCE | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING | + UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE, + .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX | + EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB | + EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER, + .drv_init = exynos7_ufs_drv_init, + .pre_link = exynos7_ufs_pre_link, + .post_link = exynos7_ufs_post_link, + .pre_pwr_change = exynos7_ufs_pre_pwr_change, + .post_pwr_change = exynos7_ufs_post_pwr_change, +}; + +static struct exynos_ufs_uic_attr fsd_uic_attr = { + .tx_trailingclks = 0x10, + .tx_dif_p_nsec = 3000000, /* unit: ns */ + .tx_dif_n_nsec = 1000000, /* unit: ns */ + .tx_high_z_cnt_nsec = 20000, /* unit: ns */ + .tx_base_unit_nsec = 100000, /* unit: ns */ + .tx_gran_unit_nsec = 4000, /* unit: ns */ + .tx_sleep_cnt = 1000, /* unit: ns */ + .tx_min_activatetime = 0xa, + .rx_filler_enable = 0x2, + .rx_dif_p_nsec = 1000000, /* unit: ns */ + .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ + .rx_base_unit_nsec = 100000, /* unit: ns */ + .rx_gran_unit_nsec = 4000, /* unit: ns */ + .rx_sleep_cnt = 1280, /* unit: ns */ + .rx_stall_cnt = 320, /* unit: ns */ + .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), + .pa_dbg_option_suite = 0x2E820183, +}; + +static const struct exynos_ufs_drv_data fsd_ufs_drvs = { + .uic_attr = &fsd_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR, + .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, + .pre_link = fsd_ufs_pre_link, + .post_link = fsd_ufs_post_link, + .pre_pwr_change = 
fsd_ufs_pre_pwr_change,
+};
+
+static const struct of_device_id exynos_ufs_of_match[] = {
+	{ .compatible = "samsung,exynos7-ufs",
+	  .data	      = &exynos_ufs_drvs },
+	{ .compatible = "samsung,exynosautov9-ufs",
+	  .data	      = &exynosauto_ufs_drvs },
+	{ .compatible = "samsung,exynosautov9-ufs-vh",
+	  .data	      = &exynosauto_ufs_vh_drvs },
+	{ .compatible = "tesla,fsd-ufs",
+	  .data       = &fsd_ufs_drvs },
+	{},
+};
+
+static const struct dev_pm_ops exynos_ufs_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+	.prepare	 = ufshcd_suspend_prepare,
+	.complete	 = ufshcd_resume_complete,
+};
+
+static struct platform_driver exynos_ufs_pltform = {
+	.probe	= exynos_ufs_probe,
+	.remove	= exynos_ufs_remove,
+	.shutdown = ufshcd_pltfrm_shutdown,
+	.driver = {
+		.name	= "exynos-ufshc",
+		.pm	= &exynos_ufs_pm_ops,
+		.of_match_table = of_match_ptr(exynos_ufs_of_match),
+	},
+};
+module_platform_driver(exynos_ufs_pltform);
+
+MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
+MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
+MODULE_DESCRIPTION("Exynos UFS HCI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
new file mode 100644
index 0000000000..a4bd6646d7
--- /dev/null
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS Host Controller driver for Exynos specific extensions
+ *
+ * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
+ *
+ */
+
+#ifndef _UFS_EXYNOS_H_
+#define _UFS_EXYNOS_H_
+
+/*
+ * UNIPRO registers
+ */
+#define UNIPRO_DBG_FORCE_DME_CTRL_STATE		0x150
+
+/*
+ * MIBs for PA debug registers
+ */
+#define PA_DBG_CLK_PERIOD	0x9514
+#define PA_DBG_TXPHY_CFGUPDT	0x9518
+#define PA_DBG_RXPHY_CFGUPDT	0x9519
+#define PA_DBG_MODE		0x9529
+#define PA_DBG_SKIP_RESET_PHY	0x9539
+#define PA_DBG_AUTOMODE_THLD	0x9536
+#define PA_DBG_OV_TM		0x9540
+#define PA_DBG_SKIP_LINE_RESET	0x9541
+#define PA_DBG_LINE_RESET_REQ	0x9543
+#define PA_DBG_OPTION_SUITE	0x9564
+#define PA_DBG_OPTION_SUITE_DYN	0x9565
+
+/*
+ * MIBs for Transport Layer debug registers
+ */
+#define T_DBG_SKIP_INIT_HIBERN8_EXIT	0xc001
+
+/*
+ * Exynos MPHY attributes
+ */
+#define TX_LINERESET_N_VAL	0x0277
+#define TX_LINERESET_N(v)	(((v) >> 10) & 0xFF)
+#define TX_LINERESET_P_VAL	0x027D
+#define TX_LINERESET_P(v)	(((v) >> 12) & 0xFF)
+#define TX_OV_SLEEP_CNT_TIMER	0x028E
+#define TX_OV_H8_ENTER_EN	(1 << 7)
+#define TX_OV_SLEEP_CNT(v)	(((v) >> 5) & 0x7F)
+#define TX_HIGH_Z_CNT_11_08	0x028C
+#define TX_HIGH_Z_CNT_H(v)	(((v) >> 8) & 0xF)
+#define TX_HIGH_Z_CNT_07_00	0x028D
+#define TX_HIGH_Z_CNT_L(v)	((v) & 0xFF)
+#define TX_BASE_NVAL_07_00	0x0293
+#define TX_BASE_NVAL_L(v)	((v) & 0xFF)
+#define TX_BASE_NVAL_15_08	0x0294
+#define TX_BASE_NVAL_H(v)	(((v) >> 8) & 0xFF)
+#define TX_GRAN_NVAL_07_00	0x0295
+#define TX_GRAN_NVAL_L(v)	((v) & 0xFF)
+#define TX_GRAN_NVAL_10_08	0x0296
+#define TX_GRAN_NVAL_H(v)	(((v) >> 8) & 0x3)
+
+#define VND_TX_CLK_PRD		0xAA
+#define VND_TX_CLK_PRD_EN	0xA9
+#define VND_TX_LINERESET_PVALUE0	0xAD
+#define VND_TX_LINERESET_PVALUE1	0xAC
+#define VND_TX_LINERESET_PVALUE2	0xAB
+
+#define TX_LINE_RESET_TIME	3200
+
+#define VND_RX_CLK_PRD		0x12
+#define VND_RX_CLK_PRD_EN	0x11
+#define VND_RX_LINERESET_VALUE0	0x1D
+#define VND_RX_LINERESET_VALUE1	0x1C
+#define VND_RX_LINERESET_VALUE2	0x1B
+
+#define RX_LINE_RESET_TIME	1000
+
+#define RX_FILLER_ENABLE	0x0316
+#define RX_FILLER_EN		(1 << 1)
+#define RX_LINERESET_VAL	0x0317
+#define RX_LINERESET(v)	(((v) >> 12)
& 0xFF) +#define RX_LCC_IGNORE 0x0318 +#define RX_SYNC_MASK_LENGTH 0x0321 +#define RX_HIBERN8_WAIT_VAL_BIT_20_16 0x0331 +#define RX_HIBERN8_WAIT_VAL_BIT_15_08 0x0332 +#define RX_HIBERN8_WAIT_VAL_BIT_07_00 0x0333 +#define RX_OV_SLEEP_CNT_TIMER 0x0340 +#define RX_OV_SLEEP_CNT(v) (((v) >> 6) & 0x1F) +#define RX_OV_STALL_CNT_TIMER 0x0341 +#define RX_OV_STALL_CNT(v) (((v) >> 4) & 0xFF) +#define RX_BASE_NVAL_07_00 0x0355 +#define RX_BASE_NVAL_L(v) ((v) & 0xFF) +#define RX_BASE_NVAL_15_08 0x0354 +#define RX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF) +#define RX_GRAN_NVAL_07_00 0x0353 +#define RX_GRAN_NVAL_L(v) ((v) & 0xFF) +#define RX_GRAN_NVAL_10_08 0x0352 +#define RX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3) + +#define CMN_PWM_CLK_CTRL 0x0402 +#define PWM_CLK_CTRL_MASK 0x3 + +#define IATOVAL_NSEC 20000 /* unit: ns */ +#define UNIPRO_PCLK_PERIOD(ufs) (NSEC_PER_SEC / ufs->pclk_rate) + +struct exynos_ufs; + +/* vendor specific pre-defined parameters */ +#define SLOW 1 +#define FAST 2 + +#define RX_ADV_FINE_GRAN_SUP_EN 0x1 +#define RX_ADV_FINE_GRAN_STEP_VAL 0x3 +#define RX_ADV_MIN_ACTV_TIME_CAP 0x9 + +#define PA_GRANULARITY_VAL 0x6 +#define PA_TACTIVATE_VAL 0x3 +#define PA_HIBERN8TIME_VAL 0x20 + +#define PCLK_AVAIL_MIN 70000000 +#define PCLK_AVAIL_MAX 167000000 + +struct exynos_ufs_uic_attr { + /* TX Attributes */ + unsigned int tx_trailingclks; + unsigned int tx_dif_p_nsec; + unsigned int tx_dif_n_nsec; + unsigned int tx_high_z_cnt_nsec; + unsigned int tx_base_unit_nsec; + unsigned int tx_gran_unit_nsec; + unsigned int tx_sleep_cnt; + unsigned int tx_min_activatetime; + /* RX Attributes */ + unsigned int rx_filler_enable; + unsigned int rx_dif_p_nsec; + unsigned int rx_hibern8_wait_nsec; + unsigned int rx_base_unit_nsec; + unsigned int rx_gran_unit_nsec; + unsigned int rx_sleep_cnt; + unsigned int rx_stall_cnt; + unsigned int rx_hs_g1_sync_len_cap; + unsigned int rx_hs_g2_sync_len_cap; + unsigned int rx_hs_g3_sync_len_cap; + unsigned int rx_hs_g1_prep_sync_len_cap; + unsigned int rx_hs_g2_prep_sync_len_cap; + unsigned int rx_hs_g3_prep_sync_len_cap; + /* Common Attributes */ + unsigned int cmn_pwm_clk_ctrl; + /* Internal Attributes */ + unsigned int pa_dbg_option_suite; + /* Changeable Attributes */ + unsigned int rx_adv_fine_gran_sup_en; + unsigned int rx_adv_fine_gran_step; + unsigned int rx_min_actv_time_cap; + unsigned int rx_hibern8_time_cap; + unsigned int rx_adv_min_actv_time_cap; + unsigned int rx_adv_hibern8_time_cap; + unsigned int pa_granularity; + unsigned int pa_tactivate; + unsigned int pa_hibern8time; +}; + +struct exynos_ufs_drv_data { + const struct ufs_hba_variant_ops *vops; + struct exynos_ufs_uic_attr *uic_attr; + unsigned int quirks; + unsigned int opts; + /* SoC's specific operations */ + int (*drv_init)(struct device *dev, struct exynos_ufs *ufs); + int (*pre_link)(struct exynos_ufs *ufs); + int (*post_link)(struct exynos_ufs *ufs); + int (*pre_pwr_change)(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr); + int (*post_pwr_change)(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr); + int (*pre_hce_enable)(struct exynos_ufs *ufs); + int (*post_hce_enable)(struct exynos_ufs *ufs); +}; + +struct ufs_phy_time_cfg { + u32 tx_linereset_p; + u32 tx_linereset_n; + u32 tx_high_z_cnt; + u32 tx_base_n_val; + u32 tx_gran_n_val; + u32 tx_sleep_cnt; + u32 rx_linereset; + u32 rx_hibern8_wait; + u32 rx_base_n_val; + u32 rx_gran_n_val; + u32 rx_sleep_cnt; + u32 rx_stall_cnt; +}; + +struct exynos_ufs { + struct ufs_hba *hba; + struct phy *phy; + void __iomem *reg_hci; + void __iomem *reg_unipro; 
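+	/* "ufsp" (UFS protector) register window, mapped in exynos_ufs_init() */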
+	void __iomem *reg_ufsp;
+	struct clk *clk_hci_core;
+	struct clk *clk_unipro_main;
+	struct clk *clk_apb;
+	u32 pclk_rate;
+	u32 pclk_div;
+	u32 pclk_avail_min;
+	u32 pclk_avail_max;
+	unsigned long mclk_rate;
+	int avail_ln_rx;
+	int avail_ln_tx;
+	int rx_sel_idx;
+	struct ufs_pa_layer_attr dev_req_params;
+	struct ufs_phy_time_cfg t_cfg;
+	ktime_t entry_hibern8_t;
+	const struct exynos_ufs_drv_data *drv_data;
+	struct regmap *sysreg;
+	u32 shareability_reg_offset;
+
+	u32 opts;
+#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL		BIT(0)
+#define EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB	BIT(1)
+#define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL	BIT(2)
+#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX	BIT(3)
+#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER	BIT(4)
+#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR	BIT(5)
+};
+
+#define for_each_ufs_rx_lane(ufs, i) \
+	for (i = (ufs)->rx_sel_idx; \
+	     i < (ufs)->rx_sel_idx + (ufs)->avail_ln_rx; i++)
+#define for_each_ufs_tx_lane(ufs, i) \
+	for (i = 0; i < (ufs)->avail_ln_tx; i++)
+
+#define EXYNOS_UFS_MMIO_FUNC(name) \
+static inline void name##_writel(struct exynos_ufs *ufs, u32 val, u32 reg)\
+{ \
+	writel(val, ufs->reg_##name + reg); \
+} \
+ \
+static inline u32 name##_readl(struct exynos_ufs *ufs, u32 reg) \
+{ \
+	return readl(ufs->reg_##name + reg); \
+}
+
+EXYNOS_UFS_MMIO_FUNC(hci);
+EXYNOS_UFS_MMIO_FUNC(unipro);
+EXYNOS_UFS_MMIO_FUNC(ufsp);
+#undef EXYNOS_UFS_MMIO_FUNC
+
+long exynos_ufs_calc_time_cntr(struct exynos_ufs *, long);
+
+static inline void exynos_ufs_enable_ov_tm(struct ufs_hba *hba)
+{
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), true);
+}
+
+static inline void exynos_ufs_disable_ov_tm(struct ufs_hba *hba)
+{
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), false);
+}
+
+static inline void exynos_ufs_enable_dbg_mode(struct ufs_hba *hba)
+{
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), true);
+}
+
+static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
+{
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), false);
+}
+
+#endif /* _UFS_EXYNOS_H_ */
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
new file mode 100644
index 0000000000..2eed13bc82
--- /dev/null
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon Hixxxx UFS Driver
+ *
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/unipro.h>
+#include "ufs-hisi.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
+
+static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
+{
+	int err = 0;
+	u32 tx_fsm_val_0 = 0;
+	u32 tx_fsm_val_1 = 0;
+	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+	do {
+		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+				     &tx_fsm_val_0);
+		err |= ufshcd_dme_get(hba,
+		    UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+		if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
+			tx_fsm_val_1 == TX_FSM_HIBERN8))
+			break;
+
+		/* sleep for max. 200us */
+		usleep_range(100, 200);
+	} while (time_before(jiffies, timeout));
+
+	/*
+	 * we might have been scheduled out for a long time during polling,
+	 * so check the state again.
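+	 * HBRN8_POLL_TOUT_MS bounds the whole poll loop above.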
+ */ + if (time_after(jiffies, timeout)) { + err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0), + &tx_fsm_val_0); + err |= ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1); + } + + if (err) { + dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", + __func__, err); + } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 || + tx_fsm_val_1 != TX_FSM_HIBERN8) { + err = -1; + dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n", + __func__, tx_fsm_val_0, tx_fsm_val_1); + } + + return err; +} + +static void ufs_hisi_clk_init(struct ufs_hba *hba) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); + if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN) + mdelay(1); + /* use abb clk */ + ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL); + ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN); + /* open mphy ref clk */ + ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); +} + +static void ufs_hisi_soc_init(struct ufs_hba *hba) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + u32 reg; + + if (!IS_ERR(host->rst)) + reset_control_assert(host->rst); + + /* HC_PSW powerup */ + ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL); + udelay(10); + /* notify PWR ready */ + ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL); + ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0, + UFS_DEVICE_RESET_CTRL); + + reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL); + reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK; + /* set cfg clk freq */ + ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL); + /* set ref clk freq */ + ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL); + /* bypass ufs clk gate */ + ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS, + CLOCK_GATE_BYPASS); + ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL); + + /* open psw clk */ + ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL); + /* disable ufshc iso */ + ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL); + /* disable phy iso */ + ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN); + /* notice iso disable */ + ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL); + + /* disable lp_reset_n */ + ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN); + mdelay(1); + + ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET, + UFS_DEVICE_RESET_CTRL); + + msleep(20); + + /* + * enable the fix of linereset recovery, + * and enable rx_reset/tx_rest beat + * enable ref_clk_en override(bit5) & + * override value = 1(bit4), with mask + */ + ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL); + + if (!IS_ERR(host->rst)) + reset_control_deassert(host->rst); +} + +static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + int err; + uint32_t value; + uint32_t reg; + + /* Unipro VS_mphy_disable */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1); + /* PA_HSSeries */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2); + /* MPHY CBRATESEL */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1); + /* MPHY CBOVRCTRL2 */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D); + /* MPHY CBOVRCTRL3 */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1); + + if (host->caps & UFS_HISI_CAP_PHY10nm) { + /* MPHY 
CBOVRCTRL4 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
+		/* MPHY CBOVRCTRL5 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
+	}
+
+	/* Unipro VS_MphyCfgUpdt */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+	/* MPHY RXOVRCTRL4 rx0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
+	/* MPHY RXOVRCTRL4 rx1 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
+	/* MPHY RXOVRCTRL5 rx0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
+	/* MPHY RXOVRCTRL5 rx1 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
+	/* MPHY RXSQCONTROL rx0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+	/* MPHY RXSQCONTROL rx1 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+	/* Unipro VS_MphyCfgUpdt */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
+	} else {
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+	}
+
+	/* Gear3 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
+	/* Gear3 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
+	/* Gear2 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
+	/* Gear2 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
+	/* Gear1 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
+	/* Gear1 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
+	/* Thibernate Tx */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
+	/* Thibernate Tx */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+	/* Unipro VS_mphy_disable */
+	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
+	if (value != 0x1)
+		dev_info(hba->dev,
+			 "Warning: 
Unipro VS_mphy_disable is 0x%x\n", value); + + /* Unipro VS_mphy_disable */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0); + err = ufs_hisi_check_hibern8(hba); + if (err) + dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n"); + + if (!(host->caps & UFS_HISI_CAP_PHY10nm)) + ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV); + + /* disable auto H8 */ + reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); + reg = reg & (~UFS_AHIT_AH8ITV_MASK); + ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER); + + /* Unipro PA_Local_TX_LCC_Enable */ + ufshcd_disable_host_tx_lcc(hba); + /* close Unipro VS_Mk2ExtnSupport */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0); + ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value); + if (value != 0) { + /* Ensure close success */ + dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n"); + } + + return err; +} + +static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + /* Unipro DL_AFC0CreditThreshold */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0); + /* Unipro DL_TC0OutAckThreshold */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0); + /* Unipro DL_TC0TXFCThreshold */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9); + + /* not bypass ufs clk gate */ + ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS, + CLOCK_GATE_BYPASS); + ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS, + UFS_SYSCTRL); + + /* select received symbol cnt */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000); + /* reset counter0 and enable */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005); + + return 0; +} + +static int ufs_hisi_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + switch (status) { + case PRE_CHANGE: + err = ufs_hisi_link_startup_pre_change(hba); + break; + case POST_CHANGE: + err = ufs_hisi_link_startup_post_change(hba); + break; + default: + break; + } + + return err; +} + +static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param) +{ + ufshcd_init_pwr_dev_param(hisi_param); +} + +static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + if (host->caps & UFS_HISI_CAP_PHY10nm) { + /* + * Boston platform need to set SaveConfigTime to 0x13, + * and change sync length to maximum value + */ + /* VS_DebugSaveConfigTime */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13); + /* g1 sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f); + /* g2 sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f); + /* g3 sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f); + /* PA_Hibern8Time */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA); + /* PA_Tactivate */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01); + } + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) { + pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n"); + /* VS_DebugSaveConfigTime */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10); + /* sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48); + } + + /* update */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); + /* PA_TxSkip */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0); + /*PA_PWRModeUserData0 = 8191, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191); + /*PA_PWRModeUserData1 = 65535, default is 0*/ + ufshcd_dme_set(hba, 
UIC_ARG_MIB(0x15b1), 65535); + /*PA_PWRModeUserData2 = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767); + /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191); + /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535); + /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767); + /*PA_PWRModeUserData3 = 8191, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191); + /*PA_PWRModeUserData4 = 65535, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535); + /*PA_PWRModeUserData5 = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767); + /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191); + /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535); + /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767); +} + +static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_dev_params ufs_hisi_cap; + int ret = 0; + + if (!dev_req_params) { + dev_err(hba->dev, + "%s: incoming dev_req_params is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + switch (status) { + case PRE_CHANGE: + ufs_hisi_set_dev_cap(&ufs_hisi_cap); + ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap, + dev_max_params, dev_req_params); + if (ret) { + dev_err(hba->dev, + "%s: failed to determine capabilities\n", __func__); + goto out; + } + + ufs_hisi_pwr_change_pre_change(hba); + break; + case POST_CHANGE: + break; + default: + ret = -EINVAL; + break; + } +out: + return ret; +} + +static int ufs_hisi_suspend_prepare(struct device *dev) +{ + /* RPM and SPM are different. 
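+ * runtime suspend leaves the ref clock untouched, while system
+ * suspend gates it.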
Refer ufs_hisi_suspend() */ + return __ufshcd_suspend_prepare(dev, false); +} + +static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + enum ufs_notify_change_status status) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + if (status == PRE_CHANGE) + return 0; + + if (pm_op == UFS_RUNTIME_PM) + return 0; + + if (host->in_suspend) { + WARN_ON(1); + return 0; + } + + ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); + udelay(10); + /* set ref_dig_clk override of PHY PCS to 0 */ + ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL); + + host->in_suspend = true; + + return 0; +} + +static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + if (!host->in_suspend) + return 0; + + /* set ref_dig_clk override of PHY PCS to 1 */ + ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL); + udelay(10); + ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); + + host->in_suspend = false; + return 0; +} + +static int ufs_hisi_get_resource(struct ufs_hisi_host *host) +{ + struct device *dev = host->hba->dev; + struct platform_device *pdev = to_platform_device(dev); + + /* get resource of ufs sys ctrl */ + host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1); + return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl); +} + +static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba) +{ + hba->rpm_lvl = UFS_PM_LVL_1; + hba->spm_lvl = UFS_PM_LVL_3; +} + +/** + * ufs_hisi_init_common + * @hba: host controller instance + */ +static int ufs_hisi_init_common(struct ufs_hba *hba) +{ + int err = 0; + struct device *dev = hba->dev; + struct ufs_hisi_host *host; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->hba = hba; + ufshcd_set_variant(hba, host); + + host->rst = devm_reset_control_get(dev, "rst"); + if (IS_ERR(host->rst)) { + dev_err(dev, "%s: failed to get reset control\n", __func__); + err = PTR_ERR(host->rst); + goto error; + } + + ufs_hisi_set_pm_lvl(hba); + + err = ufs_hisi_get_resource(host); + if (err) + goto error; + + return 0; + +error: + ufshcd_set_variant(hba, NULL); + return err; +} + +static int ufs_hi3660_init(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + + ret = ufs_hisi_init_common(hba); + if (ret) { + dev_err(dev, "%s: ufs common init fail\n", __func__); + return ret; + } + + ufs_hisi_clk_init(hba); + + ufs_hisi_soc_init(hba); + + return 0; +} + +static int ufs_hi3670_init(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_hisi_host *host; + + ret = ufs_hisi_init_common(hba); + if (ret) { + dev_err(dev, "%s: ufs common init fail\n", __func__); + return ret; + } + + ufs_hisi_clk_init(hba); + + ufs_hisi_soc_init(hba); + + /* Add cap for 10nm PHY variant on HI3670 SoC */ + host = ufshcd_get_variant(hba); + host->caps |= UFS_HISI_CAP_PHY10nm; + + return 0; +} + +static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { + .name = "hi3660", + .init = ufs_hi3660_init, + .link_startup_notify = ufs_hisi_link_startup_notify, + .pwr_change_notify = ufs_hisi_pwr_change_notify, + .suspend = ufs_hisi_suspend, + .resume = ufs_hisi_resume, +}; + +static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = { + .name = "hi3670", + .init = ufs_hi3670_init, + .link_startup_notify = ufs_hisi_link_startup_notify, + .pwr_change_notify = ufs_hisi_pwr_change_notify, + .suspend = ufs_hisi_suspend, + .resume = ufs_hisi_resume, +}; + +static const struct 
of_device_id ufs_hisi_of_match[] = { + { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops }, + { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops }, + {}, +}; + +MODULE_DEVICE_TABLE(of, ufs_hisi_of_match); + +static int ufs_hisi_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + + of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node); + + return ufshcd_pltfrm_init(pdev, of_id->data); +} + +static int ufs_hisi_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + ufshcd_remove(hba); + return 0; +} + +static const struct dev_pm_ops ufs_hisi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufs_hisi_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver ufs_hisi_pltform = { + .probe = ufs_hisi_probe, + .remove = ufs_hisi_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "ufshcd-hisi", + .pm = &ufs_hisi_pm_ops, + .of_match_table = of_match_ptr(ufs_hisi_of_match), + }, +}; +module_platform_driver(ufs_hisi_pltform); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ufshcd-hisi"); +MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver"); diff --git a/drivers/ufs/host/ufs-hisi.h b/drivers/ufs/host/ufs-hisi.h new file mode 100644 index 0000000000..5a90c0f4e9 --- /dev/null +++ b/drivers/ufs/host/ufs-hisi.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017, HiSilicon. All rights reserved. + */ + +#ifndef UFS_HISI_H_ +#define UFS_HISI_H_ + +#define HBRN8_POLL_TOUT_MS 1000 + +/* + * ufs sysctrl specific define + */ +#define PSW_POWER_CTRL (0x04) +#define PHY_ISO_EN (0x08) +#define HC_LP_CTRL (0x0C) +#define PHY_CLK_CTRL (0x10) +#define PSW_CLK_CTRL (0x14) +#define CLOCK_GATE_BYPASS (0x18) +#define RESET_CTRL_EN (0x1C) +#define UFS_SYSCTRL (0x5C) +#define UFS_DEVICE_RESET_CTRL (0x60) + +#define BIT_UFS_PSW_ISO_CTRL (1 << 16) +#define BIT_UFS_PSW_MTCMOS_EN (1 << 0) +#define BIT_UFS_REFCLK_ISO_EN (1 << 16) +#define BIT_UFS_PHY_ISO_CTRL (1 << 0) +#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16) +#define BIT_SYSCTRL_PWR_READY (1 << 8) +#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24) +#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8) +#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF) +#define UFS_FREQ_CFG_CLK (0x39) +#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4) +#define MASK_UFS_CLK_GATE_BYPASS (0x3F) +#define BIT_SYSCTRL_LP_RESET_N (1 << 0) +#define BIT_UFS_REFCLK_SRC_SEl (1 << 0) +#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16) +#define MASK_UFS_DEVICE_RESET (0x1 << 16) +#define BIT_UFS_DEVICE_RESET (0x1) + +/* + * M-TX Configuration Attributes for Hixxxx + */ +#define MPHY_TX_FSM_STATE 0x41 +#define TX_FSM_HIBERN8 0x1 + +/* + * Hixxxx UFS HC specific Registers + */ +enum { + UFS_REG_OCPTHRTL = 0xc0, + UFS_REG_OOCPR = 0xc4, + + UFS_REG_CDACFG = 0xd0, + UFS_REG_CDATX1 = 0xd4, + UFS_REG_CDATX2 = 0xd8, + UFS_REG_CDARX1 = 0xdc, + UFS_REG_CDARX2 = 0xe0, + UFS_REG_CDASTA = 0xe4, + + UFS_REG_LBMCFG = 0xf0, + UFS_REG_LBMSTA = 0xf4, + UFS_REG_UFSMODE = 0xf8, + + UFS_REG_HCLKDIV = 0xfc, +}; + +/* AHIT - Auto-Hibernate Idle Timer */ +#define UFS_AHIT_AH8ITV_MASK 0x3FF + +/* REG UFS_REG_OCPTHRTL definition */ +#define UFS_HCLKDIV_NORMAL_VALUE 0xE4 + +/* vendor specific pre-defined parameters */ +#define SLOW 1 +#define FAST 2 + +#define UFS_HISI_CAP_RESERVED BIT(0) +#define UFS_HISI_CAP_PHY10nm BIT(1) + +struct ufs_hisi_host { + struct 
ufs_hba *hba;
+	void __iomem *ufs_sys_ctrl;
+
+	struct reset_control *rst;
+
+	uint64_t caps;
+
+	bool in_suspend;
+};
+
+#define ufs_sys_ctrl_writel(host, val, reg) \
+	writel((val), (host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_set_bits(host, mask, reg) \
+	ufs_sys_ctrl_writel( \
+		(host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
+	ufs_sys_ctrl_writel((host), \
+		((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
+		(reg))
+
+#endif /* UFS_HISI_H_ */
diff --git a/drivers/ufs/host/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h
new file mode 100644
index 0000000000..7e010848dc
--- /dev/null
+++ b/drivers/ufs/host/ufs-mediatek-trace.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufs_mtk
+
+#if !defined(_TRACE_EVENT_UFS_MEDIATEK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_UFS_MEDIATEK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ufs_mtk_event,
+	TP_PROTO(unsigned int type, unsigned int data),
+	TP_ARGS(type, data),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, type)
+		__field(unsigned int, data)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->data = data;
+	),
+
+	TP_printk("ufs:event=%u data=%u",
+		  __entry->type, __entry->data)
+	);
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/host
+#define TRACE_INCLUDE_FILE ufs-mediatek-trace
+#include <trace/define_trace.h>
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
new file mode 100644
index 0000000000..c958279bdd
--- /dev/null
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -0,0 +1,1478 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Authors:
+ *	Stanley Chu <stanley.chu@mediatek.com>
+ *	Peter Wang <peter.wang@mediatek.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/sched/clock.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
+#include "ufs-mediatek.h"
+
+#define CREATE_TRACE_POINTS
+#include "ufs-mediatek-trace.h"
+
+static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
+	{ .wmanufacturerid = UFS_ANY_VENDOR,
+	  .model = UFS_ANY_MODEL,
+	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
+		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
+	  .model = "H9HQ21AFAMZDAR",
+	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
+	{}
+};
+
+static const struct of_device_id ufs_mtk_of_match[] = {
+	{ .compatible = "mediatek,mt8183-ufshci" },
+	{},
+};
+
+static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+}
+
+static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+}
+
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+}
+
+static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+}
+
+static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
+{
+	u32 tmp;
+
+	if (enable) {
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+		tmp = tmp |
+		      (1 << RX_SYMBOL_CLK_GATE_EN) |
+		      (1 << SYS_CLK_GATE_EN) |
+		      (1 << TX_CLK_GATE_EN);
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+	} else {
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
+			      (1 << SYS_CLK_GATE_EN) |
+			      (1 << TX_CLK_GATE_EN));
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+	}
+}
+
+static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
+{
+	struct arm_smccc_res res;
+
+	ufs_mtk_crypto_ctrl(res, 1);
+	if (res.a0) {
+		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
+			 __func__, res.a0);
+		hba->caps &= ~UFSHCD_CAP_CRYPTO;
+	}
+}
+
+static void ufs_mtk_host_reset(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	reset_control_assert(host->hci_reset);
+	reset_control_assert(host->crypto_reset);
+	reset_control_assert(host->unipro_reset);
+
+	usleep_range(100, 110);
+
+	reset_control_deassert(host->unipro_reset);
+	reset_control_deassert(host->crypto_reset);
+	reset_control_deassert(host->hci_reset);
+}
+
+static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
+				       struct reset_control **rc,
+				       char *str)
+{
+	*rc = devm_reset_control_get(hba->dev, str);
+	if (IS_ERR(*rc)) {
+		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
+			 str, PTR_ERR(*rc));
+		*rc = NULL;
+	}
+}
+
+static void ufs_mtk_init_reset(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
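+	/*
+	 * All three reset lines are optional: a failed lookup below is only
+	 * logged and leaves the handle NULL, which the reset_control_*()
+	 * calls in ufs_mtk_host_reset() accept as a no-op.
+	 */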
ufs_mtk_init_reset_control(hba, &host->hci_reset, + "hci_rst"); + ufs_mtk_init_reset_control(hba, &host->unipro_reset, + "unipro_rst"); + ufs_mtk_init_reset_control(hba, &host->crypto_reset, + "crypto_rst"); +} + +static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (status == PRE_CHANGE) { + if (host->unipro_lpm) { + hba->vps->hba_enable_delay_us = 0; + } else { + hba->vps->hba_enable_delay_us = 600; + ufs_mtk_host_reset(hba); + } + + if (hba->caps & UFSHCD_CAP_CRYPTO) + ufs_mtk_crypto_enable(hba); + + if (host->caps & UFS_MTK_CAP_DISABLE_AH8) { + ufshcd_writel(hba, 0, + REG_AUTO_HIBERNATE_IDLE_TIMER); + hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT; + hba->ahit = 0; + } + + /* + * Turn on CLK_CG early to bypass abnormal ERR_CHK signal + * to prevent host hang issue + */ + ufshcd_writel(hba, + ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, + REG_UFS_XOUFS_CTRL); + } + + return 0; +} + +static int ufs_mtk_bind_mphy(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct device *dev = hba->dev; + struct device_node *np = dev->of_node; + int err = 0; + + host->mphy = devm_of_phy_get_by_index(dev, np, 0); + + if (host->mphy == ERR_PTR(-EPROBE_DEFER)) { + /* + * UFS driver might be probed before the phy driver does. + * In that case we would like to return EPROBE_DEFER code. + */ + err = -EPROBE_DEFER; + dev_info(dev, + "%s: required phy hasn't probed yet. err = %d\n", + __func__, err); + } else if (IS_ERR(host->mphy)) { + err = PTR_ERR(host->mphy); + if (err != -ENODEV) { + dev_info(dev, "%s: PHY get failed %d\n", __func__, + err); + } + } + + if (err) + host->mphy = NULL; + /* + * Allow unbound mphy because not every platform needs specific + * mphy control. 
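+	 * Treating -ENODEV as success lets those platforms probe with
+	 * host->mphy left NULL.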
+ */ + if (err == -ENODEV) + err = 0; + + return err; +} + +static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; + ktime_t timeout, time_checked; + u32 value; + + if (host->ref_clk_enabled == on) + return 0; + + ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res); + + if (on) { + ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); + } else { + ufshcd_delay_us(host->ref_clk_gating_wait_us, 10); + ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); + } + + /* Wait for ack */ + timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US); + do { + time_checked = ktime_get(); + value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); + + /* Wait until ack bit equals to req bit */ + if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST)) + goto out; + + usleep_range(100, 200); + } while (ktime_before(time_checked, timeout)); + + dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); + + ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res); + + return -ETIMEDOUT; + +out: + host->ref_clk_enabled = on; + if (on) + ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10); + + ufs_mtk_ref_clk_notify(on, POST_CHANGE, res); + + return 0; +} + +static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba, + u16 gating_us) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (hba->dev_info.clk_gating_wait_us) { + host->ref_clk_gating_wait_us = + hba->dev_info.clk_gating_wait_us; + } else { + host->ref_clk_gating_wait_us = gating_us; + } + + host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US; +} + +static void ufs_mtk_dbg_sel(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (((host->ip_ver >> 16) & 0xFF) >= 0x36) { + ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); + ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); + ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); + ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2); + ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3); + } else { + ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); + } +} + +static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, + unsigned long retry_ms) +{ + u64 timeout, time_checked; + u32 val, sm; + bool wait_idle; + + /* cannot use plain ktime_get() in suspend */ + timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL; + + /* wait a specific time after check base */ + udelay(10); + wait_idle = false; + + do { + time_checked = ktime_get_mono_fast_ns(); + ufs_mtk_dbg_sel(hba); + val = ufshcd_readl(hba, REG_UFS_PROBE); + + sm = val & 0x1f; + + /* + * if state is in H8 enter and H8 enter confirm + * wait until return to idle state. + */ + if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) { + wait_idle = true; + udelay(50); + continue; + } else if (!wait_idle) + break; + + if (wait_idle && (sm == VS_HCE_BASE)) + break; + } while (time_checked < timeout); + + if (wait_idle && sm != VS_HCE_BASE) + dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); +} + +static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, + unsigned long max_wait_ms) +{ + ktime_t timeout, time_checked; + u32 val; + + timeout = ktime_add_ms(ktime_get(), max_wait_ms); + do { + time_checked = ktime_get(); + ufs_mtk_dbg_sel(hba); + val = ufshcd_readl(hba, REG_UFS_PROBE); + val = val >> 28; + + if (val == state) + return 0; + + /* Sleep for max. 
200us */
+		usleep_range(100, 200);
+	} while (ktime_before(time_checked, timeout));
+
+	if (val == state)
+		return 0;
+
+	return -ETIMEDOUT;
+}
+
+static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	struct phy *mphy = host->mphy;
+	struct arm_smccc_res res;
+	int ret = 0;
+
+	if (!mphy || !(on ^ host->mphy_powered_on))
+		return 0;
+
+	if (on) {
+		if (ufs_mtk_is_va09_supported(hba)) {
+			ret = regulator_enable(host->reg_va09);
+			if (ret < 0)
+				goto out;
+			/* wait 200 us to stabilize VA09 */
+			usleep_range(200, 210);
+			ufs_mtk_va09_pwr_ctrl(res, 1);
+		}
+		phy_power_on(mphy);
+	} else {
+		phy_power_off(mphy);
+		if (ufs_mtk_is_va09_supported(hba)) {
+			ufs_mtk_va09_pwr_ctrl(res, 0);
+			ret = regulator_disable(host->reg_va09);
+			if (ret < 0)
+				goto out;
+		}
+	}
+out:
+	if (ret) {
+		dev_info(hba->dev,
+			 "failed to %s va09: %d\n",
+			 on ? "enable" : "disable",
+			 ret);
+	} else {
+		host->mphy_powered_on = on;
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
+				struct clk **clk_out)
+{
+	struct clk *clk;
+	int err = 0;
+
+	clk = devm_clk_get(dev, name);
+	if (IS_ERR(clk))
+		err = PTR_ERR(clk);
+	else
+		*clk_out = clk;
+
+	return err;
+}
+
+static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	struct ufs_mtk_crypt_cfg *cfg;
+	struct regulator *reg;
+	int volt, ret;
+
+	if (!ufs_mtk_is_boost_crypt_enabled(hba))
+		return;
+
+	cfg = host->crypt;
+	volt = cfg->vcore_volt;
+	reg = cfg->reg_vcore;
+
+	ret = clk_prepare_enable(cfg->clk_crypt_mux);
+	if (ret) {
+		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
+			 ret);
+		return;
+	}
+
+	if (boost) {
+		ret = regulator_set_voltage(reg, volt, INT_MAX);
+		if (ret) {
+			dev_info(hba->dev,
+				 "failed to set vcore to %d\n", volt);
+			goto out;
+		}
+
+		ret = clk_set_parent(cfg->clk_crypt_mux,
+				     cfg->clk_crypt_perf);
+		if (ret) {
+			dev_info(hba->dev,
+				 "failed to set clk_crypt_perf\n");
+			regulator_set_voltage(reg, 0, INT_MAX);
+			goto out;
+		}
+	} else {
+		ret = clk_set_parent(cfg->clk_crypt_mux,
+				     cfg->clk_crypt_lp);
+		if (ret) {
+			dev_info(hba->dev,
+				 "failed to set clk_crypt_lp\n");
+			goto out;
+		}
+
+		ret = regulator_set_voltage(reg, 0, INT_MAX);
+		if (ret) {
+			dev_info(hba->dev,
+				 "failed to set vcore to MIN\n");
+		}
+	}
+out:
+	clk_disable_unprepare(cfg->clk_crypt_mux);
+}
+
+static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
+				 struct clk **clk)
+{
+	int ret;
+
+	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
+	if (ret) {
+		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
+			 name, ret);
+	}
+
+	return ret;
+}
+
+static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	struct ufs_mtk_crypt_cfg *cfg;
+	struct device *dev = hba->dev;
+	struct regulator *reg;
+	u32 volt;
+
+	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
+				   GFP_KERNEL);
+	if (!host->crypt)
+		goto disable_caps;
+
+	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+	if (IS_ERR(reg)) {
+		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+			 PTR_ERR(reg));
+		goto disable_caps;
+	}
+
+	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
+				 &volt)) {
+		dev_info(dev, "failed to get boost-crypt-vcore-min");
+		goto disable_caps;
+	}
+
+	cfg = host->crypt;
+	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
+				  &cfg->clk_crypt_mux))
+		goto disable_caps;
+
+	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
+				  &cfg->clk_crypt_lp))
+		
goto disable_caps; + + if (ufs_mtk_init_host_clk(hba, "crypt_perf", + &cfg->clk_crypt_perf)) + goto disable_caps; + + cfg->reg_vcore = reg; + cfg->vcore_volt = volt; + host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE; + +disable_caps: + return; +} + +static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + host->reg_va09 = regulator_get(hba->dev, "va09"); + if (IS_ERR(host->reg_va09)) + dev_info(hba->dev, "failed to get va09"); + else + host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL; +} + +static void ufs_mtk_init_host_caps(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct device_node *np = hba->dev->of_node; + + if (of_property_read_bool(np, "mediatek,ufs-boost-crypt")) + ufs_mtk_init_boost_crypt(hba); + + if (of_property_read_bool(np, "mediatek,ufs-support-va09")) + ufs_mtk_init_va09_pwr_ctrl(hba); + + if (of_property_read_bool(np, "mediatek,ufs-disable-ah8")) + host->caps |= UFS_MTK_CAP_DISABLE_AH8; + + if (of_property_read_bool(np, "mediatek,ufs-broken-vcc")) + host->caps |= UFS_MTK_CAP_BROKEN_VCC; + + if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto")) + host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO; + + dev_info(hba->dev, "caps: 0x%x", host->caps); +} + +static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (!host || !host->pm_qos_init) + return; + + cpu_latency_qos_update_request(&host->pm_qos_req, + boost ? 0 : PM_QOS_DEFAULT_VALUE); +} + +static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (on) { + phy_power_on(host->mphy); + ufs_mtk_setup_ref_clk(hba, on); + ufs_mtk_boost_crypt(hba, on); + ufs_mtk_boost_pm_qos(hba, on); + } else { + ufs_mtk_boost_pm_qos(hba, on); + ufs_mtk_boost_crypt(hba, on); + ufs_mtk_setup_ref_clk(hba, on); + phy_power_off(host->mphy); + } +} + +/** + * ufs_mtk_setup_clocks - enables/disable clocks + * @hba: host controller instance + * @on: If true, enable clocks else disable them. + * @status: PRE_CHANGE or POST_CHANGE notify + * + * Returns 0 on success, non-zero on failure. + */ +static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + bool clk_pwr_off = false; + int ret = 0; + + /* + * In case ufs_mtk_init() is not yet done, simply ignore. + * This ufs_mtk_setup_clocks() shall be called from + * ufs_mtk_init() after init is done. + */ + if (!host) + return 0; + + if (!on && status == PRE_CHANGE) { + if (ufshcd_is_link_off(hba)) { + clk_pwr_off = true; + } else if (ufshcd_is_link_hibern8(hba) || + (!ufshcd_can_hibern8_during_gating(hba) && + ufshcd_is_auto_hibern8_enabled(hba))) { + /* + * Gate ref-clk and poweroff mphy if link state is in + * OFF or Hibern8 by either Auto-Hibern8 or + * ufshcd_link_state_transition(). 
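+			 * Give the link up to 15 ms to settle in Hibern8
+			 * (see the wait below) before gating ref-clk and
+			 * powering off the mphy.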
+ */ + ret = ufs_mtk_wait_link_state(hba, + VS_LINK_HIBERN8, + 15); + if (!ret) + clk_pwr_off = true; + } + + if (clk_pwr_off) + ufs_mtk_pwr_ctrl(hba, false); + } else if (on && status == POST_CHANGE) { + ufs_mtk_pwr_ctrl(hba, true); + } + + return ret; +} + +static void ufs_mtk_get_controller_version(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + int ret, ver = 0; + + if (host->hw_ver.major) + return; + + /* Set default (minimum) version anyway */ + host->hw_ver.major = 2; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver); + if (!ret) { + if (ver >= UFS_UNIPRO_VER_1_8) { + host->hw_ver.major = 3; + /* + * Fix HCI version for some platforms with + * incorrect version + */ + if (hba->ufs_version < ufshci_version(3, 0)) + hba->ufs_version = ufshci_version(3, 0); + } + } +} + +static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba) +{ + return hba->ufs_version; +} + +#define MAX_VCC_NAME 30 +static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + struct device_node *np = hba->dev->of_node; + struct device *dev = hba->dev; + char vcc_name[MAX_VCC_NAME]; + struct arm_smccc_res res; + int err, ver; + + if (hba->vreg_info.vcc) + return 0; + + if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) { + ufs_mtk_get_vcc_num(res); + if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX) + snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1); + else + return -ENODEV; + } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) { + ver = (hba->dev_info.wspecversion & 0xF00) >> 8; + snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver); + } else { + return 0; + } + + err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc); + if (err) + return err; + + err = ufshcd_get_vreg(dev, info->vcc); + if (err) + return err; + + err = regulator_enable(info->vcc->reg); + if (!err) { + info->vcc->enabled = true; + dev_info(dev, "%s: %s enabled\n", __func__, vcc_name); + } + + return err; +} + +static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + struct ufs_vreg **vreg_on, **vreg_off; + + if (hba->dev_info.wspecversion >= 0x0300) { + vreg_on = &info->vccq; + vreg_off = &info->vccq2; + } else { + vreg_on = &info->vccq2; + vreg_off = &info->vccq; + } + + if (*vreg_on) + (*vreg_on)->always_on = true; + + if (*vreg_off) { + regulator_disable((*vreg_off)->reg); + devm_kfree(hba->dev, (*vreg_off)->name); + devm_kfree(hba->dev, *vreg_off); + *vreg_off = NULL; + } +} + +/** + * ufs_mtk_init - find other essential mmio bases + * @hba: host controller instance + * + * Binds PHY with controller and powers up PHY enabling clocks + * and regulators. + * + * Returns -EPROBE_DEFER if binding fails, returns negative error + * on phy power up failure and returns zero on success. 
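+ *
+ * Host capabilities are also parsed from the device tree here, and phy
+ * clocks are enabled explicitly, since vops init runs only after the
+ * core has already set up its own clocks.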
+ */ +static int ufs_mtk_init(struct ufs_hba *hba) +{ + const struct of_device_id *id; + struct device *dev = hba->dev; + struct ufs_mtk_host *host; + int err = 0; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) { + err = -ENOMEM; + dev_info(dev, "%s: no memory for mtk ufs host\n", __func__); + goto out; + } + + host->hba = hba; + ufshcd_set_variant(hba, host); + + id = of_match_device(ufs_mtk_of_match, dev); + if (!id) { + err = -EINVAL; + goto out; + } + + /* Initialize host capability */ + ufs_mtk_init_host_caps(hba); + + err = ufs_mtk_bind_mphy(hba); + if (err) + goto out_variant_clear; + + ufs_mtk_init_reset(hba); + + /* Enable runtime autosuspend */ + hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + + /* Enable clock-gating */ + hba->caps |= UFSHCD_CAP_CLK_GATING; + + /* Enable inline encryption */ + hba->caps |= UFSHCD_CAP_CRYPTO; + + /* Enable WriteBooster */ + hba->caps |= UFSHCD_CAP_WB_EN; + hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; + hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); + + if (host->caps & UFS_MTK_CAP_DISABLE_AH8) + hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + + /* + * ufshcd_vops_init() is invoked after + * ufshcd_setup_clock(true) in ufshcd_hba_init() thus + * phy clock setup is skipped. + * + * Enable phy clocks specifically here. + */ + ufs_mtk_mphy_power_on(hba, true); + ufs_mtk_setup_clocks(hba, true, POST_CHANGE); + + host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + + goto out; + +out_variant_clear: + ufshcd_set_variant(hba, NULL); +out: + return err; +} + +static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, + struct ufs_pa_layer_attr *dev_req_params) +{ + if (!ufs_mtk_is_pmc_via_fastauto(hba)) + return false; + + if (dev_req_params->hs_rate == hba->pwr_info.hs_rate) + return false; + + if (dev_req_params->pwr_tx != FAST_MODE && + dev_req_params->gear_tx < UFS_HS_G4) + return false; + + if (dev_req_params->pwr_rx != FAST_MODE && + dev_req_params->gear_rx < UFS_HS_G4) + return false; + + return true; +} + +static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_dev_params host_cap; + int ret; + + ufshcd_init_pwr_dev_param(&host_cap); + host_cap.hs_rx_gear = UFS_HS_G5; + host_cap.hs_tx_gear = UFS_HS_G5; + + ret = ufshcd_get_pwr_dev_param(&host_cap, + dev_max_params, + dev_req_params); + if (ret) { + pr_info("%s: failed to determine capabilities\n", + __func__); + } + + if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + dev_req_params->lane_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + dev_req_params->lane_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + dev_req_params->hs_rate); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), + PA_NO_ADAPT); + + ret = ufshcd_uic_change_pwr_mode(hba, + FASTAUTO_MODE << 4 | FASTAUTO_MODE); + + if (ret) { + dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n", + __func__, ret); + } + } + + if (host->hw_ver.major >= 3) { + ret = ufshcd_dme_configure_adapt(hba, + dev_req_params->gear_tx, + PA_INITIAL_ADAPT); + } + + return ret; +} + +static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, 
+				    enum ufs_notify_change_status stage,
+				    struct ufs_pa_layer_attr *dev_max_params,
+				    struct ufs_pa_layer_attr *dev_req_params)
+{
+	int ret = 0;
+
+	switch (stage) {
+	case PRE_CHANGE:
+		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
+					     dev_req_params);
+		break;
+	case POST_CHANGE:
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+	int ret;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	ret = ufshcd_dme_set(hba,
+			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+			     lpm ? 1 : 0);
+	if (!ret || !lpm) {
+		/*
+		 * Only track a successful transition here: if the UIC
+		 * command failed while entering LPM, unipro_lpm stays
+		 * false so that the default hba_enable_delay_us value
+		 * is used when the host is re-enabled.
+		 */
+		host->unipro_lpm = lpm;
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_pre_link(struct ufs_hba *hba)
+{
+	int ret;
+	u32 tmp;
+
+	ufs_mtk_get_controller_version(hba);
+
+	ret = ufs_mtk_unipro_set_lpm(hba, false);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set PA_Local_TX_LCC_Enable to 0 before link startup
+	 * to make sure that both host and device TX LCC are disabled
+	 * once link startup is completed.
+	 */
+	ret = ufshcd_disable_host_tx_lcc(hba);
+	if (ret)
+		return ret;
+
+	/* disable deep stall */
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+	if (ret)
+		return ret;
+
+	tmp &= ~(1 << 6);
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+	return ret;
+}
+
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+	u32 ah_ms;
+
+	if (ufshcd_is_clkgating_allowed(hba)) {
+		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
+			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+					  hba->ahit);
+		else
+			ah_ms = 10;
+		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
+	}
+}
+
+static int ufs_mtk_post_link(struct ufs_hba *hba)
+{
+	/* enable unipro clock gating feature */
+	ufs_mtk_cfg_unipro_cg(hba, true);
+
+	/* default auto-hibern8 timer; it is applied to HW during hba probe */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
+
+	ufs_mtk_setup_clk_gating(hba);
+
+	return 0;
+}
+
+static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
+				       enum ufs_notify_change_status stage)
+{
+	int ret = 0;
+
+	switch (stage) {
+	case PRE_CHANGE:
+		ret = ufs_mtk_pre_link(hba);
+		break;
+	case POST_CHANGE:
+		ret = ufs_mtk_post_link(hba);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_device_reset(struct ufs_hba *hba)
+{
+	struct arm_smccc_res res;
+
+	/* disable hba before device reset */
+	ufshcd_hba_stop(hba);
+
+	ufs_mtk_device_reset_ctrl(0, res);
+
+	/*
+	 * The reset signal is active low. UFS devices are required to
+	 * detect any RST_n pulse, positive or negative, that is at
+	 * least 1 us wide.
+	 *
+	 * To be on the safe side, keep the reset low for at least 10 us.
+	 */
+	usleep_range(10, 15);
+
+	ufs_mtk_device_reset_ctrl(1, res);
+
+	/* Some devices may need time to respond to rst_n */
+	usleep_range(10000, 15000);
+
+	dev_info(hba->dev, "device reset done\n");
+
+	return 0;
+}
+
+static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_hba_enable(hba);
+	if (err)
+		return err;
+
+	err = ufs_mtk_unipro_set_lpm(hba, false);
+	if (err)
+		return err;
+
+	err = ufshcd_uic_hibern8_exit(hba);
+	if (!err)
+		ufshcd_set_link_active(hba);
+	else
+		return err;
+
+	err = ufshcd_make_hba_operational(hba);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
+{
+	int err;
+
+	/* Disable reset confirm feature by UniPro */
+	ufshcd_writel(hba,
+		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
+		      REG_UFS_XOUFS_CTRL);
+
+	err = ufs_mtk_unipro_set_lpm(hba, true);
+	if (err) {
+		/* Resume UniPro state for following error recovery */
+		ufs_mtk_unipro_set_lpm(hba, false);
+		return err;
+	}
+
+	return 0;
+}
+
+static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+	struct ufs_vreg *vccqx = NULL;
+
+	if (hba->vreg_info.vccq)
+		vccqx = hba->vreg_info.vccq;
+	else
+		vccqx = hba->vreg_info.vccq2;
+
+	regulator_set_mode(vccqx->reg,
+			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
+}
+
+static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+	struct arm_smccc_res res;
+
+	ufs_mtk_device_pwr_ctrl(!lpm,
+				(unsigned long)hba->dev_info.wspecversion,
+				res);
+}
+
+static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+		return;
+
+	/* Skip if VCC is assumed always-on */
+	if (!hba->vreg_info.vcc)
+		return;
+
+	/* Bypass LPM when device is still active */
+	if (lpm && ufshcd_is_ufs_dev_active(hba))
+		return;
+
+	/* Bypass LPM if VCC is enabled */
+	if (lpm && hba->vreg_info.vcc->enabled)
+		return;
+
+	if (lpm) {
+		ufs_mtk_vccqx_set_lpm(hba, lpm);
+		ufs_mtk_vsx_set_lpm(hba, lpm);
+	} else {
+		ufs_mtk_vsx_set_lpm(hba, lpm);
+		ufs_mtk_vccqx_set_lpm(hba, lpm);
+	}
+}
+
+static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+	int ret;
+
+	/* disable auto-hibern8 */
+	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+	/* wait for the host to return to idle after auto-hibern8 is off */
+	ufs_mtk_wait_idle_state(hba, 5);
+
+	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+	if (ret)
+		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+}
+
+static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+			   enum ufs_notify_change_status status)
+{
+	int err;
+	struct arm_smccc_res res;
+
+	if (status == PRE_CHANGE) {
+		if (!ufshcd_is_auto_hibern8_supported(hba))
+			return 0;
+		ufs_mtk_auto_hibern8_disable(hba);
+		return 0;
+	}
+
+	if (ufshcd_is_link_hibern8(hba)) {
+		err = ufs_mtk_link_set_lpm(hba);
+		if (err)
+			goto fail;
+	}
+
+	if (!ufshcd_is_link_active(hba)) {
+		/*
+		 * Make sure no error is returned here; otherwise
+		 * ufshcd_suspend() could re-enable regulators while the
+		 * vreg is still in low-power mode.
+		 */
+		err = ufs_mtk_mphy_power_on(hba, false);
+		if (err)
+			goto fail;
+	}
+
+	if (ufshcd_is_link_off(hba))
+		ufs_mtk_device_reset_ctrl(0, res);
+
+	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
+
+	return 0;
+fail:
+	/*
+	 * Force the link into the off state so that
+	 * ufshcd_host_reset_and_restore() runs in ufshcd_suspend()
+	 * and performs a complete host reset.
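+	 * Returning -EAGAIN marks the failure as recoverable so that
+	 * the suspend attempt can be retried later.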
+ */ + ufshcd_set_link_off(hba); + return -EAGAIN; +} + +static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + int err; + struct arm_smccc_res res; + + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + ufs_mtk_dev_vreg_set_lpm(hba, false); + + ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res); + + err = ufs_mtk_mphy_power_on(hba, true); + if (err) + goto fail; + + if (ufshcd_is_link_hibern8(hba)) { + err = ufs_mtk_link_set_hpm(hba); + if (err) + goto fail; + } + + return 0; +fail: + return ufshcd_link_recovery(hba); +} + +static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl "); + + ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); + + ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, + REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4, + "MPHY Ctrl "); + + /* Direct debugging information to REG_MTK_PROBE */ + ufs_mtk_dbg_sel(hba); + ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); +} + +static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + u16 mid = dev_info->wmanufacturerid; + + if (mid == UFS_VENDOR_SAMSUNG) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10); + } + + /* + * Decide waiting time before gating reference clock and + * after ungating reference clock according to vendors' + * requirements. + */ + if (mid == UFS_VENDOR_SAMSUNG) + ufs_mtk_setup_ref_clk_wait_us(hba, 1); + else if (mid == UFS_VENDOR_SKHYNIX) + ufs_mtk_setup_ref_clk_wait_us(hba, 30); + else if (mid == UFS_VENDOR_TOSHIBA) + ufs_mtk_setup_ref_clk_wait_us(hba, 100); + else + ufs_mtk_setup_ref_clk_wait_us(hba, + REFCLK_DEFAULT_WAIT_US); + return 0; +} + +static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) +{ + ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); + + if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && + (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { + hba->vreg_info.vcc->always_on = true; + /* + * VCC will be kept always-on thus we don't + * need any delay during regulator operations + */ + hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_DELAY_AFTER_LPM); + } + + ufs_mtk_vreg_fix_vcc(hba); + ufs_mtk_vreg_fix_vccqx(hba); +} + +static void ufs_mtk_event_notify(struct ufs_hba *hba, + enum ufs_event_type evt, void *data) +{ + unsigned int val = *(u32 *)data; + + trace_ufs_mtk_event(evt, val); +} + +/* + * struct ufs_hba_mtk_vops - UFS MTK specific variant operations + * + * The variant operations configure the necessary controller and PHY + * handshake during initialization. 
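+ *
+ * Callbacks that are left unset fall back to the default behaviour of
+ * the ufshcd core, which checks each hook for NULL before calling it.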
+ */ +static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { + .name = "mediatek.ufshci", + .init = ufs_mtk_init, + .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version, + .setup_clocks = ufs_mtk_setup_clocks, + .hce_enable_notify = ufs_mtk_hce_enable_notify, + .link_startup_notify = ufs_mtk_link_startup_notify, + .pwr_change_notify = ufs_mtk_pwr_change_notify, + .apply_dev_quirks = ufs_mtk_apply_dev_quirks, + .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks, + .suspend = ufs_mtk_suspend, + .resume = ufs_mtk_resume, + .dbg_register_dump = ufs_mtk_dbg_register_dump, + .device_reset = ufs_mtk_device_reset, + .event_notify = ufs_mtk_event_notify, +}; + +/** + * ufs_mtk_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Return zero for success and non-zero for failure + */ +static int ufs_mtk_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + struct device_node *reset_node; + struct platform_device *reset_pdev; + struct device_link *link; + + reset_node = of_find_compatible_node(NULL, NULL, + "ti,syscon-reset"); + if (!reset_node) { + dev_notice(dev, "find ti,syscon-reset fail\n"); + goto skip_reset; + } + reset_pdev = of_find_device_by_node(reset_node); + if (!reset_pdev) { + dev_notice(dev, "find reset_pdev fail\n"); + goto skip_reset; + } + link = device_link_add(dev, &reset_pdev->dev, + DL_FLAG_AUTOPROBE_CONSUMER); + put_device(&reset_pdev->dev); + if (!link) { + dev_notice(dev, "add reset device_link fail\n"); + goto skip_reset; + } + /* supplier is not probed */ + if (link->status == DL_STATE_DORMANT) { + err = -EPROBE_DEFER; + goto out; + } + +skip_reset: + /* perform generic probe */ + err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops); + +out: + if (err) + dev_info(dev, "probe failed %d\n", err); + + of_node_put(reset_node); + return err; +} + +/** + * ufs_mtk_remove - set driver_data of the device to NULL + * @pdev: pointer to platform device handle + * + * Always return 0 + */ +static int ufs_mtk_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int ufs_mtk_system_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + + ret = ufshcd_system_suspend(dev); + if (ret) + return ret; + + ufs_mtk_dev_vreg_set_lpm(hba, true); + + return 0; +} + +static int ufs_mtk_system_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + ufs_mtk_dev_vreg_set_lpm(hba, false); + + return ufshcd_system_resume(dev); +} +#endif + +static int ufs_mtk_runtime_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret = 0; + + ret = ufshcd_runtime_suspend(dev); + if (ret) + return ret; + + ufs_mtk_dev_vreg_set_lpm(hba, true); + + return 0; +} + +static int ufs_mtk_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + ufs_mtk_dev_vreg_set_lpm(hba, false); + + return ufshcd_runtime_resume(dev); +} + +static const struct dev_pm_ops ufs_mtk_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend, + ufs_mtk_system_resume) + SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend, + ufs_mtk_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver ufs_mtk_pltform = { + .probe = ufs_mtk_probe, + .remove = ufs_mtk_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "ufshcd-mtk", + .pm = 
&ufs_mtk_pm_ops, + .of_match_table = ufs_mtk_of_match, + }, +}; + +MODULE_AUTHOR("Stanley Chu "); +MODULE_AUTHOR("Peter Wang "); +MODULE_DESCRIPTION("MediaTek UFS Host Driver"); +MODULE_LICENSE("GPL v2"); + +module_platform_driver(ufs_mtk_pltform); diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h new file mode 100644 index 0000000000..aa26d41552 --- /dev/null +++ b/drivers/ufs/host/ufs-mediatek.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. + */ + +#ifndef _UFS_MEDIATEK_H +#define _UFS_MEDIATEK_H + +#include +#include +#include + +/* + * Vendor specific UFSHCI Registers + */ +#define REG_UFS_XOUFS_CTRL 0x140 +#define REG_UFS_REFCLK_CTRL 0x144 +#define REG_UFS_EXTREG 0x2100 +#define REG_UFS_MPHYCTRL 0x2200 +#define REG_UFS_MTK_IP_VER 0x2240 +#define REG_UFS_REJECT_MON 0x22AC +#define REG_UFS_DEBUG_SEL 0x22C0 +#define REG_UFS_PROBE 0x22C8 +#define REG_UFS_DEBUG_SEL_B0 0x22D0 +#define REG_UFS_DEBUG_SEL_B1 0x22D4 +#define REG_UFS_DEBUG_SEL_B2 0x22D8 +#define REG_UFS_DEBUG_SEL_B3 0x22DC + +/* + * Ref-clk control + * + * Values for register REG_UFS_REFCLK_CTRL + */ +#define REFCLK_RELEASE 0x0 +#define REFCLK_REQUEST BIT(0) +#define REFCLK_ACK BIT(1) + +#define REFCLK_REQ_TIMEOUT_US 3000 +#define REFCLK_DEFAULT_WAIT_US 32 + +/* + * Other attributes + */ +#define VS_DEBUGCLOCKENABLE 0xD0A1 +#define VS_SAVEPOWERCONTROL 0xD0A6 +#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8 + +/* + * Vendor specific link state + */ +enum { + VS_LINK_DISABLED = 0, + VS_LINK_DOWN = 1, + VS_LINK_UP = 2, + VS_LINK_HIBERN8 = 3, + VS_LINK_LOST = 4, + VS_LINK_CFG = 5, +}; + +/* + * Vendor specific host controller state + */ +enum { + VS_HCE_RESET = 0, + VS_HCE_BASE = 1, + VS_HCE_OOCPR_WAIT = 2, + VS_HCE_DME_RESET = 3, + VS_HCE_MIDDLE = 4, + VS_HCE_DME_ENABLE = 5, + VS_HCE_DEFAULTS = 6, + VS_HIB_IDLEEN = 7, + VS_HIB_ENTER = 8, + VS_HIB_ENTER_CONF = 9, + VS_HIB_MIDDLE = 10, + VS_HIB_WAITTIMER = 11, + VS_HIB_EXIT_CONF = 12, + VS_HIB_EXIT = 13, +}; + +/* + * SiP commands + */ +#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) +#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0) +#define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2) +#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) +#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5) +#define UFS_MTK_SIP_GET_VCC_NUM BIT(6) +#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7) + +/* + * VS_DEBUGCLOCKENABLE + */ +enum { + TX_SYMBOL_CLK_REQ_FORCE = 5, +}; + +/* + * VS_SAVEPOWERCONTROL + */ +enum { + RX_SYMBOL_CLK_GATE_EN = 0, + SYS_CLK_GATE_EN = 2, + TX_CLK_GATE_EN = 3, +}; + +/* + * Host capability + */ +enum ufs_mtk_host_caps { + UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0, + UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, + UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, + UFS_MTK_CAP_BROKEN_VCC = 1 << 3, + UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6, +}; + +struct ufs_mtk_crypt_cfg { + struct regulator *reg_vcore; + struct clk *clk_crypt_perf; + struct clk *clk_crypt_mux; + struct clk *clk_crypt_lp; + int vcore_volt; +}; + +struct ufs_mtk_hw_ver { + u8 step; + u8 minor; + u8 major; +}; + +struct ufs_mtk_host { + struct phy *mphy; + struct pm_qos_request pm_qos_req; + struct regulator *reg_va09; + struct reset_control *hci_reset; + struct reset_control *unipro_reset; + struct reset_control *crypto_reset; + struct ufs_hba *hba; + struct ufs_mtk_crypt_cfg *crypt; + struct ufs_mtk_hw_ver hw_ver; + enum ufs_mtk_host_caps caps; + bool mphy_powered_on; + bool pm_qos_init; + bool unipro_lpm; + bool ref_clk_enabled; + u16 ref_clk_ungating_wait_us; + 
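+	/* wait (us) applied before ref-clk gating and after ungating */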
u16 ref_clk_gating_wait_us; + u32 ip_ver; +}; + +/* + * Multi-VCC by Numbering + */ +enum ufs_mtk_vcc_num { + UFS_VCC_NONE = 0, + UFS_VCC_1, + UFS_VCC_2, + UFS_VCC_MAX +}; + +/* + * Host Power Control options + */ +enum { + HOST_PWR_HCI = 0, + HOST_PWR_MPHY +}; + +/* + * SMC call wrapper function + */ +struct ufs_mtk_smc_arg { + unsigned long cmd; + struct arm_smccc_res *res; + unsigned long v1; + unsigned long v2; + unsigned long v3; + unsigned long v4; + unsigned long v5; + unsigned long v6; + unsigned long v7; +}; + +static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s) +{ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, + s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res); +} + +#define ufs_mtk_smc(...) \ + _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__}) + +/* + * SMC call interface + */ +#define ufs_mtk_va09_pwr_ctrl(res, on) \ + ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on) + +#define ufs_mtk_crypto_ctrl(res, enable) \ + ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable) + +#define ufs_mtk_ref_clk_notify(on, stage, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high) + +#define ufs_mtk_host_pwr_ctrl(opt, on, res) \ + ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on) + +#define ufs_mtk_get_vcc_num(res) \ + ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res)) + +#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver) + +#endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/ufs/host/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c new file mode 100644 index 0000000000..745e48ec59 --- /dev/null +++ b/drivers/ufs/host/ufs-qcom-ice.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm ICE (Inline Crypto Engine) support. + * + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * Copyright 2019 Google LLC + */ + +#include +#include +#include + +#include "ufs-qcom.h" + +#define AES_256_XTS_KEY_SIZE 64 + +/* QCOM ICE registers */ + +#define QCOM_ICE_REG_CONTROL 0x0000 +#define QCOM_ICE_REG_RESET 0x0004 +#define QCOM_ICE_REG_VERSION 0x0008 +#define QCOM_ICE_REG_FUSE_SETTING 0x0010 +#define QCOM_ICE_REG_PARAMETERS_1 0x0014 +#define QCOM_ICE_REG_PARAMETERS_2 0x0018 +#define QCOM_ICE_REG_PARAMETERS_3 0x001C +#define QCOM_ICE_REG_PARAMETERS_4 0x0020 +#define QCOM_ICE_REG_PARAMETERS_5 0x0024 + +/* QCOM ICE v3.X only */ +#define QCOM_ICE_GENERAL_ERR_STTS 0x0040 +#define QCOM_ICE_INVALID_CCFG_ERR_STTS 0x0030 +#define QCOM_ICE_GENERAL_ERR_MASK 0x0044 + +/* QCOM ICE v2.X only */ +#define QCOM_ICE_REG_NON_SEC_IRQ_STTS 0x0040 +#define QCOM_ICE_REG_NON_SEC_IRQ_MASK 0x0044 + +#define QCOM_ICE_REG_NON_SEC_IRQ_CLR 0x0048 +#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME1 0x0050 +#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME2 0x0054 +#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME1 0x0058 +#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME2 0x005C +#define QCOM_ICE_REG_STREAM1_BIST_ERROR_VEC 0x0060 +#define QCOM_ICE_REG_STREAM2_BIST_ERROR_VEC 0x0064 +#define QCOM_ICE_REG_STREAM1_BIST_FINISH_VEC 0x0068 +#define QCOM_ICE_REG_STREAM2_BIST_FINISH_VEC 0x006C +#define QCOM_ICE_REG_BIST_STATUS 0x0070 +#define QCOM_ICE_REG_BYPASS_STATUS 0x0074 +#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 +#define QCOM_ICE_REG_ENDIAN_SWAP 0x1004 +#define QCOM_ICE_REG_TEST_BUS_CONTROL 0x1010 +#define QCOM_ICE_REG_TEST_BUS_REG 0x1014 + +/* BIST ("built-in self-test"?) 
status flags */
+#define QCOM_ICE_BIST_STATUS_MASK		0xF0000000
+
+#define QCOM_ICE_FUSE_SETTING_MASK		0x1
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK	0x2
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK	0x4
+
+#define qcom_ice_writel(host, val, reg)	\
+	writel((val), (host)->ice_mmio + (reg))
+#define qcom_ice_readl(host, reg)	\
+	readl((host)->ice_mmio + (reg))
+
+static bool qcom_ice_supported(struct ufs_qcom_host *host)
+{
+	struct device *dev = host->hba->dev;
+	u32 regval = qcom_ice_readl(host, QCOM_ICE_REG_VERSION);
+	int major = regval >> 24;
+	int minor = (regval >> 16) & 0xFF;
+	int step = regval & 0xFFFF;
+
+	/* For now this driver only supports ICE version 3. */
+	if (major != 3) {
+		dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
+			 major, minor, step);
+		return false;
+	}
+
+	dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
+		 major, minor, step);
+
+	/* If fuses are blown, ICE might not work in the standard way. */
+	regval = qcom_ice_readl(host, QCOM_ICE_REG_FUSE_SETTING);
+	if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
+		      QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
+		      QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
+		dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
+		return false;
+	}
+	return true;
+}
+
+int ufs_qcom_ice_init(struct ufs_qcom_host *host)
+{
+	struct ufs_hba *hba = host->hba;
+	struct device *dev = hba->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *res;
+	int err;
+
+	if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) &
+	      MASK_CRYPTO_SUPPORT))
+		return 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice");
+	if (!res) {
+		dev_warn(dev, "ICE registers not found\n");
+		goto disable;
+	}
+
+	if (!qcom_scm_ice_available()) {
+		dev_warn(dev, "ICE SCM interface not found\n");
+		goto disable;
+	}
+
+	host->ice_mmio = devm_ioremap_resource(dev, res);
+	if (IS_ERR(host->ice_mmio)) {
+		err = PTR_ERR(host->ice_mmio);
+		dev_err(dev, "Failed to map ICE registers; err=%d\n", err);
+		return err;
+	}
+
+	if (!qcom_ice_supported(host))
+		goto disable;
+
+	return 0;
+
+disable:
+	dev_warn(dev, "Disabling inline encryption support\n");
+	hba->caps &= ~UFSHCD_CAP_CRYPTO;
+	return 0;
+}
+
+static void qcom_ice_low_power_mode_enable(struct ufs_qcom_host *host)
+{
+	u32 regval;
+
+	regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL);
+	/*
+	 * Enable low power mode sequence
+	 * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+	 */
+	regval |= 0x7000;
+	qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+}
+
+static void qcom_ice_optimization_enable(struct ufs_qcom_host *host)
+{
+	u32 regval;
+
+	/* ICE Optimizations Enable Sequence */
+	regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL);
+	regval |= 0xD807100;
+	/* ICE HPG requires delay before writing */
+	udelay(5);
+	qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+	udelay(5);
+}
+
+int ufs_qcom_ice_enable(struct ufs_qcom_host *host)
+{
+	if (!(host->hba->caps & UFSHCD_CAP_CRYPTO))
+		return 0;
+	qcom_ice_low_power_mode_enable(host);
+	qcom_ice_optimization_enable(host);
+	return ufs_qcom_ice_resume(host);
+}
+
+/* Poll until all BIST bits are reset */
+static int qcom_ice_wait_bist_status(struct ufs_qcom_host *host)
+{
+	int count;
+	u32 reg;
+
+	for (count = 0; count < 100; count++) {
+		reg = qcom_ice_readl(host, QCOM_ICE_REG_BIST_STATUS);
+		if (!(reg & QCOM_ICE_BIST_STATUS_MASK))
+			break;
+		udelay(50);
+	}
+	/* Only the BIST status bits indicate that the self-test hung */
+	if (reg & QCOM_ICE_BIST_STATUS_MASK)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
+{
+	int err;
+
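+	/*
+	 * Nothing to do when crypto is disabled; otherwise re-check that
+	 * the ICE self-test has completed after the power transition.
+	 */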
+	if (!(host->hba->caps & UFSHCD_CAP_CRYPTO))
+		return 0;
+
+	err = qcom_ice_wait_bist_status(host);
+	if (err) {
+		dev_err(host->hba->dev, "BIST status error (%d)\n", err);
+		return err;
+	}
+	return 0;
+}
+
+/*
+ * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
+ * vendor-specific SCM calls for this; it doesn't support the standard way.
+ */
+int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+			     const union ufs_crypto_cfg_entry *cfg, int slot)
+{
+	union ufs_crypto_cap_entry cap;
+	union {
+		u8 bytes[AES_256_XTS_KEY_SIZE];
+		u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
+	} key;
+	int i;
+	int err;
+
+	if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE))
+		return qcom_scm_ice_invalidate_key(slot);
+
+	/* Only AES-256-XTS has been tested so far. */
+	cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
+	if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
+	    cap.key_size != UFS_CRYPTO_KEY_SIZE_256) {
+		dev_err_ratelimited(hba->dev,
+				    "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
+				    cap.algorithm_id, cap.key_size);
+		return -EINVAL;
+	}
+
+	memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);
+
+	/*
+	 * The SCM call byte-swaps the 32-bit words of the key. So we have to
+	 * do the same, in order for the final key to be correct.
+	 */
+	for (i = 0; i < ARRAY_SIZE(key.words); i++)
+		__cpu_to_be32s(&key.words[i]);
+
+	err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
+				   QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+				   cfg->data_unit_size);
+	memzero_explicit(&key, sizeof(key));
+	return err;
+}
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
new file mode 100644
index 0000000000..473fad8370
--- /dev/null
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -0,0 +1,1520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "ufshcd-pltfrm.h" +#include +#include "ufs-qcom.h" +#include +#include + +#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \ + (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + +enum { + TSTBUS_UAWM, + TSTBUS_UARM, + TSTBUS_TXUC, + TSTBUS_RXUC, + TSTBUS_DFC, + TSTBUS_TRLUT, + TSTBUS_TMRLUT, + TSTBUS_OCSC, + TSTBUS_UTP_HCI, + TSTBUS_COMBINED, + TSTBUS_WRAPPER, + TSTBUS_UNIPRO, + TSTBUS_MAX, +}; + +static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; + +static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); +static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, + u32 clk_cycles); + +static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) +{ + return container_of(rcd, struct ufs_qcom_host, rcdev); +} + +static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, + const char *prefix, void *priv) +{ + ufshcd_dump_regs(hba, offset, len * 4, prefix); +} + +static int ufs_qcom_host_clk_get(struct device *dev, + const char *name, struct clk **clk_out, bool optional) +{ + struct clk *clk; + int err = 0; + + clk = devm_clk_get(dev, name); + if (!IS_ERR(clk)) { + *clk_out = clk; + return 0; + } + + err = PTR_ERR(clk); + + if (optional && err == -ENOENT) { + *clk_out = NULL; + return 0; + } + + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to get %s err %d\n", name, err); + + return err; +} + +static int ufs_qcom_host_clk_enable(struct device *dev, + const char *name, struct clk *clk) +{ + int err = 0; + + err = clk_prepare_enable(clk); + if (err) + dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err); + + return err; +} + +static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) +{ + if (!host->is_lane_clks_enabled) + return; + + clk_disable_unprepare(host->tx_l1_sync_clk); + clk_disable_unprepare(host->tx_l0_sync_clk); + clk_disable_unprepare(host->rx_l1_sync_clk); + clk_disable_unprepare(host->rx_l0_sync_clk); + + host->is_lane_clks_enabled = false; +} + +static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) +{ + int err = 0; + struct device *dev = host->hba->dev; + + if (host->is_lane_clks_enabled) + return 0; + + err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk", + host->rx_l0_sync_clk); + if (err) + goto out; + + err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk", + host->tx_l0_sync_clk); + if (err) + goto disable_rx_l0; + + err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", + host->rx_l1_sync_clk); + if (err) + goto disable_tx_l0; + + err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", + host->tx_l1_sync_clk); + if (err) + goto disable_rx_l1; + + host->is_lane_clks_enabled = true; + goto out; + +disable_rx_l1: + clk_disable_unprepare(host->rx_l1_sync_clk); +disable_tx_l0: + clk_disable_unprepare(host->tx_l0_sync_clk); +disable_rx_l0: + clk_disable_unprepare(host->rx_l0_sync_clk); +out: + return err; +} + +static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) +{ + int err = 0; + struct device *dev = host->hba->dev; + + if (has_acpi_companion(dev)) + return 0; + + err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk", + &host->rx_l0_sync_clk, false); + if (err) + goto out; + + err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk", + &host->tx_l0_sync_clk, false); + if (err) + goto out; + + /* In case of single lane per direction, don't read lane1 clocks */ + if (host->hba->lanes_per_direction > 1) 
{ + err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", + &host->rx_l1_sync_clk, false); + if (err) + goto out; + + err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", + &host->tx_l1_sync_clk, true); + } +out: + return err; +} + +static int ufs_qcom_check_hibern8(struct ufs_hba *hba) +{ + int err; + u32 tx_fsm_val = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); + + do { + err = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); + if (err || tx_fsm_val == TX_FSM_HIBERN8) + break; + + /* sleep for max. 200us */ + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + /* + * we might have scheduled out for long during polling so + * check the state again. + */ + if (time_after(jiffies, timeout)) + err = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); + + if (err) { + dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", + __func__, err); + } else if (tx_fsm_val != TX_FSM_HIBERN8) { + err = tx_fsm_val; + dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", + __func__, err); + } + + return err; +} + +static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) +{ + ufshcd_rmwl(host->hba, QUNIPRO_SEL, + ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0, + REG_UFS_CFG1); + /* make sure above configuration is applied before we return */ + mb(); +} + +/* + * ufs_qcom_host_reset - reset host controller and PHY + */ +static int ufs_qcom_host_reset(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + bool reenable_intr = false; + + if (!host->core_reset) { + dev_warn(hba->dev, "%s: reset control not set\n", __func__); + goto out; + } + + reenable_intr = hba->is_irq_enabled; + disable_irq(hba->irq); + hba->is_irq_enabled = false; + + ret = reset_control_assert(host->core_reset); + if (ret) { + dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", + __func__, ret); + goto out; + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side add 200us delay. 
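+	 * (4 / 32768 s is roughly 122 us, so the 200 us sleep below
+	 * leaves ample margin.)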
+ */ + usleep_range(200, 210); + + ret = reset_control_deassert(host->core_reset); + if (ret) + dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", + __func__, ret); + + usleep_range(1000, 1100); + + if (reenable_intr) { + enable_irq(hba->irq); + hba->is_irq_enabled = true; + } + +out: + return ret; +} + +static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + int ret = 0; + bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B; + + /* Reset UFS Host Controller and PHY */ + ret = ufs_qcom_host_reset(hba); + if (ret) + dev_warn(hba->dev, "%s: host reset returned %d\n", + __func__, ret); + + if (is_rate_B) + phy_set_mode(phy, PHY_MODE_UFS_HS_B); + + /* phy initialization - calibrate the phy */ + ret = phy_init(phy); + if (ret) { + dev_err(hba->dev, "%s: phy init failed, ret = %d\n", + __func__, ret); + goto out; + } + + /* power on phy - start serdes and phy's power and clocks */ + ret = phy_power_on(phy); + if (ret) { + dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", + __func__, ret); + goto out_disable_phy; + } + + ufs_qcom_select_unipro_mode(host); + + return 0; + +out_disable_phy: + phy_exit(phy); +out: + return ret; +} + +/* + * The UTP controller has a number of internal clock gating cells (CGCs). + * Internal hardware sub-modules within the UTP controller control the CGCs. + * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved + * in a specific operation, UTP controller CGCs are by default disabled and + * this function enables them (after every UFS link startup) to save some power + * leakage. + */ +static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) +{ + ufshcd_writel(hba, + ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, + REG_UFS_CFG2); + + /* Ensure that HW clock gating is enabled before next operations */ + mb(); +} + +static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + switch (status) { + case PRE_CHANGE: + ufs_qcom_power_up_sequence(hba); + /* + * The PHY PLL output is the source of tx/rx lane symbol + * clocks, hence, enable the lane clocks only after PHY + * is initialized. 
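+		 * ufs_qcom_power_up_sequence() above has just done that,
+		 * so it is now safe to turn the lane clocks on.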
+ */ + err = ufs_qcom_enable_lane_clks(host); + break; + case POST_CHANGE: + /* check if UFS PHY moved from DISABLED to HIBERN8 */ + err = ufs_qcom_check_hibern8(hba); + ufs_qcom_enable_hw_clk_gating(hba); + ufs_qcom_ice_enable(host); + break; + default: + dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); + err = -EINVAL; + break; + } + return err; +} + +/* + * Returns zero for success and non-zero in case of a failure + */ +static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, + u32 hs, u32 rate, bool update_link_startup_timer) +{ + int ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_clk_info *clki; + u32 core_clk_period_in_ns; + u32 tx_clk_cycles_per_us = 0; + unsigned long core_clk_rate = 0; + u32 core_clk_cycles_per_us = 0; + + static u32 pwm_fr_table[][2] = { + {UFS_PWM_G1, 0x1}, + {UFS_PWM_G2, 0x1}, + {UFS_PWM_G3, 0x1}, + {UFS_PWM_G4, 0x1}, + }; + + static u32 hs_fr_table_rA[][2] = { + {UFS_HS_G1, 0x1F}, + {UFS_HS_G2, 0x3e}, + {UFS_HS_G3, 0x7D}, + }; + + static u32 hs_fr_table_rB[][2] = { + {UFS_HS_G1, 0x24}, + {UFS_HS_G2, 0x49}, + {UFS_HS_G3, 0x92}, + }; + + /* + * The Qunipro controller does not use following registers: + * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG & + * UFS_REG_PA_LINK_STARTUP_TIMER + * But UTP controller uses SYS1CLK_1US_REG register for Interrupt + * Aggregation logic. + */ + if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) + goto out; + + if (gear == 0) { + dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); + goto out_error; + } + + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, "core_clk")) + core_clk_rate = clk_get_rate(clki->clk); + } + + /* If frequency is smaller than 1MHz, set to 1MHz */ + if (core_clk_rate < DEFAULT_CLK_RATE_HZ) + core_clk_rate = DEFAULT_CLK_RATE_HZ; + + core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; + if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { + ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); + /* + * make sure above write gets applied before we return from + * this function. 
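+		 * The mb() below keeps the MMIO write ordered ahead of
+		 * any following register accesses.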
+ */ + mb(); + } + + if (ufs_qcom_cap_qunipro(host)) + goto out; + + core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate; + core_clk_period_in_ns <<= OFFSET_CLK_NS_REG; + core_clk_period_in_ns &= MASK_CLK_NS_REG; + + switch (hs) { + case FASTAUTO_MODE: + case FAST_MODE: + if (rate == PA_HS_MODE_A) { + if (gear > ARRAY_SIZE(hs_fr_table_rA)) { + dev_err(hba->dev, + "%s: index %d exceeds table size %zu\n", + __func__, gear, + ARRAY_SIZE(hs_fr_table_rA)); + goto out_error; + } + tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1]; + } else if (rate == PA_HS_MODE_B) { + if (gear > ARRAY_SIZE(hs_fr_table_rB)) { + dev_err(hba->dev, + "%s: index %d exceeds table size %zu\n", + __func__, gear, + ARRAY_SIZE(hs_fr_table_rB)); + goto out_error; + } + tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1]; + } else { + dev_err(hba->dev, "%s: invalid rate = %d\n", + __func__, rate); + goto out_error; + } + break; + case SLOWAUTO_MODE: + case SLOW_MODE: + if (gear > ARRAY_SIZE(pwm_fr_table)) { + dev_err(hba->dev, + "%s: index %d exceeds table size %zu\n", + __func__, gear, + ARRAY_SIZE(pwm_fr_table)); + goto out_error; + } + tx_clk_cycles_per_us = pwm_fr_table[gear-1][1]; + break; + case UNCHANGED: + default: + dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); + goto out_error; + } + + if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) != + (core_clk_period_in_ns | tx_clk_cycles_per_us)) { + /* this register 2 fields shall be written at once */ + ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, + REG_UFS_TX_SYMBOL_CLK_NS_US); + /* + * make sure above write gets applied before we return from + * this function. + */ + mb(); + } + + if (update_link_startup_timer) { + ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100), + REG_UFS_PA_LINK_STARTUP_TIMER); + /* + * make sure that this configuration is applied before + * we return + */ + mb(); + } + goto out; + +out_error: + ret = -EINVAL; +out: + return ret; +} + +static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + switch (status) { + case PRE_CHANGE: + if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, + 0, true)) { + dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", + __func__); + err = -EINVAL; + goto out; + } + + if (ufs_qcom_cap_qunipro(host)) + /* + * set unipro core clock cycles to 150 & clear clock + * divider + */ + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, + 150); + + /* + * Some UFS devices (and may be host) have issues if LCC is + * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. 
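+		 * Hosts whose local UniPro version is 1.41 skip this,
+		 * as the version check below shows.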
+ */ + if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) + err = ufshcd_disable_host_tx_lcc(hba); + + break; + default: + break; + } + +out: + return err; +} + +static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + /* reset gpio is optional */ + if (!host->device_reset) + return; + + gpiod_set_value_cansleep(host->device_reset, asserted); +} + +static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + enum ufs_notify_change_status status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + + if (status == PRE_CHANGE) + return 0; + + if (ufs_qcom_is_link_off(hba)) { + /* + * Disable the tx/rx lane symbol clocks before PHY is + * powered down as the PLL source should be disabled + * after downstream clocks are disabled. + */ + ufs_qcom_disable_lane_clks(host); + phy_power_off(phy); + + /* reset the connected UFS device during power down */ + ufs_qcom_device_reset_ctrl(hba, true); + + } else if (!ufs_qcom_is_link_active(hba)) { + ufs_qcom_disable_lane_clks(host); + } + + return 0; +} + +static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + int err; + + if (ufs_qcom_is_link_off(hba)) { + err = phy_power_on(phy); + if (err) { + dev_err(hba->dev, "%s: failed PHY power on: %d\n", + __func__, err); + return err; + } + + err = ufs_qcom_enable_lane_clks(host); + if (err) + return err; + + } else if (!ufs_qcom_is_link_active(hba)) { + err = ufs_qcom_enable_lane_clks(host); + if (err) + return err; + } + + return ufs_qcom_ice_resume(host); +} + +static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) +{ + if (host->dev_ref_clk_ctrl_mmio && + (enable ^ host->is_dev_ref_clk_enabled)) { + u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio); + + if (enable) + temp |= host->dev_ref_clk_en_mask; + else + temp &= ~host->dev_ref_clk_en_mask; + + /* + * If we are here to disable this clock it might be immediately + * after entering into hibern8 in which case we need to make + * sure that device ref_clk is active for specific time after + * hibern8 enter. + */ + if (!enable) { + unsigned long gating_wait; + + gating_wait = host->hba->dev_info.clk_gating_wait_us; + if (!gating_wait) { + udelay(1); + } else { + /* + * bRefClkGatingWaitTime defines the minimum + * time for which the reference clock is + * required by device during transition from + * HS-MODE to LS-MODE or HIBERN8 state. Give it + * more delay to be on the safe side. + */ + gating_wait += 10; + usleep_range(gating_wait, gating_wait + 10); + } + } + + writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); + + /* + * Make sure the write to ref_clk reaches the destination and + * not stored in a Write Buffer (WB). + */ + readl(host->dev_ref_clk_ctrl_mmio); + + /* + * If we call hibern8 exit after this, we need to make sure that + * device ref_clk is stable for at least 1us before the hibern8 + * exit command. 
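+		 * The udelay(1) below provides exactly that guarantee.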
+ */ + if (enable) + udelay(1); + + host->is_dev_ref_clk_enabled = enable; + } +} + +static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_dev_params ufs_qcom_cap; + int ret = 0; + + if (!dev_req_params) { + pr_err("%s: incoming dev_req_params is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + switch (status) { + case PRE_CHANGE: + ufshcd_init_pwr_dev_param(&ufs_qcom_cap); + ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE; + + if (host->hw_ver.major == 0x1) { + /* + * HS-G3 operations may not reliably work on legacy QCOM + * UFS host controller hardware even though capability + * exchange during link startup phase may end up + * negotiating maximum supported gear as G3. + * Hence downgrade the maximum supported gear to HS-G2. + */ + if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2) + ufs_qcom_cap.hs_tx_gear = UFS_HS_G2; + if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2) + ufs_qcom_cap.hs_rx_gear = UFS_HS_G2; + } + + ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap, + dev_max_params, + dev_req_params); + if (ret) { + pr_err("%s: failed to determine capabilities\n", + __func__); + goto out; + } + + /* enable the device ref clock before changing to HS mode */ + if (!ufshcd_is_hs_mode(&hba->pwr_info) && + ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, true); + + if (host->hw_ver.major >= 0x4) { + ufshcd_dme_configure_adapt(hba, + dev_req_params->gear_tx, + PA_INITIAL_ADAPT); + } + break; + case POST_CHANGE: + if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, + dev_req_params->pwr_rx, + dev_req_params->hs_rate, false)) { + dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", + __func__); + /* + * we return error code at the end of the routine, + * but continue to configure UFS_PHY_TX_LANE_ENABLE + * and bus voting as usual + */ + ret = -EINVAL; + } + + /* cache the power mode parameters to use internally */ + memcpy(&host->dev_req_params, + dev_req_params, sizeof(*dev_req_params)); + + /* disable the device ref clock if entered PWM mode */ + if (ufshcd_is_hs_mode(&hba->pwr_info) && + !ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, false); + break; + default: + ret = -EINVAL; + break; + } +out: + return ret; +} + +static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) +{ + int err; + u32 pa_vs_config_reg1; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + &pa_vs_config_reg1); + if (err) + goto out; + + /* Allow extension of MSB bits of PA_SaveConfigTime attribute */ + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + (pa_vs_config_reg1 | (1 << 12))); + +out: + return err; +} + +static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) +{ + int err = 0; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) + err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); + + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) + hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; + + return err; +} + +static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (host->hw_ver.major == 0x1) + return ufshci_version(1, 1); + else + return ufshci_version(2, 0); +} + +/** + * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks + * @hba: host controller instance + * + * QCOM UFS host controller might have some non standard behaviours 
(quirks)
+ * compared to what the UFSHCI specification defines. Advertise all such
+ * quirks to the standard UFS host controller driver so that the standard
+ * driver takes them into account.
+ */
+static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (host->hw_ver.major == 0x01) {
+		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
+			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+
+		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
+			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
+	}
+
+	if (host->hw_ver.major == 0x2) {
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+
+		if (!ufs_qcom_cap_qunipro(host))
+			/* Legacy UniPro mode still needs the following quirks */
+			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
+				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
+	}
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+	hba->caps |= UFSHCD_CAP_CLK_SCALING;
+	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+	hba->caps |= UFSHCD_CAP_WB_EN;
+	hba->caps |= UFSHCD_CAP_CRYPTO;
+	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
+	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+	if (host->hw_ver.major >= 0x2) {
+		host->caps = UFS_QCOM_CAP_QUNIPRO |
+			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
+	}
+}
+
+/**
+ * ufs_qcom_setup_clocks - enable/disable clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+				 enum ufs_notify_change_status status)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/*
+	 * If ufs_qcom_init() has not finished yet, simply ignore: this
+	 * ufs_qcom_setup_clocks() is called again from ufs_qcom_init()
+	 * once init is done.
+	 */
+	if (!host)
+		return 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		if (!on) {
+			if (!ufs_qcom_is_link_active(hba)) {
+				/* disable device ref_clk */
+				ufs_qcom_dev_ref_clk_ctrl(host, false);
+			}
+		}
+		break;
+	case POST_CHANGE:
+		if (on) {
+			/* enable the device ref clock for HS mode */
+			if (ufshcd_is_hs_mode(&hba->pwr_info))
+				ufs_qcom_dev_ref_clk_ctrl(host, true);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int
+ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+	/* Currently this code only knows about a single reset. */
+	WARN_ON(id);
+	ufs_qcom_assert_reset(host->hba);
+	/* provide 1ms delay to let the reset pulse propagate. */
+	usleep_range(1000, 1100);
+	return 0;
+}
+
+static int
+ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+	/* Currently this code only knows about a single reset. */
+	WARN_ON(id);
+	ufs_qcom_deassert_reset(host->hba);
+
+	/*
+	 * after reset deassertion, the PHY needs all ref clocks, voltage and
+	 * current to settle before the SerDes can start.
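+	 * The 1 ms sleep below gives that settling some headroom.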
+ */ + usleep_range(1000, 1100); + return 0; +} + +static const struct reset_control_ops ufs_qcom_reset_ops = { + .assert = ufs_qcom_reset_assert, + .deassert = ufs_qcom_reset_deassert, +}; + +/** + * ufs_qcom_init - bind phy with controller + * @hba: host controller instance + * + * Binds PHY with controller and powers up PHY enabling clocks + * and regulators. + * + * Returns -EPROBE_DEFER if binding fails, returns negative error + * on phy power up failure and returns zero on success. + */ +static int ufs_qcom_init(struct ufs_hba *hba) +{ + int err; + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct ufs_qcom_host *host; + struct resource *res; + struct ufs_clk_info *clki; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) { + err = -ENOMEM; + dev_err(dev, "%s: no memory for qcom ufs host\n", __func__); + goto out; + } + + /* Make a two way bind between the qcom host and the hba */ + host->hba = hba; + ufshcd_set_variant(hba, host); + + /* Setup the optional reset control of HCI */ + host->core_reset = devm_reset_control_get_optional(hba->dev, "rst"); + if (IS_ERR(host->core_reset)) { + err = dev_err_probe(dev, PTR_ERR(host->core_reset), + "Failed to get reset control\n"); + goto out_variant_clear; + } + + /* Fire up the reset controller. Failure here is non-fatal. */ + host->rcdev.of_node = dev->of_node; + host->rcdev.ops = &ufs_qcom_reset_ops; + host->rcdev.owner = dev->driver->owner; + host->rcdev.nr_resets = 1; + err = devm_reset_controller_register(dev, &host->rcdev); + if (err) { + dev_warn(dev, "Failed to register reset controller\n"); + err = 0; + } + + if (!has_acpi_companion(dev)) { + host->generic_phy = devm_phy_get(dev, "ufsphy"); + if (IS_ERR(host->generic_phy)) { + err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n"); + goto out_variant_clear; + } + } + + host->device_reset = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(host->device_reset)) { + err = PTR_ERR(host->device_reset); + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to acquire reset gpio: %d\n", err); + goto out_variant_clear; + } + + ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, + &host->hw_ver.minor, &host->hw_ver.step); + + /* + * for newer controllers, device reference clock control bit has + * moved inside UFS controller register address space itself. 
+ */ + if (host->hw_ver.major >= 0x02) { + host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; + host->dev_ref_clk_en_mask = BIT(26); + } else { + /* "dev_ref_clk_ctrl_mem" is optional resource */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "dev_ref_clk_ctrl_mem"); + if (res) { + host->dev_ref_clk_ctrl_mmio = + devm_ioremap_resource(dev, res); + if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) + host->dev_ref_clk_ctrl_mmio = NULL; + host->dev_ref_clk_en_mask = BIT(5); + } + } + + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, "core_clk_unipro")) + clki->keep_link_active = true; + } + + err = ufs_qcom_init_lane_clks(host); + if (err) + goto out_variant_clear; + + ufs_qcom_set_caps(hba); + ufs_qcom_advertise_quirks(hba); + + err = ufs_qcom_ice_init(host); + if (err) + goto out_variant_clear; + + ufs_qcom_setup_clocks(hba, true, POST_CHANGE); + + if (hba->dev->id < MAX_UFS_QCOM_HOSTS) + ufs_qcom_hosts[hba->dev->id] = host; + + host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN; + ufs_qcom_get_default_testbus_cfg(host); + err = ufs_qcom_testbus_config(host); + if (err) { + dev_warn(dev, "%s: failed to configure the testbus %d\n", + __func__, err); + err = 0; + } + + goto out; + +out_variant_clear: + ufshcd_set_variant(hba, NULL); +out: + return err; +} + +static void ufs_qcom_exit(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + ufs_qcom_disable_lane_clks(host); + phy_power_off(host->generic_phy); + phy_exit(host->generic_phy); +} + +static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, + u32 clk_cycles) +{ + int err; + u32 core_clk_ctrl_reg; + + if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK) + return -EINVAL; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + &core_clk_ctrl_reg); + if (err) + goto out; + + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK; + core_clk_ctrl_reg |= clk_cycles; + + /* Clear CORE_CLK_DIV_EN */ + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + core_clk_ctrl_reg); +out: + return err; +} + +static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) +{ + /* nothing to do as of now */ + return 0; +} + +static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + /* set unipro core clock cycles to 150 and clear clock divider */ + return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); +} + +static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err; + u32 core_clk_ctrl_reg; + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + &core_clk_ctrl_reg); + + /* make sure CORE_CLK_DIV_EN is cleared */ + if (!err && + (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + core_clk_ctrl_reg); + } + + return err; +} + +static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + /* set unipro core clock cycles to 75 and clear clock divider */ + return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); +} 
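+
+/*
+ * Clock scaling support: the notify handler below performs the scale with
+ * the link in hibern8 and re-programs the UniPro core clock attribute to
+ * 150 cycles/us when scaling up and 75 cycles/us when scaling down, then
+ * re-applies the bus timers for the cached power mode.
+ */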
+ +static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, + bool scale_up, enum ufs_notify_change_status status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; + int err = 0; + + if (status == PRE_CHANGE) { + err = ufshcd_uic_hibern8_enter(hba); + if (err) + return err; + if (scale_up) + err = ufs_qcom_clk_scale_up_pre_change(hba); + else + err = ufs_qcom_clk_scale_down_pre_change(hba); + if (err) + ufshcd_uic_hibern8_exit(hba); + + } else { + if (scale_up) + err = ufs_qcom_clk_scale_up_post_change(hba); + else + err = ufs_qcom_clk_scale_down_post_change(hba); + + + if (err || !dev_req_params) { + ufshcd_uic_hibern8_exit(hba); + goto out; + } + + ufs_qcom_cfg_timers(hba, + dev_req_params->gear_rx, + dev_req_params->pwr_rx, + dev_req_params->hs_rate, + false); + ufshcd_uic_hibern8_exit(hba); + } + +out: + return err; +} + +static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, + void *priv, void (*print_fn)(struct ufs_hba *hba, + int offset, int num_regs, const char *str, void *priv)) +{ + u32 reg; + struct ufs_qcom_host *host; + + if (unlikely(!hba)) { + pr_err("%s: hba is NULL\n", __func__); + return; + } + if (unlikely(!print_fn)) { + dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); + return; + } + + host = ufshcd_get_variant(hba); + if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN)) + return; + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); + print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); + + reg = ufshcd_readl(hba, REG_UFS_CFG1); + reg |= UTP_DBG_RAMS_EN; + ufshcd_writel(hba, reg, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); + print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); + print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); + print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); + + /* clear bit 17 - UTP_DBG_RAMS_EN */ + ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC); + print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); + print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); + print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT); + print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); + print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); +} + +static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) +{ + if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, + UFS_REG_TEST_BUS_EN, REG_UFS_CFG1); + ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); + } else { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); + ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); + } +} + +static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) +{ + /* provide a legal default 
configuration */
+	host->testbus.select_major = TSTBUS_UNIPRO;
+	host->testbus.select_minor = 37;
+}
+
+static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+{
+	if (host->testbus.select_major >= TSTBUS_MAX) {
+		dev_err(host->hba->dev,
+			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
+			__func__, host->testbus.select_major);
+		return false;
+	}
+
+	return true;
+}
+
+int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
+{
+	int reg;
+	int offset;
+	u32 mask = TEST_BUS_SUB_SEL_MASK;
+
+	if (!host)
+		return -EINVAL;
+
+	if (!ufs_qcom_testbus_cfg_is_ok(host))
+		return -EPERM;
+
+	switch (host->testbus.select_major) {
+	case TSTBUS_UAWM:
+		reg = UFS_TEST_BUS_CTRL_0;
+		offset = 24;
+		break;
+	case TSTBUS_UARM:
+		reg = UFS_TEST_BUS_CTRL_0;
+		offset = 16;
+		break;
+	case TSTBUS_TXUC:
+		reg = UFS_TEST_BUS_CTRL_0;
+		offset = 8;
+		break;
+	case TSTBUS_RXUC:
+		reg = UFS_TEST_BUS_CTRL_0;
+		offset = 0;
+		break;
+	case TSTBUS_DFC:
+		reg = UFS_TEST_BUS_CTRL_1;
+		offset = 24;
+		break;
+	case TSTBUS_TRLUT:
+		reg = UFS_TEST_BUS_CTRL_1;
+		offset = 16;
+		break;
+	case TSTBUS_TMRLUT:
+		reg = UFS_TEST_BUS_CTRL_1;
+		offset = 8;
+		break;
+	case TSTBUS_OCSC:
+		reg = UFS_TEST_BUS_CTRL_1;
+		offset = 0;
+		break;
+	case TSTBUS_WRAPPER:
+		reg = UFS_TEST_BUS_CTRL_2;
+		offset = 16;
+		break;
+	case TSTBUS_COMBINED:
+		reg = UFS_TEST_BUS_CTRL_2;
+		offset = 8;
+		break;
+	case TSTBUS_UTP_HCI:
+		reg = UFS_TEST_BUS_CTRL_2;
+		offset = 0;
+		break;
+	case TSTBUS_UNIPRO:
+		reg = UFS_UNIPRO_CFG;
+		offset = 20;
+		mask = 0xFFF;
+		break;
+	/*
+	 * No need for a default case, since
+	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
+	 * is legal
+	 */
+	}
+	mask <<= offset;
+	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+		    (u32)host->testbus.select_major << 19,
+		    REG_UFS_CFG1);
+	ufshcd_rmwl(host->hba, mask,
+		    (u32)host->testbus.select_minor << offset,
+		    reg);
+	ufs_qcom_enable_test_bus(host);
+	/*
+	 * Make sure the test bus configuration is
+	 * committed before returning.
+	 */
+	mb();
+
+	return 0;
+}
+
+static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
+{
+	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
+			 "HCI Vendor Specific Registers ");
+
+	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+}
+
+/**
+ * ufs_qcom_device_reset() - toggle the (optional) device reset line
+ * @hba: per-adapter instance
+ *
+ * Toggles the (optional) reset line to reset the attached device.
+ */
+static int ufs_qcom_device_reset(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/* reset gpio is optional */
+	if (!host->device_reset)
+		return -EOPNOTSUPP;
+
+	/*
+	 * The UFS device shall detect reset pulses of 1us; sleep for 10us to
+	 * be on the safe side.
+	 */
+	ufs_qcom_device_reset_ctrl(hba, true);
+	usleep_range(10, 15);
+
+	ufs_qcom_device_reset_ctrl(hba, false);
+	usleep_range(10, 15);
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+					  struct devfreq_dev_profile *p,
+					  struct devfreq_simple_ondemand_data *d)
+{
+	p->polling_ms = 60;
+	d->upthreshold = 70;
+	d->downdifferential = 5;
+}
+#else
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+					  struct devfreq_dev_profile *p,
+					  struct devfreq_simple_ondemand_data *data)
+{
+}
+#endif
+
+/*
+ * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */ +static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { + .name = "qcom", + .init = ufs_qcom_init, + .exit = ufs_qcom_exit, + .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version, + .clk_scale_notify = ufs_qcom_clk_scale_notify, + .setup_clocks = ufs_qcom_setup_clocks, + .hce_enable_notify = ufs_qcom_hce_enable_notify, + .link_startup_notify = ufs_qcom_link_startup_notify, + .pwr_change_notify = ufs_qcom_pwr_change_notify, + .apply_dev_quirks = ufs_qcom_apply_dev_quirks, + .suspend = ufs_qcom_suspend, + .resume = ufs_qcom_resume, + .dbg_register_dump = ufs_qcom_dump_dbg_regs, + .device_reset = ufs_qcom_device_reset, + .config_scaling_param = ufs_qcom_config_scaling_param, + .program_key = ufs_qcom_ice_program_key, +}; + +/** + * ufs_qcom_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Return zero for success and non-zero for failure + */ +static int ufs_qcom_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * ufs_qcom_remove - set driver_data of the device to NULL + * @pdev: pointer to platform device handle + * + * Always returns 0 + */ +static int ufs_qcom_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +static const struct of_device_id ufs_qcom_of_match[] = { + { .compatible = "qcom,ufshc"}, + {}, +}; +MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ufs_qcom_acpi_match[] = { + { "QCOM24A5" }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match); +#endif + +static const struct dev_pm_ops ufs_qcom_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver ufs_qcom_pltform = { + .probe = ufs_qcom_probe, + .remove = ufs_qcom_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "ufshcd-qcom", + .pm = &ufs_qcom_pm_ops, + .of_match_table = of_match_ptr(ufs_qcom_of_match), + .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match), + }, +}; +module_platform_driver(ufs_qcom_pltform); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h new file mode 100644 index 0000000000..44466a395b --- /dev/null +++ b/drivers/ufs/host/ufs-qcom.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef UFS_QCOM_H_
+#define UFS_QCOM_H_
+
+#include <linux/reset-controller.h>
+#include <linux/reset.h>
+#include <ufs/ufshcd.h>
+
+#define MAX_UFS_QCOM_HOSTS	1
+#define MAX_U32			(~(u32)0)
+#define MPHY_TX_FSM_STATE	0x41
+#define TX_FSM_HIBERN8		0x1
+#define HBRN8_POLL_TOUT_MS	100
+#define DEFAULT_CLK_RATE_HZ	1000000
+#define BUS_VECTOR_NAME_LEN	32
+
+#define UFS_HW_VER_MAJOR_SHFT	(28)
+#define UFS_HW_VER_MAJOR_MASK	(0x000F << UFS_HW_VER_MAJOR_SHFT)
+#define UFS_HW_VER_MINOR_SHFT	(16)
+#define UFS_HW_VER_MINOR_MASK	(0x0FFF << UFS_HW_VER_MINOR_SHFT)
+#define UFS_HW_VER_STEP_SHFT	(0)
+#define UFS_HW_VER_STEP_MASK	(0xFFFF << UFS_HW_VER_STEP_SHFT)
+
+/* vendor specific pre-defined parameters */
+#define SLOW	1
+#define FAST	2
+
+#define UFS_QCOM_LIMIT_HS_RATE	PA_HS_MODE_B
+
+/* QCOM UFS host controller vendor specific registers */
+enum {
+	REG_UFS_SYS1CLK_1US		= 0xC0,
+	REG_UFS_TX_SYMBOL_CLK_NS_US	= 0xC4,
+	REG_UFS_LOCAL_PORT_ID_REG	= 0xC8,
+	REG_UFS_PA_ERR_CODE		= 0xCC,
+	REG_UFS_RETRY_TIMER_REG		= 0xD0,
+	REG_UFS_PA_LINK_STARTUP_TIMER	= 0xD8,
+	REG_UFS_CFG1			= 0xDC,
+	REG_UFS_CFG2			= 0xE0,
+	REG_UFS_HW_VERSION		= 0xE4,
+
+	UFS_TEST_BUS			= 0xE8,
+	UFS_TEST_BUS_CTRL_0		= 0xEC,
+	UFS_TEST_BUS_CTRL_1		= 0xF0,
+	UFS_TEST_BUS_CTRL_2		= 0xF4,
+	UFS_UNIPRO_CFG			= 0xF8,
+
+	/*
+	 * QCOM UFS host controller vendor specific registers
+	 * added in HW Version 3.0.0
+	 */
+	UFS_AH8_CFG			= 0xFC,
+};
+
+/* QCOM UFS host controller vendor specific debug registers */
+enum {
+	UFS_DBG_RD_REG_UAWM		= 0x100,
+	UFS_DBG_RD_REG_UARM		= 0x200,
+	UFS_DBG_RD_REG_TXUC		= 0x300,
+	UFS_DBG_RD_REG_RXUC		= 0x400,
+	UFS_DBG_RD_REG_DFC		= 0x500,
+	UFS_DBG_RD_REG_TRLUT		= 0x600,
+	UFS_DBG_RD_REG_TMRLUT		= 0x700,
+	UFS_UFS_DBG_RD_REG_OCSC		= 0x800,
+
+	UFS_UFS_DBG_RD_DESC_RAM		= 0x1500,
+	UFS_UFS_DBG_RD_PRDT_RAM		= 0x1700,
+	UFS_UFS_DBG_RD_RESP_RAM		= 0x1800,
+	UFS_UFS_DBG_RD_EDTL_RAM		= 0x1900,
+};
+
+#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x)	(0x000 + x)
+#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x)	(0x400 + x)
+
+/* bit definitions for REG_UFS_CFG1 register */
+#define QUNIPRO_SEL		0x1
+#define UTP_DBG_RAMS_EN		0x20000
+#define TEST_BUS_EN		BIT(18)
+#define TEST_BUS_SEL		GENMASK(22, 19)
+#define UFS_REG_TEST_BUS_EN	BIT(30)
+
+/* bit definitions for REG_UFS_CFG2 register */
+#define UAWM_HW_CGC_EN		(1 << 0)
+#define UARM_HW_CGC_EN		(1 << 1)
+#define TXUC_HW_CGC_EN		(1 << 2)
+#define RXUC_HW_CGC_EN		(1 << 3)
+#define DFC_HW_CGC_EN		(1 << 4)
+#define TRLUT_HW_CGC_EN		(1 << 5)
+#define TMRLUT_HW_CGC_EN	(1 << 6)
+#define OCSC_HW_CGC_EN		(1 << 7)
+
+/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
+#define TEST_BUS_SUB_SEL_MASK	0x1F	/* All XXX_SEL fields are 5 bits wide */
+
+#define REG_UFS_CFG2_CGC_EN_ALL	(UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
+				 TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
+				 DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
+				 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
+
+/* bit offset */
+enum {
+	OFFSET_UFS_PHY_SOFT_RESET	= 1,
+	OFFSET_CLK_NS_REG		= 10,
+};
+
+/* bit masks */
+enum {
+	MASK_UFS_PHY_SOFT_RESET		= 0x2,
+	MASK_TX_SYMBOL_CLK_1US_REG	= 0x3FF,
+	MASK_CLK_NS_REG			= 0xFFFC00,
+};
+
+/* QCOM UFS debug print bit mask */
+#define UFS_QCOM_DBG_PRINT_REGS_EN	BIT(0)
+#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN	BIT(1)
+#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN	BIT(2)
+
+#define UFS_QCOM_DBG_PRINT_ALL	\
+	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
+	 UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+
+/* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1	0x9000
+#define DME_VS_CORE_CLK_CTRL	0xD002
+/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT	BIT(8)
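+/* number of core clock cycles per 1 us, carried in the attribute's low byte */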
+#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF + +static inline void +ufs_qcom_get_controller_revision(struct ufs_hba *hba, + u8 *major, u16 *minor, u16 *step) +{ + u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION); + + *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT; + *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT; + *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT; +}; + +static inline void ufs_qcom_assert_reset(struct ufs_hba *hba) +{ + ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, + 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); + + /* + * Make sure assertion of ufs phy reset is written to + * register before returning + */ + mb(); +} + +static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) +{ + ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, + 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); + + /* + * Make sure de-assertion of ufs phy reset is written to + * register before returning + */ + mb(); +} + +/* Host controller hardware version: major.minor.step */ +struct ufs_hw_version { + u16 step; + u16 minor; + u8 major; +}; + +struct ufs_qcom_testbus { + u8 select_major; + u8 select_minor; +}; + +struct gpio_desc; + +struct ufs_qcom_host { + /* + * Set this capability if host controller supports the QUniPro mode + * and if driver wants the Host controller to operate in QUniPro mode. + * Note: By default this capability will be kept enabled if host + * controller supports the QUniPro mode. + */ + #define UFS_QCOM_CAP_QUNIPRO 0x1 + + /* + * Set this capability if host controller can retain the secure + * configuration even after UFS controller core power collapse. + */ + #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2 + u32 caps; + + struct phy *generic_phy; + struct ufs_hba *hba; + struct ufs_pa_layer_attr dev_req_params; + struct clk *rx_l0_sync_clk; + struct clk *tx_l0_sync_clk; + struct clk *rx_l1_sync_clk; + struct clk *tx_l1_sync_clk; + bool is_lane_clks_enabled; + + void __iomem *dev_ref_clk_ctrl_mmio; + bool is_dev_ref_clk_enabled; + struct ufs_hw_version hw_ver; +#ifdef CONFIG_SCSI_UFS_CRYPTO + void __iomem *ice_mmio; +#endif + + u32 dev_ref_clk_en_mask; + + /* Bitmask for enabling debug prints */ + u32 dbg_print_en; + struct ufs_qcom_testbus testbus; + + /* Reset control of HCI */ + struct reset_control *core_reset; + struct reset_controller_dev rcdev; + + struct gpio_desc *device_reset; +}; + +static inline u32 +ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg) +{ + if (host->hw_ver.major <= 0x02) + return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg); + + return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg); +}; + +#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) +#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) +#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) + +int ufs_qcom_testbus_config(struct ufs_qcom_host *host); + +static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host) +{ + return host->caps & UFS_QCOM_CAP_QUNIPRO; +} + +/* ufs-qcom-ice.c */ + +#ifdef CONFIG_SCSI_UFS_CRYPTO +int ufs_qcom_ice_init(struct ufs_qcom_host *host); +int ufs_qcom_ice_enable(struct ufs_qcom_host *host); +int ufs_qcom_ice_resume(struct ufs_qcom_host *host); +int ufs_qcom_ice_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot); +#else +static inline int ufs_qcom_ice_init(struct ufs_qcom_host *host) +{ + return 0; +} +static inline int ufs_qcom_ice_enable(struct ufs_qcom_host *host) +{ + return 0; +} +static inline int 
ufs_qcom_ice_resume(struct ufs_qcom_host *host) +{ + return 0; +} +#define ufs_qcom_ice_program_key NULL +#endif /* !CONFIG_SCSI_UFS_CRYPTO */ + +#endif /* UFS_QCOM_H_ */ diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c new file mode 100644 index 0000000000..f8a5e79ed3 --- /dev/null +++ b/drivers/ufs/host/ufs-renesas.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Renesas UFS host controller driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ufshcd-pltfrm.h" + +struct ufs_renesas_priv { + bool initialized; /* The hardware needs initialization once */ +}; + +enum { + SET_PHY_INDEX_LO = 0, + SET_PHY_INDEX_HI, + TIMER_INDEX, + MAX_INDEX +}; + +enum ufs_renesas_init_param_mode { + MODE_RESTORE, + MODE_SET, + MODE_SAVE, + MODE_POLL, + MODE_WAIT, + MODE_WRITE, +}; + +#define PARAM_RESTORE(_reg, _index) \ + { .mode = MODE_RESTORE, .reg = _reg, .index = _index } +#define PARAM_SET(_index, _set) \ + { .mode = MODE_SET, .index = _index, .u.set = _set } +#define PARAM_SAVE(_reg, _mask, _index) \ + { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \ + .index = _index } +#define PARAM_POLL(_reg, _expected, _mask) \ + { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \ + .mask = (u32)(_mask) } +#define PARAM_WAIT(_delay_us) \ + { .mode = MODE_WAIT, .u.delay_us = _delay_us } + +#define PARAM_WRITE(_reg, _val) \ + { .mode = MODE_WRITE, .reg = _reg, .u.val = _val } + +#define PARAM_WRITE_D0_D4(_d0, _d4) \ + PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4) + +#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_RESTORE_800_80C_POLL(_index) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE(0xd0, 0x00000800), \ + PARAM_RESTORE(0xd4, _index), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_WRITE_828_82C_POLL(_data_828) \ + PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \ + PARAM_WRITE_D0_D4(0x00000828, _data_828), \ + PARAM_WRITE(0xd0, 0x0000082c), \ + PARAM_POLL(0xd4, _data_828, _data_828) + +#define PARAM_WRITE_PHY(_addr16, _data16) \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_SET_PHY(_addr16, _data16) \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE_804_80C_POLL(0x1a, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \ + PARAM_WRITE_804_80C_POLL(0x1b, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \ + 
PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0), \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \ + PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \ + PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \ + PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \ + PARAM_WRITE(0xf0, _gpio), \ + PARAM_WRITE_800_80C_POLL(_addr, _data_800), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \ + PARAM_WRITE(0xf0, _gpio), \ + PARAM_WRITE_800_80C_POLL(_addr, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_POLL(0xd4, _expected, _mask), \ + PARAM_WRITE(0xf0, 0) + +struct ufs_renesas_init_param { + enum ufs_renesas_init_param_mode mode; + u32 reg; + union { + u32 expected; + u32 delay_us; + u32 set; + u32 val; + } u; + u32 mask; + u32 index; +}; + +/* This setting is for SERIES B */ +static const struct ufs_renesas_init_param ufs_param[] = { + PARAM_WRITE(0xc0, 0x49425308), + PARAM_WRITE_D0_D4(0x00000104, 0x00000002), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000828, 0x00000200), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000828, 0x00000000), + PARAM_WRITE_D0_D4(0x00000104, 0x00000001), + PARAM_WRITE_D0_D4(0x00000940, 0x00000001), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000940, 0x00000000), + + PARAM_WRITE(0xc0, 0x49425308), + PARAM_WRITE(0xc0, 0x41584901), + + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), + PARAM_WRITE_D0_D4(0x00000804, 0x00000000), + PARAM_WRITE(0xd0, 0x0000080c), + PARAM_POLL(0xd4, BIT(8), BIT(8)), + + PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001), + + PARAM_WRITE(0xd0, 0x00000804), + PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)), + + PARAM_WRITE(0xd0, 0x00000d00), + PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX), + PARAM_WRITE(0xd4, 0x00000000), + PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), + PARAM_WRITE_D0_D4(0x00000828, 0x08000000), + PARAM_WRITE(0xd0, 0x0000082c), + PARAM_POLL(0xd4, BIT(27), BIT(27)), + PARAM_WRITE(0xd0, 0x00000d2c), + PARAM_POLL(0xd4, BIT(0), BIT(0)), + + /* phy setup */ + PARAM_INDIRECT_WRITE(1, 0x01, 0x001f), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), + PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x60, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6), + PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003), + + PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)), + PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)), + + PARAM_INDIRECT_WRITE(1, 0x32, 0x0080), + PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001), + PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001), + PARAM_INDIRECT_WRITE(0, 0x32, 0x0087), + + PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061), + PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009), + PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005), + PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058), + PARAM_INDIRECT_WRITE(1, 0x39, 0x0027), + PARAM_INDIRECT_WRITE(1, 0x47, 0x004c), + + PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002), + PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), + + PARAM_WRITE_PHY(0x0028, 0x0061), + PARAM_WRITE_PHY(0x4014, 0x0061), + PARAM_SET_PHY(0x401c, BIT(2)), + PARAM_WRITE_PHY(0x4000, 0x0000), + PARAM_WRITE_PHY(0x4001, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 
0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0000), + PARAM_WRITE_PHY(0x10af, 0x0001), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0000), + PARAM_WRITE_PHY(0x10af, 0x0002), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0080), + PARAM_WRITE_PHY(0x10af, 0x0000), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0080), + PARAM_WRITE_PHY(0x10af, 0x001a), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_INDIRECT_WRITE(7, 0x70, 0x0016), + PARAM_INDIRECT_WRITE(7, 0x71, 0x0016), + PARAM_INDIRECT_WRITE(7, 0x72, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x73, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x74, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x75, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x76, 0x0010), + PARAM_INDIRECT_WRITE(7, 0x77, 0x0010), + PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff), + PARAM_INDIRECT_WRITE(7, 0x79, 0x0000), + + PARAM_INDIRECT_WRITE(7, 0x19, 0x0007), + + PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007), + + PARAM_INDIRECT_WRITE(7, 0x24, 0x000c), + + PARAM_INDIRECT_WRITE(7, 0x25, 0x000c), + + PARAM_INDIRECT_WRITE(7, 0x62, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x63, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004), + PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017), + PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)), + PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)), + /* end of phy setup */ + + PARAM_WRITE(0xf0, 0), + PARAM_WRITE(0xd0, 0x00000d00), + PARAM_RESTORE(0xd4, TIMER_INDEX), +}; + +static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + "); +} + +static void ufs_renesas_reg_control(struct ufs_hba *hba, + const struct ufs_renesas_init_param *p) +{ + static u32 save[MAX_INDEX]; + int ret; + u32 val; + + WARN_ON(p->index >= MAX_INDEX); + + switch (p->mode) { + case MODE_RESTORE: + ufshcd_writel(hba, save[p->index], p->reg); + break; + case MODE_SET: + save[p->index] |= p->u.set; + break; + case MODE_SAVE: + save[p->index] = ufshcd_readl(hba, p->reg) & p->mask; + break; + case MODE_POLL: + ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg, + val, + (val & p->mask) == p->u.expected, + 10, 1000); + if (ret) + dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n", + __func__, ret, val, p->mask, p->u.expected); + break; + case MODE_WAIT: + if (p->u.delay_us > 1000) + mdelay(DIV_ROUND_UP(p->u.delay_us, 1000)); + else + udelay(p->u.delay_us); + break; + case MODE_WRITE: + ufshcd_writel(hba, p->u.val, p->reg); + break; + default: + break; + } +} + +static void ufs_renesas_pre_init(struct ufs_hba *hba) +{ + const struct ufs_renesas_init_param *p = ufs_param; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ufs_param); i++) + ufs_renesas_reg_control(hba, &p[i]); +} + +static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_renesas_priv *priv = ufshcd_get_variant(hba); + + if (priv->initialized) + return 0; + + if (status == PRE_CHANGE) + ufs_renesas_pre_init(hba); + + priv->initialized = true; + + return 0; +} + +static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + if (on && status == PRE_CHANGE) + pm_runtime_get_sync(hba->dev); + else if (!on && status == POST_CHANGE) + pm_runtime_put(hba->dev); + + 
return 0;
+}
+
+static int ufs_renesas_init(struct ufs_hba *hba)
+{
+	struct ufs_renesas_priv *priv;
+
+	priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	/* zeroed allocation: priv->initialized must start out false */
+	ufshcd_set_variant(hba, priv);
+
+	hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
+
+	return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_renesas_vops = {
+	.name		= "renesas",
+	.init		= ufs_renesas_init,
+	.setup_clocks	= ufs_renesas_setup_clocks,
+	.hce_enable_notify = ufs_renesas_hce_enable_notify,
+	.dbg_register_dump = ufs_renesas_dbg_register_dump,
+};
+
+static const struct of_device_id __maybe_unused ufs_renesas_of_match[] = {
+	{ .compatible = "renesas,r8a779f0-ufs" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ufs_renesas_of_match);
+
+static int ufs_renesas_probe(struct platform_device *pdev)
+{
+	return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
+}
+
+static int ufs_renesas_remove(struct platform_device *pdev)
+{
+	struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+	ufshcd_remove(hba);
+
+	return 0;
+}
+
+static struct platform_driver ufs_renesas_platform = {
+	.probe	= ufs_renesas_probe,
+	.remove	= ufs_renesas_remove,
+	.driver	= {
+		.name	= "ufshcd-renesas",
+		.of_match_table	= of_match_ptr(ufs_renesas_of_match),
+	},
+};
+module_platform_driver(ufs_renesas_platform);
+
+MODULE_AUTHOR("Yoshihiro Shimoda ");
+MODULE_DESCRIPTION("Renesas UFS host controller driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/ufs/host/ufshcd-dwc.c b/drivers/ufs/host/ufshcd-dwc.c
new file mode 100644
index 0000000000..e28a67e1e3
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-dwc.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ */
+
+#include <linux/module.h>
+
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+
+int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
+			     const struct ufshcd_dme_attr_val *v, int n)
+{
+	int ret = 0;
+	int attr_node = 0;
+
+	for (attr_node = 0; attr_node < n; attr_node++) {
+		ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
+			ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
+
+/**
+ * ufshcd_dwc_program_clk_div()
+ * This function programs the clk divider value. This value is needed to
+ * provide 1 microsecond tick to unipro layer.
+ * @hba: Private Structure pointer
+ * @divider_val: clock divider value to be programmed
+ *
+ */
+static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
+{
+	ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
+}
+
+/**
+ * ufshcd_dwc_link_is_up()
+ * Check if link is up
+ * @hba: private structure pointer
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
+{
+	int dme_result = 0;
+
+	ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
+
+	if (dme_result == UFSHCD_LINK_IS_UP) {
+		ufshcd_set_link_active(hba);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * ufshcd_dwc_connection_setup()
+ * This function configures both the local side (host) and the peer side
+ * (device) unipro attributes to establish the connection to application/
+ * cport.
+ * This function is not required if the hardware is properly configured to
+ * have this connection setup on reset.
But invoking this function does no + * harm and should be fine even working with any ufs device. + * + * @hba: pointer to drivers private data + * + * Returns 0 on success non-zero value on failure + */ +static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) +{ + static const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER } + }; + + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs)); +} + +/** + * ufshcd_dwc_link_startup_notify() + * UFS Host DWC specific link startup sequence + * @hba: private structure pointer + * @status: Callback notify status + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + if (status == PRE_CHANGE) { + ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125); + + err = ufshcd_vops_phy_initialization(hba); + if (err) { + dev_err(hba->dev, "Phy setup failed (%d)\n", err); + goto out; + } + } else { /* POST_CHANGE */ + err = ufshcd_dwc_link_is_up(hba); + if (err) { + dev_err(hba->dev, "Link is not up\n"); + goto out; + } + + err = ufshcd_dwc_connection_setup(hba); + if (err) + dev_err(hba->dev, "Connection setup failed (%d)\n", + err); + } + +out: + return err; +} +EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify); + +MODULE_AUTHOR("Joao Pinto "); +MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/ufs/host/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h new file mode 100644 index 0000000000..ad91ea5666 --- /dev/null +++ b/drivers/ufs/host/ufshcd-dwc.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: Joao Pinto + */ + +#ifndef _UFSHCD_DWC_H +#define _UFSHCD_DWC_H + +#include + +struct ufshcd_dme_attr_val { + u32 attr_sel; + u32 mib_val; + u8 peer; +}; + +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status); +int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, + const struct ufshcd_dme_attr_val *v, int n); +#endif /* End of Header */ diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c new file mode 100644 index 0000000000..1c91f43e15 --- /dev/null +++ b/drivers/ufs/host/ufshcd-pci.c @@ -0,0 +1,631 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Universal Flash Storage Host controller PCI glue driver + * + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi + * Vinayak Holikatti + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ufs_host { + void (*late_init)(struct ufs_hba *hba); +}; + +enum intel_ufs_dsm_func_id { + INTEL_DSM_FNS = 0, + INTEL_DSM_RESET = 1, +}; + +struct intel_host { + struct ufs_host ufs_host; + u32 dsm_fns; + u32 active_ltr; + u32 idle_ltr; + struct dentry *debugfs_root; + struct gpio_desc *reset_gpio; +}; + +static const guid_t intel_dsm_guid = + GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA, + 0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50); + +static bool __intel_dsm_supported(struct intel_host *host, + enum intel_ufs_dsm_func_id fn) +{ + return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn)); +} + +#define INTEL_DSM_SUPPORTED(host, name) \ + __intel_dsm_supported(host, INTEL_DSM_##name) + +static int __intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + union acpi_object *obj; + int err = 0; + size_t len; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); + if (!obj) + return -EOPNOTSUPP; + + if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) { + err = -EINVAL; + goto out; + } + + len = min_t(size_t, obj->buffer.length, 4); + + *result = 0; + memcpy(result, obj->buffer.pointer, len); +out: + ACPI_FREE(obj); + + return err; +} + +static int intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + if (!__intel_dsm_supported(intel_host, fn)) + return -EOPNOTSUPP; + + return __intel_dsm(intel_host, dev, fn, result); +} + +static void intel_dsm_init(struct intel_host *intel_host, struct device *dev) +{ + int err; + + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); + dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err); +} + +static int ufs_intel_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + /* Cannot enable ICE until after HC enable */ + if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) { + u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE); + + hce |= CRYPTO_GENERAL_ENABLE; + ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE); + } + + return 0; +} + +static int ufs_intel_disable_lcc(struct ufs_hba *hba) +{ + u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE); + u32 lcc_enable = 0; + + ufshcd_dme_get(hba, attr, &lcc_enable); + if (lcc_enable) + ufshcd_disable_host_tx_lcc(hba); + + return 0; +} + +static int ufs_intel_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + switch (status) { + case PRE_CHANGE: + err = ufs_intel_disable_lcc(hba); + break; + case POST_CHANGE: + break; + default: + break; + } + + 
return err;
+}
+
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+	struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+	int ret;
+
+	pwr_info.lane_rx = lanes;
+	pwr_info.lane_tx = lanes;
+	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+	if (ret)
+		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+			__func__, lanes, ret);
+	return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+				enum ufs_notify_change_status status,
+				struct ufs_pa_layer_attr *dev_max_params,
+				struct ufs_pa_layer_attr *dev_req_params)
+{
+	int err = 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		if (ufshcd_is_hs_mode(dev_max_params) &&
+		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+			ufs_intel_set_lanes(hba, 2);
+		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+		break;
+	case POST_CHANGE:
+		if (ufshcd_is_hs_mode(dev_req_params)) {
+			u32 peer_granularity;
+
+			usleep_range(1000, 1250);
+			err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+						  &peer_granularity);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+	u32 granularity, peer_granularity;
+	u32 pa_tactivate, peer_pa_tactivate;
+	int ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+	if (ret)
+		goto out;
+
+	if (granularity == peer_granularity) {
+		u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+	}
+out:
+	return ret;
+}
+
+#define INTEL_ACTIVELTR		0x804
+#define INTEL_IDLELTR		0x808
+
+#define INTEL_LTR_REQ		BIT(15)
+#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US	(2 << 10)
+#define INTEL_LTR_SCALE_32US	(3 << 10)
+#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)
+
+static void intel_cache_ltr(struct ufs_hba *hba)
+{
+	struct intel_host *host = ufshcd_get_variant(hba);
+
+	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
+}
+
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct intel_host *host = ufshcd_get_variant(hba);
+	u32 ltr;
+
+	pm_runtime_get_sync(dev);
+
+	/*
+	 * Program latency tolerance (LTR) according to what has been asked
+	 * by the PM QoS layer, or disable it if we were passed a negative
+	 * value or PM_QOS_LATENCY_ANY.
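+	 * Values too large for the 10-bit LTR field are re-expressed in the
+	 * coarser 32 us scale (hence the right shift by 5 below).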
+ */ + ltr = readl(hba->mmio_base + INTEL_ACTIVELTR); + + if (val == PM_QOS_LATENCY_ANY || val < 0) { + ltr &= ~INTEL_LTR_REQ; + } else { + ltr |= INTEL_LTR_REQ; + ltr &= ~INTEL_LTR_SCALE_MASK; + ltr &= ~INTEL_LTR_VALUE_MASK; + + if (val > INTEL_LTR_VALUE_MASK) { + val >>= 5; + if (val > INTEL_LTR_VALUE_MASK) + val = INTEL_LTR_VALUE_MASK; + ltr |= INTEL_LTR_SCALE_32US | val; + } else { + ltr |= INTEL_LTR_SCALE_1US | val; + } + } + + if (ltr == host->active_ltr) + goto out; + + writel(ltr, hba->mmio_base + INTEL_ACTIVELTR); + writel(ltr, hba->mmio_base + INTEL_IDLELTR); + + /* Cache the values into intel_host structure */ + intel_cache_ltr(hba); +out: + pm_runtime_put(dev); +} + +static void intel_ltr_expose(struct device *dev) +{ + dev->power.set_latency_tolerance = intel_ltr_set; + dev_pm_qos_expose_latency_tolerance(dev); +} + +static void intel_ltr_hide(struct device *dev) +{ + dev_pm_qos_hide_latency_tolerance(dev); + dev->power.set_latency_tolerance = NULL; +} + +static void intel_add_debugfs(struct ufs_hba *hba) +{ + struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL); + struct intel_host *host = ufshcd_get_variant(hba); + + intel_cache_ltr(hba); + + host->debugfs_root = dir; + debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr); + debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr); +} + +static void intel_remove_debugfs(struct ufs_hba *hba) +{ + struct intel_host *host = ufshcd_get_variant(hba); + + debugfs_remove_recursive(host->debugfs_root); +} + +static int ufs_intel_device_reset(struct ufs_hba *hba) +{ + struct intel_host *host = ufshcd_get_variant(hba); + + if (INTEL_DSM_SUPPORTED(host, RESET)) { + u32 result = 0; + int err; + + err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result); + if (!err && !result) + err = -EIO; + if (err) + dev_err(hba->dev, "%s: DSM error %d result %u\n", + __func__, err, result); + return err; + } + + if (!host->reset_gpio) + return -EOPNOTSUPP; + + gpiod_set_value_cansleep(host->reset_gpio, 1); + usleep_range(10, 15); + + gpiod_set_value_cansleep(host->reset_gpio, 0); + usleep_range(10, 15); + + return 0; +} + +static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev) +{ + /* GPIO in _DSD has active low setting */ + return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); +} + +static int ufs_intel_common_init(struct ufs_hba *hba) +{ + struct intel_host *host; + + hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + + host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + ufshcd_set_variant(hba, host); + intel_dsm_init(host, hba->dev); + if (INTEL_DSM_SUPPORTED(host, RESET)) { + if (hba->vops->device_reset) + hba->caps |= UFSHCD_CAP_DEEPSLEEP; + } else { + if (hba->vops->device_reset) + host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev); + if (IS_ERR(host->reset_gpio)) { + dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n", + __func__, PTR_ERR(host->reset_gpio)); + host->reset_gpio = NULL; + } + if (host->reset_gpio) { + gpiod_set_value_cansleep(host->reset_gpio, 0); + hba->caps |= UFSHCD_CAP_DEEPSLEEP; + } + } + intel_ltr_expose(hba->dev); + intel_add_debugfs(hba); + return 0; +} + +static void ufs_intel_common_exit(struct ufs_hba *hba) +{ + intel_remove_debugfs(hba); + intel_ltr_hide(hba->dev); +} + +static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op) +{ + if (ufshcd_is_link_hibern8(hba)) { + int ret = ufshcd_uic_hibern8_exit(hba); + + if (!ret) { + ufshcd_set_link_active(hba); + } else { + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 
+ __func__, ret); + /* + * Force reset and restore. Any other actions can lead + * to an unrecoverable state. + */ + ufshcd_set_link_off(hba); + } + } + + return 0; +} + +static int ufs_intel_ehl_init(struct ufs_hba *hba) +{ + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + return ufs_intel_common_init(hba); +} + +static void ufs_intel_lkf_late_init(struct ufs_hba *hba) +{ + /* LKF always needs a full reset, so set PM accordingly */ + if (hba->caps & UFSHCD_CAP_DEEPSLEEP) { + hba->spm_lvl = UFS_PM_LVL_6; + hba->rpm_lvl = UFS_PM_LVL_6; + } else { + hba->spm_lvl = UFS_PM_LVL_5; + hba->rpm_lvl = UFS_PM_LVL_5; + } +} + +static int ufs_intel_lkf_init(struct ufs_hba *hba) +{ + struct ufs_host *ufs_host; + int err; + + hba->nop_out_timeout = 200; + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + hba->caps |= UFSHCD_CAP_CRYPTO; + err = ufs_intel_common_init(hba); + ufs_host = ufshcd_get_variant(hba); + ufs_host->late_init = ufs_intel_lkf_late_init; + return err; +} + +static int ufs_intel_adl_init(struct ufs_hba *hba) +{ + hba->nop_out_timeout = 200; + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + hba->caps |= UFSHCD_CAP_WB_EN; + return ufs_intel_common_init(hba); +} + +static int ufs_intel_mtl_init(struct ufs_hba *hba) +{ + hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; + return ufs_intel_common_init(hba); +} + +static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_common_init, + .exit = ufs_intel_common_exit, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, +}; + +static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_ehl_init, + .exit = ufs_intel_common_exit, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, +}; + +static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_lkf_init, + .exit = ufs_intel_common_exit, + .hce_enable_notify = ufs_intel_hce_enable_notify, + .link_startup_notify = ufs_intel_link_startup_notify, + .pwr_change_notify = ufs_intel_lkf_pwr_change_notify, + .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +}; + +static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_adl_init, + .exit = ufs_intel_common_exit, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +}; + +static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_mtl_init, + .exit = ufs_intel_common_exit, + .hce_enable_notify = ufs_intel_hce_enable_notify, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +}; + +#ifdef CONFIG_PM_SLEEP +static int ufshcd_pci_restore(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + /* Force a full reset and restore */ + ufshcd_set_link_off(hba); + + return ufshcd_system_resume(dev); +} +#endif + +/** + * ufshcd_pci_shutdown - main function to put the controller in reset state + * @pdev: pointer to PCI device handle + */ +static void ufshcd_pci_shutdown(struct pci_dev *pdev) +{ + ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); +} + +/** + * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space + * data structure memory + * @pdev: pointer to PCI handle + */ +static void 
ufshcd_pci_remove(struct pci_dev *pdev) +{ + struct ufs_hba *hba = pci_get_drvdata(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + ufshcd_remove(hba); + ufshcd_dealloc_host(hba); +} + +/** + * ufshcd_pci_probe - probe routine of the driver + * @pdev: pointer to PCI device handle + * @id: PCI device id + * + * Returns 0 on success, non-zero value on failure + */ +static int +ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ufs_host *ufs_host; + struct ufs_hba *hba; + void __iomem *mmio_base; + int err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return err; + } + + pci_set_master(pdev); + + err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); + if (err < 0) { + dev_err(&pdev->dev, "request and iomap failed\n"); + return err; + } + + mmio_base = pcim_iomap_table(pdev)[0]; + + err = ufshcd_alloc_host(&pdev->dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + return err; + } + + hba->vops = (struct ufs_hba_variant_ops *)id->driver_data; + + err = ufshcd_init(hba, mmio_base, pdev->irq); + if (err) { + dev_err(&pdev->dev, "Initialization failed\n"); + ufshcd_dealloc_host(hba); + return err; + } + + ufs_host = ufshcd_get_variant(hba); + if (ufs_host && ufs_host->late_init) + ufs_host->late_init(hba); + + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); + + return 0; +} + +static const struct dev_pm_ops ufshcd_pci_pm_ops = { + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) +#ifdef CONFIG_PM_SLEEP + .suspend = ufshcd_system_suspend, + .resume = ufshcd_system_resume, + .freeze = ufshcd_system_suspend, + .thaw = ufshcd_system_resume, + .poweroff = ufshcd_system_suspend, + .restore = ufshcd_pci_restore, + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +#endif +}; + +static const struct pci_device_id ufshcd_pci_tbl[] = { + { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops }, + { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl); + +static struct pci_driver ufshcd_pci_driver = { + .name = UFSHCD, + .id_table = ufshcd_pci_tbl, + .probe = ufshcd_pci_probe, + .remove = ufshcd_pci_remove, + .shutdown = ufshcd_pci_shutdown, + .driver = { + .pm = &ufshcd_pci_pm_ops + }, +}; + +module_pci_driver(ufshcd_pci_driver); + +MODULE_AUTHOR("Santosh Yaragnavi "); +MODULE_AUTHOR("Vinayak Holikatti "); +MODULE_DESCRIPTION("UFS host controller PCI glue driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c new file mode 100644 index 0000000000..5739ff0078 --- /dev/null +++ b/drivers/ufs/host/ufshcd-pltfrm.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Universal Flash Storage Host controller Platform bus based glue driver + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi + * Vinayak Holikatti + */ + +#include +#include 
+#include +#include + +#include +#include "ufshcd-pltfrm.h" +#include + +#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2 + +static int ufshcd_parse_clock_info(struct ufs_hba *hba) +{ + int ret = 0; + int cnt; + int i; + struct device *dev = hba->dev; + struct device_node *np = dev->of_node; + const char *name; + u32 *clkfreq = NULL; + struct ufs_clk_info *clki; + int len = 0; + size_t sz = 0; + + if (!np) + goto out; + + cnt = of_property_count_strings(np, "clock-names"); + if (!cnt || (cnt == -EINVAL)) { + dev_info(dev, "%s: Unable to find clocks, assuming enabled\n", + __func__); + } else if (cnt < 0) { + dev_err(dev, "%s: count clock strings failed, err %d\n", + __func__, cnt); + ret = cnt; + } + + if (cnt <= 0) + goto out; + + if (!of_get_property(np, "freq-table-hz", &len)) { + dev_info(dev, "freq-table-hz property not specified\n"); + goto out; + } + + if (len <= 0) + goto out; + + sz = len / sizeof(*clkfreq); + if (sz != 2 * cnt) { + dev_err(dev, "%s len mismatch\n", "freq-table-hz"); + ret = -EINVAL; + goto out; + } + + clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq), + GFP_KERNEL); + if (!clkfreq) { + ret = -ENOMEM; + goto out; + } + + ret = of_property_read_u32_array(np, "freq-table-hz", + clkfreq, sz); + if (ret && (ret != -EINVAL)) { + dev_err(dev, "%s: error reading array %d\n", + "freq-table-hz", ret); + return ret; + } + + for (i = 0; i < sz; i += 2) { + ret = of_property_read_string_index(np, "clock-names", i/2, + &name); + if (ret) + goto out; + + clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); + if (!clki) { + ret = -ENOMEM; + goto out; + } + + clki->min_freq = clkfreq[i]; + clki->max_freq = clkfreq[i+1]; + clki->name = devm_kstrdup(dev, name, GFP_KERNEL); + if (!clki->name) { + ret = -ENOMEM; + goto out; + } + + if (!strcmp(name, "ref_clk")) + clki->keep_link_active = true; + dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz", + clki->min_freq, clki->max_freq, clki->name); + list_add_tail(&clki->list, &hba->clk_list_head); + } +out: + return ret; +} + +static bool phandle_exists(const struct device_node *np, + const char *phandle_name, int index) +{ + struct device_node *parse_np = of_parse_phandle(np, phandle_name, index); + + if (parse_np) + of_node_put(parse_np); + + return parse_np != NULL; +} + +#define MAX_PROP_SIZE 32 +int ufshcd_populate_vreg(struct device *dev, const char *name, + struct ufs_vreg **out_vreg) +{ + char prop_name[MAX_PROP_SIZE]; + struct ufs_vreg *vreg = NULL; + struct device_node *np = dev->of_node; + + if (!np) { + dev_err(dev, "%s: non DT initialization\n", __func__); + goto out; + } + + snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name); + if (!phandle_exists(np, prop_name, 0)) { + dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n", + __func__, prop_name); + goto out; + } + + vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); + if (!vreg) + return -ENOMEM; + + vreg->name = devm_kstrdup(dev, name, GFP_KERNEL); + if (!vreg->name) + return -ENOMEM; + + snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); + if (of_property_read_u32(np, prop_name, &vreg->max_uA)) { + dev_info(dev, "%s: unable to find %s\n", __func__, prop_name); + vreg->max_uA = 0; + } +out: + *out_vreg = vreg; + return 0; +} +EXPORT_SYMBOL_GPL(ufshcd_populate_vreg); + +/** + * ufshcd_parse_regulator_info - get regulator info from device tree + * @hba: per adapter instance + * + * Get regulator info from device tree for vcc, vccq, vccq2 power supplies. 
+ * If any of the supplies are not defined it is assumed that they are always-on + * and hence return zero. If the property is defined but parsing is failed + * then return corresponding error. + */ +static int ufshcd_parse_regulator_info(struct ufs_hba *hba) +{ + int err; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vcc", &info->vcc); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vccq", &info->vccq); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2); +out: + return err; +} + +void ufshcd_pltfrm_shutdown(struct platform_device *pdev) +{ + ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev)); +} +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown); + +static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + int ret; + + ret = of_property_read_u32(dev->of_node, "lanes-per-direction", + &hba->lanes_per_direction); + if (ret) { + dev_dbg(hba->dev, + "%s: failed to read lanes-per-direction, ret=%d\n", + __func__, ret); + hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION; + } +} + +/** + * ufshcd_get_pwr_dev_param - get finally agreed attributes for + * power mode change + * @pltfrm_param: pointer to platform parameters + * @dev_max: pointer to device attributes + * @agreed_pwr: returned agreed attributes + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param, + const struct ufs_pa_layer_attr *dev_max, + struct ufs_pa_layer_attr *agreed_pwr) +{ + int min_pltfrm_gear; + int min_dev_gear; + bool is_dev_sup_hs = false; + bool is_pltfrm_max_hs = false; + + if (dev_max->pwr_rx == FAST_MODE) + is_dev_sup_hs = true; + + if (pltfrm_param->desired_working_mode == UFS_HS_MODE) { + is_pltfrm_max_hs = true; + min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear, + pltfrm_param->hs_tx_gear); + } else { + min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear, + pltfrm_param->pwm_tx_gear); + } + + /* + * device doesn't support HS but + * pltfrm_param->desired_working_mode is HS, + * thus device and pltfrm_param don't agree + */ + if (!is_dev_sup_hs && is_pltfrm_max_hs) { + pr_info("%s: device doesn't support HS\n", + __func__); + return -ENOTSUPP; + } else if (is_dev_sup_hs && is_pltfrm_max_hs) { + /* + * since device supports HS, it supports FAST_MODE. + * since pltfrm_param->desired_working_mode is also HS + * then final decision (FAST/FASTAUTO) is done according + * to pltfrm_params as it is the restricting factor + */ + agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs; + agreed_pwr->pwr_tx = agreed_pwr->pwr_rx; + } else { + /* + * here pltfrm_param->desired_working_mode is PWM. + * it doesn't matter whether device supports HS or PWM, + * in both cases pltfrm_param->desired_working_mode will + * determine the mode + */ + agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm; + agreed_pwr->pwr_tx = agreed_pwr->pwr_rx; + } + + /* + * we would like tx to work in the minimum number of lanes + * between device capability and vendor preferences. 
+ * the same decision will be made for rx + */ + agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx, + pltfrm_param->tx_lanes); + agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx, + pltfrm_param->rx_lanes); + + /* device maximum gear is the minimum between device rx and tx gears */ + min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx); + + /* + * if both device capabilities and vendor pre-defined preferences are + * both HS or both PWM then set the minimum gear to be the chosen + * working gear. + * if one is PWM and one is HS then the one that is PWM get to decide + * what is the gear, as it is the one that also decided previously what + * pwr the device will be configured to. + */ + if ((is_dev_sup_hs && is_pltfrm_max_hs) || + (!is_dev_sup_hs && !is_pltfrm_max_hs)) { + agreed_pwr->gear_rx = + min_t(u32, min_dev_gear, min_pltfrm_gear); + } else if (!is_dev_sup_hs) { + agreed_pwr->gear_rx = min_dev_gear; + } else { + agreed_pwr->gear_rx = min_pltfrm_gear; + } + agreed_pwr->gear_tx = agreed_pwr->gear_rx; + + agreed_pwr->hs_rate = pltfrm_param->hs_rate; + + return 0; +} +EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param); + +void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param) +{ + *dev_param = (struct ufs_dev_params){ + .tx_lanes = 2, + .rx_lanes = 2, + .hs_rx_gear = UFS_HS_G3, + .hs_tx_gear = UFS_HS_G3, + .pwm_rx_gear = UFS_PWM_G4, + .pwm_tx_gear = UFS_PWM_G4, + .rx_pwr_pwm = SLOW_MODE, + .tx_pwr_pwm = SLOW_MODE, + .rx_pwr_hs = FAST_MODE, + .tx_pwr_hs = FAST_MODE, + .hs_rate = PA_HS_MODE_B, + .desired_working_mode = UFS_HS_MODE, + }; +} +EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param); + +/** + * ufshcd_pltfrm_init - probe routine of the driver + * @pdev: pointer to Platform device handle + * @vops: pointer to variant ops + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_pltfrm_init(struct platform_device *pdev, + const struct ufs_hba_variant_ops *vops) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + int irq, err; + struct device *dev = &pdev->dev; + + mmio_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mmio_base)) { + err = PTR_ERR(mmio_base); + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + err = irq; + goto out; + } + + err = ufshcd_alloc_host(dev, &hba); + if (err) { + dev_err(dev, "Allocation failed\n"); + goto out; + } + + hba->vops = vops; + + err = ufshcd_parse_clock_info(hba); + if (err) { + dev_err(dev, "%s: clock parse failed %d\n", + __func__, err); + goto dealloc_host; + } + err = ufshcd_parse_regulator_info(hba); + if (err) { + dev_err(dev, "%s: regulator init failed %d\n", + __func__, err); + goto dealloc_host; + } + + ufshcd_init_lanes_per_dir(hba); + + err = ufshcd_init(hba, mmio_base, irq); + if (err) { + dev_err(dev, "Initialization failed\n"); + goto dealloc_host; + } + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +dealloc_host: + ufshcd_dealloc_host(hba); +out: + return err; +} +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init); + +MODULE_AUTHOR("Santosh Yaragnavi "); +MODULE_AUTHOR("Vinayak Holikatti "); +MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h new file mode 100644 index 0000000000..2e4ba2bfbc --- /dev/null +++ b/drivers/ufs/host/ufshcd-pltfrm.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef UFSHCD_PLTFRM_H_ +#define UFSHCD_PLTFRM_H_ + +#include + +#define UFS_PWM_MODE 1 +#define UFS_HS_MODE 2 + +struct ufs_dev_params { + u32 pwm_rx_gear; /* pwm rx gear to work in */ + u32 pwm_tx_gear; /* pwm tx gear to work in */ + u32 hs_rx_gear; /* hs rx gear to work in */ + u32 hs_tx_gear; /* hs tx gear to work in */ + u32 rx_lanes; /* number of rx lanes */ + u32 tx_lanes; /* number of tx lanes */ + u32 rx_pwr_pwm; /* rx pwm working pwr */ + u32 tx_pwr_pwm; /* tx pwm working pwr */ + u32 rx_pwr_hs; /* rx hs working pwr */ + u32 tx_pwr_hs; /* tx hs working pwr */ + u32 hs_rate; /* rate A/B to work in HS */ + u32 desired_working_mode; +}; + +int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param, + const struct ufs_pa_layer_attr *dev_max, + struct ufs_pa_layer_attr *agreed_pwr); +void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param); +int ufshcd_pltfrm_init(struct platform_device *pdev, + const struct ufs_hba_variant_ops *vops); +void ufshcd_pltfrm_shutdown(struct platform_device *pdev); +int ufshcd_populate_vreg(struct device *dev, const char *name, + struct ufs_vreg **out_vreg); + +#endif /* UFSHCD_PLTFRM_H_ */ diff --git a/drivers/ufs/host/ufshci-dwc.h b/drivers/ufs/host/ufshci-dwc.h new file mode 100644 index 0000000000..6c290e2721 --- /dev/null +++ b/drivers/ufs/host/ufshci-dwc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto + */ + +#ifndef _UFSHCI_DWC_H +#define _UFSHCI_DWC_H + +/* DWC HC UFSHCI specific Registers */ +enum dwc_specific_registers { + DWC_UFS_REG_HCLKDIV = 0xFC, +}; + +/* Clock Divider Values: Hex equivalent of frequency in MHz */ +enum clk_div_values { + DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e, + DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d, + DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8, +}; + +/* Selector Index */ +enum selector_index { + SELIND_LN0_TX = 0x00, + SELIND_LN1_TX = 0x01, + SELIND_LN0_RX = 0x04, + SELIND_LN1_RX = 0x05, +}; + +#endif /* End of Header */ diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c new file mode 100644 index 0000000000..173cf3579c --- /dev/null +++ b/drivers/usb/dwc3/dwc3-am62.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dwc3-am62.c - TI specific Glue layer for AM62 DWC3 USB Controller + * + * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* USB WRAPPER register offsets */ +#define USBSS_PID 0x0 +#define USBSS_OVERCURRENT_CTRL 0x4 +#define USBSS_PHY_CONFIG 0x8 +#define USBSS_PHY_TEST 0xc +#define USBSS_CORE_STAT 0x14 +#define USBSS_HOST_VBUS_CTRL 0x18 +#define USBSS_MODE_CONTROL 0x1c +#define USBSS_WAKEUP_CONFIG 0x30 +#define USBSS_WAKEUP_STAT 0x34 +#define USBSS_OVERRIDE_CONFIG 0x38 +#define USBSS_IRQ_MISC_STATUS_RAW 0x430 +#define USBSS_IRQ_MISC_STATUS 0x434 +#define USBSS_IRQ_MISC_ENABLE_SET 0x438 +#define USBSS_IRQ_MISC_ENABLE_CLR 0x43c +#define USBSS_IRQ_MISC_EOI 0x440 +#define USBSS_INTR_TEST 0x490 +#define USBSS_VBUS_FILTER 0x614 +#define USBSS_VBUS_STAT 0x618 +#define USBSS_DEBUG_CFG 0x708 +#define USBSS_DEBUG_DATA 0x70c +#define USBSS_HOST_HUB_CTRL 0x714 + +/* PHY CONFIG register bits */ +#define USBSS_PHY_VBUS_SEL_MASK GENMASK(2, 1) +#define USBSS_PHY_VBUS_SEL_SHIFT 1 +#define USBSS_PHY_LANE_REVERSE BIT(0) + +/* MODE CONTROL register bits */ +#define USBSS_MODE_VALID 
BIT(0) + +/* WAKEUP CONFIG register bits */ +#define USBSS_WAKEUP_CFG_OVERCURRENT_EN BIT(3) +#define USBSS_WAKEUP_CFG_LINESTATE_EN BIT(2) +#define USBSS_WAKEUP_CFG_SESSVALID_EN BIT(1) +#define USBSS_WAKEUP_CFG_VBUSVALID_EN BIT(0) + +/* WAKEUP STAT register bits */ +#define USBSS_WAKEUP_STAT_OVERCURRENT BIT(4) +#define USBSS_WAKEUP_STAT_LINESTATE BIT(3) +#define USBSS_WAKEUP_STAT_SESSVALID BIT(2) +#define USBSS_WAKEUP_STAT_VBUSVALID BIT(1) +#define USBSS_WAKEUP_STAT_CLR BIT(0) + +/* IRQ_MISC_STATUS_RAW register bits */ +#define USBSS_IRQ_MISC_RAW_VBUSVALID BIT(22) +#define USBSS_IRQ_MISC_RAW_SESSVALID BIT(20) + +/* IRQ_MISC_STATUS register bits */ +#define USBSS_IRQ_MISC_VBUSVALID BIT(22) +#define USBSS_IRQ_MISC_SESSVALID BIT(20) + +/* IRQ_MISC_ENABLE_SET register bits */ +#define USBSS_IRQ_MISC_ENABLE_SET_VBUSVALID BIT(22) +#define USBSS_IRQ_MISC_ENABLE_SET_SESSVALID BIT(20) + +/* IRQ_MISC_ENABLE_CLR register bits */ +#define USBSS_IRQ_MISC_ENABLE_CLR_VBUSVALID BIT(22) +#define USBSS_IRQ_MISC_ENABLE_CLR_SESSVALID BIT(20) + +/* IRQ_MISC_EOI register bits */ +#define USBSS_IRQ_MISC_EOI_VECTOR BIT(0) + +/* VBUS_STAT register bits */ +#define USBSS_VBUS_STAT_SESSVALID BIT(2) +#define USBSS_VBUS_STAT_VBUSVALID BIT(0) + +/* Mask for PHY PLL REFCLK */ +#define PHY_PLL_REFCLK_MASK GENMASK(3, 0) + +#define DWC3_AM62_AUTOSUSPEND_DELAY 100 + +struct dwc3_data { + struct device *dev; + void __iomem *usbss; + struct clk *usb2_refclk; + int rate_code; + struct regmap *syscon; + unsigned int offset; + unsigned int vbus_divider; +}; + +static const int dwc3_ti_rate_table[] = { /* in KHZ */ + 9600, + 10000, + 12000, + 19200, + 20000, + 24000, + 25000, + 26000, + 38400, + 40000, + 58000, + 50000, + 52000, +}; + +static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset) +{ + return readl((data->usbss) + offset); +} + +static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value) +{ + writel(value, (data->usbss) + offset); +} + +static int phy_syscon_pll_refclk(struct dwc3_data *data) +{ + struct device *dev = data->dev; + struct device_node *node = dev->of_node; + struct of_phandle_args args; + struct regmap *syscon; + int ret; + + syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk"); + if (IS_ERR(syscon)) { + dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n"); + return PTR_ERR(syscon); + } + + data->syscon = syscon; + + ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1, + 0, &args); + if (ret) + return ret; + + data->offset = args.args[0]; + + ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code); + if (ret) { + dev_err(dev, "failed to set phy pll reference clock rate\n"); + return ret; + } + + return 0; +} + +static int dwc3_ti_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct dwc3_data *data; + int i, ret; + unsigned long rate; + u32 reg; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = dev; + platform_set_drvdata(pdev, data); + + data->usbss = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->usbss)) { + dev_err(dev, "can't map IOMEM resource\n"); + return PTR_ERR(data->usbss); + } + + data->usb2_refclk = devm_clk_get(dev, "ref"); + if (IS_ERR(data->usb2_refclk)) { + dev_err(dev, "can't get usb2_refclk\n"); + return PTR_ERR(data->usb2_refclk); + } + + /* Calculate the rate code */ + rate = clk_get_rate(data->usb2_refclk); + rate /= 1000; 
// To KHz + for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) { + if (dwc3_ti_rate_table[i] == rate) + break; + } + + if (i == ARRAY_SIZE(dwc3_ti_rate_table)) { + dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate); + return -EINVAL; + } + + data->rate_code = i; + + /* Read the syscon property and set the rate code */ + ret = phy_syscon_pll_refclk(data); + if (ret) + return ret; + + /* VBUS divider select */ + data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider"); + reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG); + if (data->vbus_divider) + reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT; + + dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg); + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + /* + * Don't ignore its dependencies with its children + */ + pm_suspend_ignore_children(dev, false); + clk_prepare_enable(data->usb2_refclk); + pm_runtime_get_noresume(dev); + + ret = of_platform_populate(node, NULL, NULL, dev); + if (ret) { + dev_err(dev, "failed to create dwc3 core: %d\n", ret); + goto err_pm_disable; + } + + /* Set mode valid bit to indicate role is valid */ + reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL); + reg |= USBSS_MODE_VALID; + dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg); + + /* Setting up autosuspend */ + pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + return 0; + +err_pm_disable: + clk_disable_unprepare(data->usb2_refclk); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + return ret; +} + +static int dwc3_ti_remove_core(struct device *dev, void *c) +{ + struct platform_device *pdev = to_platform_device(dev); + + platform_device_unregister(pdev); + return 0; +} + +static int dwc3_ti_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dwc3_data *data = platform_get_drvdata(pdev); + u32 reg; + + device_for_each_child(dev, NULL, dwc3_ti_remove_core); + + /* Clear mode valid bit */ + reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL); + reg &= ~USBSS_MODE_VALID; + dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg); + + pm_runtime_put_sync(dev); + clk_disable_unprepare(data->usb2_refclk); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + + platform_set_drvdata(pdev, NULL); + return 0; +} + +#ifdef CONFIG_PM +static int dwc3_ti_suspend_common(struct device *dev) +{ + struct dwc3_data *data = dev_get_drvdata(dev); + + clk_disable_unprepare(data->usb2_refclk); + + return 0; +} + +static int dwc3_ti_resume_common(struct device *dev) +{ + struct dwc3_data *data = dev_get_drvdata(dev); + + clk_prepare_enable(data->usb2_refclk); + + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(dwc3_ti_pm_ops, dwc3_ti_suspend_common, + dwc3_ti_resume_common, NULL); + +#define DEV_PM_OPS (&dwc3_ti_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM */ + +static const struct of_device_id dwc3_ti_of_match[] = { + { .compatible = "ti,am62-usb"}, + {}, +}; +MODULE_DEVICE_TABLE(of, dwc3_ti_of_match); + +static struct platform_driver dwc3_ti_driver = { + .probe = dwc3_ti_probe, + .remove = dwc3_ti_remove, + .driver = { + .name = "dwc3-am62", + .pm = DEV_PM_OPS, + .of_match_table = dwc3_ti_of_match, + }, +}; + +module_platform_driver(dwc3_ti_driver); + +MODULE_ALIAS("platform:dwc3-am62"); +MODULE_AUTHOR("Aswath Govindraju "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("DesignWare USB3 TI Glue Layer"); diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c new file mode 100644 
index 0000000000..01968e2167 --- /dev/null +++ b/drivers/usb/gadget/udc/aspeed_udc.c @@ -0,0 +1,1597 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021 Aspeed Technology Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AST_UDC_NUM_ENDPOINTS (1 + 4) +#define AST_UDC_EP0_MAX_PACKET 64 /* EP0's max packet size */ +#define AST_UDC_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */ +#define AST_UDC_DESCS_COUNT 256 /* Use 256 stages descriptor mode (32/256) */ +#define AST_UDC_DESC_MODE 1 /* Single/Multiple Stage(s) Descriptor Mode */ + +#define AST_UDC_EP_DMA_SIZE (AST_UDC_EPn_MAX_PACKET + 8 * AST_UDC_DESCS_COUNT) + +/***************************** + * * + * UDC register definitions * + * * + *****************************/ + +#define AST_UDC_FUNC_CTRL 0x00 /* Root Function Control & Status Register */ +#define AST_UDC_CONFIG 0x04 /* Root Configuration Setting Register */ +#define AST_UDC_IER 0x08 /* Interrupt Control Register */ +#define AST_UDC_ISR 0x0C /* Interrupt Status Register */ +#define AST_UDC_EP_ACK_IER 0x10 /* Programmable ep Pool ACK Interrupt Enable Reg */ +#define AST_UDC_EP_NAK_IER 0x14 /* Programmable ep Pool NAK Interrupt Enable Reg */ +#define AST_UDC_EP_ACK_ISR 0x18 /* Programmable ep Pool ACK Interrupt Status Reg */ +#define AST_UDC_EP_NAK_ISR 0x1C /* Programmable ep Pool NAK Interrupt Status Reg */ +#define AST_UDC_DEV_RESET 0x20 /* Device Controller Soft Reset Enable Register */ +#define AST_UDC_STS 0x24 /* USB Status Register */ +#define AST_VHUB_EP_DATA 0x28 /* Programmable ep Pool Data Toggle Value Set */ +#define AST_VHUB_ISO_TX_FAIL 0x2C /* Isochronous Transaction Fail Accumulator */ +#define AST_UDC_EP0_CTRL 0x30 /* Endpoint 0 Control/Status Register */ +#define AST_UDC_EP0_DATA_BUFF 0x34 /* Base Address of ep0 IN/OUT Data Buffer Reg */ +#define AST_UDC_SETUP0 0x80 /* Root Device Setup Data Buffer0 */ +#define AST_UDC_SETUP1 0x84 /* Root Device Setup Data Buffer1 */ + + +/* Main control reg */ +#define USB_PHY_CLK_EN BIT(31) +#define USB_FIFO_DYN_PWRD_EN BIT(19) +#define USB_EP_LONG_DESC BIT(18) +#define USB_BIST_TEST_PASS BIT(13) +#define USB_BIST_TURN_ON BIT(12) +#define USB_PHY_RESET_DIS BIT(11) +#define USB_TEST_MODE(x) ((x) << 8) +#define USB_FORCE_TIMER_HS BIT(7) +#define USB_FORCE_HS BIT(6) +#define USB_REMOTE_WAKEUP_12MS BIT(5) +#define USB_REMOTE_WAKEUP_EN BIT(4) +#define USB_AUTO_REMOTE_WAKEUP_EN BIT(3) +#define USB_STOP_CLK_IN_SUPEND BIT(2) +#define USB_UPSTREAM_FS BIT(1) +#define USB_UPSTREAM_EN BIT(0) + +/* Main config reg */ +#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f) +#define UDC_CFG_ADDR_MASK (0x3f) + +/* Interrupt ctrl & status reg */ +#define UDC_IRQ_EP_POOL_NAK BIT(17) +#define UDC_IRQ_EP_POOL_ACK_STALL BIT(16) +#define UDC_IRQ_BUS_RESUME BIT(8) +#define UDC_IRQ_BUS_SUSPEND BIT(7) +#define UDC_IRQ_BUS_RESET BIT(6) +#define UDC_IRQ_EP0_IN_DATA_NAK BIT(4) +#define UDC_IRQ_EP0_IN_ACK_STALL BIT(3) +#define UDC_IRQ_EP0_OUT_NAK BIT(2) +#define UDC_IRQ_EP0_OUT_ACK_STALL BIT(1) +#define UDC_IRQ_EP0_SETUP BIT(0) +#define UDC_IRQ_ACK_ALL (0x1ff) + +/* EP isr reg */ +#define USB_EP3_ISR BIT(3) +#define USB_EP2_ISR BIT(2) +#define USB_EP1_ISR BIT(1) +#define USB_EP0_ISR BIT(0) +#define UDC_IRQ_EP_ACK_ALL (0xf) + +/*Soft reset reg */ +#define ROOT_UDC_SOFT_RESET BIT(0) + +/* USB status reg */ +#define UDC_STS_HIGHSPEED BIT(27) + +/* Programmable EP data toggle */ +#define EP_TOGGLE_SET_EPNUM(x) ((x) & 0x3) + +/* EP0 ctrl reg */ +#define 
EP0_GET_RX_LEN(x) ((x >> 16) & 0x7f) +#define EP0_TX_LEN(x) ((x & 0x7f) << 8) +#define EP0_RX_BUFF_RDY BIT(2) +#define EP0_TX_BUFF_RDY BIT(1) +#define EP0_STALL BIT(0) + +/************************************* + * * + * per-endpoint register definitions * + * * + *************************************/ + +#define AST_UDC_EP_CONFIG 0x00 /* Endpoint Configuration Register */ +#define AST_UDC_EP_DMA_CTRL 0x04 /* DMA Descriptor List Control/Status Register */ +#define AST_UDC_EP_DMA_BUFF 0x08 /* DMA Descriptor/Buffer Base Address */ +#define AST_UDC_EP_DMA_STS 0x0C /* DMA Descriptor List R/W Pointer and Status */ + +#define AST_UDC_EP_BASE 0x200 +#define AST_UDC_EP_OFFSET 0x10 + +/* EP config reg */ +#define EP_SET_MAX_PKT(x) ((x & 0x3ff) << 16) +#define EP_DATA_FETCH_CTRL(x) ((x & 0x3) << 14) +#define EP_AUTO_DATA_DISABLE (0x1 << 13) +#define EP_SET_EP_STALL (0x1 << 12) +#define EP_SET_EP_NUM(x) ((x & 0xf) << 8) +#define EP_SET_TYPE_MASK(x) ((x) << 5) +#define EP_TYPE_BULK (0x1) +#define EP_TYPE_INT (0x2) +#define EP_TYPE_ISO (0x3) +#define EP_DIR_OUT (0x1 << 4) +#define EP_ALLOCATED_MASK (0x7 << 1) +#define EP_ENABLE BIT(0) + +/* EP DMA ctrl reg */ +#define EP_DMA_CTRL_GET_PROC_STS(x) ((x >> 4) & 0xf) +#define EP_DMA_CTRL_STS_RX_IDLE 0x0 +#define EP_DMA_CTRL_STS_TX_IDLE 0x8 +#define EP_DMA_CTRL_IN_LONG_MODE (0x1 << 3) +#define EP_DMA_CTRL_RESET (0x1 << 2) +#define EP_DMA_SINGLE_STAGE (0x1 << 1) +#define EP_DMA_DESC_MODE (0x1 << 0) + +/* EP DMA status reg */ +#define EP_DMA_SET_TX_SIZE(x) ((x & 0x7ff) << 16) +#define EP_DMA_GET_TX_SIZE(x) (((x) >> 16) & 0x7ff) +#define EP_DMA_GET_RPTR(x) (((x) >> 8) & 0xff) +#define EP_DMA_GET_WPTR(x) ((x) & 0xff) +#define EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */ + +/* EP desc reg */ +#define AST_EP_DMA_DESC_INTR_ENABLE BIT(31) +#define AST_EP_DMA_DESC_PID_DATA0 (0 << 14) +#define AST_EP_DMA_DESC_PID_DATA2 BIT(14) +#define AST_EP_DMA_DESC_PID_DATA1 (2 << 14) +#define AST_EP_DMA_DESC_PID_MDATA (3 << 14) +#define EP_DESC1_IN_LEN(x) ((x) & 0x1fff) +#define AST_EP_DMA_DESC_MAX_LEN (7680) /* Max packet length for trasmit in 1 desc */ + +struct ast_udc_request { + struct usb_request req; + struct list_head queue; + unsigned mapped:1; + unsigned int actual_dma_length; + u32 saved_dma_wptr; +}; + +#define to_ast_req(__req) container_of(__req, struct ast_udc_request, req) + +struct ast_dma_desc { + u32 des_0; + u32 des_1; +}; + +struct ast_udc_ep { + struct usb_ep ep; + + /* Request queue */ + struct list_head queue; + + struct ast_udc_dev *udc; + void __iomem *ep_reg; + void *epn_buf; + dma_addr_t epn_buf_dma; + const struct usb_endpoint_descriptor *desc; + + /* DMA Descriptors */ + struct ast_dma_desc *descs; + dma_addr_t descs_dma; + u32 descs_wptr; + u32 chunk_max; + + bool dir_in:1; + unsigned stopped:1; + bool desc_mode:1; +}; + +#define to_ast_ep(__ep) container_of(__ep, struct ast_udc_ep, ep) + +struct ast_udc_dev { + struct platform_device *pdev; + void __iomem *reg; + int irq; + spinlock_t lock; + struct clk *clk; + struct work_struct wake_work; + + /* EP0 DMA buffers allocated in one chunk */ + void *ep0_buf; + dma_addr_t ep0_buf_dma; + struct ast_udc_ep ep[AST_UDC_NUM_ENDPOINTS]; + + struct usb_gadget gadget; + struct usb_gadget_driver *driver; + void __iomem *creq; + enum usb_device_state suspended_from; + int desc_mode; + + /* Force full speed only */ + bool force_usb1:1; + unsigned is_control_tx:1; + bool wakeup_en:1; +}; + +#define to_ast_dev(__g) container_of(__g, struct ast_udc_dev, gadget) + +static const char * const ast_ep_name[] 
= { + "ep0", "ep1", "ep2", "ep3", "ep4" +}; + +#ifdef AST_UDC_DEBUG_ALL +#define AST_UDC_DEBUG +#define AST_SETUP_DEBUG +#define AST_EP_DEBUG +#define AST_ISR_DEBUG +#endif + +#ifdef AST_SETUP_DEBUG +#define SETUP_DBG(u, fmt, ...) \ + dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#else +#define SETUP_DBG(u, fmt, ...) +#endif + +#ifdef AST_EP_DEBUG +#define EP_DBG(e, fmt, ...) \ + dev_dbg(&(e)->udc->pdev->dev, "%s():%s " fmt, __func__, \ + (e)->ep.name, ##__VA_ARGS__) +#else +#define EP_DBG(ep, fmt, ...) ((void)(ep)) +#endif + +#ifdef AST_UDC_DEBUG +#define UDC_DBG(u, fmt, ...) \ + dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#else +#define UDC_DBG(u, fmt, ...) +#endif + +#ifdef AST_ISR_DEBUG +#define ISR_DBG(u, fmt, ...) \ + dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#else +#define ISR_DBG(u, fmt, ...) +#endif + +/*-------------------------------------------------------------------------*/ +#define ast_udc_read(udc, offset) \ + readl((udc)->reg + (offset)) +#define ast_udc_write(udc, val, offset) \ + writel((val), (udc)->reg + (offset)) + +#define ast_ep_read(ep, reg) \ + readl((ep)->ep_reg + (reg)) +#define ast_ep_write(ep, val, reg) \ + writel((val), (ep)->ep_reg + (reg)) + +/*-------------------------------------------------------------------------*/ + +static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req, + int status) +{ + struct ast_udc_dev *udc = ep->udc; + + EP_DBG(ep, "req @%p, len (%d/%d), buf:0x%x, dir:0x%x\n", + req, req->req.actual, req->req.length, + (u32)req->req.buf, ep->dir_in); + + list_del(&req->queue); + + if (req->req.status == -EINPROGRESS) + req->req.status = status; + else + status = req->req.status; + + if (status && status != -ESHUTDOWN) + EP_DBG(ep, "done req:%p, status:%d\n", req, status); + + spin_unlock(&udc->lock); + usb_gadget_giveback_request(&ep->ep, &req->req); + spin_lock(&udc->lock); +} + +static void ast_udc_nuke(struct ast_udc_ep *ep, int status) +{ + int count = 0; + + while (!list_empty(&ep->queue)) { + struct ast_udc_request *req; + + req = list_entry(ep->queue.next, struct ast_udc_request, + queue); + ast_udc_done(ep, req, status); + count++; + } + + if (count) + EP_DBG(ep, "Nuked %d request(s)\n", count); +} + +/* + * Stop activity on all endpoints. + * Device controller for which EP activity is to be stopped. + * + * All the endpoints are stopped and any pending transfer requests if any on + * the endpoint are terminated. 
+ */ +static void ast_udc_stop_activity(struct ast_udc_dev *udc) +{ + struct ast_udc_ep *ep; + int i; + + for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) { + ep = &udc->ep[i]; + ep->stopped = 1; + ast_udc_nuke(ep, -ESHUTDOWN); + } +} + +static int ast_udc_ep_enable(struct usb_ep *_ep, + const struct usb_endpoint_descriptor *desc) +{ + u16 maxpacket = usb_endpoint_maxp(desc); + struct ast_udc_ep *ep = to_ast_ep(_ep); + struct ast_udc_dev *udc = ep->udc; + u8 epnum = usb_endpoint_num(desc); + unsigned long flags; + u32 ep_conf = 0; + u8 dir_in; + u8 type; + + if (!_ep || !ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT || + maxpacket == 0 || maxpacket > ep->ep.maxpacket) { + EP_DBG(ep, "Failed, invalid EP enable param\n"); + return -EINVAL; + } + + if (!udc->driver) { + EP_DBG(ep, "bogus device state\n"); + return -ESHUTDOWN; + } + + EP_DBG(ep, "maxpacket:0x%x\n", maxpacket); + + spin_lock_irqsave(&udc->lock, flags); + + ep->desc = desc; + ep->stopped = 0; + ep->ep.maxpacket = maxpacket; + ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN; + + if (maxpacket < AST_UDC_EPn_MAX_PACKET) + ep_conf = EP_SET_MAX_PKT(maxpacket); + + ep_conf |= EP_SET_EP_NUM(epnum); + + type = usb_endpoint_type(desc); + dir_in = usb_endpoint_dir_in(desc); + ep->dir_in = dir_in; + if (!ep->dir_in) + ep_conf |= EP_DIR_OUT; + + EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in); + switch (type) { + case USB_ENDPOINT_XFER_ISOC: + ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO); + break; + + case USB_ENDPOINT_XFER_BULK: + ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_BULK); + break; + + case USB_ENDPOINT_XFER_INT: + ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_INT); + break; + } + + ep->desc_mode = udc->desc_mode && ep->descs_dma && ep->dir_in; + if (ep->desc_mode) { + ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL); + ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS); + ast_ep_write(ep, ep->descs_dma, AST_UDC_EP_DMA_BUFF); + + /* Enable Long Descriptor Mode */ + ast_ep_write(ep, EP_DMA_CTRL_IN_LONG_MODE | EP_DMA_DESC_MODE, + AST_UDC_EP_DMA_CTRL); + + ep->descs_wptr = 0; + + } else { + ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL); + ast_ep_write(ep, EP_DMA_SINGLE_STAGE, AST_UDC_EP_DMA_CTRL); + ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS); + } + + /* Cleanup data toggle just in case */ + ast_udc_write(udc, EP_TOGGLE_SET_EPNUM(epnum), AST_VHUB_EP_DATA); + + /* Enable EP */ + ast_ep_write(ep, ep_conf | EP_ENABLE, AST_UDC_EP_CONFIG); + + EP_DBG(ep, "ep_config: 0x%x\n", ast_ep_read(ep, AST_UDC_EP_CONFIG)); + + spin_unlock_irqrestore(&udc->lock, flags); + + return 0; +} + +static int ast_udc_ep_disable(struct usb_ep *_ep) +{ + struct ast_udc_ep *ep = to_ast_ep(_ep); + struct ast_udc_dev *udc = ep->udc; + unsigned long flags; + + spin_lock_irqsave(&udc->lock, flags); + + ep->ep.desc = NULL; + ep->stopped = 1; + + ast_udc_nuke(ep, -ESHUTDOWN); + ast_ep_write(ep, 0, AST_UDC_EP_CONFIG); + + spin_unlock_irqrestore(&udc->lock, flags); + + return 0; +} + +static struct usb_request *ast_udc_ep_alloc_request(struct usb_ep *_ep, + gfp_t gfp_flags) +{ + struct ast_udc_ep *ep = to_ast_ep(_ep); + struct ast_udc_request *req; + + req = kzalloc(sizeof(struct ast_udc_request), gfp_flags); + if (!req) { + EP_DBG(ep, "request allocation failed\n"); + return NULL; + } + + INIT_LIST_HEAD(&req->queue); + + return &req->req; +} + +static void ast_udc_ep_free_request(struct usb_ep *_ep, + struct usb_request *_req) +{ + struct ast_udc_request *req = to_ast_req(_req); + + kfree(req); +} + +static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf, + u16 tx_len, 
struct ast_udc_request *req) +{ + struct ast_udc_dev *udc = ep->udc; + struct device *dev = &udc->pdev->dev; + bool last = false; + int chunk, count; + u32 offset; + + if (!ep->descs) { + dev_warn(dev, "%s: Empty DMA descs list failure\n", + ep->ep.name); + return -EINVAL; + } + + chunk = tx_len; + offset = count = 0; + + EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req, + "wptr", ep->descs_wptr, "dma_buf", dma_buf, + "tx_len", tx_len); + + /* Create Descriptor Lists */ + while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) { + + ep->descs[ep->descs_wptr].des_0 = dma_buf + offset; + + if (chunk > ep->chunk_max) { + ep->descs[ep->descs_wptr].des_1 = ep->chunk_max; + } else { + ep->descs[ep->descs_wptr].des_1 = chunk; + last = true; + } + + chunk -= ep->chunk_max; + + EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n", + ep->descs_wptr, + ep->descs[ep->descs_wptr].des_0, + ep->descs[ep->descs_wptr].des_1); + + if (count == 0) + req->saved_dma_wptr = ep->descs_wptr; + + ep->descs_wptr++; + count++; + + if (ep->descs_wptr >= AST_UDC_DESCS_COUNT) + ep->descs_wptr = 0; + + offset = ep->chunk_max * count; + } + + return 0; +} + +static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req) +{ + u32 tx_len; + u32 last; + + last = req->req.length - req->req.actual; + tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last; + + EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n", + req, tx_len, ep->dir_in); + + ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF); + + /* Start DMA */ + ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len), AST_UDC_EP_DMA_STS); + ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len) | EP_DMA_SINGLE_KICK, + AST_UDC_EP_DMA_STS); +} + +static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep, + struct ast_udc_request *req) +{ + u32 descs_max_size; + u32 tx_len; + u32 last; + + descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT; + + last = req->req.length - req->req.actual; + tx_len = last > descs_max_size ? descs_max_size : last; + + EP_DBG(ep, "kick req @%p, %s:%d, %s:0x%x, %s:0x%x (%d/%d), %s:0x%x\n", + req, "tx_len", tx_len, "dir_in", ep->dir_in, + "dma", req->req.dma + req->req.actual, + req->req.actual, req->req.length, + "descs_max_size", descs_max_size); + + if (!ast_dma_descriptor_setup(ep, req->req.dma + req->req.actual, + tx_len, req)) + req->actual_dma_length += tx_len; + + /* make sure CPU done everything before triggering DMA */ + mb(); + + ast_ep_write(ep, ep->descs_wptr, AST_UDC_EP_DMA_STS); + + EP_DBG(ep, "descs_wptr:%d, dstat:0x%x, dctrl:0x%x\n", + ep->descs_wptr, + ast_ep_read(ep, AST_UDC_EP_DMA_STS), + ast_ep_read(ep, AST_UDC_EP_DMA_CTRL)); +} + +static void ast_udc_ep0_queue(struct ast_udc_ep *ep, + struct ast_udc_request *req) +{ + struct ast_udc_dev *udc = ep->udc; + u32 tx_len; + u32 last; + + last = req->req.length - req->req.actual; + tx_len = last > ep->ep.maxpacket ? 
ep->ep.maxpacket : last; + + ast_udc_write(udc, req->req.dma + req->req.actual, + AST_UDC_EP0_DATA_BUFF); + + if (ep->dir_in) { + /* IN requests, send data */ + SETUP_DBG(udc, "IN: %s:0x%x, %s:0x%x, %s:%d (%d/%d), %s:%d\n", + "buf", (u32)req->req.buf, + "dma", req->req.dma + req->req.actual, + "tx_len", tx_len, + req->req.actual, req->req.length, + "dir_in", ep->dir_in); + + req->req.actual += tx_len; + ast_udc_write(udc, EP0_TX_LEN(tx_len), AST_UDC_EP0_CTRL); + ast_udc_write(udc, EP0_TX_LEN(tx_len) | EP0_TX_BUFF_RDY, + AST_UDC_EP0_CTRL); + + } else { + /* OUT requests, receive data */ + SETUP_DBG(udc, "OUT: %s:%x, %s:%x, %s:(%d/%d), %s:%d\n", + "buf", (u32)req->req.buf, + "dma", req->req.dma + req->req.actual, + "len", req->req.actual, req->req.length, + "dir_in", ep->dir_in); + + if (!req->req.length) { + /* 0 len request, send tx as completion */ + ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL); + ep->dir_in = 0x1; + } else + ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL); + } +} + +static int ast_udc_ep_queue(struct usb_ep *_ep, struct usb_request *_req, + gfp_t gfp_flags) +{ + struct ast_udc_request *req = to_ast_req(_req); + struct ast_udc_ep *ep = to_ast_ep(_ep); + struct ast_udc_dev *udc = ep->udc; + struct device *dev = &udc->pdev->dev; + unsigned long flags; + int rc; + + if (unlikely(!_req || !_req->complete || !_req->buf || !_ep)) { + dev_warn(dev, "Invalid EP request !\n"); + return -EINVAL; + } + + if (ep->stopped) { + dev_warn(dev, "%s is already stopped !\n", _ep->name); + return -ESHUTDOWN; + } + + spin_lock_irqsave(&udc->lock, flags); + + list_add_tail(&req->queue, &ep->queue); + + req->req.actual = 0; + req->req.status = -EINPROGRESS; + req->actual_dma_length = 0; + + rc = usb_gadget_map_request(&udc->gadget, &req->req, ep->dir_in); + if (rc) { + EP_DBG(ep, "Request mapping failure %d\n", rc); + dev_warn(dev, "Request mapping failure %d\n", rc); + goto end; + } + + EP_DBG(ep, "enqueue req @%p\n", req); + EP_DBG(ep, "l=%d, dma:0x%x, zero:%d, is_in:%d\n", + _req->length, _req->dma, _req->zero, ep->dir_in); + + /* EP0 request enqueue */ + if (ep->ep.desc == NULL) { + if ((req->req.dma % 4) != 0) { + dev_warn(dev, "EP0 req dma alignment error\n"); + rc = -ESHUTDOWN; + goto end; + } + + ast_udc_ep0_queue(ep, req); + goto end; + } + + /* EPn request enqueue */ + if (list_is_singular(&ep->queue)) { + if (ep->desc_mode) + ast_udc_epn_kick_desc(ep, req); + else + ast_udc_epn_kick(ep, req); + } + +end: + spin_unlock_irqrestore(&udc->lock, flags); + + return rc; +} + +static int ast_udc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct ast_udc_ep *ep = to_ast_ep(_ep); + struct ast_udc_dev *udc = ep->udc; + struct ast_udc_request *req; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&udc->lock, flags); + + /* make sure it's actually queued on this endpoint */ + list_for_each_entry(req, &ep->queue, queue) { + if (&req->req == _req) { + list_del_init(&req->queue); + ast_udc_done(ep, req, -ESHUTDOWN); + _req->status = -ECONNRESET; + break; + } + } + + /* dequeue request not found */ + if (&req->req != _req) + rc = -EINVAL; + + spin_unlock_irqrestore(&udc->lock, flags); + + return rc; +} + +static int ast_udc_ep_set_halt(struct usb_ep *_ep, int value) +{ + struct ast_udc_ep *ep = to_ast_ep(_ep); + struct ast_udc_dev *udc = ep->udc; + unsigned long flags; + int epnum; + u32 ctrl; + + EP_DBG(ep, "val:%d\n", value); + + spin_lock_irqsave(&udc->lock, flags); + + epnum = usb_endpoint_num(ep->desc); + + /* EP0 */ + if (epnum == 0) { + ctrl = 
ast_udc_read(udc, AST_UDC_EP0_CTRL); + if (value) + ctrl |= EP0_STALL; + else + ctrl &= ~EP0_STALL; + + ast_udc_write(udc, ctrl, AST_UDC_EP0_CTRL); + + } else { + /* EPn */ + ctrl = ast_udc_read(udc, AST_UDC_EP_CONFIG); + if (value) + ctrl |= EP_SET_EP_STALL; + else + ctrl &= ~EP_SET_EP_STALL; + + ast_ep_write(ep, ctrl, AST_UDC_EP_CONFIG); + + /* only epn is stopped and waits for clear */ + ep->stopped = value ? 1 : 0; + } + + spin_unlock_irqrestore(&udc->lock, flags); + + return 0; +} + +static const struct usb_ep_ops ast_udc_ep_ops = { + .enable = ast_udc_ep_enable, + .disable = ast_udc_ep_disable, + .alloc_request = ast_udc_ep_alloc_request, + .free_request = ast_udc_ep_free_request, + .queue = ast_udc_ep_queue, + .dequeue = ast_udc_ep_dequeue, + .set_halt = ast_udc_ep_set_halt, + /* there's only imprecise fifo status reporting */ +}; + +static void ast_udc_ep0_rx(struct ast_udc_dev *udc) +{ + ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF); + ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL); +} + +static void ast_udc_ep0_tx(struct ast_udc_dev *udc) +{ + ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF); + ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL); +} + +static void ast_udc_ep0_out(struct ast_udc_dev *udc) +{ + struct device *dev = &udc->pdev->dev; + struct ast_udc_ep *ep = &udc->ep[0]; + struct ast_udc_request *req; + u16 rx_len; + + if (list_empty(&ep->queue)) + return; + + req = list_entry(ep->queue.next, struct ast_udc_request, queue); + + rx_len = EP0_GET_RX_LEN(ast_udc_read(udc, AST_UDC_EP0_CTRL)); + req->req.actual += rx_len; + + SETUP_DBG(udc, "req %p (%d/%d)\n", req, + req->req.actual, req->req.length); + + if ((rx_len < ep->ep.maxpacket) || + (req->req.actual == req->req.length)) { + ast_udc_ep0_tx(udc); + if (!ep->dir_in) + ast_udc_done(ep, req, 0); + + } else { + if (rx_len > req->req.length) { + // Issue Fix + dev_warn(dev, "Something wrong (%d/%d)\n", + req->req.actual, req->req.length); + ast_udc_ep0_tx(udc); + ast_udc_done(ep, req, 0); + return; + } + + ep->dir_in = 0; + + /* More works */ + ast_udc_ep0_queue(ep, req); + } +} + +static void ast_udc_ep0_in(struct ast_udc_dev *udc) +{ + struct ast_udc_ep *ep = &udc->ep[0]; + struct ast_udc_request *req; + + if (list_empty(&ep->queue)) { + if (udc->is_control_tx) { + ast_udc_ep0_rx(udc); + udc->is_control_tx = 0; + } + + return; + } + + req = list_entry(ep->queue.next, struct ast_udc_request, queue); + + SETUP_DBG(udc, "req %p (%d/%d)\n", req, + req->req.actual, req->req.length); + + if (req->req.length == req->req.actual) { + if (req->req.length) + ast_udc_ep0_rx(udc); + + if (ep->dir_in) + ast_udc_done(ep, req, 0); + + } else { + /* More works */ + ast_udc_ep0_queue(ep, req); + } +} + +static void ast_udc_epn_handle(struct ast_udc_dev *udc, u16 ep_num) +{ + struct ast_udc_ep *ep = &udc->ep[ep_num]; + struct ast_udc_request *req; + u16 len = 0; + + if (list_empty(&ep->queue)) + return; + + req = list_first_entry(&ep->queue, struct ast_udc_request, queue); + + len = EP_DMA_GET_TX_SIZE(ast_ep_read(ep, AST_UDC_EP_DMA_STS)); + req->req.actual += len; + + EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req, + req->req.actual, req->req.length, "len", len); + + /* Done this request */ + if (req->req.length == req->req.actual) { + ast_udc_done(ep, req, 0); + req = list_first_entry_or_null(&ep->queue, + struct ast_udc_request, + queue); + + } else { + /* Check for short packet */ + if (len < ep->ep.maxpacket) { + ast_udc_done(ep, req, 0); + req = list_first_entry_or_null(&ep->queue, + struct 
ast_udc_request, + queue); + } + } + + /* More requests */ + if (req) + ast_udc_epn_kick(ep, req); +} + +static void ast_udc_epn_handle_desc(struct ast_udc_dev *udc, u16 ep_num) +{ + struct ast_udc_ep *ep = &udc->ep[ep_num]; + struct device *dev = &udc->pdev->dev; + struct ast_udc_request *req; + u32 proc_sts, wr_ptr, rd_ptr; + u32 len_in_desc, ctrl; + u16 total_len = 0; + int i; + + if (list_empty(&ep->queue)) { + dev_warn(dev, "%s request queue empty!\n", ep->ep.name); + return; + } + + req = list_first_entry(&ep->queue, struct ast_udc_request, queue); + + ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_CTRL); + proc_sts = EP_DMA_CTRL_GET_PROC_STS(ctrl); + + /* Check processing status is idle */ + if (proc_sts != EP_DMA_CTRL_STS_RX_IDLE && + proc_sts != EP_DMA_CTRL_STS_TX_IDLE) { + dev_warn(dev, "EP DMA CTRL: 0x%x, PS:0x%x\n", + ast_ep_read(ep, AST_UDC_EP_DMA_CTRL), + proc_sts); + return; + } + + ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_STS); + rd_ptr = EP_DMA_GET_RPTR(ctrl); + wr_ptr = EP_DMA_GET_WPTR(ctrl); + + if (rd_ptr != wr_ptr) { + dev_warn(dev, "desc list is not empty ! %s:%d, %s:%d\n", + "rptr", rd_ptr, "wptr", wr_ptr); + return; + } + + EP_DBG(ep, "rd_ptr:%d, wr_ptr:%d\n", rd_ptr, wr_ptr); + i = req->saved_dma_wptr; + + do { + len_in_desc = EP_DESC1_IN_LEN(ep->descs[i].des_1); + EP_DBG(ep, "desc[%d] len: %d\n", i, len_in_desc); + total_len += len_in_desc; + i++; + if (i >= AST_UDC_DESCS_COUNT) + i = 0; + + } while (i != wr_ptr); + + req->req.actual += total_len; + + EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req, + req->req.actual, req->req.length, "len", total_len); + + /* Done this request */ + if (req->req.length == req->req.actual) { + ast_udc_done(ep, req, 0); + req = list_first_entry_or_null(&ep->queue, + struct ast_udc_request, + queue); + + } else { + /* Check for short packet */ + if (total_len < ep->ep.maxpacket) { + ast_udc_done(ep, req, 0); + req = list_first_entry_or_null(&ep->queue, + struct ast_udc_request, + queue); + } + } + + /* More requests & dma descs not setup yet */ + if (req && (req->actual_dma_length == req->req.actual)) { + EP_DBG(ep, "More requests\n"); + ast_udc_epn_kick_desc(ep, req); + } +} + +static void ast_udc_ep0_data_tx(struct ast_udc_dev *udc, u8 *tx_data, u32 len) +{ + if (len) { + memcpy(udc->ep0_buf, tx_data, len); + + ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF); + ast_udc_write(udc, EP0_TX_LEN(len), AST_UDC_EP0_CTRL); + ast_udc_write(udc, EP0_TX_LEN(len) | EP0_TX_BUFF_RDY, + AST_UDC_EP0_CTRL); + udc->is_control_tx = 1; + + } else + ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL); +} + +static void ast_udc_getstatus(struct ast_udc_dev *udc) +{ + struct usb_ctrlrequest crq; + struct ast_udc_ep *ep; + u16 status = 0; + u16 epnum = 0; + + memcpy_fromio(&crq, udc->creq, sizeof(crq)); + + switch (crq.bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + /* Get device status */ + status = 1 << USB_DEVICE_SELF_POWERED; + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK; + status = udc->ep[epnum].stopped; + break; + default: + goto stall; + } + + ep = &udc->ep[epnum]; + EP_DBG(ep, "status: 0x%x\n", status); + ast_udc_ep0_data_tx(udc, (u8 *)&status, sizeof(status)); + + return; + +stall: + EP_DBG(ep, "Can't respond request\n"); + ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL, + AST_UDC_EP0_CTRL); +} + +static void ast_udc_ep0_handle_setup(struct ast_udc_dev *udc) +{ + struct ast_udc_ep *ep = &udc->ep[0]; + struct ast_udc_request *req; + 
struct usb_ctrlrequest crq; + int req_num = 0; + int rc = 0; + u32 reg; + + memcpy_fromio(&crq, udc->creq, sizeof(crq)); + + SETUP_DBG(udc, "SETUP packet: %02x/%02x/%04x/%04x/%04x\n", + crq.bRequestType, crq.bRequest, le16_to_cpu(crq.wValue), + le16_to_cpu(crq.wIndex), le16_to_cpu(crq.wLength)); + + /* + * Cleanup ep0 request(s) in queue because + * there is a new control setup comes. + */ + list_for_each_entry(req, &udc->ep[0].queue, queue) { + req_num++; + EP_DBG(ep, "there is req %p in ep0 queue !\n", req); + } + + if (req_num) + ast_udc_nuke(&udc->ep[0], -ETIMEDOUT); + + udc->ep[0].dir_in = crq.bRequestType & USB_DIR_IN; + + if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { + switch (crq.bRequest) { + case USB_REQ_SET_ADDRESS: + if (ast_udc_read(udc, AST_UDC_STS) & UDC_STS_HIGHSPEED) + udc->gadget.speed = USB_SPEED_HIGH; + else + udc->gadget.speed = USB_SPEED_FULL; + + SETUP_DBG(udc, "set addr: 0x%x\n", crq.wValue); + reg = ast_udc_read(udc, AST_UDC_CONFIG); + reg &= ~UDC_CFG_ADDR_MASK; + reg |= UDC_CFG_SET_ADDR(crq.wValue); + ast_udc_write(udc, reg, AST_UDC_CONFIG); + goto req_complete; + + case USB_REQ_CLEAR_FEATURE: + SETUP_DBG(udc, "ep0: CLEAR FEATURE\n"); + goto req_driver; + + case USB_REQ_SET_FEATURE: + SETUP_DBG(udc, "ep0: SET FEATURE\n"); + goto req_driver; + + case USB_REQ_GET_STATUS: + ast_udc_getstatus(udc); + return; + + default: + goto req_driver; + } + + } + +req_driver: + if (udc->driver) { + SETUP_DBG(udc, "Forwarding %s to gadget...\n", + udc->gadget.name); + + spin_unlock(&udc->lock); + rc = udc->driver->setup(&udc->gadget, &crq); + spin_lock(&udc->lock); + + } else { + SETUP_DBG(udc, "No gadget for request !\n"); + } + + if (rc >= 0) + return; + + /* Stall if gadget failed */ + SETUP_DBG(udc, "Stalling, rc:0x%x\n", rc); + ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL, + AST_UDC_EP0_CTRL); + return; + +req_complete: + SETUP_DBG(udc, "ep0: Sending IN status without data\n"); + ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL); +} + +static irqreturn_t ast_udc_isr(int irq, void *data) +{ + struct ast_udc_dev *udc = (struct ast_udc_dev *)data; + struct ast_udc_ep *ep; + u32 isr, ep_isr; + int i; + + spin_lock(&udc->lock); + + isr = ast_udc_read(udc, AST_UDC_ISR); + if (!isr) + goto done; + + /* Ack interrupts */ + ast_udc_write(udc, isr, AST_UDC_ISR); + + if (isr & UDC_IRQ_BUS_RESET) { + ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n"); + udc->gadget.speed = USB_SPEED_UNKNOWN; + + ep = &udc->ep[1]; + EP_DBG(ep, "dctrl:0x%x\n", + ast_ep_read(ep, AST_UDC_EP_DMA_CTRL)); + + if (udc->driver && udc->driver->reset) { + spin_unlock(&udc->lock); + udc->driver->reset(&udc->gadget); + spin_lock(&udc->lock); + } + } + + if (isr & UDC_IRQ_BUS_SUSPEND) { + ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n"); + udc->suspended_from = udc->gadget.state; + usb_gadget_set_state(&udc->gadget, USB_STATE_SUSPENDED); + + if (udc->driver && udc->driver->suspend) { + spin_unlock(&udc->lock); + udc->driver->suspend(&udc->gadget); + spin_lock(&udc->lock); + } + } + + if (isr & UDC_IRQ_BUS_RESUME) { + ISR_DBG(udc, "UDC_IRQ_BUS_RESUME\n"); + usb_gadget_set_state(&udc->gadget, udc->suspended_from); + + if (udc->driver && udc->driver->resume) { + spin_unlock(&udc->lock); + udc->driver->resume(&udc->gadget); + spin_lock(&udc->lock); + } + } + + if (isr & UDC_IRQ_EP0_IN_ACK_STALL) { + ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n"); + ast_udc_ep0_in(udc); + } + + if (isr & UDC_IRQ_EP0_OUT_ACK_STALL) { + ISR_DBG(udc, "UDC_IRQ_EP0_OUT_ACK_STALL\n"); + ast_udc_ep0_out(udc); + } + + if (isr & 
UDC_IRQ_EP0_SETUP) { + ISR_DBG(udc, "UDC_IRQ_EP0_SETUP\n"); + ast_udc_ep0_handle_setup(udc); + } + + if (isr & UDC_IRQ_EP_POOL_ACK_STALL) { + ISR_DBG(udc, "UDC_IRQ_EP_POOL_ACK_STALL\n"); + ep_isr = ast_udc_read(udc, AST_UDC_EP_ACK_ISR); + + /* Ack EP interrupts */ + ast_udc_write(udc, ep_isr, AST_UDC_EP_ACK_ISR); + + /* Handle each EP */ + for (i = 0; i < AST_UDC_NUM_ENDPOINTS - 1; i++) { + if (ep_isr & (0x1 << i)) { + ep = &udc->ep[i + 1]; + if (ep->desc_mode) + ast_udc_epn_handle_desc(udc, i + 1); + else + ast_udc_epn_handle(udc, i + 1); + } + } + } + +done: + spin_unlock(&udc->lock); + return IRQ_HANDLED; +} + +static int ast_udc_gadget_getframe(struct usb_gadget *gadget) +{ + struct ast_udc_dev *udc = to_ast_dev(gadget); + + return (ast_udc_read(udc, AST_UDC_STS) >> 16) & 0x7ff; +} + +static void ast_udc_wake_work(struct work_struct *work) +{ + struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev, + wake_work); + unsigned long flags; + u32 ctrl; + + spin_lock_irqsave(&udc->lock, flags); + + UDC_DBG(udc, "Wakeup Host !\n"); + ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL); + ast_udc_write(udc, ctrl | USB_REMOTE_WAKEUP_EN, AST_UDC_FUNC_CTRL); + + spin_unlock_irqrestore(&udc->lock, flags); +} + +static void ast_udc_wakeup_all(struct ast_udc_dev *udc) +{ + /* + * A device is trying to wake the world, because this + * can recurse into the device, we break the call chain + * using a work queue + */ + schedule_work(&udc->wake_work); +} + +static int ast_udc_wakeup(struct usb_gadget *gadget) +{ + struct ast_udc_dev *udc = to_ast_dev(gadget); + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&udc->lock, flags); + + if (!udc->wakeup_en) { + UDC_DBG(udc, "Remote Wakeup is disabled\n"); + rc = -EINVAL; + goto err; + } + + UDC_DBG(udc, "Device initiated wakeup\n"); + ast_udc_wakeup_all(udc); + +err: + spin_unlock_irqrestore(&udc->lock, flags); + return rc; +} + +/* + * Activate/Deactivate link with host + */ +static int ast_udc_pullup(struct usb_gadget *gadget, int is_on) +{ + struct ast_udc_dev *udc = to_ast_dev(gadget); + unsigned long flags; + u32 ctrl; + + spin_lock_irqsave(&udc->lock, flags); + + UDC_DBG(udc, "is_on: %d\n", is_on); + if (is_on) + ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) | USB_UPSTREAM_EN; + else + ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN; + + ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL); + + spin_unlock_irqrestore(&udc->lock, flags); + + return 0; +} + +static int ast_udc_start(struct usb_gadget *gadget, + struct usb_gadget_driver *driver) +{ + struct ast_udc_dev *udc = to_ast_dev(gadget); + struct ast_udc_ep *ep; + unsigned long flags; + int i; + + spin_lock_irqsave(&udc->lock, flags); + + UDC_DBG(udc, "\n"); + udc->driver = driver; + udc->gadget.dev.of_node = udc->pdev->dev.of_node; + + for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) { + ep = &udc->ep[i]; + ep->stopped = 0; + } + + spin_unlock_irqrestore(&udc->lock, flags); + + return 0; +} + +static int ast_udc_stop(struct usb_gadget *gadget) +{ + struct ast_udc_dev *udc = to_ast_dev(gadget); + unsigned long flags; + u32 ctrl; + + spin_lock_irqsave(&udc->lock, flags); + + UDC_DBG(udc, "\n"); + ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN; + ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL); + + udc->gadget.speed = USB_SPEED_UNKNOWN; + udc->driver = NULL; + + ast_udc_stop_activity(udc); + usb_gadget_set_state(&udc->gadget, USB_STATE_NOTATTACHED); + + spin_unlock_irqrestore(&udc->lock, flags); + + return 0; +} + +static const struct usb_gadget_ops ast_udc_ops = { + .get_frame = 
ast_udc_gadget_getframe, + .wakeup = ast_udc_wakeup, + .pullup = ast_udc_pullup, + .udc_start = ast_udc_start, + .udc_stop = ast_udc_stop, +}; + +/* + * Support 1 Control Endpoint. + * Support multiple programmable endpoints that can be configured to + * Bulk IN/OUT, Interrupt IN/OUT, and Isochronous IN/OUT type endpoint. + */ +static void ast_udc_init_ep(struct ast_udc_dev *udc) +{ + struct ast_udc_ep *ep; + int i; + + for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) { + ep = &udc->ep[i]; + ep->ep.name = ast_ep_name[i]; + if (i == 0) { + ep->ep.caps.type_control = true; + } else { + ep->ep.caps.type_iso = true; + ep->ep.caps.type_bulk = true; + ep->ep.caps.type_int = true; + } + ep->ep.caps.dir_in = true; + ep->ep.caps.dir_out = true; + + ep->ep.ops = &ast_udc_ep_ops; + ep->udc = udc; + + INIT_LIST_HEAD(&ep->queue); + + if (i == 0) { + usb_ep_set_maxpacket_limit(&ep->ep, + AST_UDC_EP0_MAX_PACKET); + continue; + } + + ep->ep_reg = udc->reg + AST_UDC_EP_BASE + + (AST_UDC_EP_OFFSET * (i - 1)); + + ep->epn_buf = udc->ep0_buf + (i * AST_UDC_EP_DMA_SIZE); + ep->epn_buf_dma = udc->ep0_buf_dma + (i * AST_UDC_EP_DMA_SIZE); + usb_ep_set_maxpacket_limit(&ep->ep, AST_UDC_EPn_MAX_PACKET); + + ep->descs = ep->epn_buf + AST_UDC_EPn_MAX_PACKET; + ep->descs_dma = ep->epn_buf_dma + AST_UDC_EPn_MAX_PACKET; + ep->descs_wptr = 0; + + list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); + } +} + +static void ast_udc_init_dev(struct ast_udc_dev *udc) +{ + INIT_WORK(&udc->wake_work, ast_udc_wake_work); +} + +static void ast_udc_init_hw(struct ast_udc_dev *udc) +{ + u32 ctrl; + + /* Enable PHY */ + ctrl = USB_PHY_CLK_EN | USB_PHY_RESET_DIS; + ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL); + + udelay(1); + ast_udc_write(udc, 0, AST_UDC_DEV_RESET); + + /* Set descriptor ring size */ + if (AST_UDC_DESCS_COUNT == 256) { + ctrl |= USB_EP_LONG_DESC; + ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL); + } + + /* Mask & ack all interrupts before installing the handler */ + ast_udc_write(udc, 0, AST_UDC_IER); + ast_udc_write(udc, UDC_IRQ_ACK_ALL, AST_UDC_ISR); + + /* Enable some interrupts */ + ctrl = UDC_IRQ_EP_POOL_ACK_STALL | UDC_IRQ_BUS_RESUME | + UDC_IRQ_BUS_SUSPEND | UDC_IRQ_BUS_RESET | + UDC_IRQ_EP0_IN_ACK_STALL | UDC_IRQ_EP0_OUT_ACK_STALL | + UDC_IRQ_EP0_SETUP; + ast_udc_write(udc, ctrl, AST_UDC_IER); + + /* Cleanup and enable ep ACK interrupts */ + ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_IER); + ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_ISR); + + ast_udc_write(udc, 0, AST_UDC_EP0_CTRL); +} + +static int ast_udc_remove(struct platform_device *pdev) +{ + struct ast_udc_dev *udc = platform_get_drvdata(pdev); + unsigned long flags; + u32 ctrl; + + usb_del_gadget_udc(&udc->gadget); + if (udc->driver) + return -EBUSY; + + spin_lock_irqsave(&udc->lock, flags); + + /* Disable upstream port connection */ + ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN; + ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL); + + clk_disable_unprepare(udc->clk); + + spin_unlock_irqrestore(&udc->lock, flags); + + if (udc->ep0_buf) + dma_free_coherent(&pdev->dev, + AST_UDC_EP_DMA_SIZE * AST_UDC_NUM_ENDPOINTS, + udc->ep0_buf, + udc->ep0_buf_dma); + + udc->ep0_buf = NULL; + + return 0; +} + +static int ast_udc_probe(struct platform_device *pdev) +{ + enum usb_device_speed max_speed; + struct device *dev = &pdev->dev; + struct ast_udc_dev *udc; + struct resource *res; + int rc; + + udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL); + if (!udc) + return -ENOMEM; + + udc->gadget.dev.parent = dev; + 
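+
+	/*
+	 * A rough sketch of the coherent DMA region allocated later in
+	 * this function and carved up by ast_udc_init_ep(), derived from
+	 * the offsets used there (one AST_UDC_EP_DMA_SIZE chunk per
+	 * endpoint; EP0 uses chunk 0 directly):
+	 *
+	 *   ep0_buf + i * AST_UDC_EP_DMA_SIZE        EPi chunk
+	 *     [0, AST_UDC_EPn_MAX_PACKET)            packet staging (epn_buf)
+	 *     [AST_UDC_EPn_MAX_PACKET, chunk end)    descriptor ring (descs)
+	 */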
udc->pdev = pdev; + spin_lock_init(&udc->lock); + + udc->gadget.ops = &ast_udc_ops; + udc->gadget.ep0 = &udc->ep[0].ep; + udc->gadget.name = "aspeed-udc"; + udc->gadget.dev.init_name = "gadget"; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + udc->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(udc->reg)) { + dev_err(&pdev->dev, "Failed to map resources\n"); + return PTR_ERR(udc->reg); + } + + platform_set_drvdata(pdev, udc); + + udc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(udc->clk)) { + rc = PTR_ERR(udc->clk); + goto err; + } + rc = clk_prepare_enable(udc->clk); + if (rc) { + dev_err(&pdev->dev, "Failed to enable clock (0x%x)\n", rc); + goto err; + } + + /* Check if we need to limit the HW to USB1 */ + max_speed = usb_get_maximum_speed(&pdev->dev); + if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH) + udc->force_usb1 = true; + + /* + * Allocate DMA buffers for all EPs in one chunk + */ + udc->ep0_buf = dma_alloc_coherent(&pdev->dev, + AST_UDC_EP_DMA_SIZE * + AST_UDC_NUM_ENDPOINTS, + &udc->ep0_buf_dma, GFP_KERNEL); + + udc->gadget.speed = USB_SPEED_UNKNOWN; + udc->gadget.max_speed = USB_SPEED_HIGH; + udc->creq = udc->reg + AST_UDC_SETUP0; + + /* + * Support single stage mode or 32/256 stages descriptor mode. + * Set default as Descriptor Mode. + */ + udc->desc_mode = AST_UDC_DESC_MODE; + + dev_info(&pdev->dev, "DMA %s\n", udc->desc_mode ? + "descriptor mode" : "single mode"); + + INIT_LIST_HEAD(&udc->gadget.ep_list); + INIT_LIST_HEAD(&udc->gadget.ep0->ep_list); + + /* Initialized udc ep */ + ast_udc_init_ep(udc); + + /* Initialized udc device */ + ast_udc_init_dev(udc); + + /* Initialized udc hardware */ + ast_udc_init_hw(udc); + + /* Find interrupt and install handler */ + udc->irq = platform_get_irq(pdev, 0); + if (udc->irq < 0) { + rc = udc->irq; + goto err; + } + + rc = devm_request_irq(&pdev->dev, udc->irq, ast_udc_isr, 0, + KBUILD_MODNAME, udc); + if (rc) { + dev_err(&pdev->dev, "Failed to request interrupt\n"); + goto err; + } + + rc = usb_add_gadget_udc(&pdev->dev, &udc->gadget); + if (rc) { + dev_err(&pdev->dev, "Failed to add gadget udc\n"); + goto err; + } + + dev_info(&pdev->dev, "Initialized udc in USB%s mode\n", + udc->force_usb1 ? "1" : "2"); + + return 0; + +err: + dev_err(&pdev->dev, "Failed to udc probe, rc:0x%x\n", rc); + ast_udc_remove(pdev); + + return rc; +} + +static const struct of_device_id ast_udc_of_dt_ids[] = { + { .compatible = "aspeed,ast2600-udc", }, + {} +}; + +MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids); + +static struct platform_driver ast_udc_driver = { + .probe = ast_udc_probe, + .remove = ast_udc_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = ast_udc_of_dt_ids, + }, +}; + +module_platform_driver(ast_udc_driver); + +MODULE_DESCRIPTION("ASPEED UDC driver"); +MODULE_AUTHOR("Neal Liu "); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/host/octeon-hcd.c b/drivers/usb/host/octeon-hcd.c new file mode 100644 index 0000000000..a1cd81d4a1 --- /dev/null +++ b/drivers/usb/host/octeon-hcd.c @@ -0,0 +1,3740 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Cavium Networks + * + * Some parts of the code were originally released under BSD license: + * + * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights + * reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *
+ *   * Neither the name of Cavium Networks nor the names of
+ *     its contributors may be used to endorse or promote products
+ *     derived from this software without specific prior written
+ *     permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+ * QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+ * ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "octeon-hcd.h"
+
+/**
+ * enum cvmx_usb_speed - the possible USB device speeds
+ *
+ * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
+ * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
+ * @CVMX_USB_SPEED_LOW:  Device is operating at 1.5Mbps
+ */
+enum cvmx_usb_speed {
+	CVMX_USB_SPEED_HIGH = 0,
+	CVMX_USB_SPEED_FULL = 1,
+	CVMX_USB_SPEED_LOW = 2,
+};
+
+/**
+ * enum cvmx_usb_transfer - the possible USB transfer types
+ *
+ * @CVMX_USB_TRANSFER_CONTROL:     USB transfer type control for hub and status
+ *                                 transfers
+ * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
+ *                                 priority periodic transfers
+ * @CVMX_USB_TRANSFER_BULK:        USB transfer type bulk for large low priority
+ *                                 transfers
+ * @CVMX_USB_TRANSFER_INTERRUPT:   USB transfer type interrupt for high priority
+ *                                 periodic transfers
+ */
+enum cvmx_usb_transfer {
+	CVMX_USB_TRANSFER_CONTROL = 0,
+	CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
+	CVMX_USB_TRANSFER_BULK = 2,
+	CVMX_USB_TRANSFER_INTERRUPT = 3,
+};
+
+/**
+ * enum cvmx_usb_direction - the transfer directions
+ *
+ * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
+ * @CVMX_USB_DIRECTION_IN:  Data is transferring from the device/host to Octeon
+ */
+enum cvmx_usb_direction {
+	CVMX_USB_DIRECTION_OUT,
+	CVMX_USB_DIRECTION_IN,
+};
+
+/**
+ * enum cvmx_usb_status - possible callback function status codes
+ *
+ * @CVMX_USB_STATUS_OK:         The transaction / operation finished without
+ *                              any errors
+ * @CVMX_USB_STATUS_SHORT:      FIXME: This is currently not implemented
+ * @CVMX_USB_STATUS_CANCEL:     The transaction was canceled while in flight
+ *                              by a user call to cvmx_usb_cancel
+ * @CVMX_USB_STATUS_ERROR:      The transaction
+ *                              aborted with an unexpected error status
+ * @CVMX_USB_STATUS_STALL:      The transaction received a USB STALL response
+ *                              from the device
+ * @CVMX_USB_STATUS_XACTERR:    The transaction failed with an error from the
+ *                              device even after a number of retries
+ * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle
+ *                              error even after a number of retries
+ * @CVMX_USB_STATUS_BABBLEERR:  The transaction failed with a babble error
+ * @CVMX_USB_STATUS_FRAMEERR:   The transaction failed with a frame error
+ *                              even after a number of retries
+ */
+enum cvmx_usb_status {
+	CVMX_USB_STATUS_OK,
+	CVMX_USB_STATUS_SHORT,
+	CVMX_USB_STATUS_CANCEL,
+	CVMX_USB_STATUS_ERROR,
+	CVMX_USB_STATUS_STALL,
+	CVMX_USB_STATUS_XACTERR,
+	CVMX_USB_STATUS_DATATGLERR,
+	CVMX_USB_STATUS_BABBLEERR,
+	CVMX_USB_STATUS_FRAMEERR,
+};
+
+/**
+ * struct cvmx_usb_port_status - the USB port status information
+ *
+ * @port_enabled:      1 = USB port is enabled, 0 = disabled
+ * @port_over_current: 1 = Over current detected, 0 = Over current not
+ *                     detected. Octeon doesn't support over current detection.
+ * @port_powered:      1 = Port power is being supplied to the device, 0 =
+ *                     power is off. Octeon doesn't support turning port power
+ *                     off.
+ * @port_speed:        Current port speed.
+ * @connected:         1 = A device is connected to the port, 0 = No device is
+ *                     connected.
+ * @connect_change:    1 = Device connected state changed since the last set
+ *                     status call.
+ */
+struct cvmx_usb_port_status {
+	u32 reserved			: 25;
+	u32 port_enabled		: 1;
+	u32 port_over_current		: 1;
+	u32 port_powered		: 1;
+	enum cvmx_usb_speed port_speed	: 2;
+	u32 connected			: 1;
+	u32 connect_change		: 1;
+};
+
+/**
+ * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
+ *
+ * @offset: This is the offset in bytes into the main buffer where this data
+ *          is stored.
+ * @length: This is the length in bytes of the data.
+ * @status: This is the status of this individual packet transfer.
+ */
+struct cvmx_usb_iso_packet {
+	int offset;
+	int length;
+	enum cvmx_usb_status status;
+};
+
+/**
+ * enum cvmx_usb_initialize_flags - flags used by the initialization function
+ *
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI:    The USB port uses a 12MHz crystal
+ *                                            as clock source at USB_XO and
+ *                                            USB_XI.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND:   The USB port uses 12/24/48MHz 2.5V
+ *                                            board clock source at USB_XO.
+ *                                            USB_XI should be tied to GND.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:    Speed of reference clock or
+ *                                            crystal
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:    Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:    Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA:         Disable DMA and use polled IO for
+ *                                            USB data transfers
+ */
+enum cvmx_usb_initialize_flags {
+	CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
+	CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
+	CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
+	CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
+	CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
+	CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
+	/* Bits 3-4 used to encode the clock frequency */
+	CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
+};
+
+/**
+ * enum cvmx_usb_pipe_flags - internal flags for a pipe.
+ *
+ * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
+ *                                 actively using hardware.
+ * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed
+ *                                 pipe is in the ping state.
+ */
+enum cvmx_usb_pipe_flags {
+	CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
+	CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
+};
+
+/* Maximum number of times to retry failed transactions */
+#define MAX_RETRIES 3
+
+/* Maximum number of hardware channels supported by the USB block */
+#define MAX_CHANNELS 8
+
+/*
+ * The low level hardware can transfer a maximum of this number of bytes in
+ * each transfer. The field is 19 bits wide
+ */
+#define MAX_TRANSFER_BYTES ((1 << 19) - 1)
+
+/*
+ * The low level hardware can transfer a maximum of this number of packets in
+ * each transfer. The field is 10 bits wide
+ */
+#define MAX_TRANSFER_PACKETS ((1 << 10) - 1)
+
+/**
+ * enum cvmx_usb_stage - stages of a logical transaction
+ *
+ * Logical transactions may take numerous low level
+ * transactions, especially when splits are concerned. This
+ * enum represents all of the possible stages a transaction can
+ * be in. Note that split completes are always odd. This is so
+ * the NAK handler can back up to the previous low level
+ * transaction with a simple clearing of bit 0.
+ */
+enum cvmx_usb_stage {
+	CVMX_USB_STAGE_NON_CONTROL,
+	CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
+	CVMX_USB_STAGE_SETUP,
+	CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
+	CVMX_USB_STAGE_DATA,
+	CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
+	CVMX_USB_STAGE_STATUS,
+	CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
+};
+
+/**
+ * struct cvmx_usb_transaction - describes each pending USB transaction
+ *                               regardless of type. These are linked together
+ *                               to form a list of pending requests for a pipe.
+ *
+ * @node:               List node for transactions in the pipe.
+ * @type:               Type of transaction, duplicated from the pipe.
+ * @buffer:             User's physical buffer address to read/write.
+ * @buffer_length:      Size of the user's buffer in bytes.
+ * @control_header:     For control transactions, physical address of the 8
+ *                      byte standard header.
+ * @iso_start_frame:    For ISO transactions, the starting frame number.
+ * @iso_number_packets: For ISO transactions, the number of packets in the
+ *                      request.
+ * @iso_packets:        For ISO transactions, the sub packets in the request.
+ * @xfersize:           Hardware transfer size currently programmed for this
+ *                      transaction.
+ * @pktcnt:             Hardware packet count currently programmed for this
+ *                      transaction.
+ * @retries:            Number of times this transaction has been retried.
+ * @actual_bytes:       Actual bytes transferred for this transaction.
+ * @stage:              For control transactions, the current stage.
+ * @urb:                URB.
+ */
+struct cvmx_usb_transaction {
+	struct list_head node;
+	enum cvmx_usb_transfer type;
+	u64 buffer;
+	int buffer_length;
+	u64 control_header;
+	int iso_start_frame;
+	int iso_number_packets;
+	struct cvmx_usb_iso_packet *iso_packets;
+	int xfersize;
+	int pktcnt;
+	int retries;
+	int actual_bytes;
+	enum cvmx_usb_stage stage;
+	struct urb *urb;
+};
+
+/**
+ * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
+ *                        and some USB device. It contains a list of pending
+ *                        requests to the device.
+ *
+ * @node:            List node for pipe list
+ * @transactions:    List of pending transactions
+ * @interval:        For periodic pipes, the interval between packets in
+ *                   frames
+ * @next_tx_frame:   The next frame this pipe is allowed to transmit on
+ * @flags:           State flags for this pipe
+ * @device_speed:    Speed of device connected to this pipe
+ * @transfer_type:   Type of transaction supported by this pipe
+ * @transfer_dir:    IN or OUT.
Ignored for Control + * @multi_count: Max packet in a row for the device + * @max_packet: The device's maximum packet size in bytes + * @device_addr: USB device address at other end of pipe + * @endpoint_num: USB endpoint number at other end of pipe + * @hub_device_addr: Hub address this device is connected to + * @hub_port: Hub port this device is connected to + * @pid_toggle: This toggles between 0/1 on every packet send to track + * the data pid needed + * @channel: Hardware DMA channel for this pipe + * @split_sc_frame: The low order bits of the frame number the split + * complete should be sent on + */ +struct cvmx_usb_pipe { + struct list_head node; + struct list_head transactions; + u64 interval; + u64 next_tx_frame; + enum cvmx_usb_pipe_flags flags; + enum cvmx_usb_speed device_speed; + enum cvmx_usb_transfer transfer_type; + enum cvmx_usb_direction transfer_dir; + int multi_count; + u16 max_packet; + u8 device_addr; + u8 endpoint_num; + u8 hub_device_addr; + u8 hub_port; + u8 pid_toggle; + u8 channel; + s8 split_sc_frame; +}; + +struct cvmx_usb_tx_fifo { + struct { + int channel; + int size; + u64 address; + } entry[MAX_CHANNELS + 1]; + int head; + int tail; +}; + +/** + * struct octeon_hcd - the state of the USB block + * + * lock: Serialization lock. + * init_flags: Flags passed to initialize. + * index: Which USB block this is for. + * idle_hardware_channels: Bit set for every idle hardware channel. + * usbcx_hprt: Stored port status so we don't need to read a CSR to + * determine splits. + * pipe_for_channel: Map channels to pipes. + * pipe: Storage for pipes. + * indent: Used by debug output to indent functions. + * port_status: Last port status used for change notification. + * idle_pipes: List of open pipes that have no transactions. + * active_pipes: Active pipes indexed by transfer type. + * frame_number: Increments every SOF interrupt for time keeping. + * active_split: Points to the current active split, or NULL. + */ +struct octeon_hcd { + spinlock_t lock; /* serialization lock */ + int init_flags; + int index; + int idle_hardware_channels; + union cvmx_usbcx_hprt usbcx_hprt; + struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS]; + int indent; + struct cvmx_usb_port_status port_status; + struct list_head idle_pipes; + struct list_head active_pipes[4]; + u64 frame_number; + struct cvmx_usb_transaction *active_split; + struct cvmx_usb_tx_fifo periodic; + struct cvmx_usb_tx_fifo nonperiodic; +}; + +/* + * This macro logically sets a single field in a CSR. It does the sequence + * read, modify, and write + */ +#define USB_SET_FIELD32(address, _union, field, value) \ + do { \ + union _union c; \ + \ + c.u32 = cvmx_usb_read_csr32(usb, address); \ + c.s.field = value; \ + cvmx_usb_write_csr32(usb, address, c.u32); \ + } while (0) + +/* Returns the IO address to push/pop stuff data from the FIFOs */ +#define USB_FIFO_ADDRESS(channel, usb_index) \ + (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000) + +/** + * struct octeon_temp_buffer - a bounce buffer for USB transfers + * @orig_buffer: the original buffer passed by the USB stack + * @data: the newly allocated temporary buffer (excluding meta-data) + * + * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If + * the buffer is too short, we need to allocate a temporary one, and this struct + * represents it. 
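+ *
+ * For example, a 6-byte interrupt IN transfer generates two full words
+ * of FIFO/DMA traffic; the final word would overrun the caller's
+ * buffer, so the transfer lands in @data and only the valid bytes are
+ * copied back to @orig_buffer on completion.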
+ */ +struct octeon_temp_buffer { + void *orig_buffer; + u8 data[]; +}; + +static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p) +{ + return container_of((void *)p, struct usb_hcd, hcd_priv); +} + +/** + * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer + * (if needed) + * @urb: URB. + * @mem_flags: Memory allocation flags. + * + * This function allocates a temporary bounce buffer whenever it's needed + * due to HW limitations. + */ +static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) +{ + struct octeon_temp_buffer *temp; + + if (urb->num_sgs || urb->sg || + (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) || + !(urb->transfer_buffer_length % sizeof(u32))) + return 0; + + temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) + + sizeof(*temp), mem_flags); + if (!temp) + return -ENOMEM; + + temp->orig_buffer = urb->transfer_buffer; + if (usb_urb_dir_out(urb)) + memcpy(temp->data, urb->transfer_buffer, + urb->transfer_buffer_length); + urb->transfer_buffer = temp->data; + urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; + + return 0; +} + +/** + * octeon_free_temp_buffer - free a temporary buffer used by USB transfers. + * @urb: URB. + * + * Frees a buffer allocated by octeon_alloc_temp_buffer(). + */ +static void octeon_free_temp_buffer(struct urb *urb) +{ + struct octeon_temp_buffer *temp; + size_t length; + + if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) + return; + + temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer, + data); + if (usb_urb_dir_in(urb)) { + if (usb_pipeisoc(urb->pipe)) + length = urb->transfer_buffer_length; + else + length = urb->actual_length; + + memcpy(temp->orig_buffer, urb->transfer_buffer, length); + } + urb->transfer_buffer = temp->orig_buffer; + urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; + kfree(temp); +} + +/** + * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma(). + * @hcd: USB HCD structure. + * @urb: URB. + * @mem_flags: Memory allocation flags. + */ +static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + int ret; + + ret = octeon_alloc_temp_buffer(urb, mem_flags); + if (ret) + return ret; + + ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); + if (ret) + octeon_free_temp_buffer(urb); + + return ret; +} + +/** + * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma() + * @hcd: USB HCD structure. + * @urb: URB. + */ +static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) +{ + usb_hcd_unmap_urb_for_dma(hcd, urb); + octeon_free_temp_buffer(urb); +} + +/** + * Read a USB 32bit CSR. It performs the necessary address swizzle + * for 32bit CSRs and logs the value in a readable format if + * debugging is on. + * + * @usb: USB block this access is for + * @address: 64bit address to read + * + * Returns: Result of the read + */ +static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address) +{ + return cvmx_read64_uint32(address ^ 4); +} + +/** + * Write a USB 32bit CSR. It performs the necessary address + * swizzle for 32bit CSRs and logs the value in a readable format + * if debugging is on. 
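+ *
+ * A worked example of the swizzle (the XOR with 4 compensates for the
+ * big-endian word ordering of 32-bit registers on the 64-bit bus,
+ * assuming an 8-byte-aligned base):
+ *
+ *	cvmx_usb_write_csr32(usb, base + 0x10, val)
+ *		-> cvmx_write64_uint32((base + 0x10) ^ 4, val)
+ *		   i.e. the write lands at base + 0x14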
+ * + * @usb: USB block this access is for + * @address: 64bit address to write + * @value: Value to write + */ +static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb, + u64 address, u32 value) +{ + cvmx_write64_uint32(address ^ 4, value); + cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); +} + +/** + * Return non zero if this pipe connects to a non HIGH speed + * device through a high speed hub. + * + * @usb: USB block this access is for + * @pipe: Pipe to check + * + * Returns: Non zero if we need to do split transactions + */ +static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe) +{ + return pipe->device_speed != CVMX_USB_SPEED_HIGH && + usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH; +} + +/** + * Trivial utility function to return the correct PID for a pipe + * + * @pipe: pipe to check + * + * Returns: PID for pipe + */ +static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) +{ + if (pipe->pid_toggle) + return 2; /* Data1 */ + return 0; /* Data0 */ +} + +/* Loops through register until txfflsh or rxfflsh become zero.*/ +static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type) +{ + int result; + u64 address = CVMX_USBCX_GRSTCTL(usb->index); + u64 done = cvmx_get_cycle() + 100 * + (u64)octeon_get_clock_rate / 1000000; + union cvmx_usbcx_grstctl c; + + while (1) { + c.u32 = cvmx_usb_read_csr32(usb, address); + if (fflsh_type == 0 && c.s.txfflsh == 0) { + result = 0; + break; + } else if (fflsh_type == 1 && c.s.rxfflsh == 0) { + result = 0; + break; + } else if (cvmx_get_cycle() > done) { + result = -1; + break; + } + + __delay(100); + } + return result; +} + +static void cvmx_fifo_setup(struct octeon_hcd *usb) +{ + union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3; + union cvmx_usbcx_gnptxfsiz npsiz; + union cvmx_usbcx_hptxfsiz psiz; + + usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GHWCFG3(usb->index)); + + /* + * Program the USBC_GRXFSIZ register to select the size of the receive + * FIFO (25%). + */ + USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz, + rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4); + + /* + * Program the USBC_GNPTXFSIZ register to select the size and the start + * address of the non-periodic transmit FIFO for nonperiodic + * transactions (50%). + */ + npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index)); + npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2; + npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32); + + /* + * Program the USBC_HPTXFSIZ register to select the size and start + * address of the periodic transmit FIFO for periodic transactions + * (25%). + */ + psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index)); + psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4; + psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32); + + /* Flush all FIFOs */ + USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), + cvmx_usbcx_grstctl, txfnum, 0x10); + USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), + cvmx_usbcx_grstctl, txfflsh, 1); + cvmx_wait_tx_rx(usb, 0); + USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), + cvmx_usbcx_grstctl, rxfflsh, 1); + cvmx_wait_tx_rx(usb, 1); +} + +/** + * Shutdown a USB port after a call to cvmx_usb_initialize(). + * The port should be disabled with all pipes closed when this + * function is called. + * + * @usb: USB device state populated by cvmx_usb_initialize(). 
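+ *	The implementation refuses to shut down while any pipe is still
+ *	open: it checks the idle and active pipe lists and returns
+ *	-EBUSY before touching the clocks.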
+ * + * Returns: 0 or a negative error code. + */ +static int cvmx_usb_shutdown(struct octeon_hcd *usb) +{ + union cvmx_usbnx_clk_ctl usbn_clk_ctl; + + /* Make sure all pipes are closed */ + if (!list_empty(&usb->idle_pipes) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK])) + return -EBUSY; + + /* Disable the clocks and put them in power on reset */ + usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index)); + usbn_clk_ctl.s.enable = 1; + usbn_clk_ctl.s.por = 1; + usbn_clk_ctl.s.hclk_rst = 1; + usbn_clk_ctl.s.prst = 0; + usbn_clk_ctl.s.hrst = 0; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + return 0; +} + +/** + * Initialize a USB port for use. This must be called before any + * other access to the Octeon USB port is made. The port starts + * off in the disabled state. + * + * @dev: Pointer to struct device for logging purposes. + * @usb: Pointer to struct octeon_hcd. + * + * Returns: 0 or a negative error code. + */ +static int cvmx_usb_initialize(struct device *dev, + struct octeon_hcd *usb) +{ + int channel; + int divisor; + int retries = 0; + union cvmx_usbcx_hcfg usbcx_hcfg; + union cvmx_usbnx_clk_ctl usbn_clk_ctl; + union cvmx_usbcx_gintsts usbc_gintsts; + union cvmx_usbcx_gahbcfg usbcx_gahbcfg; + union cvmx_usbcx_gintmsk usbcx_gintmsk; + union cvmx_usbcx_gusbcfg usbcx_gusbcfg; + union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; + +retry: + /* + * Power On Reset and PHY Initialization + * + * 1. Wait for DCOK to assert (nothing to do) + * + * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and + * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 + */ + usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index)); + usbn_clk_ctl.s.por = 1; + usbn_clk_ctl.s.hrst = 0; + usbn_clk_ctl.s.prst = 0; + usbn_clk_ctl.s.hclk_rst = 0; + usbn_clk_ctl.s.enable = 0; + /* + * 2b. Select the USB reference clock/crystal parameters by writing + * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] + */ + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) { + /* + * The USB port uses 12/24/48MHz 2.5V board clock + * source at USB_XO. USB_XI should be tied to GND. + * Most Octeon evaluation boards require this setting + */ + if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || + OCTEON_IS_MODEL(OCTEON_CN56XX) || + OCTEON_IS_MODEL(OCTEON_CN50XX)) + /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */ + usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */ + else + /* From CN52XX manual */ + usbn_clk_ctl.s.p_rtype = 1; + + switch (usb->init_flags & + CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) { + case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: + usbn_clk_ctl.s.p_c_sel = 0; + break; + case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: + usbn_clk_ctl.s.p_c_sel = 1; + break; + case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: + usbn_clk_ctl.s.p_c_sel = 2; + break; + } + } else { + /* + * The USB port uses a 12MHz crystal as clock source + * at USB_XO and USB_XI + */ + if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) + /* From CN31XX,CN30XX manual */ + usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */ + else + /* From CN56XX,CN52XX,CN50XX manuals. */ + usbn_clk_ctl.s.p_rtype = 0; + + usbn_clk_ctl.s.p_c_sel = 0; + } + /* + * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and + * setting USBN0/1_CLK_CTL[ENABLE] = 1. 
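+ *	(Worked example for the DIV_ROUND_UP() below: a 1 GHz core clock
+ *	gives a divisor of 8, i.e. a 125 MHz HCLK; a 600 MHz core clock
+ *	gives 5, i.e. 120 MHz; anything below 4 is clamped to 4.)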
Divide the core clock down + * such that USB is as close as possible to 125Mhz + */ + divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000); + /* Lower than 4 doesn't seem to work properly */ + if (divisor < 4) + divisor = 4; + usbn_clk_ctl.s.divide = divisor; + usbn_clk_ctl.s.divide2 = 0; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + + /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */ + usbn_clk_ctl.s.hclk_rst = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */ + __delay(64); + /* + * 3. Program the power-on reset field in the USBN clock-control + * register: + * USBN_CLK_CTL[POR] = 0 + */ + usbn_clk_ctl.s.por = 0; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* 4. Wait 1 ms for PHY clock to start */ + mdelay(1); + /* + * 5. Program the Reset input from automatic test equipment field in the + * USBP control and status register: + * USBN_USBP_CTL_STATUS[ATE_RESET] = 1 + */ + usbn_usbp_ctl_status.u64 = + cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index)); + usbn_usbp_ctl_status.s.ate_reset = 1; + cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), + usbn_usbp_ctl_status.u64); + /* 6. Wait 10 cycles */ + __delay(10); + /* + * 7. Clear ATE_RESET field in the USBN clock-control register: + * USBN_USBP_CTL_STATUS[ATE_RESET] = 0 + */ + usbn_usbp_ctl_status.s.ate_reset = 0; + cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), + usbn_usbp_ctl_status.u64); + /* + * 8. Program the PHY reset field in the USBN clock-control register: + * USBN_CLK_CTL[PRST] = 1 + */ + usbn_clk_ctl.s.prst = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* + * 9. Program the USBP control and status register to select host or + * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for + * device + */ + usbn_usbp_ctl_status.s.hst_mode = 0; + cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), + usbn_usbp_ctl_status.u64); + /* 10. Wait 1 us */ + udelay(1); + /* + * 11. Program the hreset_n field in the USBN clock-control register: + * USBN_CLK_CTL[HRST] = 1 + */ + usbn_clk_ctl.s.hrst = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* 12. Proceed to USB core initialization */ + usbn_clk_ctl.s.enable = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + udelay(1); + + /* + * USB Core Initialization + * + * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to + * determine USB core configuration parameters. + * + * Nothing needed + * + * 2. Program the following fields in the global AHB configuration + * register (USBC_GAHBCFG) + * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode + * Burst length, USBC_GAHBCFG[HBSTLEN] = 0 + * Nonperiodic TxFIFO empty level (slave mode only), + * USBC_GAHBCFG[NPTXFEMPLVL] + * Periodic TxFIFO empty level (slave mode only), + * USBC_GAHBCFG[PTXFEMPLVL] + * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 + */ + usbcx_gahbcfg.u32 = 0; + usbcx_gahbcfg.s.dmaen = !(usb->init_flags & + CVMX_USB_INITIALIZE_FLAGS_NO_DMA); + usbcx_gahbcfg.s.hbstlen = 0; + usbcx_gahbcfg.s.nptxfemplvl = 1; + usbcx_gahbcfg.s.ptxfemplvl = 1; + usbcx_gahbcfg.s.glblintrmsk = 1; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), + usbcx_gahbcfg.u32); + + /* + * 3. Program the following fields in USBC_GUSBCFG register. 
+ * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0 + * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0 + * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5 + * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 + */ + usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GUSBCFG(usb->index)); + usbcx_gusbcfg.s.toutcal = 0; + usbcx_gusbcfg.s.ddrsel = 0; + usbcx_gusbcfg.s.usbtrdtim = 0x5; + usbcx_gusbcfg.s.phylpwrclksel = 0; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), + usbcx_gusbcfg.u32); + + /* + * 4. The software must unmask the following bits in the USBC_GINTMSK + * register. + * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1 + * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1 + */ + usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GINTMSK(usb->index)); + usbcx_gintmsk.s.otgintmsk = 1; + usbcx_gintmsk.s.modemismsk = 1; + usbcx_gintmsk.s.hchintmsk = 1; + usbcx_gintmsk.s.sofmsk = 0; + /* We need RX FIFO interrupts if we don't have DMA */ + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) + usbcx_gintmsk.s.rxflvlmsk = 1; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), + usbcx_gintmsk.u32); + + /* + * Disable all channel interrupts. We'll enable them per channel later. + */ + for (channel = 0; channel < 8; channel++) + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCINTMSKX(channel, usb->index), + 0); + + /* + * Host Port Initialization + * + * 1. Program the host-port interrupt-mask field to unmask, + * USBC_GINTMSK[PRTINT] = 1 + */ + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, prtintmsk, 1); + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, disconnintmsk, 1); + + /* + * 2. Program the USBC_HCFG register to select full-speed host + * or high-speed host. + */ + usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index)); + usbcx_hcfg.s.fslssupp = 0; + usbcx_hcfg.s.fslspclksel = 0; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32); + + cvmx_fifo_setup(usb); + + /* + * If the controller is getting port events right after the reset, it + * means the initialization failed. Try resetting the controller again + * in such case. This is seen to happen after cold boot on DSR-1000N. + */ + usbc_gintsts.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GINTSTS(usb->index)); + cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), + usbc_gintsts.u32); + dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32); + if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint) + return 0; + if (retries++ >= 5) + return -EAGAIN; + dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n", + (int)usbc_gintsts.u32); + msleep(50); + cvmx_usb_shutdown(usb); + msleep(50); + goto retry; +} + +/** + * Reset a USB port. After this call succeeds, the USB port is + * online and servicing requests. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + */ +static void cvmx_usb_reset_port(struct octeon_hcd *usb) +{ + usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HPRT(usb->index)); + + /* Program the port reset bit to start the reset process */ + USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, + prtrst, 1); + + /* + * Wait at least 50ms (high speed), or 10ms (full speed) for the reset + * process to complete. 
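+ * We always wait the high speed figure, which also satisfies the
+ * full speed requirement.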
+ */
+	mdelay(50);
+
+	/* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
+	USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
+			prtrst, 0);
+
+	/*
+	 * Read the port speed field to get the enumerated speed,
+	 * USBC_HPRT[PRTSPD].
+	 */
+	usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
+						  CVMX_USBCX_HPRT(usb->index));
+}
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ * Transactions in process will fail and call their
+ * associated callbacks.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_disable(struct octeon_hcd *usb)
+{
+	/* Disable the port; HPRT[PRTENA] is write 1 to clear */
+	USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
+			prtena, 1);
+	return 0;
+}
+
+/**
+ * Get the current state of the USB port. Use this call to
+ * determine if the usb port has anything connected, is enabled,
+ * or has some sort of error condition. The return value of this
+ * call has "changed" bits to signal if the value of some fields
+ * have changed between calls.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: Port status information
+ */
+static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
+{
+	union cvmx_usbcx_hprt usbc_hprt;
+	struct cvmx_usb_port_status result;
+
+	memset(&result, 0, sizeof(result));
+
+	usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+	result.port_enabled = usbc_hprt.s.prtena;
+	result.port_over_current = usbc_hprt.s.prtovrcurract;
+	result.port_powered = usbc_hprt.s.prtpwr;
+	result.port_speed = usbc_hprt.s.prtspd;
+	result.connected = usbc_hprt.s.prtconnsts;
+	result.connect_change =
+		result.connected != usb->port_status.connected;
+
+	return result;
+}
+
+/**
+ * Open a virtual pipe between the host and a USB device. A pipe
+ * must be opened before data can be transferred between a device
+ * and Octeon.
+ *
+ * @usb:             USB device state populated by cvmx_usb_initialize().
+ * @device_addr:     USB device address to open the pipe to
+ *                   (0-127).
+ * @endpoint_num:    USB endpoint number to open the pipe to
+ *                   (0-15).
+ * @device_speed:    The speed of the device the pipe is going
+ *                   to. This must match the device's speed,
+ *                   which may be different from the port speed.
+ * @max_packet:      The maximum packet length the device can
+ *                   transmit/receive (low speed=0-8, full
+ *                   speed=0-1023, high speed=0-1024). This value
+ *                   comes from the standard endpoint descriptor
+ *                   field wMaxPacketSize bits <10:0>.
+ * @transfer_type:   The type of transfer this pipe is for.
+ * @transfer_dir:    The direction the pipe is in. This is not
+ *                   used for control pipes.
+ * @interval:        For ISOCHRONOUS and INTERRUPT transfers,
+ *                   this is how often the transfer is scheduled
+ *                   for. All other transfers should specify
+ *                   zero. The units are in frames (8000/sec at
+ *                   high speed, 1000/sec for full speed).
+ * @multi_count:     For high speed devices, this is the maximum
+ *                   allowed number of packets per microframe.
+ *                   Specify zero for non high speed devices. This
+ *                   value comes from the standard endpoint descriptor
+ *                   field wMaxPacketSize bits <12:11>.
+ * @hub_device_addr: Hub device address this device is connected
+ *                   to. Devices connected directly to Octeon
+ *                   use zero. This is only used when the device
+ *                   is full/low speed behind a high speed hub.
+ *                   The address will be of the high speed hub,
+ *                   not any full speed hubs after it.
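+ *                   For example, a full speed device on port 2 of a
+ *                   high speed hub at address 3 opens its pipes with
+ *                   hub_device_addr = 3 and hub_port = 2.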
+ * @hub_port: Which port on the hub the device is + * connected. Use zero for devices connected + * directly to Octeon. Like hub_device_addr, + * this is only used for full/low speed + * devices behind a high speed hub. + * + * Returns: A non-NULL value is a pipe. NULL means an error. + */ +static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb, + int device_addr, + int endpoint_num, + enum cvmx_usb_speed + device_speed, + int max_packet, + enum cvmx_usb_transfer + transfer_type, + enum cvmx_usb_direction + transfer_dir, + int interval, int multi_count, + int hub_device_addr, + int hub_port) +{ + struct cvmx_usb_pipe *pipe; + + pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC); + if (!pipe) + return NULL; + if ((device_speed == CVMX_USB_SPEED_HIGH) && + (transfer_dir == CVMX_USB_DIRECTION_OUT) && + (transfer_type == CVMX_USB_TRANSFER_BULK)) + pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; + pipe->device_addr = device_addr; + pipe->endpoint_num = endpoint_num; + pipe->device_speed = device_speed; + pipe->max_packet = max_packet; + pipe->transfer_type = transfer_type; + pipe->transfer_dir = transfer_dir; + INIT_LIST_HEAD(&pipe->transactions); + + /* + * All pipes use interval to rate limit NAK processing. Force an + * interval if one wasn't supplied + */ + if (!interval) + interval = 1; + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + pipe->interval = interval * 8; + /* Force start splits to be schedule on uFrame 0 */ + pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) + + pipe->interval; + } else { + pipe->interval = interval; + pipe->next_tx_frame = usb->frame_number + pipe->interval; + } + pipe->multi_count = multi_count; + pipe->hub_device_addr = hub_device_addr; + pipe->hub_port = hub_port; + pipe->pid_toggle = 0; + pipe->split_sc_frame = -1; + list_add_tail(&pipe->node, &usb->idle_pipes); + + /* + * We don't need to tell the hardware about this pipe yet since + * it doesn't have any submitted requests + */ + + return pipe; +} + +/** + * Poll the RX FIFOs and remove data as needed. This function is only used + * in non DMA mode. It is very important that this function be called quickly + * enough to prevent FIFO overflow. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + */ +static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb) +{ + union cvmx_usbcx_grxstsph rx_status; + int channel; + int bytes; + u64 address; + u32 *ptr; + + rx_status.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GRXSTSPH(usb->index)); + /* Only read data if IN data is there */ + if (rx_status.s.pktsts != 2) + return; + /* Check if no data is available */ + if (!rx_status.s.bcnt) + return; + + channel = rx_status.s.chnum; + bytes = rx_status.s.bcnt; + if (!bytes) + return; + + /* Get where the DMA engine would have written this data */ + address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + + channel * 8); + + ptr = cvmx_phys_to_ptr(address); + cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8, + address + bytes); + + /* Loop writing the FIFO data for this packet into memory */ + while (bytes > 0) { + *ptr++ = cvmx_usb_read_csr32(usb, + USB_FIFO_ADDRESS(channel, usb->index)); + bytes -= 4; + } + CVMX_SYNCW; +} + +/** + * Fill the TX hardware fifo with data out of the software + * fifos + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @fifo: Software fifo to use + * @available: Amount of space in the hardware fifo + * + * Returns: Non zero if the hardware fifo was too small and needs + * to be serviced again. 
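+ *
+ * The software fifo is a ring of MAX_CHANNELS + 1 entries indexed by
+ * head/tail; head == tail means empty, which is why one extra slot is
+ * allocated.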
+ */ +static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb, + struct cvmx_usb_tx_fifo *fifo, int available) +{ + /* + * We're done either when there isn't anymore space or the software FIFO + * is empty + */ + while (available && (fifo->head != fifo->tail)) { + int i = fifo->tail; + const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address); + u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, + usb->index) ^ 4; + int words = available; + + /* Limit the amount of data to what the SW fifo has */ + if (fifo->entry[i].size <= available) { + words = fifo->entry[i].size; + fifo->tail++; + if (fifo->tail > MAX_CHANNELS) + fifo->tail = 0; + } + + /* Update the next locations and counts */ + available -= words; + fifo->entry[i].address += words * 4; + fifo->entry[i].size -= words; + + /* + * Write the HW fifo data. The read every three writes is due + * to an errata on CN3XXX chips + */ + while (words > 3) { + cvmx_write64_uint32(csr_address, *ptr++); + cvmx_write64_uint32(csr_address, *ptr++); + cvmx_write64_uint32(csr_address, *ptr++); + cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); + words -= 3; + } + cvmx_write64_uint32(csr_address, *ptr++); + if (--words) { + cvmx_write64_uint32(csr_address, *ptr++); + if (--words) + cvmx_write64_uint32(csr_address, *ptr++); + } + cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); + } + return fifo->head != fifo->tail; +} + +/** + * Check the hardware FIFOs and fill them as needed + * + * @usb: USB device state populated by cvmx_usb_initialize(). + */ +static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb) +{ + if (usb->periodic.head != usb->periodic.tail) { + union cvmx_usbcx_hptxsts tx_status; + + tx_status.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HPTXSTS(usb->index)); + if (cvmx_usb_fill_tx_hw(usb, &usb->periodic, + tx_status.s.ptxfspcavail)) + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, ptxfempmsk, 1); + else + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, ptxfempmsk, 0); + } + + if (usb->nonperiodic.head != usb->nonperiodic.tail) { + union cvmx_usbcx_gnptxsts tx_status; + + tx_status.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GNPTXSTS(usb->index)); + if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, + tx_status.s.nptxfspcavail)) + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, nptxfempmsk, 1); + else + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, nptxfempmsk, 0); + } +} + +/** + * Fill the TX FIFO with an outgoing packet + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @channel: Channel number to get packet from + */ +static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel) +{ + union cvmx_usbcx_hccharx hcchar; + union cvmx_usbcx_hcspltx usbc_hcsplt; + union cvmx_usbcx_hctsizx usbc_hctsiz; + struct cvmx_usb_tx_fifo *fifo; + + /* We only need to fill data on outbound channels */ + hcchar.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCCHARX(channel, usb->index)); + if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT) + return; + + /* OUT Splits only have data on the start and not the complete */ + usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCSPLTX(channel, usb->index)); + if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt) + return; + + /* + * Find out how many bytes we need to fill and convert it into 32bit + * words. 
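+	 * For example, the "(xfersize + 3) >> 2" below rounds up to whole
+	 * words: a 61 byte packet occupies 16 words.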
+ */ + usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCTSIZX(channel, usb->index)); + if (!usbc_hctsiz.s.xfersize) + return; + + if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) || + (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS)) + fifo = &usb->periodic; + else + fifo = &usb->nonperiodic; + + fifo->entry[fifo->head].channel = channel; + fifo->entry[fifo->head].address = + cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + + channel * 8); + fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2; + fifo->head++; + if (fifo->head > MAX_CHANNELS) + fifo->head = 0; + + cvmx_usb_poll_tx_fifo(usb); +} + +/** + * Perform channel specific setup for Control transactions. All + * the generic stuff will already have been done in cvmx_usb_start_channel(). + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @channel: Channel to setup + * @pipe: Pipe for control transaction + */ +static void cvmx_usb_start_channel_control(struct octeon_hcd *usb, + int channel, + struct cvmx_usb_pipe *pipe) +{ + struct usb_hcd *hcd = octeon_to_hcd(usb); + struct device *dev = hcd->self.controller; + struct cvmx_usb_transaction *transaction = + list_first_entry(&pipe->transactions, typeof(*transaction), + node); + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + int bytes_to_transfer = transaction->buffer_length - + transaction->actual_bytes; + int packets_to_transfer; + union cvmx_usbcx_hctsizx usbc_hctsiz; + + usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCTSIZX(channel, usb->index)); + + switch (transaction->stage) { + case CVMX_USB_STAGE_NON_CONTROL: + case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: + dev_err(dev, "%s: ERROR - Non control stage\n", __func__); + break; + case CVMX_USB_STAGE_SETUP: + usbc_hctsiz.s.pid = 3; /* Setup */ + bytes_to_transfer = sizeof(*header); + /* All Control operations start with a setup going OUT */ + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + CVMX_USB_DIRECTION_OUT); + /* + * Setup send the control header instead of the buffer data. The + * buffer data will be used in the next stage + */ + cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + + channel * 8, + transaction->control_header); + break; + case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: + usbc_hctsiz.s.pid = 3; /* Setup */ + bytes_to_transfer = 0; + /* All Control operations start with a setup going OUT */ + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + CVMX_USB_DIRECTION_OUT); + + USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), + cvmx_usbcx_hcspltx, compsplt, 1); + break; + case CVMX_USB_STAGE_DATA: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (header->bRequestType & USB_DIR_IN) + bytes_to_transfer = 0; + else if (bytes_to_transfer > pipe->max_packet) + bytes_to_transfer = pipe->max_packet; + } + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? + CVMX_USB_DIRECTION_IN : + CVMX_USB_DIRECTION_OUT)); + break; + case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + if (!(header->bRequestType & USB_DIR_IN)) + bytes_to_transfer = 0; + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? 
+ CVMX_USB_DIRECTION_IN : + CVMX_USB_DIRECTION_OUT)); + USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), + cvmx_usbcx_hcspltx, compsplt, 1); + break; + case CVMX_USB_STAGE_STATUS: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + bytes_to_transfer = 0; + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? + CVMX_USB_DIRECTION_OUT : + CVMX_USB_DIRECTION_IN)); + break; + case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + bytes_to_transfer = 0; + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? + CVMX_USB_DIRECTION_OUT : + CVMX_USB_DIRECTION_IN)); + USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), + cvmx_usbcx_hcspltx, compsplt, 1); + break; + } + + /* + * Make sure the transfer never exceeds the byte limit of the hardware. + * Further bytes will be sent as continued transactions + */ + if (bytes_to_transfer > MAX_TRANSFER_BYTES) { + /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */ + bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet; + bytes_to_transfer *= pipe->max_packet; + } + + /* + * Calculate the number of packets to transfer. If the length is zero + * we still need to transfer one packet + */ + packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer, + pipe->max_packet); + if (packets_to_transfer == 0) { + packets_to_transfer = 1; + } else if ((packets_to_transfer > 1) && + (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { + /* + * Limit to one packet when not using DMA. Channels must be + * restarted between every packet for IN transactions, so there + * is no reason to do multiple packets in a row + */ + packets_to_transfer = 1; + bytes_to_transfer = packets_to_transfer * pipe->max_packet; + } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) { + /* + * Limit the number of packet and data transferred to what the + * hardware can handle + */ + packets_to_transfer = MAX_TRANSFER_PACKETS; + bytes_to_transfer = packets_to_transfer * pipe->max_packet; + } + + usbc_hctsiz.s.xfersize = bytes_to_transfer; + usbc_hctsiz.s.pktcnt = packets_to_transfer; + + cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), + usbc_hctsiz.u32); +} + +/** + * Start a channel to perform the pipe's head transaction + * + * @usb: USB device state populated by cvmx_usb_initialize(). 
+ * @channel: Channel to setup + * @pipe: Pipe to start + */ +static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel, + struct cvmx_usb_pipe *pipe) +{ + struct cvmx_usb_transaction *transaction = + list_first_entry(&pipe->transactions, typeof(*transaction), + node); + + /* Make sure all writes to the DMA region get flushed */ + CVMX_SYNCW; + + /* Attach the channel to the pipe */ + usb->pipe_for_channel[channel] = pipe; + pipe->channel = channel; + pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED; + + /* Mark this channel as in use */ + usb->idle_hardware_channels &= ~(1 << channel); + + /* Enable the channel interrupt bits */ + { + union cvmx_usbcx_hcintx usbc_hcint; + union cvmx_usbcx_hcintmskx usbc_hcintmsk; + union cvmx_usbcx_haintmsk usbc_haintmsk; + + /* Clear all channel status bits */ + usbc_hcint.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCINTX(channel, usb->index)); + + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCINTX(channel, usb->index), + usbc_hcint.u32); + + usbc_hcintmsk.u32 = 0; + usbc_hcintmsk.s.chhltdmsk = 1; + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { + /* + * Channels need these extra interrupts when we aren't + * in DMA mode. + */ + usbc_hcintmsk.s.datatglerrmsk = 1; + usbc_hcintmsk.s.frmovrunmsk = 1; + usbc_hcintmsk.s.bblerrmsk = 1; + usbc_hcintmsk.s.xacterrmsk = 1; + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * Splits don't generate xfercompl, so we need + * ACK and NYET. + */ + usbc_hcintmsk.s.nyetmsk = 1; + usbc_hcintmsk.s.ackmsk = 1; + } + usbc_hcintmsk.s.nakmsk = 1; + usbc_hcintmsk.s.stallmsk = 1; + usbc_hcintmsk.s.xfercomplmsk = 1; + } + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCINTMSKX(channel, usb->index), + usbc_hcintmsk.u32); + + /* Enable the channel interrupt to propagate */ + usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HAINTMSK(usb->index)); + usbc_haintmsk.s.haintmsk |= 1 << channel; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), + usbc_haintmsk.u32); + } + + /* Setup the location the DMA engine uses. */ + { + u64 reg; + u64 dma_address = transaction->buffer + + transaction->actual_bytes; + + if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) + dma_address = transaction->buffer + + transaction->iso_packets[0].offset + + transaction->actual_bytes; + + if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) + reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index); + else + reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index); + cvmx_write64_uint64(reg + channel * 8, dma_address); + } + + /* Setup both the size of the transfer and the SPLIT characteristics */ + { + union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0}; + union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0}; + int packets_to_transfer; + int bytes_to_transfer = transaction->buffer_length - + transaction->actual_bytes; + + /* + * ISOCHRONOUS transactions store each individual transfer size + * in the packet structure, not the global buffer_length + */ + if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) + bytes_to_transfer = + transaction->iso_packets[0].length - + transaction->actual_bytes; + + /* + * We need to do split transactions when we are talking to non + * high speed devices that are behind a high speed hub + */ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * On the start split phase (stage is even) record the + * frame number we will need to send the split complete. 
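+			 * (A bulk start split issued in frame N schedules
+			 * its complete split for frame N + 1; other transfer
+			 * types use N + 2.)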
+ * We only store the lower two bits since the time ahead + * can only be two frames + */ + if ((transaction->stage & 1) == 0) { + if (transaction->type == CVMX_USB_TRANSFER_BULK) + pipe->split_sc_frame = + (usb->frame_number + 1) & 0x7f; + else + pipe->split_sc_frame = + (usb->frame_number + 2) & 0x7f; + } else { + pipe->split_sc_frame = -1; + } + + usbc_hcsplt.s.spltena = 1; + usbc_hcsplt.s.hubaddr = pipe->hub_device_addr; + usbc_hcsplt.s.prtaddr = pipe->hub_port; + usbc_hcsplt.s.compsplt = (transaction->stage == + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE); + + /* + * SPLIT transactions can only ever transmit one data + * packet so limit the transfer size to the max packet + * size + */ + if (bytes_to_transfer > pipe->max_packet) + bytes_to_transfer = pipe->max_packet; + + /* + * ISOCHRONOUS OUT splits are unique in that they limit + * data transfers to 188 byte chunks representing the + * begin/middle/end of the data or all + */ + if (!usbc_hcsplt.s.compsplt && + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && + (pipe->transfer_type == + CVMX_USB_TRANSFER_ISOCHRONOUS)) { + /* + * Clear the split complete frame number as + * there isn't going to be a split complete + */ + pipe->split_sc_frame = -1; + /* + * See if we've started this transfer and sent + * data + */ + if (transaction->actual_bytes == 0) { + /* + * Nothing sent yet, this is either a + * begin or the entire payload + */ + if (bytes_to_transfer <= 188) + /* Entire payload in one go */ + usbc_hcsplt.s.xactpos = 3; + else + /* First part of payload */ + usbc_hcsplt.s.xactpos = 2; + } else { + /* + * Continuing the previous data, we must + * either be in the middle or at the end + */ + if (bytes_to_transfer <= 188) + /* End of payload */ + usbc_hcsplt.s.xactpos = 1; + else + /* Middle of payload */ + usbc_hcsplt.s.xactpos = 0; + } + /* + * Again, the transfer size is limited to 188 + * bytes + */ + if (bytes_to_transfer > 188) + bytes_to_transfer = 188; + } + } + + /* + * Make sure the transfer never exceeds the byte limit of the + * hardware. Further bytes will be sent as continued + * transactions + */ + if (bytes_to_transfer > MAX_TRANSFER_BYTES) { + /* + * Round MAX_TRANSFER_BYTES to a multiple of out packet + * size + */ + bytes_to_transfer = MAX_TRANSFER_BYTES / + pipe->max_packet; + bytes_to_transfer *= pipe->max_packet; + } + + /* + * Calculate the number of packets to transfer. If the length is + * zero we still need to transfer one packet + */ + packets_to_transfer = + DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet); + if (packets_to_transfer == 0) { + packets_to_transfer = 1; + } else if ((packets_to_transfer > 1) && + (usb->init_flags & + CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { + /* + * Limit to one packet when not using DMA. 
Channels must
+			 * be restarted between every packet for IN
+			 * transactions, so there is no reason to do multiple
+			 * packets in a row
+			 */
+			packets_to_transfer = 1;
+			bytes_to_transfer = packets_to_transfer *
+				pipe->max_packet;
+		} else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+			/*
+			 * Limit the number of packets and data transferred to
+			 * what the hardware can handle
+			 */
+			packets_to_transfer = MAX_TRANSFER_PACKETS;
+			bytes_to_transfer = packets_to_transfer *
+				pipe->max_packet;
+		}
+
+		usbc_hctsiz.s.xfersize = bytes_to_transfer;
+		usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+		/* Update the DATA0/DATA1 toggle */
+		usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
+		/*
+		 * High speed pipes may need a hardware ping before they start
+		 */
+		if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING)
+			usbc_hctsiz.s.dopng = 1;
+
+		cvmx_usb_write_csr32(usb,
+				     CVMX_USBCX_HCSPLTX(channel, usb->index),
+				     usbc_hcsplt.u32);
+		cvmx_usb_write_csr32(usb,
+				     CVMX_USBCX_HCTSIZX(channel, usb->index),
+				     usbc_hctsiz.u32);
+	}
+
+	/* Setup the Host Channel Characteristics Register */
+	{
+		union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
+
+		/*
+		 * Set the startframe odd/even properly. This is only used for
+		 * periodic
+		 */
+		usbc_hcchar.s.oddfrm = usb->frame_number & 1;
+
+		/*
+		 * Set the number of back to back packets allowed by this
+		 * endpoint. Split transactions interpret "ec" as the number of
+		 * immediate retries of failure. These retries happen too
+		 * quickly, so we disable these entirely for splits
+		 */
+		if (cvmx_usb_pipe_needs_split(usb, pipe))
+			usbc_hcchar.s.ec = 1;
+		else if (pipe->multi_count < 1)
+			usbc_hcchar.s.ec = 1;
+		else if (pipe->multi_count > 3)
+			usbc_hcchar.s.ec = 3;
+		else
+			usbc_hcchar.s.ec = pipe->multi_count;
+
+		/* Set the rest of the endpoint specific settings */
+		usbc_hcchar.s.devaddr = pipe->device_addr;
+		usbc_hcchar.s.eptype = transaction->type;
+		usbc_hcchar.s.lspddev =
+			(pipe->device_speed == CVMX_USB_SPEED_LOW);
+		usbc_hcchar.s.epdir = pipe->transfer_dir;
+		usbc_hcchar.s.epnum = pipe->endpoint_num;
+		usbc_hcchar.s.mps = pipe->max_packet;
+		cvmx_usb_write_csr32(usb,
+				     CVMX_USBCX_HCCHARX(channel, usb->index),
+				     usbc_hcchar.u32);
+	}
+
+	/* Do transaction type specific fixups as needed */
+	switch (transaction->type) {
+	case CVMX_USB_TRANSFER_CONTROL:
+		cvmx_usb_start_channel_control(usb, channel, pipe);
+		break;
+	case CVMX_USB_TRANSFER_BULK:
+	case CVMX_USB_TRANSFER_INTERRUPT:
+		break;
+	case CVMX_USB_TRANSFER_ISOCHRONOUS:
+		if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
+			/*
+			 * ISO transactions require different PIDs depending on
+			 * direction and how many packets are needed
+			 */
+			if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+				if (pipe->multi_count < 2) /* Need DATA0 */
+					USB_SET_FIELD32(
+						CVMX_USBCX_HCTSIZX(channel,
+								   usb->index),
+						cvmx_usbcx_hctsizx, pid, 0);
+				else /* Need MDATA */
+					USB_SET_FIELD32(
+						CVMX_USBCX_HCTSIZX(channel,
+								   usb->index),
+						cvmx_usbcx_hctsizx, pid, 3);
+			}
+		}
+		break;
+	}
+	{
+		union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
+			cvmx_usb_read_csr32(usb,
+					    CVMX_USBCX_HCTSIZX(channel,
+							       usb->index))
+		};
+		transaction->xfersize = usbc_hctsiz.s.xfersize;
+		transaction->pktcnt = usbc_hctsiz.s.pktcnt;
+	}
+	/* Remember when we start a split transaction */
+	if (cvmx_usb_pipe_needs_split(usb, pipe))
+		usb->active_split = transaction;
+	USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+			cvmx_usbcx_hccharx, chena, 1);
+	if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+		cvmx_usb_fill_tx_fifo(usb, channel);
+}
+
+/**
+
* Find a pipe that is ready to be scheduled to hardware. + * @usb: USB device state populated by cvmx_usb_initialize(). + * @xfer_type: Transfer type + * + * Returns: Pipe or NULL if none are ready + */ +static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb, + enum cvmx_usb_transfer xfer_type) +{ + struct list_head *list = usb->active_pipes + xfer_type; + u64 current_frame = usb->frame_number; + struct cvmx_usb_pipe *pipe; + + list_for_each_entry(pipe, list, node) { + struct cvmx_usb_transaction *t = + list_first_entry(&pipe->transactions, typeof(*t), + node); + if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t && + (pipe->next_tx_frame <= current_frame) && + ((pipe->split_sc_frame == -1) || + ((((int)current_frame - pipe->split_sc_frame) & 0x7f) < + 0x40)) && + (!usb->active_split || (usb->active_split == t))) { + prefetch(t); + return pipe; + } + } + return NULL; +} + +static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb, + int is_sof) +{ + struct cvmx_usb_pipe *pipe; + + /* Find a pipe needing service. */ + if (is_sof) { + /* + * Only process periodic pipes on SOF interrupts. This way we + * are sure that the periodic data is sent in the beginning of + * the frame. + */ + pipe = cvmx_usb_find_ready_pipe(usb, + CVMX_USB_TRANSFER_ISOCHRONOUS); + if (pipe) + return pipe; + pipe = cvmx_usb_find_ready_pipe(usb, + CVMX_USB_TRANSFER_INTERRUPT); + if (pipe) + return pipe; + } + pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL); + if (pipe) + return pipe; + return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK); +} + +/** + * Called whenever a pipe might need to be scheduled to the + * hardware. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @is_sof: True if this schedule was called on a SOF interrupt. + */ +static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof) +{ + int channel; + struct cvmx_usb_pipe *pipe; + int need_sof; + enum cvmx_usb_transfer ttype; + + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { + /* + * Without DMA we need to be careful to not schedule something + * at the end of a frame and cause an overrun. 
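+		 *
+		 * (An illustrative aside, not part of the original patch: the
+		 * check below compares the time remaining in the current frame
+		 * against a quarter of the frame interval.  With hypothetical
+		 * numbers, if hfir.s.frint is 48000 PHY clocks per frame and
+		 * hfnum.s.frrem reports 9000 clocks remaining, then
+		 * 9000 < 48000 / 4, so this schedule pass is skipped until the
+		 * next SOF interrupt.)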
+ */ + union cvmx_usbcx_hfnum hfnum = { + .u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HFNUM(usb->index)) + }; + + union cvmx_usbcx_hfir hfir = { + .u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HFIR(usb->index)) + }; + + if (hfnum.s.frrem < hfir.s.frint / 4) + goto done; + } + + while (usb->idle_hardware_channels) { + /* Find an idle channel */ + channel = __fls(usb->idle_hardware_channels); + if (unlikely(channel > 7)) + break; + + pipe = cvmx_usb_next_pipe(usb, is_sof); + if (!pipe) + break; + + cvmx_usb_start_channel(usb, channel, pipe); + } + +done: + /* + * Only enable SOF interrupts when we have transactions pending in the + * future that might need to be scheduled + */ + need_sof = 0; + for (ttype = CVMX_USB_TRANSFER_CONTROL; + ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) { + list_for_each_entry(pipe, &usb->active_pipes[ttype], node) { + if (pipe->next_tx_frame > usb->frame_number) { + need_sof = 1; + break; + } + } + } + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, sofmsk, need_sof); +} + +static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb, + enum cvmx_usb_status status, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction + *transaction, + int bytes_transferred, + struct urb *urb) +{ + struct usb_hcd *hcd = octeon_to_hcd(usb); + struct device *dev = hcd->self.controller; + + if (likely(status == CVMX_USB_STATUS_OK)) + urb->actual_length = bytes_transferred; + else + urb->actual_length = 0; + + urb->hcpriv = NULL; + + /* For Isochronous transactions we need to update the URB packet status + * list from data in our private copy + */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + int i; + /* + * The pointer to the private list is stored in the setup_packet + * field. + */ + struct cvmx_usb_iso_packet *iso_packet = + (struct cvmx_usb_iso_packet *)urb->setup_packet; + /* Recalculate the transfer size by adding up each packet */ + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) { + if (iso_packet[i].status == CVMX_USB_STATUS_OK) { + urb->iso_frame_desc[i].status = 0; + urb->iso_frame_desc[i].actual_length = + iso_packet[i].length; + urb->actual_length += + urb->iso_frame_desc[i].actual_length; + } else { + dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n", + i, urb->number_of_packets, + iso_packet[i].status, pipe, + transaction, iso_packet[i].length); + urb->iso_frame_desc[i].status = -EREMOTEIO; + } + } + /* Free the private list now that we don't need it anymore */ + kfree(iso_packet); + urb->setup_packet = NULL; + } + + switch (status) { + case CVMX_USB_STATUS_OK: + urb->status = 0; + break; + case CVMX_USB_STATUS_CANCEL: + if (urb->status == 0) + urb->status = -ENOENT; + break; + case CVMX_USB_STATUS_STALL: + dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n", + pipe, transaction, bytes_transferred); + urb->status = -EPIPE; + break; + case CVMX_USB_STATUS_BABBLEERR: + dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n", + pipe, transaction, bytes_transferred); + urb->status = -EPIPE; + break; + case CVMX_USB_STATUS_SHORT: + dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n", + pipe, transaction, bytes_transferred); + urb->status = -EREMOTEIO; + break; + case CVMX_USB_STATUS_ERROR: + case CVMX_USB_STATUS_XACTERR: + case CVMX_USB_STATUS_DATATGLERR: + case CVMX_USB_STATUS_FRAMEERR: + dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n", + status, pipe, transaction, bytes_transferred); + urb->status = -EPROTO; + break; + } + 
usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb); + spin_unlock(&usb->lock); + usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status); + spin_lock(&usb->lock); +} + +/** + * Signal the completion of a transaction and free it. The + * transaction will be removed from the pipe transaction list. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Pipe the transaction is on + * @transaction: + * Transaction that completed + * @complete_code: + * Completion code + */ +static void cvmx_usb_complete(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + enum cvmx_usb_status complete_code) +{ + /* If this was a split then clear our split in progress marker */ + if (usb->active_split == transaction) + usb->active_split = NULL; + + /* + * Isochronous transactions need extra processing as they might not be + * done after a single data transfer + */ + if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) { + /* Update the number of bytes transferred in this ISO packet */ + transaction->iso_packets[0].length = transaction->actual_bytes; + transaction->iso_packets[0].status = complete_code; + + /* + * If there are more ISOs pending and we succeeded, schedule the + * next one + */ + if ((transaction->iso_number_packets > 1) && + (complete_code == CVMX_USB_STATUS_OK)) { + /* No bytes transferred for this packet as of yet */ + transaction->actual_bytes = 0; + /* One less ISO waiting to transfer */ + transaction->iso_number_packets--; + /* Increment to the next location in our packet array */ + transaction->iso_packets++; + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + return; + } + } + + /* Remove the transaction from the pipe list */ + list_del(&transaction->node); + if (list_empty(&pipe->transactions)) + list_move_tail(&pipe->node, &usb->idle_pipes); + octeon_usb_urb_complete_callback(usb, complete_code, pipe, + transaction, + transaction->actual_bytes, + transaction->urb); + kfree(transaction); +} + +/** + * Submit a usb transaction to a pipe. Called for all types + * of transactions. + * + * @usb: + * @pipe: Which pipe to submit to. + * @type: Transaction type + * @buffer: User buffer for the transaction + * @buffer_length: + * User buffer's length in bytes + * @control_header: + * For control transactions, the 8 byte standard header + * @iso_start_frame: + * For ISO transactions, the start frame + * @iso_number_packets: + * For ISO, the number of packet in the transaction. + * @iso_packets: + * A description of each ISO packet + * @urb: URB for the callback + * + * Returns: Transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_transaction( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + enum cvmx_usb_transfer type, + u64 buffer, + int buffer_length, + u64 control_header, + int iso_start_frame, + int iso_number_packets, + struct cvmx_usb_iso_packet *iso_packets, + struct urb *urb) +{ + struct cvmx_usb_transaction *transaction; + + if (unlikely(pipe->transfer_type != type)) + return NULL; + + transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC); + if (unlikely(!transaction)) + return NULL; + + transaction->type = type; + transaction->buffer = buffer; + transaction->buffer_length = buffer_length; + transaction->control_header = control_header; + /* FIXME: This is not used, implement it. 
*/ + transaction->iso_start_frame = iso_start_frame; + transaction->iso_number_packets = iso_number_packets; + transaction->iso_packets = iso_packets; + transaction->urb = urb; + if (transaction->type == CVMX_USB_TRANSFER_CONTROL) + transaction->stage = CVMX_USB_STAGE_SETUP; + else + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + + if (!list_empty(&pipe->transactions)) { + list_add_tail(&transaction->node, &pipe->transactions); + } else { + list_add_tail(&transaction->node, &pipe->transactions); + list_move_tail(&pipe->node, + &usb->active_pipes[pipe->transfer_type]); + + /* + * We may need to schedule the pipe if this was the head of the + * pipe. + */ + cvmx_usb_schedule(usb, 0); + } + + return transaction; +} + +/** + * Call to submit a USB Bulk transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB. + * + * Returns: A submitted transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_bulk( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct urb *urb) +{ + return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK, + urb->transfer_dma, + urb->transfer_buffer_length, + 0, /* control_header */ + 0, /* iso_start_frame */ + 0, /* iso_number_packets */ + NULL, /* iso_packets */ + urb); +} + +/** + * Call to submit a USB Interrupt transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB returned when the callback is called. + * + * Returns: A submitted transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct urb *urb) +{ + return cvmx_usb_submit_transaction(usb, pipe, + CVMX_USB_TRANSFER_INTERRUPT, + urb->transfer_dma, + urb->transfer_buffer_length, + 0, /* control_header */ + 0, /* iso_start_frame */ + 0, /* iso_number_packets */ + NULL, /* iso_packets */ + urb); +} + +/** + * Call to submit a USB Control transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB. + * + * Returns: A submitted transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_control( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct urb *urb) +{ + int buffer_length = urb->transfer_buffer_length; + u64 control_header = urb->setup_dma; + struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header); + + if ((header->bRequestType & USB_DIR_IN) == 0) + buffer_length = le16_to_cpu(header->wLength); + + return cvmx_usb_submit_transaction(usb, pipe, + CVMX_USB_TRANSFER_CONTROL, + urb->transfer_dma, buffer_length, + control_header, + 0, /* iso_start_frame */ + 0, /* iso_number_packets */ + NULL, /* iso_packets */ + urb); +} + +/** + * Call to submit a USB Isochronous transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB returned when the callback is called. + * + * Returns: A submitted transaction or NULL on failure. 
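+ *
+ * (A hedged usage sketch, not from the original patch: the private ISO
+ * packet list rides in urb->setup_packet, so the enqueue path below
+ * effectively does
+ *
+ *	urb->setup_packet = (char *)iso_packet;
+ *	transaction = cvmx_usb_submit_isochronous(usb, pipe, urb);
+ *
+ * and octeon_usb_urb_complete_callback() frees the list again.)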
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
+						struct octeon_hcd *usb,
+						struct cvmx_usb_pipe *pipe,
+						struct urb *urb)
+{
+	struct cvmx_usb_iso_packet *packets;
+
+	packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
+	return cvmx_usb_submit_transaction(usb, pipe,
+					   CVMX_USB_TRANSFER_ISOCHRONOUS,
+					   urb->transfer_dma,
+					   urb->transfer_buffer_length,
+					   0, /* control_header */
+					   urb->start_frame,
+					   urb->number_of_packets,
+					   packets, urb);
+}
+
+/**
+ * Cancel one outstanding request in a pipe. Canceling a request
+ * can fail if the transaction has already completed before cancel
+ * is called. Even after a successful cancel call, it may take
+ * a frame or two for the cvmx_usb_poll() function to call the
+ * associated callback.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to cancel requests in.
+ * @transaction: Transaction to cancel, returned by the submit function.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_cancel(struct octeon_hcd *usb,
+			   struct cvmx_usb_pipe *pipe,
+			   struct cvmx_usb_transaction *transaction)
+{
+	/*
+	 * If the transaction is the HEAD of the queue and is scheduled, we
+	 * need to treat it specially
+	 */
+	if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
+	    transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
+		union cvmx_usbcx_hccharx usbc_hcchar;
+
+		usb->pipe_for_channel[pipe->channel] = NULL;
+		pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+		CVMX_SYNCW;
+
+		usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
+				CVMX_USBCX_HCCHARX(pipe->channel,
+						   usb->index));
+		/*
+		 * If the channel isn't enabled then the transaction already
+		 * completed.
+		 */
+		if (usbc_hcchar.s.chena) {
+			usbc_hcchar.s.chdis = 1;
+			cvmx_usb_write_csr32(usb,
+					     CVMX_USBCX_HCCHARX(pipe->channel,
+								usb->index),
+					     usbc_hcchar.u32);
+		}
+	}
+	cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
+	return 0;
+}
+
+/**
+ * Cancel all outstanding requests in a pipe. Logically all this
+ * does is call cvmx_usb_cancel() in a loop.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to cancel requests in.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
+			       struct cvmx_usb_pipe *pipe)
+{
+	struct cvmx_usb_transaction *transaction, *next;
+
+	/* Simply loop through and attempt to cancel each transaction */
+	list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
+		int result = cvmx_usb_cancel(usb, pipe, transaction);
+
+		if (unlikely(result != 0))
+			return result;
+	}
+	return 0;
+}
+
+/**
+ * Close a pipe created with cvmx_usb_open_pipe().
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to close.
+ *
+ * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
+ * outstanding transfers.
+ */
+static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
+			       struct cvmx_usb_pipe *pipe)
+{
+	/* Fail if the pipe has pending transactions */
+	if (!list_empty(&pipe->transactions))
+		return -EBUSY;
+
+	list_del(&pipe->node);
+	kfree(pipe);
+
+	return 0;
+}
+
+/**
+ * Get the current USB protocol level frame number. The frame
+ * number is always in the range of 0-0x7ff.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
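+ *
+ * (Aside, for illustration only: 0x7ff is the USB-defined 11-bit SOF
+ * frame number, which increments once per (micro)frame and wraps, so
+ * successor frames can be computed as
+ *
+ *	next = (frnum + 1) & 0x7ff;
+ *
+ * while cvmx_usb_poll() keeps a widened copy in usb->frame_number.)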
+ * + * Returns: USB frame number + */ +static int cvmx_usb_get_frame_number(struct octeon_hcd *usb) +{ + union cvmx_usbcx_hfnum usbc_hfnum; + + usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index)); + + return usbc_hfnum.s.frnum; +} + +static void cvmx_usb_transfer_control(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + union cvmx_usbcx_hccharx usbc_hcchar, + int buffer_space_left, + int bytes_in_last_packet) +{ + switch (transaction->stage) { + case CVMX_USB_STAGE_NON_CONTROL: + case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: + /* This should be impossible */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); + break; + case CVMX_USB_STAGE_SETUP: + pipe->pid_toggle = 1; + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->stage = + CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE; + } else { + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + if (header->wLength) + transaction->stage = CVMX_USB_STAGE_DATA; + else + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: + { + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + if (header->wLength) + transaction->stage = CVMX_USB_STAGE_DATA; + else + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_DATA: + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE; + /* + * For setup OUT data that are splits, + * the hardware doesn't appear to count + * transferred data. Here we manually + * update the data transferred + */ + if (!usbc_hcchar.s.epdir) { + if (buffer_space_left < pipe->max_packet) + transaction->actual_bytes += + buffer_space_left; + else + transaction->actual_bytes += + pipe->max_packet; + } + } else if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->pid_toggle = 1; + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: + if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->pid_toggle = 1; + transaction->stage = CVMX_USB_STAGE_STATUS; + } else { + transaction->stage = CVMX_USB_STAGE_DATA; + } + break; + case CVMX_USB_STAGE_STATUS: + if (cvmx_usb_pipe_needs_split(usb, pipe)) + transaction->stage = + CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE; + else + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + break; + case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + break; + } +} + +static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + union cvmx_usbcx_hcintx usbc_hcint, + int buffer_space_left, + int bytes_in_last_packet) +{ + /* + * The only time a bulk transfer isn't complete when it finishes with + * an ACK is during a split transaction. For splits we need to continue + * the transfer if more data is needed. 
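+	 *
+	 * (A hedged note on the stage encoding this relies on: the
+	 * *_SPLIT_COMPLETE stages are the odd values, one above their
+	 * start-split counterparts, which is why the retry paths can rewind
+	 * with
+	 *
+	 *	transaction->stage &= ~1;
+	 *
+	 * to get back to the start split.)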
+ */ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + else if (buffer_space_left && + (bytes_in_last_packet == pipe->max_packet)) + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + else + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } else { + if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) && + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && + (usbc_hcint.s.nak)) + pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; + if (!buffer_space_left || + (bytes_in_last_packet < pipe->max_packet)) + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } +} + +static void cvmx_usb_transfer_intr(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + int buffer_space_left, + int bytes_in_last_packet) +{ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) { + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + } else if (buffer_space_left && + (bytes_in_last_packet == pipe->max_packet)) { + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + } else { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + } else if (!buffer_space_left || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + } +} + +static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + int buffer_space_left, + int bytes_in_last_packet, + int bytes_this_transfer) +{ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * ISOCHRONOUS OUT splits don't require a complete split stage. + * Instead they use a sequence of begin OUT splits to transfer + * the data 188 bytes at a time. Once the transfer is complete, + * the pipe sleeps until the next schedule interval. + */ + if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) { + /* + * If no space left or this wasn't a max size packet + * then this transfer is complete. Otherwise start it + * again to send the next 188 bytes + */ + if (!buffer_space_left || (bytes_this_transfer < 188)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + return; + } + if (transaction->stage == + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) { + /* + * We are in the incoming data phase. 
Keep getting data
+			 * until we run out of space or get a small packet
+			 */
+			if ((buffer_space_left == 0) ||
+			    (bytes_in_last_packet < pipe->max_packet)) {
+				pipe->next_tx_frame += pipe->interval;
+				cvmx_usb_complete(usb, pipe, transaction,
+						  CVMX_USB_STATUS_OK);
+			}
+		} else {
+			transaction->stage =
+				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+		}
+	} else {
+		pipe->next_tx_frame += pipe->interval;
+		cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+	}
+}
+
+/**
+ * Poll a channel for status
+ *
+ * @usb: USB device
+ * @channel: Channel to poll
+ *
+ * Returns: Zero on success
+ */
+static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
+{
+	struct usb_hcd *hcd = octeon_to_hcd(usb);
+	struct device *dev = hcd->self.controller;
+	union cvmx_usbcx_hcintx usbc_hcint;
+	union cvmx_usbcx_hctsizx usbc_hctsiz;
+	union cvmx_usbcx_hccharx usbc_hcchar;
+	struct cvmx_usb_pipe *pipe;
+	struct cvmx_usb_transaction *transaction;
+	int bytes_this_transfer;
+	int bytes_in_last_packet;
+	int packets_processed;
+	int buffer_space_left;
+
+	/* Read the interrupt status bits for the channel */
+	usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
+				CVMX_USBCX_HCINTX(channel, usb->index));
+
+	if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+		usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
+				CVMX_USBCX_HCCHARX(channel,
+						   usb->index));
+
+		if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
+			/*
+			 * There seems to be a bug in CN31XX which can cause
+			 * interrupt IN transfers to get stuck until we do a
+			 * write of HCCHARX without changing things
+			 */
+			cvmx_usb_write_csr32(usb,
+					     CVMX_USBCX_HCCHARX(channel,
+								usb->index),
+					     usbc_hcchar.u32);
+			return 0;
+		}
+
+		/*
+		 * In non-DMA mode the channels don't halt themselves. We need
+		 * to manually disable channels that are left running
+		 */
+		if (!usbc_hcint.s.chhltd) {
+			if (usbc_hcchar.s.chena) {
+				union cvmx_usbcx_hcintmskx hcintmsk;
+				/* Disable all interrupts except CHHLTD */
+				hcintmsk.u32 = 0;
+				hcintmsk.s.chhltdmsk = 1;
+				cvmx_usb_write_csr32(usb,
+					CVMX_USBCX_HCINTMSKX(channel, usb->index),
+					hcintmsk.u32);
+				usbc_hcchar.s.chdis = 1;
+				cvmx_usb_write_csr32(usb,
+					CVMX_USBCX_HCCHARX(channel, usb->index),
+					usbc_hcchar.u32);
+				return 0;
+			} else if (usbc_hcint.s.xfercompl) {
+				/*
+				 * Successful IN/OUT with transfer complete.
+				 * Channel halt isn't needed.
+				 */
+			} else {
+				dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
+					usb->index, channel);
+				return 0;
+			}
+		}
+	} else {
+		/*
+		 * There are no interrupts that we need to process when the
+		 * channel is still running
+		 */
+		if (!usbc_hcint.s.chhltd)
+			return 0;
+	}
+
+	/* Disable the channel interrupts now that it is done */
+	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+	usb->idle_hardware_channels |= (1 << channel);
+
+	/* Make sure this channel is tied to a valid pipe */
+	pipe = usb->pipe_for_channel[channel];
+	prefetch(pipe);
+	if (!pipe)
+		return 0;
+	transaction = list_first_entry(&pipe->transactions,
+				       typeof(*transaction),
+				       node);
+	prefetch(transaction);
+
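+	/*
+	 * Summary for illustration (an editorial aside, not driver logic):
+	 * the non-DMA halt handling above reduces to this decision table:
+	 *
+	 *	chhltd	chena	action
+	 *	  1	  x	channel halted, process the result
+	 *	  0	  1	mask all but CHHLTD, set chdis, wait
+	 *	  0	  0	done if xfercompl, else log an error
+	 */
+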
+	/*
+	 * Disconnect this pipe from the HW channel. Later the schedule
+	 * function will figure out which pipe needs to go
+	 */
+	usb->pipe_for_channel[channel] = NULL;
+	pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+	/*
+	 * Read the channel config info so we can figure out how much data
+	 * transferred
+	 */
+	usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
+			CVMX_USBCX_HCCHARX(channel, usb->index));
+	usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
+			CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+	/*
+	 * Calculating the number of bytes successfully transferred is dependent
+	 * on the transfer direction
+	 */
+	packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
+	if (usbc_hcchar.s.epdir) {
+		/*
+		 * IN transactions are easy. For every byte received the
+		 * hardware decrements xfersize. All we need to do is subtract
+		 * the current value of xfersize from its starting value and we
+		 * know how many bytes were written to the buffer
+		 */
+		bytes_this_transfer = transaction->xfersize -
+			usbc_hctsiz.s.xfersize;
+	} else {
+		/*
+		 * OUT transactions don't decrement xfersize. Instead pktcnt is
+		 * decremented on every successful packet send. The hardware
+		 * does this when it receives an ACK, or NYET. If it doesn't
+		 * receive one of these responses pktcnt doesn't change
+		 */
+		bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
+		/*
+		 * The last packet may not be a full transfer if we didn't have
+		 * enough data
+		 */
+		if (bytes_this_transfer > transaction->xfersize)
+			bytes_this_transfer = transaction->xfersize;
+	}
+	/* Figure out how many bytes were in the last packet of the transfer */
+	if (packets_processed)
+		bytes_in_last_packet = bytes_this_transfer -
+			(packets_processed - 1) * usbc_hcchar.s.mps;
+	else
+		bytes_in_last_packet = bytes_this_transfer;
+
+	/*
+	 * As a special case, setup transactions output the setup header, not
+	 * the user's data. For this reason we don't count setup data as bytes
+	 * transferred
+	 */
+	if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
+	    (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+		bytes_this_transfer = 0;
+
+	/*
+	 * Add the bytes transferred to the running total. It is important that
+	 * bytes_this_transfer doesn't count any data that needs to be
+	 * retransmitted
+	 */
+	transaction->actual_bytes += bytes_this_transfer;
+	if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+		buffer_space_left = transaction->iso_packets[0].length -
+			transaction->actual_bytes;
+	else
+		buffer_space_left = transaction->buffer_length -
+			transaction->actual_bytes;
+
+	/*
+	 * We need to remember the PID toggle state for the next transaction.
+	 * The hardware already updated it for the next transaction
+	 */
+	pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
+
+	/*
+	 * For high speed bulk out, assume the next transaction will need to do
+	 * a ping before proceeding. If this isn't true the ACK processing below
+	 * will clear this flag
+	 */
+	if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+	    (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+	    (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+		pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+	if (WARN_ON_ONCE(bytes_this_transfer < 0)) {
+		/*
+		 * In some rare cases the DMA engine seems to get stuck and
+		 * keeps subtracting the same byte count over and over again.
+		 * In such a case we just need to fail every transaction.
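+		 *
+		 * For reference, a worked example of the byte accounting above
+		 * (hypothetical numbers): an IN transfer programmed with
+		 * xfersize 512 that halts with usbc_hctsiz.s.xfersize == 200
+		 * moved 512 - 200 = 312 bytes; an OUT transfer with mps 64
+		 * that completed 3 packets moved 3 * 64 = 192 bytes, clamped
+		 * to the programmed xfersize.  A negative result can only come
+		 * from this stuck-DMA case.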
+ */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); + return 0; + } + + if (usbc_hcint.s.stall) { + /* + * STALL as a response means this transaction cannot be + * completed because the device can't process transactions. Tell + * the user. Any data that was transferred will be counted on + * the actual bytes transferred + */ + pipe->pid_toggle = 0; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_STALL); + } else if (usbc_hcint.s.xacterr) { + /* + * XactErr as a response means the device signaled + * something wrong with the transfer. For example, PID + * toggle errors cause these. + */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_XACTERR); + } else if (usbc_hcint.s.bblerr) { + /* Babble Error (BblErr) */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_BABBLEERR); + } else if (usbc_hcint.s.datatglerr) { + /* Data toggle error */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_DATATGLERR); + } else if (usbc_hcint.s.nyet) { + /* + * NYET as a response is only allowed in three cases: as a + * response to a ping, as a response to a split transaction, and + * as a response to a bulk out. The ping case is handled by + * hardware, so we only have splits and bulk out + */ + if (!cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->retries = 0; + /* + * If there is more data to go then we need to try + * again. Otherwise this transaction is complete + */ + if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) + cvmx_usb_complete(usb, pipe, + transaction, + CVMX_USB_STATUS_OK); + } else { + /* + * Split transactions retry the split complete 4 times + * then rewind to the start split and do the entire + * transactions again + */ + transaction->retries++; + if ((transaction->retries & 0x3) == 0) { + /* + * Rewind to the beginning of the transaction by + * anding off the split complete bit + */ + transaction->stage &= ~1; + pipe->split_sc_frame = -1; + } + } + } else if (usbc_hcint.s.ack) { + transaction->retries = 0; + /* + * The ACK bit can only be checked after the other error bits. + * This is because a multi packet transfer may succeed in a + * number of packets and then get a different response on the + * last packet. In this case both ACK and the last response bit + * will be set. If none of the other response bits is set, then + * the last packet must have been an ACK + * + * Since we got an ACK, we know we don't need to do a ping on + * this pipe + */ + pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING; + + switch (transaction->type) { + case CVMX_USB_TRANSFER_CONTROL: + cvmx_usb_transfer_control(usb, pipe, transaction, + usbc_hcchar, + buffer_space_left, + bytes_in_last_packet); + break; + case CVMX_USB_TRANSFER_BULK: + cvmx_usb_transfer_bulk(usb, pipe, transaction, + usbc_hcint, buffer_space_left, + bytes_in_last_packet); + break; + case CVMX_USB_TRANSFER_INTERRUPT: + cvmx_usb_transfer_intr(usb, pipe, transaction, + buffer_space_left, + bytes_in_last_packet); + break; + case CVMX_USB_TRANSFER_ISOCHRONOUS: + cvmx_usb_transfer_isoc(usb, pipe, transaction, + buffer_space_left, + bytes_in_last_packet, + bytes_this_transfer); + break; + } + } else if (usbc_hcint.s.nak) { + /* + * If this was a split then clear our split in progress marker. + */ + if (usb->active_split == transaction) + usb->active_split = NULL; + /* + * NAK as a response means the device couldn't accept the + * transaction, but it should be retried in the future. 
Rewind
+		 * to the beginning of the transaction by anding off the split
+		 * complete bit. Retry in the next interval
+		 */
+		transaction->retries = 0;
+		transaction->stage &= ~1;
+		pipe->next_tx_frame += pipe->interval;
+		if (pipe->next_tx_frame < usb->frame_number)
+			pipe->next_tx_frame = usb->frame_number +
+				pipe->interval -
+				(usb->frame_number - pipe->next_tx_frame) %
+				pipe->interval;
+	} else {
+		struct cvmx_usb_port_status port;
+
+		port = cvmx_usb_get_status(usb);
+		if (port.port_enabled) {
+			/* We'll retry the exact same transaction again */
+			transaction->retries++;
+		} else {
+			/*
+			 * We get channel halted interrupts with no result bits
+			 * set when the cable is unplugged
+			 */
+			cvmx_usb_complete(usb, pipe, transaction,
+					  CVMX_USB_STATUS_ERROR);
+		}
+	}
+	return 0;
+}
+
+static void octeon_usb_port_callback(struct octeon_hcd *usb)
+{
+	spin_unlock(&usb->lock);
+	usb_hcd_poll_rh_status(octeon_to_hcd(usb));
+	spin_lock(&usb->lock);
+}
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt based operation.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_poll(struct octeon_hcd *usb)
+{
+	union cvmx_usbcx_hfnum usbc_hfnum;
+	union cvmx_usbcx_gintsts usbc_gintsts;
+
+	prefetch_range(usb, sizeof(*usb));
+
+	/* Update the frame counter */
+	usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+	if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
+		usb->frame_number += 0x4000;
+	usb->frame_number &= ~0x3fffull;
+	usb->frame_number |= usbc_hfnum.s.frnum;
+
+	/* Read the pending interrupts */
+	usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
+					       CVMX_USBCX_GINTSTS(usb->index));
+
+	/* Clear the interrupts now that we know about them */
+	cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
+			     usbc_gintsts.u32);
+
+	if (usbc_gintsts.s.rxflvl) {
+		/*
+		 * RxFIFO Non-Empty (RxFLvl)
+		 * Indicates that there is at least one packet pending to be
+		 * read from the RxFIFO.
+		 *
+		 * In DMA mode this is handled by hardware
+		 */
+		if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+			cvmx_usb_poll_rx_fifo(usb);
+	}
+	if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
+		/* Fill the Tx FIFOs when not in DMA mode */
+		if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+			cvmx_usb_poll_tx_fifo(usb);
+	}
+	if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
+		union cvmx_usbcx_hprt usbc_hprt;
+		/*
+		 * Disconnect Detected Interrupt (DisconnInt)
+		 * Asserted when a device disconnect is detected.
+		 *
+		 * Host Port Interrupt (PrtInt)
+		 * The core sets this bit to indicate a change in port status of
+		 * one of the O2P USB core ports in Host mode. The application
+		 * must read the Host Port Control and Status (HPRT) register to
+		 * determine the exact event that caused this interrupt. The
+		 * application must clear the appropriate status bit in the Host
+		 * Port Control and Status register to clear this bit.
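+		 *
+		 * (Aside on the frame counter update earlier in this function:
+		 * the hardware field is masked with 0x3fff, so the driver
+		 * widens it by adding 0x4000 whenever the value wraps.
+		 * Hypothetical example: stored low bits 0x3ffe with frnum now
+		 * reading 0x0001 means 0x3ffe > 0x0001, so 0x4000 is added
+		 * before the low bits are replaced.)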
+ * + * Call the user's port callback + */ + octeon_usb_port_callback(usb); + /* Clear the port change bits */ + usbc_hprt.u32 = + cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index)); + usbc_hprt.s.prtena = 0; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), + usbc_hprt.u32); + } + if (usbc_gintsts.s.hchint) { + /* + * Host Channels Interrupt (HChInt) + * The core sets this bit to indicate that an interrupt is + * pending on one of the channels of the core (in Host mode). + * The application must read the Host All Channels Interrupt + * (HAINT) register to determine the exact number of the channel + * on which the interrupt occurred, and then read the + * corresponding Host Channel-n Interrupt (HCINTn) register to + * determine the exact cause of the interrupt. The application + * must clear the appropriate status bit in the HCINTn register + * to clear this bit. + */ + union cvmx_usbcx_haint usbc_haint; + + usbc_haint.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HAINT(usb->index)); + while (usbc_haint.u32) { + int channel; + + channel = __fls(usbc_haint.u32); + cvmx_usb_poll_channel(usb, channel); + usbc_haint.u32 ^= 1 << channel; + } + } + + cvmx_usb_schedule(usb, usbc_gintsts.s.sof); + + return 0; +} + +/* convert between an HCD pointer and the corresponding struct octeon_hcd */ +static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd) +{ + return (struct octeon_hcd *)(hcd->hcd_priv); +} + +static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + unsigned long flags; + + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_poll(usb); + spin_unlock_irqrestore(&usb->lock, flags); + return IRQ_HANDLED; +} + +static int octeon_usb_start(struct usb_hcd *hcd) +{ + hcd->state = HC_STATE_RUNNING; + return 0; +} + +static void octeon_usb_stop(struct usb_hcd *hcd) +{ + hcd->state = HC_STATE_HALT; +} + +static int octeon_usb_get_frame_number(struct usb_hcd *hcd) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + + return cvmx_usb_get_frame_number(usb); +} + +static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + struct device *dev = hcd->self.controller; + struct cvmx_usb_transaction *transaction = NULL; + struct cvmx_usb_pipe *pipe; + unsigned long flags; + struct cvmx_usb_iso_packet *iso_packet; + struct usb_host_endpoint *ep = urb->ep; + int rc; + + urb->status = 0; + spin_lock_irqsave(&usb->lock, flags); + + rc = usb_hcd_link_urb_to_ep(hcd, urb); + if (rc) { + spin_unlock_irqrestore(&usb->lock, flags); + return rc; + } + + if (!ep->hcpriv) { + enum cvmx_usb_transfer transfer_type; + enum cvmx_usb_speed speed; + int split_device = 0; + int split_port = 0; + + switch (usb_pipetype(urb->pipe)) { + case PIPE_ISOCHRONOUS: + transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS; + break; + case PIPE_INTERRUPT: + transfer_type = CVMX_USB_TRANSFER_INTERRUPT; + break; + case PIPE_CONTROL: + transfer_type = CVMX_USB_TRANSFER_CONTROL; + break; + default: + transfer_type = CVMX_USB_TRANSFER_BULK; + break; + } + switch (urb->dev->speed) { + case USB_SPEED_LOW: + speed = CVMX_USB_SPEED_LOW; + break; + case USB_SPEED_FULL: + speed = CVMX_USB_SPEED_FULL; + break; + default: + speed = CVMX_USB_SPEED_HIGH; + break; + } + /* + * For slow devices on high speed ports we need to find the hub + * that does the speed translation so we know where to send the + * split transactions. 
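+		 *
+		 * (Hypothetical example: a full-speed device on port 2 of a
+		 * high-speed hub at device address 3 yields split_device = 3
+		 * and split_port = 2, so start/complete splits are addressed
+		 * to that hub's transaction translator.)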
+ */ + if (speed != CVMX_USB_SPEED_HIGH) { + /* + * Start at this device and work our way up the usb + * tree. + */ + struct usb_device *dev = urb->dev; + + while (dev->parent) { + /* + * If our parent is high speed then he'll + * receive the splits. + */ + if (dev->parent->speed == USB_SPEED_HIGH) { + split_device = dev->parent->devnum; + split_port = dev->portnum; + break; + } + /* + * Move up the tree one level. If we make it all + * the way up the tree, then the port must not + * be in high speed mode and we don't need a + * split. + */ + dev = dev->parent; + } + } + pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), speed, + le16_to_cpu(ep->desc.wMaxPacketSize) + & 0x7ff, + transfer_type, + usb_pipein(urb->pipe) ? + CVMX_USB_DIRECTION_IN : + CVMX_USB_DIRECTION_OUT, + urb->interval, + (le16_to_cpu(ep->desc.wMaxPacketSize) + >> 11) & 0x3, + split_device, split_port); + if (!pipe) { + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&usb->lock, flags); + dev_dbg(dev, "Failed to create pipe\n"); + return -ENOMEM; + } + ep->hcpriv = pipe; + } else { + pipe = ep->hcpriv; + } + + switch (usb_pipetype(urb->pipe)) { + case PIPE_ISOCHRONOUS: + dev_dbg(dev, "Submit isochronous to %d.%d\n", + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe)); + /* + * Allocate a structure to use for our private list of + * isochronous packets. + */ + iso_packet = kmalloc_array(urb->number_of_packets, + sizeof(struct cvmx_usb_iso_packet), + GFP_ATOMIC); + if (iso_packet) { + int i; + /* Fill the list with the data from the URB */ + for (i = 0; i < urb->number_of_packets; i++) { + iso_packet[i].offset = + urb->iso_frame_desc[i].offset; + iso_packet[i].length = + urb->iso_frame_desc[i].length; + iso_packet[i].status = CVMX_USB_STATUS_ERROR; + } + /* + * Store a pointer to the list in the URB setup_packet + * field. We know this currently isn't being used and + * this saves us a bunch of logic. + */ + urb->setup_packet = (char *)iso_packet; + transaction = cvmx_usb_submit_isochronous(usb, + pipe, urb); + /* + * If submit failed we need to free our private packet + * list. 
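+			 *
+			 * Lifecycle sketch (an editorial summary, not new
+			 * driver logic): kmalloc_array() fills iso_packet and
+			 * parks it in urb->setup_packet; on successful submit
+			 * it is freed later in
+			 * octeon_usb_urb_complete_callback(); on failure it is
+			 * freed right below and setup_packet is reset to NULL.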
+			 */
+			if (!transaction) {
+				urb->setup_packet = NULL;
+				kfree(iso_packet);
+			}
+		}
+		break;
+	case PIPE_INTERRUPT:
+		dev_dbg(dev, "Submit interrupt to %d.%d\n",
+			usb_pipedevice(urb->pipe),
+			usb_pipeendpoint(urb->pipe));
+		transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
+		break;
+	case PIPE_CONTROL:
+		dev_dbg(dev, "Submit control to %d.%d\n",
+			usb_pipedevice(urb->pipe),
+			usb_pipeendpoint(urb->pipe));
+		transaction = cvmx_usb_submit_control(usb, pipe, urb);
+		break;
+	case PIPE_BULK:
+		dev_dbg(dev, "Submit bulk to %d.%d\n",
+			usb_pipedevice(urb->pipe),
+			usb_pipeendpoint(urb->pipe));
+		transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
+		break;
+	}
+	if (!transaction) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		spin_unlock_irqrestore(&usb->lock, flags);
+		dev_dbg(dev, "Failed to submit\n");
+		return -ENOMEM;
+	}
+	urb->hcpriv = transaction;
+	spin_unlock_irqrestore(&usb->lock, flags);
+	return 0;
+}
+
+static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
+				  struct urb *urb,
+				  int status)
+{
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
+	unsigned long flags;
+	int rc;
+
+	if (!urb->dev)
+		return -EINVAL;
+
+	spin_lock_irqsave(&usb->lock, flags);
+
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc)
+		goto out;
+
+	urb->status = status;
+	cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
+
+out:
+	spin_unlock_irqrestore(&usb->lock, flags);
+
+	return rc;
+}
+
+static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
+					struct usb_host_endpoint *ep)
+{
+	struct device *dev = hcd->self.controller;
+
+	if (ep->hcpriv) {
+		struct octeon_hcd *usb = hcd_to_octeon(hcd);
+		struct cvmx_usb_pipe *pipe = ep->hcpriv;
+		unsigned long flags;
+
+		spin_lock_irqsave(&usb->lock, flags);
+		cvmx_usb_cancel_all(usb, pipe);
+		if (cvmx_usb_close_pipe(usb, pipe))
+			dev_dbg(dev, "Closing pipe %p failed\n", pipe);
+		spin_unlock_irqrestore(&usb->lock, flags);
+		ep->hcpriv = NULL;
+	}
+}
+
+static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
+	struct cvmx_usb_port_status port_status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&usb->lock, flags);
+	port_status = cvmx_usb_get_status(usb);
+	spin_unlock_irqrestore(&usb->lock, flags);
+	buf[0] = port_status.connect_change << 1;
+
+	return buf[0] != 0;
+}
+
+static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+				  u16 wIndex, char *buf, u16 wLength)
+{
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
+	struct device *dev = hcd->self.controller;
+	struct cvmx_usb_port_status usb_port_status;
+	int port_status;
+	struct usb_hub_descriptor *desc;
+	unsigned long flags;
+
+	switch (typeReq) {
+	case ClearHubFeature:
+		dev_dbg(dev, "ClearHubFeature\n");
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* Nothing required here */
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case ClearPortFeature:
+		dev_dbg(dev, "ClearPortFeature\n");
+		if (wIndex != 1) {
+			dev_dbg(dev, " INVALID\n");
+			return -EINVAL;
+		}
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			dev_dbg(dev, " ENABLE\n");
+			spin_lock_irqsave(&usb->lock, flags);
+			cvmx_usb_disable(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(dev, " SUSPEND\n");
+			/* Not supported on Octeon */
+			break;
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(dev, " POWER\n");
+			/* Not supported on Octeon */
+			break;
+		case USB_PORT_FEAT_INDICATOR:
+			dev_dbg(dev, " INDICATOR\n");
+			/* Port indicator not supported */
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			dev_dbg(dev, " C_CONNECTION\n");
+			/* Clears the driver's internal connect status change flag */
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			dev_dbg(dev, " C_RESET\n");
+			/*
+			 * Clears the driver's internal Port Reset Change flag.
+			 */
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			dev_dbg(dev, " C_ENABLE\n");
+			/*
+			 * Clears the driver's internal Port Enable/Disable
+			 * Change flag.
+			 */
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			dev_dbg(dev, " C_SUSPEND\n");
+			/*
+			 * Clears the driver's internal Port Suspend Change
+			 * flag, which is set when resume signaling on the host
+			 * port is complete.
+			 */
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			dev_dbg(dev, " C_OVER_CURRENT\n");
+			/* Clears the driver's overcurrent Change flag */
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
+			break;
+		default:
+			dev_dbg(dev, " UNKNOWN\n");
+			return -EINVAL;
+		}
+		break;
+	case GetHubDescriptor:
+		dev_dbg(dev, "GetHubDescriptor\n");
+		desc = (struct usb_hub_descriptor *)buf;
+		desc->bDescLength = 9;
+		desc->bDescriptorType = 0x29;
+		desc->bNbrPorts = 1;
+		desc->wHubCharacteristics = cpu_to_le16(0x08);
+		desc->bPwrOn2PwrGood = 1;
+		desc->bHubContrCurrent = 0;
+		desc->u.hs.DeviceRemovable[0] = 0;
+		desc->u.hs.DeviceRemovable[1] = 0xff;
+		break;
+	case GetHubStatus:
+		dev_dbg(dev, "GetHubStatus\n");
+		*(__le32 *)buf = 0;
+		break;
+	case GetPortStatus:
+		dev_dbg(dev, "GetPortStatus\n");
+		if (wIndex != 1) {
+			dev_dbg(dev, " INVALID\n");
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&usb->lock, flags);
+		usb_port_status = cvmx_usb_get_status(usb);
+		spin_unlock_irqrestore(&usb->lock, flags);
+		port_status = 0;
+
+		if (usb_port_status.connect_change) {
+			port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+			dev_dbg(dev, " C_CONNECTION\n");
+		}
+
+		if (usb_port_status.port_enabled) {
+			port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+			dev_dbg(dev, " C_ENABLE\n");
+		}
+
+		if (usb_port_status.connected) {
+			port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+			dev_dbg(dev, " CONNECTION\n");
+		}
+
+		if (usb_port_status.port_enabled) {
+			port_status |= (1 << USB_PORT_FEAT_ENABLE);
+			dev_dbg(dev, " ENABLE\n");
+		}
+
+		if (usb_port_status.port_over_current) {
+			port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+			dev_dbg(dev, " OVER_CURRENT\n");
+		}
+
+		if (usb_port_status.port_powered) {
+			port_status |= (1 << USB_PORT_FEAT_POWER);
+			dev_dbg(dev, " POWER\n");
+		}
+
+		if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
+			port_status |= USB_PORT_STAT_HIGH_SPEED;
+			dev_dbg(dev, " HIGHSPEED\n");
+		} else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
+			port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+			dev_dbg(dev, " LOWSPEED\n");
+		}
+
+		*((__le32 *)buf) = cpu_to_le32(port_status);
+		break;
+	case SetHubFeature:
+		dev_dbg(dev, "SetHubFeature\n");
+		/* No HUB features supported */
+		break;
+	case SetPortFeature:
+		dev_dbg(dev, "SetPortFeature\n");
+		if (wIndex != 1) {
+			dev_dbg(dev, " INVALID\n");
+			return -EINVAL;
+		}
+
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(dev, " SUSPEND\n");
+			return -EINVAL;
+		case 
USB_PORT_FEAT_POWER: + dev_dbg(dev, " POWER\n"); + /* + * Program the port power bit to drive VBUS on the USB. + */ + spin_lock_irqsave(&usb->lock, flags); + USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), + cvmx_usbcx_hprt, prtpwr, 1); + spin_unlock_irqrestore(&usb->lock, flags); + return 0; + case USB_PORT_FEAT_RESET: + dev_dbg(dev, " RESET\n"); + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_reset_port(usb); + spin_unlock_irqrestore(&usb->lock, flags); + return 0; + case USB_PORT_FEAT_INDICATOR: + dev_dbg(dev, " INDICATOR\n"); + /* Not supported */ + break; + default: + dev_dbg(dev, " UNKNOWN\n"); + return -EINVAL; + } + break; + default: + dev_dbg(dev, "Unknown root hub request\n"); + return -EINVAL; + } + return 0; +} + +static const struct hc_driver octeon_hc_driver = { + .description = "Octeon USB", + .product_desc = "Octeon Host Controller", + .hcd_priv_size = sizeof(struct octeon_hcd), + .irq = octeon_usb_irq, + .flags = HCD_MEMORY | HCD_DMA | HCD_USB2, + .start = octeon_usb_start, + .stop = octeon_usb_stop, + .urb_enqueue = octeon_usb_urb_enqueue, + .urb_dequeue = octeon_usb_urb_dequeue, + .endpoint_disable = octeon_usb_endpoint_disable, + .get_frame_number = octeon_usb_get_frame_number, + .hub_status_data = octeon_usb_hub_status_data, + .hub_control = octeon_usb_hub_control, + .map_urb_for_dma = octeon_map_urb_for_dma, + .unmap_urb_for_dma = octeon_unmap_urb_for_dma, +}; + +static int octeon_usb_probe(struct platform_device *pdev) +{ + int status; + int initialize_flags; + int usb_num; + struct resource *res_mem; + struct device_node *usbn_node; + int irq = platform_get_irq(pdev, 0); + struct device *dev = &pdev->dev; + struct octeon_hcd *usb; + struct usb_hcd *hcd; + u32 clock_rate = 48000000; + bool is_crystal_clock = false; + const char *clock_type; + int i; + + if (!dev->of_node) { + dev_err(dev, "Error: empty of_node\n"); + return -ENXIO; + } + usbn_node = dev->of_node->parent; + + i = of_property_read_u32(usbn_node, + "clock-frequency", &clock_rate); + if (i) + i = of_property_read_u32(usbn_node, + "refclk-frequency", &clock_rate); + if (i) { + dev_err(dev, "No USBN \"clock-frequency\"\n"); + return -ENXIO; + } + switch (clock_rate) { + case 12000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; + break; + case 24000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; + break; + case 48000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; + break; + default: + dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n", + clock_rate); + return -ENXIO; + } + + i = of_property_read_string(usbn_node, + "cavium,refclk-type", &clock_type); + if (i) + i = of_property_read_string(usbn_node, + "refclk-type", &clock_type); + + if (!i && strcmp("crystal", clock_type) == 0) + is_crystal_clock = true; + + if (is_crystal_clock) + initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; + else + initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) { + dev_err(dev, "found no memory resource\n"); + return -ENXIO; + } + usb_num = (res_mem->start >> 44) & 1; + + if (irq < 0) { + /* Defective device tree, but we know how to fix it. */ + irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56; + + irq = irq_create_mapping(NULL, hwirq); + } + + /* + * Set the DMA mask to 64bits so we get buffers already translated for + * DMA. 
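+	 *
+	 * (Aside, for illustration: the port index was decoded above from
+	 * bit 44 of the register block's physical address; with the
+	 * CVMX_USBCXBASE address 0x00016F0010000000 the expression
+	 * (start >> 44) & 1 yields 0, while the second controller's block
+	 * at +0x100000000000 sets bit 44 and yields 1.)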
+	 */
+	i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (i)
+		return i;
+
+	/*
+	 * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+	 * IOB priority registers. Under heavy network load USB
+	 * hardware can be starved by the IOB causing a crash. Give
+	 * it a priority boost if it has been waiting more than 400
+	 * cycles to avoid this situation.
+	 *
+	 * Testing indicates that a cnt_val of 8192 is not sufficient,
+	 * but no failures are seen with 4096. We choose a value of
+	 * 400 to give a safety factor of 10.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+		union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+		pri_cnt.u64 = 0;
+		pri_cnt.s.cnt_enb = 1;
+		pri_cnt.s.cnt_val = 400;
+		cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+	}
+
+	hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
+	if (!hcd) {
+		dev_dbg(dev, "Failed to allocate memory for HCD\n");
+		return -1;
+	}
+	hcd->uses_new_polling = 1;
+	usb = (struct octeon_hcd *)hcd->hcd_priv;
+
+	spin_lock_init(&usb->lock);
+
+	usb->init_flags = initialize_flags;
+
+	/* Initialize the USB state structure */
+	usb->index = usb_num;
+	INIT_LIST_HEAD(&usb->idle_pipes);
+	for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
+		INIT_LIST_HEAD(&usb->active_pipes[i]);
+
+	/* Due to an errata, CN31XX doesn't support DMA */
+	if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
+		usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+		/* Only use one channel with non DMA */
+		usb->idle_hardware_channels = 0x1;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+		/* CN5XXX have an errata with channel 3 */
+		usb->idle_hardware_channels = 0xf7;
+	} else {
+		usb->idle_hardware_channels = 0xff;
+	}
+
+	status = cvmx_usb_initialize(dev, usb);
+	if (status) {
+		dev_dbg(dev, "USB initialization failed with %d\n", status);
+		usb_put_hcd(hcd);
+		return -1;
+	}
+
+	status = usb_add_hcd(hcd, irq, 0);
+	if (status) {
+		dev_dbg(dev, "USB add HCD failed with %d\n", status);
+		usb_put_hcd(hcd);
+		return -1;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+
+	return 0;
+}
+
+static int octeon_usb_remove(struct platform_device *pdev)
+{
+	int status;
+	struct device *dev = &pdev->dev;
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
+	unsigned long flags;
+
+	usb_remove_hcd(hcd);
+	spin_lock_irqsave(&usb->lock, flags);
+	status = cvmx_usb_shutdown(usb);
+	spin_unlock_irqrestore(&usb->lock, flags);
+	if (status)
+		dev_dbg(dev, "USB shutdown failed with %d\n", status);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static const struct of_device_id octeon_usb_match[] = {
+	{
+		.compatible = "cavium,octeon-5750-usbc",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, octeon_usb_match);
+
+static struct platform_driver octeon_usb_driver = {
+	.driver = {
+		.name		= "octeon-hcd",
+		.of_match_table	= octeon_usb_match,
+	},
+	.probe = octeon_usb_probe,
+	.remove = octeon_usb_remove,
+};
+
+static int __init octeon_usb_driver_init(void)
+{
+	if (usb_disabled())
+		return 0;
+
+	return platform_driver_register(&octeon_usb_driver);
+}
+module_init(octeon_usb_driver_init);
+
+static void __exit octeon_usb_driver_exit(void)
+{
+	if (usb_disabled())
+		return;
+
+	platform_driver_unregister(&octeon_usb_driver);
+}
+module_exit(octeon_usb_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. 
OCTEON USB Host driver."); diff --git a/drivers/usb/host/octeon-hcd.h b/drivers/usb/host/octeon-hcd.h new file mode 100644 index 0000000000..9ed619c93a --- /dev/null +++ b/drivers/usb/host/octeon-hcd.h @@ -0,0 +1,1847 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Octeon HCD hardware register definitions. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Some parts of the code were originally released under BSD license: + * + * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights + * reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of Cavium Networks nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * This Software, including technical data, may be subject to U.S. export + * control laws, including the U.S. Export Administration Act and its associated + * regulations, and may be subject to export or import regulations in other + * countries. + * + * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" + * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR + * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO + * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION + * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM + * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, + * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF + * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR + * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR + * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
+ */
+
+#ifndef __OCTEON_HCD_H__
+#define __OCTEON_HCD_H__
+
+#include <asm/bitfield.h>
+
+#define CVMX_USBCXBASE 0x00016F0010000000ull
+#define CVMX_USBCXREG1(reg, bid) \
+ (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
+ ((bid) & 1) * 0x100000000000ull)
+#define CVMX_USBCXREG2(reg, bid, off) \
+ (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
+ (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32)
+
+#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid)
+#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid)
+#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid)
+#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid)
+#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid)
+#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid)
+#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid)
+#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid)
+#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid)
+#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid)
+#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid)
+#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid)
+#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid)
+#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off)
+#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid)
+#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off)
+#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off)
+#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off)
+#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off)
+#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid)
+#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid)
+#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid)
+#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid)
+#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid)
+
+#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
+#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
+
+#define CVMX_USBNXREG1(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
+#define CVMX_USBNXREG2(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
+
+#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
+#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
+#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
+#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
+
+/**
+ * cvmx_usbc#_gahbcfg
+ *
+ * Core AHB Configuration Register (GAHBCFG)
+ *
+ * This register can be used to configure the core after power-on or a change in
+ * mode of operation. This register mainly contains AHB system-related
+ * configuration parameters. The AHB is the processor interface to the O2P USB
+ * core. In general, software need not know about this interface except to
+ * program the values as specified.
+ *
+ * The application must program this register as part of the O2P USB core
+ * initialization. Do not change this register after the initial programming.
+ */
+union cvmx_usbcx_gahbcfg {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gahbcfg_s
+ * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
+ * Software should set this bit to 0x1.
+ * Indicates when the Periodic TxFIFO Empty Interrupt bit in the
+ * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
+ * bit is used only in Slave mode.
+ * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ * TxFIFO is half empty
+ * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ * TxFIFO is completely empty
+ * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
+ * Software should set this bit to 0x1.
+ * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
+ * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
+ * This bit is used only in Slave mode.
+ * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ * Periodic TxFIFO is half empty
+ * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ * Periodic TxFIFO is completely empty
+ * @dmaen: DMA Enable (DMAEn)
+ * * 1'b0: Core operates in Slave mode
+ * * 1'b1: Core operates in a DMA mode
+ * @hbstlen: Burst Length/Type (HBstLen)
+ * This field has no effect and should be left as 0x0.
+ * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk)
+ * Software should set this field to 0x1.
+ * The application uses this bit to mask or unmask the interrupt
+ * line assertion to itself. Irrespective of this bit's setting,
+ * the interrupt status registers are updated by the core.
+ * * 1'b0: Mask the interrupt assertion to the application.
+ * * 1'b1: Unmask the interrupt assertion to the application.
+ */
+ struct cvmx_usbcx_gahbcfg_s {
+ __BITFIELD_FIELD(u32 reserved_9_31 : 23,
+ __BITFIELD_FIELD(u32 ptxfemplvl : 1,
+ __BITFIELD_FIELD(u32 nptxfemplvl : 1,
+ __BITFIELD_FIELD(u32 reserved_6_6 : 1,
+ __BITFIELD_FIELD(u32 dmaen : 1,
+ __BITFIELD_FIELD(u32 hbstlen : 4,
+ __BITFIELD_FIELD(u32 glblintrmsk : 1,
+ ;)))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_ghwcfg3
+ *
+ * User HW Config3 Register (GHWCFG3)
+ *
+ * This register contains the configuration options of the O2P USB core.
+ */
+union cvmx_usbcx_ghwcfg3 {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_ghwcfg3_s
+ * @dfifodepth: DFIFO Depth (DfifoDepth)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 32
+ * * Maximum value is 32768
+ * @ahbphysync: AHB and PHY Synchronous (AhbPhySync)
+ * Indicates whether AHB and PHY clocks are synchronous to
+ * each other.
+ * * 1'b0: No
+ * * 1'b1: Yes
+ * This bit is tied to 1.
+ * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType)
+ * * 1'b0: Asynchronous reset is used in the core
+ * * 1'b1: Synchronous reset is used in the core
+ * @optfeature: Optional Features Removed (OptFeature)
+ * Indicates whether the User ID register, GPIO interface ports,
+ * and SOF toggle and counter ports were removed for gate count
+ * optimization.
+ * @vendor_control_interface_support: Vendor Control Interface Support
+ * * 1'b0: Vendor Control Interface is not available on the core.
+ * * 1'b1: Vendor Control Interface is available.
+ * @i2c_selection: I2C Selection
+ * * 1'b0: I2C Interface is not available on the core.
+ * * 1'b1: I2C Interface is available on the core.
+ * @otgen: OTG Function Enabled (OtgEn)
+ * The application uses this bit to indicate the O2P USB core's
+ * OTG capabilities.
+ * * 1'b0: Not OTG capable
+ * * 1'b1: OTG Capable
+ * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth)
+ * * 3'b000: 4 bits
+ * * 3'b001: 5 bits
+ * * 3'b010: 6 bits
+ * * 3'b011: 7 bits
+ * * 3'b100: 8 bits
+ * * 3'b101: 9 bits
+ * * 3'b110: 10 bits
+ * * Others: Reserved
+ * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth)
+ * * 4'b0000: 11 bits
+ * * 4'b0001: 12 bits
+ * - ...
+ * * 4'b1000: 19 bits + * * Others: Reserved + */ + struct cvmx_usbcx_ghwcfg3_s { + __BITFIELD_FIELD(u32 dfifodepth : 16, + __BITFIELD_FIELD(u32 reserved_13_15 : 3, + __BITFIELD_FIELD(u32 ahbphysync : 1, + __BITFIELD_FIELD(u32 rsttype : 1, + __BITFIELD_FIELD(u32 optfeature : 1, + __BITFIELD_FIELD(u32 vendor_control_interface_support : 1, + __BITFIELD_FIELD(u32 i2c_selection : 1, + __BITFIELD_FIELD(u32 otgen : 1, + __BITFIELD_FIELD(u32 pktsizewidth : 3, + __BITFIELD_FIELD(u32 xfersizewidth : 4, + ;)))))))))) + } s; +}; + +/** + * cvmx_usbc#_gintmsk + * + * Core Interrupt Mask Register (GINTMSK) + * + * This register works with the Core Interrupt register to interrupt the + * application. When an interrupt bit is masked, the interrupt associated with + * that bit will not be generated. However, the Core Interrupt (GINTSTS) + * register bit corresponding to that interrupt will still be set. + * Mask interrupt: 1'b0, Unmask interrupt: 1'b1 + */ +union cvmx_usbcx_gintmsk { + u32 u32; + /** + * struct cvmx_usbcx_gintmsk_s + * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask + * (WkUpIntMsk) + * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask + * (SessReqIntMsk) + * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk) + * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk) + * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk) + * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk) + * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk) + * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk) + * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk) + * Incomplete Isochronous OUT Transfer Mask + * (incompISOOUTMsk) + * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask + * (incompISOINMsk) + * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk) + * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk) + * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk) + * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk) + * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask + * (ISOOutDropMsk) + * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk) + * @usbrstmsk: USB Reset Mask (USBRstMsk) + * @usbsuspmsk: USB Suspend Mask (USBSuspMsk) + * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk) + * @i2cint: I2C Interrupt Mask (I2CINT) + * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk) + * I2C Carkit Interrupt Mask (I2CCKINTMsk) + * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk) + * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask + * (GINNakEffMsk) + * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk) + * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk) + * @sofmsk: Start of (micro)Frame Mask (SofMsk) + * @otgintmsk: OTG Interrupt Mask (OTGIntMsk) + * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk) + */ + struct cvmx_usbcx_gintmsk_s { + __BITFIELD_FIELD(u32 wkupintmsk : 1, + __BITFIELD_FIELD(u32 sessreqintmsk : 1, + __BITFIELD_FIELD(u32 disconnintmsk : 1, + __BITFIELD_FIELD(u32 conidstschngmsk : 1, + __BITFIELD_FIELD(u32 reserved_27_27 : 1, + __BITFIELD_FIELD(u32 ptxfempmsk : 1, + __BITFIELD_FIELD(u32 hchintmsk : 1, + __BITFIELD_FIELD(u32 prtintmsk : 1, + __BITFIELD_FIELD(u32 reserved_23_23 : 1, + __BITFIELD_FIELD(u32 fetsuspmsk : 1, + __BITFIELD_FIELD(u32 incomplpmsk : 1, + __BITFIELD_FIELD(u32 incompisoinmsk : 1, + __BITFIELD_FIELD(u32 oepintmsk : 1, + __BITFIELD_FIELD(u32 inepintmsk : 1, + __BITFIELD_FIELD(u32 epmismsk : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + 
__BITFIELD_FIELD(u32 eopfmsk : 1, + __BITFIELD_FIELD(u32 isooutdropmsk : 1, + __BITFIELD_FIELD(u32 enumdonemsk : 1, + __BITFIELD_FIELD(u32 usbrstmsk : 1, + __BITFIELD_FIELD(u32 usbsuspmsk : 1, + __BITFIELD_FIELD(u32 erlysuspmsk : 1, + __BITFIELD_FIELD(u32 i2cint : 1, + __BITFIELD_FIELD(u32 ulpickintmsk : 1, + __BITFIELD_FIELD(u32 goutnakeffmsk : 1, + __BITFIELD_FIELD(u32 ginnakeffmsk : 1, + __BITFIELD_FIELD(u32 nptxfempmsk : 1, + __BITFIELD_FIELD(u32 rxflvlmsk : 1, + __BITFIELD_FIELD(u32 sofmsk : 1, + __BITFIELD_FIELD(u32 otgintmsk : 1, + __BITFIELD_FIELD(u32 modemismsk : 1, + __BITFIELD_FIELD(u32 reserved_0_0 : 1, + ;)))))))))))))))))))))))))))))))) + } s; +}; + +/** + * cvmx_usbc#_gintsts + * + * Core Interrupt Register (GINTSTS) + * + * This register interrupts the application for system-level events in the + * current mode of operation (Device mode or Host mode). It is shown in + * Interrupt. Some of the bits in this register are valid only in Host mode, + * while others are valid in Device mode only. This register also indicates the + * current mode of operation. In order to clear the interrupt status bits of + * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status + * interrupts are read only; once software reads from or writes to the FIFO + * while servicing these interrupts, FIFO interrupt conditions are cleared + * automatically. + */ +union cvmx_usbcx_gintsts { + u32 u32; + /** + * struct cvmx_usbcx_gintsts_s + * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt) + * In Device mode, this interrupt is asserted when a resume is + * detected on the USB. In Host mode, this interrupt is asserted + * when a remote wakeup is detected on the USB. + * For more information on how to use this interrupt, see "Partial + * Power-Down and Clock Gating Programming Model" on + * page 353. + * @sessreqint: Session Request/New Session Detected Interrupt + * (SessReqInt) + * In Host mode, this interrupt is asserted when a session request + * is detected from the device. In Device mode, this interrupt is + * asserted when the utmiotg_bvalid signal goes high. + * For more information on how to use this interrupt, see "Partial + * Power-Down and Clock Gating Programming Model" on + * page 353. + * @disconnint: Disconnect Detected Interrupt (DisconnInt) + * Asserted when a device disconnect is detected. + * @conidstschng: Connector ID Status Change (ConIDStsChng) + * The core sets this bit when there is a change in connector ID + * status. + * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp) + * Asserted when the Periodic Transmit FIFO is either half or + * completely empty and there is space for at least one entry to be + * written in the Periodic Request Queue. The half or completely + * empty status is determined by the Periodic TxFIFO Empty Level + * bit in the Core AHB Configuration register + * (GAHBCFG.PTxFEmpLvl). + * @hchint: Host Channels Interrupt (HChInt) + * The core sets this bit to indicate that an interrupt is pending + * on one of the channels of the core (in Host mode). The + * application must read the Host All Channels Interrupt (HAINT) + * register to determine the exact number of the channel on which + * the interrupt occurred, and then read the corresponding Host + * Channel-n Interrupt (HCINTn) register to determine the exact + * cause of the interrupt. The application must clear the + * appropriate status bit in the HCINTn register to clear this bit. 
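The @hchint description above prescribes a two-level dispatch: HAINT names the channel, the per-channel HCINTn register names the cause, and writing the HCINTn bits back clears the chain. A minimal sketch of that loop, assuming the cvmx_usb_read_csr32()/cvmx_usb_write_csr32() accessors used elsewhere in octeon-hcd.c (the function name here is hypothetical):

	static void octeon_usb_poll_channels(struct octeon_hcd *usb)
	{
		union cvmx_usbcx_haint haint;
		int channel;

		haint.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINT(usb->index));
		for (channel = 0; channel < 16; channel++) {
			union cvmx_usbcx_hcintx hcint;

			if (!(haint.s.haint & (1 << channel)))
				continue;
			hcint.u32 = cvmx_usb_read_csr32(usb,
					CVMX_USBCX_HCINTX(channel, usb->index));
			/* ... act on hcint.s.chhltd, hcint.s.xfercompl, ... */
			/*
			 * Writing the handled bits back clears them, which in
			 * turn clears HAINT and GINTSTS.HChInt.
			 */
			cvmx_usb_write_csr32(usb,
					     CVMX_USBCX_HCINTX(channel, usb->index),
					     hcint.u32);
		}
	}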
+ * @prtint: Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application must
+ * read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ * @fetsusp: Data Fetch Suspended (FetSusp)
+ * This interrupt is valid only in DMA mode. This interrupt
+ * indicates that the core has stopped fetching data for IN
+ * endpoints due to the unavailability of TxFIFO space or Request
+ * Queue space. This interrupt is used by the application for an
+ * endpoint mismatch algorithm.
+ * @incomplp: Incomplete Periodic Transfer (incomplP)
+ * In Host mode, the core sets this interrupt bit when there are
+ * incomplete periodic transactions still pending which are
+ * scheduled for the current microframe.
+ * Incomplete Isochronous OUT Transfer (incompISOOUT)
+ * In Device mode, the core sets this interrupt to indicate that
+ * there is at least one isochronous OUT endpoint on which the
+ * transfer is not completed in the current microframe. This
+ * interrupt is asserted along with the End of Periodic Frame
+ * Interrupt (EOPF) bit in this register.
+ * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
+ * The core sets this interrupt to indicate that there is at least
+ * one isochronous IN endpoint on which the transfer is not
+ * completed in the current microframe. This interrupt is asserted
+ * along with the End of Periodic Frame Interrupt (EOPF) bit in
+ * this register.
+ * @oepint: OUT Endpoints Interrupt (OEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the OUT endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the OUT
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DOEPINTn register to clear this bit.
+ * @iepint: IN Endpoints Interrupt (IEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the IN endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the IN
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DIEPINTn register to clear this bit.
+ * @epmis: Endpoint Mismatch Interrupt (EPMis)
+ * Indicates that an IN token has been received for a non-periodic
+ * endpoint, but the data for another endpoint is present in the
+ * top of the Non-Periodic Transmit FIFO and the IN endpoint
+ * mismatch count programmed by the application has expired.
+ * @eopf: End of Periodic Frame Interrupt (EOPF)
+ * Indicates that the period specified in the Periodic Frame
+ * Interval field of the Device Configuration register
+ * (DCFG.PerFrInt) has been reached in the current microframe.
+ * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
+ * The core sets this bit when it fails to write an isochronous OUT
+ * packet into the RxFIFO because the RxFIFO doesn't have
+ * enough space to accommodate a maximum packet size packet
+ * for the isochronous OUT endpoint.
+ * @enumdone: Enumeration Done (EnumDone)
+ * The core sets this bit to indicate that speed enumeration is
+ * complete. The application must read the Device Status (DSTS)
+ * register to obtain the enumerated speed.
+ * @usbrst: USB Reset (USBRst)
+ * The core sets this bit to indicate that a reset is detected on
+ * the USB.
+ * @usbsusp: USB Suspend (USBSusp)
+ * The core sets this bit to indicate that a suspend was detected
+ * on the USB. The core enters the Suspended state when there
+ * is no activity on the phy_line_state_i signal for an extended
+ * period of time.
+ * @erlysusp: Early Suspend (ErlySusp)
+ * The core sets this bit to indicate that an Idle state has been
+ * detected on the USB for 3 ms.
+ * @i2cint: I2C Interrupt (I2CINT)
+ * This bit is always 0x0.
+ * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
+ * This bit is always 0x0.
+ * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
+ * Indicates that the Set Global OUT NAK bit in the Device Control
+ * register (DCTL.SGOUTNak), set by the application, has taken
+ * effect in the core. This bit can be cleared by writing the Clear
+ * Global OUT NAK bit in the Device Control register
+ * (DCTL.CGOUTNak).
+ * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
+ * Indicates that the Set Global Non-Periodic IN NAK bit in the
+ * Device Control register (DCTL.SGNPInNak), set by the
+ * application, has taken effect in the core. That is, the core has
+ * sampled the Global IN NAK bit set by the application. This bit
+ * can be cleared by clearing the Clear Global Non-Periodic IN
+ * NAK bit in the Device Control register (DCTL.CGNPInNak).
+ * This interrupt does not necessarily mean that a NAK handshake
+ * is sent out on the USB. The STALL bit takes precedence over
+ * the NAK bit.
+ * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
+ * This interrupt is asserted when the Non-Periodic TxFIFO is
+ * either half or completely empty, and there is space for at least
+ * one entry to be written to the Non-Periodic Transmit Request
+ * Queue. The half or completely empty status is determined by
+ * the Non-Periodic TxFIFO Empty Level bit in the Core AHB
+ * Configuration register (GAHBCFG.NPTxFEmpLvl).
+ * @rxflvl: RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be read
+ * from the RxFIFO.
+ * @sof: Start of (micro)Frame (Sof)
+ * In Host mode, the core sets this bit to indicate that an SOF
+ * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
+ * USB. The application must write a 1 to this bit to clear the
+ * interrupt.
+ * In Device mode, the core sets this bit to indicate that an
+ * SOF token has been received on the USB. The application can read
+ * the Device Status register to get the current (micro)frame
+ * number. This interrupt is seen only when the core is operating
+ * at either HS or FS.
+ * @otgint: OTG Interrupt (OTGInt)
+ * The core sets this bit to indicate an OTG protocol event. The
+ * application must read the OTG Interrupt Status (GOTGINT)
+ * register to determine the exact event that caused this
+ * interrupt. The application must clear the appropriate status bit
+ * in the GOTGINT register to clear this bit.
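For the R_SS_WC bits above, acknowledgement is write-one-to-clear. A short sketch of acking GINTSTS.Sof, again assuming the cvmx_usb_write_csr32() helper from the .c file:

	union cvmx_usbcx_gintsts gintsts;

	gintsts.u32 = 0;	/* don't ack interrupts we haven't handled */
	gintsts.s.sof = 1;	/* write 1'b1 to clear GINTSTS.Sof */
	cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), gintsts.u32);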
+ * @modemis: Mode Mismatch Interrupt (ModeMis) + * The core sets this bit when the application is trying to access: + * * A Host mode register, when the core is operating in Device + * mode + * * A Device mode register, when the core is operating in Host + * mode + * The register access is completed on the AHB with an OKAY + * response, but is ignored by the core internally and doesn't + * affect the operation of the core. + * @curmod: Current Mode of Operation (CurMod) + * Indicates the current mode of operation. + * * 1'b0: Device mode + * * 1'b1: Host mode + */ + struct cvmx_usbcx_gintsts_s { + __BITFIELD_FIELD(u32 wkupint : 1, + __BITFIELD_FIELD(u32 sessreqint : 1, + __BITFIELD_FIELD(u32 disconnint : 1, + __BITFIELD_FIELD(u32 conidstschng : 1, + __BITFIELD_FIELD(u32 reserved_27_27 : 1, + __BITFIELD_FIELD(u32 ptxfemp : 1, + __BITFIELD_FIELD(u32 hchint : 1, + __BITFIELD_FIELD(u32 prtint : 1, + __BITFIELD_FIELD(u32 reserved_23_23 : 1, + __BITFIELD_FIELD(u32 fetsusp : 1, + __BITFIELD_FIELD(u32 incomplp : 1, + __BITFIELD_FIELD(u32 incompisoin : 1, + __BITFIELD_FIELD(u32 oepint : 1, + __BITFIELD_FIELD(u32 iepint : 1, + __BITFIELD_FIELD(u32 epmis : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + __BITFIELD_FIELD(u32 eopf : 1, + __BITFIELD_FIELD(u32 isooutdrop : 1, + __BITFIELD_FIELD(u32 enumdone : 1, + __BITFIELD_FIELD(u32 usbrst : 1, + __BITFIELD_FIELD(u32 usbsusp : 1, + __BITFIELD_FIELD(u32 erlysusp : 1, + __BITFIELD_FIELD(u32 i2cint : 1, + __BITFIELD_FIELD(u32 ulpickint : 1, + __BITFIELD_FIELD(u32 goutnakeff : 1, + __BITFIELD_FIELD(u32 ginnakeff : 1, + __BITFIELD_FIELD(u32 nptxfemp : 1, + __BITFIELD_FIELD(u32 rxflvl : 1, + __BITFIELD_FIELD(u32 sof : 1, + __BITFIELD_FIELD(u32 otgint : 1, + __BITFIELD_FIELD(u32 modemis : 1, + __BITFIELD_FIELD(u32 curmod : 1, + ;)))))))))))))))))))))))))))))))) + } s; +}; + +/** + * cvmx_usbc#_gnptxfsiz + * + * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ) + * + * The application can program the RAM size and the memory start address for the + * Non-Periodic TxFIFO. + */ +union cvmx_usbcx_gnptxfsiz { + u32 u32; + /** + * struct cvmx_usbcx_gnptxfsiz_s + * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep) + * This value is in terms of 32-bit words. + * Minimum value is 16 + * Maximum value is 32768 + * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr) + * This field contains the memory start address for Non-Periodic + * Transmit FIFO RAM. + */ + struct cvmx_usbcx_gnptxfsiz_s { + __BITFIELD_FIELD(u32 nptxfdep : 16, + __BITFIELD_FIELD(u32 nptxfstaddr : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_gnptxsts + * + * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS) + * + * This read-only register contains the free space information for the + * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue. + */ +union cvmx_usbcx_gnptxsts { + u32 u32; + /** + * struct cvmx_usbcx_gnptxsts_s + * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop) + * Entry in the Non-Periodic Tx Request Queue that is currently + * being processed by the MAC. + * * Bits [30:27]: Channel/endpoint number + * * Bits [26:25]: + * - 2'b00: IN/OUT token + * - 2'b01: Zero-length transmit packet (device IN/host OUT) + * - 2'b10: PING/CSPLIT token + * - 2'b11: Channel halt command + * * Bit [24]: Terminate (last entry for selected channel/endpoint) + * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available + * (NPTxQSpcAvail) + * Indicates the amount of free space available in the Non- + * Periodic Transmit Request Queue. 
This queue holds both IN + * and OUT requests in Host mode. Device mode has only IN + * requests. + * * 8'h0: Non-Periodic Transmit Request Queue is full + * * 8'h1: 1 location available + * * 8'h2: 2 locations available + * * n: n locations available (0..8) + * * Others: Reserved + * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail) + * Indicates the amount of free space available in the Non- + * Periodic TxFIFO. + * Values are in terms of 32-bit words. + * * 16'h0: Non-Periodic TxFIFO is full + * * 16'h1: 1 word available + * * 16'h2: 2 words available + * * 16'hn: n words available (where 0..32768) + * * 16'h8000: 32768 words available + * * Others: Reserved + */ + struct cvmx_usbcx_gnptxsts_s { + __BITFIELD_FIELD(u32 reserved_31_31 : 1, + __BITFIELD_FIELD(u32 nptxqtop : 7, + __BITFIELD_FIELD(u32 nptxqspcavail : 8, + __BITFIELD_FIELD(u32 nptxfspcavail : 16, + ;)))) + } s; +}; + +/** + * cvmx_usbc#_grstctl + * + * Core Reset Register (GRSTCTL) + * + * The application uses this register to reset various hardware features inside + * the core. + */ +union cvmx_usbcx_grstctl { + u32 u32; + /** + * struct cvmx_usbcx_grstctl_s + * @ahbidle: AHB Master Idle (AHBIdle) + * Indicates that the AHB Master State Machine is in the IDLE + * condition. + * @dmareq: DMA Request Signal (DMAReq) + * Indicates that the DMA request is in progress. Used for debug. + * @txfnum: TxFIFO Number (TxFNum) + * This is the FIFO number that must be flushed using the TxFIFO + * Flush bit. This field must not be changed until the core clears + * the TxFIFO Flush bit. + * * 5'h0: Non-Periodic TxFIFO flush + * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic + * TxFIFO flush in Host mode + * * 5'h2: Periodic TxFIFO 2 flush in Device mode + * - ... + * * 5'hF: Periodic TxFIFO 15 flush in Device mode + * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the + * core + * @txfflsh: TxFIFO Flush (TxFFlsh) + * This bit selectively flushes a single or all transmit FIFOs, but + * cannot do so if the core is in the midst of a transaction. + * The application must only write this bit after checking that the + * core is neither writing to the TxFIFO nor reading from the + * TxFIFO. + * The application must wait until the core clears this bit before + * performing any operations. This bit takes 8 clocks (of phy_clk + * or hclk, whichever is slower) to clear. + * @rxfflsh: RxFIFO Flush (RxFFlsh) + * The application can flush the entire RxFIFO using this bit, but + * must first ensure that the core is not in the middle of a + * transaction. + * The application must only write to this bit after checking that + * the core is neither reading from the RxFIFO nor writing to the + * RxFIFO. + * The application must wait until the bit is cleared before + * performing any other operations. This bit will take 8 clocks + * (slowest of PHY or AHB clock) to clear. + * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh) + * The application writes this bit to flush the IN Token Sequence + * Learning Queue. + * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst) + * The application writes this bit to reset the (micro)frame number + * counter inside the core. When the (micro)frame counter is reset, + * the subsequent SOF sent out by the core will have a + * (micro)frame number of 0. + * @hsftrst: HClk Soft Reset (HSftRst) + * The application uses this bit to flush the control logic in the + * AHB Clock domain. Only AHB Clock Domain pipelines are reset. + * * FIFOs are not flushed with this bit. 
+ * * All state machines in the AHB clock domain are reset to the
+ * Idle state after terminating the transactions on the AHB,
+ * following the protocol.
+ * * CSR control bits used by the AHB clock domain state
+ * machines are cleared.
+ * * To clear this interrupt, status mask bits that control the
+ * interrupt status and are generated by the AHB clock domain
+ * state machine are cleared.
+ * * Because interrupt status bits are not cleared, the application
+ * can get the status of any core events that occurred after it set
+ * this bit.
+ * This is a self-clearing bit that the core clears after all
+ * necessary logic is reset in the core. This may take several
+ * clocks, depending on the core's current state.
+ * @csftrst: Core Soft Reset (CSftRst)
+ * Resets the hclk and phy_clock domains as follows:
+ * * Clears the interrupts and all the CSR registers except the
+ * following register bits:
+ * - PCGCCTL.RstPdwnModule
+ * - PCGCCTL.GateHclk
+ * - PCGCCTL.PwrClmp
+ * - PCGCCTL.StopPPhyLPwrClkSelclk
+ * - GUSBCFG.PhyLPwrClkSel
+ * - GUSBCFG.DDRSel
+ * - GUSBCFG.PHYSel
+ * - GUSBCFG.FSIntf
+ * - GUSBCFG.ULPI_UTMI_Sel
+ * - GUSBCFG.PHYIf
+ * - HCFG.FSLSPclkSel
+ * - DCFG.DevSpd
+ * * All module state machines (except the AHB Slave Unit) are
+ * reset to the IDLE state, and all the transmit FIFOs and the
+ * receive FIFO are flushed.
+ * * Any transactions on the AHB Master are terminated as soon
+ * as possible, after gracefully completing the last data phase of
+ * an AHB transfer. Any transactions on the USB are terminated
+ * immediately.
+ * The application can write to this bit any time it wants to reset
+ * the core. This is a self-clearing bit and the core clears this
+ * bit after all the necessary logic is reset in the core, which
+ * may take several clocks, depending on the current state of the
+ * core. Once this bit is cleared software should wait at least 3
+ * PHY clocks before doing any access to the PHY domain
+ * (synchronization delay). Software should also check that
+ * bit 31 of this register is 1 (AHB Master is IDLE) before
+ * starting any operation.
+ * Typically software reset is used during software development
+ * and also when you dynamically change the PHY selection bits
+ * in the USB configuration registers listed above. When you
+ * change the PHY, the corresponding clock for the PHY is
+ * selected and used in the PHY domain. Once a new clock is
+ * selected, the PHY domain has to be reset for proper operation.
+ */
+ struct cvmx_usbcx_grstctl_s {
+ __BITFIELD_FIELD(u32 ahbidle : 1,
+ __BITFIELD_FIELD(u32 dmareq : 1,
+ __BITFIELD_FIELD(u32 reserved_11_29 : 19,
+ __BITFIELD_FIELD(u32 txfnum : 5,
+ __BITFIELD_FIELD(u32 txfflsh : 1,
+ __BITFIELD_FIELD(u32 rxfflsh : 1,
+ __BITFIELD_FIELD(u32 intknqflsh : 1,
+ __BITFIELD_FIELD(u32 frmcntrrst : 1,
+ __BITFIELD_FIELD(u32 hsftrst : 1,
+ __BITFIELD_FIELD(u32 csftrst : 1,
+ ;))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_grxfsiz
+ *
+ * Receive FIFO Size Register (GRXFSIZ)
+ *
+ * The application can program the RAM size that must be allocated to the
+ * RxFIFO.
+ */
+union cvmx_usbcx_grxfsiz {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_grxfsiz_s
+ * @rxfdep: RxFIFO Depth (RxFDep)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 16
+ * * Maximum value is 32768
+ */
+ struct cvmx_usbcx_grxfsiz_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 rxfdep : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_grxstsph
+ *
+ * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
+ *
+ * A read to the Receive Status Read and Pop register returns and additionally
+ * pops the top data entry out of the RxFIFO.
+ * This description is only valid when the core is in Host Mode. For Device Mode
+ * use USBC_GRXSTSPD instead.
+ * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
+ * same offset in the O2P USB core. The offset difference shown in this
+ * document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstsph {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_grxstsph_s
+ * @pktsts: Packet Status (PktSts)
+ * Indicates the status of the received packet
+ * * 4'b0010: IN data packet received
+ * * 4'b0011: IN transfer completed (triggers an interrupt)
+ * * 4'b0101: Data toggle error (triggers an interrupt)
+ * * 4'b0111: Channel halted (triggers an interrupt)
+ * * Others: Reserved
+ * @dpid: Data PID (DPID)
+ * * 2'b00: DATA0
+ * * 2'b10: DATA1
+ * * 2'b01: DATA2
+ * * 2'b11: MDATA
+ * @bcnt: Byte Count (BCnt)
+ * Indicates the byte count of the received IN data packet
+ * @chnum: Channel Number (ChNum)
+ * Indicates the channel number to which the current received
+ * packet belongs.
+ */
+ struct cvmx_usbcx_grxstsph_s {
+ __BITFIELD_FIELD(u32 reserved_21_31 : 11,
+ __BITFIELD_FIELD(u32 pktsts : 4,
+ __BITFIELD_FIELD(u32 dpid : 2,
+ __BITFIELD_FIELD(u32 bcnt : 11,
+ __BITFIELD_FIELD(u32 chnum : 4,
+ ;)))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gusbcfg
+ *
+ * Core USB Configuration Register (GUSBCFG)
+ *
+ * This register can be used to configure the core after power-on or a change
+ * to Host mode or Device mode. It contains USB and USB-PHY related
+ * configuration parameters. The application must program this register before
+ * starting any transactions on either the AHB or the USB. Do not make changes
+ * to this register after the initial programming.
+ */
+union cvmx_usbcx_gusbcfg {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gusbcfg_s
+ * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
+ * This bit is always 0x0.
+ * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
+ * Software should set this bit to 0x0.
+ * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
+ * FS and LS modes, the PHY can usually operate on a 48-MHz
+ * clock to save power.
+ * * 1'b0: 480-MHz Internal PLL clock
+ * * 1'b1: 48-MHz External Clock
+ * In 480 MHz mode, the UTMI interface operates at either 60 or
+ * 30-MHz, depending upon whether 8- or 16-bit data width is
+ * selected. In 48-MHz mode, the UTMI interface operates at 48
+ * MHz in FS mode and at either 48 or 6 MHz in LS mode
+ * (depending on the PHY vendor).
+ * This bit drives the utmi_fsls_low_power core output signal, and
+ * is valid only for UTMI+ PHYs.
+ * @usbtrdtim: USB Turnaround Time (USBTrdTim)
+ * Sets the turnaround time in PHY clocks.
+ * Specifies the response time for a MAC request to the Packet
+ * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
+ * This must be programmed to 0x5.
+ * @hnpcap: HNP-Capable (HNPCap)
+ * This bit is always 0x0.
+ * @srpcap: SRP-Capable (SRPCap)
+ * This bit is always 0x0.
+ * @ddrsel: ULPI DDR Select (DDRSel)
+ * Software should set this bit to 0x0.
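The CSftRst sequence described above (write the bit, let it self-clear, wait the PHY synchronization delay, then require AHBIdle) could be driven as in this sketch; a real implementation would bound both polls with a timeout, and the accessor names are assumptions:

	union cvmx_usbcx_grstctl grstctl;

	grstctl.u32 = 0;
	grstctl.s.csftrst = 1;
	cvmx_usb_write_csr32(usb, CVMX_USBCX_GRSTCTL(usb->index), grstctl.u32);
	do {	/* self-clearing once the core logic has been reset */
		grstctl.u32 = cvmx_usb_read_csr32(usb,
				CVMX_USBCX_GRSTCTL(usb->index));
	} while (grstctl.s.csftrst);
	udelay(1);	/* comfortably more than 3 PHY clocks */
	do {	/* AHB master must be idle before touching the PHY domain */
		grstctl.u32 = cvmx_usb_read_csr32(usb,
				CVMX_USBCX_GRSTCTL(usb->index));
	} while (!grstctl.s.ahbidle);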
+ * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
+ * Transceiver Select (PHYSel)
+ * Software should set this bit to 0x0.
+ * @fsintf: Full-Speed Serial Interface Select (FSIntf)
+ * Software should set this bit to 0x0.
+ * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel)
+ * This bit is always 0x0.
+ * @phyif: PHY Interface (PHYIf)
+ * This bit is always 0x1.
+ * @toutcal: HS/FS Timeout Calibration (TOutCal)
+ * The number of PHY clocks that the application programs in this
+ * field is added to the high-speed/full-speed interpacket timeout
+ * duration in the core to account for any additional delays
+ * introduced by the PHY. This may be required, since the delay
+ * introduced by the PHY in generating the linestate condition may
+ * vary from one PHY to another.
+ * The USB standard timeout value for high-speed operation is
+ * 736 to 816 (inclusive) bit times. The USB standard timeout
+ * value for full-speed operation is 16 to 18 (inclusive) bit
+ * times. The application must program this field based on the
+ * speed of enumeration. The number of bit times added per PHY
+ * clock is:
+ * High-speed operation:
+ * * One 30-MHz PHY clock = 16 bit times
+ * * One 60-MHz PHY clock = 8 bit times
+ * Full-speed operation:
+ * * One 30-MHz PHY clock = 0.4 bit times
+ * * One 60-MHz PHY clock = 0.2 bit times
+ * * One 48-MHz PHY clock = 0.25 bit times
+ */
+ struct cvmx_usbcx_gusbcfg_s {
+ __BITFIELD_FIELD(u32 reserved_17_31 : 15,
+ __BITFIELD_FIELD(u32 otgi2csel : 1,
+ __BITFIELD_FIELD(u32 phylpwrclksel : 1,
+ __BITFIELD_FIELD(u32 reserved_14_14 : 1,
+ __BITFIELD_FIELD(u32 usbtrdtim : 4,
+ __BITFIELD_FIELD(u32 hnpcap : 1,
+ __BITFIELD_FIELD(u32 srpcap : 1,
+ __BITFIELD_FIELD(u32 ddrsel : 1,
+ __BITFIELD_FIELD(u32 physel : 1,
+ __BITFIELD_FIELD(u32 fsintf : 1,
+ __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1,
+ __BITFIELD_FIELD(u32 phyif : 1,
+ __BITFIELD_FIELD(u32 toutcal : 3,
+ ;)))))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_haint
+ *
+ * Host All Channels Interrupt Register (HAINT)
+ *
+ * When a significant event occurs on a channel, the Host All Channels Interrupt
+ * register interrupts the application using the Host Channels Interrupt bit of
+ * the Core Interrupt register (GINTSTS.HChInt). This is shown in Interrupt.
+ * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in
+ * this register are set and cleared when the application sets and clears bits
+ * in the corresponding Host Channel-n Interrupt register.
+ */
+union cvmx_usbcx_haint {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_haint_s
+ * @haint: Channel Interrupts (HAINT)
+ * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
+ */
+ struct cvmx_usbcx_haint_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 haint : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_haintmsk
+ *
+ * Host All Channels Interrupt Mask Register (HAINTMSK)
+ *
+ * The Host All Channel Interrupt Mask register works with the Host All Channel
+ * Interrupt register to interrupt the application when an event occurs on a
+ * channel. There is one interrupt mask bit per channel, up to a maximum of 16
+ * bits.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 + */ +union cvmx_usbcx_haintmsk { + u32 u32; + /** + * struct cvmx_usbcx_haintmsk_s + * @haintmsk: Channel Interrupt Mask (HAINTMsk) + * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15 + */ + struct cvmx_usbcx_haintmsk_s { + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 haintmsk : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_hcchar# + * + * Host Channel-n Characteristics Register (HCCHAR) + * + */ +union cvmx_usbcx_hccharx { + u32 u32; + /** + * struct cvmx_usbcx_hccharx_s + * @chena: Channel Enable (ChEna) + * This field is set by the application and cleared by the OTG + * host. + * * 1'b0: Channel disabled + * * 1'b1: Channel enabled + * @chdis: Channel Disable (ChDis) + * The application sets this bit to stop transmitting/receiving + * data on a channel, even before the transfer for that channel is + * complete. The application must wait for the Channel Disabled + * interrupt before treating the channel as disabled. + * @oddfrm: Odd Frame (OddFrm) + * This field is set (reset) by the application to indicate that + * the OTG host must perform a transfer in an odd (micro)frame. + * This field is applicable for only periodic (isochronous and + * interrupt) transactions. + * * 1'b0: Even (micro)frame + * * 1'b1: Odd (micro)frame + * @devaddr: Device Address (DevAddr) + * This field selects the specific device serving as the data + * source or sink. + * @ec: Multi Count (MC) / Error Count (EC) + * When the Split Enable bit of the Host Channel-n Split Control + * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates + * to the host the number of transactions that should be executed + * per microframe for this endpoint. + * * 2'b00: Reserved. This field yields undefined results. + * * 2'b01: 1 transaction + * * 2'b10: 2 transactions to be issued for this endpoint per + * microframe + * * 2'b11: 3 transactions to be issued for this endpoint per + * microframe + * When HCSPLTn.SpltEna is set (1'b1), this field indicates the + * number of immediate retries to be performed for a periodic split + * transactions on transaction errors. This field must be set to at + * least 2'b01. + * @eptype: Endpoint Type (EPType) + * Indicates the transfer type selected. + * * 2'b00: Control + * * 2'b01: Isochronous + * * 2'b10: Bulk + * * 2'b11: Interrupt + * @lspddev: Low-Speed Device (LSpdDev) + * This field is set by the application to indicate that this + * channel is communicating to a low-speed device. + * @epdir: Endpoint Direction (EPDir) + * Indicates whether the transaction is IN or OUT. + * * 1'b0: OUT + * * 1'b1: IN + * @epnum: Endpoint Number (EPNum) + * Indicates the endpoint number on the device serving as the + * data source or sink. + * @mps: Maximum Packet Size (MPS) + * Indicates the maximum packet size of the associated endpoint. + */ + struct cvmx_usbcx_hccharx_s { + __BITFIELD_FIELD(u32 chena : 1, + __BITFIELD_FIELD(u32 chdis : 1, + __BITFIELD_FIELD(u32 oddfrm : 1, + __BITFIELD_FIELD(u32 devaddr : 7, + __BITFIELD_FIELD(u32 ec : 2, + __BITFIELD_FIELD(u32 eptype : 2, + __BITFIELD_FIELD(u32 lspddev : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + __BITFIELD_FIELD(u32 epdir : 1, + __BITFIELD_FIELD(u32 epnum : 4, + __BITFIELD_FIELD(u32 mps : 11, + ;))))))))))) + } s; +}; + +/** + * cvmx_usbc#_hcfg + * + * Host Configuration Register (HCFG) + * + * This register configures the core after power-on. Do not make changes to this + * register after initializing the host. 
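Tying the HCCHAR fields above together, arming a channel for a 512-byte high-speed bulk IN endpoint might look like the sketch below (devaddr, epnum and channel are placeholder variables; HCTSIZ and HCSPLT must be programmed before setting ChEna, and the accessor name is an assumption):

	union cvmx_usbcx_hccharx hcchar;

	hcchar.u32 = 0;
	hcchar.s.devaddr = devaddr;	/* device serving as data source/sink */
	hcchar.s.epnum = epnum;		/* endpoint number on that device */
	hcchar.s.epdir = 1;		/* 1'b1: IN */
	hcchar.s.eptype = 2;		/* 2'b10: bulk */
	hcchar.s.mps = 512;		/* high-speed bulk max packet size */
	hcchar.s.ec = 1;		/* 2'b01: one transaction per microframe */
	hcchar.s.chena = 1;		/* hand the channel to the OTG host */
	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index),
			     hcchar.u32);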
+ */ +union cvmx_usbcx_hcfg { + u32 u32; + /** + * struct cvmx_usbcx_hcfg_s + * @fslssupp: FS- and LS-Only Support (FSLSSupp) + * The application uses this bit to control the core's enumeration + * speed. Using this bit, the application can make the core + * enumerate as a FS host, even if the connected device supports + * HS traffic. Do not make changes to this field after initial + * programming. + * * 1'b0: HS/FS/LS, based on the maximum speed supported by + * the connected device + * * 1'b1: FS/LS-only, even if the connected device can support HS + * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel) + * When the core is in FS Host mode + * * 2'b00: PHY clock is running at 30/60 MHz + * * 2'b01: PHY clock is running at 48 MHz + * * Others: Reserved + * When the core is in LS Host mode + * * 2'b00: PHY clock is running at 30/60 MHz. When the + * UTMI+/ULPI PHY Low Power mode is not selected, use + * 30/60 MHz. + * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+ + * PHY Low Power mode is selected, use 48MHz if the PHY + * supplies a 48 MHz clock during LS mode. + * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode, + * use 6 MHz when the UTMI+ PHY Low Power mode is + * selected and the PHY supplies a 6 MHz clock during LS + * mode. If you select a 6 MHz clock during LS mode, you must + * do a soft reset. + * * 2'b11: Reserved + */ + struct cvmx_usbcx_hcfg_s { + __BITFIELD_FIELD(u32 reserved_3_31 : 29, + __BITFIELD_FIELD(u32 fslssupp : 1, + __BITFIELD_FIELD(u32 fslspclksel : 2, + ;))) + } s; +}; + +/** + * cvmx_usbc#_hcint# + * + * Host Channel-n Interrupt Register (HCINT) + * + * This register indicates the status of a channel with respect to USB- and + * AHB-related events. The application must read this register when the Host + * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is + * set. Before the application can read this register, it must first read + * the Host All Channels Interrupt (HAINT) register to get the exact channel + * number for the Host Channel-n Interrupt register. The application must clear + * the appropriate bit in this register to clear the corresponding bits in the + * HAINT and GINTSTS registers. + */ +union cvmx_usbcx_hcintx { + u32 u32; + /** + * struct cvmx_usbcx_hcintx_s + * @datatglerr: Data Toggle Error (DataTglErr) + * @frmovrun: Frame Overrun (FrmOvrun) + * @bblerr: Babble Error (BblErr) + * @xacterr: Transaction Error (XactErr) + * @nyet: NYET Response Received Interrupt (NYET) + * @ack: ACK Response Received Interrupt (ACK) + * @nak: NAK Response Received Interrupt (NAK) + * @stall: STALL Response Received Interrupt (STALL) + * @ahberr: This bit is always 0x0. + * @chhltd: Channel Halted (ChHltd) + * Indicates the transfer completed abnormally either because of + * any USB transaction error or in response to disable request by + * the application. + * @xfercompl: Transfer Completed (XferCompl) + * Transfer completed normally without any errors. 
+ */
+ struct cvmx_usbcx_hcintx_s {
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerr : 1,
+ __BITFIELD_FIELD(u32 frmovrun : 1,
+ __BITFIELD_FIELD(u32 bblerr : 1,
+ __BITFIELD_FIELD(u32 xacterr : 1,
+ __BITFIELD_FIELD(u32 nyet : 1,
+ __BITFIELD_FIELD(u32 ack : 1,
+ __BITFIELD_FIELD(u32 nak : 1,
+ __BITFIELD_FIELD(u32 stall : 1,
+ __BITFIELD_FIELD(u32 ahberr : 1,
+ __BITFIELD_FIELD(u32 chhltd : 1,
+ __BITFIELD_FIELD(u32 xfercompl : 1,
+ ;))))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcintmsk#
+ *
+ * Host Channel-n Interrupt Mask Register (HCINTMSKn)
+ *
+ * This register reflects the mask for each channel status described in the
+ * previous section.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_hcintmskx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcintmskx_s
+ * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
+ * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
+ * @bblerrmsk: Babble Error Mask (BblErrMsk)
+ * @xacterrmsk: Transaction Error Mask (XactErrMsk)
+ * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
+ * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
+ * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
+ * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
+ * @ahberrmsk: AHB Error Mask (AHBErrMsk)
+ * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
+ * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
+ */
+ struct cvmx_usbcx_hcintmskx_s {
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerrmsk : 1,
+ __BITFIELD_FIELD(u32 frmovrunmsk : 1,
+ __BITFIELD_FIELD(u32 bblerrmsk : 1,
+ __BITFIELD_FIELD(u32 xacterrmsk : 1,
+ __BITFIELD_FIELD(u32 nyetmsk : 1,
+ __BITFIELD_FIELD(u32 ackmsk : 1,
+ __BITFIELD_FIELD(u32 nakmsk : 1,
+ __BITFIELD_FIELD(u32 stallmsk : 1,
+ __BITFIELD_FIELD(u32 ahberrmsk : 1,
+ __BITFIELD_FIELD(u32 chhltdmsk : 1,
+ __BITFIELD_FIELD(u32 xfercomplmsk : 1,
+ ;))))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcsplt#
+ *
+ * Host Channel-n Split Control Register (HCSPLT)
+ *
+ */
+union cvmx_usbcx_hcspltx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcspltx_s
+ * @spltena: Split Enable (SpltEna)
+ * The application sets this field to indicate that this channel is
+ * enabled to perform split transactions.
+ * @compsplt: Do Complete Split (CompSplt)
+ * The application sets this field to request the OTG host to
+ * perform a complete split transaction.
+ * @xactpos: Transaction Position (XactPos)
+ * This field is used to determine whether to send all, first,
+ * middle, or last payloads with each OUT transaction.
+ * * 2'b11: All. This is the entire data payload of this
+ * transaction (which is less than or equal to 188 bytes).
+ * * 2'b10: Begin. This is the first data payload of this
+ * transaction (which is larger than 188 bytes).
+ * * 2'b00: Mid. This is the middle payload of this transaction
+ * (which is larger than 188 bytes).
+ * * 2'b01: End. This is the last payload of this transaction
+ * (which is larger than 188 bytes).
+ * @hubaddr: Hub Address (HubAddr)
+ * This field holds the device address of the transaction
+ * translator's hub.
+ * @prtaddr: Port Address (PrtAddr)
+ * This field is the port number of the recipient transaction
+ * translator.
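For a full- or low-speed device behind a high-speed hub, the fields above combine roughly as follows (hubaddr, hubport and channel are placeholders; after the start split completes, software sets CompSplt and re-enables the channel):

	union cvmx_usbcx_hcspltx hcsplt;

	hcsplt.u32 = 0;
	hcsplt.s.spltena = 1;		/* this channel does split transactions */
	hcsplt.s.hubaddr = hubaddr;	/* address of the TT's hub */
	hcsplt.s.prtaddr = hubport;	/* hub port the device sits behind */
	hcsplt.s.xactpos = 3;		/* 2'b11: whole payload (<= 188 bytes) */
	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index),
			     hcsplt.u32);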
+ */
+ struct cvmx_usbcx_hcspltx_s {
+ __BITFIELD_FIELD(u32 spltena : 1,
+ __BITFIELD_FIELD(u32 reserved_17_30 : 14,
+ __BITFIELD_FIELD(u32 compsplt : 1,
+ __BITFIELD_FIELD(u32 xactpos : 2,
+ __BITFIELD_FIELD(u32 hubaddr : 7,
+ __BITFIELD_FIELD(u32 prtaddr : 7,
+ ;))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hctsiz#
+ *
+ * Host Channel-n Transfer Size Register (HCTSIZ)
+ *
+ */
+union cvmx_usbcx_hctsizx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hctsizx_s
+ * @dopng: Do Ping (DoPng)
+ * Setting this field to 1 directs the host to do PING protocol.
+ * @pid: PID (Pid)
+ * The application programs this field with the type of PID to use
+ * for the initial transaction. The host will maintain this field
+ * for the rest of the transfer.
+ * * 2'b00: DATA0
+ * * 2'b01: DATA2
+ * * 2'b10: DATA1
+ * * 2'b11: MDATA (non-control)/SETUP (control)
+ * @pktcnt: Packet Count (PktCnt)
+ * This field is programmed by the application with the expected
+ * number of packets to be transmitted (OUT) or received (IN).
+ * The host decrements this count on every successful
+ * transmission or reception of an OUT/IN packet. Once this count
+ * reaches zero, the application is interrupted to indicate normal
+ * completion.
+ * @xfersize: Transfer Size (XferSize)
+ * For an OUT, this field is the number of data bytes the host will
+ * send during the transfer.
+ * For an IN, this field is the buffer size that the application
+ * has reserved for the transfer. The application is expected to
+ * program this field as an integer multiple of the maximum packet
+ * size for IN transactions (periodic and non-periodic).
+ */
+ struct cvmx_usbcx_hctsizx_s {
+ __BITFIELD_FIELD(u32 dopng : 1,
+ __BITFIELD_FIELD(u32 pid : 2,
+ __BITFIELD_FIELD(u32 pktcnt : 10,
+ __BITFIELD_FIELD(u32 xfersize : 19,
+ ;))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hfir
+ *
+ * Host Frame Interval Register (HFIR)
+ *
+ * This register stores the frame interval information for the current speed to
+ * which the O2P USB core has enumerated.
+ */
+union cvmx_usbcx_hfir {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hfir_s
+ * @frint: Frame Interval (FrInt)
+ * The value that the application programs to this field specifies
+ * the interval between two consecutive SOFs (FS) or micro-
+ * SOFs (HS) or Keep-Alive tokens (LS). This field contains the
+ * number of PHY clocks that constitute the required frame
+ * interval. The default value set in this field is for an FS
+ * operation when the PHY clock frequency is 60 MHz. The
+ * application can write a value to this register only after the
+ * Port Enable bit of the Host Port Control and Status register
+ * (HPRT.PrtEnaPort) has been set. If no value is programmed, the
+ * core calculates the value based on the PHY clock specified in
+ * the FS/LS PHY Clock Select field of the Host Configuration
+ * register (HCFG.FSLSPclkSel). Do not change the value of this
+ * field after the initial configuration.
+ * * 125 us (PHY clock frequency for HS)
+ * * 1 ms (PHY clock frequency for FS/LS)
+ */
+ struct cvmx_usbcx_hfir_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 frint : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hfnum
+ *
+ * Host Frame Number/Frame Time Remaining Register (HFNUM)
+ *
+ * This register indicates the current frame number.
+ * It also indicates the time remaining (in terms of the number of PHY clocks)
+ * in the current (micro)frame.
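Working the PktCnt/XferSize rules from HCTSIZ above: a zero-length transfer still needs one packet, and an IN buffer is programmed as a whole number of max-size packets. A sketch, with buf_len, mps and channel as placeholders:

	union cvmx_usbcx_hctsizx hctsiz;
	u32 packets = buf_len ? DIV_ROUND_UP(buf_len, mps) : 1;

	hctsiz.u32 = 0;
	hctsiz.s.dopng = 0;			/* no PING protocol */
	hctsiz.s.pid = 0;			/* 2'b00: start with DATA0 */
	hctsiz.s.pktcnt = packets;		/* decremented per packet */
	hctsiz.s.xfersize = packets * mps;	/* IN: integer multiple of mps */
	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
			     hctsiz.u32);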
+ */ +union cvmx_usbcx_hfnum { + u32 u32; + /** + * struct cvmx_usbcx_hfnum_s + * @frrem: Frame Time Remaining (FrRem) + * Indicates the amount of time remaining in the current + * microframe (HS) or frame (FS/LS), in terms of PHY clocks. + * This field decrements on each PHY clock. When it reaches + * zero, this field is reloaded with the value in the Frame + * Interval register and a new SOF is transmitted on the USB. + * @frnum: Frame Number (FrNum) + * This field increments when a new SOF is transmitted on the + * USB, and is reset to 0 when it reaches 16'h3FFF. + */ + struct cvmx_usbcx_hfnum_s { + __BITFIELD_FIELD(u32 frrem : 16, + __BITFIELD_FIELD(u32 frnum : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_hprt + * + * Host Port Control and Status Register (HPRT) + * + * This register is available in both Host and Device modes. + * Currently, the OTG Host supports only one port. + * A single register holds USB port-related information such as USB reset, + * enable, suspend, resume, connect status, and test mode for each port. The + * R_SS_WC bits in this register can trigger an interrupt to the application + * through the Host Port Interrupt bit of the Core Interrupt register + * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this + * register and clear the bit that caused the interrupt. For the R_SS_WC bits, + * the application must write a 1 to the bit to clear the interrupt. + */ +union cvmx_usbcx_hprt { + u32 u32; + /** + * struct cvmx_usbcx_hprt_s + * @prtspd: Port Speed (PrtSpd) + * Indicates the speed of the device attached to this port. + * * 2'b00: High speed + * * 2'b01: Full speed + * * 2'b10: Low speed + * * 2'b11: Reserved + * @prttstctl: Port Test Control (PrtTstCtl) + * The application writes a nonzero value to this field to put + * the port into a Test mode, and the corresponding pattern is + * signaled on the port. + * * 4'b0000: Test mode disabled + * * 4'b0001: Test_J mode + * * 4'b0010: Test_K mode + * * 4'b0011: Test_SE0_NAK mode + * * 4'b0100: Test_Packet mode + * * 4'b0101: Test_Force_Enable + * * Others: Reserved + * PrtSpd must be zero (i.e. the interface must be in high-speed + * mode) to use the PrtTstCtl test modes. + * @prtpwr: Port Power (PrtPwr) + * The application uses this field to control power to this port, + * and the core clears this bit on an overcurrent condition. + * * 1'b0: Power off + * * 1'b1: Power on + * @prtlnsts: Port Line Status (PrtLnSts) + * Indicates the current logic level USB data lines + * * Bit [10]: Logic level of D- + * * Bit [11]: Logic level of D+ + * @prtrst: Port Reset (PrtRst) + * When the application sets this bit, a reset sequence is + * started on this port. The application must time the reset + * period and clear this bit after the reset sequence is + * complete. + * * 1'b0: Port not in reset + * * 1'b1: Port in reset + * The application must leave this bit set for at least a + * minimum duration mentioned below to start a reset on the + * port. The application can leave it set for another 10 ms in + * addition to the required minimum duration, before clearing + * the bit, even though there is no maximum limit set by the + * USB standard. + * * High speed: 50 ms + * * Full speed/Low speed: 10 ms + * @prtsusp: Port Suspend (PrtSusp) + * The application sets this bit to put this port in Suspend + * mode. The core only stops sending SOFs when this is set. + * To stop the PHY clock, the application must set the Port + * Clock Stop bit, which will assert the suspend input pin of + * the PHY. 
+ * The read value of this bit reflects the current suspend + * status of the port. This bit is cleared by the core after a + * remote wakeup signal is detected or the application sets + * the Port Reset bit or Port Resume bit in this register or the + * Resume/Remote Wakeup Detected Interrupt bit or + * Disconnect Detected Interrupt bit in the Core Interrupt + * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt, + * respectively). + * * 1'b0: Port not in Suspend mode + * * 1'b1: Port in Suspend mode + * @prtres: Port Resume (PrtRes) + * The application sets this bit to drive resume signaling on + * the port. The core continues to drive the resume signal + * until the application clears this bit. + * If the core detects a USB remote wakeup sequence, as + * indicated by the Port Resume/Remote Wakeup Detected + * Interrupt bit of the Core Interrupt register + * (GINTSTS.WkUpInt), the core starts driving resume + * signaling without application intervention and clears this bit + * when it detects a disconnect condition. The read value of + * this bit indicates whether the core is currently driving + * resume signaling. + * * 1'b0: No resume driven + * * 1'b1: Resume driven + * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng) + * The core sets this bit when the status of the Port + * Overcurrent Active bit (bit 4) in this register changes. + * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct) + * Indicates the overcurrent condition of the port. + * * 1'b0: No overcurrent condition + * * 1'b1: Overcurrent condition + * @prtenchng: Port Enable/Disable Change (PrtEnChng) + * The core sets this bit when the status of the Port Enable bit + * [2] of this register changes. + * @prtena: Port Enable (PrtEna) + * A port is enabled only by the core after a reset sequence, + * and is disabled by an overcurrent condition, a disconnect + * condition, or by the application clearing this bit. The + * application cannot set this bit by a register write. It can only + * clear it to disable the port. This bit does not trigger any + * interrupt to the application. + * * 1'b0: Port disabled + * * 1'b1: Port enabled + * @prtconndet: Port Connect Detected (PrtConnDet) + * The core sets this bit when a device connection is detected + * to trigger an interrupt to the application using the Host Port + * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt). + * The application must write a 1 to this bit to clear the + * interrupt. + * @prtconnsts: Port Connect Status (PrtConnSts) + * * 0: No device is attached to the port. + * * 1: A device is attached to the port. + */ + struct cvmx_usbcx_hprt_s { + __BITFIELD_FIELD(u32 reserved_19_31 : 13, + __BITFIELD_FIELD(u32 prtspd : 2, + __BITFIELD_FIELD(u32 prttstctl : 4, + __BITFIELD_FIELD(u32 prtpwr : 1, + __BITFIELD_FIELD(u32 prtlnsts : 2, + __BITFIELD_FIELD(u32 reserved_9_9 : 1, + __BITFIELD_FIELD(u32 prtrst : 1, + __BITFIELD_FIELD(u32 prtsusp : 1, + __BITFIELD_FIELD(u32 prtres : 1, + __BITFIELD_FIELD(u32 prtovrcurrchng : 1, + __BITFIELD_FIELD(u32 prtovrcurract : 1, + __BITFIELD_FIELD(u32 prtenchng : 1, + __BITFIELD_FIELD(u32 prtena : 1, + __BITFIELD_FIELD(u32 prtconndet : 1, + __BITFIELD_FIELD(u32 prtconnsts : 1, + ;))))))))))))))) + } s; +}; + +/** + * cvmx_usbc#_hptxfsiz + * + * Host Periodic Transmit FIFO Size Register (HPTXFSIZ) + * + * This register holds the size and the memory start address of the Periodic + * TxFIFO, as shown in Figures 310 and 311. 
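Because several HPRT bits are R_SS_WC, a read-modify-write of the register must zero those bits or it would acknowledge pending port events as a side effect. A sketch of the PrtRst timing described above, for a high-speed port (accessor names assumed):

	union cvmx_usbcx_hprt hprt;

	hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
	/* Don't write 1s back into the write-one-to-clear bits. */
	hprt.s.prtena = 0;
	hprt.s.prtconndet = 0;
	hprt.s.prtenchng = 0;
	hprt.s.prtovrcurrchng = 0;
	hprt.s.prtrst = 1;
	cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), hprt.u32);
	msleep(50);		/* >= 50 ms reset for a high-speed port */
	hprt.s.prtrst = 0;
	cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), hprt.u32);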
+ */
+union cvmx_usbcx_hptxfsiz {
+	u32 u32;
+	/**
+	 * struct cvmx_usbcx_hptxfsiz_s
+	 * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
+	 *	This value is in terms of 32-bit words.
+	 *	* Minimum value is 16
+	 *	* Maximum value is 32768
+	 * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
+	 */
+	struct cvmx_usbcx_hptxfsiz_s {
+		__BITFIELD_FIELD(u32 ptxfsize	: 16,
+		__BITFIELD_FIELD(u32 ptxfstaddr	: 16,
+		;))
+	} s;
+};
+
+/**
+ * cvmx_usbc#_hptxsts
+ *
+ * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
+ *
+ * This read-only register contains the free space information for the Periodic
+ * TxFIFO and the Periodic Transmit Request Queue.
+ */
+union cvmx_usbcx_hptxsts {
+	u32 u32;
+	/**
+	 * struct cvmx_usbcx_hptxsts_s
+	 * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
+	 *	This indicates the entry in the Periodic Tx Request Queue that
+	 *	is currently being processed by the MAC.
+	 *	This register is used for debugging.
+	 *	* Bit [31]: Odd/Even (micro)frame
+	 *		- 1'b0: send in even (micro)frame
+	 *		- 1'b1: send in odd (micro)frame
+	 *	* Bits [30:27]: Channel/endpoint number
+	 *	* Bits [26:25]: Type
+	 *		- 2'b00: IN/OUT
+	 *		- 2'b01: Zero-length packet
+	 *		- 2'b10: CSPLIT
+	 *		- 2'b11: Disable channel command
+	 *	* Bit [24]: Terminate (last entry for the selected
+	 *	  channel/endpoint)
+	 * @ptxqspcavail: Periodic Transmit Request Queue Space Available
+	 *	(PTxQSpcAvail)
+	 *	Indicates the number of free locations available to be written
+	 *	in the Periodic Transmit Request Queue. This queue holds both
+	 *	IN and OUT requests.
+	 *	* 8'h0: Periodic Transmit Request Queue is full
+	 *	* 8'h1: 1 location available
+	 *	* 8'h2: 2 locations available
+	 *	* n: n locations available (0..8)
+	 *	* Others: Reserved
+	 * @ptxfspcavail: Periodic Transmit Data FIFO Space Available
+	 *	(PTxFSpcAvail)
+	 *	Indicates the number of free locations available to be written
+	 *	to in the Periodic TxFIFO.
+	 *	Values are in terms of 32-bit words
+	 *	* 16'h0: Periodic TxFIFO is full
+	 *	* 16'h1: 1 word available
+	 *	* 16'h2: 2 words available
+	 *	* 16'hn: n words available (where 0..32768)
+	 *	* 16'h8000: 32768 words available
+	 *	* Others: Reserved
+	 */
+	struct cvmx_usbcx_hptxsts_s {
+		__BITFIELD_FIELD(u32 ptxqtop		: 8,
+		__BITFIELD_FIELD(u32 ptxqspcavail	: 8,
+		__BITFIELD_FIELD(u32 ptxfspcavail	: 16,
+		;)))
+	} s;
+};
+
+/**
+ * cvmx_usbn#_clk_ctl
+ *
+ * USBN_CLK_CTL = USBN's Clock Control
+ *
+ * This register is used to control the frequency of the hclk and the
+ * hreset and phy_rst signals.
+ */
+union cvmx_usbnx_clk_ctl {
+	u64 u64;
+	/**
+	 * struct cvmx_usbnx_clk_ctl_s
+	 * @divide2: The 'hclk' used by the USB subsystem is derived
+	 *	from the eclk.
+	 *	Also see the field DIVIDE. DIVIDE2<1> must currently
+	 *	be zero because it is not implemented, so the maximum
+	 *	ratio of eclk/hclk is currently 16.
+	 *	The actual divide number for hclk is:
+	 *	(DIVIDE2 + 1) * (DIVIDE + 1)
+	 * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
+	 *	generate the hclk in the USB Subsystem is held
+	 *	in reset. This bit must be set to '0' before
+	 *	changing the value of DIVIDE in this register.
+	 *	The reset to the HCLK_DIVIDER is also asserted
+	 *	when core reset is asserted.
+	 * @p_x_on: Force USB-PHY on during suspend.
+	 *	'1' USB-PHY XO block is powered-down during
+	 *	suspend.
+	 *	'0' USB-PHY XO block is powered-up during
+	 *	suspend.
+	 *	The value of this field must be set while POR is
+	 *	active.
+	 * @p_rtype: PHY reference clock type
+	 *	On CN50XX/CN52XX/CN56XX the values are:
+	 *	'0' The USB-PHY uses a 12MHz crystal as a clock source
+	 *	    at the USB_XO and USB_XI pins.
+	 *	'1' Reserved.
+	 *	'2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the
+	 *	    USB_XO pin. USB_XI should be tied to ground in this
+	 *	    case.
+	 *	'3' Reserved.
+	 *	On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
+	 *	'0' Reserved.
+	 *	'1' Reserved.
+	 *	'2' The PHY PLL uses the XO block output as a reference.
+	 *	    The XO block uses an external clock supplied on the
+	 *	    XO pin. USB_XI should be tied to ground for this
+	 *	    usage.
+	 *	'3' The XO block uses the clock from a crystal.
+	 * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
+	 *	remain powered in Suspend Mode.
+	 *	'1' The USB-PHY XO Bias, Bandgap and PLL are
+	 *	powered down in suspend mode.
+	 *	The value of this field must be set while POR is
+	 *	active.
+	 * @p_c_sel: Phy clock speed select.
+	 *	Selects the reference clock / crystal frequency.
+	 *	'11': Reserved
+	 *	'10': 48 MHz (reserved when a crystal is used)
+	 *	'01': 24 MHz (reserved when a crystal is used)
+	 *	'00': 12 MHz
+	 *	The value of this field must be set while POR is
+	 *	active.
+	 *	NOTE: if a crystal is used as a reference clock,
+	 *	this field must be set to 12 MHz.
+	 * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
+	 * @sd_mode: Scaledown mode for the USBC. Control timing events
+	 *	in the USBC, for normal operation this must be '0'.
+	 * @s_bist: Starts bist on the hclk memories, during the '0'
+	 *	to '1' transition.
+	 * @por: Power On Reset for the PHY.
+	 *	Resets all the PHYS registers and state machines.
+	 * @enable: When '1' allows the generation of the hclk. When
+	 *	'0' the hclk will not be generated. SEE DIVIDE
+	 *	field of this register.
+	 * @prst: When this field is '0' the reset associated with
+	 *	the phy_clk functionality in the USB Subsystem is
+	 *	held in reset. This bit should not be set to '1'
+	 *	until the time it takes 6 clocks (hclk or phy_clk,
+	 *	whichever is slower) has passed. Under normal
+	 *	operation once this bit is set to '1' it should not
+	 *	be set to '0'.
+	 * @hrst: When this field is '0' the reset associated with
+	 *	the hclk functionality in the USB Subsystem is
+	 *	held in reset. This bit should not be set to '1'
+	 *	until 12ms after phy_clk is stable. Under normal
+	 *	operation, once this bit is set to '1' it should
+	 *	not be set to '0'.
+	 * @divide: The frequency of 'hclk' used by the USB subsystem
+	 *	is the eclk frequency divided by the value of
+	 *	(DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+	 *	DIVIDE2 of this register.
+	 *	The hclk frequency should be less than 125 MHz.
+	 *	After writing a value to this field the SW should
+	 *	read the field for the value written.
+	 *	The ENABLE field of this register should not be set
+	 *	until AFTER this field is set and then read.
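+	 *
+	 *	Worked example (editor's note, illustrative numbers only):
+	 *	with eclk = 800 MHz, DIVIDE2 = 0 and DIVIDE = 6, hclk =
+	 *	800 MHz / ((0 + 1) * (6 + 1)) ~= 114.3 MHz, which satisfies
+	 *	the "less than 125 MHz" constraint above.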
+ */ + struct cvmx_usbnx_clk_ctl_s { + __BITFIELD_FIELD(u64 reserved_20_63 : 44, + __BITFIELD_FIELD(u64 divide2 : 2, + __BITFIELD_FIELD(u64 hclk_rst : 1, + __BITFIELD_FIELD(u64 p_x_on : 1, + __BITFIELD_FIELD(u64 p_rtype : 2, + __BITFIELD_FIELD(u64 p_com_on : 1, + __BITFIELD_FIELD(u64 p_c_sel : 2, + __BITFIELD_FIELD(u64 cdiv_byp : 1, + __BITFIELD_FIELD(u64 sd_mode : 2, + __BITFIELD_FIELD(u64 s_bist : 1, + __BITFIELD_FIELD(u64 por : 1, + __BITFIELD_FIELD(u64 enable : 1, + __BITFIELD_FIELD(u64 prst : 1, + __BITFIELD_FIELD(u64 hrst : 1, + __BITFIELD_FIELD(u64 divide : 3, + ;))))))))))))))) + } s; +}; + +/** + * cvmx_usbn#_usbp_ctl_status + * + * USBN_USBP_CTL_STATUS = USBP Control And Status Register + * + * Contains general control and status information for the USBN block. + */ +union cvmx_usbnx_usbp_ctl_status { + u64 u64; + /** + * struct cvmx_usbnx_usbp_ctl_status_s + * @txrisetune: HS Transmitter Rise/Fall Time Adjustment + * @txvreftune: HS DC Voltage Level Adjustment + * @txfslstune: FS/LS Source Impedance Adjustment + * @txhsxvtune: Transmitter High-Speed Crossover Adjustment + * @sqrxtune: Squelch Threshold Adjustment + * @compdistune: Disconnect Threshold Adjustment + * @otgtune: VBUS Valid Threshold Adjustment + * @otgdisable: OTG Block Disable + * @portreset: Per_Port Reset + * @drvvbus: Drive VBUS + * @lsbist: Low-Speed BIST Enable. + * @fsbist: Full-Speed BIST Enable. + * @hsbist: High-Speed BIST Enable. + * @bist_done: PHY Bist Done. + * Asserted at the end of the PHY BIST sequence. + * @bist_err: PHY Bist Error. + * Indicates an internal error was detected during + * the BIST sequence. + * @tdata_out: PHY Test Data Out. + * Presents either internally generated signals or + * test register contents, based upon the value of + * test_data_out_sel. + * @siddq: Drives the USBP (USB-PHY) SIDDQ input. + * Normally should be set to zero. + * When customers have no intent to use USB PHY + * interface, they should: + * - still provide 3.3V to USB_VDD33, and + * - tie USB_REXT to 3.3V supply, and + * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1 + * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable + * @dma_bmode: When set to 1 the L2C DMA address will be updated + * with byte-counts between packets. When set to 0 + * the L2C DMA address is incremented to the next + * 4-byte aligned address after adding byte-count. + * @usbc_end: Bigendian input to the USB Core. This should be + * set to '0' for operation. + * @usbp_bist: PHY, This is cleared '0' to run BIST on the USBP. + * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP. + * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY. + * This signal enables the pull-down resistance on + * the D+ line. '1' pull down-resistance is connected + * to D+/ '0' pull down resistance is not connected + * to D+. When an A/B device is acting as a host + * (downstream-facing port), dp_pulldown and + * dm_pulldown are enabled. This must not toggle + * during normal operation. + * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY. + * This signal enables the pull-down resistance on + * the D- line. '1' pull down-resistance is connected + * to D-. '0' pull down resistance is not connected + * to D-. When an A/B device is acting as a host + * (downstream-facing port), dp_pulldown and + * dm_pulldown are enabled. This must not toggle + * during normal operation. + * @hst_mode: When '0' the USB is acting as HOST, when '1' + * USB is acting as device. This field needs to be + * set while the USB is in reset. 
+	 * @tuning: Transmitter Tuning for High-Speed Operation.
+	 *	Tunes the current supply and rise/fall output
+	 *	times for high-speed operation.
+	 *	[20:19] == 11: Current supply increased
+	 *	approximately 9%
+	 *	[20:19] == 10: Current supply increased
+	 *	approximately 4.5%
+	 *	[20:19] == 01: Design default.
+	 *	[20:19] == 00: Current supply decreased
+	 *	approximately 4.5%
+	 *	[22:21] == 11: Rise and fall times are increased.
+	 *	[22:21] == 10: Design default.
+	 *	[22:21] == 01: Rise and fall times are decreased.
+	 *	[22:21] == 00: Rise and fall times are decreased
+	 *	further as compared to the 01 setting.
+	 * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+	 *	Enables or disables bit stuffing on data[15:8]
+	 *	when bit-stuffing is enabled.
+	 * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+	 *	Enables or disables bit stuffing on data[7:0]
+	 *	when bit-stuffing is enabled.
+	 * @loop_enb: PHY Loopback Test Enable.
+	 *	'1': During data transmission the receive is
+	 *	enabled.
+	 *	'0': During data transmission the receive is
+	 *	disabled.
+	 *	Must be '0' for normal operation.
+	 * @vtest_enb: Analog Test Pin Enable.
+	 *	'1' The PHY's analog_test pin is enabled for the
+	 *	input and output of applicable analog test signals.
+	 *	'0' The analog_test pin is disabled.
+	 * @bist_enb: Built-In Self Test Enable.
+	 *	Used to activate BIST in the PHY.
+	 * @tdata_sel: Test Data Out Select.
+	 *	'1' test_data_out[3:0] (PHY) register contents
+	 *	are output. '0' internally generated signals are
+	 *	output.
+	 * @taddr_in: Mode Address for Test Interface.
+	 *	Specifies the register address for writing to or
+	 *	reading from the PHY test interface register.
+	 * @tdata_in: Internal Testing Register Input Data and Select
+	 *	This is a test bus. Data is present on [3:0],
+	 *	and its corresponding select (enable) is present
+	 *	on bits [7:4].
+	 * @ate_reset: Reset input from automatic test equipment.
+	 *	This is a test signal. When the USB Core is
+	 *	powered up (not in Suspend Mode), an automatic
+	 *	tester can use this to disable phy_clock and
+	 *	free_clk, then re-enable them with an aligned
+	 *	phase.
+	 *	'1': The phy_clk and free_clk outputs are
+	 *	disabled.
+	 *	'0': The phy_clk and free_clk outputs are
+	 *	available within a specific period after the
+	 *	de-assertion.
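+	 *
+	 *	[Editor's aside, not part of the original patch: like the
+	 *	other CSR unions in this header, the intended usage is a
+	 *	read-modify-write through the union. For example, following
+	 *	the SIDDQ guidance above for a design that does not use the
+	 *	USB PHY (sketch, with hypothetical 64-bit CSR accessors and
+	 *	register address):
+	 *
+	 *		union cvmx_usbnx_usbp_ctl_status ctl;
+	 *
+	 *		ctl.u64 = csr_read64(USBNX_USBP_CTL_STATUS);
+	 *		ctl.s.siddq = 1;	// power down the unused PHY
+	 *		csr_write64(USBNX_USBP_CTL_STATUS, ctl.u64);]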
+ */ + struct cvmx_usbnx_usbp_ctl_status_s { + __BITFIELD_FIELD(u64 txrisetune : 1, + __BITFIELD_FIELD(u64 txvreftune : 4, + __BITFIELD_FIELD(u64 txfslstune : 4, + __BITFIELD_FIELD(u64 txhsxvtune : 2, + __BITFIELD_FIELD(u64 sqrxtune : 3, + __BITFIELD_FIELD(u64 compdistune : 3, + __BITFIELD_FIELD(u64 otgtune : 3, + __BITFIELD_FIELD(u64 otgdisable : 1, + __BITFIELD_FIELD(u64 portreset : 1, + __BITFIELD_FIELD(u64 drvvbus : 1, + __BITFIELD_FIELD(u64 lsbist : 1, + __BITFIELD_FIELD(u64 fsbist : 1, + __BITFIELD_FIELD(u64 hsbist : 1, + __BITFIELD_FIELD(u64 bist_done : 1, + __BITFIELD_FIELD(u64 bist_err : 1, + __BITFIELD_FIELD(u64 tdata_out : 4, + __BITFIELD_FIELD(u64 siddq : 1, + __BITFIELD_FIELD(u64 txpreemphasistune : 1, + __BITFIELD_FIELD(u64 dma_bmode : 1, + __BITFIELD_FIELD(u64 usbc_end : 1, + __BITFIELD_FIELD(u64 usbp_bist : 1, + __BITFIELD_FIELD(u64 tclk : 1, + __BITFIELD_FIELD(u64 dp_pulld : 1, + __BITFIELD_FIELD(u64 dm_pulld : 1, + __BITFIELD_FIELD(u64 hst_mode : 1, + __BITFIELD_FIELD(u64 tuning : 4, + __BITFIELD_FIELD(u64 tx_bs_enh : 1, + __BITFIELD_FIELD(u64 tx_bs_en : 1, + __BITFIELD_FIELD(u64 loop_enb : 1, + __BITFIELD_FIELD(u64 vtest_enb : 1, + __BITFIELD_FIELD(u64 bist_enb : 1, + __BITFIELD_FIELD(u64 tdata_sel : 1, + __BITFIELD_FIELD(u64 taddr_in : 4, + __BITFIELD_FIELD(u64 tdata_in : 8, + __BITFIELD_FIELD(u64 ate_reset : 1, + ;))))))))))))))))))))))))))))))))))) + } s; +}; + +#endif /* __OCTEON_HCD_H__ */ diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c new file mode 100644 index 0000000000..d63c63942a --- /dev/null +++ b/drivers/usb/misc/onboard_usb_hub.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for onboard USB hubs + * + * Copyright (c) 2022, Google LLC + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "onboard_usb_hub.h" + +static struct usb_device_driver onboard_hub_usbdev_driver; + +/************************** Platform driver **************************/ + +struct usbdev_node { + struct usb_device *udev; + struct list_head list; +}; + +struct onboard_hub { + struct regulator *vdd; + struct device *dev; + const struct onboard_hub_pdata *pdata; + struct gpio_desc *reset_gpio; + bool always_powered_in_suspend; + bool is_powered_on; + bool going_away; + struct list_head udev_list; + struct work_struct attach_usb_driver_work; + struct mutex lock; +}; + +static int onboard_hub_power_on(struct onboard_hub *hub) +{ + int err; + + err = regulator_enable(hub->vdd); + if (err) { + dev_err(hub->dev, "failed to enable regulator: %d\n", err); + return err; + } + + fsleep(hub->pdata->reset_us); + gpiod_set_value_cansleep(hub->reset_gpio, 0); + + hub->is_powered_on = true; + + return 0; +} + +static int onboard_hub_power_off(struct onboard_hub *hub) +{ + int err; + + gpiod_set_value_cansleep(hub->reset_gpio, 1); + + err = regulator_disable(hub->vdd); + if (err) { + dev_err(hub->dev, "failed to disable regulator: %d\n", err); + return err; + } + + hub->is_powered_on = false; + + return 0; +} + +static int __maybe_unused onboard_hub_suspend(struct device *dev) +{ + struct onboard_hub *hub = dev_get_drvdata(dev); + struct usbdev_node *node; + bool power_off = true; + + if (hub->always_powered_in_suspend) + return 0; + + mutex_lock(&hub->lock); + + list_for_each_entry(node, &hub->udev_list, list) { + if (!device_may_wakeup(node->udev->bus->controller)) + continue; + + if 
(usb_wakeup_enabled_descendants(node->udev)) { + power_off = false; + break; + } + } + + mutex_unlock(&hub->lock); + + if (!power_off) + return 0; + + return onboard_hub_power_off(hub); +} + +static int __maybe_unused onboard_hub_resume(struct device *dev) +{ + struct onboard_hub *hub = dev_get_drvdata(dev); + + if (hub->is_powered_on) + return 0; + + return onboard_hub_power_on(hub); +} + +static inline void get_udev_link_name(const struct usb_device *udev, char *buf, size_t size) +{ + snprintf(buf, size, "usb_dev.%s", dev_name(&udev->dev)); +} + +static int onboard_hub_add_usbdev(struct onboard_hub *hub, struct usb_device *udev) +{ + struct usbdev_node *node; + char link_name[64]; + int err; + + mutex_lock(&hub->lock); + + if (hub->going_away) { + err = -EINVAL; + goto error; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + err = -ENOMEM; + goto error; + } + + node->udev = udev; + + list_add(&node->list, &hub->udev_list); + + mutex_unlock(&hub->lock); + + get_udev_link_name(udev, link_name, sizeof(link_name)); + WARN_ON(sysfs_create_link(&hub->dev->kobj, &udev->dev.kobj, link_name)); + + return 0; + +error: + mutex_unlock(&hub->lock); + + return err; +} + +static void onboard_hub_remove_usbdev(struct onboard_hub *hub, const struct usb_device *udev) +{ + struct usbdev_node *node; + char link_name[64]; + + get_udev_link_name(udev, link_name, sizeof(link_name)); + sysfs_remove_link(&hub->dev->kobj, link_name); + + mutex_lock(&hub->lock); + + list_for_each_entry(node, &hub->udev_list, list) { + if (node->udev == udev) { + list_del(&node->list); + kfree(node); + break; + } + } + + mutex_unlock(&hub->lock); +} + +static ssize_t always_powered_in_suspend_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const struct onboard_hub *hub = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hub->always_powered_in_suspend); +} + +static ssize_t always_powered_in_suspend_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct onboard_hub *hub = dev_get_drvdata(dev); + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret < 0) + return ret; + + hub->always_powered_in_suspend = val; + + return count; +} +static DEVICE_ATTR_RW(always_powered_in_suspend); + +static struct attribute *onboard_hub_attrs[] = { + &dev_attr_always_powered_in_suspend.attr, + NULL, +}; +ATTRIBUTE_GROUPS(onboard_hub); + +static void onboard_hub_attach_usb_driver(struct work_struct *work) +{ + int err; + + err = driver_attach(&onboard_hub_usbdev_driver.drvwrap.driver); + if (err) + pr_err("Failed to attach USB driver: %d\n", err); +} + +static int onboard_hub_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + struct device *dev = &pdev->dev; + struct onboard_hub *hub; + int err; + + hub = devm_kzalloc(dev, sizeof(*hub), GFP_KERNEL); + if (!hub) + return -ENOMEM; + + of_id = of_match_device(onboard_hub_match, &pdev->dev); + if (!of_id) + return -ENODEV; + + hub->pdata = of_id->data; + if (!hub->pdata) + return -EINVAL; + + hub->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(hub->vdd)) + return PTR_ERR(hub->vdd); + + hub->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(hub->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(hub->reset_gpio), "failed to get reset GPIO\n"); + + hub->dev = dev; + mutex_init(&hub->lock); + INIT_LIST_HEAD(&hub->udev_list); + + dev_set_drvdata(dev, hub); + + err = onboard_hub_power_on(hub); + if (err) + return err; + + /* + * The USB driver 
might have been detached from the USB devices by + * onboard_hub_remove() (e.g. through an 'unbind' by userspace), + * make sure to re-attach it if needed. + * + * This needs to be done deferred to avoid self-deadlocks on systems + * with nested onboard hubs. + */ + INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver); + schedule_work(&hub->attach_usb_driver_work); + + return 0; +} + +static int onboard_hub_remove(struct platform_device *pdev) +{ + struct onboard_hub *hub = dev_get_drvdata(&pdev->dev); + struct usbdev_node *node; + struct usb_device *udev; + + hub->going_away = true; + + if (&hub->attach_usb_driver_work != current_work()) + cancel_work_sync(&hub->attach_usb_driver_work); + + mutex_lock(&hub->lock); + + /* unbind the USB devices to avoid dangling references to this device */ + while (!list_empty(&hub->udev_list)) { + node = list_first_entry(&hub->udev_list, struct usbdev_node, list); + udev = node->udev; + + /* + * Unbinding the driver will call onboard_hub_remove_usbdev(), + * which acquires hub->lock. We must release the lock first. + */ + get_device(&udev->dev); + mutex_unlock(&hub->lock); + device_release_driver(&udev->dev); + put_device(&udev->dev); + mutex_lock(&hub->lock); + } + + mutex_unlock(&hub->lock); + + return onboard_hub_power_off(hub); +} + +MODULE_DEVICE_TABLE(of, onboard_hub_match); + +static const struct dev_pm_ops __maybe_unused onboard_hub_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(onboard_hub_suspend, onboard_hub_resume) +}; + +static struct platform_driver onboard_hub_driver = { + .probe = onboard_hub_probe, + .remove = onboard_hub_remove, + + .driver = { + .name = "onboard-usb-hub", + .of_match_table = onboard_hub_match, + .pm = pm_ptr(&onboard_hub_pm_ops), + .dev_groups = onboard_hub_groups, + }, +}; + +/************************** USB driver **************************/ + +#define VENDOR_ID_MICROCHIP 0x0424 +#define VENDOR_ID_REALTEK 0x0bda +#define VENDOR_ID_TI 0x0451 + +/* + * Returns the onboard_hub platform device that is associated with the USB + * device passed as parameter. + */ +static struct onboard_hub *_find_onboard_hub(struct device *dev) +{ + struct platform_device *pdev; + struct device_node *np; + struct onboard_hub *hub; + + pdev = of_find_device_by_node(dev->of_node); + if (!pdev) { + np = of_parse_phandle(dev->of_node, "peer-hub", 0); + if (!np) { + dev_err(dev, "failed to find device node for peer hub\n"); + return ERR_PTR(-EINVAL); + } + + pdev = of_find_device_by_node(np); + of_node_put(np); + + if (!pdev) + return ERR_PTR(-ENODEV); + } + + hub = dev_get_drvdata(&pdev->dev); + put_device(&pdev->dev); + + /* + * The presence of drvdata ('hub') indicates that the platform driver + * finished probing. This handles the case where (conceivably) we could + * be running at the exact same time as the platform driver's probe. If + * we detect the race we request probe deferral and we'll come back and + * try again. 
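+ *
+ * (Editor's note: returning -EPROBE_DEFER makes the driver core retry
+ * this probe later, by which point the platform driver should have set
+ * its drvdata.)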
+ */
+	if (!hub)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return hub;
+}
+
+static int onboard_hub_usbdev_probe(struct usb_device *udev)
+{
+	struct device *dev = &udev->dev;
+	struct onboard_hub *hub;
+	int err;
+
+	/* ignore supported hubs without device tree node */
+	if (!dev->of_node)
+		return -ENODEV;
+
+	hub = _find_onboard_hub(dev);
+	if (IS_ERR(hub))
+		return PTR_ERR(hub);
+
+	dev_set_drvdata(dev, hub);
+
+	err = onboard_hub_add_usbdev(hub, udev);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
+{
+	struct onboard_hub *hub = dev_get_drvdata(&udev->dev);
+
+	onboard_hub_remove_usbdev(hub, udev);
+}
+
+static const struct usb_device_id onboard_hub_id_table[] = {
+	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
+	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
+	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
+	{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
+	{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
+	{}
+};
+MODULE_DEVICE_TABLE(usb, onboard_hub_id_table);
+
+static struct usb_device_driver onboard_hub_usbdev_driver = {
+	.name = "onboard-usb-hub",
+	.probe = onboard_hub_usbdev_probe,
+	.disconnect = onboard_hub_usbdev_disconnect,
+	.generic_subclass = 1,
+	.supports_autosuspend = 1,
+	.id_table = onboard_hub_id_table,
+};
+
+static int __init onboard_hub_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&onboard_hub_driver);
+	if (ret)
+		return ret;
+
+	ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
+	if (ret)
+		platform_driver_unregister(&onboard_hub_driver);
+
+	return ret;
+}
+module_init(onboard_hub_init);
+
+static void __exit onboard_hub_exit(void)
+{
+	usb_deregister_device_driver(&onboard_hub_usbdev_driver);
+	platform_driver_unregister(&onboard_hub_driver);
+}
+module_exit(onboard_hub_exit);
+
+MODULE_AUTHOR("Matthias Kaehlcke ");
+MODULE_DESCRIPTION("Driver for discrete onboard USB hubs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
new file mode 100644
index 0000000000..34beab8bce
--- /dev/null
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2022, Google LLC
+ */
+
+#ifndef _USB_MISC_ONBOARD_USB_HUB_H
+#define _USB_MISC_ONBOARD_USB_HUB_H
+
+struct onboard_hub_pdata {
+	unsigned long reset_us;		/* reset pulse width in us */
+};
+
+static const struct onboard_hub_pdata microchip_usb424_data = {
+	.reset_us = 1,
+};
+
+static const struct onboard_hub_pdata realtek_rts5411_data = {
+	.reset_us = 0,
+};
+
+static const struct onboard_hub_pdata ti_tusb8041_data = {
+	.reset_us = 3000,
+};
+
+static const struct of_device_id onboard_hub_match[] = {
+	{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+	{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+	{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+	{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
+	{ .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
+	{ .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
+	{ .compatible = "usbbda,5414", .data = &realtek_rts5411_data, },
+	{}
+};
+
+#endif /* _USB_MISC_ONBOARD_USB_HUB_H */
diff --git a/drivers/usb/misc/onboard_usb_hub_pdevs.c
b/drivers/usb/misc/onboard_usb_hub_pdevs.c new file mode 100644 index 0000000000..ed22a18f4a --- /dev/null +++ b/drivers/usb/misc/onboard_usb_hub_pdevs.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * API for creating and destroying USB onboard hub platform devices + * + * Copyright (c) 2022, Google LLC + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "onboard_usb_hub.h" + +struct pdev_list_entry { + struct platform_device *pdev; + struct list_head node; +}; + +static bool of_is_onboard_usb_hub(const struct device_node *np) +{ + return !!of_match_node(onboard_hub_match, np); +} + +/** + * onboard_hub_create_pdevs -- create platform devices for onboard USB hubs + * @parent_hub : parent hub to scan for connected onboard hubs + * @pdev_list : list of onboard hub platform devices owned by the parent hub + * + * Creates a platform device for each supported onboard hub that is connected to + * the given parent hub. The platform device is in charge of initializing the + * hub (enable regulators, take the hub out of reset, ...) and can optionally + * control whether the hub remains powered during system suspend or not. + * + * To keep track of the platform devices they are added to a list that is owned + * by the parent hub. + * + * Some background about the logic in this function, which can be a bit hard + * to follow: + * + * Root hubs don't have dedicated device tree nodes, but use the node of their + * HCD. The primary and secondary HCD are usually represented by a single DT + * node. That means the root hubs of the primary and secondary HCD share the + * same device tree node (the HCD node). As a result this function can be called + * twice with the same DT node for root hubs. We only want to create a single + * platform device for each physical onboard hub, hence for root hubs the loop + * is only executed for the root hub of the primary HCD. Since the function + * scans through all child nodes it still creates pdevs for onboard hubs + * connected to the root hub of the secondary HCD if needed. + * + * Further there must be only one platform device for onboard hubs with a peer + * hub (the hub is a single physical device). To achieve this two measures are + * taken: pdevs for onboard hubs with a peer are only created when the function + * is called on behalf of the parent hub that is connected to the primary HCD + * (directly or through other hubs). For onboard hubs connected to root hubs + * the function processes the nodes of both peers. A platform device is only + * created if the peer hub doesn't have one already. 
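+ *
+ * In short (editor's summary of the above, not part of the original
+ * comment): a platform device is created for child node N if and only
+ * if N matches onboard_hub_match, and either N has no peer-hub phandle,
+ * or this call is on behalf of the primary HCD and the peer node has
+ * not produced a platform device yet.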
+ */ +void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list) +{ + int i; + struct usb_hcd *hcd = bus_to_hcd(parent_hub->bus); + struct device_node *np, *npc; + struct platform_device *pdev; + struct pdev_list_entry *pdle; + + if (!parent_hub->dev.of_node) + return; + + if (!parent_hub->parent && !usb_hcd_is_primary_hcd(hcd)) + return; + + for (i = 1; i <= parent_hub->maxchild; i++) { + np = usb_of_get_device_node(parent_hub, i); + if (!np) + continue; + + if (!of_is_onboard_usb_hub(np)) + goto node_put; + + npc = of_parse_phandle(np, "peer-hub", 0); + if (npc) { + if (!usb_hcd_is_primary_hcd(hcd)) { + of_node_put(npc); + goto node_put; + } + + pdev = of_find_device_by_node(npc); + of_node_put(npc); + + if (pdev) { + put_device(&pdev->dev); + goto node_put; + } + } + + pdev = of_platform_device_create(np, NULL, &parent_hub->dev); + if (!pdev) { + dev_err(&parent_hub->dev, + "failed to create platform device for onboard hub '%pOF'\n", np); + goto node_put; + } + + pdle = kzalloc(sizeof(*pdle), GFP_KERNEL); + if (!pdle) { + of_platform_device_destroy(&pdev->dev, NULL); + goto node_put; + } + + pdle->pdev = pdev; + list_add(&pdle->node, pdev_list); + +node_put: + of_node_put(np); + } +} +EXPORT_SYMBOL_GPL(onboard_hub_create_pdevs); + +/** + * onboard_hub_destroy_pdevs -- free resources of onboard hub platform devices + * @pdev_list : list of onboard hub platform devices + * + * Destroys the platform devices in the given list and frees the memory associated + * with the list entry. + */ +void onboard_hub_destroy_pdevs(struct list_head *pdev_list) +{ + struct pdev_list_entry *pdle, *tmp; + + list_for_each_entry_safe(pdle, tmp, pdev_list, node) { + list_del(&pdle->node); + of_platform_device_destroy(&pdle->pdev->dev, NULL); + kfree(pdle); + } +} +EXPORT_SYMBOL_GPL(onboard_hub_destroy_pdevs); diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c new file mode 100644 index 0000000000..cea2e81088 --- /dev/null +++ b/drivers/usb/musb/mpfs.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PolarFire SoC (MPFS) MUSB Glue Layer + * + * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved. 
+ * Based on {omap2430,tusb6010,ux500}.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "musb_core.h" +#include "musb_dma.h" + +#define MPFS_MUSB_MAX_EP_NUM 8 +#define MPFS_MUSB_RAM_BITS 12 + +struct mpfs_glue { + struct device *dev; + struct platform_device *musb; + struct platform_device *phy; + struct clk *clk; +}; + +static struct musb_fifo_cfg mpfs_musb_mode_cfg[] = { + { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, + { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, + { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, + { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 1024, }, + { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 4096, }, +}; + +static const struct musb_hdrc_config mpfs_musb_hdrc_config = { + .fifo_cfg = mpfs_musb_mode_cfg, + .fifo_cfg_size = ARRAY_SIZE(mpfs_musb_mode_cfg), + .multipoint = true, + .dyn_fifo = true, + .num_eps = MPFS_MUSB_MAX_EP_NUM, + .ram_bits = MPFS_MUSB_RAM_BITS, +}; + +static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci) +{ + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + struct musb *musb = __hci; + + spin_lock_irqsave(&musb->lock, flags); + + musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); + musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); + musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); + + if (musb->int_usb || musb->int_tx || musb->int_rx) { + musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb); + musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx); + musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx); + ret = musb_interrupt(musb); + } + + spin_unlock_irqrestore(&musb->lock, flags); + + return ret; +} + +static void mpfs_musb_set_vbus(struct musb *musb, int is_on) +{ + u8 devctl; + + /* + * HDRC controls CPEN, but beware current surges during device + * connect. They can trigger transient overcurrent conditions + * that must be ignored. + */ + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + if (is_on) { + musb->is_active = 1; + musb->xceiv->otg->default_a = 1; + musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; + devctl |= MUSB_DEVCTL_SESSION; + MUSB_HST_MODE(musb); + } else { + musb->is_active = 0; + + /* + * NOTE: skipping A_WAIT_VFALL -> A_IDLE and + * jumping right to B_IDLE... 
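+		 * (Editor's note: the DEVCTL SESSION bit written below is
+		 * what actually starts or ends the session; the OTG state
+		 * fields are updated alongside it for the core's
+		 * bookkeeping.)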
+ */ + musb->xceiv->otg->default_a = 0; + musb->xceiv->otg->state = OTG_STATE_B_IDLE; + devctl &= ~MUSB_DEVCTL_SESSION; + + MUSB_DEV_MODE(musb); + } + + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + dev_dbg(musb->controller, "VBUS %s, devctl %02x\n", + usb_otg_state_string(musb->xceiv->otg->state), + musb_readb(musb->mregs, MUSB_DEVCTL)); +} + +static int mpfs_musb_init(struct musb *musb) +{ + struct device *dev = musb->controller; + + musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); + if (IS_ERR(musb->xceiv)) { + dev_err(dev, "HS UDC: no transceiver configured\n"); + return PTR_ERR(musb->xceiv); + } + + musb->dyn_fifo = true; + musb->isr = mpfs_musb_interrupt; + + musb_platform_set_vbus(musb, 1); + + return 0; +} + +static const struct musb_platform_ops mpfs_ops = { + .quirks = MUSB_DMA_INVENTRA, + .init = mpfs_musb_init, + .fifo_mode = 2, +#ifdef CONFIG_USB_INVENTRA_DMA + .dma_init = musbhs_dma_controller_create, + .dma_exit = musbhs_dma_controller_destroy, +#endif + .set_vbus = mpfs_musb_set_vbus +}; + +static int mpfs_probe(struct platform_device *pdev) +{ + struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct mpfs_glue *glue; + struct platform_device *musb_pdev; + struct device *dev = &pdev->dev; + struct clk *clk; + int ret; + + glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL); + if (!glue) + return -ENOMEM; + + musb_pdev = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO); + if (!musb_pdev) { + dev_err(dev, "failed to allocate musb device\n"); + return -ENOMEM; + } + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + ret = PTR_ERR(clk); + goto err_phy_release; + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock\n"); + goto err_phy_release; + } + + musb_pdev->dev.parent = dev; + musb_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(39); + musb_pdev->dev.dma_mask = &musb_pdev->dev.coherent_dma_mask; + device_set_of_node_from_dev(&musb_pdev->dev, dev); + + glue->dev = dev; + glue->musb = musb_pdev; + glue->clk = clk; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + ret = -ENOMEM; + goto err_clk_disable; + } + + pdata->config = &mpfs_musb_hdrc_config; + pdata->platform_ops = &mpfs_ops; + + pdata->mode = usb_get_dr_mode(dev); + if (pdata->mode == USB_DR_MODE_UNKNOWN) { + dev_info(dev, "No dr_mode property found, defaulting to otg\n"); + pdata->mode = USB_DR_MODE_OTG; + } + + glue->phy = usb_phy_generic_register(); + if (IS_ERR(glue->phy)) { + dev_err(dev, "failed to register usb-phy %ld\n", + PTR_ERR(glue->phy)); + ret = PTR_ERR(glue->phy); + goto err_clk_disable; + } + + platform_set_drvdata(pdev, glue); + + ret = platform_device_add_resources(musb_pdev, pdev->resource, pdev->num_resources); + if (ret) { + dev_err(dev, "failed to add resources\n"); + goto err_clk_disable; + } + + ret = platform_device_add_data(musb_pdev, pdata, sizeof(*pdata)); + if (ret) { + dev_err(dev, "failed to add platform_data\n"); + goto err_clk_disable; + } + + ret = platform_device_add(musb_pdev); + if (ret) { + dev_err(dev, "failed to register musb device\n"); + goto err_clk_disable; + } + + dev_info(&pdev->dev, "Registered MPFS MUSB driver\n"); + return 0; + +err_clk_disable: + clk_disable_unprepare(clk); + +err_phy_release: + usb_phy_generic_unregister(glue->phy); + platform_device_put(musb_pdev); + return ret; +} + +static int mpfs_remove(struct platform_device *pdev) +{ + struct mpfs_glue *glue = platform_get_drvdata(pdev); + + 
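/* Undo probe: stop the clock, remove the musb child device, drop the PHY. */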
clk_disable_unprepare(glue->clk); + platform_device_unregister(glue->musb); + usb_phy_generic_unregister(pdev); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id mpfs_id_table[] = { + { .compatible = "microchip,mpfs-musb" }, + { } +}; +MODULE_DEVICE_TABLE(of, mpfs_id_table); +#endif + +static struct platform_driver mpfs_musb_driver = { + .probe = mpfs_probe, + .remove = mpfs_remove, + .driver = { + .name = "mpfs-musb", + .of_match_table = of_match_ptr(mpfs_id_table) + }, +}; + +module_platform_driver(mpfs_musb_driver); + +MODULE_DESCRIPTION("PolarFire SoC MUSB Glue Layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c new file mode 100644 index 0000000000..c0f0842d44 --- /dev/null +++ b/drivers/usb/typec/anx7411.c @@ -0,0 +1,1601 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Driver for Analogix ANX7411 USB Type-C and PD controller + * + * Copyright(c) 2022, Analogix Semiconductor. All rights reserved. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TCPC_ADDRESS1 0x58 +#define TCPC_ADDRESS2 0x56 +#define TCPC_ADDRESS3 0x54 +#define TCPC_ADDRESS4 0x52 +#define SPI_ADDRESS1 0x7e +#define SPI_ADDRESS2 0x6e +#define SPI_ADDRESS3 0x64 +#define SPI_ADDRESS4 0x62 + +struct anx7411_i2c_select { + u8 tcpc_address; + u8 spi_address; +}; + +#define VID_ANALOGIX 0x1F29 +#define PID_ANALOGIX 0x7411 + +/* TCPC register define */ + +#define ANALOG_CTRL_10 0xAA + +#define STATUS_LEN 2 +#define ALERT_0 0xCB +#define RECEIVED_MSG BIT(7) +#define SOFTWARE_INT BIT(6) +#define MSG_LEN 32 +#define HEADER_LEN 2 +#define MSG_HEADER 0x00 +#define MSG_TYPE 0x01 +#define MSG_RAWDATA 0x02 +#define MSG_LEN_MASK 0x1F + +#define ALERT_1 0xCC +#define INTP_POW_ON BIT(7) +#define INTP_POW_OFF BIT(6) + +#define VBUS_THRESHOLD_H 0xDD +#define VBUS_THRESHOLD_L 0xDE + +#define FW_CTRL_0 0xF0 +#define UNSTRUCT_VDM_EN BIT(0) +#define DELAY_200MS BIT(1) +#define VSAFE0 0 +#define VSAFE1 BIT(2) +#define VSAFE2 BIT(3) +#define VSAFE3 (BIT(2) | BIT(3)) +#define FRS_EN BIT(7) + +#define FW_PARAM 0xF1 +#define DONGLE_IOP BIT(0) + +#define FW_CTRL_2 0xF7 +#define SINK_CTRL_DIS_FLAG BIT(5) + +/* SPI register define */ +#define OCM_CTRL_0 0x6E +#define OCM_RESET BIT(6) + +#define MAX_VOLTAGE 0xAC +#define MAX_POWER 0xAD +#define MIN_POWER 0xAE + +#define REQUEST_VOLTAGE 0xAF +#define VOLTAGE_UNIT 100 /* mV per unit */ + +#define REQUEST_CURRENT 0xB1 +#define CURRENT_UNIT 50 /* mA per unit */ + +#define CMD_SEND_BUF 0xC0 +#define CMD_RECV_BUF 0xE0 + +#define REQ_VOL_20V_IN_100MV 0xC8 +#define REQ_CUR_2_25A_IN_50MA 0x2D +#define REQ_CUR_3_25A_IN_50MA 0x41 + +#define DEF_5V 5000 +#define DEF_1_5A 1500 + +#define LOBYTE(w) ((u8)((w) & 0xFF)) +#define HIBYTE(w) ((u8)(((u16)(w) >> 8) & 0xFF)) + +enum anx7411_typec_message_type { + TYPE_SRC_CAP = 0x00, + TYPE_SNK_CAP = 0x01, + TYPE_SNK_IDENTITY = 0x02, + TYPE_SVID = 0x03, + TYPE_SET_SNK_DP_CAP = 0x08, + TYPE_PSWAP_REQ = 0x10, + TYPE_DSWAP_REQ = 0x11, + TYPE_VDM = 0x14, + TYPE_OBJ_REQ = 0x16, + TYPE_DP_ALT_ENTER = 0x19, + TYPE_DP_DISCOVER_MODES_INFO = 0x27, + TYPE_GET_DP_CONFIG = 0x29, + TYPE_DP_CONFIGURE = 0x2A, + TYPE_GET_DP_DISCOVER_MODES_INFO = 0x2E, + TYPE_GET_DP_ALT_ENTER = 0x2F, +}; + +#define FW_CTRL_1 0xB2 +#define AUTO_PD_EN BIT(1) +#define TRYSRC_EN BIT(2) +#define TRYSNK_EN BIT(3) +#define FORCE_SEND_RDO BIT(6) + +#define 
FW_VER 0xB4 +#define FW_SUBVER 0xB5 + +#define INT_MASK 0xB6 +#define INT_STS 0xB7 +#define OCM_BOOT_UP BIT(0) +#define OC_OV_EVENT BIT(1) +#define VCONN_CHANGE BIT(2) +#define VBUS_CHANGE BIT(3) +#define CC_STATUS_CHANGE BIT(4) +#define DATA_ROLE_CHANGE BIT(5) +#define PR_CONSUMER_GOT_POWER BIT(6) +#define HPD_STATUS_CHANGE BIT(7) + +#define SYSTEM_STSTUS 0xB8 +/* 0: SINK off; 1: SINK on */ +#define SINK_STATUS BIT(1) +/* 0: VCONN off; 1: VCONN on*/ +#define VCONN_STATUS BIT(2) +/* 0: vbus off; 1: vbus on*/ +#define VBUS_STATUS BIT(3) +/* 1: host; 0:device*/ +#define DATA_ROLE BIT(5) +/* 0: Chunking; 1: Unchunked*/ +#define SUPPORT_UNCHUNKING BIT(6) +/* 0: HPD low; 1: HPD high*/ +#define HPD_STATUS BIT(7) + +#define DATA_DFP 1 +#define DATA_UFP 2 +#define POWER_SOURCE 1 +#define POWER_SINK 2 + +#define CC_STATUS 0xB9 +#define CC1_RD BIT(0) +#define CC2_RD BIT(4) +#define CC1_RA BIT(1) +#define CC2_RA BIT(5) +#define CC1_RD BIT(0) +#define CC1_RP(cc) (((cc) >> 2) & 0x03) +#define CC2_RP(cc) (((cc) >> 6) & 0x03) + +#define PD_REV_INIT 0xBA + +#define PD_EXT_MSG_CTRL 0xBB +#define SRC_CAP_EXT_REPLY BIT(0) +#define MANUFACTURER_INFO_REPLY BIT(1) +#define BATTERY_STS_REPLY BIT(2) +#define BATTERY_CAP_REPLY BIT(3) +#define ALERT_REPLY BIT(4) +#define STATUS_REPLY BIT(5) +#define PPS_STATUS_REPLY BIT(6) +#define SNK_CAP_EXT_REPLY BIT(7) + +#define NO_CONNECT 0x00 +#define USB3_1_CONNECTED 0x01 +#define DP_ALT_4LANES 0x02 +#define USB3_1_DP_2LANES 0x03 +#define CC1_CONNECTED 0x01 +#define CC2_CONNECTED 0x02 +#define SELECT_PIN_ASSIGMENT_C 0x04 +#define SELECT_PIN_ASSIGMENT_D 0x08 +#define SELECT_PIN_ASSIGMENT_E 0x10 +#define SELECT_PIN_ASSIGMENT_U 0x00 +#define REDRIVER_ADDRESS 0x20 +#define REDRIVER_OFFSET 0x00 + +#define DP_SVID 0xFF01 +#define VDM_ACK 0x40 +#define VDM_CMD_RES 0x00 +#define VDM_CMD_DIS_ID 0x01 +#define VDM_CMD_DIS_SVID 0x02 +#define VDM_CMD_DIS_MOD 0x03 +#define VDM_CMD_ENTER_MODE 0x04 +#define VDM_CMD_EXIT_MODE 0x05 +#define VDM_CMD_ATTENTION 0x06 +#define VDM_CMD_GET_STS 0x10 +#define VDM_CMD_AND_ACK_MASK 0x5F + +#define MAX_ALTMODE 2 + +#define HAS_SOURCE_CAP BIT(0) +#define HAS_SINK_CAP BIT(1) +#define HAS_SINK_WATT BIT(2) + +enum anx7411_psy_state { + /* copy from drivers/usb/typec/tcpm */ + ANX7411_PSY_OFFLINE = 0, + ANX7411_PSY_FIXED_ONLINE, + + /* private */ + /* PD keep in, but disconnct power to bq25700, + * this state can be active when higher capacity adapter plug in, + * and change to ONLINE state when higher capacity adapter plug out + */ + ANX7411_PSY_HANG = 0xff, +}; + +struct typec_params { + int request_current; /* ma */ + int request_voltage; /* mv */ + int cc_connect; + int cc_orientation_valid; + int cc_status; + int data_role; + int power_role; + int vconn_role; + int dp_altmode_enter; + int cust_altmode_enter; + struct usb_role_switch *role_sw; + struct typec_port *port; + struct typec_partner *partner; + struct typec_mux_dev *typec_mux; + struct typec_switch_dev *typec_switch; + struct typec_altmode *amode[MAX_ALTMODE]; + struct typec_altmode *port_amode[MAX_ALTMODE]; + struct typec_displayport_data data; + int pin_assignment; + struct typec_capability caps; + u32 src_pdo[PDO_MAX_OBJECTS]; + u32 sink_pdo[PDO_MAX_OBJECTS]; + u8 caps_flags; + u8 src_pdo_nr; + u8 sink_pdo_nr; + u8 sink_watt; + u8 sink_voltage; +}; + +#define MAX_BUF_LEN 30 +struct fw_msg { + u8 msg_len; + u8 msg_type; + u8 buf[MAX_BUF_LEN]; +} __packed; + +struct anx7411_data { + int fw_version; + int fw_subversion; + struct i2c_client *tcpc_client; + struct i2c_client *spi_client; + 
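/* Mailbox buffers for messages exchanged with the on-chip firmware (OCM) */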
struct fw_msg send_msg; + struct fw_msg recv_msg; + struct gpio_desc *intp_gpiod; + struct fwnode_handle *connector_fwnode; + struct typec_params typec; + int intp_irq; + struct work_struct work; + struct workqueue_struct *workqueue; + /* Lock for interrupt work queue */ + struct mutex lock; + + enum anx7411_psy_state psy_online; + enum power_supply_usb_type usb_type; + struct power_supply *psy; + struct power_supply_desc psy_desc; + struct device *dev; +}; + +static u8 snk_identity[] = { + LOBYTE(VID_ANALOGIX), HIBYTE(VID_ANALOGIX), 0x00, 0x82, /* snk_id_hdr */ + 0x00, 0x00, 0x00, 0x00, /* snk_cert */ + 0x00, 0x00, LOBYTE(PID_ANALOGIX), HIBYTE(PID_ANALOGIX), /* 5snk_ama */ +}; + +static u8 dp_caps[4] = {0xC6, 0x00, 0x00, 0x00}; + +static int anx7411_reg_read(struct i2c_client *client, + u8 reg_addr) +{ + return i2c_smbus_read_byte_data(client, reg_addr); +} + +static int anx7411_reg_block_read(struct i2c_client *client, + u8 reg_addr, u8 len, u8 *buf) +{ + return i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf); +} + +static int anx7411_reg_write(struct i2c_client *client, + u8 reg_addr, u8 reg_val) +{ + return i2c_smbus_write_byte_data(client, reg_addr, reg_val); +} + +static int anx7411_reg_block_write(struct i2c_client *client, + u8 reg_addr, u8 len, u8 *buf) +{ + return i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf); +} + +static struct anx7411_i2c_select anx7411_i2c_addr[] = { + {TCPC_ADDRESS1, SPI_ADDRESS1}, + {TCPC_ADDRESS2, SPI_ADDRESS2}, + {TCPC_ADDRESS3, SPI_ADDRESS3}, + {TCPC_ADDRESS4, SPI_ADDRESS4}, +}; + +static int anx7411_detect_power_mode(struct anx7411_data *ctx) +{ + int ret; + int mode; + + ret = anx7411_reg_read(ctx->spi_client, REQUEST_CURRENT); + if (ret < 0) + return ret; + + ctx->typec.request_current = ret * CURRENT_UNIT; /* 50ma per unit */ + + ret = anx7411_reg_read(ctx->spi_client, REQUEST_VOLTAGE); + if (ret < 0) + return ret; + + ctx->typec.request_voltage = ret * VOLTAGE_UNIT; /* 100mv per unit */ + + if (ctx->psy_online == ANX7411_PSY_OFFLINE) { + ctx->psy_online = ANX7411_PSY_FIXED_ONLINE; + ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD; + power_supply_changed(ctx->psy); + } + + if (!ctx->typec.cc_orientation_valid) + return 0; + + if (ctx->typec.cc_connect == CC1_CONNECTED) + mode = CC1_RP(ctx->typec.cc_status); + else + mode = CC2_RP(ctx->typec.cc_status); + if (mode) { + typec_set_pwr_opmode(ctx->typec.port, mode - 1); + return 0; + } + + typec_set_pwr_opmode(ctx->typec.port, TYPEC_PWR_MODE_PD); + + return 0; +} + +static int anx7411_register_partner(struct anx7411_data *ctx, + int pd, int accessory) +{ + struct typec_partner_desc desc; + struct typec_partner *partner; + + if (ctx->typec.partner) + return 0; + + desc.usb_pd = pd; + desc.accessory = accessory; + desc.identity = NULL; + partner = typec_register_partner(ctx->typec.port, &desc); + if (IS_ERR(partner)) + return PTR_ERR(partner); + + ctx->typec.partner = partner; + + return 0; +} + +static int anx7411_detect_cc_orientation(struct anx7411_data *ctx) +{ + struct device *dev = &ctx->spi_client->dev; + int ret; + int cc1_rd, cc2_rd; + int cc1_ra, cc2_ra; + int cc1_rp, cc2_rp; + + ret = anx7411_reg_read(ctx->spi_client, CC_STATUS); + if (ret < 0) + return ret; + + ctx->typec.cc_status = ret; + + cc1_rd = ret & CC1_RD ? 1 : 0; + cc2_rd = ret & CC2_RD ? 1 : 0; + cc1_ra = ret & CC1_RA ? 1 : 0; + cc2_ra = ret & CC2_RA ? 
1 : 0; + cc1_rp = CC1_RP(ret); + cc2_rp = CC2_RP(ret); + + /* Debug cable, nothing to do */ + if (cc1_rd && cc2_rd) { + ctx->typec.cc_orientation_valid = 0; + return anx7411_register_partner(ctx, 0, TYPEC_ACCESSORY_DEBUG); + } + + if (cc1_ra && cc2_ra) { + ctx->typec.cc_orientation_valid = 0; + return anx7411_register_partner(ctx, 0, TYPEC_ACCESSORY_AUDIO); + } + + ctx->typec.cc_orientation_valid = 1; + + ret = anx7411_register_partner(ctx, 1, TYPEC_ACCESSORY_NONE); + if (ret) { + dev_err(dev, "register partner\n"); + return ret; + } + + if (cc1_rd || cc1_rp) { + typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_NORMAL); + ctx->typec.cc_connect = CC1_CONNECTED; + } + + if (cc2_rd || cc2_rp) { + typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_REVERSE); + ctx->typec.cc_connect = CC2_CONNECTED; + } + + return 0; +} + +static int anx7411_set_mux(struct anx7411_data *ctx, int pin_assignment) +{ + int mode = TYPEC_STATE_SAFE; + + switch (pin_assignment) { + case SELECT_PIN_ASSIGMENT_U: + /* default 4 line USB 3.1 */ + mode = TYPEC_STATE_MODAL; + break; + case SELECT_PIN_ASSIGMENT_C: + case SELECT_PIN_ASSIGMENT_E: + /* 4 line DP */ + mode = TYPEC_STATE_SAFE; + break; + case SELECT_PIN_ASSIGMENT_D: + /* 2 line DP, 2 line USB */ + mode = TYPEC_MODE_USB3; + break; + default: + mode = TYPEC_STATE_SAFE; + break; + } + + ctx->typec.pin_assignment = pin_assignment; + + return typec_set_mode(ctx->typec.port, mode); +} + +static int anx7411_set_usb_role(struct anx7411_data *ctx, enum usb_role role) +{ + if (!ctx->typec.role_sw) + return 0; + + return usb_role_switch_set_role(ctx->typec.role_sw, role); +} + +static int anx7411_data_role_detect(struct anx7411_data *ctx) +{ + int ret; + + ret = anx7411_reg_read(ctx->spi_client, SYSTEM_STSTUS); + if (ret < 0) + return ret; + + ctx->typec.data_role = (ret & DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE; + ctx->typec.vconn_role = (ret & VCONN_STATUS) ? TYPEC_SOURCE : TYPEC_SINK; + + typec_set_data_role(ctx->typec.port, ctx->typec.data_role); + + typec_set_vconn_role(ctx->typec.port, ctx->typec.vconn_role); + + if (ctx->typec.data_role == TYPEC_HOST) + return anx7411_set_usb_role(ctx, USB_ROLE_HOST); + + return anx7411_set_usb_role(ctx, USB_ROLE_DEVICE); +} + +static int anx7411_power_role_detect(struct anx7411_data *ctx) +{ + int ret; + + ret = anx7411_reg_read(ctx->spi_client, SYSTEM_STSTUS); + if (ret < 0) + return ret; + + ctx->typec.power_role = (ret & SINK_STATUS) ? 
TYPEC_SINK : TYPEC_SOURCE; + + if (ctx->typec.power_role == TYPEC_SOURCE) { + ctx->typec.request_current = DEF_1_5A; + ctx->typec.request_voltage = DEF_5V; + } + + typec_set_pwr_role(ctx->typec.port, ctx->typec.power_role); + + return 0; +} + +static int anx7411_cc_status_detect(struct anx7411_data *ctx) +{ + anx7411_detect_cc_orientation(ctx); + anx7411_detect_power_mode(ctx); + + return 0; +} + +static void anx7411_partner_unregister_altmode(struct anx7411_data *ctx) +{ + int i; + + ctx->typec.dp_altmode_enter = 0; + ctx->typec.cust_altmode_enter = 0; + + for (i = 0; i < MAX_ALTMODE; i++) + if (ctx->typec.amode[i]) { + typec_unregister_altmode(ctx->typec.amode[i]); + ctx->typec.amode[i] = NULL; + } + + ctx->typec.pin_assignment = 0; +} + +static int anx7411_typec_register_altmode(struct anx7411_data *ctx, + int svid, int vdo) +{ + struct device *dev = &ctx->spi_client->dev; + struct typec_altmode_desc desc; + int err; + int i; + + desc.svid = svid; + desc.vdo = vdo; + + for (i = 0; i < MAX_ALTMODE; i++) + if (!ctx->typec.amode[i]) + break; + + desc.mode = i + 1; /* start with 1 */ + + if (i >= MAX_ALTMODE) { + dev_err(dev, "no altmode space for registering\n"); + return -ENOMEM; + } + + ctx->typec.amode[i] = typec_partner_register_altmode(ctx->typec.partner, + &desc); + if (IS_ERR(ctx->typec.amode[i])) { + dev_err(dev, "failed to register altmode\n"); + err = PTR_ERR(ctx->typec.amode[i]); + ctx->typec.amode[i] = NULL; + return err; + } + + return 0; +} + +static void anx7411_unregister_partner(struct anx7411_data *ctx) +{ + if (ctx->typec.partner) { + typec_unregister_partner(ctx->typec.partner); + ctx->typec.partner = NULL; + } +} + +static int anx7411_update_altmode(struct anx7411_data *ctx, int svid) +{ + int i; + + if (svid == DP_SVID) + ctx->typec.dp_altmode_enter = 1; + else + ctx->typec.cust_altmode_enter = 1; + + for (i = 0; i < MAX_ALTMODE; i++) { + if (!ctx->typec.amode[i]) + continue; + + if (ctx->typec.amode[i]->svid == svid) { + typec_altmode_update_active(ctx->typec.amode[i], true); + typec_altmode_notify(ctx->typec.amode[i], + ctx->typec.pin_assignment, + &ctx->typec.data); + break; + } + } + + return 0; +} + +static int anx7411_register_altmode(struct anx7411_data *ctx, + bool dp_altmode, u8 *buf) +{ + int ret; + int svid; + int mid; + + if (!ctx->typec.partner) + return 0; + + svid = DP_SVID; + if (dp_altmode) { + mid = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); + + return anx7411_typec_register_altmode(ctx, svid, mid); + } + + svid = (buf[3] << 8) | buf[2]; + if ((buf[0] & VDM_CMD_AND_ACK_MASK) != (VDM_ACK | VDM_CMD_ENTER_MODE)) + return anx7411_update_altmode(ctx, svid); + + if ((buf[0] & VDM_CMD_AND_ACK_MASK) != (VDM_ACK | VDM_CMD_DIS_MOD)) + return 0; + + mid = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24); + + ret = anx7411_typec_register_altmode(ctx, svid, mid); + if (ctx->typec.cust_altmode_enter) + ret |= anx7411_update_altmode(ctx, svid); + + return ret; +} + +static int anx7411_parse_cmd(struct anx7411_data *ctx, u8 type, u8 *buf, u8 len) +{ + struct device *dev = &ctx->spi_client->dev; + u8 cur_50ma, vol_100mv; + + switch (type) { + case TYPE_SRC_CAP: + cur_50ma = anx7411_reg_read(ctx->spi_client, REQUEST_CURRENT); + vol_100mv = anx7411_reg_read(ctx->spi_client, REQUEST_VOLTAGE); + + ctx->typec.request_voltage = vol_100mv * VOLTAGE_UNIT; + ctx->typec.request_current = cur_50ma * CURRENT_UNIT; + + ctx->psy_online = ANX7411_PSY_FIXED_ONLINE; + ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD; + power_supply_changed(ctx->psy); + break; + case 
TYPE_SNK_CAP: + break; + case TYPE_SVID: + break; + case TYPE_SNK_IDENTITY: + break; + case TYPE_GET_DP_ALT_ENTER: + /* DP alt mode enter success */ + if (buf[0]) + anx7411_update_altmode(ctx, DP_SVID); + break; + case TYPE_DP_ALT_ENTER: + /* Update DP altmode */ + anx7411_update_altmode(ctx, DP_SVID); + break; + case TYPE_OBJ_REQ: + anx7411_detect_power_mode(ctx); + break; + case TYPE_DP_CONFIGURE: + anx7411_set_mux(ctx, buf[1]); + break; + case TYPE_DP_DISCOVER_MODES_INFO: + /* Make sure discover modes valid */ + if (buf[0] | buf[1]) + /* Register DP Altmode */ + anx7411_register_altmode(ctx, 1, buf); + break; + case TYPE_VDM: + /* Register other altmode */ + anx7411_register_altmode(ctx, 0, buf); + break; + default: + dev_err(dev, "ignore message(0x%.02x).\n", type); + break; + } + + return 0; +} + +static u8 checksum(struct device *dev, u8 *buf, u8 len) +{ + u8 ret = 0; + u8 i; + + for (i = 0; i < len; i++) + ret += buf[i]; + + return ret; +} + +static int anx7411_read_msg_ctrl_status(struct i2c_client *client) +{ + return anx7411_reg_read(client, CMD_SEND_BUF); +} + +static int anx7411_wait_msg_empty(struct i2c_client *client) +{ + int val; + + return readx_poll_timeout(anx7411_read_msg_ctrl_status, + client, val, (val < 0) || (val == 0), + 2000, 2000 * 150); +} + +static int anx7411_send_msg(struct anx7411_data *ctx, u8 type, u8 *buf, u8 size) +{ + struct device *dev = &ctx->spi_client->dev; + struct fw_msg *msg = &ctx->send_msg; + u8 crc; + int ret; + + size = min_t(u8, size, (u8)MAX_BUF_LEN); + memcpy(msg->buf, buf, size); + msg->msg_type = type; + /* msg len equals buffer length + msg_type */ + msg->msg_len = size + 1; + + /* Do CRC check for all buffer data and msg_len and msg_type */ + crc = checksum(dev, (u8 *)msg, size + HEADER_LEN); + msg->buf[size] = 0 - crc; + + ret = anx7411_wait_msg_empty(ctx->spi_client); + if (ret) + return ret; + + ret = anx7411_reg_block_write(ctx->spi_client, + CMD_SEND_BUF + 1, size + HEADER_LEN, + &msg->msg_type); + ret |= anx7411_reg_write(ctx->spi_client, CMD_SEND_BUF, + msg->msg_len); + return ret; +} + +static int anx7411_process_cmd(struct anx7411_data *ctx) +{ + struct device *dev = &ctx->spi_client->dev; + struct fw_msg *msg = &ctx->recv_msg; + u8 len; + u8 crc; + int ret; + + /* Read message from firmware */ + ret = anx7411_reg_block_read(ctx->spi_client, CMD_RECV_BUF, + MSG_LEN, (u8 *)msg); + if (ret < 0) + return 0; + + if (!msg->msg_len) + return 0; + + ret = anx7411_reg_write(ctx->spi_client, CMD_RECV_BUF, 0); + if (ret) + return ret; + + len = msg->msg_len & MSG_LEN_MASK; + crc = checksum(dev, (u8 *)msg, len + HEADER_LEN); + if (crc) { + dev_err(dev, "message error crc(0x%.02x)\n", crc); + return -ERANGE; + } + + return anx7411_parse_cmd(ctx, msg->msg_type, msg->buf, len - 1); +} + +static void anx7411_translate_payload(struct device *dev, __le32 *payload, + u32 *pdo, int nr, const char *type) +{ + int i; + + if (nr > PDO_MAX_OBJECTS) { + dev_err(dev, "nr(%d) exceed PDO_MAX_OBJECTS(%d)\n", + nr, PDO_MAX_OBJECTS); + + return; + } + + for (i = 0; i < nr; i++) + payload[i] = cpu_to_le32(pdo[i]); +} + +static int anx7411_config(struct anx7411_data *ctx) +{ + struct device *dev = &ctx->spi_client->dev; + struct typec_params *typecp = &ctx->typec; + __le32 payload[PDO_MAX_OBJECTS]; + int ret; + + /* Config PD FW work under PD 2.0 */ + ret = anx7411_reg_write(ctx->spi_client, PD_REV_INIT, PD_REV20); + ret |= anx7411_reg_write(ctx->tcpc_client, FW_CTRL_0, + UNSTRUCT_VDM_EN | DELAY_200MS | + VSAFE1 | FRS_EN); + ret |= 
anx7411_reg_write(ctx->spi_client, FW_CTRL_1, + AUTO_PD_EN | FORCE_SEND_RDO); + + /* Set VBUS current threshold */ + ret |= anx7411_reg_write(ctx->tcpc_client, VBUS_THRESHOLD_H, 0xff); + ret |= anx7411_reg_write(ctx->tcpc_client, VBUS_THRESHOLD_L, 0x03); + + /* Fix dongle compatible issue */ + ret |= anx7411_reg_write(ctx->tcpc_client, FW_PARAM, + anx7411_reg_read(ctx->tcpc_client, FW_PARAM) | + DONGLE_IOP); + ret |= anx7411_reg_write(ctx->spi_client, INT_MASK, 0); + + ret |= anx7411_reg_write(ctx->spi_client, PD_EXT_MSG_CTRL, 0xFF); + if (ret) + return ret; + + if (typecp->caps_flags & HAS_SOURCE_CAP) { + anx7411_translate_payload(dev, payload, typecp->src_pdo, + typecp->src_pdo_nr, "source"); + anx7411_send_msg(ctx, TYPE_SRC_CAP, (u8 *)&payload, + typecp->src_pdo_nr * 4); + anx7411_send_msg(ctx, TYPE_SNK_IDENTITY, snk_identity, + sizeof(snk_identity)); + anx7411_send_msg(ctx, TYPE_SET_SNK_DP_CAP, dp_caps, + sizeof(dp_caps)); + } + + if (typecp->caps_flags & HAS_SINK_CAP) { + anx7411_translate_payload(dev, payload, typecp->sink_pdo, + typecp->sink_pdo_nr, "sink"); + anx7411_send_msg(ctx, TYPE_SNK_CAP, (u8 *)&payload, + typecp->sink_pdo_nr * 4); + } + + if (typecp->caps_flags & HAS_SINK_WATT) { + if (typecp->sink_watt) { + ret |= anx7411_reg_write(ctx->spi_client, MAX_POWER, + typecp->sink_watt); + /* Set min power to 1W */ + ret |= anx7411_reg_write(ctx->spi_client, MIN_POWER, 2); + } + + if (typecp->sink_voltage) + ret |= anx7411_reg_write(ctx->spi_client, MAX_VOLTAGE, + typecp->sink_voltage); + if (ret) + return ret; + } + + if (!typecp->caps_flags) + usleep_range(5000, 6000); + + ctx->fw_version = anx7411_reg_read(ctx->spi_client, FW_VER); + ctx->fw_subversion = anx7411_reg_read(ctx->spi_client, FW_SUBVER); + + return 0; +} + +static void anx7411_chip_standby(struct anx7411_data *ctx) +{ + int ret; + u8 cc1, cc2; + struct device *dev = &ctx->spi_client->dev; + + ret = anx7411_reg_write(ctx->spi_client, OCM_CTRL_0, + anx7411_reg_read(ctx->spi_client, OCM_CTRL_0) | + OCM_RESET); + ret |= anx7411_reg_write(ctx->tcpc_client, ANALOG_CTRL_10, 0x80); + /* Set TCPC to RD and DRP enable */ + cc1 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT; + cc2 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT; + ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_ROLE_CTRL, + TCPC_ROLE_CTRL_DRP | cc1 | cc2); + + /* Send DRP toggle command */ + ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_COMMAND, + TCPC_CMD_LOOK4CONNECTION); + + /* Send TCPC enter standby command */ + ret |= anx7411_reg_write(ctx->tcpc_client, + TCPC_COMMAND, TCPC_CMD_I2C_IDLE); + if (ret) + dev_err(dev, "Chip standby failed\n"); +} + +static void anx7411_work_func(struct work_struct *work) +{ + int ret; + u8 buf[STATUS_LEN]; + u8 int_change; /* Interrupt change */ + u8 int_status; /* Firmware status update */ + u8 alert0, alert1; /* Interrupt alert source */ + struct anx7411_data *ctx = container_of(work, struct anx7411_data, work); + struct device *dev = &ctx->spi_client->dev; + + mutex_lock(&ctx->lock); + + /* Read interrupt change status */ + ret = anx7411_reg_block_read(ctx->spi_client, INT_STS, STATUS_LEN, buf); + if (ret < 0) { + /* Power standby mode, just return */ + goto unlock; + } + int_change = buf[0]; + int_status = buf[1]; + + /* Read alert register */ + ret = anx7411_reg_block_read(ctx->tcpc_client, ALERT_0, STATUS_LEN, buf); + if (ret < 0) + goto unlock; + + alert0 = buf[0]; + alert1 = buf[1]; + + /* Clear interrupt and alert status */ + ret = anx7411_reg_write(ctx->spi_client, INT_STS, 0); + ret |= 
anx7411_reg_write(ctx->tcpc_client, ALERT_0, alert0); + ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_1, alert1); + if (ret) + goto unlock; + + if (alert1 & INTP_POW_OFF) { + anx7411_partner_unregister_altmode(ctx); + if (anx7411_set_usb_role(ctx, USB_ROLE_NONE)) + dev_err(dev, "Set usb role\n"); + anx7411_unregister_partner(ctx); + ctx->psy_online = ANX7411_PSY_OFFLINE; + ctx->usb_type = POWER_SUPPLY_USB_TYPE_C; + ctx->typec.request_voltage = 0; + ctx->typec.request_current = 0; + power_supply_changed(ctx->psy); + anx7411_chip_standby(ctx); + goto unlock; + } + + if ((alert0 & SOFTWARE_INT) && (int_change & OCM_BOOT_UP)) { + if (anx7411_config(ctx)) + dev_err(dev, "Config failed\n"); + if (anx7411_data_role_detect(ctx)) + dev_err(dev, "set PD data role\n"); + if (anx7411_power_role_detect(ctx)) + dev_err(dev, "set PD power role\n"); + anx7411_set_mux(ctx, SELECT_PIN_ASSIGMENT_C); + } + + if (alert0 & RECEIVED_MSG) + anx7411_process_cmd(ctx); + + ret = (int_status & DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE; + if (ctx->typec.data_role != ret) + if (anx7411_data_role_detect(ctx)) + dev_err(dev, "set PD data role\n"); + + ret = (int_status & SINK_STATUS) ? TYPEC_SINK : TYPEC_SOURCE; + if (ctx->typec.power_role != ret) + if (anx7411_power_role_detect(ctx)) + dev_err(dev, "set PD power role\n"); + + if ((alert0 & SOFTWARE_INT) && (int_change & CC_STATUS_CHANGE)) + anx7411_cc_status_detect(ctx); + +unlock: + mutex_unlock(&ctx->lock); +} + +static irqreturn_t anx7411_intr_isr(int irq, void *data) +{ + struct anx7411_data *ctx = (struct anx7411_data *)data; + + queue_work(ctx->workqueue, &ctx->work); + + return IRQ_HANDLED; +} + +static int anx7411_register_i2c_dummy_clients(struct anx7411_data *ctx, + struct i2c_client *client) +{ + int i; + u8 spi_addr; + + for (i = 0; i < ARRAY_SIZE(anx7411_i2c_addr); i++) { + if (client->addr == (anx7411_i2c_addr[i].tcpc_address >> 1)) { + spi_addr = anx7411_i2c_addr[i].spi_address >> 1; + ctx->spi_client = i2c_new_dummy_device(client->adapter, + spi_addr); + if (!IS_ERR(ctx->spi_client)) + return 0; + } + } + + dev_err(&client->dev, "unable to get SPI slave\n"); + return -ENOMEM; +} + +static void anx7411_port_unregister_altmodes(struct typec_altmode **adev) +{ + int i; + + for (i = 0; i < MAX_ALTMODE; i++) + if (adev[i]) { + typec_unregister_altmode(adev[i]); + adev[i] = NULL; + } +} + +static int anx7411_usb_mux_set(struct typec_mux_dev *mux, + struct typec_mux_state *state) +{ + struct anx7411_data *ctx = typec_mux_get_drvdata(mux); + struct device *dev = &ctx->spi_client->dev; + int has_dp; + + has_dp = (state->alt && state->alt->svid == USB_TYPEC_DP_SID && + state->alt->mode == USB_TYPEC_DP_MODE); + if (!has_dp) + dev_err(dev, "dp altmode not register\n"); + + return 0; +} + +static int anx7411_usb_set_orientation(struct typec_switch_dev *sw, + enum typec_orientation orientation) +{ + /* No need set */ + + return 0; +} + +static int anx7411_register_switch(struct anx7411_data *ctx, + struct device *dev, + struct fwnode_handle *fwnode) +{ + struct typec_switch_desc sw_desc = { }; + + sw_desc.fwnode = fwnode; + sw_desc.drvdata = ctx; + sw_desc.name = fwnode_get_name(fwnode); + sw_desc.set = anx7411_usb_set_orientation; + + ctx->typec.typec_switch = typec_switch_register(dev, &sw_desc); + if (IS_ERR(ctx->typec.typec_switch)) { + dev_err(dev, "switch register failed\n"); + return PTR_ERR(ctx->typec.typec_switch); + } + + return 0; +} + +static int anx7411_register_mux(struct anx7411_data *ctx, + struct device *dev, + struct fwnode_handle *fwnode) +{ + 
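+/*
+ * Note on anx7411_register_i2c_dummy_clients() above: the address
+ * table stores 8-bit (write) bus addresses while the I2C core uses
+ * 7-bit addresses, hence the ">> 1" on both sides of the comparison.
+ * For a hypothetical entry { .tcpc_address = 0x58, .spi_address = 0x7e },
+ * a client probed at 7-bit 0x2c gets a dummy device registered at
+ * 7-bit 0x3f for the SPI register page.
+ */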
struct typec_mux_desc mux_desc = { }; + + mux_desc.fwnode = fwnode; + mux_desc.drvdata = ctx; + mux_desc.name = fwnode_get_name(fwnode); + mux_desc.set = anx7411_usb_mux_set; + + ctx->typec.typec_mux = typec_mux_register(dev, &mux_desc); + if (IS_ERR(ctx->typec.typec_mux)) { + dev_err(dev, "mux register failed\n"); + return PTR_ERR(ctx->typec.typec_mux); + } + + return 0; +} + +static void anx7411_unregister_mux(struct anx7411_data *ctx) +{ + if (ctx->typec.typec_mux) { + typec_mux_unregister(ctx->typec.typec_mux); + ctx->typec.typec_mux = NULL; + } +} + +static void anx7411_unregister_switch(struct anx7411_data *ctx) +{ + if (ctx->typec.typec_switch) { + typec_switch_unregister(ctx->typec.typec_switch); + ctx->typec.typec_switch = NULL; + } +} + +static int anx7411_typec_switch_probe(struct anx7411_data *ctx, + struct device *dev) +{ + int ret; + struct device_node *node; + + node = of_find_node_by_name(dev->of_node, "orientation_switch"); + if (!node) + return 0; + + ret = anx7411_register_switch(ctx, dev, &node->fwnode); + if (ret) { + dev_err(dev, "failed register switch"); + return ret; + } + + node = of_find_node_by_name(dev->of_node, "mode_switch"); + if (!node) { + dev_err(dev, "no typec mux exist"); + ret = -ENODEV; + goto unregister_switch; + } + + ret = anx7411_register_mux(ctx, dev, &node->fwnode); + if (ret) { + dev_err(dev, "failed register mode switch"); + ret = -ENODEV; + goto unregister_switch; + } + + return 0; + +unregister_switch: + anx7411_unregister_switch(ctx); + + return ret; +} + +static int anx7411_typec_port_probe(struct anx7411_data *ctx, + struct device *dev) +{ + struct typec_capability *cap = &ctx->typec.caps; + struct typec_params *typecp = &ctx->typec; + struct fwnode_handle *fwnode; + const char *buf; + int ret, i; + + fwnode = device_get_named_child_node(dev, "connector"); + if (!fwnode) + return -EINVAL; + + ret = fwnode_property_read_string(fwnode, "power-role", &buf); + if (ret) { + dev_err(dev, "power-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_port_power_role(buf); + if (ret < 0) + return ret; + cap->type = ret; + + ret = fwnode_property_read_string(fwnode, "data-role", &buf); + if (ret) { + dev_err(dev, "data-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_port_data_role(buf); + if (ret < 0) + return ret; + cap->data = ret; + + ret = fwnode_property_read_string(fwnode, "try-power-role", &buf); + if (ret) { + dev_err(dev, "try-power-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_power_role(buf); + if (ret < 0) + return ret; + cap->prefer_role = ret; + + /* Get source pdos */ + ret = fwnode_property_count_u32(fwnode, "source-pdos"); + if (ret > 0) { + typecp->src_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS); + ret = fwnode_property_read_u32_array(fwnode, "source-pdos", + typecp->src_pdo, + typecp->src_pdo_nr); + if (ret < 0) { + dev_err(dev, "source cap validate failed: %d\n", ret); + return -EINVAL; + } + + typecp->caps_flags |= HAS_SOURCE_CAP; + } + + ret = fwnode_property_count_u32(fwnode, "sink-pdos"); + if (ret > 0) { + typecp->sink_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS); + ret = fwnode_property_read_u32_array(fwnode, "sink-pdos", + typecp->sink_pdo, + typecp->sink_pdo_nr); + if (ret < 0) { + dev_err(dev, "sink cap validate failed: %d\n", ret); + return -EINVAL; + } + + for (i = 0; i < typecp->sink_pdo_nr; i++) { + ret = 0; + switch (pdo_type(typecp->sink_pdo[i])) { + case PDO_TYPE_FIXED: + ret = pdo_fixed_voltage(typecp->sink_pdo[i]); + break; + case PDO_TYPE_BATT: + case 
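+/*
+ * The fwnode properties parsed above follow the usb-connector binding;
+ * an illustrative (not normative) connector node could carry:
+ *
+ *	power-role = "dual";
+ *	data-role = "dual";
+ *	try-power-role = "sink";
+ *	source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ *	sink-pdos = <PDO_FIXED(5000, 3000, 0) PDO_VAR(5000, 20000, 3000)>;
+ *
+ * using the PDO_* helpers from dt-bindings/usb/pd.h.
+ */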
PDO_TYPE_VAR: + ret = pdo_max_voltage(typecp->sink_pdo[i]); + break; + case PDO_TYPE_APDO: + default: + ret = 0; + break; + } + + /* 100mv per unit */ + typecp->sink_voltage = max(5000, ret) / 100; + } + + typecp->caps_flags |= HAS_SINK_CAP; + } + + if (!fwnode_property_read_u32(fwnode, "op-sink-microwatt", &ret)) { + typecp->sink_watt = ret / 500000; /* 500mw per unit */ + typecp->caps_flags |= HAS_SINK_WATT; + } + + cap->fwnode = fwnode; + + ctx->typec.role_sw = usb_role_switch_get(dev); + if (IS_ERR(ctx->typec.role_sw)) { + dev_err(dev, "USB role switch not found.\n"); + ctx->typec.role_sw = NULL; + } + + ctx->typec.port = typec_register_port(dev, cap); + if (IS_ERR(ctx->typec.port)) { + ret = PTR_ERR(ctx->typec.port); + ctx->typec.port = NULL; + dev_err(dev, "Failed to register type c port %d\n", ret); + return ret; + } + + typec_port_register_altmodes(ctx->typec.port, NULL, ctx, + ctx->typec.port_amode, + MAX_ALTMODE); + return 0; +} + +static int anx7411_typec_check_connection(struct anx7411_data *ctx) +{ + int ret; + + ret = anx7411_reg_read(ctx->spi_client, FW_VER); + if (ret < 0) + return 0; /* No device attached in typec port */ + + /* Clear interrupt and alert status */ + ret = anx7411_reg_write(ctx->spi_client, INT_STS, 0); + ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_0, 0xFF); + ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_1, 0xFF); + if (ret) + return ret; + + ret = anx7411_cc_status_detect(ctx); + ret |= anx7411_power_role_detect(ctx); + ret |= anx7411_data_role_detect(ctx); + ret |= anx7411_set_mux(ctx, SELECT_PIN_ASSIGMENT_C); + if (ret) + return ret; + + ret = anx7411_send_msg(ctx, TYPE_GET_DP_ALT_ENTER, NULL, 0); + ret |= anx7411_send_msg(ctx, TYPE_GET_DP_DISCOVER_MODES_INFO, NULL, 0); + + return ret; +} + +static int __maybe_unused anx7411_runtime_pm_suspend(struct device *dev) +{ + struct anx7411_data *ctx = dev_get_drvdata(dev); + + mutex_lock(&ctx->lock); + + anx7411_partner_unregister_altmode(ctx); + + if (ctx->typec.partner) + anx7411_unregister_partner(ctx); + + mutex_unlock(&ctx->lock); + + return 0; +} + +static int __maybe_unused anx7411_runtime_pm_resume(struct device *dev) +{ + struct anx7411_data *ctx = dev_get_drvdata(dev); + + mutex_lock(&ctx->lock); + /* Detect PD connection */ + if (anx7411_typec_check_connection(ctx)) + dev_err(dev, "check connection"); + + mutex_unlock(&ctx->lock); + + return 0; +} + +static const struct dev_pm_ops anx7411_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(anx7411_runtime_pm_suspend, + anx7411_runtime_pm_resume, NULL) +}; + +static void anx7411_get_gpio_irq(struct anx7411_data *ctx) +{ + struct device *dev = &ctx->tcpc_client->dev; + + ctx->intp_gpiod = devm_gpiod_get_optional(dev, "interrupt", GPIOD_IN); + if (IS_ERR_OR_NULL(ctx->intp_gpiod)) { + dev_err(dev, "no interrupt gpio property\n"); + return; + } + + ctx->intp_irq = gpiod_to_irq(ctx->intp_gpiod); + if (ctx->intp_irq < 0) + dev_err(dev, "failed to get GPIO IRQ\n"); +} + +static enum power_supply_usb_type anx7411_psy_usb_types[] = { + POWER_SUPPLY_USB_TYPE_C, + POWER_SUPPLY_USB_TYPE_PD, + POWER_SUPPLY_USB_TYPE_PD_PPS, +}; + +static enum power_supply_property anx7411_psy_props[] = { + POWER_SUPPLY_PROP_USB_TYPE, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_MAX, + POWER_SUPPLY_PROP_CURRENT_NOW, +}; + +static int anx7411_psy_set_prop(struct power_supply *psy, + enum power_supply_property 
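+/*
+ * Unit arithmetic from anx7411_typec_port_probe() above, worked
+ * through for a hypothetical 9 V fixed sink PDO and a 2.5 W
+ * op-sink-microwatt property:
+ *
+ *	pdo_fixed_voltage(pdo)	-> 9000 (mV)
+ *	max(5000, 9000) / 100	-> 90   (MAX_VOLTAGE, 100 mV units)
+ *	2500000 / 500000	-> 5    (MAX_POWER, 500 mW units)
+ */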
psp, + const union power_supply_propval *val) +{ + struct anx7411_data *ctx = power_supply_get_drvdata(psy); + int ret = 0; + + if (psp == POWER_SUPPLY_PROP_ONLINE) + ctx->psy_online = val->intval; + else + ret = -EINVAL; + + power_supply_changed(ctx->psy); + return ret; +} + +static int anx7411_psy_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + return psp == POWER_SUPPLY_PROP_ONLINE; +} + +static int anx7411_psy_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct anx7411_data *ctx = power_supply_get_drvdata(psy); + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_USB_TYPE: + val->intval = ctx->usb_type; + break; + case POWER_SUPPLY_PROP_ONLINE: + val->intval = ctx->psy_online; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + val->intval = (ctx->psy_online) ? + ctx->typec.request_voltage * 1000 : 0; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_CURRENT_MAX: + val->intval = (ctx->psy_online) ? + ctx->typec.request_current * 1000 : 0; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int anx7411_psy_register(struct anx7411_data *ctx) +{ + struct power_supply_desc *psy_desc = &ctx->psy_desc; + struct power_supply_config psy_cfg = {}; + char *psy_name; + + psy_name = devm_kasprintf(ctx->dev, GFP_KERNEL, "anx7411-source-psy-%s", + dev_name(ctx->dev)); + if (!psy_name) + return -ENOMEM; + + psy_desc->name = psy_name; + psy_desc->type = POWER_SUPPLY_TYPE_USB; + psy_desc->usb_types = anx7411_psy_usb_types; + psy_desc->num_usb_types = ARRAY_SIZE(anx7411_psy_usb_types); + psy_desc->properties = anx7411_psy_props; + psy_desc->num_properties = ARRAY_SIZE(anx7411_psy_props); + + psy_desc->get_property = anx7411_psy_get_prop; + psy_desc->set_property = anx7411_psy_set_prop; + psy_desc->property_is_writeable = anx7411_psy_prop_writeable; + + ctx->usb_type = POWER_SUPPLY_USB_TYPE_C; + ctx->psy = devm_power_supply_register(ctx->dev, psy_desc, &psy_cfg); + + if (IS_ERR(ctx->psy)) + dev_warn(ctx->dev, "unable to register psy\n"); + + return PTR_ERR_OR_ZERO(ctx->psy); +} + +static int anx7411_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct anx7411_data *plat; + struct device *dev = &client->dev; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) + return -ENODEV; + + plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->tcpc_client = client; + i2c_set_clientdata(client, plat); + + mutex_init(&plat->lock); + + ret = anx7411_register_i2c_dummy_clients(plat, client); + if (ret) { + dev_err(dev, "fail to reserve I2C bus\n"); + return ret; + } + + ret = anx7411_typec_switch_probe(plat, dev); + if (ret) { + dev_err(dev, "fail to probe typec switch\n"); + goto free_i2c_dummy; + } + + ret = anx7411_typec_port_probe(plat, dev); + if (ret) { + dev_err(dev, "fail to probe typec property.\n"); + ret = -ENODEV; + goto free_typec_switch; + } + + plat->intp_irq = client->irq; + if (!client->irq) + anx7411_get_gpio_irq(plat); + + if (!plat->intp_irq) { + dev_err(dev, "fail to get interrupt IRQ\n"); + ret = -EINVAL; + goto free_typec_port; + } + + plat->dev = dev; + plat->psy_online = ANX7411_PSY_OFFLINE; + ret = anx7411_psy_register(plat); + if (ret) { + dev_err(dev, "register psy\n"); + goto free_typec_port; + } + + INIT_WORK(&plat->work, anx7411_work_func); + plat->workqueue = 
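+/*
+ * The power-supply class reports voltage in microvolts and current in
+ * microamps, which is why anx7411_psy_get_prop() above multiplies
+ * request_voltage/request_current (apparently kept in mV/mA) by 1000:
+ * a negotiated 9 V / 2 A contract reads back as 9000000 uV and
+ * 2000000 uA while online.
+ */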
alloc_workqueue("anx7411_work", + WQ_FREEZABLE | + WQ_MEM_RECLAIM, + 1); + if (!plat->workqueue) { + dev_err(dev, "fail to create work queue\n"); + ret = -ENOMEM; + goto free_typec_port; + } + + ret = devm_request_threaded_irq(dev, plat->intp_irq, + NULL, anx7411_intr_isr, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "anx7411-intp", plat); + if (ret) { + dev_err(dev, "fail to request irq\n"); + goto free_wq; + } + + if (anx7411_typec_check_connection(plat)) + dev_err(dev, "check status\n"); + + pm_runtime_enable(dev); + + return 0; + +free_wq: + destroy_workqueue(plat->workqueue); + +free_typec_port: + typec_unregister_port(plat->typec.port); + anx7411_port_unregister_altmodes(plat->typec.port_amode); + +free_typec_switch: + anx7411_unregister_switch(plat); + anx7411_unregister_mux(plat); + +free_i2c_dummy: + i2c_unregister_device(plat->spi_client); + + return ret; +} + +static int anx7411_i2c_remove(struct i2c_client *client) +{ + struct anx7411_data *plat = i2c_get_clientdata(client); + + anx7411_partner_unregister_altmode(plat); + anx7411_unregister_partner(plat); + + if (plat->workqueue) + destroy_workqueue(plat->workqueue); + + if (plat->spi_client) + i2c_unregister_device(plat->spi_client); + + if (plat->typec.role_sw) + usb_role_switch_put(plat->typec.role_sw); + + anx7411_unregister_mux(plat); + + anx7411_unregister_switch(plat); + + if (plat->typec.port) + typec_unregister_port(plat->typec.port); + + anx7411_port_unregister_altmodes(plat->typec.port_amode); + + return 0; +} + +static const struct i2c_device_id anx7411_id[] = { + {"anx7411", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, anx7411_id); + +static const struct of_device_id anx_match_table[] = { + {.compatible = "analogix,anx7411",}, + {}, +}; + +static struct i2c_driver anx7411_driver = { + .driver = { + .name = "anx7411", + .of_match_table = anx_match_table, + .pm = &anx7411_pm_ops, + }, + .probe = anx7411_i2c_probe, + .remove = anx7411_i2c_remove, + + .id_table = anx7411_id, +}; + +module_i2c_driver(anx7411_driver); + +MODULE_DESCRIPTION("Anx7411 USB Type-C PD driver"); +MODULE_AUTHOR("Xin Ji "); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1.5"); diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c new file mode 100644 index 0000000000..6184f53671 --- /dev/null +++ b/drivers/usb/typec/mux/fsa4480.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021-2022 Linaro Ltd. 
+ * Copyright (C) 2018-2020 The Linux Foundation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define FSA4480_SWITCH_ENABLE 0x04 +#define FSA4480_SWITCH_SELECT 0x05 +#define FSA4480_SWITCH_STATUS1 0x07 +#define FSA4480_SLOW_L 0x08 +#define FSA4480_SLOW_R 0x09 +#define FSA4480_SLOW_MIC 0x0a +#define FSA4480_SLOW_SENSE 0x0b +#define FSA4480_SLOW_GND 0x0c +#define FSA4480_DELAY_L_R 0x0d +#define FSA4480_DELAY_L_MIC 0x0e +#define FSA4480_DELAY_L_SENSE 0x0f +#define FSA4480_DELAY_L_AGND 0x10 +#define FSA4480_RESET 0x1e +#define FSA4480_MAX_REGISTER 0x1f + +#define FSA4480_ENABLE_DEVICE BIT(7) +#define FSA4480_ENABLE_SBU GENMASK(6, 5) +#define FSA4480_ENABLE_USB GENMASK(4, 3) + +#define FSA4480_SEL_SBU_REVERSE GENMASK(6, 5) +#define FSA4480_SEL_USB GENMASK(4, 3) + +struct fsa4480 { + struct i2c_client *client; + + /* used to serialize concurrent change requests */ + struct mutex lock; + + struct typec_switch_dev *sw; + struct typec_mux_dev *mux; + + struct regmap *regmap; + + u8 cur_enable; + u8 cur_select; +}; + +static const struct regmap_config fsa4480_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = FSA4480_MAX_REGISTER, + /* Accesses only done under fsa4480->lock */ + .disable_locking = true, +}; + +static int fsa4480_switch_set(struct typec_switch_dev *sw, + enum typec_orientation orientation) +{ + struct fsa4480 *fsa = typec_switch_get_drvdata(sw); + u8 new_sel; + + mutex_lock(&fsa->lock); + new_sel = FSA4480_SEL_USB; + if (orientation == TYPEC_ORIENTATION_REVERSE) + new_sel |= FSA4480_SEL_SBU_REVERSE; + + if (new_sel == fsa->cur_select) + goto out_unlock; + + if (fsa->cur_enable & FSA4480_ENABLE_SBU) { + /* Disable SBU output while re-configuring the switch */ + regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, + fsa->cur_enable & ~FSA4480_ENABLE_SBU); + + /* 35us to allow the SBU switch to turn off */ + usleep_range(35, 1000); + } + + regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, new_sel); + fsa->cur_select = new_sel; + + if (fsa->cur_enable & FSA4480_ENABLE_SBU) { + regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable); + + /* 15us to allow the SBU switch to turn on again */ + usleep_range(15, 1000); + } + +out_unlock: + mutex_unlock(&fsa->lock); + + return 0; +} + +static int fsa4480_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state) +{ + struct fsa4480 *fsa = typec_mux_get_drvdata(mux); + u8 new_enable; + + mutex_lock(&fsa->lock); + + new_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB; + if (state->mode >= TYPEC_DP_STATE_A) + new_enable |= FSA4480_ENABLE_SBU; + + if (new_enable == fsa->cur_enable) + goto out_unlock; + + regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, new_enable); + fsa->cur_enable = new_enable; + + if (new_enable & FSA4480_ENABLE_SBU) { + /* 15us to allow the SBU switch to turn off */ + usleep_range(15, 1000); + } + +out_unlock: + mutex_unlock(&fsa->lock); + + return 0; +} + +static int fsa4480_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct typec_switch_desc sw_desc = { }; + struct typec_mux_desc mux_desc = { }; + struct fsa4480 *fsa; + + fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL); + if (!fsa) + return -ENOMEM; + + fsa->client = client; + mutex_init(&fsa->lock); + + fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config); + if (IS_ERR(fsa->regmap)) + return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n"); + + fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB; + fsa->cur_select = 
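+/*
+ * Worked register values from the definitions above: the default
+ * enable mask FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB is
+ * BIT(7) | GENMASK(4, 3) == 0x98, the straight-orientation select is
+ * FSA4480_SEL_USB == 0x18, and a reversed orientation adds
+ * FSA4480_SEL_SBU_REVERSE for 0x18 | 0x60 == 0x78.
+ */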
FSA4480_SEL_USB; + + /* set default settings */ + regmap_write(fsa->regmap, FSA4480_SLOW_L, 0x00); + regmap_write(fsa->regmap, FSA4480_SLOW_R, 0x00); + regmap_write(fsa->regmap, FSA4480_SLOW_MIC, 0x00); + regmap_write(fsa->regmap, FSA4480_SLOW_SENSE, 0x00); + regmap_write(fsa->regmap, FSA4480_SLOW_GND, 0x00); + regmap_write(fsa->regmap, FSA4480_DELAY_L_R, 0x00); + regmap_write(fsa->regmap, FSA4480_DELAY_L_MIC, 0x00); + regmap_write(fsa->regmap, FSA4480_DELAY_L_SENSE, 0x00); + regmap_write(fsa->regmap, FSA4480_DELAY_L_AGND, 0x09); + regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, fsa->cur_select); + regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable); + + sw_desc.drvdata = fsa; + sw_desc.fwnode = dev_fwnode(dev); + sw_desc.set = fsa4480_switch_set; + + fsa->sw = typec_switch_register(dev, &sw_desc); + if (IS_ERR(fsa->sw)) + return dev_err_probe(dev, PTR_ERR(fsa->sw), "failed to register typec switch\n"); + + mux_desc.drvdata = fsa; + mux_desc.fwnode = dev_fwnode(dev); + mux_desc.set = fsa4480_mux_set; + + fsa->mux = typec_mux_register(dev, &mux_desc); + if (IS_ERR(fsa->mux)) { + typec_switch_unregister(fsa->sw); + return dev_err_probe(dev, PTR_ERR(fsa->mux), "failed to register typec mux\n"); + } + + i2c_set_clientdata(client, fsa); + return 0; +} + +static int fsa4480_remove(struct i2c_client *client) +{ + struct fsa4480 *fsa = i2c_get_clientdata(client); + + typec_mux_unregister(fsa->mux); + typec_switch_unregister(fsa->sw); + + return 0; +} + +static const struct i2c_device_id fsa4480_table[] = { + { "fsa4480" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, fsa4480_table); + +static const struct of_device_id fsa4480_of_table[] = { + { .compatible = "fcs,fsa4480" }, + { } +}; +MODULE_DEVICE_TABLE(of, fsa4480_of_table); + +static struct i2c_driver fsa4480_driver = { + .driver = { + .name = "fsa4480", + .of_match_table = fsa4480_of_table, + }, + .probe_new = fsa4480_probe, + .remove = fsa4480_remove, + .id_table = fsa4480_table, +}; +module_i2c_driver(fsa4480_driver); + +MODULE_DESCRIPTION("ON Semiconductor FSA4480 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c new file mode 100644 index 0000000000..dc72005d68 --- /dev/null +++ b/drivers/usb/typec/pd.c @@ -0,0 +1,708 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB Power Delivery sysfs entries + * + * Copyright (C) 2022, Intel Corporation + * Author: Heikki Krogerus + */ + +#include +#include + +#include "pd.h" + +static DEFINE_IDA(pd_ida); + +static struct class pd_class = { + .name = "usb_power_delivery", + .owner = THIS_MODULE, +}; + +#define to_pdo(o) container_of(o, struct pdo, dev) + +struct pdo { + struct device dev; + int object_position; + u32 pdo; +}; + +static void pdo_release(struct device *dev) +{ + kfree(to_pdo(dev)); +} + +/* -------------------------------------------------------------------------- */ +/* Fixed Supply */ + +static ssize_t +dual_role_power_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_DUAL_ROLE)); +} +static DEVICE_ATTR_RO(dual_role_power); + +static ssize_t +usb_suspend_supported_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_SUSPEND)); +} +static DEVICE_ATTR_RO(usb_suspend_supported); + +static ssize_t +unconstrained_power_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_EXTPOWER)); +} 
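+/*
+ * The objects in this file build up a sysfs hierarchy roughly like
+ * (names taken from the dev_set_name() calls further down; the pd0
+ * instance number comes from an IDA):
+ *
+ *	/sys/class/usb_power_delivery/pd0/revision
+ *	/sys/class/usb_power_delivery/pd0/source-capabilities/1:fixed_supply/voltage
+ *	/sys/class/usb_power_delivery/pd0/sink-capabilities/2:variable_supply/...
+ */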
+static DEVICE_ATTR_RO(unconstrained_power);
+
+static ssize_t
+usb_communication_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_USB_COMM));
+}
+static DEVICE_ATTR_RO(usb_communication_capable);
+
+static ssize_t
+dual_role_data_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_DATA_SWAP));
+}
+static DEVICE_ATTR_RO(dual_role_data);
+
+static ssize_t
+unchunked_extended_messages_supported_show(struct device *dev,
+					   struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_UNCHUNK_EXT));
+}
+static DEVICE_ATTR_RO(unchunked_extended_messages_supported);
+
+/*
+ * REVISIT: Peak Current requires access also to the RDO.
+static ssize_t
+peak_current_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	...
+}
+*/
+
+static ssize_t
+fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3);
+}
+static DEVICE_ATTR_RO(fast_role_swap_current);
+
+static ssize_t voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%umV\n", pdo_fixed_voltage(to_pdo(dev)->pdo));
+}
+static DEVICE_ATTR_RO(voltage);
+
+/* Shared with Variable supplies, both source and sink */
+static ssize_t current_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%umA\n", pdo_max_current(to_pdo(dev)->pdo));
+}
+
+/* Shared with Variable type supplies */
+static struct device_attribute maximum_current_attr = {
+	.attr = {
+		.name = "maximum_current",
+		.mode = 0444,
+	},
+	.show = current_show,
+};
+
+static struct device_attribute operational_current_attr = {
+	.attr = {
+		.name = "operational_current",
+		.mode = 0444,
+	},
+	.show = current_show,
+};
+
+static struct attribute *source_fixed_supply_attrs[] = {
+	&dev_attr_dual_role_power.attr,
+	&dev_attr_usb_suspend_supported.attr,
+	&dev_attr_unconstrained_power.attr,
+	&dev_attr_usb_communication_capable.attr,
+	&dev_attr_dual_role_data.attr,
+	&dev_attr_unchunked_extended_messages_supported.attr,
+	/*&dev_attr_peak_current.attr,*/
+	&dev_attr_voltage.attr,
+	&maximum_current_attr.attr,
+	NULL
+};
+
+static umode_t fixed_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+	if (to_pdo(kobj_to_dev(kobj))->object_position &&
+	    /*attr != &dev_attr_peak_current.attr &&*/
+	    attr != &dev_attr_voltage.attr &&
+	    attr != &maximum_current_attr.attr &&
+	    attr != &operational_current_attr.attr)
+		return 0;
+
+	return attr->mode;
+}
+
+static const struct attribute_group source_fixed_supply_group = {
+	.is_visible = fixed_attr_is_visible,
+	.attrs = source_fixed_supply_attrs,
+};
+__ATTRIBUTE_GROUPS(source_fixed_supply);
+
+static struct device_type source_fixed_supply_type = {
+	.name = "pdo",
+	.release = pdo_release,
+	.groups = source_fixed_supply_groups,
+};
+
+static struct attribute *sink_fixed_supply_attrs[] = {
+	&dev_attr_dual_role_power.attr,
+	&dev_attr_usb_suspend_supported.attr,
+	&dev_attr_unconstrained_power.attr,
+	&dev_attr_usb_communication_capable.attr,
+	&dev_attr_dual_role_data.attr,
+	&dev_attr_unchunked_extended_messages_supported.attr,
+	&dev_attr_fast_role_swap_current.attr,
+	&dev_attr_voltage.attr,
+	&operational_current_attr.attr,
+	NULL
+};
+
+static const struct attribute_group 
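+/*
+ * Example decode for the fixed-supply attributes above, assuming the
+ * standard PD encoding (voltage in 50 mV units in bits 19:10, maximum
+ * current in 10 mA units in bits 9:0): a 5 V / 3 A source PDO of
+ * 0x0001912c reads back as voltage "5000mV" and maximum_current
+ * "3000mA". The flag attributes (dual_role_power, ...) are only
+ * exposed for the first object, since fixed_attr_is_visible() hides
+ * them whenever object_position is non-zero.
+ */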
sink_fixed_supply_group = { + .is_visible = fixed_attr_is_visible, + .attrs = sink_fixed_supply_attrs, +}; +__ATTRIBUTE_GROUPS(sink_fixed_supply); + +static struct device_type sink_fixed_supply_type = { + .name = "pdo", + .release = pdo_release, + .groups = sink_fixed_supply_groups, +}; + +/* -------------------------------------------------------------------------- */ +/* Variable Supply */ + +static ssize_t +maximum_voltage_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%umV\n", pdo_max_voltage(to_pdo(dev)->pdo)); +} +static DEVICE_ATTR_RO(maximum_voltage); + +static ssize_t +minimum_voltage_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%umV\n", pdo_min_voltage(to_pdo(dev)->pdo)); +} +static DEVICE_ATTR_RO(minimum_voltage); + +static struct attribute *source_variable_supply_attrs[] = { + &dev_attr_maximum_voltage.attr, + &dev_attr_minimum_voltage.attr, + &maximum_current_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(source_variable_supply); + +static struct device_type source_variable_supply_type = { + .name = "pdo", + .release = pdo_release, + .groups = source_variable_supply_groups, +}; + +static struct attribute *sink_variable_supply_attrs[] = { + &dev_attr_maximum_voltage.attr, + &dev_attr_minimum_voltage.attr, + &operational_current_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(sink_variable_supply); + +static struct device_type sink_variable_supply_type = { + .name = "pdo", + .release = pdo_release, + .groups = sink_variable_supply_groups, +}; + +/* -------------------------------------------------------------------------- */ +/* Battery */ + +static ssize_t +maximum_power_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%umW\n", pdo_max_power(to_pdo(dev)->pdo)); +} +static DEVICE_ATTR_RO(maximum_power); + +static ssize_t +operational_power_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%umW\n", pdo_max_power(to_pdo(dev)->pdo)); +} +static DEVICE_ATTR_RO(operational_power); + +static struct attribute *source_battery_attrs[] = { + &dev_attr_maximum_voltage.attr, + &dev_attr_minimum_voltage.attr, + &dev_attr_maximum_power.attr, + NULL +}; +ATTRIBUTE_GROUPS(source_battery); + +static struct device_type source_battery_type = { + .name = "pdo", + .release = pdo_release, + .groups = source_battery_groups, +}; + +static struct attribute *sink_battery_attrs[] = { + &dev_attr_maximum_voltage.attr, + &dev_attr_minimum_voltage.attr, + &dev_attr_operational_power.attr, + NULL +}; +ATTRIBUTE_GROUPS(sink_battery); + +static struct device_type sink_battery_type = { + .name = "pdo", + .release = pdo_release, + .groups = sink_battery_groups, +}; + +/* -------------------------------------------------------------------------- */ +/* Standard Power Range (SPR) Programmable Power Supply (PPS) */ + +static ssize_t +pps_power_limited_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & BIT(27))); +} +static DEVICE_ATTR_RO(pps_power_limited); + +static ssize_t +pps_max_voltage_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%umV\n", pdo_pps_apdo_max_voltage(to_pdo(dev)->pdo)); +} + +static ssize_t +pps_min_voltage_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%umV\n", pdo_pps_apdo_min_voltage(to_pdo(dev)->pdo)); +} + +static ssize_t 
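+/*
+ * The pdo_pps_apdo_*() helpers used below decode the APDO with the
+ * PD 3.0 units (assumed here): voltages in 100 mV steps and current
+ * in 50 mA steps, so a 3.3-11 V / 3 A PPS APDO would read back as
+ * "11000mV", "3300mV" and "3000mA".
+ */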
+pps_max_current_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%umA\n", pdo_pps_apdo_max_current(to_pdo(dev)->pdo)); +} + +static struct device_attribute pps_max_voltage_attr = { + .attr = { + .name = "maximum_voltage", + .mode = 0444, + }, + .show = pps_max_voltage_show, +}; + +static struct device_attribute pps_min_voltage_attr = { + .attr = { + .name = "minimum_voltage", + .mode = 0444, + }, + .show = pps_min_voltage_show, +}; + +static struct device_attribute pps_max_current_attr = { + .attr = { + .name = "maximum_current", + .mode = 0444, + }, + .show = pps_max_current_show, +}; + +static struct attribute *source_pps_attrs[] = { + &dev_attr_pps_power_limited.attr, + &pps_max_voltage_attr.attr, + &pps_min_voltage_attr.attr, + &pps_max_current_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(source_pps); + +static struct device_type source_pps_type = { + .name = "pdo", + .release = pdo_release, + .groups = source_pps_groups, +}; + +static struct attribute *sink_pps_attrs[] = { + &pps_max_voltage_attr.attr, + &pps_min_voltage_attr.attr, + &pps_max_current_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(sink_pps); + +static struct device_type sink_pps_type = { + .name = "pdo", + .release = pdo_release, + .groups = sink_pps_groups, +}; + +/* -------------------------------------------------------------------------- */ + +static const char * const supply_name[] = { + [PDO_TYPE_FIXED] = "fixed_supply", + [PDO_TYPE_BATT] = "battery", + [PDO_TYPE_VAR] = "variable_supply", +}; + +static const char * const apdo_supply_name[] = { + [APDO_TYPE_PPS] = "programmable_supply", +}; + +static struct device_type *source_type[] = { + [PDO_TYPE_FIXED] = &source_fixed_supply_type, + [PDO_TYPE_BATT] = &source_battery_type, + [PDO_TYPE_VAR] = &source_variable_supply_type, +}; + +static struct device_type *source_apdo_type[] = { + [APDO_TYPE_PPS] = &source_pps_type, +}; + +static struct device_type *sink_type[] = { + [PDO_TYPE_FIXED] = &sink_fixed_supply_type, + [PDO_TYPE_BATT] = &sink_battery_type, + [PDO_TYPE_VAR] = &sink_variable_supply_type, +}; + +static struct device_type *sink_apdo_type[] = { + [APDO_TYPE_PPS] = &sink_pps_type, +}; + +/* REVISIT: Export when EPR_*_Capabilities need to be supported. */ +static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int position) +{ + struct device_type *type; + const char *name; + struct pdo *p; + int ret; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->pdo = pdo; + p->object_position = position; + + if (pdo_type(pdo) == PDO_TYPE_APDO) { + /* FIXME: Only PPS supported for now! Skipping others. */ + if (pdo_apdo_type(pdo) > APDO_TYPE_PPS) { + dev_warn(&cap->dev, "Unknown APDO type. 
PDO 0x%08x\n", pdo);
+			kfree(p);
+			return 0;
+		}
+
+		if (is_source(cap->role))
+			type = source_apdo_type[pdo_apdo_type(pdo)];
+		else
+			type = sink_apdo_type[pdo_apdo_type(pdo)];
+
+		name = apdo_supply_name[pdo_apdo_type(pdo)];
+	} else {
+		if (is_source(cap->role))
+			type = source_type[pdo_type(pdo)];
+		else
+			type = sink_type[pdo_type(pdo)];
+
+		name = supply_name[pdo_type(pdo)];
+	}
+
+	p->dev.parent = &cap->dev;
+	p->dev.type = type;
+	dev_set_name(&p->dev, "%u:%s", position + 1, name);
+
+	ret = device_register(&p->dev);
+	if (ret) {
+		put_device(&p->dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int remove_pdo(struct device *dev, void *data)
+{
+	device_unregister(dev);
+	return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static const char * const cap_name[] = {
+	[TYPEC_SINK]	= "sink-capabilities",
+	[TYPEC_SOURCE]	= "source-capabilities",
+};
+
+static void pd_capabilities_release(struct device *dev)
+{
+	kfree(to_usb_power_delivery_capabilities(dev));
+}
+
+static struct device_type pd_capabilities_type = {
+	.name = "capabilities",
+	.release = pd_capabilities_release,
+};
+
+/**
+ * usb_power_delivery_register_capabilities - Register a set of capabilities.
+ * @pd: The USB PD instance that the capabilities belong to.
+ * @desc: Description of the Capabilities Message.
+ *
+ * This function registers a Capabilities Message described in @desc. The
+ * capabilities will have their own sub-directory under @pd in sysfs.
+ *
+ * The function returns a pointer to struct usb_power_delivery_capabilities, or
+ * ERR_PTR(errno).
+ */
+struct usb_power_delivery_capabilities *
+usb_power_delivery_register_capabilities(struct usb_power_delivery *pd,
+					 struct usb_power_delivery_capabilities_desc *desc)
+{
+	struct usb_power_delivery_capabilities *cap;
+	int ret;
+	int i;
+
+	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+	if (!cap)
+		return ERR_PTR(-ENOMEM);
+
+	cap->pd = pd;
+	cap->role = desc->role;
+
+	cap->dev.parent = &pd->dev;
+	cap->dev.type = &pd_capabilities_type;
+	dev_set_name(&cap->dev, "%s", cap_name[cap->role]);
+
+	ret = device_register(&cap->dev);
+	if (ret) {
+		put_device(&cap->dev);
+		return ERR_PTR(ret);
+	}
+
+	for (i = 0; i < PDO_MAX_OBJECTS && desc->pdo[i]; i++) {
+		ret = add_pdo(cap, desc->pdo[i], i);
+		if (ret) {
+			usb_power_delivery_unregister_capabilities(cap);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return cap;
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_register_capabilities);
+
+/**
+ * usb_power_delivery_unregister_capabilities - Unregister a set of capabilities
+ * @cap: The capabilities
+ */
+void usb_power_delivery_unregister_capabilities(struct usb_power_delivery_capabilities *cap)
+{
+	if (!cap)
+		return;
+
+	device_for_each_child(&cap->dev, NULL, remove_pdo);
+	device_unregister(&cap->dev);
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_unregister_capabilities);
+
+/* -------------------------------------------------------------------------- */
+
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct usb_power_delivery *pd = to_usb_power_delivery(dev);
+
+	return sysfs_emit(buf, "%u.%u\n", (pd->revision >> 8) & 0xff, (pd->revision >> 4) & 0xf);
+}
+static DEVICE_ATTR_RO(revision);
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct usb_power_delivery *pd = to_usb_power_delivery(dev);
+
+	return sysfs_emit(buf, "%u.%u\n", (pd->version >> 8) & 0xff, (pd->version >> 4) & 0xf);
+}
+static DEVICE_ATTR_RO(version);
+
+static struct 
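+/*
+ * revision and version keep the major number in bits 15:8 and the
+ * minor in bits 7:4 (see the shifts in revision_show() above), so a
+ * stored 0x0300 prints as "3.0" and 0x0120 as "1.2".
+ */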
attribute *pd_attrs[] = { + &dev_attr_revision.attr, + &dev_attr_version.attr, + NULL +}; + +static umode_t pd_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + struct usb_power_delivery *pd = to_usb_power_delivery(kobj_to_dev(kobj)); + + if (attr == &dev_attr_version.attr && !pd->version) + return 0; + + return attr->mode; +} + +static const struct attribute_group pd_group = { + .is_visible = pd_attr_is_visible, + .attrs = pd_attrs, +}; +__ATTRIBUTE_GROUPS(pd); + +static void pd_release(struct device *dev) +{ + struct usb_power_delivery *pd = to_usb_power_delivery(dev); + + ida_simple_remove(&pd_ida, pd->id); + kfree(pd); +} + +static struct device_type pd_type = { + .name = "usb_power_delivery", + .release = pd_release, + .groups = pd_groups, +}; + +struct usb_power_delivery *usb_power_delivery_find(const char *name) +{ + struct device *dev; + + dev = class_find_device_by_name(&pd_class, name); + + return dev ? to_usb_power_delivery(dev) : NULL; +} + +/** + * usb_power_delivery_register - Register USB Power Delivery Support. + * @parent: Parent device. + * @desc: Description of the USB PD contract. + * + * This routine can be used to register USB Power Delivery capabilities that a + * device or devices can support. These capabilities represent all the + * capabilities that can be negotiated with a partner, so not only the Power + * Capabilities that are negotiated using the USB PD Capabilities Message. + * + * The USB Power Delivery Support object that this routine generates can be used + * as the parent object for all the actual USB Power Delivery Messages and + * objects that can be negotiated with the partner. + * + * Returns handle to struct usb_power_delivery or ERR_PTR. + */ +struct usb_power_delivery * +usb_power_delivery_register(struct device *parent, struct usb_power_delivery_desc *desc) +{ + struct usb_power_delivery *pd; + int ret; + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + ret = ida_simple_get(&pd_ida, 0, 0, GFP_KERNEL); + if (ret < 0) { + kfree(pd); + return ERR_PTR(ret); + } + + pd->id = ret; + pd->revision = desc->revision; + pd->version = desc->version; + + pd->dev.parent = parent; + pd->dev.type = &pd_type; + pd->dev.class = &pd_class; + dev_set_name(&pd->dev, "pd%d", pd->id); + + ret = device_register(&pd->dev); + if (ret) { + put_device(&pd->dev); + return ERR_PTR(ret); + } + + return pd; +} +EXPORT_SYMBOL_GPL(usb_power_delivery_register); + +/** + * usb_power_delivery_unregister - Unregister USB Power Delivery Support. + * @pd: The USB PD contract. + */ +void usb_power_delivery_unregister(struct usb_power_delivery *pd) +{ + if (IS_ERR_OR_NULL(pd)) + return; + + device_unregister(&pd->dev); +} +EXPORT_SYMBOL_GPL(usb_power_delivery_unregister); + +/** + * usb_power_delivery_link_device - Link device to its USB PD object. + * @pd: The USB PD instance. + * @dev: The device. + * + * This function can be used to create a symlink named "usb_power_delivery" for + * @dev that points to @pd. + */ +int usb_power_delivery_link_device(struct usb_power_delivery *pd, struct device *dev) +{ + int ret; + + if (IS_ERR_OR_NULL(pd) || !dev) + return 0; + + ret = sysfs_create_link(&dev->kobj, &pd->dev.kobj, "usb_power_delivery"); + if (ret) + return ret; + + get_device(&pd->dev); + get_device(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(usb_power_delivery_link_device); + +/** + * usb_power_delivery_unlink_device - Unlink device from its USB PD object. + * @pd: The USB PD instance. + * @dev: The device. 
+ * + * Remove the symlink that was previously created with pd_link_device(). + */ +void usb_power_delivery_unlink_device(struct usb_power_delivery *pd, struct device *dev) +{ + if (IS_ERR_OR_NULL(pd) || !dev) + return; + + sysfs_remove_link(&dev->kobj, "usb_power_delivery"); + put_device(&pd->dev); + put_device(dev); +} +EXPORT_SYMBOL_GPL(usb_power_delivery_unlink_device); + +/* -------------------------------------------------------------------------- */ + +int __init usb_power_delivery_init(void) +{ + return class_register(&pd_class); +} + +void __exit usb_power_delivery_exit(void) +{ + ida_destroy(&pd_ida); + class_unregister(&pd_class); +} diff --git a/drivers/usb/typec/pd.h b/drivers/usb/typec/pd.h new file mode 100644 index 0000000000..049a1aad44 --- /dev/null +++ b/drivers/usb/typec/pd.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __USB_POWER_DELIVERY__ +#define __USB_POWER_DELIVERY__ + +#include +#include + +struct usb_power_delivery { + struct device dev; + int id; + u16 revision; + u16 version; +}; + +struct usb_power_delivery_capabilities { + struct device dev; + struct usb_power_delivery *pd; + enum typec_role role; +}; + +#define to_usb_power_delivery_capabilities(o) container_of(o, struct usb_power_delivery_capabilities, dev) +#define to_usb_power_delivery(o) container_of(o, struct usb_power_delivery, dev) + +struct usb_power_delivery *usb_power_delivery_find(const char *name); + +int usb_power_delivery_init(void); +void usb_power_delivery_exit(void); + +#endif /* __USB_POWER_DELIVERY__ */ diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c new file mode 100644 index 0000000000..2003731f1b --- /dev/null +++ b/drivers/usb/typec/retimer.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2022 Google LLC + * + * USB Type-C Retimer support. + * Author: Prashant Malani + * + */ + +#include +#include +#include +#include +#include +#include + +#include "class.h" +#include "retimer.h" + +static bool dev_name_ends_with(struct device *dev, const char *suffix) +{ + const char *name = dev_name(dev); + const int name_len = strlen(name); + const int suffix_len = strlen(suffix); + + if (suffix_len > name_len) + return false; + + return strcmp(name + (name_len - suffix_len), suffix) == 0; +} + +static int retimer_fwnode_match(struct device *dev, const void *fwnode) +{ + return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-retimer"); +} + +static void *typec_retimer_match(struct fwnode_handle *fwnode, const char *id, void *data) +{ + struct device *dev; + + if (id && !fwnode_property_present(fwnode, id)) + return NULL; + + dev = class_find_device(&retimer_class, NULL, fwnode, + retimer_fwnode_match); + + return dev ? to_typec_retimer(dev) : ERR_PTR(-EPROBE_DEFER); +} + +/** + * fwnode_typec_retimer_get - Find USB Type-C retimer. + * @fwnode: The caller device node. + * + * Finds a retimer linked to the caller. This function is primarily meant for the + * Type-C drivers. Returns a reference to the retimer on success, NULL if no + * matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection + * was found but the retimer has not been enumerated yet. 
+ */ +struct typec_retimer *fwnode_typec_retimer_get(struct fwnode_handle *fwnode) +{ + struct typec_retimer *retimer; + + retimer = fwnode_connection_find_match(fwnode, "retimer-switch", NULL, typec_retimer_match); + if (!IS_ERR_OR_NULL(retimer)) + WARN_ON(!try_module_get(retimer->dev.parent->driver->owner)); + + return retimer; +} +EXPORT_SYMBOL_GPL(fwnode_typec_retimer_get); + +/** + * typec_retimer_put - Release handle to a retimer. + * @retimer: USB Type-C Connector Retimer. + * + * Decrements reference count for @retimer. + */ +void typec_retimer_put(struct typec_retimer *retimer) +{ + if (!IS_ERR_OR_NULL(retimer)) { + module_put(retimer->dev.parent->driver->owner); + put_device(&retimer->dev); + } +} +EXPORT_SYMBOL_GPL(typec_retimer_put); + +int typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state) +{ + if (IS_ERR_OR_NULL(retimer)) + return 0; + + return retimer->set(retimer, state); +} +EXPORT_SYMBOL_GPL(typec_retimer_set); + +static void typec_retimer_release(struct device *dev) +{ + kfree(to_typec_retimer(dev)); +} + +static const struct device_type typec_retimer_dev_type = { + .name = "typec_retimer", + .release = typec_retimer_release, +}; + +/** + * typec_retimer_register - Register a retimer device. + * @parent: Parent device. + * @desc: Retimer description. + * + * Some USB Type-C connectors have their physical lines routed through retimers before they + * reach muxes or host controllers. In some cases (for example: using alternate modes) + * these retimers need to be reconfigured appropriately. This function registers retimer + * switches which route and potentially modify the signals on the Type C physical lines + * enroute to the host controllers. + */ +struct typec_retimer * +typec_retimer_register(struct device *parent, const struct typec_retimer_desc *desc) +{ + struct typec_retimer *retimer; + int ret; + + if (!desc || !desc->set) + return ERR_PTR(-EINVAL); + + retimer = kzalloc(sizeof(*retimer), GFP_KERNEL); + if (!retimer) + return ERR_PTR(-ENOMEM); + + retimer->set = desc->set; + + device_initialize(&retimer->dev); + retimer->dev.parent = parent; + retimer->dev.fwnode = desc->fwnode; + retimer->dev.class = &retimer_class; + retimer->dev.type = &typec_retimer_dev_type; + retimer->dev.driver_data = desc->drvdata; + dev_set_name(&retimer->dev, "%s-retimer", + desc->name ? desc->name : dev_name(parent)); + + ret = device_add(&retimer->dev); + if (ret) { + dev_err(parent, "failed to register retimer (%d)\n", ret); + put_device(&retimer->dev); + return ERR_PTR(ret); + } + + return retimer; +} +EXPORT_SYMBOL_GPL(typec_retimer_register); + +/** + * typec_retimer_unregister - Unregister retimer device. + * @retimer: USB Type-C Connector retimer. + * + * Unregister retimer that was registered with typec_retimer_register(). 
+ */
+void typec_retimer_unregister(struct typec_retimer *retimer)
+{
+	if (!IS_ERR_OR_NULL(retimer))
+		device_unregister(&retimer->dev);
+}
+EXPORT_SYMBOL_GPL(typec_retimer_unregister);
+
+void *typec_retimer_get_drvdata(struct typec_retimer *retimer)
+{
+	return dev_get_drvdata(&retimer->dev);
+}
+EXPORT_SYMBOL_GPL(typec_retimer_get_drvdata);
+
+struct class retimer_class = {
+	.name = "retimer",
+	.owner = THIS_MODULE,
+};
diff --git a/drivers/usb/typec/retimer.h b/drivers/usb/typec/retimer.h
new file mode 100644
index 0000000000..fa15951d48
--- /dev/null
+++ b/drivers/usb/typec/retimer.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __USB_TYPEC_RETIMER__
+#define __USB_TYPEC_RETIMER__
+
+#include
+
+struct typec_retimer {
+	struct device dev;
+	typec_retimer_set_fn_t set;
+};
+
+#define to_typec_retimer(_dev_) container_of(_dev_, struct typec_retimer, dev)
+
+#endif /* __USB_TYPEC_RETIMER__ */
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
new file mode 100644
index 0000000000..061551d464
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+/*
+ * UCSI driver for STMicroelectronics STM32G0 Type-C PD controller
+ *
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier .
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ucsi.h"
+
+/* STM32G0 I2C bootloader addr: 0b1010001x (See AN2606) */
+#define STM32G0_I2C_BL_ADDR	(0xa2 >> 1)
+
+/* STM32G0 I2C bootloader max data size */
+#define STM32G0_I2C_BL_SZ	256
+
+/* STM32 I2C bootloader commands (See AN4221) */
+#define STM32_CMD_GVR		0x01	/* Gets the bootloader version */
+#define STM32_CMD_GVR_LEN	1
+#define STM32_CMD_RM		0x11	/* Read memory */
+#define STM32_CMD_WM		0x31	/* Write memory */
+#define STM32_CMD_ADDR_LEN	5	/* Address len for go, mem write... */
+#define STM32_CMD_ERASE		0x44	/* Erase page, bank or all */
+#define STM32_CMD_ERASE_SPECIAL_LEN	3
+#define STM32_CMD_GLOBAL_MASS_ERASE	0xffff	/* All-bank erase */
+
+/* STM32 I2C bootloader answer status */
+#define STM32G0_I2C_BL_ACK	0x79
+#define STM32G0_I2C_BL_NACK	0x1f
+#define STM32G0_I2C_BL_BUSY	0x76
+
+/* STM32G0 flash definitions */
+#define STM32G0_USER_OPTION_BYTES	0x1fff7800
+#define STM32G0_USER_OB_NBOOT0		BIT(26)
+#define STM32G0_USER_OB_NBOOT_SEL	BIT(24)
+#define STM32G0_USER_OB_BOOT_MAIN	(STM32G0_USER_OB_NBOOT0 | STM32G0_USER_OB_NBOOT_SEL)
+#define STM32G0_MAIN_MEM_ADDR		0x08000000
+
+/* STM32 Firmware definitions: additional commands */
+#define STM32G0_FW_GETVER	0x00	/* Gets the firmware version */
+#define STM32G0_FW_GETVER_LEN	4
+#define STM32G0_FW_RSTGOBL	0x21	/* Reset and go to bootloader */
+#define STM32G0_FW_KEYWORD	0xa56959a6
+
+/* ucsi_stm32g0_fw_info located at the end of the firmware */
+struct ucsi_stm32g0_fw_info {
+	u32 version;
+	u32 keyword;
+};
+
+struct ucsi_stm32g0 {
+	struct i2c_client *client;
+	struct i2c_client *i2c_bl;
+	bool in_bootloader;
+	u8 bl_version;
+	struct completion complete;
+	struct device *dev;
+	unsigned long flags;
+	const char *fw_name;
+	struct ucsi *ucsi;
+	bool suspended;
+	bool wakeup_event;
+};
+
+/*
+ * Bootloader command helpers:
+ * - send command (2 bytes)
+ * - check ack
+ * Then either:
+ * - receive data
+ * - receive data + check ack
+ * - send data + check ack
+ * These operations depend on the command and have various lengths.
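+ *
+ * For example, reading N bytes with STM32_CMD_RM goes, per the AN4221
+ * framing implemented below: write {0x11, 0xee} (command, command ^ 0xff),
+ * read one ack byte, write the 5-byte address {a3, a2, a1, a0,
+ * a3^a2^a1^a0}, read ack, write {N - 1, (N - 1) ^ 0xff}, read ack, then
+ * read the N data bytes with no trailing ack.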
+ */ +static int ucsi_stm32g0_bl_check_ack(struct ucsi *ucsi) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->i2c_bl; + unsigned char ack; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = 1, + .buf = &ack, + }, + }; + int ret; + + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (ret != ARRAY_SIZE(msg)) { + dev_err(g0->dev, "i2c bl ack (%02x), error: %d\n", client->addr, ret); + + return ret < 0 ? ret : -EIO; + } + + /* The 'ack' byte should contain bootloader answer: ack/nack/busy */ + switch (ack) { + case STM32G0_I2C_BL_ACK: + return 0; + case STM32G0_I2C_BL_NACK: + return -ENOENT; + case STM32G0_I2C_BL_BUSY: + return -EBUSY; + default: + dev_err(g0->dev, "i2c bl ack (%02x), invalid byte: %02x\n", + client->addr, ack); + return -EINVAL; + } +} + +static int ucsi_stm32g0_bl_cmd_check_ack(struct ucsi *ucsi, unsigned int cmd, bool check_ack) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->i2c_bl; + unsigned char buf[2]; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = sizeof(buf), + .buf = buf, + }, + }; + int ret; + + /* + * Send STM32 bootloader command format is two bytes: + * - command code + * - XOR'ed command code + */ + buf[0] = cmd; + buf[1] = cmd ^ 0xff; + + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (ret != ARRAY_SIZE(msg)) { + dev_dbg(g0->dev, "i2c bl cmd %d (%02x), error: %d\n", cmd, client->addr, ret); + + return ret < 0 ? ret : -EIO; + } + + if (check_ack) + return ucsi_stm32g0_bl_check_ack(ucsi); + + return 0; +} + +static int ucsi_stm32g0_bl_cmd(struct ucsi *ucsi, unsigned int cmd) +{ + return ucsi_stm32g0_bl_cmd_check_ack(ucsi, cmd, true); +} + +static int ucsi_stm32g0_bl_rcv_check_ack(struct ucsi *ucsi, void *data, size_t len, bool check_ack) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->i2c_bl; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = len, + .buf = data, + }, + }; + int ret; + + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (ret != ARRAY_SIZE(msg)) { + dev_err(g0->dev, "i2c bl rcv %02x, error: %d\n", client->addr, ret); + + return ret < 0 ? ret : -EIO; + } + + if (check_ack) + return ucsi_stm32g0_bl_check_ack(ucsi); + + return 0; +} + +static int ucsi_stm32g0_bl_rcv(struct ucsi *ucsi, void *data, size_t len) +{ + return ucsi_stm32g0_bl_rcv_check_ack(ucsi, data, len, true); +} + +static int ucsi_stm32g0_bl_rcv_woack(struct ucsi *ucsi, void *data, size_t len) +{ + return ucsi_stm32g0_bl_rcv_check_ack(ucsi, data, len, false); +} + +static int ucsi_stm32g0_bl_send(struct ucsi *ucsi, void *data, size_t len) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->i2c_bl; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = len, + .buf = data, + }, + }; + int ret; + + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (ret != ARRAY_SIZE(msg)) { + dev_err(g0->dev, "i2c bl send %02x, error: %d\n", client->addr, ret); + + return ret < 0 ? 
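+/*
+ * Command frames built by ucsi_stm32g0_bl_cmd_check_ack() above are a
+ * command byte plus its complement, e.g. STM32_CMD_GVR (0x01) goes out
+ * as {0x01, 0xfe}; the bootloader then answers with the single status
+ * byte handled in ucsi_stm32g0_bl_check_ack() (0x79 ACK, 0x1f NACK,
+ * 0x76 BUSY).
+ */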
ret : -EIO; + } + + return ucsi_stm32g0_bl_check_ack(ucsi); +} + +/* Bootloader commands */ +static int ucsi_stm32g0_bl_get_version(struct ucsi *ucsi, u8 *bl_version) +{ + int ret; + + ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_GVR); + if (ret) + return ret; + + return ucsi_stm32g0_bl_rcv(ucsi, bl_version, STM32_CMD_GVR_LEN); +} + +static int ucsi_stm32g0_bl_send_addr(struct ucsi *ucsi, u32 addr) +{ + u8 data8[STM32_CMD_ADDR_LEN]; + + /* Address format: 4 bytes addr (MSB first) + XOR'ed addr bytes */ + put_unaligned_be32(addr, data8); + data8[4] = data8[0] ^ data8[1] ^ data8[2] ^ data8[3]; + + return ucsi_stm32g0_bl_send(ucsi, data8, STM32_CMD_ADDR_LEN); +} + +static int ucsi_stm32g0_bl_global_mass_erase(struct ucsi *ucsi) +{ + u8 data8[4]; + u16 *data16 = (u16 *)&data8[0]; + int ret; + + data16[0] = STM32_CMD_GLOBAL_MASS_ERASE; + data8[2] = data8[0] ^ data8[1]; + + ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_ERASE); + if (ret) + return ret; + + return ucsi_stm32g0_bl_send(ucsi, data8, STM32_CMD_ERASE_SPECIAL_LEN); +} + +static int ucsi_stm32g0_bl_write(struct ucsi *ucsi, u32 addr, const void *data, size_t len) +{ + u8 *data8; + int i, ret; + + if (!len || len > STM32G0_I2C_BL_SZ) + return -EINVAL; + + /* Write memory: len bytes -1, data up to 256 bytes + XOR'ed bytes */ + data8 = kmalloc(STM32G0_I2C_BL_SZ + 2, GFP_KERNEL); + if (!data8) + return -ENOMEM; + + ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_WM); + if (ret) + goto free; + + ret = ucsi_stm32g0_bl_send_addr(ucsi, addr); + if (ret) + goto free; + + data8[0] = len - 1; + memcpy(data8 + 1, data, len); + data8[len + 1] = data8[0]; + for (i = 1; i <= len; i++) + data8[len + 1] ^= data8[i]; + + ret = ucsi_stm32g0_bl_send(ucsi, data8, len + 2); +free: + kfree(data8); + + return ret; +} + +static int ucsi_stm32g0_bl_read(struct ucsi *ucsi, u32 addr, void *data, size_t len) +{ + int ret; + + if (!len || len > STM32G0_I2C_BL_SZ) + return -EINVAL; + + ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_RM); + if (ret) + return ret; + + ret = ucsi_stm32g0_bl_send_addr(ucsi, addr); + if (ret) + return ret; + + ret = ucsi_stm32g0_bl_cmd(ucsi, len - 1); + if (ret) + return ret; + + return ucsi_stm32g0_bl_rcv_woack(ucsi, data, len); +} + +/* Firmware commands (the same address as the bootloader) */ +static int ucsi_stm32g0_fw_cmd(struct ucsi *ucsi, unsigned int cmd) +{ + return ucsi_stm32g0_bl_cmd_check_ack(ucsi, cmd, false); +} + +static int ucsi_stm32g0_fw_rcv(struct ucsi *ucsi, void *data, size_t len) +{ + return ucsi_stm32g0_bl_rcv_woack(ucsi, data, len); +} + +/* UCSI ops */ +static int ucsi_stm32g0_read(struct ucsi *ucsi, unsigned int offset, void *val, size_t len) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->client; + u8 reg = offset; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = 1, + .buf = ®, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = len, + .buf = val, + }, + }; + int ret; + + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (ret != ARRAY_SIZE(msg)) { + dev_err(g0->dev, "i2c read %02x, %02x error: %d\n", client->addr, reg, ret); + + return ret < 0 ? 
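+/*
+ * Sketch of the write-memory payload assembled by
+ * ucsi_stm32g0_bl_write() above, for a hypothetical 4-byte chunk
+ * {0xde, 0xad, 0xbe, 0xef}:
+ *
+ *	data8[] = { 0x03,			// len - 1
+ *		    0xde, 0xad, 0xbe, 0xef,	// payload
+ *		    0x03 ^ 0xde ^ 0xad ^ 0xbe ^ 0xef };	// XOR of all prior bytes
+ *
+ * i.e. len + 2 bytes sent after the WM command and address frames.
+ */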
ret : -EIO; + } + + return 0; +} + +static int ucsi_stm32g0_async_write(struct ucsi *ucsi, unsigned int offset, const void *val, + size_t len) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->client; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + } + }; + unsigned char *buf; + int ret; + + buf = kmalloc(len + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = offset; + memcpy(&buf[1], val, len); + msg[0].len = len + 1; + msg[0].buf = buf; + + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + kfree(buf); + if (ret != ARRAY_SIZE(msg)) { + dev_err(g0->dev, "i2c write %02x, %02x error: %d\n", client->addr, offset, ret); + + return ret < 0 ? ret : -EIO; + } + + return 0; +} + +static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const void *val, + size_t len) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + int ret; + + set_bit(COMMAND_PENDING, &g0->flags); + + ret = ucsi_stm32g0_async_write(ucsi, offset, val, len); + if (ret) + goto out_clear_bit; + + if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000))) + ret = -ETIMEDOUT; + +out_clear_bit: + clear_bit(COMMAND_PENDING, &g0->flags); + + return ret; +} + +static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data) +{ + struct ucsi_stm32g0 *g0 = data; + u32 cci; + int ret; + + if (g0->suspended) + g0->wakeup_event = true; + + ret = ucsi_stm32g0_read(g0->ucsi, UCSI_CCI, &cci, sizeof(cci)); + if (ret) + return IRQ_NONE; + + if (UCSI_CCI_CONNECTOR(cci)) + ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci)); + + if (test_bit(COMMAND_PENDING, &g0->flags) && + cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) + complete(&g0->complete); + + return IRQ_HANDLED; +} + +static const struct ucsi_operations ucsi_stm32g0_ops = { + .read = ucsi_stm32g0_read, + .sync_write = ucsi_stm32g0_sync_write, + .async_write = ucsi_stm32g0_async_write, +}; + +static int ucsi_stm32g0_register(struct ucsi *ucsi) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->client; + int ret; + + /* Request alert interrupt */ + ret = request_threaded_irq(client->irq, NULL, ucsi_stm32g0_irq_handler, IRQF_ONESHOT, + dev_name(g0->dev), g0); + if (ret) { + dev_err(g0->dev, "request IRQ failed: %d\n", ret); + return ret; + } + + ret = ucsi_register(ucsi); + if (ret) { + dev_err_probe(g0->dev, ret, "ucsi_register failed\n"); + free_irq(client->irq, g0); + return ret; + } + + return 0; +} + +static void ucsi_stm32g0_unregister(struct ucsi *ucsi) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + struct i2c_client *client = g0->client; + + ucsi_unregister(ucsi); + free_irq(client->irq, g0); +} + +static void ucsi_stm32g0_fw_cb(const struct firmware *fw, void *context) +{ + struct ucsi_stm32g0 *g0; + const u8 *data, *end; + const struct ucsi_stm32g0_fw_info *fw_info; + u32 addr = STM32G0_MAIN_MEM_ADDR, ob, fw_version; + int ret, size; + + if (!context) + return; + + g0 = ucsi_get_drvdata(context); + + if (!fw) + goto fw_release; + + fw_info = (struct ucsi_stm32g0_fw_info *)(fw->data + fw->size - sizeof(*fw_info)); + + if (!g0->in_bootloader) { + /* Read running firmware version */ + ret = ucsi_stm32g0_fw_cmd(g0->ucsi, STM32G0_FW_GETVER); + if (ret) { + dev_err(g0->dev, "Get version cmd failed %d\n", ret); + goto fw_release; + } + ret = ucsi_stm32g0_fw_rcv(g0->ucsi, &fw_version, + STM32G0_FW_GETVER_LEN); + if (ret) { + dev_err(g0->dev, "Get version failed %d\n", ret); + goto fw_release; + } + + /* Sanity 
check on keyword and firmware version */ + if (fw_info->keyword != STM32G0_FW_KEYWORD || fw_info->version == fw_version) + goto fw_release; + + dev_info(g0->dev, "Flashing FW: %08x (%08x cur)\n", fw_info->version, fw_version); + + /* Switch to bootloader mode */ + ucsi_stm32g0_unregister(g0->ucsi); + ret = ucsi_stm32g0_fw_cmd(g0->ucsi, STM32G0_FW_RSTGOBL); + if (ret) { + dev_err(g0->dev, "bootloader cmd failed %d\n", ret); + goto fw_release; + } + g0->in_bootloader = true; + + /* STM32G0 reboot delay */ + msleep(100); + } + + ret = ucsi_stm32g0_bl_global_mass_erase(g0->ucsi); + if (ret) { + dev_err(g0->dev, "Erase failed %d\n", ret); + goto fw_release; + } + + data = fw->data; + end = fw->data + fw->size; + while (data < end) { + if ((end - data) < STM32G0_I2C_BL_SZ) + size = end - data; + else + size = STM32G0_I2C_BL_SZ; + + ret = ucsi_stm32g0_bl_write(g0->ucsi, addr, data, size); + if (ret) { + dev_err(g0->dev, "Write failed %d\n", ret); + goto fw_release; + } + addr += size; + data += size; + } + + dev_dbg(g0->dev, "Configure to boot from main flash\n"); + + ret = ucsi_stm32g0_bl_read(g0->ucsi, STM32G0_USER_OPTION_BYTES, &ob, sizeof(ob)); + if (ret) { + dev_err(g0->dev, "read user option bytes failed %d\n", ret); + goto fw_release; + } + + dev_dbg(g0->dev, "STM32G0_USER_OPTION_BYTES 0x%08x\n", ob); + + /* Configure user option bytes to boot from main flash next time */ + ob |= STM32G0_USER_OB_BOOT_MAIN; + + /* Writing option bytes will also reset G0 for updates to be loaded */ + ret = ucsi_stm32g0_bl_write(g0->ucsi, STM32G0_USER_OPTION_BYTES, &ob, sizeof(ob)); + if (ret) { + dev_err(g0->dev, "write user option bytes failed %d\n", ret); + goto fw_release; + } + + dev_info(g0->dev, "Starting, option bytes:0x%08x\n", ob); + + /* STM32G0 FW boot delay */ + msleep(500); + + /* Register UCSI interface */ + if (!ucsi_stm32g0_register(g0->ucsi)) + g0->in_bootloader = false; + +fw_release: + release_firmware(fw); +} + +static int ucsi_stm32g0_probe_bootloader(struct ucsi *ucsi) +{ + struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + int ret; + u16 ucsi_version; + + /* firmware-name is optional */ + if (device_property_present(g0->dev, "firmware-name")) { + ret = device_property_read_string(g0->dev, "firmware-name", &g0->fw_name); + if (ret < 0) + return dev_err_probe(g0->dev, ret, "Error reading firmware-name\n"); + } + + if (g0->fw_name) { + /* STM32G0 in bootloader mode communicates at reserved address 0x51 */ + g0->i2c_bl = i2c_new_dummy_device(g0->client->adapter, STM32G0_I2C_BL_ADDR); + if (IS_ERR(g0->i2c_bl)) { + ret = dev_err_probe(g0->dev, PTR_ERR(g0->i2c_bl), + "Failed to register bootloader I2C address\n"); + return ret; + } + } + + /* + * Try to guess if the STM32G0 is running a UCSI firmware. First probe the UCSI FW at its + * i2c address. Fall back to the bootloader i2c address only if firmware-name is specified. + */ + ret = ucsi_stm32g0_read(ucsi, UCSI_VERSION, &ucsi_version, sizeof(ucsi_version)); + if (!ret || !g0->fw_name) + return ret; + + /* Speculatively read the bootloader version that has a known length. 
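If it answers, the chip is sitting in system bootloader mode rather than running UCSI firmware. 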
*/ + ret = ucsi_stm32g0_bl_get_version(ucsi, &g0->bl_version); + if (ret < 0) { + i2c_unregister_device(g0->i2c_bl); + return ret; + } + + /* Device in bootloader mode */ + g0->in_bootloader = true; + dev_info(g0->dev, "Bootloader Version 0x%02x\n", g0->bl_version); + + return 0; +} + +static int ucsi_stm32g0_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct ucsi_stm32g0 *g0; + int ret; + + g0 = devm_kzalloc(dev, sizeof(*g0), GFP_KERNEL); + if (!g0) + return -ENOMEM; + + g0->dev = dev; + g0->client = client; + init_completion(&g0->complete); + i2c_set_clientdata(client, g0); + + g0->ucsi = ucsi_create(dev, &ucsi_stm32g0_ops); + if (IS_ERR(g0->ucsi)) + return PTR_ERR(g0->ucsi); + + ucsi_set_drvdata(g0->ucsi, g0); + + ret = ucsi_stm32g0_probe_bootloader(g0->ucsi); + if (ret < 0) + goto destroy; + + /* + * Don't register in bootloader mode: wait for the firmware to be loaded and started before + * registering the UCSI device. + */ + if (!g0->in_bootloader) { + ret = ucsi_stm32g0_register(g0->ucsi); + if (ret < 0) + goto freei2c; + } + + if (g0->fw_name) { + /* + * Asynchronously flash (e.g. bootloader mode) or update the running firmware, + * so as not to hang the boot process + */ + ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, g0->fw_name, g0->dev, + GFP_KERNEL, g0->ucsi, ucsi_stm32g0_fw_cb); + if (ret < 0) { + dev_err_probe(dev, ret, "firmware request failed\n"); + goto unregister; + } + } + + return 0; + +unregister: + if (!g0->in_bootloader) + ucsi_stm32g0_unregister(g0->ucsi); +freei2c: + if (g0->fw_name) + i2c_unregister_device(g0->i2c_bl); +destroy: + ucsi_destroy(g0->ucsi); + + return ret; +} + +static int ucsi_stm32g0_remove(struct i2c_client *client) +{ + struct ucsi_stm32g0 *g0 = i2c_get_clientdata(client); + + if (!g0->in_bootloader) + ucsi_stm32g0_unregister(g0->ucsi); + if (g0->fw_name) + i2c_unregister_device(g0->i2c_bl); + ucsi_destroy(g0->ucsi); + + return 0; +} + +static int ucsi_stm32g0_suspend(struct device *dev) +{ + struct ucsi_stm32g0 *g0 = dev_get_drvdata(dev); + struct i2c_client *client = g0->client; + + if (g0->in_bootloader) + return 0; + + /* Keep the interrupt disabled until the i2c bus has been resumed */ + disable_irq(client->irq); + + g0->suspended = true; + g0->wakeup_event = false; + + if (device_may_wakeup(dev) || device_wakeup_path(dev)) + enable_irq_wake(client->irq); + + return 0; +} + +static int ucsi_stm32g0_resume(struct device *dev) +{ + struct ucsi_stm32g0 *g0 = dev_get_drvdata(dev); + struct i2c_client *client = g0->client; + + if (g0->in_bootloader) + return 0; + + if (device_may_wakeup(dev) || device_wakeup_path(dev)) + disable_irq_wake(client->irq); + + enable_irq(client->irq); + + /* Ensure any pending handler runs to signal a wakeup_event */ + synchronize_irq(client->irq); + + if (g0->wakeup_event) + pm_wakeup_event(g0->dev, 0); + + g0->suspended = false; + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(ucsi_stm32g0_pm_ops, ucsi_stm32g0_suspend, ucsi_stm32g0_resume); + +static const struct of_device_id __maybe_unused ucsi_stm32g0_typec_of_match[] = { + { .compatible = "st,stm32g0-typec" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ucsi_stm32g0_typec_of_match); + +static const struct i2c_device_id ucsi_stm32g0_typec_i2c_devid[] = { + {"stm32g0-typec", 0}, + {}, +}; +MODULE_DEVICE_TABLE(i2c, ucsi_stm32g0_typec_i2c_devid); + +static struct i2c_driver ucsi_stm32g0_i2c_driver = { + .driver = { + .name = "ucsi-stm32g0-i2c", + .of_match_table = 
of_match_ptr(ucsi_stm32g0_typec_of_match), + .pm = pm_sleep_ptr(&ucsi_stm32g0_pm_ops), + }, + .probe = ucsi_stm32g0_probe, + .remove = ucsi_stm32g0_remove, + .id_table = ucsi_stm32g0_typec_i2c_devid +}; +module_i2c_driver(ucsi_stm32g0_i2c_driver); + +MODULE_AUTHOR("Fabrice Gasnier "); +MODULE_DESCRIPTION("STMicroelectronics STM32G0 Type-C controller"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_ALIAS("platform:ucsi-stm32g0"); diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c new file mode 100644 index 0000000000..7cb56c382c --- /dev/null +++ b/drivers/vfio/vfio_main.c @@ -0,0 +1,2135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VFIO core + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "vfio.h" + +#define DRIVER_VERSION "0.3" +#define DRIVER_AUTHOR "Alex Williamson " +#define DRIVER_DESC "VFIO - User Level meta-driver" + +static struct vfio { + struct class *class; + struct list_head iommu_drivers_list; + struct mutex iommu_drivers_lock; + struct list_head group_list; + struct mutex group_lock; /* locks group_list */ + struct ida group_ida; + dev_t group_devt; +} vfio; + +struct vfio_iommu_driver { + const struct vfio_iommu_driver_ops *ops; + struct list_head vfio_next; +}; + +struct vfio_container { + struct kref kref; + struct list_head group_list; + struct rw_semaphore group_lock; + struct vfio_iommu_driver *iommu_driver; + void *iommu_data; + bool noiommu; +}; + +struct vfio_group { + struct device dev; + struct cdev cdev; + refcount_t users; + unsigned int container_users; + struct iommu_group *iommu_group; + struct vfio_container *container; + struct list_head device_list; + struct mutex device_lock; + struct list_head vfio_next; + struct list_head container_next; + enum vfio_group_type type; + unsigned int dev_counter; + struct rw_semaphore group_rwsem; + struct kvm *kvm; + struct file *opened_file; + struct blocking_notifier_head notifier; +}; + +#ifdef CONFIG_VFIO_NOIOMMU +static bool noiommu __read_mostly; +module_param_named(enable_unsafe_noiommu_mode, + noiommu, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. 
(default: false)"); +#endif + +static DEFINE_XARRAY(vfio_device_set_xa); +static const struct file_operations vfio_group_fops; + +int vfio_assign_device_set(struct vfio_device *device, void *set_id) +{ + unsigned long idx = (unsigned long)set_id; + struct vfio_device_set *new_dev_set; + struct vfio_device_set *dev_set; + + if (WARN_ON(!set_id)) + return -EINVAL; + + /* + * Atomically acquire a singleton object in the xarray for this set_id + */ + xa_lock(&vfio_device_set_xa); + dev_set = xa_load(&vfio_device_set_xa, idx); + if (dev_set) + goto found_get_ref; + xa_unlock(&vfio_device_set_xa); + + new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL); + if (!new_dev_set) + return -ENOMEM; + mutex_init(&new_dev_set->lock); + INIT_LIST_HEAD(&new_dev_set->device_list); + new_dev_set->set_id = set_id; + + xa_lock(&vfio_device_set_xa); + dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set, + GFP_KERNEL); + if (!dev_set) { + dev_set = new_dev_set; + goto found_get_ref; + } + + kfree(new_dev_set); + if (xa_is_err(dev_set)) { + xa_unlock(&vfio_device_set_xa); + return xa_err(dev_set); + } + +found_get_ref: + dev_set->device_count++; + xa_unlock(&vfio_device_set_xa); + mutex_lock(&dev_set->lock); + device->dev_set = dev_set; + list_add_tail(&device->dev_set_list, &dev_set->device_list); + mutex_unlock(&dev_set->lock); + return 0; +} +EXPORT_SYMBOL_GPL(vfio_assign_device_set); + +static void vfio_release_device_set(struct vfio_device *device) +{ + struct vfio_device_set *dev_set = device->dev_set; + + if (!dev_set) + return; + + mutex_lock(&dev_set->lock); + list_del(&device->dev_set_list); + mutex_unlock(&dev_set->lock); + + xa_lock(&vfio_device_set_xa); + if (!--dev_set->device_count) { + __xa_erase(&vfio_device_set_xa, + (unsigned long)dev_set->set_id); + mutex_destroy(&dev_set->lock); + kfree(dev_set); + } + xa_unlock(&vfio_device_set_xa); +} + +#ifdef CONFIG_VFIO_NOIOMMU +static void *vfio_noiommu_open(unsigned long arg) +{ + if (arg != VFIO_NOIOMMU_IOMMU) + return ERR_PTR(-EINVAL); + if (!capable(CAP_SYS_RAWIO)) + return ERR_PTR(-EPERM); + + return NULL; +} + +static void vfio_noiommu_release(void *iommu_data) +{ +} + +static long vfio_noiommu_ioctl(void *iommu_data, + unsigned int cmd, unsigned long arg) +{ + if (cmd == VFIO_CHECK_EXTENSION) + return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0; + + return -ENOTTY; +} + +static int vfio_noiommu_attach_group(void *iommu_data, + struct iommu_group *iommu_group, enum vfio_group_type type) +{ + return 0; +} + +static void vfio_noiommu_detach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ +} + +static const struct vfio_iommu_driver_ops vfio_noiommu_ops = { + .name = "vfio-noiommu", + .owner = THIS_MODULE, + .open = vfio_noiommu_open, + .release = vfio_noiommu_release, + .ioctl = vfio_noiommu_ioctl, + .attach_group = vfio_noiommu_attach_group, + .detach_group = vfio_noiommu_detach_group, +}; + +/* + * Only noiommu containers can use vfio-noiommu and noiommu containers can only + * use vfio-noiommu. 
+ */ +static inline bool vfio_iommu_driver_allowed(struct vfio_container *container, + const struct vfio_iommu_driver *driver) +{ + return container->noiommu == (driver->ops == &vfio_noiommu_ops); +} +#else +static inline bool vfio_iommu_driver_allowed(struct vfio_container *container, + const struct vfio_iommu_driver *driver) +{ + return true; +} +#endif /* CONFIG_VFIO_NOIOMMU */ + +/* + * IOMMU driver registration + */ +int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops) +{ + struct vfio_iommu_driver *driver, *tmp; + + if (WARN_ON(!ops->register_device != !ops->unregister_device)) + return -EINVAL; + + driver = kzalloc(sizeof(*driver), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->ops = ops; + + mutex_lock(&vfio.iommu_drivers_lock); + + /* Check for duplicates */ + list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) { + if (tmp->ops == ops) { + mutex_unlock(&vfio.iommu_drivers_lock); + kfree(driver); + return -EINVAL; + } + } + + list_add(&driver->vfio_next, &vfio.iommu_drivers_list); + + mutex_unlock(&vfio.iommu_drivers_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(vfio_register_iommu_driver); + +void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops) +{ + struct vfio_iommu_driver *driver; + + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { + if (driver->ops == ops) { + list_del(&driver->vfio_next); + mutex_unlock(&vfio.iommu_drivers_lock); + kfree(driver); + return; + } + } + mutex_unlock(&vfio.iommu_drivers_lock); +} +EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver); + +static void vfio_group_get(struct vfio_group *group); + +/* + * Container objects - containers are created when /dev/vfio/vfio is + * opened, but their lifecycle extends until the last user is done, so + * it's freed via kref. Must support container/group/device being + * closed in any order. 
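+ * + * For example, userspace can close the container fd while groups are still + * attached: each attached group holds a reference (taken in + * vfio_group_set_container()), so the final vfio_container_put() may come + * from __vfio_group_unset_container() rather than vfio_fops_release().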
+ */ +static void vfio_container_get(struct vfio_container *container) +{ + kref_get(&container->kref); +} + +static void vfio_container_release(struct kref *kref) +{ + struct vfio_container *container; + container = container_of(kref, struct vfio_container, kref); + + kfree(container); +} + +static void vfio_container_put(struct vfio_container *container) +{ + kref_put(&container->kref, vfio_container_release); +} + +/* + * Group objects - create, release, get, put, search + */ +static struct vfio_group * +__vfio_group_get_from_iommu(struct iommu_group *iommu_group) +{ + struct vfio_group *group; + + list_for_each_entry(group, &vfio.group_list, vfio_next) { + if (group->iommu_group == iommu_group) { + vfio_group_get(group); + return group; + } + } + return NULL; +} + +static struct vfio_group * +vfio_group_get_from_iommu(struct iommu_group *iommu_group) +{ + struct vfio_group *group; + + mutex_lock(&vfio.group_lock); + group = __vfio_group_get_from_iommu(iommu_group); + mutex_unlock(&vfio.group_lock); + return group; +} + +static void vfio_group_release(struct device *dev) +{ + struct vfio_group *group = container_of(dev, struct vfio_group, dev); + + mutex_destroy(&group->device_lock); + iommu_group_put(group->iommu_group); + ida_free(&vfio.group_ida, MINOR(group->dev.devt)); + kfree(group); +} + +static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group, + enum vfio_group_type type) +{ + struct vfio_group *group; + int minor; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL); + if (minor < 0) { + kfree(group); + return ERR_PTR(minor); + } + + device_initialize(&group->dev); + group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor); + group->dev.class = vfio.class; + group->dev.release = vfio_group_release; + cdev_init(&group->cdev, &vfio_group_fops); + group->cdev.owner = THIS_MODULE; + + refcount_set(&group->users, 1); + init_rwsem(&group->group_rwsem); + INIT_LIST_HEAD(&group->device_list); + mutex_init(&group->device_lock); + group->iommu_group = iommu_group; + /* put in vfio_group_release() */ + iommu_group_ref_get(iommu_group); + group->type = type; + BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); + + return group; +} + +static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, + enum vfio_group_type type) +{ + struct vfio_group *group; + struct vfio_group *ret; + int err; + + group = vfio_group_alloc(iommu_group, type); + if (IS_ERR(group)) + return group; + + err = dev_set_name(&group->dev, "%s%d", + group->type == VFIO_NO_IOMMU ? "noiommu-" : "", + iommu_group_id(iommu_group)); + if (err) { + ret = ERR_PTR(err); + goto err_put; + } + + mutex_lock(&vfio.group_lock); + + /* Did we race creating this group? */ + ret = __vfio_group_get_from_iommu(iommu_group); + if (ret) + goto err_unlock; + + err = cdev_device_add(&group->cdev, &group->dev); + if (err) { + ret = ERR_PTR(err); + goto err_unlock; + } + + list_add(&group->vfio_next, &vfio.group_list); + + mutex_unlock(&vfio.group_lock); + return group; + +err_unlock: + mutex_unlock(&vfio.group_lock); +err_put: + put_device(&group->dev); + return ret; +} + +static void vfio_group_put(struct vfio_group *group) +{ + if (!refcount_dec_and_mutex_lock(&group->users, &vfio.group_lock)) + return; + + /* + * These data structures all have paired operations that can only be + * undone when the caller holds a live reference on the group. 
Since all + * pairs must be undone these WARN_ON's indicate some caller did not + * properly hold the group reference. + */ + WARN_ON(!list_empty(&group->device_list)); + WARN_ON(group->container || group->container_users); + WARN_ON(group->notifier.head); + + list_del(&group->vfio_next); + cdev_device_del(&group->cdev, &group->dev); + mutex_unlock(&vfio.group_lock); + + put_device(&group->dev); +} + +static void vfio_group_get(struct vfio_group *group) +{ + refcount_inc(&group->users); +} + +/* + * Device objects - create, release, get, put, search + */ +/* Device reference always implies a group reference */ +static void vfio_device_put(struct vfio_device *device) +{ + if (refcount_dec_and_test(&device->refcount)) + complete(&device->comp); +} + +static bool vfio_device_try_get(struct vfio_device *device) +{ + return refcount_inc_not_zero(&device->refcount); +} + +static struct vfio_device *vfio_group_get_device(struct vfio_group *group, + struct device *dev) +{ + struct vfio_device *device; + + mutex_lock(&group->device_lock); + list_for_each_entry(device, &group->device_list, group_next) { + if (device->dev == dev && vfio_device_try_get(device)) { + mutex_unlock(&group->device_lock); + return device; + } + } + mutex_unlock(&group->device_lock); + return NULL; +} + +/* + * VFIO driver API + */ +void vfio_init_group_dev(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops) +{ + init_completion(&device->comp); + device->dev = dev; + device->ops = ops; +} +EXPORT_SYMBOL_GPL(vfio_init_group_dev); + +void vfio_uninit_group_dev(struct vfio_device *device) +{ + vfio_release_device_set(device); +} +EXPORT_SYMBOL_GPL(vfio_uninit_group_dev); + +static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev, + enum vfio_group_type type) +{ + struct iommu_group *iommu_group; + struct vfio_group *group; + int ret; + + iommu_group = iommu_group_alloc(); + if (IS_ERR(iommu_group)) + return ERR_CAST(iommu_group); + + ret = iommu_group_set_name(iommu_group, "vfio-noiommu"); + if (ret) + goto out_put_group; + ret = iommu_group_add_device(iommu_group, dev); + if (ret) + goto out_put_group; + + group = vfio_create_group(iommu_group, type); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto out_remove_device; + } + iommu_group_put(iommu_group); + return group; + +out_remove_device: + iommu_group_remove_device(dev); +out_put_group: + iommu_group_put(iommu_group); + return ERR_PTR(ret); +} + +static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) +{ + struct iommu_group *iommu_group; + struct vfio_group *group; + + iommu_group = iommu_group_get(dev); +#ifdef CONFIG_VFIO_NOIOMMU + if (!iommu_group && noiommu) { + /* + * With noiommu enabled, create an IOMMU group for devices that + * don't already have one, implying no IOMMU hardware/driver + * exists. Taint the kernel because we're about to give a DMA + * capable device to a user without IOMMU protection. + */ + group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU); + if (!IS_ERR(group)) { + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); + } + return group; + } +#endif + if (!iommu_group) + return ERR_PTR(-EINVAL); + + /* + * VFIO always sets IOMMU_CACHE because we offer no way for userspace to + * restore cache coherency. It has to be checked here because it is only + * valid for cases where we are using iommu groups. 
+ */ + if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) { + iommu_group_put(iommu_group); + return ERR_PTR(-EINVAL); + } + + group = vfio_group_get_from_iommu(iommu_group); + if (!group) + group = vfio_create_group(iommu_group, VFIO_IOMMU); + + /* The vfio_group holds a reference to the iommu_group */ + iommu_group_put(iommu_group); + return group; +} + +static int __vfio_register_dev(struct vfio_device *device, + struct vfio_group *group) +{ + struct vfio_device *existing_device; + + if (IS_ERR(group)) + return PTR_ERR(group); + + /* + * If the driver doesn't specify a set then the device is added to a + * singleton set just for itself. + */ + if (!device->dev_set) + vfio_assign_device_set(device, device); + + existing_device = vfio_group_get_device(group, device->dev); + if (existing_device) { + dev_WARN(device->dev, "Device already exists on group %d\n", + iommu_group_id(group->iommu_group)); + vfio_device_put(existing_device); + if (group->type == VFIO_NO_IOMMU || + group->type == VFIO_EMULATED_IOMMU) + iommu_group_remove_device(device->dev); + vfio_group_put(group); + return -EBUSY; + } + + /* Our reference on group is moved to the device */ + device->group = group; + + /* Refcounting can't start until the driver calls register */ + refcount_set(&device->refcount, 1); + + mutex_lock(&group->device_lock); + list_add(&device->group_next, &group->device_list); + group->dev_counter++; + mutex_unlock(&group->device_lock); + + return 0; +} + +int vfio_register_group_dev(struct vfio_device *device) +{ + return __vfio_register_dev(device, + vfio_group_find_or_alloc(device->dev)); +} +EXPORT_SYMBOL_GPL(vfio_register_group_dev); + +/* + * Register a virtual device without IOMMU backing. The user of this + * device must not be able to directly trigger unmediated DMA. + */ +int vfio_register_emulated_iommu_dev(struct vfio_device *device) +{ + return __vfio_register_dev(device, + vfio_noiommu_group_alloc(device->dev, VFIO_EMULATED_IOMMU)); +} +EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev); + +static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, + char *buf) +{ + struct vfio_device *it, *device = ERR_PTR(-ENODEV); + + mutex_lock(&group->device_lock); + list_for_each_entry(it, &group->device_list, group_next) { + int ret; + + if (it->ops->match) { + ret = it->ops->match(it, buf); + if (ret < 0) { + device = ERR_PTR(ret); + break; + } + } else { + ret = !strcmp(dev_name(it->dev), buf); + } + + if (ret && vfio_device_try_get(it)) { + device = it; + break; + } + } + mutex_unlock(&group->device_lock); + + return device; +} + +/* + * Decrement the device reference count and wait for the device to be + * removed. Open file descriptors for the device... 
*/ +void vfio_unregister_group_dev(struct vfio_device *device) +{ + struct vfio_group *group = device->group; + unsigned int i = 0; + bool interrupted = false; + long rc; + + vfio_device_put(device); + rc = try_wait_for_completion(&device->comp); + while (rc <= 0) { + if (device->ops->request) + device->ops->request(device, i++); + + if (interrupted) { + rc = wait_for_completion_timeout(&device->comp, + HZ * 10); + } else { + rc = wait_for_completion_interruptible_timeout( + &device->comp, HZ * 10); + if (rc < 0) { + interrupted = true; + dev_warn(device->dev, + "Device is currently in use, task" + " \"%s\" (%d) " + "blocked until device is released", + current->comm, task_pid_nr(current)); + } + } + } + + mutex_lock(&group->device_lock); + list_del(&device->group_next); + group->dev_counter--; + mutex_unlock(&group->device_lock); + + if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU) + iommu_group_remove_device(device->dev); + + /* Matches the get in vfio_register_group_dev() */ + vfio_group_put(group); +} +EXPORT_SYMBOL_GPL(vfio_unregister_group_dev); + +/* + * VFIO base fd, /dev/vfio/vfio + */ +static long vfio_ioctl_check_extension(struct vfio_container *container, + unsigned long arg) +{ + struct vfio_iommu_driver *driver; + long ret = 0; + + down_read(&container->group_lock); + + driver = container->iommu_driver; + + switch (arg) { + /* No base extensions yet */ + default: + /* + * If no driver is set, poll all registered drivers for + * extensions and return the first positive result. If + * a driver is already set, further queries will be passed + * only to that driver. + */ + if (!driver) { + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, + vfio_next) { + + if (!list_empty(&container->group_list) && + !vfio_iommu_driver_allowed(container, + driver)) + continue; + if (!try_module_get(driver->ops->owner)) + continue; + + ret = driver->ops->ioctl(NULL, + VFIO_CHECK_EXTENSION, + arg); + module_put(driver->ops->owner); + if (ret > 0) + break; + } + mutex_unlock(&vfio.iommu_drivers_lock); + } else + ret = driver->ops->ioctl(container->iommu_data, + VFIO_CHECK_EXTENSION, arg); + } + + up_read(&container->group_lock); + + return ret; +} + +/* hold write lock on container->group_lock */ +static int __vfio_container_attach_groups(struct vfio_container *container, + struct vfio_iommu_driver *driver, + void *data) +{ + struct vfio_group *group; + int ret = -ENODEV; + + list_for_each_entry(group, &container->group_list, container_next) { + ret = driver->ops->attach_group(data, group->iommu_group, + group->type); + if (ret) + goto unwind; + } + + return ret; + +unwind: + list_for_each_entry_continue_reverse(group, &container->group_list, + container_next) { + driver->ops->detach_group(data, group->iommu_group); + } + + return ret; +} + +static long vfio_ioctl_set_iommu(struct vfio_container *container, + unsigned long arg) +{ + struct vfio_iommu_driver *driver; + long ret = -ENODEV; + + down_write(&container->group_lock); + + /* + * The container is designed to be an unprivileged interface while + * the group can be assigned to specific users. Therefore, only by + * adding a group to a container does the user get the privilege of + * enabling the iommu, which may allocate finite resources. There + * is no unset_iommu, but by removing all the groups from a container, + * the container is deprivileged and returns to an unset state. 
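+ * + * For reference, the canonical userspace sequence (see + * Documentation/driver-api/vfio.rst) looks roughly like: + * + *	container = open("/dev/vfio/vfio", O_RDWR); + *	group = open("/dev/vfio/26", O_RDWR); + *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container); + *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); + *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");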
+ */ + if (list_empty(&container->group_list) || container->iommu_driver) { + up_write(&container->group_lock); + return -EINVAL; + } + + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { + void *data; + + if (!vfio_iommu_driver_allowed(container, driver)) + continue; + if (!try_module_get(driver->ops->owner)) + continue; + + /* + * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION, + * so test which iommu driver reported support for this + * extension and call open on them. We also pass them the + * magic, allowing a single driver to support multiple + * interfaces if they'd like. + */ + if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) { + module_put(driver->ops->owner); + continue; + } + + data = driver->ops->open(arg); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + module_put(driver->ops->owner); + continue; + } + + ret = __vfio_container_attach_groups(container, driver, data); + if (ret) { + driver->ops->release(data); + module_put(driver->ops->owner); + continue; + } + + container->iommu_driver = driver; + container->iommu_data = data; + break; + } + + mutex_unlock(&vfio.iommu_drivers_lock); + up_write(&container->group_lock); + + return ret; +} + +static long vfio_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver; + void *data; + long ret = -EINVAL; + + if (!container) + return ret; + + switch (cmd) { + case VFIO_GET_API_VERSION: + ret = VFIO_API_VERSION; + break; + case VFIO_CHECK_EXTENSION: + ret = vfio_ioctl_check_extension(container, arg); + break; + case VFIO_SET_IOMMU: + ret = vfio_ioctl_set_iommu(container, arg); + break; + default: + driver = container->iommu_driver; + data = container->iommu_data; + + if (driver) /* passthrough all unrecognized ioctls */ + ret = driver->ops->ioctl(data, cmd, arg); + } + + return ret; +} + +static int vfio_fops_open(struct inode *inode, struct file *filep) +{ + struct vfio_container *container; + + container = kzalloc(sizeof(*container), GFP_KERNEL); + if (!container) + return -ENOMEM; + + INIT_LIST_HEAD(&container->group_list); + init_rwsem(&container->group_lock); + kref_init(&container->kref); + + filep->private_data = container; + + return 0; +} + +static int vfio_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver = container->iommu_driver; + + if (driver && driver->ops->notify) + driver->ops->notify(container->iommu_data, + VFIO_IOMMU_CONTAINER_CLOSE); + + filep->private_data = NULL; + + vfio_container_put(container); + + return 0; +} + +static const struct file_operations vfio_fops = { + .owner = THIS_MODULE, + .open = vfio_fops_open, + .release = vfio_fops_release, + .unlocked_ioctl = vfio_fops_unl_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +/* + * VFIO Group fd, /dev/vfio/$GROUP + */ +static void __vfio_group_unset_container(struct vfio_group *group) +{ + struct vfio_container *container = group->container; + struct vfio_iommu_driver *driver; + + lockdep_assert_held_write(&group->group_rwsem); + + down_write(&container->group_lock); + + driver = container->iommu_driver; + if (driver) + driver->ops->detach_group(container->iommu_data, + group->iommu_group); + + if (group->type == VFIO_IOMMU) + iommu_group_release_dma_owner(group->iommu_group); + + group->container = NULL; + group->container_users = 0; + list_del(&group->container_next); 
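+ + /* Everything above undoes the corresponding steps in vfio_group_set_container() */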
+ + /* Detaching the last group deprivileges a container, remove iommu */ + if (driver && list_empty(&container->group_list)) { + driver->ops->release(container->iommu_data); + module_put(driver->ops->owner); + container->iommu_driver = NULL; + container->iommu_data = NULL; + } + + up_write(&container->group_lock); + + vfio_container_put(container); +} + +/* + * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or + * if there was no container to unset. Since the ioctl is called on + * the group, we know that still exists, therefore the only valid + * transition here is 1->0. + */ +static int vfio_group_unset_container(struct vfio_group *group) +{ + lockdep_assert_held_write(&group->group_rwsem); + + if (!group->container) + return -EINVAL; + if (group->container_users != 1) + return -EBUSY; + __vfio_group_unset_container(group); + return 0; +} + +static int vfio_group_set_container(struct vfio_group *group, int container_fd) +{ + struct fd f; + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret = 0; + + lockdep_assert_held_write(&group->group_rwsem); + + if (group->container || WARN_ON(group->container_users)) + return -EINVAL; + + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) + return -EPERM; + + f = fdget(container_fd); + if (!f.file) + return -EBADF; + + /* Sanity check, is this really our fd? */ + if (f.file->f_op != &vfio_fops) { + fdput(f); + return -EINVAL; + } + + container = f.file->private_data; + WARN_ON(!container); /* fget ensures we don't race vfio_release */ + + down_write(&container->group_lock); + + /* Real groups and fake groups cannot mix */ + if (!list_empty(&container->group_list) && + container->noiommu != (group->type == VFIO_NO_IOMMU)) { + ret = -EPERM; + goto unlock_out; + } + + if (group->type == VFIO_IOMMU) { + ret = iommu_group_claim_dma_owner(group->iommu_group, f.file); + if (ret) + goto unlock_out; + } + + driver = container->iommu_driver; + if (driver) { + ret = driver->ops->attach_group(container->iommu_data, + group->iommu_group, + group->type); + if (ret) { + if (group->type == VFIO_IOMMU) + iommu_group_release_dma_owner( + group->iommu_group); + goto unlock_out; + } + } + + group->container = container; + group->container_users = 1; + container->noiommu = (group->type == VFIO_NO_IOMMU); + list_add(&group->container_next, &container->group_list); + + /* Get a reference on the container and mark a user within the group */ + vfio_container_get(container); + +unlock_out: + up_write(&container->group_lock); + fdput(f); + return ret; +} + +static const struct file_operations vfio_device_fops; + +/* true if the vfio_device has open_device() called but not close_device() */ +static bool vfio_assert_device_open(struct vfio_device *device) +{ + return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); +} + +static int vfio_device_assign_container(struct vfio_device *device) +{ + struct vfio_group *group = device->group; + + lockdep_assert_held_write(&group->group_rwsem); + + if (!group->container || !group->container->iommu_driver || + WARN_ON(!group->container_users)) + return -EINVAL; + + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) + return -EPERM; + + get_file(group->opened_file); + group->container_users++; + return 0; +} + +static void vfio_device_unassign_container(struct vfio_device *device) +{ + down_write(&device->group->group_rwsem); + WARN_ON(device->group->container_users <= 1); + device->group->container_users--; + fput(device->group->opened_file); + 
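/* The fput() above pairs with the get_file() in vfio_device_assign_container() */ +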
up_write(&device->group->group_rwsem); +} + +static struct file *vfio_device_open(struct vfio_device *device) +{ + struct vfio_iommu_driver *iommu_driver; + struct file *filep; + int ret; + + down_write(&device->group->group_rwsem); + ret = vfio_device_assign_container(device); + up_write(&device->group->group_rwsem); + if (ret) + return ERR_PTR(ret); + + if (!try_module_get(device->dev->driver->owner)) { + ret = -ENODEV; + goto err_unassign_container; + } + + mutex_lock(&device->dev_set->lock); + device->open_count++; + if (device->open_count == 1) { + /* + * Here we pass the KVM pointer with the group under the read + * lock. If the device driver will use it, it must obtain a + * reference and release it during close_device. + */ + down_read(&device->group->group_rwsem); + device->kvm = device->group->kvm; + + if (device->ops->open_device) { + ret = device->ops->open_device(device); + if (ret) + goto err_undo_count; + } + + iommu_driver = device->group->container->iommu_driver; + if (iommu_driver && iommu_driver->ops->register_device) + iommu_driver->ops->register_device( + device->group->container->iommu_data, device); + + up_read(&device->group->group_rwsem); + } + mutex_unlock(&device->dev_set->lock); + + /* + * We can't use anon_inode_getfd() because we need to modify + * the f_mode flags directly to allow more than just ioctls + */ + filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, + device, O_RDWR); + if (IS_ERR(filep)) { + ret = PTR_ERR(filep); + goto err_close_device; + } + + /* + * TODO: add an anon_inode interface to do this. + * Appears to be missing by lack of need rather than + * explicitly prevented. Now there's need. + */ + filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE); + + if (device->group->type == VFIO_NO_IOMMU) + dev_warn(device->dev, "vfio-noiommu device opened by user " + "(%s:%d)\n", current->comm, task_pid_nr(current)); + /* + * On success the ref of device is moved to the file and + * put in vfio_device_fops_release() + */ + return filep; + +err_close_device: + mutex_lock(&device->dev_set->lock); + down_read(&device->group->group_rwsem); + if (device->open_count == 1 && device->ops->close_device) { + device->ops->close_device(device); + + iommu_driver = device->group->container->iommu_driver; + if (iommu_driver && iommu_driver->ops->unregister_device) + iommu_driver->ops->unregister_device( + device->group->container->iommu_data, device); + } +err_undo_count: + up_read(&device->group->group_rwsem); + device->open_count--; + if (device->open_count == 0 && device->kvm) + device->kvm = NULL; + mutex_unlock(&device->dev_set->lock); + module_put(device->dev->driver->owner); +err_unassign_container: + vfio_device_unassign_container(device); + return ERR_PTR(ret); +} + +static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) +{ + struct vfio_device *device; + struct file *filep; + int fdno; + int ret; + + device = vfio_device_get_from_name(group, buf); + if (IS_ERR(device)) + return PTR_ERR(device); + + fdno = get_unused_fd_flags(O_CLOEXEC); + if (fdno < 0) { + ret = fdno; + goto err_put_device; + } + + filep = vfio_device_open(device); + if (IS_ERR(filep)) { + ret = PTR_ERR(filep); + goto err_put_fdno; + } + + fd_install(fdno, filep); + return fdno; + +err_put_fdno: + put_unused_fd(fdno); +err_put_device: + vfio_device_put(device); + return ret; +} + +static long vfio_group_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_group *group = filep->private_data; + long ret = -ENOTTY; + + switch (cmd) { 
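+ /* + * Group-level ioctls only: status/viability query, container attach and + * detach, and device fd creation. The VFIO_SET_IOMMU step happens on the + * container fd, in vfio_fops_unl_ioctl() above. + */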
+ case VFIO_GROUP_GET_STATUS: + { + struct vfio_group_status status; + unsigned long minsz; + + minsz = offsetofend(struct vfio_group_status, flags); + + if (copy_from_user(&status, (void __user *)arg, minsz)) + return -EFAULT; + + if (status.argsz < minsz) + return -EINVAL; + + status.flags = 0; + + down_read(&group->group_rwsem); + if (group->container) + status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET | + VFIO_GROUP_FLAGS_VIABLE; + else if (!iommu_group_dma_owner_claimed(group->iommu_group)) + status.flags |= VFIO_GROUP_FLAGS_VIABLE; + up_read(&group->group_rwsem); + + if (copy_to_user((void __user *)arg, &status, minsz)) + return -EFAULT; + + ret = 0; + break; + } + case VFIO_GROUP_SET_CONTAINER: + { + int fd; + + if (get_user(fd, (int __user *)arg)) + return -EFAULT; + + if (fd < 0) + return -EINVAL; + + down_write(&group->group_rwsem); + ret = vfio_group_set_container(group, fd); + up_write(&group->group_rwsem); + break; + } + case VFIO_GROUP_UNSET_CONTAINER: + down_write(&group->group_rwsem); + ret = vfio_group_unset_container(group); + up_write(&group->group_rwsem); + break; + case VFIO_GROUP_GET_DEVICE_FD: + { + char *buf; + + buf = strndup_user((const char __user *)arg, PAGE_SIZE); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = vfio_group_get_device_fd(group, buf); + kfree(buf); + break; + } + } + + return ret; +} + +static int vfio_group_fops_open(struct inode *inode, struct file *filep) +{ + struct vfio_group *group = + container_of(inode->i_cdev, struct vfio_group, cdev); + int ret; + + down_write(&group->group_rwsem); + + /* users can be zero if this races with vfio_group_put() */ + if (!refcount_inc_not_zero(&group->users)) { + ret = -ENODEV; + goto err_unlock; + } + + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) { + ret = -EPERM; + goto err_put; + } + + /* + * Do we need multiple instances of the group open? Seems not. + */ + if (group->opened_file) { + ret = -EBUSY; + goto err_put; + } + group->opened_file = filep; + filep->private_data = group; + + up_write(&group->group_rwsem); + return 0; +err_put: + vfio_group_put(group); +err_unlock: + up_write(&group->group_rwsem); + return ret; +} + +static int vfio_group_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_group *group = filep->private_data; + + filep->private_data = NULL; + + down_write(&group->group_rwsem); + /* + * Device FDs hold a group file reference, therefore the group release + * is only called when there are no open devices. 
+ */ + WARN_ON(group->notifier.head); + if (group->container) { + WARN_ON(group->container_users != 1); + __vfio_group_unset_container(group); + } + group->opened_file = NULL; + up_write(&group->group_rwsem); + + vfio_group_put(group); + + return 0; +} + +static const struct file_operations vfio_group_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = vfio_group_fops_unl_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .open = vfio_group_fops_open, + .release = vfio_group_fops_release, +}; + +/* + * VFIO Device fd + */ +static int vfio_device_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_device *device = filep->private_data; + struct vfio_iommu_driver *iommu_driver; + + mutex_lock(&device->dev_set->lock); + vfio_assert_device_open(device); + down_read(&device->group->group_rwsem); + if (device->open_count == 1 && device->ops->close_device) + device->ops->close_device(device); + + iommu_driver = device->group->container->iommu_driver; + if (iommu_driver && iommu_driver->ops->unregister_device) + iommu_driver->ops->unregister_device( + device->group->container->iommu_data, device); + up_read(&device->group->group_rwsem); + device->open_count--; + if (device->open_count == 0) + device->kvm = NULL; + mutex_unlock(&device->dev_set->lock); + + module_put(device->dev->driver->owner); + + vfio_device_unassign_container(device); + + vfio_device_put(device); + + return 0; +} + +/* + * vfio_mig_get_next_state - Compute the next step in the FSM + * @cur_fsm - The current state the device is in + * @new_fsm - The target state to reach + * @next_fsm - Pointer to the next step to get to new_fsm + * + * Return 0 upon success, otherwise -errno + * Upon success the next step in the state progression between cur_fsm and + * new_fsm will be set in next_fsm. + * + * This breaks down requests for combination transitions into smaller steps and + * returns the next step to get to new_fsm. The function may need to be called + * multiple times before reaching new_fsm. 
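+ * + * A sketch of the expected caller loop, e.g. inside a driver's + * migration_set_state() op (illustrative only; do_one_arc() stands in for + * the driver-specific single-step transition): + * + *	while (cur_fsm != new_fsm) { + *		ret = vfio_mig_get_next_state(device, cur_fsm, new_fsm, &next_fsm); + *		if (ret) + *			break; + *		ret = do_one_arc(device, next_fsm); + *		if (ret) + *			break; + *		cur_fsm = next_fsm; + *	}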
+ * + */ +int vfio_mig_get_next_state(struct vfio_device *device, + enum vfio_device_mig_state cur_fsm, + enum vfio_device_mig_state new_fsm, + enum vfio_device_mig_state *next_fsm) +{ + enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RUNNING_P2P + 1 }; + /* + * The coding in this table requires the driver to implement the + * following FSM arcs: + * RESUMING -> STOP + * STOP -> RESUMING + * STOP -> STOP_COPY + * STOP_COPY -> STOP + * + * If P2P is supported then the driver must also implement these FSM + * arcs: + * RUNNING -> RUNNING_P2P + * RUNNING_P2P -> RUNNING + * RUNNING_P2P -> STOP + * STOP -> RUNNING_P2P + * Without P2P the driver must implement: + * RUNNING -> STOP + * STOP -> RUNNING + * + * The coding will step through multiple states for some combination + * transitions; if all optional features are supported, this means the + * following ones: + * RESUMING -> STOP -> RUNNING_P2P + * RESUMING -> STOP -> RUNNING_P2P -> RUNNING + * RESUMING -> STOP -> STOP_COPY + * RUNNING -> RUNNING_P2P -> STOP + * RUNNING -> RUNNING_P2P -> STOP -> RESUMING + * RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY + * RUNNING_P2P -> STOP -> RESUMING + * RUNNING_P2P -> STOP -> STOP_COPY + * STOP -> RUNNING_P2P -> RUNNING + * STOP_COPY -> STOP -> RESUMING + * STOP_COPY -> STOP -> RUNNING_P2P + * STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING + */ + static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = { + [VFIO_DEVICE_STATE_STOP] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RUNNING] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_STOP_COPY] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RESUMING] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RUNNING_P2P] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_ERROR] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR, + 
[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + }; + + static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RUNNING_P2P] = + VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P, + [VFIO_DEVICE_STATE_ERROR] = ~0U, + }; + + if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || + (state_flags_table[cur_fsm] & device->migration_flags) != + state_flags_table[cur_fsm])) + return -EINVAL; + + if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || + (state_flags_table[new_fsm] & device->migration_flags) != + state_flags_table[new_fsm]) + return -EINVAL; + + /* + * Arcs touching optional and unsupported states are skipped over. The + * driver will instead see an arc from the original state to the next + * logical state, as per the above comment. + */ + *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm]; + while ((state_flags_table[*next_fsm] & device->migration_flags) != + state_flags_table[*next_fsm]) + *next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm]; + + return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(vfio_mig_get_next_state); + +/* + * Convert the driver's struct file into a FD number and return it to userspace + */ +static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg, + struct vfio_device_feature_mig_state *mig) +{ + int ret; + int fd; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto out_fput; + } + + mig->data_fd = fd; + if (copy_to_user(arg, mig, sizeof(*mig))) { + ret = -EFAULT; + goto out_put_unused; + } + fd_install(fd, filp); + return 0; + +out_put_unused: + put_unused_fd(fd); +out_fput: + fput(filp); + return ret; +} + +static int +vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + size_t minsz = + offsetofend(struct vfio_device_feature_mig_state, data_fd); + struct vfio_device_feature_mig_state mig; + struct file *filp = NULL; + int ret; + + if (!device->mig_ops) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, + VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_GET, + sizeof(mig)); + if (ret != 1) + return ret; + + if (copy_from_user(&mig, arg, minsz)) + return -EFAULT; + + if (flags & VFIO_DEVICE_FEATURE_GET) { + enum vfio_device_mig_state curr_state; + + ret = device->mig_ops->migration_get_state(device, + &curr_state); + if (ret) + return ret; + mig.device_state = curr_state; + goto out_copy; + } + + /* Handle the VFIO_DEVICE_FEATURE_SET */ + filp = device->mig_ops->migration_set_state(device, mig.device_state); + if (IS_ERR(filp) || !filp) + goto out_copy; + + return vfio_ioct_mig_return_fd(filp, arg, &mig); +out_copy: + mig.data_fd = -1; + if (copy_to_user(arg, &mig, sizeof(mig))) + return -EFAULT; + if (IS_ERR(filp)) + return PTR_ERR(filp); + return 0; +} + +static int vfio_ioctl_device_feature_migration(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + struct vfio_device_feature_migration mig = { + .flags = device->migration_flags, + }; + int ret; + + if (!device->mig_ops) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, 
VFIO_DEVICE_FEATURE_GET, + sizeof(mig)); + if (ret != 1) + return ret; + if (copy_to_user(arg, &mig, sizeof(mig))) + return -EFAULT; + return 0; +} + +static int vfio_ioctl_device_feature(struct vfio_device *device, + struct vfio_device_feature __user *arg) +{ + size_t minsz = offsetofend(struct vfio_device_feature, flags); + struct vfio_device_feature feature; + + if (copy_from_user(&feature, arg, minsz)) + return -EFAULT; + + if (feature.argsz < minsz) + return -EINVAL; + + /* Check unknown flags */ + if (feature.flags & + ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE)) + return -EINVAL; + + /* GET & SET are mutually exclusive except with PROBE */ + if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) && + (feature.flags & VFIO_DEVICE_FEATURE_SET) && + (feature.flags & VFIO_DEVICE_FEATURE_GET)) + return -EINVAL; + + switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) { + case VFIO_DEVICE_FEATURE_MIGRATION: + return vfio_ioctl_device_feature_migration( + device, feature.flags, arg->data, + feature.argsz - minsz); + case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE: + return vfio_ioctl_device_feature_mig_device_state( + device, feature.flags, arg->data, + feature.argsz - minsz); + default: + if (unlikely(!device->ops->device_feature)) + return -EINVAL; + return device->ops->device_feature(device, feature.flags, + arg->data, + feature.argsz - minsz); + } +} + +static long vfio_device_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_device *device = filep->private_data; + + switch (cmd) { + case VFIO_DEVICE_FEATURE: + return vfio_ioctl_device_feature(device, (void __user *)arg); + default: + if (unlikely(!device->ops->ioctl)) + return -EINVAL; + return device->ops->ioctl(device, cmd, arg); + } +} + +static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_device *device = filep->private_data; + + if (unlikely(!device->ops->read)) + return -EINVAL; + + return device->ops->read(device, buf, count, ppos); +} + +static ssize_t vfio_device_fops_write(struct file *filep, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_device *device = filep->private_data; + + if (unlikely(!device->ops->write)) + return -EINVAL; + + return device->ops->write(device, buf, count, ppos); +} + +static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct vfio_device *device = filep->private_data; + + if (unlikely(!device->ops->mmap)) + return -EINVAL; + + return device->ops->mmap(device, vma); +} + +static const struct file_operations vfio_device_fops = { + .owner = THIS_MODULE, + .release = vfio_device_fops_release, + .read = vfio_device_fops_read, + .write = vfio_device_fops_write, + .unlocked_ioctl = vfio_device_fops_unl_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .mmap = vfio_device_fops_mmap, +}; + +/** + * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file + * @file: VFIO group file + * + * The returned iommu_group is valid as long as a ref is held on the file. 
+ */ +struct iommu_group *vfio_file_iommu_group(struct file *file) +{ + struct vfio_group *group = file->private_data; + + if (file->f_op != &vfio_group_fops) + return NULL; + return group->iommu_group; +} +EXPORT_SYMBOL_GPL(vfio_file_iommu_group); + +/** + * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file + * is always CPU cache coherent + * @file: VFIO group file + * + * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop + * bit in DMA transactions. A return of false indicates that the user has + * rights to access additional instructions such as wbinvd on x86. + */ +bool vfio_file_enforced_coherent(struct file *file) +{ + struct vfio_group *group = file->private_data; + bool ret; + + if (file->f_op != &vfio_group_fops) + return true; + + down_read(&group->group_rwsem); + if (group->container) { + ret = vfio_ioctl_check_extension(group->container, + VFIO_DMA_CC_IOMMU); + } else { + /* + * Since the coherency state is determined only once a container + * is attached the user must do so before they can prove they + * have permission. + */ + ret = true; + } + up_read(&group->group_rwsem); + return ret; +} +EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent); + +/** + * vfio_file_set_kvm - Link a kvm with VFIO drivers + * @file: VFIO group file + * @kvm: KVM to link + * + * When a VFIO device is first opened the KVM will be available in + * device->kvm if one was associated with the group. + */ +void vfio_file_set_kvm(struct file *file, struct kvm *kvm) +{ + struct vfio_group *group = file->private_data; + + if (file->f_op != &vfio_group_fops) + return; + + down_write(&group->group_rwsem); + group->kvm = kvm; + up_write(&group->group_rwsem); +} +EXPORT_SYMBOL_GPL(vfio_file_set_kvm); + +/** + * vfio_file_has_dev - True if the VFIO file is a handle for device + * @file: VFIO file to check + * @device: Device that must be part of the file + * + * Returns true if given file has permission to manipulate the given device. + */ +bool vfio_file_has_dev(struct file *file, struct vfio_device *device) +{ + struct vfio_group *group = file->private_data; + + if (file->f_op != &vfio_group_fops) + return false; + + return group == device->group; +} +EXPORT_SYMBOL_GPL(vfio_file_has_dev); + +/* + * Sub-module support + */ +/* + * Helper for managing a buffer of info chain capabilities, allocate or + * reallocate a buffer with additional @size, filling in @id and @version + * of the capability. A pointer to the new capability is returned. + * + * NB. The chain is based at the head of the buffer, so new entries are + * added to the tail, vfio_info_cap_shift() should be called to fixup the + * next offsets prior to copying to the user buffer. 
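+ * + * Illustrative use by an ioctl handler (MY_CAP_ID, size and info are + * hypothetical): + * + *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + *	struct vfio_info_cap_header *header; + * + *	header = vfio_info_cap_add(&caps, size, MY_CAP_ID, 1); + *	if (!IS_ERR(header)) { + *		// fill in the capability body following *header, then: + *		vfio_info_cap_shift(&caps, sizeof(info)); + *		// copy info, then caps.buf (caps.size bytes), to userspace + *		kfree(caps.buf); + *	}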
+ */ +struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps, + size_t size, u16 id, u16 version) +{ + void *buf; + struct vfio_info_cap_header *header, *tmp; + + buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL); + if (!buf) { + kfree(caps->buf); + caps->buf = NULL; + caps->size = 0; + return ERR_PTR(-ENOMEM); + } + + caps->buf = buf; + header = buf + caps->size; + + /* Eventually copied to user buffer, zero */ + memset(header, 0, size); + + header->id = id; + header->version = version; + + /* Add to the end of the capability chain */ + for (tmp = buf; tmp->next; tmp = buf + tmp->next) + ; /* nothing */ + + tmp->next = caps->size; + caps->size += size; + + return header; +} +EXPORT_SYMBOL_GPL(vfio_info_cap_add); + +void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset) +{ + struct vfio_info_cap_header *tmp; + void *buf = (void *)caps->buf; + + for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset) + tmp->next += offset; +} +EXPORT_SYMBOL(vfio_info_cap_shift); + +int vfio_info_add_capability(struct vfio_info_cap *caps, + struct vfio_info_cap_header *cap, size_t size) +{ + struct vfio_info_cap_header *header; + + header = vfio_info_cap_add(caps, size, cap->id, cap->version); + if (IS_ERR(header)) + return PTR_ERR(header); + + memcpy(header + 1, cap + 1, size - sizeof(*header)); + + return 0; +} +EXPORT_SYMBOL(vfio_info_add_capability); + +int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, + int max_irq_type, size_t *data_size) +{ + unsigned long minsz; + size_t size; + + minsz = offsetofend(struct vfio_irq_set, count); + + if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) || + (hdr->count >= (U32_MAX - hdr->start)) || + (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | + VFIO_IRQ_SET_ACTION_TYPE_MASK))) + return -EINVAL; + + if (data_size) + *data_size = 0; + + if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs) + return -EINVAL; + + switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { + case VFIO_IRQ_SET_DATA_NONE: + size = 0; + break; + case VFIO_IRQ_SET_DATA_BOOL: + size = sizeof(uint8_t); + break; + case VFIO_IRQ_SET_DATA_EVENTFD: + size = sizeof(int32_t); + break; + default: + return -EINVAL; + } + + if (size) { + if (hdr->argsz - minsz < hdr->count * size) + return -EINVAL; + + if (!data_size) + return -EINVAL; + + *data_size = hdr->count * size; + } + + return 0; +} +EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); + +/* + * Pin contiguous user pages and return their associated host pages for local + * domain only. + * @device [in] : device + * @iova [in] : starting IOVA of user pages to be pinned. + * @npage [in] : count of pages to be pinned. This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + * @prot [in] : protection flags + * @pages[out] : array of host pages + * Return error or number of pages pinned. 
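+ * + * For example, a mediated driver might pin one page ahead of a DMA + * (sketch only): + * + *	struct page *page; + *	int ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page); + * + *	if (ret == 1) { + *		// ... access the page ... + *		vfio_unpin_pages(vdev, iova, 1); + *	}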
+ */ +int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, + int npage, int prot, struct page **pages) +{ + struct vfio_container *container; + struct vfio_group *group = device->group; + struct vfio_iommu_driver *driver; + int ret; + + if (!pages || !npage || !vfio_assert_device_open(device)) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + if (group->dev_counter > 1) + return -EINVAL; + + /* group->container cannot change while a vfio device is open */ + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->pin_pages)) + ret = driver->ops->pin_pages(container->iommu_data, + group->iommu_group, iova, + npage, prot, pages); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_pin_pages); + +/* + * Unpin contiguous host pages for local domain only. + * @device [in] : device + * @iova [in] : starting address of user pages to be unpinned. + * @npage [in] : count of pages to be unpinned. This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + */ +void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + + if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES)) + return; + + if (WARN_ON(!vfio_assert_device_open(device))) + return; + + /* group->container cannot change while a vfio device is open */ + container = device->group->container; + driver = container->iommu_driver; + + driver->ops->unpin_pages(container->iommu_data, iova, npage); +} +EXPORT_SYMBOL(vfio_unpin_pages); + +/* + * This interface allows the CPUs to perform some sort of virtual DMA on + * behalf of the device. + * + * CPUs read/write from/into a range of IOVAs pointing to user space memory + * into/from a kernel buffer. + * + * As the read/write of user space memory is conducted via the CPUs and is + * not a real device DMA, it is not necessary to pin the user space memory. + * + * @device [in] : VFIO device + * @iova [in] : base IOVA of a user space buffer + * @data [in] : pointer to kernel buffer + * @len [in] : kernel buffer length + * @write : indicate read or write + * Return error code on failure or 0 on success. 
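+ *
+ * A minimal usage sketch (hypothetical caller; "vdev" and "iova" are
+ * placeholders): reading four bytes of IOVA-addressed memory into a
+ * kernel variable looks like
+ *
+ *	u32 val;
+ *	int ret = vfio_dma_rw(vdev, iova, &val, sizeof(val), false);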
+ */ +int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, + size_t len, bool write) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret = 0; + + if (!data || len <= 0 || !vfio_assert_device_open(device)) + return -EINVAL; + + /* group->container cannot change while a vfio device is open */ + container = device->group->container; + driver = container->iommu_driver; + + if (likely(driver && driver->ops->dma_rw)) + ret = driver->ops->dma_rw(container->iommu_data, + iova, data, len, write); + else + ret = -ENOTTY; + return ret; +} +EXPORT_SYMBOL(vfio_dma_rw); + +/* + * Module/class support + */ +static char *vfio_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); +} + +static struct miscdevice vfio_dev = { + .minor = VFIO_MINOR, + .name = "vfio", + .fops = &vfio_fops, + .nodename = "vfio/vfio", + .mode = S_IRUGO | S_IWUGO, +}; + +static int __init vfio_init(void) +{ + int ret; + + ida_init(&vfio.group_ida); + mutex_init(&vfio.group_lock); + mutex_init(&vfio.iommu_drivers_lock); + INIT_LIST_HEAD(&vfio.group_list); + INIT_LIST_HEAD(&vfio.iommu_drivers_list); + + ret = misc_register(&vfio_dev); + if (ret) { + pr_err("vfio: misc device register failed\n"); + return ret; + } + + /* /dev/vfio/$GROUP */ + vfio.class = class_create(THIS_MODULE, "vfio"); + if (IS_ERR(vfio.class)) { + ret = PTR_ERR(vfio.class); + goto err_class; + } + + vfio.class->devnode = vfio_devnode; + + ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio"); + if (ret) + goto err_alloc_chrdev; + +#ifdef CONFIG_VFIO_NOIOMMU + ret = vfio_register_iommu_driver(&vfio_noiommu_ops); +#endif + if (ret) + goto err_driver_register; + + pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); + return 0; + +err_driver_register: + unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); +err_alloc_chrdev: + class_destroy(vfio.class); + vfio.class = NULL; +err_class: + misc_deregister(&vfio_dev); + return ret; +} + +static void __exit vfio_cleanup(void) +{ + WARN_ON(!list_empty(&vfio.group_list)); + +#ifdef CONFIG_VFIO_NOIOMMU + vfio_unregister_iommu_driver(&vfio_noiommu_ops); +#endif + ida_destroy(&vfio.group_ida); + unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); + class_destroy(vfio.class); + vfio.class = NULL; + misc_deregister(&vfio_dev); + xa_destroy(&vfio_device_set_xa); +} + +module_init(vfio_init); +module_exit(vfio_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_ALIAS_MISCDEV(VFIO_MINOR); +MODULE_ALIAS("devname:vfio/vfio"); +MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce"); diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c new file mode 100644 index 0000000000..538f2d40ac --- /dev/null +++ b/drivers/video/aperture.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: MIT + +#include +#include +#include /* for old fbdev helpers */ +#include +#include +#include +#include +#include +#include +#include + +/** + * DOC: overview + * + * A graphics device might be supported by different drivers, but only one + * driver can be active at any given time. Many systems load a generic + * graphics drivers, such as EFI-GOP or VESA, early during the boot process. + * During later boot stages, they replace the generic driver with a dedicated, + * hardware-specific driver. To take over the device the dedicated driver + * first has to remove the generic driver. 
Aperture functions manage
 + * ownership of framebuffer memory and hand-over between drivers.
 + *
 + * Graphics drivers should call aperture_remove_conflicting_devices()
 + * at the top of their probe function. The function removes any generic
 + * driver that is currently associated with the given framebuffer memory.
 + * An example for a graphics device on the platform bus is shown below.
 + *
 + * .. code-block:: c
 + *
 + *	static int example_probe(struct platform_device *pdev)
 + *	{
 + *		struct resource *mem;
 + *		resource_size_t base, size;
 + *		int ret;
 + *
 + *		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 + *		if (!mem)
 + *			return -ENODEV;
 + *		base = mem->start;
 + *		size = resource_size(mem);
 + *
 + *		ret = aperture_remove_conflicting_devices(base, size, false, "example");
 + *		if (ret)
 + *			return ret;
 + *
 + *		// Initialize the hardware
 + *		...
 + *
 + *		return 0;
 + *	}
 + *
 + *	static struct platform_driver example_driver = {
 + *		.probe = example_probe,
 + *		...
 + *	};
 + *
 + * The given example reads the platform device's I/O-memory range from the
 + * device instance. An active framebuffer will be located within this range.
 + * The call to aperture_remove_conflicting_devices() releases drivers that
 + * have previously claimed ownership of the range and are currently driving
 + * output on the framebuffer. If successful, the new driver can take over
 + * the device.
 + *
 + * While the given example uses a platform device, the aperture helpers work
 + * with every bus that has an addressable framebuffer. In the case of PCI,
 + * device drivers can also call aperture_remove_conflicting_pci_devices() and
 + * let the function detect the apertures automatically. Device drivers without
 + * knowledge of the framebuffer's location can call
 + * aperture_remove_all_conflicting_devices(), which removes all known devices.
 + *
 + * Drivers that are susceptible to being removed by other drivers, such as
 + * generic EFI or VESA drivers, have to register themselves as owners of their
 + * framebuffer apertures. Ownership of the framebuffer memory is achieved
 + * by calling devm_aperture_acquire_for_platform_device(). If successful, the
 + * driver is the owner of the framebuffer range. The function fails if the
 + * framebuffer is already owned by another driver. See below for an example.
 + *
 + * .. code-block:: c
 + *
 + *	static int generic_probe(struct platform_device *pdev)
 + *	{
 + *		struct resource *mem;
 + *		resource_size_t base, size;
 + *		int ret;
 + *
 + *		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 + *		if (!mem)
 + *			return -ENODEV;
 + *		base = mem->start;
 + *		size = resource_size(mem);
 + *
 + *		ret = devm_aperture_acquire_for_platform_device(pdev, base, size);
 + *		if (ret)
 + *			return ret;
 + *
 + *		// Initialize the hardware
 + *		...
 + *
 + *		return 0;
 + *	}
 + *
 + *	static int generic_remove(struct platform_device *pdev)
 + *	{
 + *		// Hot-unplug the device
 + *		...
 + *
 + *		return 0;
 + *	}
 + *
 + *	static struct platform_driver generic_driver = {
 + *		.probe = generic_probe,
 + *		.remove = generic_remove,
 + *		...
 + *	};
 + *
 + * Similar to the previous example, the generic driver claims ownership
 + * of the framebuffer memory from its probe function. This will fail if the
 + * memory range, or parts of it, is already owned by another driver.
 + *
 + * If successful, the generic driver is now subject to forced removal by
 + * another driver. This only works for platform drivers that support hot
 + * unplugging. When a driver calls aperture_remove_conflicting_devices()
 + * et al for the registered framebuffer range, the aperture helpers call
 + * platform_device_unregister() and the generic driver unloads itself. The
 + * generic driver also has to provide a remove function to make this work.
 + * Once hot unplugged from the hardware, it may not access the device's
 + * registers, framebuffer memory, ROM, etc. afterwards.
 + */
+
+struct aperture_range {
+	struct device *dev;
+	resource_size_t base;
+	resource_size_t size;
+	struct list_head lh;
+	void (*detach)(struct device *dev);
+};
+
+static LIST_HEAD(apertures);
+static DEFINE_MUTEX(apertures_lock);
+
+static bool overlap(resource_size_t base1, resource_size_t end1,
+		    resource_size_t base2, resource_size_t end2)
+{
+	return (base1 < end2) && (end1 > base2);
+}
+
+static void devm_aperture_acquire_release(void *data)
+{
+	struct aperture_range *ap = data;
+	bool detached = !ap->dev;
+
+	if (detached)
+		return;
+
+	mutex_lock(&apertures_lock);
+	list_del(&ap->lh);
+	mutex_unlock(&apertures_lock);
+}
+
+static int devm_aperture_acquire(struct device *dev,
+				 resource_size_t base, resource_size_t size,
+				 void (*detach)(struct device *))
+{
+	resource_size_t end = base + size;
+	struct list_head *pos;
+	struct aperture_range *ap;
+
+	mutex_lock(&apertures_lock);
+
+	list_for_each(pos, &apertures) {
+		ap = container_of(pos, struct aperture_range, lh);
+		if (overlap(base, end, ap->base, ap->base + ap->size)) {
+			mutex_unlock(&apertures_lock);
+			return -EBUSY;
+		}
+	}
+
+	ap = devm_kzalloc(dev, sizeof(*ap), GFP_KERNEL);
+	if (!ap) {
+		mutex_unlock(&apertures_lock);
+		return -ENOMEM;
+	}
+
+	ap->dev = dev;
+	ap->base = base;
+	ap->size = size;
+	ap->detach = detach;
+	INIT_LIST_HEAD(&ap->lh);
+
+	list_add(&ap->lh, &apertures);
+
+	mutex_unlock(&apertures_lock);
+
+	return devm_add_action_or_reset(dev, devm_aperture_acquire_release, ap);
+}
+
+static void aperture_detach_platform_device(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	/*
+	 * Remove the device from the device hierarchy. This is the right thing
+	 * to do for firmware-based DRM drivers, such as EFI, VESA or VGA. After
+	 * the new driver takes over the hardware, the firmware device's state
+	 * will be lost.
+	 *
+	 * For non-platform devices, a new callback would be required.
+	 *
+	 * If the aperture helpers ever need to handle native drivers, this call
+	 * would only have to unplug the DRM device, so that the hardware device
+	 * stays around after detachment.
+	 */
+	platform_device_unregister(pdev);
+}
+
+/**
+ * devm_aperture_acquire_for_platform_device - Acquires ownership of an aperture
+ *                                             on behalf of a platform device.
+ * @pdev: the platform device to own the aperture
+ * @base: the aperture's byte offset in physical memory
+ * @size: the aperture size in bytes
+ *
+ * Installs the given device as the new owner of the aperture. The function
+ * expects the aperture to be provided by a platform device. If another
+ * driver takes over ownership of the aperture, aperture helpers will then
+ * unregister the platform device automatically. All acquired apertures are
+ * released automatically when the underlying device goes away.
+ *
+ * The function fails if the aperture, or parts of it, is currently
+ * owned by another device. To evict current owners, callers should use
+ * aperture_remove_conflicting_devices() et al. before calling this function.
+ *
+ * Returns:
+ * 0 on success, or a negative errno value otherwise.
+ */ +int devm_aperture_acquire_for_platform_device(struct platform_device *pdev, + resource_size_t base, + resource_size_t size) +{ + return devm_aperture_acquire(&pdev->dev, base, size, aperture_detach_platform_device); +} +EXPORT_SYMBOL(devm_aperture_acquire_for_platform_device); + +static void aperture_detach_devices(resource_size_t base, resource_size_t size) +{ + resource_size_t end = base + size; + struct list_head *pos, *n; + + mutex_lock(&apertures_lock); + + list_for_each_safe(pos, n, &apertures) { + struct aperture_range *ap = container_of(pos, struct aperture_range, lh); + struct device *dev = ap->dev; + + if (WARN_ON_ONCE(!dev)) + continue; + + if (!overlap(base, end, ap->base, ap->base + ap->size)) + continue; + + ap->dev = NULL; /* detach from device */ + list_del(&ap->lh); + + ap->detach(dev); + } + + mutex_unlock(&apertures_lock); +} + +/** + * aperture_remove_conflicting_devices - remove devices in the given range + * @base: the aperture's base address in physical memory + * @size: aperture size in bytes + * @primary: also kick vga16fb if present; only relevant for VGA devices + * @name: a descriptive name of the requesting driver + * + * This function removes devices that own apertures within @base and @size. + * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size, + bool primary, const char *name) +{ +#if IS_REACHABLE(CONFIG_FB) + struct apertures_struct *a; + int ret; + + a = alloc_apertures(1); + if (!a) + return -ENOMEM; + + a->ranges[0].base = base; + a->ranges[0].size = size; + + ret = remove_conflicting_framebuffers(a, name, primary); + kfree(a); + + if (ret) + return ret; +#endif + + aperture_detach_devices(base, size); + + return 0; +} +EXPORT_SYMBOL(aperture_remove_conflicting_devices); + +/** + * aperture_remove_conflicting_pci_devices - remove existing framebuffers for PCI devices + * @pdev: PCI device + * @name: a descriptive name of the requesting driver + * + * This function removes devices that own apertures within any of @pdev's + * memory bars. The function assumes that PCI device with shadowed ROM + * drives a primary display and therefore kicks out vga16fb as well. + * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name) +{ + resource_size_t base, size; + int bar, ret; + + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
+	 */
+#if IS_REACHABLE(CONFIG_FB)
+	ret = remove_conflicting_pci_framebuffers(pdev, name);
+	if (ret)
+		return ret;
+#endif
+	ret = vga_remove_vgacon(pdev);
+	if (ret)
+		return ret;
+
+	for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
+		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+			continue;
+		base = pci_resource_start(pdev, bar);
+		size = pci_resource_len(pdev, bar);
+		aperture_detach_devices(base, size);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(aperture_remove_conflicting_pci_devices);
diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
new file mode 100644
index 0000000000..f85817635a
--- /dev/null
+++ b/drivers/video/fbdev/omap/lcd_dma.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/mach-omap1/lcd_dma.c
+ *
+ * Extracted from arch/arm/plat-omap/dma.c
+ * Copyright (C) 2003 - 2008 Nokia Corporation
+ * Author: Juha Yrjölä
+ * DMA channel linking for 1610 by Samuel Ortiz
+ * Graphics DMA and LCD DMA graphics transformations
+ * by Imre Deak
+ * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
+ * Merged to support both OMAP1 and OMAP2 by Tony Lindgren
+ * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar
+ *
+ * Support functions for the OMAP internal DMA channels.
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+#include "lcdc.h"
+#include "lcd_dma.h"
+
+int omap_lcd_dma_running(void)
+{
+	/*
+	 * On OMAP1510, internal LCD controller will start the transfer
+	 * when it gets enabled, so assume DMA running if LCD enabled.
+	 */
+	if (cpu_is_omap15xx())
+		if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
+			return 1;
+
+	/* Check if LCD DMA is running */
+	if (cpu_is_omap16xx())
+		if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
+			return 1;
+
+	return 0;
+}
+
+static struct lcd_dma_info {
+	spinlock_t lock;
+	int reserved;
+	void (*callback)(u16 status, void *data);
+	void *cb_data;
+
+	int active;
+	unsigned long addr;
+	int rotate, data_type, xres, yres;
+	int vxres;
+	int mirror;
+	int xscale, yscale;
+	int ext_ctrl;
+	int src_port;
+	int single_transfer;
+} lcd_dma;
+
+void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
+			 int data_type)
+{
+	lcd_dma.addr = addr;
+	lcd_dma.data_type = data_type;
+	lcd_dma.xres = fb_xres;
+	lcd_dma.yres = fb_yres;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1);
+
+void omap_set_lcd_dma_ext_controller(int external)
+{
+	lcd_dma.ext_ctrl = external;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
+
+void omap_set_lcd_dma_single_transfer(int single)
+{
+	lcd_dma.single_transfer = single;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
+
+void omap_set_lcd_dma_b1_rotation(int rotate)
+{
+	if (cpu_is_omap15xx()) {
+		printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
+		BUG();
+		return;
+	}
+	lcd_dma.rotate = rotate;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
+
+void omap_set_lcd_dma_b1_mirror(int mirror)
+{
+	if (cpu_is_omap15xx()) {
+		printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
+		BUG();
+	}
+	lcd_dma.mirror = mirror;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
+
+void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
+{
+	if (cpu_is_omap15xx()) {
+		pr_err("DMA virtual resolution is not supported in 1510 mode\n");
+		BUG();
+	}
+	lcd_dma.vxres = vxres;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
+
+void omap_set_lcd_dma_b1_scale(unsigned int xscale,
unsigned int yscale) +{ + if (cpu_is_omap15xx()) { + printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); + BUG(); + } + lcd_dma.xscale = xscale; + lcd_dma.yscale = yscale; +} +EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale); + +static void set_b1_regs(void) +{ + unsigned long top, bottom; + int es; + u16 w; + unsigned long en, fn; + long ei, fi; + unsigned long vxres; + unsigned int xscale, yscale; + + switch (lcd_dma.data_type) { + case OMAP_DMA_DATA_TYPE_S8: + es = 1; + break; + case OMAP_DMA_DATA_TYPE_S16: + es = 2; + break; + case OMAP_DMA_DATA_TYPE_S32: + es = 4; + break; + default: + BUG(); + return; + } + + vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres; + xscale = lcd_dma.xscale ? lcd_dma.xscale : 1; + yscale = lcd_dma.yscale ? lcd_dma.yscale : 1; + BUG_ON(vxres < lcd_dma.xres); + +#define PIXADDR(x, y) (lcd_dma.addr + \ + ((y) * vxres * yscale + (x) * xscale) * es) +#define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1) + + switch (lcd_dma.rotate) { + case 0: + if (!lcd_dma.mirror) { + top = PIXADDR(0, 0); + bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); + /* 1510 DMA requires the bottom address to be 2 more + * than the actual last memory access location. */ + if (cpu_is_omap15xx() && + lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) + bottom += 2; + ei = PIXSTEP(0, 0, 1, 0); + fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1); + } else { + top = PIXADDR(lcd_dma.xres - 1, 0); + bottom = PIXADDR(0, lcd_dma.yres - 1); + ei = PIXSTEP(1, 0, 0, 0); + fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1); + } + en = lcd_dma.xres; + fn = lcd_dma.yres; + break; + case 90: + if (!lcd_dma.mirror) { + top = PIXADDR(0, lcd_dma.yres - 1); + bottom = PIXADDR(lcd_dma.xres - 1, 0); + ei = PIXSTEP(0, 1, 0, 0); + fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1); + } else { + top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); + bottom = PIXADDR(0, 0); + ei = PIXSTEP(0, 1, 0, 0); + fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1); + } + en = lcd_dma.yres; + fn = lcd_dma.xres; + break; + case 180: + if (!lcd_dma.mirror) { + top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); + bottom = PIXADDR(0, 0); + ei = PIXSTEP(1, 0, 0, 0); + fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0); + } else { + top = PIXADDR(0, lcd_dma.yres - 1); + bottom = PIXADDR(lcd_dma.xres - 1, 0); + ei = PIXSTEP(0, 0, 1, 0); + fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0); + } + en = lcd_dma.xres; + fn = lcd_dma.yres; + break; + case 270: + if (!lcd_dma.mirror) { + top = PIXADDR(lcd_dma.xres - 1, 0); + bottom = PIXADDR(0, lcd_dma.yres - 1); + ei = PIXSTEP(0, 0, 0, 1); + fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0); + } else { + top = PIXADDR(0, 0); + bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); + ei = PIXSTEP(0, 0, 0, 1); + fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0); + } + en = lcd_dma.yres; + fn = lcd_dma.xres; + break; + default: + BUG(); + return; /* Suppress warning about uninitialized vars */ + } + + if (cpu_is_omap15xx()) { + omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); + omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); + omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); + omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L); + + return; + } + + /* 1610 regs */ + omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U); + omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L); + omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U); + omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L); + + omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1); + omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1); + + w = omap_readw(OMAP1610_DMA_LCD_CSDP); + w &= ~0x03; + w |= lcd_dma.data_type; + 
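+	/* Write back CSDP with the element size (data type) in bits 1:0 */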
omap_writew(w, OMAP1610_DMA_LCD_CSDP); + + w = omap_readw(OMAP1610_DMA_LCD_CTRL); + /* Always set the source port as SDRAM for now*/ + w &= ~(0x03 << 6); + if (lcd_dma.callback != NULL) + w |= 1 << 1; /* Block interrupt enable */ + else + w &= ~(1 << 1); + omap_writew(w, OMAP1610_DMA_LCD_CTRL); + + if (!(lcd_dma.rotate || lcd_dma.mirror || + lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale)) + return; + + w = omap_readw(OMAP1610_DMA_LCD_CCR); + /* Set the double-indexed addressing mode */ + w |= (0x03 << 12); + omap_writew(w, OMAP1610_DMA_LCD_CCR); + + omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1); + omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U); + omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L); +} + +static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id) +{ + u16 w; + + w = omap_readw(OMAP1610_DMA_LCD_CTRL); + if (unlikely(!(w & (1 << 3)))) { + printk(KERN_WARNING "Spurious LCD DMA IRQ\n"); + return IRQ_NONE; + } + /* Ack the IRQ */ + w |= (1 << 3); + omap_writew(w, OMAP1610_DMA_LCD_CTRL); + lcd_dma.active = 0; + if (lcd_dma.callback != NULL) + lcd_dma.callback(w, lcd_dma.cb_data); + + return IRQ_HANDLED; +} + +int omap_request_lcd_dma(void (*callback)(u16 status, void *data), + void *data) +{ + spin_lock_irq(&lcd_dma.lock); + if (lcd_dma.reserved) { + spin_unlock_irq(&lcd_dma.lock); + printk(KERN_ERR "LCD DMA channel already reserved\n"); + BUG(); + return -EBUSY; + } + lcd_dma.reserved = 1; + spin_unlock_irq(&lcd_dma.lock); + lcd_dma.callback = callback; + lcd_dma.cb_data = data; + lcd_dma.active = 0; + lcd_dma.single_transfer = 0; + lcd_dma.rotate = 0; + lcd_dma.vxres = 0; + lcd_dma.mirror = 0; + lcd_dma.xscale = 0; + lcd_dma.yscale = 0; + lcd_dma.ext_ctrl = 0; + lcd_dma.src_port = 0; + + return 0; +} +EXPORT_SYMBOL(omap_request_lcd_dma); + +void omap_free_lcd_dma(void) +{ + spin_lock(&lcd_dma.lock); + if (!lcd_dma.reserved) { + spin_unlock(&lcd_dma.lock); + printk(KERN_ERR "LCD DMA is not reserved\n"); + BUG(); + return; + } + if (!cpu_is_omap15xx()) + omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, + OMAP1610_DMA_LCD_CCR); + lcd_dma.reserved = 0; + spin_unlock(&lcd_dma.lock); +} +EXPORT_SYMBOL(omap_free_lcd_dma); + +void omap_enable_lcd_dma(void) +{ + u16 w; + + /* + * Set the Enable bit only if an external controller is + * connected. Otherwise the OMAP internal controller will + * start the transfer when it gets enabled. + */ + if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) + return; + + w = omap_readw(OMAP1610_DMA_LCD_CTRL); + w |= 1 << 8; + omap_writew(w, OMAP1610_DMA_LCD_CTRL); + + lcd_dma.active = 1; + + w = omap_readw(OMAP1610_DMA_LCD_CCR); + w |= 1 << 7; + omap_writew(w, OMAP1610_DMA_LCD_CCR); +} +EXPORT_SYMBOL(omap_enable_lcd_dma); + +void omap_setup_lcd_dma(void) +{ + BUG_ON(lcd_dma.active); + if (!cpu_is_omap15xx()) { + /* Set some reasonable defaults */ + omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); + omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); + omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); + } + set_b1_regs(); + if (!cpu_is_omap15xx()) { + u16 w; + + w = omap_readw(OMAP1610_DMA_LCD_CCR); + /* + * If DMA was already active set the end_prog bit to have + * the programmed register set loaded into the active + * register set. 
+ */ + w |= 1 << 11; /* End_prog */ + if (!lcd_dma.single_transfer) + w |= (3 << 8); /* Auto_init, repeat */ + omap_writew(w, OMAP1610_DMA_LCD_CCR); + } +} +EXPORT_SYMBOL(omap_setup_lcd_dma); + +void omap_stop_lcd_dma(void) +{ + u16 w; + + lcd_dma.active = 0; + if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) + return; + + w = omap_readw(OMAP1610_DMA_LCD_CCR); + w &= ~(1 << 7); + omap_writew(w, OMAP1610_DMA_LCD_CCR); + + w = omap_readw(OMAP1610_DMA_LCD_CTRL); + w &= ~(1 << 8); + omap_writew(w, OMAP1610_DMA_LCD_CTRL); +} +EXPORT_SYMBOL(omap_stop_lcd_dma); + +static int __init omap_init_lcd_dma(void) +{ + int r; + + if (!cpu_class_is_omap1()) + return -ENODEV; + + if (cpu_is_omap16xx()) { + u16 w; + + /* this would prevent OMAP sleep */ + w = omap_readw(OMAP1610_DMA_LCD_CTRL); + w &= ~(1 << 8); + omap_writew(w, OMAP1610_DMA_LCD_CTRL); + } + + spin_lock_init(&lcd_dma.lock); + + r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, + "LCD DMA", NULL); + if (r != 0) + pr_err("unable to request IRQ for LCD DMA (error %d)\n", r); + + return r; +} + +arch_initcall(omap_init_lcd_dma); + diff --git a/drivers/video/fbdev/omap/lcd_dma.h b/drivers/video/fbdev/omap/lcd_dma.h new file mode 100644 index 0000000000..1b47801973 --- /dev/null +++ b/drivers/video/fbdev/omap/lcd_dma.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * arch/arm/mach-omap1/include/mach/lcd_dma.h + * + * Extracted from arch/arm/plat-omap/include/plat/dma.h + * Copyright (C) 2003 Nokia Corporation + * Author: Juha Yrjölä + */ +#ifndef __MACH_OMAP1_LCD_DMA_H__ +#define __MACH_OMAP1_LCD_DMA_H__ + +/* Hardware registers for LCD DMA */ +#define OMAP1510_DMA_LCD_BASE (0xfffedb00) +#define OMAP1510_DMA_LCD_CTRL (OMAP1510_DMA_LCD_BASE + 0x00) +#define OMAP1510_DMA_LCD_TOP_F1_L (OMAP1510_DMA_LCD_BASE + 0x02) +#define OMAP1510_DMA_LCD_TOP_F1_U (OMAP1510_DMA_LCD_BASE + 0x04) +#define OMAP1510_DMA_LCD_BOT_F1_L (OMAP1510_DMA_LCD_BASE + 0x06) +#define OMAP1510_DMA_LCD_BOT_F1_U (OMAP1510_DMA_LCD_BASE + 0x08) + +#define OMAP1610_DMA_LCD_BASE (0xfffee300) +#define OMAP1610_DMA_LCD_CSDP (OMAP1610_DMA_LCD_BASE + 0xc0) +#define OMAP1610_DMA_LCD_CCR (OMAP1610_DMA_LCD_BASE + 0xc2) +#define OMAP1610_DMA_LCD_CTRL (OMAP1610_DMA_LCD_BASE + 0xc4) +#define OMAP1610_DMA_LCD_TOP_B1_L (OMAP1610_DMA_LCD_BASE + 0xc8) +#define OMAP1610_DMA_LCD_TOP_B1_U (OMAP1610_DMA_LCD_BASE + 0xca) +#define OMAP1610_DMA_LCD_BOT_B1_L (OMAP1610_DMA_LCD_BASE + 0xcc) +#define OMAP1610_DMA_LCD_BOT_B1_U (OMAP1610_DMA_LCD_BASE + 0xce) +#define OMAP1610_DMA_LCD_TOP_B2_L (OMAP1610_DMA_LCD_BASE + 0xd0) +#define OMAP1610_DMA_LCD_TOP_B2_U (OMAP1610_DMA_LCD_BASE + 0xd2) +#define OMAP1610_DMA_LCD_BOT_B2_L (OMAP1610_DMA_LCD_BASE + 0xd4) +#define OMAP1610_DMA_LCD_BOT_B2_U (OMAP1610_DMA_LCD_BASE + 0xd6) +#define OMAP1610_DMA_LCD_SRC_EI_B1 (OMAP1610_DMA_LCD_BASE + 0xd8) +#define OMAP1610_DMA_LCD_SRC_FI_B1_L (OMAP1610_DMA_LCD_BASE + 0xda) +#define OMAP1610_DMA_LCD_SRC_EN_B1 (OMAP1610_DMA_LCD_BASE + 0xe0) +#define OMAP1610_DMA_LCD_SRC_FN_B1 (OMAP1610_DMA_LCD_BASE + 0xe4) +#define OMAP1610_DMA_LCD_LCH_CTRL (OMAP1610_DMA_LCD_BASE + 0xea) +#define OMAP1610_DMA_LCD_SRC_FI_B1_U (OMAP1610_DMA_LCD_BASE + 0xf4) + +/* LCD DMA block numbers */ +enum { + OMAP_LCD_DMA_B1_TOP, + OMAP_LCD_DMA_B1_BOTTOM, + OMAP_LCD_DMA_B2_TOP, + OMAP_LCD_DMA_B2_BOTTOM +}; + +/* LCD DMA functions */ +extern int omap_request_lcd_dma(void (*callback)(u16 status, void *data), + void *data); +extern void omap_free_lcd_dma(void); +extern void omap_setup_lcd_dma(void); +extern void omap_enable_lcd_dma(void); +extern 
void omap_stop_lcd_dma(void);
+extern void omap_set_lcd_dma_ext_controller(int external);
+extern void omap_set_lcd_dma_single_transfer(int single);
+extern void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
+				int data_type);
+extern void omap_set_lcd_dma_b1_rotation(int rotate);
+extern void omap_set_lcd_dma_b1_vxres(unsigned long vxres);
+extern void omap_set_lcd_dma_b1_mirror(int mirror);
+extern void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale);
+
+#endif /* __MACH_OMAP1_LCD_DMA_H__ */
diff --git a/drivers/video/fbdev/pxa3xx-regs.h b/drivers/video/fbdev/pxa3xx-regs.h
new file mode 100644
index 0000000000..6a96610ef9
--- /dev/null
+++ b/drivers/video/fbdev/pxa3xx-regs.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARCH_REGS_LCD_H
+#define __ASM_ARCH_REGS_LCD_H
+
+/*
+ * LCD Controller Registers and Bits Definitions
+ */
+#define LCCR0		(0x000)	/* LCD Controller Control Register 0 */
+#define LCCR1		(0x004)	/* LCD Controller Control Register 1 */
+#define LCCR2		(0x008)	/* LCD Controller Control Register 2 */
+#define LCCR3		(0x00C)	/* LCD Controller Control Register 3 */
+#define LCCR4		(0x010)	/* LCD Controller Control Register 4 */
+#define LCCR5		(0x014)	/* LCD Controller Control Register 5 */
+#define LCSR		(0x038)	/* LCD Controller Status Register 0 */
+#define LCSR1		(0x034)	/* LCD Controller Status Register 1 */
+#define LIIDR		(0x03C)	/* LCD Controller Interrupt ID Register */
+#define TMEDRGBR	(0x040)	/* TMED RGB Seed Register */
+#define TMEDCR		(0x044)	/* TMED Control Register */
+
+#define FBR0		(0x020)	/* DMA Channel 0 Frame Branch Register */
+#define FBR1		(0x024)	/* DMA Channel 1 Frame Branch Register */
+#define FBR2		(0x028)	/* DMA Channel 2 Frame Branch Register */
+#define FBR3		(0x02C)	/* DMA Channel 3 Frame Branch Register */
+#define FBR4		(0x030)	/* DMA Channel 4 Frame Branch Register */
+#define FBR5		(0x110)	/* DMA Channel 5 Frame Branch Register */
+#define FBR6		(0x114)	/* DMA Channel 6 Frame Branch Register */
+
+#define OVL1C1		(0x050)	/* Overlay 1 Control Register 1 */
+#define OVL1C2		(0x060)	/* Overlay 1 Control Register 2 */
+#define OVL2C1		(0x070)	/* Overlay 2 Control Register 1 */
+#define OVL2C2		(0x080)	/* Overlay 2 Control Register 2 */
+
+#define CMDCR		(0x100)	/* Command Control Register */
+#define PRSR		(0x104)	/* Panel Read Status Register */
+
+#define LCCR3_BPP(x)	((((x) & 0x7) << 24) | (((x) & 0x8) ?
(1 << 29) : 0)) + +#define LCCR3_PDFOR_0 (0 << 30) +#define LCCR3_PDFOR_1 (1 << 30) +#define LCCR3_PDFOR_2 (2 << 30) +#define LCCR3_PDFOR_3 (3 << 30) + +#define LCCR4_PAL_FOR_0 (0 << 15) +#define LCCR4_PAL_FOR_1 (1 << 15) +#define LCCR4_PAL_FOR_2 (2 << 15) +#define LCCR4_PAL_FOR_3 (3 << 15) +#define LCCR4_PAL_FOR_MASK (3 << 15) + +#define FDADR0 (0x200) /* DMA Channel 0 Frame Descriptor Address Register */ +#define FDADR1 (0x210) /* DMA Channel 1 Frame Descriptor Address Register */ +#define FDADR2 (0x220) /* DMA Channel 2 Frame Descriptor Address Register */ +#define FDADR3 (0x230) /* DMA Channel 3 Frame Descriptor Address Register */ +#define FDADR4 (0x240) /* DMA Channel 4 Frame Descriptor Address Register */ +#define FDADR5 (0x250) /* DMA Channel 5 Frame Descriptor Address Register */ +#define FDADR6 (0x260) /* DMA Channel 6 Frame Descriptor Address Register */ + +#define LCCR0_ENB (1 << 0) /* LCD Controller enable */ +#define LCCR0_CMS (1 << 1) /* Color/Monochrome Display Select */ +#define LCCR0_Color (LCCR0_CMS*0) /* Color display */ +#define LCCR0_Mono (LCCR0_CMS*1) /* Monochrome display */ +#define LCCR0_SDS (1 << 2) /* Single/Dual Panel Display Select */ +#define LCCR0_Sngl (LCCR0_SDS*0) /* Single panel display */ +#define LCCR0_Dual (LCCR0_SDS*1) /* Dual panel display */ + +#define LCCR0_LDM (1 << 3) /* LCD Disable Done Mask */ +#define LCCR0_SFM (1 << 4) /* Start of frame mask */ +#define LCCR0_IUM (1 << 5) /* Input FIFO underrun mask */ +#define LCCR0_EFM (1 << 6) /* End of Frame mask */ +#define LCCR0_PAS (1 << 7) /* Passive/Active display Select */ +#define LCCR0_Pas (LCCR0_PAS*0) /* Passive display (STN) */ +#define LCCR0_Act (LCCR0_PAS*1) /* Active display (TFT) */ +#define LCCR0_DPD (1 << 9) /* Double Pixel Data (monochrome) */ +#define LCCR0_4PixMono (LCCR0_DPD*0) /* 4-Pixel/clock Monochrome display */ +#define LCCR0_8PixMono (LCCR0_DPD*1) /* 8-Pixel/clock Monochrome display */ +#define LCCR0_DIS (1 << 10) /* LCD Disable */ +#define LCCR0_QDM (1 << 11) /* LCD Quick Disable mask */ +#define LCCR0_PDD (0xff << 12) /* Palette DMA request delay */ +#define LCCR0_PDD_S 12 +#define LCCR0_BM (1 << 20) /* Branch mask */ +#define LCCR0_OUM (1 << 21) /* Output FIFO underrun mask */ +#define LCCR0_LCDT (1 << 22) /* LCD panel type */ +#define LCCR0_RDSTM (1 << 23) /* Read status interrupt mask */ +#define LCCR0_CMDIM (1 << 24) /* Command interrupt mask */ +#define LCCR0_OUC (1 << 25) /* Overlay Underlay control bit */ +#define LCCR0_LDDALT (1 << 26) /* LDD alternate mapping control */ + +#define Fld(Size, Shft) (((Size) << 16) + (Shft)) +#define FShft(Field) ((Field) & 0x0000FFFF) + +#define LCCR1_PPL Fld (10, 0) /* Pixels Per Line - 1 */ +#define LCCR1_DisWdth(Pixel) (((Pixel) - 1) << FShft (LCCR1_PPL)) + +#define LCCR1_HSW Fld (6, 10) /* Horizontal Synchronization */ +#define LCCR1_HorSnchWdth(Tpix) (((Tpix) - 1) << FShft (LCCR1_HSW)) + +#define LCCR1_ELW Fld (8, 16) /* End-of-Line pixel clock Wait - 1 */ +#define LCCR1_EndLnDel(Tpix) (((Tpix) - 1) << FShft (LCCR1_ELW)) + +#define LCCR1_BLW Fld (8, 24) /* Beginning-of-Line pixel clock */ +#define LCCR1_BegLnDel(Tpix) (((Tpix) - 1) << FShft (LCCR1_BLW)) + +#define LCCR2_LPP Fld (10, 0) /* Line Per Panel - 1 */ +#define LCCR2_DisHght(Line) (((Line) - 1) << FShft (LCCR2_LPP)) + +#define LCCR2_VSW Fld (6, 10) /* Vertical Synchronization pulse - 1 */ +#define LCCR2_VrtSnchWdth(Tln) (((Tln) - 1) << FShft (LCCR2_VSW)) + +#define LCCR2_EFW Fld (8, 16) /* End-of-Frame line clock Wait */ +#define LCCR2_EndFrmDel(Tln) ((Tln) << FShft 
(LCCR2_EFW))
+
+#define LCCR2_BFW	Fld (8, 24)	/* Beginning-of-Frame line clock */
+#define LCCR2_BegFrmDel(Tln)	((Tln) << FShft (LCCR2_BFW))
+
+#define LCCR3_API	(0xf << 16)	/* AC Bias pin transitions per interrupt */
+#define LCCR3_API_S	16
+#define LCCR3_VSP	(1 << 20)	/* vertical sync polarity */
+#define LCCR3_HSP	(1 << 21)	/* horizontal sync polarity */
+#define LCCR3_PCP	(1 << 22)	/* Pixel Clock Polarity (L_PCLK) */
+#define LCCR3_PixRsEdg	(LCCR3_PCP*0)	/* Pixel clock Rising-Edge */
+#define LCCR3_PixFlEdg	(LCCR3_PCP*1)	/* Pixel clock Falling-Edge */
+
+#define LCCR3_OEP	(1 << 23)	/* Output Enable Polarity */
+#define LCCR3_OutEnH	(LCCR3_OEP*0)	/* Output Enable active High */
+#define LCCR3_OutEnL	(LCCR3_OEP*1)	/* Output Enable active Low */
+
+#define LCCR3_DPC	(1 << 27)	/* double pixel clock mode */
+#define LCCR3_PCD	Fld (8, 0)	/* Pixel Clock Divisor */
+#define LCCR3_PixClkDiv(Div)	(((Div) << FShft (LCCR3_PCD)))
+
+#define LCCR3_ACB	Fld (8, 8)	/* AC Bias */
+#define LCCR3_Acb(Acb)	(((Acb) << FShft (LCCR3_ACB)))
+
+#define LCCR3_HorSnchH	(LCCR3_HSP*0)	/* HSP Active High */
+#define LCCR3_HorSnchL	(LCCR3_HSP*1)	/* HSP Active Low */
+
+#define LCCR3_VrtSnchH	(LCCR3_VSP*0)	/* VSP Active High */
+#define LCCR3_VrtSnchL	(LCCR3_VSP*1)	/* VSP Active Low */
+
+#define LCCR5_IUM(x)	(1 << ((x) + 23))	/* input underrun mask */
+#define LCCR5_BSM(x)	(1 << ((x) + 15))	/* branch mask */
+#define LCCR5_EOFM(x)	(1 << ((x) + 7))	/* end of frame mask */
+#define LCCR5_SOFM(x)	(1 << ((x) + 0))	/* start of frame mask */
+
+#define LCSR_LDD	(1 << 0)	/* LCD Disable Done */
+#define LCSR_SOF	(1 << 1)	/* Start of frame */
+#define LCSR_BER	(1 << 2)	/* Bus error */
+#define LCSR_ABC	(1 << 3)	/* AC Bias count */
+#define LCSR_IUL	(1 << 4)	/* input FIFO underrun Lower panel */
+#define LCSR_IUU	(1 << 5)	/* input FIFO underrun Upper panel */
+#define LCSR_OU		(1 << 6)	/* output FIFO underrun */
+#define LCSR_QD		(1 << 7)	/* quick disable */
+#define LCSR_EOF	(1 << 8)	/* end of frame */
+#define LCSR_BS		(1 << 9)	/* branch status */
+#define LCSR_SINT	(1 << 10)	/* subsequent interrupt */
+#define LCSR_RD_ST	(1 << 11)	/* read status */
+#define LCSR_CMD_INT	(1 << 12)	/* command interrupt */
+
+#define LCSR1_IU(x)	(1 << ((x) + 23))	/* Input FIFO underrun */
+#define LCSR1_BS(x)	(1 << ((x) + 15))	/* Branch Status */
+#define LCSR1_EOF(x)	(1 << ((x) + 7))	/* End of Frame Status */
+#define LCSR1_SOF(x)	(1 << ((x) - 1))	/* Start of Frame Status */
+
+#define LDCMD_PAL	(1 << 26)	/* instructs DMA to load palette buffer */
+
+/* overlay control registers */
+#define OVLxC1_PPL(x)	((((x) - 1) & 0x3ff) << 0)	/* Pixels Per Line */
+#define OVLxC1_LPO(x)	((((x) - 1) & 0x3ff) << 10)	/* Number of Lines */
+#define OVLxC1_BPP(x)	(((x) & 0xf) << 20)	/* Bits Per Pixel */
+#define OVLxC1_OEN	(1 << 31)	/* Enable bit for Overlay */
+#define OVLxC2_XPOS(x)	(((x) & 0x3ff) << 0)	/* Horizontal Position */
+#define OVLxC2_YPOS(x)	(((x) & 0x3ff) << 10)	/* Vertical Position */
+#define OVL2C2_PFOR(x)	(((x) & 0x7) << 20)	/* Pixel Format */
+
+/* smartpanel related */
+#define PRSR_DATA(x)	((x) & 0xff)	/* Panel Data */
+#define PRSR_A0		(1 << 8)	/* Read Data Source */
+#define PRSR_ST_OK	(1 << 9)	/* Status OK */
+#define PRSR_CON_NT	(1 << 10)	/* Continue to Next Command */
+
+#endif /* __ASM_ARCH_REGS_LCD_H */
diff --git a/drivers/virt/coco/efi_secret/Kconfig b/drivers/virt/coco/efi_secret/Kconfig
new file mode 100644
index 0000000000..4404d198f3
--- /dev/null
+++ b/drivers/virt/coco/efi_secret/Kconfig
@@ -0,0 +1,16 @@
+#
SPDX-License-Identifier: GPL-2.0-only +config EFI_SECRET + tristate "EFI secret area securityfs support" + depends on EFI && X86_64 + select EFI_COCO_SECRET + select SECURITYFS + help + This is a driver for accessing the EFI secret area via securityfs. + The EFI secret area is a memory area designated by the firmware for + confidential computing secret injection (for example for AMD SEV + guests). The driver exposes the secrets as files in + /secrets/coco. Files can be read and deleted (deleting + a file wipes the secret from memory). + + To compile this driver as a module, choose M here. + The module will be called efi_secret. diff --git a/drivers/virt/coco/efi_secret/Makefile b/drivers/virt/coco/efi_secret/Makefile new file mode 100644 index 0000000000..c7047ce804 --- /dev/null +++ b/drivers/virt/coco/efi_secret/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_EFI_SECRET) += efi_secret.o diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c new file mode 100644 index 0000000000..e700a5ef70 --- /dev/null +++ b/drivers/virt/coco/efi_secret/efi_secret.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * efi_secret module + * + * Copyright (C) 2022 IBM Corporation + * Author: Dov Murik + */ + +/** + * DOC: efi_secret: Allow reading EFI confidential computing (coco) secret area + * via securityfs interface. + * + * When the module is loaded (and securityfs is mounted, typically under + * /sys/kernel/security), a "secrets/coco" directory is created in securityfs. + * In it, a file is created for each secret entry. The name of each such file + * is the GUID of the secret entry, and its content is the secret data. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define EFI_SECRET_NUM_FILES 64 + +struct efi_secret { + struct dentry *secrets_dir; + struct dentry *fs_dir; + struct dentry *fs_files[EFI_SECRET_NUM_FILES]; + void __iomem *secret_data; + u64 secret_data_len; +}; + +/* + * Structure of the EFI secret area + * + * Offset Length + * (bytes) (bytes) Usage + * ------- ------- ----- + * 0 16 Secret table header GUID (must be 1e74f542-71dd-4d66-963e-ef4287ff173b) + * 16 4 Length of bytes of the entire secret area + * + * 20 16 First secret entry's GUID + * 36 4 First secret entry's length in bytes (= 16 + 4 + x) + * 40 x First secret entry's data + * + * 40+x 16 Second secret entry's GUID + * 56+x 4 Second secret entry's length in bytes (= 16 + 4 + y) + * 60+x y Second secret entry's data + * + * (... and so on for additional entries) + * + * The GUID of each secret entry designates the usage of the secret data. + */ + +/** + * struct secret_header - Header of entire secret area; this should be followed + * by instances of struct secret_entry. 
+ * @guid: Must be EFI_SECRET_TABLE_HEADER_GUID + * @len: Length in bytes of entire secret area, including header + */ +struct secret_header { + efi_guid_t guid; + u32 len; +} __attribute((packed)); + +/** + * struct secret_entry - Holds one secret entry + * @guid: Secret-specific GUID (or NULL_GUID if this secret entry was deleted) + * @len: Length of secret entry, including its guid and len fields + * @data: The secret data (full of zeros if this secret entry was deleted) + */ +struct secret_entry { + efi_guid_t guid; + u32 len; + u8 data[]; +} __attribute((packed)); + +static size_t secret_entry_data_len(struct secret_entry *e) +{ + return e->len - sizeof(*e); +} + +static struct efi_secret the_efi_secret; + +static inline struct efi_secret *efi_secret_get(void) +{ + return &the_efi_secret; +} + +static int efi_secret_bin_file_show(struct seq_file *file, void *data) +{ + struct secret_entry *e = file->private; + + if (e) + seq_write(file, e->data, secret_entry_data_len(e)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(efi_secret_bin_file); + +/* + * Overwrite memory content with zeroes, and ensure that dirty cache lines are + * actually written back to memory, to clear out the secret. + */ +static void wipe_memory(void *addr, size_t size) +{ + memzero_explicit(addr, size); +#ifdef CONFIG_X86 + clflush_cache_range(addr, size); +#endif +} + +static int efi_secret_unlink(struct inode *dir, struct dentry *dentry) +{ + struct efi_secret *s = efi_secret_get(); + struct inode *inode = d_inode(dentry); + struct secret_entry *e = (struct secret_entry *)inode->i_private; + int i; + + if (e) { + /* Zero out the secret data */ + wipe_memory(e->data, secret_entry_data_len(e)); + e->guid = NULL_GUID; + } + + inode->i_private = NULL; + + for (i = 0; i < EFI_SECRET_NUM_FILES; i++) + if (s->fs_files[i] == dentry) + s->fs_files[i] = NULL; + + /* + * securityfs_remove tries to lock the directory's inode, but we reach + * the unlink callback when it's already locked + */ + inode_unlock(dir); + securityfs_remove(dentry); + inode_lock(dir); + + return 0; +} + +static const struct inode_operations efi_secret_dir_inode_operations = { + .lookup = simple_lookup, + .unlink = efi_secret_unlink, +}; + +static int efi_secret_map_area(struct platform_device *dev) +{ + int ret; + struct efi_secret *s = efi_secret_get(); + struct linux_efi_coco_secret_area *secret_area; + + if (efi.coco_secret == EFI_INVALID_TABLE_ADDR) { + dev_err(&dev->dev, "Secret area address is not available\n"); + return -EINVAL; + } + + secret_area = memremap(efi.coco_secret, sizeof(*secret_area), MEMREMAP_WB); + if (secret_area == NULL) { + dev_err(&dev->dev, "Could not map secret area EFI config entry\n"); + return -ENOMEM; + } + if (!secret_area->base_pa || secret_area->size < sizeof(struct secret_header)) { + dev_err(&dev->dev, + "Invalid secret area memory location (base_pa=0x%llx size=0x%llx)\n", + secret_area->base_pa, secret_area->size); + ret = -EINVAL; + goto unmap; + } + + s->secret_data = ioremap_encrypted(secret_area->base_pa, secret_area->size); + if (s->secret_data == NULL) { + dev_err(&dev->dev, "Could not map secret area\n"); + ret = -ENOMEM; + goto unmap; + } + + s->secret_data_len = secret_area->size; + ret = 0; + +unmap: + memunmap(secret_area); + return ret; +} + +static void efi_secret_securityfs_teardown(struct platform_device *dev) +{ + struct efi_secret *s = efi_secret_get(); + int i; + + for (i = (EFI_SECRET_NUM_FILES - 1); i >= 0; i--) { + securityfs_remove(s->fs_files[i]); + s->fs_files[i] = NULL; + } + + 
securityfs_remove(s->fs_dir); + s->fs_dir = NULL; + + securityfs_remove(s->secrets_dir); + s->secrets_dir = NULL; + + dev_dbg(&dev->dev, "Removed securityfs entries\n"); +} + +static int efi_secret_securityfs_setup(struct platform_device *dev) +{ + struct efi_secret *s = efi_secret_get(); + int ret = 0, i = 0, bytes_left; + unsigned char *ptr; + struct secret_header *h; + struct secret_entry *e; + struct dentry *dent; + char guid_str[EFI_VARIABLE_GUID_LEN + 1]; + + ptr = (void __force *)s->secret_data; + h = (struct secret_header *)ptr; + if (efi_guidcmp(h->guid, EFI_SECRET_TABLE_HEADER_GUID)) { + /* + * This is not an error: it just means that EFI defines secret + * area but it was not populated by the Guest Owner. + */ + dev_dbg(&dev->dev, "EFI secret area does not start with correct GUID\n"); + return -ENODEV; + } + if (h->len < sizeof(*h)) { + dev_err(&dev->dev, "EFI secret area reported length is too small\n"); + return -EINVAL; + } + if (h->len > s->secret_data_len) { + dev_err(&dev->dev, "EFI secret area reported length is too big\n"); + return -EINVAL; + } + + s->secrets_dir = NULL; + s->fs_dir = NULL; + memset(s->fs_files, 0, sizeof(s->fs_files)); + + dent = securityfs_create_dir("secrets", NULL); + if (IS_ERR(dent)) { + dev_err(&dev->dev, "Error creating secrets securityfs directory entry err=%ld\n", + PTR_ERR(dent)); + return PTR_ERR(dent); + } + s->secrets_dir = dent; + + dent = securityfs_create_dir("coco", s->secrets_dir); + if (IS_ERR(dent)) { + dev_err(&dev->dev, "Error creating coco securityfs directory entry err=%ld\n", + PTR_ERR(dent)); + return PTR_ERR(dent); + } + d_inode(dent)->i_op = &efi_secret_dir_inode_operations; + s->fs_dir = dent; + + bytes_left = h->len - sizeof(*h); + ptr += sizeof(*h); + while (bytes_left >= (int)sizeof(*e) && i < EFI_SECRET_NUM_FILES) { + e = (struct secret_entry *)ptr; + if (e->len < sizeof(*e) || e->len > (unsigned int)bytes_left) { + dev_err(&dev->dev, "EFI secret area is corrupted\n"); + ret = -EINVAL; + goto err_cleanup; + } + + /* Skip deleted entries (which will have NULL_GUID) */ + if (efi_guidcmp(e->guid, NULL_GUID)) { + efi_guid_to_str(&e->guid, guid_str); + + dent = securityfs_create_file(guid_str, 0440, s->fs_dir, (void *)e, + &efi_secret_bin_file_fops); + if (IS_ERR(dent)) { + dev_err(&dev->dev, "Error creating efi_secret securityfs entry\n"); + ret = PTR_ERR(dent); + goto err_cleanup; + } + + s->fs_files[i++] = dent; + } + ptr += e->len; + bytes_left -= e->len; + } + + dev_info(&dev->dev, "Created %d entries in securityfs secrets/coco\n", i); + return 0; + +err_cleanup: + efi_secret_securityfs_teardown(dev); + return ret; +} + +static void efi_secret_unmap_area(void) +{ + struct efi_secret *s = efi_secret_get(); + + if (s->secret_data) { + iounmap(s->secret_data); + s->secret_data = NULL; + s->secret_data_len = 0; + } +} + +static int efi_secret_probe(struct platform_device *dev) +{ + int ret; + + ret = efi_secret_map_area(dev); + if (ret) + return ret; + + ret = efi_secret_securityfs_setup(dev); + if (ret) + goto err_unmap; + + return ret; + +err_unmap: + efi_secret_unmap_area(); + return ret; +} + +static int efi_secret_remove(struct platform_device *dev) +{ + efi_secret_securityfs_teardown(dev); + efi_secret_unmap_area(); + return 0; +} + +static struct platform_driver efi_secret_driver = { + .probe = efi_secret_probe, + .remove = efi_secret_remove, + .driver = { + .name = "efi_secret", + }, +}; + +module_platform_driver(efi_secret_driver); + +MODULE_DESCRIPTION("Confidential computing EFI secret area access"); 
+MODULE_AUTHOR("IBM"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:efi_secret"); diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig new file mode 100644 index 0000000000..f9db0799ae --- /dev/null +++ b/drivers/virt/coco/sev-guest/Kconfig @@ -0,0 +1,14 @@ +config SEV_GUEST + tristate "AMD SEV Guest driver" + default m + depends on AMD_MEM_ENCRYPT + select CRYPTO_AEAD2 + select CRYPTO_GCM + help + SEV-SNP firmware provides the guest a mechanism to communicate with + the PSP without risk from a malicious hypervisor who wishes to read, + alter, drop or replay the messages sent. The driver provides + userspace interface to communicate with the PSP to request the + attestation report and more. + + If you choose 'M' here, this module will be called sev-guest. diff --git a/drivers/virt/coco/sev-guest/Makefile b/drivers/virt/coco/sev-guest/Makefile new file mode 100644 index 0000000000..63d67c2772 --- /dev/null +++ b/drivers/virt/coco/sev-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SEV_GUEST) += sev-guest.o diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c new file mode 100644 index 0000000000..f422f9c58b --- /dev/null +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Secure Encrypted Virtualization (SEV) guest driver interface + * + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Author: Brijesh Singh + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "sev-guest.h" + +#define DEVICE_NAME "sev-guest" +#define AAD_LEN 48 +#define MSG_HDR_VER 1 + +struct snp_guest_crypto { + struct crypto_aead *tfm; + u8 *iv, *authtag; + int iv_len, a_len; +}; + +struct snp_guest_dev { + struct device *dev; + struct miscdevice misc; + + void *certs_data; + struct snp_guest_crypto *crypto; + struct snp_guest_msg *request, *response; + struct snp_secrets_page_layout *layout; + struct snp_req_data input; + u32 *os_area_msg_seqno; + u8 *vmpck; +}; + +static u32 vmpck_id; +module_param(vmpck_id, uint, 0444); +MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP."); + +/* Mutex to serialize the shared buffer access and command handling. */ +static DEFINE_MUTEX(snp_cmd_mutex); + +static bool is_vmpck_empty(struct snp_guest_dev *snp_dev) +{ + char zero_key[VMPCK_KEY_LEN] = {0}; + + if (snp_dev->vmpck) + return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN); + + return true; +} + +static void snp_disable_vmpck(struct snp_guest_dev *snp_dev) +{ + memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN); + snp_dev->vmpck = NULL; +} + +static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev) +{ + u64 count; + + lockdep_assert_held(&snp_cmd_mutex); + + /* Read the current message sequence counter from secrets pages */ + count = *snp_dev->os_area_msg_seqno; + + return count + 1; +} + +/* Return a non-zero on success */ +static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev) +{ + u64 count = __snp_get_msg_seqno(snp_dev); + + /* + * The message sequence counter for the SNP guest request is a 64-bit + * value but the version 2 of GHCB specification defines a 32-bit storage + * for it. If the counter exceeds the 32-bit value then return zero. 
+ * The caller should check the return value, but if the caller happens to + * not check the value and use it, then the firmware treats zero as an + * invalid number and will fail the message request. + */ + if (count >= UINT_MAX) { + dev_err(snp_dev->dev, "request message sequence counter overflow\n"); + return 0; + } + + return count; +} + +static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev) +{ + /* + * The counter is also incremented by the PSP, so increment it by 2 + * and save in secrets page. + */ + *snp_dev->os_area_msg_seqno += 2; +} + +static inline struct snp_guest_dev *to_snp_dev(struct file *file) +{ + struct miscdevice *dev = file->private_data; + + return container_of(dev, struct snp_guest_dev, misc); +} + +static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen) +{ + struct snp_guest_crypto *crypto; + + crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT); + if (!crypto) + return NULL; + + crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0); + if (IS_ERR(crypto->tfm)) + goto e_free; + + if (crypto_aead_setkey(crypto->tfm, key, keylen)) + goto e_free_crypto; + + crypto->iv_len = crypto_aead_ivsize(crypto->tfm); + crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT); + if (!crypto->iv) + goto e_free_crypto; + + if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) { + if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) { + dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN); + goto e_free_iv; + } + } + + crypto->a_len = crypto_aead_authsize(crypto->tfm); + crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT); + if (!crypto->authtag) + goto e_free_auth; + + return crypto; + +e_free_auth: + kfree(crypto->authtag); +e_free_iv: + kfree(crypto->iv); +e_free_crypto: + crypto_free_aead(crypto->tfm); +e_free: + kfree(crypto); + + return NULL; +} + +static void deinit_crypto(struct snp_guest_crypto *crypto) +{ + crypto_free_aead(crypto->tfm); + kfree(crypto->iv); + kfree(crypto->authtag); + kfree(crypto); +} + +static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg, + u8 *src_buf, u8 *dst_buf, size_t len, bool enc) +{ + struct snp_guest_msg_hdr *hdr = &msg->hdr; + struct scatterlist src[3], dst[3]; + DECLARE_CRYPTO_WAIT(wait); + struct aead_request *req; + int ret; + + req = aead_request_alloc(crypto->tfm, GFP_KERNEL); + if (!req) + return -ENOMEM; + + /* + * AEAD memory operations: + * +------ AAD -------+------- DATA -----+---- AUTHTAG----+ + * | msg header | plaintext | hdr->authtag | + * | bytes 30h - 5Fh | or | | + * | | cipher | | + * +------------------+------------------+----------------+ + */ + sg_init_table(src, 3); + sg_set_buf(&src[0], &hdr->algo, AAD_LEN); + sg_set_buf(&src[1], src_buf, hdr->msg_sz); + sg_set_buf(&src[2], hdr->authtag, crypto->a_len); + + sg_init_table(dst, 3); + sg_set_buf(&dst[0], &hdr->algo, AAD_LEN); + sg_set_buf(&dst[1], dst_buf, hdr->msg_sz); + sg_set_buf(&dst[2], hdr->authtag, crypto->a_len); + + aead_request_set_ad(req, AAD_LEN); + aead_request_set_tfm(req, crypto->tfm); + aead_request_set_callback(req, 0, crypto_req_done, &wait); + + aead_request_set_crypt(req, src, dst, len, crypto->iv); + ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait); + + aead_request_free(req); + return ret; +} + +static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, + void *plaintext, size_t len) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_guest_msg_hdr *hdr = &msg->hdr; + + memset(crypto->iv, 0, crypto->iv_len); + memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); + + return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true); +} + +static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, + void *plaintext, size_t len) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_guest_msg_hdr *hdr = &msg->hdr; + + /* Build IV with response buffer sequence number */ + memset(crypto->iv, 0, crypto->iv_len); + memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); + + return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false); +} + +static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_guest_msg *resp = snp_dev->response; + struct snp_guest_msg *req = snp_dev->request; + struct snp_guest_msg_hdr *req_hdr = &req->hdr; + struct snp_guest_msg_hdr *resp_hdr = &resp->hdr; + + dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n", + resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz); + + /* Verify that the sequence counter is incremented by 1 */ + if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1))) + return -EBADMSG; + + /* Verify response message type and version number. */ + if (resp_hdr->msg_type != (req_hdr->msg_type + 1) || + resp_hdr->msg_version != req_hdr->msg_version) + return -EBADMSG; + + /* + * If the message size is greater than our buffer length then return + * an error. 
+ */ + if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz)) + return -EBADMSG; + + /* Decrypt the payload */ + return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len); +} + +static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type, + void *payload, size_t sz) +{ + struct snp_guest_msg *req = snp_dev->request; + struct snp_guest_msg_hdr *hdr = &req->hdr; + + memset(req, 0, sizeof(*req)); + + hdr->algo = SNP_AEAD_AES_256_GCM; + hdr->hdr_version = MSG_HDR_VER; + hdr->hdr_sz = sizeof(*hdr); + hdr->msg_type = type; + hdr->msg_version = version; + hdr->msg_seqno = seqno; + hdr->msg_vmpck = vmpck_id; + hdr->msg_sz = sz; + + /* Verify the sequence number is non-zero */ + if (!hdr->msg_seqno) + return -ENOSR; + + dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n", + hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz); + + return __enc_payload(snp_dev, req, payload, sz); +} + +static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver, + u8 type, void *req_buf, size_t req_sz, void *resp_buf, + u32 resp_sz, __u64 *fw_err) +{ + unsigned long err; + u64 seqno; + int rc; + + /* Get message sequence and verify that its a non-zero */ + seqno = snp_get_msg_seqno(snp_dev); + if (!seqno) + return -EIO; + + memset(snp_dev->response, 0, sizeof(struct snp_guest_msg)); + + /* Encrypt the userspace provided payload */ + rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz); + if (rc) + return rc; + + /* Call firmware to process the request */ + rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); + if (fw_err) + *fw_err = err; + + if (rc) + return rc; + + /* + * The verify_and_dec_payload() will fail only if the hypervisor is + * actively modifying the message header or corrupting the encrypted payload. + * This hints that hypervisor is acting in a bad faith. Disable the VMPCK so that + * the key cannot be used for any communication. The key is disabled to ensure + * that AES-GCM does not use the same IV while encrypting the request payload. + */ + rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz); + if (rc) { + dev_alert(snp_dev->dev, + "Detected unexpected decode failure, disabling the vmpck_id %d\n", + vmpck_id); + snp_disable_vmpck(snp_dev); + return rc; + } + + /* Increment to new message sequence after payload decryption was successful. */ + snp_inc_msg_seqno(snp_dev); + + return 0; +} + +static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_report_resp *resp; + struct snp_report_req req; + int rc, resp_len; + + lockdep_assert_held(&snp_cmd_mutex); + + if (!arg->req_data || !arg->resp_data) + return -EINVAL; + + if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + return -EFAULT; + + /* + * The intermediate response buffer is used while decrypting the + * response payload. Make sure that it has enough space to cover the + * authtag. 
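+ *
+ * For example, with gcm(aes)'s usual 16-byte tag this allocates
+ * sizeof(resp->data) + 16 bytes, and handle_guest_request() decrypts
+ * the firmware response directly into this buffer before the report
+ * is copied out to userspace.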
+ */ + resp_len = sizeof(resp->data) + crypto->a_len; + resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!resp) + return -ENOMEM; + + rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version, + SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data, + resp_len, &arg->fw_err); + if (rc) + goto e_free; + + if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp))) + rc = -EFAULT; + +e_free: + kfree(resp); + return rc; +} + +static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_derived_key_resp resp = {0}; + struct snp_derived_key_req req; + int rc, resp_len; + /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ + u8 buf[64 + 16]; + + lockdep_assert_held(&snp_cmd_mutex); + + if (!arg->req_data || !arg->resp_data) + return -EINVAL; + + /* + * The intermediate response buffer is used while decrypting the + * response payload. Make sure that it has enough space to cover the + * authtag. + */ + resp_len = sizeof(resp.data) + crypto->a_len; + if (sizeof(buf) < resp_len) + return -ENOMEM; + + if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + return -EFAULT; + + rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version, + SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len, + &arg->fw_err); + if (rc) + return rc; + + memcpy(resp.data, buf, sizeof(resp.data)); + if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp))) + rc = -EFAULT; + + /* The response buffer contains the sensitive data, explicitly clear it. */ + memzero_explicit(buf, sizeof(buf)); + memzero_explicit(&resp, sizeof(resp)); + return rc; +} + +static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_ext_report_req req; + struct snp_report_resp *resp; + int ret, npages = 0, resp_len; + + lockdep_assert_held(&snp_cmd_mutex); + + if (!arg->req_data || !arg->resp_data) + return -EINVAL; + + if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + return -EFAULT; + + /* userspace does not want certificate data */ + if (!req.certs_len || !req.certs_address) + goto cmd; + + if (req.certs_len > SEV_FW_BLOB_MAX_SIZE || + !IS_ALIGNED(req.certs_len, PAGE_SIZE)) + return -EINVAL; + + if (!access_ok((const void __user *)req.certs_address, req.certs_len)) + return -EFAULT; + + /* + * Initialize the intermediate buffer with all zeros. This buffer + * is used in the guest request message to get the certs blob from + * the host. If host does not supply any certs in it, then copy + * zeros to indicate that certificate data was not provided. + */ + memset(snp_dev->certs_data, 0, req.certs_len); + npages = req.certs_len >> PAGE_SHIFT; +cmd: + /* + * The intermediate response buffer is used while decrypting the + * response payload. Make sure that it has enough space to cover the + * authtag. 
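+ *
+ * Note that the certificate blob travels separately through the shared
+ * certs_data pages rather than through this buffer; if the supplied
+ * area is too small, the hypervisor reports the required size in
+ * data_npages (e.g. a 16K blob corresponds to 4 pages with 4K pages),
+ * which is converted back into certs_len below.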
+ */ + resp_len = sizeof(resp->data) + crypto->a_len; + resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!resp) + return -ENOMEM; + + snp_dev->input.data_npages = npages; + ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version, + SNP_MSG_REPORT_REQ, &req.data, + sizeof(req.data), resp->data, resp_len, &arg->fw_err); + + /* If certs length is invalid then copy the returned length */ + if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) { + req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT; + + if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req))) + ret = -EFAULT; + } + + if (ret) + goto e_free; + + if (npages && + copy_to_user((void __user *)req.certs_address, snp_dev->certs_data, + req.certs_len)) { + ret = -EFAULT; + goto e_free; + } + + if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp))) + ret = -EFAULT; + +e_free: + kfree(resp); + return ret; +} + +static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + struct snp_guest_dev *snp_dev = to_snp_dev(file); + void __user *argp = (void __user *)arg; + struct snp_guest_request_ioctl input; + int ret = -ENOTTY; + + if (copy_from_user(&input, argp, sizeof(input))) + return -EFAULT; + + input.fw_err = 0xff; + + /* Message version must be non-zero */ + if (!input.msg_version) + return -EINVAL; + + mutex_lock(&snp_cmd_mutex); + + /* Check if the VMPCK is not empty */ + if (is_vmpck_empty(snp_dev)) { + dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n"); + mutex_unlock(&snp_cmd_mutex); + return -ENOTTY; + } + + switch (ioctl) { + case SNP_GET_REPORT: + ret = get_report(snp_dev, &input); + break; + case SNP_GET_DERIVED_KEY: + ret = get_derived_key(snp_dev, &input); + break; + case SNP_GET_EXT_REPORT: + ret = get_ext_report(snp_dev, &input); + break; + default: + break; + } + + mutex_unlock(&snp_cmd_mutex); + + if (input.fw_err && copy_to_user(argp, &input, sizeof(input))) + return -EFAULT; + + return ret; +} + +static void free_shared_pages(void *buf, size_t sz) +{ + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + int ret; + + if (!buf) + return; + + ret = set_memory_encrypted((unsigned long)buf, npages); + if (ret) { + WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n"); + return; + } + + __free_pages(virt_to_page(buf), get_order(sz)); +} + +static void *alloc_shared_pages(struct device *dev, size_t sz) +{ + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + struct page *page; + int ret; + + page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz)); + if (!page) + return NULL; + + ret = set_memory_decrypted((unsigned long)page_address(page), npages); + if (ret) { + dev_err(dev, "failed to mark page shared, ret=%d\n", ret); + __free_pages(page, get_order(sz)); + return NULL; + } + + return page_address(page); +} + +static const struct file_operations snp_guest_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = snp_guest_ioctl, +}; + +static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno) +{ + u8 *key = NULL; + + switch (id) { + case 0: + *seqno = &layout->os_area.msg_seqno_0; + key = layout->vmpck0; + break; + case 1: + *seqno = &layout->os_area.msg_seqno_1; + key = layout->vmpck1; + break; + case 2: + *seqno = &layout->os_area.msg_seqno_2; + key = layout->vmpck2; + break; + case 3: + *seqno = &layout->os_area.msg_seqno_3; + key = layout->vmpck3; + break; + default: + break; + } + + return key; +} + +static int __init sev_guest_probe(struct platform_device *pdev) +{ + struct snp_secrets_page_layout 
*layout; + struct sev_guest_platform_data *data; + struct device *dev = &pdev->dev; + struct snp_guest_dev *snp_dev; + struct miscdevice *misc; + void __iomem *mapping; + int ret; + + if (!dev->platform_data) + return -ENODEV; + + data = (struct sev_guest_platform_data *)dev->platform_data; + mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); + if (!mapping) + return -ENODEV; + + layout = (__force void *)mapping; + + ret = -ENOMEM; + snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL); + if (!snp_dev) + goto e_unmap; + + ret = -EINVAL; + snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno); + if (!snp_dev->vmpck) { + dev_err(dev, "invalid vmpck id %d\n", vmpck_id); + goto e_unmap; + } + + /* Verify that VMPCK is not zero. */ + if (is_vmpck_empty(snp_dev)) { + dev_err(dev, "vmpck id %d is null\n", vmpck_id); + goto e_unmap; + } + + platform_set_drvdata(pdev, snp_dev); + snp_dev->dev = dev; + snp_dev->layout = layout; + + /* Allocate the shared page used for the request and response message. */ + snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); + if (!snp_dev->request) + goto e_unmap; + + snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); + if (!snp_dev->response) + goto e_free_request; + + snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE); + if (!snp_dev->certs_data) + goto e_free_response; + + ret = -EIO; + snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN); + if (!snp_dev->crypto) + goto e_free_cert_data; + + misc = &snp_dev->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = DEVICE_NAME; + misc->fops = &snp_guest_fops; + + /* initial the input address for guest request */ + snp_dev->input.req_gpa = __pa(snp_dev->request); + snp_dev->input.resp_gpa = __pa(snp_dev->response); + snp_dev->input.data_gpa = __pa(snp_dev->certs_data); + + ret = misc_register(misc); + if (ret) + goto e_free_cert_data; + + dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id); + return 0; + +e_free_cert_data: + free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); +e_free_response: + free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); +e_free_request: + free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); +e_unmap: + iounmap(mapping); + return ret; +} + +static int __exit sev_guest_remove(struct platform_device *pdev) +{ + struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev); + + free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); + free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); + free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); + deinit_crypto(snp_dev->crypto); + misc_deregister(&snp_dev->misc); + + return 0; +} + +/* + * This driver is meant to be a common SEV guest interface driver and to + * support any SEV guest API. As such, even though it has been introduced + * with the SEV-SNP support, it is named "sev-guest". 
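+ *
+ * Note that registration goes through module_platform_driver_probe(),
+ * so sev_guest_probe() can be __init: it runs once against a
+ * "sev-guest" platform device that must already be registered, and
+ * deferred probing is not supported.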
+ */ +static struct platform_driver sev_guest_driver = { + .remove = __exit_p(sev_guest_remove), + .driver = { + .name = "sev-guest", + }, +}; + +module_platform_driver_probe(sev_guest_driver, sev_guest_probe); + +MODULE_AUTHOR("Brijesh Singh "); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0"); +MODULE_DESCRIPTION("AMD SEV Guest Driver"); diff --git a/drivers/virt/coco/sev-guest/sev-guest.h b/drivers/virt/coco/sev-guest/sev-guest.h new file mode 100644 index 0000000000..21bda26fdb --- /dev/null +++ b/drivers/virt/coco/sev-guest/sev-guest.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Author: Brijesh Singh + * + * SEV-SNP API spec is available at https://developer.amd.com/sev + */ + +#ifndef __VIRT_SEVGUEST_H__ +#define __VIRT_SEVGUEST_H__ + +#include + +#define MAX_AUTHTAG_LEN 32 + +/* See SNP spec SNP_GUEST_REQUEST section for the structure */ +enum msg_type { + SNP_MSG_TYPE_INVALID = 0, + SNP_MSG_CPUID_REQ, + SNP_MSG_CPUID_RSP, + SNP_MSG_KEY_REQ, + SNP_MSG_KEY_RSP, + SNP_MSG_REPORT_REQ, + SNP_MSG_REPORT_RSP, + SNP_MSG_EXPORT_REQ, + SNP_MSG_EXPORT_RSP, + SNP_MSG_IMPORT_REQ, + SNP_MSG_IMPORT_RSP, + SNP_MSG_ABSORB_REQ, + SNP_MSG_ABSORB_RSP, + SNP_MSG_VMRK_REQ, + SNP_MSG_VMRK_RSP, + + SNP_MSG_TYPE_MAX +}; + +enum aead_algo { + SNP_AEAD_INVALID, + SNP_AEAD_AES_256_GCM, +}; + +struct snp_guest_msg_hdr { + u8 authtag[MAX_AUTHTAG_LEN]; + u64 msg_seqno; + u8 rsvd1[8]; + u8 algo; + u8 hdr_version; + u16 hdr_sz; + u8 msg_type; + u8 msg_version; + u16 msg_sz; + u32 rsvd2; + u8 msg_vmpck; + u8 rsvd3[35]; +} __packed; + +struct snp_guest_msg { + struct snp_guest_msg_hdr hdr; + u8 payload[4000]; +} __packed; + +#endif /* __VIRT_SEVGUEST_H__ */ diff --git a/drivers/virtio/virtio_anchor.c b/drivers/virtio/virtio_anchor.c new file mode 100644 index 0000000000..4d6a5d269b --- /dev/null +++ b/drivers/virtio/virtio_anchor.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include + +bool virtio_require_restricted_mem_acc(struct virtio_device *dev) +{ + return true; +} +EXPORT_SYMBOL_GPL(virtio_require_restricted_mem_acc); + +static bool virtio_no_restricted_mem_acc(struct virtio_device *dev) +{ + return false; +} + +bool (*virtio_check_mem_acc_cb)(struct virtio_device *dev) = + virtio_no_restricted_mem_acc; +EXPORT_SYMBOL_GPL(virtio_check_mem_acc_cb); diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c new file mode 100644 index 0000000000..2fd85be882 --- /dev/null +++ b/drivers/watchdog/gxp-wdt.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. 
*/ + +#include +#include +#include +#include +#include +#include + +#define MASK_WDGCS_ENABLE 0x01 +#define MASK_WDGCS_RELOAD 0x04 +#define MASK_WDGCS_NMIEN 0x08 +#define MASK_WDGCS_WARN 0x80 + +#define WDT_MAX_TIMEOUT_MS 655350 +#define WDT_DEFAULT_TIMEOUT 30 +#define SECS_TO_WDOG_TICKS(x) ((x) * 100) +#define WDOG_TICKS_TO_SECS(x) ((x) / 100) + +#define GXP_WDT_CNT_OFS 0x10 +#define GXP_WDT_CTRL_OFS 0x16 + +struct gxp_wdt { + void __iomem *base; + struct watchdog_device wdd; +}; + +static void gxp_wdt_enable_reload(struct gxp_wdt *drvdata) +{ + u8 val; + + val = readb(drvdata->base + GXP_WDT_CTRL_OFS); + val |= (MASK_WDGCS_ENABLE | MASK_WDGCS_RELOAD); + writeb(val, drvdata->base + GXP_WDT_CTRL_OFS); +} + +static int gxp_wdt_start(struct watchdog_device *wdd) +{ + struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd); + + writew(SECS_TO_WDOG_TICKS(wdd->timeout), drvdata->base + GXP_WDT_CNT_OFS); + gxp_wdt_enable_reload(drvdata); + return 0; +} + +static int gxp_wdt_stop(struct watchdog_device *wdd) +{ + struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd); + u8 val; + + val = readb_relaxed(drvdata->base + GXP_WDT_CTRL_OFS); + val &= ~MASK_WDGCS_ENABLE; + writeb(val, drvdata->base + GXP_WDT_CTRL_OFS); + return 0; +} + +static int gxp_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd); + u32 actual; + + wdd->timeout = timeout; + actual = min(timeout * 100, wdd->max_hw_heartbeat_ms / 10); + writew(actual, drvdata->base + GXP_WDT_CNT_OFS); + + return 0; +} + +static unsigned int gxp_wdt_get_timeleft(struct watchdog_device *wdd) +{ + struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd); + u32 val = readw(drvdata->base + GXP_WDT_CNT_OFS); + + return WDOG_TICKS_TO_SECS(val); +} + +static int gxp_wdt_ping(struct watchdog_device *wdd) +{ + struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd); + + gxp_wdt_enable_reload(drvdata); + return 0; +} + +static int gxp_restart(struct watchdog_device *wdd, unsigned long action, + void *data) +{ + struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd); + + writew(1, drvdata->base + GXP_WDT_CNT_OFS); + gxp_wdt_enable_reload(drvdata); + mdelay(100); + return 0; +} + +static const struct watchdog_ops gxp_wdt_ops = { + .owner = THIS_MODULE, + .start = gxp_wdt_start, + .stop = gxp_wdt_stop, + .ping = gxp_wdt_ping, + .set_timeout = gxp_wdt_set_timeout, + .get_timeleft = gxp_wdt_get_timeleft, + .restart = gxp_restart, +}; + +static const struct watchdog_info gxp_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, + .identity = "HPE GXP Watchdog timer", +}; + +static int gxp_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct gxp_wdt *drvdata; + int err; + u8 val; + + drvdata = devm_kzalloc(dev, sizeof(struct gxp_wdt), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + /* + * The register area where the timer and watchdog reside is disarranged. + * Hence mapping individual register blocks for the timer and watchdog + * is not recommended as they would have access to each others + * registers. Based on feedback the watchdog is no longer part of the + * device tree file and the timer driver now creates the watchdog as a + * child device. During the watchdogs creation, the timer driver passes + * the base address to the watchdog over the private interface. 
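+ *
+ * In other words, dev->platform_data is expected to hold the already
+ * ioremap()ed base address itself, which is why it is cast straight
+ * to void __iomem * below instead of being mapped from a resource.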
+ */ + + drvdata->base = (void __iomem *)dev->platform_data; + + drvdata->wdd.info = &gxp_wdt_info; + drvdata->wdd.ops = &gxp_wdt_ops; + drvdata->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS; + drvdata->wdd.parent = dev; + drvdata->wdd.timeout = WDT_DEFAULT_TIMEOUT; + + watchdog_set_drvdata(&drvdata->wdd, drvdata); + watchdog_set_nowayout(&drvdata->wdd, WATCHDOG_NOWAYOUT); + + val = readb(drvdata->base + GXP_WDT_CTRL_OFS); + + if (val & MASK_WDGCS_ENABLE) + set_bit(WDOG_HW_RUNNING, &drvdata->wdd.status); + + watchdog_set_restart_priority(&drvdata->wdd, 128); + + watchdog_stop_on_reboot(&drvdata->wdd); + err = devm_watchdog_register_device(dev, &drvdata->wdd); + if (err) { + dev_err(dev, "Failed to register watchdog device"); + return err; + } + + dev_info(dev, "HPE GXP watchdog timer"); + + return 0; +} + +static struct platform_driver gxp_wdt_driver = { + .probe = gxp_wdt_probe, + .driver = { + .name = "gxp-wdt", + }, +}; +module_platform_driver(gxp_wdt_driver); + +MODULE_AUTHOR("Nick Hawkins "); +MODULE_AUTHOR("Jean-Marie Verdun "); +MODULE_DESCRIPTION("Driver for GXP watchdog timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/pseries-wdt.c b/drivers/watchdog/pseries-wdt.c new file mode 100644 index 0000000000..7f53b52934 --- /dev/null +++ b/drivers/watchdog/pseries-wdt.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022 International Business Machines, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "pseries-wdt" + +/* + * H_WATCHDOG Input + * + * R4: "flags": + * + * Bits 48-55: "operation" + */ +#define PSERIES_WDTF_OP_START 0x100UL /* start timer */ +#define PSERIES_WDTF_OP_STOP 0x200UL /* stop timer */ +#define PSERIES_WDTF_OP_QUERY 0x300UL /* query timer capabilities */ + +/* + * Bits 56-63: "timeoutAction" (for "Start Watchdog" only) + */ +#define PSERIES_WDTF_ACTION_HARD_POWEROFF 0x1UL /* poweroff */ +#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2UL /* restart */ +#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3UL /* dump + restart */ + +/* + * H_WATCHDOG Output + * + * R3: Return code + * + * H_SUCCESS The operation completed. + * + * H_BUSY The hypervisor is too busy; retry the operation. + * + * H_PARAMETER The given "flags" are somehow invalid. Either the + * "operation" or "timeoutAction" is invalid, or a + * reserved bit is set. + * + * H_P2 The given "watchdogNumber" is zero or exceeds the + * supported maximum value. + * + * H_P3 The given "timeoutInMs" is below the supported + * minimum value. + * + * H_NOOP The given "watchdogNumber" is already stopped. + * + * H_HARDWARE The operation failed for ineffable reasons. + * + * H_FUNCTION The H_WATCHDOG hypercall is not supported by this + * hypervisor. 
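+ *
+ * This driver maps H_FUNCTION on the query operation to -ENODEV
+ * ("no watchdog present") and reports other failures as -EIO,
+ * except that H_NOOP from a stop is tolerated since the timer is
+ * already stopped.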
+ * + * R4: + * + * - For the "Query Watchdog Capabilities" operation, a 64-bit + * structure: + */ +#define PSERIES_WDTQ_MIN_TIMEOUT(cap) (((cap) >> 48) & 0xffff) +#define PSERIES_WDTQ_MAX_NUMBER(cap) (((cap) >> 32) & 0xffff) + +static const unsigned long pseries_wdt_action[] = { + [0] = PSERIES_WDTF_ACTION_HARD_POWEROFF, + [1] = PSERIES_WDTF_ACTION_HARD_RESTART, + [2] = PSERIES_WDTF_ACTION_DUMP_RESTART, +}; + +#define WATCHDOG_ACTION 1 +static unsigned int action = WATCHDOG_ACTION; +module_param(action, uint, 0444); +MODULE_PARM_DESC(action, "Action taken when watchdog expires (default=" + __MODULE_STRING(WATCHDOG_ACTION) ")"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0444); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +#define WATCHDOG_TIMEOUT 60 +static unsigned int timeout = WATCHDOG_TIMEOUT; +module_param(timeout, uint, 0444); +MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds (default=" + __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); + +struct pseries_wdt { + struct watchdog_device wd; + unsigned long action; + unsigned long num; /* Watchdog numbers are 1-based */ +}; + +static int pseries_wdt_start(struct watchdog_device *wdd) +{ + struct pseries_wdt *pw = watchdog_get_drvdata(wdd); + struct device *dev = wdd->parent; + unsigned long flags, msecs; + long rc; + + flags = pw->action | PSERIES_WDTF_OP_START; + msecs = wdd->timeout * MSEC_PER_SEC; + rc = plpar_hcall_norets(H_WATCHDOG, flags, pw->num, msecs); + if (rc != H_SUCCESS) { + dev_crit(dev, "H_WATCHDOG: %ld: failed to start timer %lu", + rc, pw->num); + return -EIO; + } + return 0; +} + +static int pseries_wdt_stop(struct watchdog_device *wdd) +{ + struct pseries_wdt *pw = watchdog_get_drvdata(wdd); + struct device *dev = wdd->parent; + long rc; + + rc = plpar_hcall_norets(H_WATCHDOG, PSERIES_WDTF_OP_STOP, pw->num); + if (rc != H_SUCCESS && rc != H_NOOP) { + dev_crit(dev, "H_WATCHDOG: %ld: failed to stop timer %lu", + rc, pw->num); + return -EIO; + } + return 0; +} + +static struct watchdog_info pseries_wdt_info = { + .identity = DRV_NAME, + .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT + | WDIOF_PRETIMEOUT, +}; + +static const struct watchdog_ops pseries_wdt_ops = { + .owner = THIS_MODULE, + .start = pseries_wdt_start, + .stop = pseries_wdt_stop, +}; + +static int pseries_wdt_probe(struct platform_device *pdev) +{ + unsigned long ret[PLPAR_HCALL_BUFSIZE] = { 0 }; + struct pseries_wdt *pw; + unsigned long cap; + long msecs, rc; + int err; + + rc = plpar_hcall(H_WATCHDOG, ret, PSERIES_WDTF_OP_QUERY); + if (rc == H_FUNCTION) + return -ENODEV; + if (rc != H_SUCCESS) + return -EIO; + cap = ret[0]; + + pw = devm_kzalloc(&pdev->dev, sizeof(*pw), GFP_KERNEL); + if (!pw) + return -ENOMEM; + + /* + * Assume watchdogNumber 1 for now. If we ever support + * multiple timers we will need to devise a way to choose a + * distinct watchdogNumber for each platform device at device + * registration time. 
+ */ + pw->num = 1; + if (PSERIES_WDTQ_MAX_NUMBER(cap) < pw->num) + return -ENODEV; + + if (action >= ARRAY_SIZE(pseries_wdt_action)) + return -EINVAL; + pw->action = pseries_wdt_action[action]; + + pw->wd.parent = &pdev->dev; + pw->wd.info = &pseries_wdt_info; + pw->wd.ops = &pseries_wdt_ops; + msecs = PSERIES_WDTQ_MIN_TIMEOUT(cap); + pw->wd.min_timeout = DIV_ROUND_UP(msecs, MSEC_PER_SEC); + pw->wd.max_timeout = UINT_MAX / 1000; /* from linux/watchdog.h */ + pw->wd.timeout = timeout; + if (watchdog_init_timeout(&pw->wd, 0, NULL)) + return -EINVAL; + watchdog_set_nowayout(&pw->wd, nowayout); + watchdog_stop_on_reboot(&pw->wd); + watchdog_stop_on_unregister(&pw->wd); + watchdog_set_drvdata(&pw->wd, pw); + + err = devm_watchdog_register_device(&pdev->dev, &pw->wd); + if (err) + return err; + + platform_set_drvdata(pdev, &pw->wd); + + return 0; +} + +static int pseries_wdt_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct watchdog_device *wd = platform_get_drvdata(pdev); + + if (watchdog_active(wd)) + return pseries_wdt_stop(wd); + return 0; +} + +static int pseries_wdt_resume(struct platform_device *pdev) +{ + struct watchdog_device *wd = platform_get_drvdata(pdev); + + if (watchdog_active(wd)) + return pseries_wdt_start(wd); + return 0; +} + +static const struct platform_device_id pseries_wdt_id[] = { + { .name = "pseries-wdt" }, + {} +}; +MODULE_DEVICE_TABLE(platform, pseries_wdt_id); + +static struct platform_driver pseries_wdt_driver = { + .driver = { + .name = DRV_NAME, + }, + .id_table = pseries_wdt_id, + .probe = pseries_wdt_probe, + .resume = pseries_wdt_resume, + .suspend = pseries_wdt_suspend, +}; +module_platform_driver(pseries_wdt_driver); + +MODULE_AUTHOR("Alexey Kardashevskiy"); +MODULE_AUTHOR("Scott Cheloha"); +MODULE_DESCRIPTION("POWER Architecture Platform Watchdog Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/rzn1_wdt.c b/drivers/watchdog/rzn1_wdt.c new file mode 100644 index 0000000000..55ab384b99 --- /dev/null +++ b/drivers/watchdog/rzn1_wdt.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/N1 Watchdog timer. + * This is a 12-bit timer driver from a (62.5/16384) MHz clock. It can't even + * cope with 2 seconds. + * + * Copyright 2018 Renesas Electronics Europe Ltd. + * + * Derived from Ralink RT288x watchdog timer. 
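+ *
+ * Worked example: the 62.5/16384 MHz tick is roughly 3.815 kHz, so
+ * the 12-bit counter (max 4095) covers at most 4095 / 3814.7 Hz,
+ * i.e. about 1.07 seconds, which is why the probe below caps
+ * max_hw_heartbeat_ms at 1000 and relies on the watchdog core's
+ * software timeout on top of it.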
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DEFAULT_TIMEOUT			60
+
+#define RZN1_WDT_RETRIGGER			0x0
+#define RZN1_WDT_RETRIGGER_RELOAD_VAL		0
+#define RZN1_WDT_RETRIGGER_RELOAD_VAL_MASK	0xfff
+#define RZN1_WDT_RETRIGGER_PRESCALE		BIT(12)
+#define RZN1_WDT_RETRIGGER_ENABLE		BIT(13)
+#define RZN1_WDT_RETRIGGER_WDSI			(0x2 << 14)
+
+#define RZN1_WDT_PRESCALER			16384
+#define RZN1_WDT_MAX				4095
+
+struct rzn1_watchdog {
+	struct watchdog_device		wdtdev;
+	void __iomem			*base;
+	unsigned long			clk_rate_khz;
+};
+
+static inline uint32_t max_heart_beat_ms(unsigned long clk_rate_khz)
+{
+	return (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz;
+}
+
+static inline uint32_t compute_reload_value(uint32_t tick_ms,
+					    unsigned long clk_rate_khz)
+{
+	return (tick_ms * clk_rate_khz) / RZN1_WDT_PRESCALER;
+}
+
+static int rzn1_wdt_ping(struct watchdog_device *w)
+{
+	struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
+
+	/* Any value retriggers the watchdog */
+	writel(0, wdt->base + RZN1_WDT_RETRIGGER);
+
+	return 0;
+}
+
+static int rzn1_wdt_start(struct watchdog_device *w)
+{
+	struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
+	u32 val;
+
+	/*
+	 * The hardware allows you to write to this reg only once.
+	 * Since this includes the reload value, there is no way to change the
+	 * timeout once started. Also note that the WDT clock is half the bus
+	 * fabric clock rate, so if the bus fabric clock rate is changed after
+	 * the WDT is started, the WDT interval will be wrong.
+	 */
+	val = RZN1_WDT_RETRIGGER_WDSI;
+	val |= RZN1_WDT_RETRIGGER_ENABLE;
+	val |= RZN1_WDT_RETRIGGER_PRESCALE;
+	val |= compute_reload_value(w->max_hw_heartbeat_ms, wdt->clk_rate_khz);
+	writel(val, wdt->base + RZN1_WDT_RETRIGGER);
+
+	return 0;
+}
+
+static irqreturn_t rzn1_wdt_irq(int irq, void *_wdt)
+{
+	pr_crit("RZN1 Watchdog.
Initiating system reboot\n"); + emergency_restart(); + + return IRQ_HANDLED; +} + +static struct watchdog_info rzn1_wdt_info = { + .identity = "RZ/N1 Watchdog", + .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, +}; + +static const struct watchdog_ops rzn1_wdt_ops = { + .owner = THIS_MODULE, + .start = rzn1_wdt_start, + .ping = rzn1_wdt_ping, +}; + +static void rzn1_wdt_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static int rzn1_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rzn1_watchdog *wdt; + struct device_node *np = dev->of_node; + struct clk *clk; + unsigned long clk_rate; + int ret; + int irq; + + wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + wdt->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(wdt->base)) + return PTR_ERR(wdt->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(dev, irq, rzn1_wdt_irq, 0, + np->name, wdt); + if (ret) { + dev_err(dev, "failed to request irq %d\n", irq); + return ret; + } + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "failed to get the clock\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "failed to prepare/enable the clock\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, rzn1_wdt_clk_disable_unprepare, + clk); + if (ret) + return ret; + + clk_rate = clk_get_rate(clk); + if (!clk_rate) { + dev_err(dev, "failed to get the clock rate\n"); + return -EINVAL; + } + + wdt->clk_rate_khz = clk_rate / 1000; + wdt->wdtdev.info = &rzn1_wdt_info, + wdt->wdtdev.ops = &rzn1_wdt_ops, + wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS, + wdt->wdtdev.parent = dev; + /* + * The period of the watchdog cannot be changed once set + * and is limited to a very short period. + * Configure it for a 1s period once and for all, and + * rely on the heart-beat provided by the watchdog core + * to make this usable by the user-space. + */ + wdt->wdtdev.max_hw_heartbeat_ms = max_heart_beat_ms(wdt->clk_rate_khz); + if (wdt->wdtdev.max_hw_heartbeat_ms > 1000) + wdt->wdtdev.max_hw_heartbeat_ms = 1000; + + wdt->wdtdev.timeout = DEFAULT_TIMEOUT; + ret = watchdog_init_timeout(&wdt->wdtdev, 0, dev); + if (ret) + return ret; + + watchdog_set_drvdata(&wdt->wdtdev, wdt); + + return devm_watchdog_register_device(dev, &wdt->wdtdev); +} + + +static const struct of_device_id rzn1_wdt_match[] = { + { .compatible = "renesas,rzn1-wdt" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rzn1_wdt_match); + +static struct platform_driver rzn1_wdt_driver = { + .probe = rzn1_wdt_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = rzn1_wdt_match, + }, +}; + +module_platform_driver(rzn1_wdt_driver); + +MODULE_DESCRIPTION("Renesas RZ/N1 hardware watchdog"); +MODULE_AUTHOR("Phil Edworthy "); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/sunplus_wdt.c b/drivers/watchdog/sunplus_wdt.c new file mode 100644 index 0000000000..e2d8c532bc --- /dev/null +++ b/drivers/watchdog/sunplus_wdt.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sunplus Watchdog Driver + * + * Copyright (C) 2021 Sunplus Technology Co., Ltd. 
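+ *
+ * The controller is driven with magic command words written to the
+ * control register: WDT_UNLOCK must precede an update of the counter
+ * register, WDT_LOCK re-protects it, and WDT_CONMAX reloads the
+ * down-counter with its maximum value.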
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define WDT_CTRL 0x00 +#define WDT_CNT 0x04 + +#define WDT_STOP 0x3877 +#define WDT_RESUME 0x4A4B +#define WDT_CLRIRQ 0x7482 +#define WDT_UNLOCK 0xAB00 +#define WDT_LOCK 0xAB01 +#define WDT_CONMAX 0xDEAF + +/* TIMEOUT_MAX = ffff0/90kHz =11.65, so longer than 11 seconds will time out. */ +#define SP_WDT_MAX_TIMEOUT 11U +#define SP_WDT_DEFAULT_TIMEOUT 10 + +#define STC_CLK 90000 + +#define DEVICE_NAME "sunplus-wdt" + +static unsigned int timeout; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +struct sp_wdt_priv { + struct watchdog_device wdev; + void __iomem *base; + struct clk *clk; + struct reset_control *rstc; +}; + +static int sp_wdt_restart(struct watchdog_device *wdev, + unsigned long action, void *data) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + + writel(WDT_STOP, base + WDT_CTRL); + writel(WDT_UNLOCK, base + WDT_CTRL); + writel(0x0001, base + WDT_CNT); + writel(WDT_LOCK, base + WDT_CTRL); + writel(WDT_RESUME, base + WDT_CTRL); + + return 0; +} + +static int sp_wdt_ping(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + u32 count; + + if (wdev->timeout > SP_WDT_MAX_TIMEOUT) { + /* WDT_CONMAX sets the count to the maximum (down-counting). */ + writel(WDT_CONMAX, base + WDT_CTRL); + } else { + writel(WDT_UNLOCK, base + WDT_CTRL); + /* + * Watchdog timer is a 20-bit down-counting based on STC_CLK. + * This register bits[16:0] is from bit[19:4] of the watchdog + * timer counter. 
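+ *
+ * Worked example, assuming the 90 kHz STC clock: a 10 second
+ * timeout becomes (10 * 90000) >> 4 == 56250 register ticks,
+ * and sp_wdt_get_timeleft() reverses this by shifting the value
+ * read back left by 4 bits.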
+ */ + count = (wdev->timeout * STC_CLK) >> 4; + writel(count, base + WDT_CNT); + writel(WDT_LOCK, base + WDT_CTRL); + } + + return 0; +} + +static int sp_wdt_stop(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + + writel(WDT_STOP, base + WDT_CTRL); + + return 0; +} + +static int sp_wdt_start(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + + writel(WDT_RESUME, base + WDT_CTRL); + + return 0; +} + +static unsigned int sp_wdt_get_timeleft(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + u32 val; + + val = readl(base + WDT_CNT); + val &= 0xffff; + val = val << 4; + + return val; +} + +static const struct watchdog_info sp_wdt_info = { + .identity = DEVICE_NAME, + .options = WDIOF_SETTIMEOUT | + WDIOF_MAGICCLOSE | + WDIOF_KEEPALIVEPING, +}; + +static const struct watchdog_ops sp_wdt_ops = { + .owner = THIS_MODULE, + .start = sp_wdt_start, + .stop = sp_wdt_stop, + .ping = sp_wdt_ping, + .get_timeleft = sp_wdt_get_timeleft, + .restart = sp_wdt_restart, +}; + +static void sp_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static void sp_reset_control_assert(void *data) +{ + reset_control_assert(data); +} + +static int sp_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_wdt_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n"); + + ret = clk_prepare_enable(priv->clk); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable clock\n"); + + ret = devm_add_action_or_reset(dev, sp_clk_disable_unprepare, priv->clk); + if (ret) + return ret; + + /* The timer and watchdog shared the STC reset */ + priv->rstc = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(priv->rstc)) + return dev_err_probe(dev, PTR_ERR(priv->rstc), "Failed to get reset\n"); + + reset_control_deassert(priv->rstc); + + ret = devm_add_action_or_reset(dev, sp_reset_control_assert, priv->rstc); + if (ret) + return ret; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->wdev.info = &sp_wdt_info; + priv->wdev.ops = &sp_wdt_ops; + priv->wdev.timeout = SP_WDT_DEFAULT_TIMEOUT; + priv->wdev.max_hw_heartbeat_ms = SP_WDT_MAX_TIMEOUT * 1000; + priv->wdev.min_timeout = 1; + priv->wdev.parent = dev; + + watchdog_set_drvdata(&priv->wdev, priv); + watchdog_init_timeout(&priv->wdev, timeout, dev); + watchdog_set_nowayout(&priv->wdev, nowayout); + watchdog_stop_on_reboot(&priv->wdev); + watchdog_set_restart_priority(&priv->wdev, 128); + + return devm_watchdog_register_device(dev, &priv->wdev); +} + +static const struct of_device_id sp_wdt_of_match[] = { + {.compatible = "sunplus,sp7021-wdt", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sp_wdt_of_match); + +static struct platform_driver sp_wdt_driver = { + .probe = sp_wdt_probe, + .driver = { + .name = DEVICE_NAME, + .of_match_table = sp_wdt_of_match, + }, +}; + +module_platform_driver(sp_wdt_driver); + +MODULE_AUTHOR("Xiantao Hu "); +MODULE_DESCRIPTION("Sunplus Watchdog Timer Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c new file mode 100644 index 
0000000000..16b8bc0c0b --- /dev/null +++ b/drivers/xen/grant-dma-iommu.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stub IOMMU driver which does nothing. + * The main purpose of it being present is to reuse generic IOMMU device tree + * bindings by Xen grant DMA-mapping layer. + * + * Copyright (C) 2022 EPAM Systems Inc. + */ + +#include +#include +#include + +struct grant_dma_iommu_device { + struct device *dev; + struct iommu_device iommu; +}; + +/* Nothing is really needed here */ +static const struct iommu_ops grant_dma_iommu_ops; + +static const struct of_device_id grant_dma_iommu_of_match[] = { + { .compatible = "xen,grant-dma" }, + { }, +}; + +static int grant_dma_iommu_probe(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu; + int ret; + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return -ENOMEM; + + mmu->dev = &pdev->dev; + + ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int grant_dma_iommu_remove(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + iommu_device_unregister(&mmu->iommu); + + return 0; +} + +static struct platform_driver grant_dma_iommu_driver = { + .driver = { + .name = "grant-dma-iommu", + .of_match_table = grant_dma_iommu_of_match, + }, + .probe = grant_dma_iommu_probe, + .remove = grant_dma_iommu_remove, +}; + +static int __init grant_dma_iommu_init(void) +{ + struct device_node *iommu_np; + + iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match); + if (!iommu_np) + return 0; + + of_node_put(iommu_np); + + return platform_driver_register(&grant_dma_iommu_driver); +} +subsys_initcall(grant_dma_iommu_init); diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c new file mode 100644 index 0000000000..8973fc1e9c --- /dev/null +++ b/drivers/xen/grant-dma-ops.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Xen grant DMA-mapping layer - contains special DMA-mapping routines + * for providing grant references as DMA addresses to be used by frontends + * (e.g. virtio) in Xen guests + * + * Copyright (c) 2021, Juergen Gross + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct xen_grant_dma_data { + /* The ID of backend domain */ + domid_t backend_domid; + /* Is device behaving sane? */ + bool broken; +}; + +static DEFINE_XARRAY(xen_grant_dma_devices); + +#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63) + +static inline dma_addr_t grant_to_dma(grant_ref_t grant) +{ + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); +} + +static inline grant_ref_t dma_to_grant(dma_addr_t dma) +{ + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); +} + +static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) +{ + struct xen_grant_dma_data *data; + + xa_lock(&xen_grant_dma_devices); + data = xa_load(&xen_grant_dma_devices, (unsigned long)dev); + xa_unlock(&xen_grant_dma_devices); + + return data; +} + +/* + * DMA ops for Xen frontends (e.g. virtio). + * + * Used to act as a kind of software IOMMU for Xen guests by using grants as + * DMA addresses. + * Such a DMA address is formed by using the grant reference as a frame + * number and setting the highest address bit (this bit is for the backend + * to be able to distinguish it from e.g. a mmio address). 
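+ *
+ * For example, with 4K pages, grant reference 5 becomes the DMA
+ * address (1ULL << 63) | (5 << 12) == 0x8000000000005000;
+ * dma_to_grant() simply reverses the mask and shift.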
+ */ +static void *xen_grant_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + unsigned long pfn; + grant_ref_t grant; + void *ret; + + data = find_xen_grant_dma_data(dev); + if (!data) + return NULL; + + if (unlikely(data->broken)) + return NULL; + + ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + if (!ret) + return NULL; + + pfn = virt_to_pfn(ret); + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { + free_pages_exact(ret, n_pages * PAGE_SIZE); + return NULL; + } + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + pfn_to_gfn(pfn + i), 0); + } + + *dma_handle = grant_to_dma(grant); + + return ret; +} + +static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + gnttab_free_grant_reference_seq(grant, n_pages); + + free_pages_exact(vaddr, n_pages * PAGE_SIZE); +} + +static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, + enum dma_data_direction dir, + gfp_t gfp) +{ + void *vaddr; + + vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0); + if (!vaddr) + return NULL; + + return virt_to_page(vaddr); +} + +static void xen_grant_dma_free_pages(struct device *dev, size_t size, + struct page *vaddr, dma_addr_t dma_handle, + enum dma_data_direction dir) +{ + xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0); +} + +static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + dma_addr_t dma_handle; + + if (WARN_ON(dir == DMA_NONE)) + return DMA_MAPPING_ERROR; + + data = find_xen_grant_dma_data(dev); + if (!data) + return DMA_MAPPING_ERROR; + + if (unlikely(data->broken)) + return DMA_MAPPING_ERROR; + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) + return DMA_MAPPING_ERROR; + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + } + + dma_handle = grant_to_dma(grant) + offset; + + return dma_handle; +} + +static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + if (WARN_ON(dir == DMA_NONE)) + return; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + 
gnttab_free_grant_reference_seq(grant, n_pages); +} + +static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return; + + for_each_sg(sg, s, nents, i) + xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir, + attrs); +} + +static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return -EINVAL; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + if (s->dma_address == DMA_MAPPING_ERROR) + goto out; + + sg_dma_len(s) = s->length; + } + + return nents; + +out: + xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + sg_dma_len(sg) = 0; + + return -EIO; +} + +static int xen_grant_dma_supported(struct device *dev, u64 mask) +{ + return mask == DMA_BIT_MASK(64); +} + +static const struct dma_map_ops xen_grant_dma_ops = { + .alloc = xen_grant_dma_alloc, + .free = xen_grant_dma_free, + .alloc_pages = xen_grant_dma_alloc_pages, + .free_pages = xen_grant_dma_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, + .map_page = xen_grant_dma_map_page, + .unmap_page = xen_grant_dma_unmap_page, + .map_sg = xen_grant_dma_map_sg, + .unmap_sg = xen_grant_dma_unmap_sg, + .dma_supported = xen_grant_dma_supported, +}; + +bool xen_is_grant_dma_device(struct device *dev) +{ + struct device_node *iommu_np; + bool has_iommu; + + /* XXX Handle only DT devices for now */ + if (!dev->of_node) + return false; + + iommu_np = of_parse_phandle(dev->of_node, "iommus", 0); + has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma"); + of_node_put(iommu_np); + + return has_iommu; +} + +bool xen_virtio_mem_acc(struct virtio_device *dev) +{ + if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) + return true; + + return xen_is_grant_dma_device(dev->dev.parent); +} + +void xen_grant_setup_dma_ops(struct device *dev) +{ + struct xen_grant_dma_data *data; + struct of_phandle_args iommu_spec; + + data = find_xen_grant_dma_data(dev); + if (data) { + dev_err(dev, "Xen grant DMA data is already created\n"); + return; + } + + /* XXX ACPI device unsupported for now */ + if (!dev->of_node) + goto err; + + if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", + 0, &iommu_spec)) { + dev_err(dev, "Cannot parse iommus property\n"); + goto err; + } + + if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") || + iommu_spec.args_count != 1) { + dev_err(dev, "Incompatible IOMMU node\n"); + of_node_put(iommu_spec.np); + goto err; + } + + of_node_put(iommu_spec.np); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + goto err; + + /* + * The endpoint ID here means the ID of the domain where the corresponding + * backend is running + */ + data->backend_domid = iommu_spec.args[0]; + + if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data, + GFP_KERNEL))) { + dev_err(dev, "Cannot store Xen grant DMA data\n"); + goto err; + } + + dev->dma_ops = &xen_grant_dma_ops; + + return; + +err: + dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n"); +} + +MODULE_DESCRIPTION("Xen grant DMA-mapping layer"); +MODULE_AUTHOR("Juergen Gross "); +MODULE_LICENSE("GPL"); diff --git a/fs/cachefiles/ondemand.c 
b/fs/cachefiles/ondemand.c new file mode 100644 index 0000000000..0254ed39f6 --- /dev/null +++ b/fs/cachefiles/ondemand.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include +#include +#include "internal.h" + +static int cachefiles_ondemand_fd_release(struct inode *inode, + struct file *file) +{ + struct cachefiles_object *object = file->private_data; + struct cachefiles_cache *cache = object->volume->cache; + int object_id = object->ondemand_id; + struct cachefiles_req *req; + XA_STATE(xas, &cache->reqs, 0); + + xa_lock(&cache->reqs); + object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + + /* + * Flush all pending READ requests since their completion depends on + * anon_fd. + */ + xas_for_each(&xas, req, ULONG_MAX) { + if (req->msg.object_id == object_id && + req->msg.opcode == CACHEFILES_OP_READ) { + req->error = -EIO; + complete(&req->done); + xas_store(&xas, NULL); + } + } + xa_unlock(&cache->reqs); + + xa_erase(&cache->ondemand_ids, object_id); + trace_cachefiles_ondemand_fd_release(object, object_id); + cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); + cachefiles_put_unbind_pincount(cache); + return 0; +} + +static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb, + struct iov_iter *iter) +{ + struct cachefiles_object *object = kiocb->ki_filp->private_data; + struct cachefiles_cache *cache = object->volume->cache; + struct file *file = object->file; + size_t len = iter->count; + loff_t pos = kiocb->ki_pos; + const struct cred *saved_cred; + int ret; + + if (!file) + return -ENOBUFS; + + cachefiles_begin_secure(cache, &saved_cred); + ret = __cachefiles_prepare_write(object, file, &pos, &len, true); + cachefiles_end_secure(cache, saved_cred); + if (ret < 0) + return ret; + + trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len); + ret = __cachefiles_write(object, file, pos, iter, NULL, NULL); + if (!ret) + ret = len; + + return ret; +} + +static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos, + int whence) +{ + struct cachefiles_object *object = filp->private_data; + struct file *file = object->file; + + if (!file) + return -ENOBUFS; + + return vfs_llseek(file, pos, whence); +} + +static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + struct cachefiles_object *object = filp->private_data; + struct cachefiles_cache *cache = object->volume->cache; + struct cachefiles_req *req; + unsigned long id; + + if (ioctl != CACHEFILES_IOC_READ_COMPLETE) + return -EINVAL; + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return -EOPNOTSUPP; + + id = arg; + req = xa_erase(&cache->reqs, id); + if (!req) + return -EINVAL; + + trace_cachefiles_ondemand_cread(object, id); + complete(&req->done); + return 0; +} + +static const struct file_operations cachefiles_ondemand_fd_fops = { + .owner = THIS_MODULE, + .release = cachefiles_ondemand_fd_release, + .write_iter = cachefiles_ondemand_fd_write_iter, + .llseek = cachefiles_ondemand_fd_llseek, + .unlocked_ioctl = cachefiles_ondemand_fd_ioctl, +}; + +/* + * OPEN request Completion (copen) + * - command: "copen ," + * indicates the object size if >=0, error code if negative + */ +int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) +{ + struct cachefiles_req *req; + struct fscache_cookie *cookie; + char *pid, *psize; + unsigned long id; + long size; + int ret; + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return -EOPNOTSUPP; + + if (!*args) { + pr_err("Empty id 
specified\n"); + return -EINVAL; + } + + pid = args; + psize = strchr(args, ','); + if (!psize) { + pr_err("Cache size is not specified\n"); + return -EINVAL; + } + + *psize = 0; + psize++; + + ret = kstrtoul(pid, 0, &id); + if (ret) + return ret; + + req = xa_erase(&cache->reqs, id); + if (!req) + return -EINVAL; + + /* fail OPEN request if copen format is invalid */ + ret = kstrtol(psize, 0, &size); + if (ret) { + req->error = ret; + goto out; + } + + /* fail OPEN request if daemon reports an error */ + if (size < 0) { + if (!IS_ERR_VALUE(size)) { + req->error = -EINVAL; + ret = -EINVAL; + } else { + req->error = size; + ret = 0; + } + goto out; + } + + cookie = req->object->cookie; + cookie->object_size = size; + if (size) + clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); + else + set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); + trace_cachefiles_ondemand_copen(req->object, id, size); + +out: + complete(&req->done); + return ret; +} + +static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) +{ + struct cachefiles_object *object; + struct cachefiles_cache *cache; + struct cachefiles_open *load; + struct file *file; + u32 object_id; + int ret, fd; + + object = cachefiles_grab_object(req->object, + cachefiles_obj_get_ondemand_fd); + cache = object->volume->cache; + + ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL, + XA_LIMIT(1, INT_MAX), + &cache->ondemand_id_next, GFP_KERNEL); + if (ret < 0) + goto err; + + fd = get_unused_fd_flags(O_WRONLY); + if (fd < 0) { + ret = fd; + goto err_free_id; + } + + file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops, + object, O_WRONLY); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto err_put_fd; + } + + file->f_mode |= FMODE_PWRITE | FMODE_LSEEK; + fd_install(fd, file); + + load = (void *)req->msg.data; + load->fd = fd; + req->msg.object_id = object_id; + object->ondemand_id = object_id; + + cachefiles_get_unbind_pincount(cache); + trace_cachefiles_ondemand_open(object, &req->msg, load); + return 0; + +err_put_fd: + put_unused_fd(fd); +err_free_id: + xa_erase(&cache->ondemand_ids, object_id); +err: + cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); + return ret; +} + +ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, + char __user *_buffer, size_t buflen) +{ + struct cachefiles_req *req; + struct cachefiles_msg *msg; + unsigned long id = 0; + size_t n; + int ret = 0; + XA_STATE(xas, &cache->reqs, cache->req_id_next); + + /* + * Cyclically search for a request that has not ever been processed, + * to prevent requests from being processed repeatedly, and make + * request distribution fair. 
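+ *
+ * For example, if req_id_next is 3, the xarray is searched over
+ * [3, UINT_MAX] first and, if nothing new is found there, wraps
+ * around to [0, 2] before returning 0 to the daemon.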
+ */ + xa_lock(&cache->reqs); + req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW); + if (!req && cache->req_id_next > 0) { + xas_set(&xas, 0); + req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW); + } + if (!req) { + xa_unlock(&cache->reqs); + return 0; + } + + msg = &req->msg; + n = msg->len; + + if (n > buflen) { + xa_unlock(&cache->reqs); + return -EMSGSIZE; + } + + xas_clear_mark(&xas, CACHEFILES_REQ_NEW); + cache->req_id_next = xas.xa_index + 1; + xa_unlock(&cache->reqs); + + id = xas.xa_index; + msg->msg_id = id; + + if (msg->opcode == CACHEFILES_OP_OPEN) { + ret = cachefiles_ondemand_get_fd(req); + if (ret) + goto error; + } + + if (copy_to_user(_buffer, msg, n) != 0) { + ret = -EFAULT; + goto err_put_fd; + } + + /* CLOSE request has no reply */ + if (msg->opcode == CACHEFILES_OP_CLOSE) { + xa_erase(&cache->reqs, id); + complete(&req->done); + } + + return n; + +err_put_fd: + if (msg->opcode == CACHEFILES_OP_OPEN) + close_fd(((struct cachefiles_open *)msg->data)->fd); +error: + xa_erase(&cache->reqs, id); + req->error = ret; + complete(&req->done); + return ret; +} + +typedef int (*init_req_fn)(struct cachefiles_req *req, void *private); + +static int cachefiles_ondemand_send_req(struct cachefiles_object *object, + enum cachefiles_opcode opcode, + size_t data_len, + init_req_fn init_req, + void *private) +{ + struct cachefiles_cache *cache = object->volume->cache; + struct cachefiles_req *req; + XA_STATE(xas, &cache->reqs, 0); + int ret; + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return 0; + + if (test_bit(CACHEFILES_DEAD, &cache->flags)) + return -EIO; + + req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->object = object; + init_completion(&req->done); + req->msg.opcode = opcode; + req->msg.len = sizeof(struct cachefiles_msg) + data_len; + + ret = init_req(req, private); + if (ret) + goto out; + + do { + /* + * Stop enqueuing the request when daemon is dying. The + * following two operations need to be atomic as a whole. + * 1) check cache state, and + * 2) enqueue request if cache is alive. + * Otherwise the request may be enqueued after xarray has been + * flushed, leaving the orphan request never being completed. 
+ * + * CPU 1 CPU 2 + * ===== ===== + * test CACHEFILES_DEAD bit + * set CACHEFILES_DEAD bit + * flush requests in the xarray + * enqueue the request + */ + xas_lock(&xas); + + if (test_bit(CACHEFILES_DEAD, &cache->flags)) { + xas_unlock(&xas); + ret = -EIO; + goto out; + } + + /* coupled with the barrier in cachefiles_flush_reqs() */ + smp_mb(); + + if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) { + WARN_ON_ONCE(object->ondemand_id == 0); + xas_unlock(&xas); + ret = -EIO; + goto out; + } + + xas.xa_index = 0; + xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK); + if (xas.xa_node == XAS_RESTART) + xas_set_err(&xas, -EBUSY); + xas_store(&xas, req); + xas_clear_mark(&xas, XA_FREE_MARK); + xas_set_mark(&xas, CACHEFILES_REQ_NEW); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + ret = xas_error(&xas); + if (ret) + goto out; + + wake_up_all(&cache->daemon_pollwq); + wait_for_completion(&req->done); + ret = req->error; +out: + kfree(req); + return ret; +} + +static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req, + void *private) +{ + struct cachefiles_object *object = req->object; + struct fscache_cookie *cookie = object->cookie; + struct fscache_volume *volume = object->volume->vcookie; + struct cachefiles_open *load = (void *)req->msg.data; + size_t volume_key_size, cookie_key_size; + void *volume_key, *cookie_key; + + /* + * Volume key is a NUL-terminated string. key[0] stores strlen() of the + * string, followed by the content of the string (excluding '\0'). + */ + volume_key_size = volume->key[0] + 1; + volume_key = volume->key + 1; + + /* Cookie key is binary data, which is netfs specific. */ + cookie_key_size = cookie->key_len; + cookie_key = fscache_get_key(cookie); + + if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) { + pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n"); + return -EINVAL; + } + + load->volume_key_size = volume_key_size; + load->cookie_key_size = cookie_key_size; + memcpy(load->data, volume_key, volume_key_size); + memcpy(load->data + volume_key_size, cookie_key, cookie_key_size); + + return 0; +} + +static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, + void *private) +{ + struct cachefiles_object *object = req->object; + int object_id = object->ondemand_id; + + /* + * It's possible that object id is still 0 if the cookie looking up + * phase failed before OPEN request has ever been sent. Also avoid + * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means + * anon_fd has already been closed. + */ + if (object_id <= 0) + return -ENOENT; + + req->msg.object_id = object_id; + trace_cachefiles_ondemand_close(object, &req->msg); + return 0; +} + +struct cachefiles_read_ctx { + loff_t off; + size_t len; +}; + +static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, + void *private) +{ + struct cachefiles_object *object = req->object; + struct cachefiles_read *load = (void *)req->msg.data; + struct cachefiles_read_ctx *read_ctx = private; + int object_id = object->ondemand_id; + + /* Stop enqueuing requests when daemon has closed anon_fd. 
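+ * A zero id means the object was never opened, while a negative id
+ * (CACHEFILES_ONDEMAND_ID_CLOSED, set by
+ * cachefiles_ondemand_fd_release()) means the anon_fd has already
+ * been closed; hence the WARN_ON_ONCE() below fires only for the
+ * zero case.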
+	 */
+	if (object_id <= 0) {
+		WARN_ON_ONCE(object_id == 0);
+		pr_info_once("READ: anonymous fd closed prematurely.\n");
+		return -EIO;
+	}
+
+	req->msg.object_id = object_id;
+	load->off = read_ctx->off;
+	load->len = read_ctx->len;
+	trace_cachefiles_ondemand_read(object, &req->msg, load);
+	return 0;
+}
+
+int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+{
+	struct fscache_cookie *cookie = object->cookie;
+	struct fscache_volume *volume = object->volume->vcookie;
+	size_t volume_key_size, cookie_key_size, data_len;
+
+	/*
+	 * CacheFiles first checks the cache file under the root cache
+	 * directory. If the coherency check fails, it falls back to
+	 * creating a new tmpfile as the cache file. Reuse the previously
+	 * allocated object ID if any.
+	 */
+	if (object->ondemand_id > 0)
+		return 0;
+
+	volume_key_size = volume->key[0] + 1;
+	cookie_key_size = cookie->key_len;
+	data_len = sizeof(struct cachefiles_open) +
+		   volume_key_size + cookie_key_size;
+
+	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
+			data_len, cachefiles_ondemand_init_open_req, NULL);
+}
+
+void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
+{
+	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
+			cachefiles_ondemand_init_close_req, NULL);
+}
+
+int cachefiles_ondemand_read(struct cachefiles_object *object,
+			     loff_t pos, size_t len)
+{
+	struct cachefiles_read_ctx read_ctx = {pos, len};
+
+	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
+			sizeof(struct cachefiles_read),
+			cachefiles_ondemand_init_read_req, &read_ctx);
+}
diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
new file mode 100644
index 0000000000..b401339f6e
--- /dev/null
+++ b/fs/cifs/cached_dir.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Functions to handle the cached directory entries
+ *
+ *  Copyright (c) 2022, Ronnie Sahlberg
+ */
+
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "smb2proto.h"
+#include "cached_dir.h"
+
+/*
+ * Open and cache a directory handle.
+ * If error then *cfid is not initialized.
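+ * On success the caller holds a reference on *cfid and must drop it
+ * with close_cached_dir().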
+ */ +int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + const char *path, + struct cifs_sb_info *cifs_sb, + bool lookup_only, struct cached_fid **ret_cfid) +{ + struct cifs_ses *ses; + struct TCP_Server_Info *server; + struct cifs_open_parms oparms; + struct smb2_create_rsp *o_rsp = NULL; + struct smb2_query_info_rsp *qi_rsp = NULL; + int resp_buftype[2]; + struct smb_rqst rqst[2]; + struct kvec rsp_iov[2]; + struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; + struct kvec qi_iov[1]; + int rc, flags = 0; + __le16 utf16_path = 0; /* Null - since an open of top of share */ + u8 oplock = SMB2_OPLOCK_LEVEL_II; + struct cifs_fid *pfid; + struct dentry *dentry; + struct cached_fid *cfid; + + if (tcon == NULL || tcon->nohandlecache || + is_smb1_server(tcon->ses->server)) + return -EOPNOTSUPP; + + ses = tcon->ses; + server = ses->server; + + if (cifs_sb->root == NULL) + return -ENOENT; + + if (strlen(path)) + return -ENOENT; + + dentry = cifs_sb->root; + + cfid = tcon->cfid; + mutex_lock(&cfid->fid_mutex); + if (cfid->is_valid) { + cifs_dbg(FYI, "found a cached root file handle\n"); + *ret_cfid = cfid; + kref_get(&cfid->refcount); + mutex_unlock(&cfid->fid_mutex); + return 0; + } + + /* + * We do not hold the lock for the open because in case + * SMB2_open needs to reconnect, it will end up calling + * cifs_mark_open_files_invalid() which takes the lock again + * thus causing a deadlock + */ + mutex_unlock(&cfid->fid_mutex); + + if (lookup_only) + return -ENOENT; + + if (smb3_encryption_required(tcon)) + flags |= CIFS_TRANSFORM_REQ; + + if (!server->ops->new_lease_key) + return -EIO; + + pfid = &cfid->fid; + server->ops->new_lease_key(pfid); + + memset(rqst, 0, sizeof(rqst)); + resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; + memset(rsp_iov, 0, sizeof(rsp_iov)); + + /* Open */ + memset(&open_iov, 0, sizeof(open_iov)); + rqst[0].rq_iov = open_iov; + rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; + + oparms.tcon = tcon; + oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE); + oparms.desired_access = FILE_READ_ATTRIBUTES; + oparms.disposition = FILE_OPEN; + oparms.fid = pfid; + oparms.reconnect = false; + + rc = SMB2_open_init(tcon, server, + &rqst[0], &oplock, &oparms, &utf16_path); + if (rc) + goto oshr_free; + smb2_set_next_command(tcon, &rqst[0]); + + memset(&qi_iov, 0, sizeof(qi_iov)); + rqst[1].rq_iov = qi_iov; + rqst[1].rq_nvec = 1; + + rc = SMB2_query_info_init(tcon, server, + &rqst[1], COMPOUND_FID, + COMPOUND_FID, FILE_ALL_INFORMATION, + SMB2_O_INFO_FILE, 0, + sizeof(struct smb2_file_all_info) + + PATH_MAX * 2, 0, NULL); + if (rc) + goto oshr_free; + + smb2_set_related(&rqst[1]); + + rc = compound_send_recv(xid, ses, server, + flags, 2, rqst, + resp_buftype, rsp_iov); + mutex_lock(&cfid->fid_mutex); + + /* + * Now we need to check again as the cached root might have + * been successfully re-opened from a concurrent process + */ + + if (cfid->is_valid) { + /* work was already done */ + + /* stash fids for close() later */ + struct cifs_fid fid = { + .persistent_fid = pfid->persistent_fid, + .volatile_fid = pfid->volatile_fid, + }; + + /* + * caller expects this func to set the fid in cfid to valid + * cached root, so increment the refcount. 
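+		 * Our own compound open may or may not have succeeded; if
+		 * it did, the duplicate handle is closed again below,
+		 * outside the mutex.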
+		 */
+		kref_get(&cfid->refcount);
+
+		mutex_unlock(&cfid->fid_mutex);
+
+		if (rc == 0) {
+			/* close extra handle outside of crit sec */
+			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+		}
+		rc = 0;
+		goto oshr_free;
+	}
+
+	/* Cached root is still invalid, continue normally */
+
+	if (rc) {
+		if (rc == -EREMCHG) {
+			tcon->need_reconnect = true;
+			pr_warn_once("server share %s deleted\n",
+				     tcon->treeName);
+		}
+		goto oshr_exit;
+	}
+
+	atomic_inc(&tcon->num_remote_opens);
+
+	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+#ifdef CONFIG_CIFS_DEBUG2
+	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
+#endif /* CIFS_DEBUG2 */
+
+	cfid->tcon = tcon;
+	cfid->is_valid = true;
+	cfid->dentry = dentry;
+	dget(dentry);
+	kref_init(&cfid->refcount);
+
+	/* BB TBD check to see if oplock level check can be removed below */
+	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
+		/*
+		 * See commit 2f94a3125b87. Increment the refcount when we
+		 * get a lease for root, release it if lease break occurs
+		 */
+		kref_get(&cfid->refcount);
+		cfid->has_lease = true;
+		smb2_parse_contexts(server, o_rsp,
+				    &oparms.fid->epoch,
+				    oparms.fid->lease_key, &oplock,
+				    NULL, NULL);
+	} else
+		goto oshr_exit;
+
+	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
+		goto oshr_exit;
+	if (!smb2_validate_and_copy_iov(
+				le16_to_cpu(qi_rsp->OutputBufferOffset),
+				sizeof(struct smb2_file_all_info),
+				&rsp_iov[1], sizeof(struct smb2_file_all_info),
+				(char *)&cfid->file_all_info))
+		cfid->file_all_info_is_valid = true;
+
+	cfid->time = jiffies;
+
+oshr_exit:
+	mutex_unlock(&cfid->fid_mutex);
+oshr_free:
+	SMB2_open_free(&rqst[0]);
+	SMB2_query_info_free(&rqst[1]);
+	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+	if (rc == 0)
+		*ret_cfid = cfid;
+
+	return rc;
+}
+
+int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
+			      struct dentry *dentry,
+			      struct cached_fid **ret_cfid)
+{
+	struct cached_fid *cfid;
+
+	cfid = tcon->cfid;
+
+	mutex_lock(&cfid->fid_mutex);
+	if (cfid->dentry == dentry) {
+		cifs_dbg(FYI, "found a cached root file handle by dentry\n");
+		*ret_cfid = cfid;
+		kref_get(&cfid->refcount);
+		mutex_unlock(&cfid->fid_mutex);
+		return 0;
+	}
+	mutex_unlock(&cfid->fid_mutex);
+	return -ENOENT;
+}
+
+static void
+smb2_close_cached_fid(struct kref *ref)
+{
+	struct cached_fid *cfid = container_of(ref, struct cached_fid,
+					       refcount);
+	struct cached_dirent *dirent, *q;
+
+	if (cfid->is_valid) {
+		cifs_dbg(FYI, "clear cached root file handle\n");
+		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+			   cfid->fid.volatile_fid);
+	}
+
+	/*
+	 * We only check validity above to send SMB2_close,
+	 * but we still need to invalidate these entries
+	 * when this function is called
+	 */
+	cfid->is_valid = false;
+	cfid->file_all_info_is_valid = false;
+	cfid->has_lease = false;
+	if (cfid->dentry) {
+		dput(cfid->dentry);
+		cfid->dentry = NULL;
+	}
+	/*
+	 * Delete all cached dirent names
+	 */
+	mutex_lock(&cfid->dirents.de_mutex);
+	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
+		list_del(&dirent->entry);
+		kfree(dirent->name);
+		kfree(dirent);
+	}
+	cfid->dirents.is_valid = 0;
+	cfid->dirents.is_failed = 0;
+	cfid->dirents.ctx = NULL;
+	cfid->dirents.pos = 0;
+	mutex_unlock(&cfid->dirents.de_mutex);
+}
+
+void
close_cached_dir(struct cached_fid *cfid) +{ + mutex_lock(&cfid->fid_mutex); + kref_put(&cfid->refcount, smb2_close_cached_fid); + mutex_unlock(&cfid->fid_mutex); +} + +void close_cached_dir_lease_locked(struct cached_fid *cfid) +{ + if (cfid->has_lease) { + cfid->has_lease = false; + kref_put(&cfid->refcount, smb2_close_cached_fid); + } +} + +void close_cached_dir_lease(struct cached_fid *cfid) +{ + mutex_lock(&cfid->fid_mutex); + close_cached_dir_lease_locked(cfid); + mutex_unlock(&cfid->fid_mutex); +} + +/* + * Called from cifs_kill_sb when we unmount a share + */ +void close_all_cached_dirs(struct cifs_sb_info *cifs_sb) +{ + struct rb_root *root = &cifs_sb->tlink_tree; + struct rb_node *node; + struct cached_fid *cfid; + struct cifs_tcon *tcon; + struct tcon_link *tlink; + + for (node = rb_first(root); node; node = rb_next(node)) { + tlink = rb_entry(node, struct tcon_link, tl_rbnode); + tcon = tlink_tcon(tlink); + if (IS_ERR(tcon)) + continue; + cfid = tcon->cfid; + mutex_lock(&cfid->fid_mutex); + if (cfid->dentry) { + dput(cfid->dentry); + cfid->dentry = NULL; + } + mutex_unlock(&cfid->fid_mutex); + } +} + +/* + * Invalidate and close all cached dirs when a TCON has been reset + * due to a session loss. + */ +void invalidate_all_cached_dirs(struct cifs_tcon *tcon) +{ + mutex_lock(&tcon->cfid->fid_mutex); + tcon->cfid->is_valid = false; + /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ + close_cached_dir_lease_locked(tcon->cfid); + memset(&tcon->cfid->fid, 0, sizeof(struct cifs_fid)); + mutex_unlock(&tcon->cfid->fid_mutex); +} + +static void +smb2_cached_lease_break(struct work_struct *work) +{ + struct cached_fid *cfid = container_of(work, + struct cached_fid, lease_break); + + close_cached_dir_lease(cfid); +} + +int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]) +{ + if (tcon->cfid->is_valid && + !memcmp(lease_key, + tcon->cfid->fid.lease_key, + SMB2_LEASE_KEY_SIZE)) { + tcon->cfid->time = 0; + INIT_WORK(&tcon->cfid->lease_break, + smb2_cached_lease_break); + queue_work(cifsiod_wq, + &tcon->cfid->lease_break); + return true; + } + return false; +} + +struct cached_fid *init_cached_dir(void) +{ + struct cached_fid *cfid; + + cfid = kzalloc(sizeof(*cfid), GFP_KERNEL); + if (!cfid) + return NULL; + INIT_LIST_HEAD(&cfid->dirents.entries); + mutex_init(&cfid->dirents.de_mutex); + mutex_init(&cfid->fid_mutex); + return cfid; +} + +void free_cached_dir(struct cifs_tcon *tcon) +{ + kfree(tcon->cfid); +} diff --git a/fs/cifs/cached_dir.h b/fs/cifs/cached_dir.h new file mode 100644 index 0000000000..bd262dc8b1 --- /dev/null +++ b/fs/cifs/cached_dir.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Functions to handle the cached directory entries + * + * Copyright (c) 2022, Ronnie Sahlberg + */ + +#ifndef _CACHED_DIR_H +#define _CACHED_DIR_H + + +struct cached_dirent { + struct list_head entry; + char *name; + int namelen; + loff_t pos; + + struct cifs_fattr fattr; +}; + +struct cached_dirents { + bool is_valid:1; + bool is_failed:1; + struct dir_context *ctx; /* + * Only used to make sure we only take entries + * from a single context. Never dereferenced. 
+ */ + struct mutex de_mutex; + int pos; /* Expected ctx->pos */ + struct list_head entries; +}; + +struct cached_fid { + bool is_valid:1; /* Do we have a useable root fid */ + bool file_all_info_is_valid:1; + bool has_lease:1; + unsigned long time; /* jiffies of when lease was taken */ + struct kref refcount; + struct cifs_fid fid; + struct mutex fid_mutex; + struct cifs_tcon *tcon; + struct dentry *dentry; + struct work_struct lease_break; + struct smb2_file_all_info file_all_info; + struct cached_dirents dirents; +}; + +extern struct cached_fid *init_cached_dir(void); +extern void free_cached_dir(struct cifs_tcon *tcon); +extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + const char *path, + struct cifs_sb_info *cifs_sb, + bool lookup_only, struct cached_fid **cfid); +extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon, + struct dentry *dentry, + struct cached_fid **cfid); +extern void close_cached_dir(struct cached_fid *cfid); +extern void close_cached_dir_lease(struct cached_fid *cfid); +extern void close_cached_dir_lease_locked(struct cached_fid *cfid); +extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb); +extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon); +extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]); + +#endif /* _CACHED_DIR_H */ diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c new file mode 100644 index 0000000000..a0ef63cfce --- /dev/null +++ b/fs/efivarfs/vars.c @@ -0,0 +1,738 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Originally from efivars.c + * + * Copyright (C) 2001,2003,2004 Dell + * Copyright (C) 2004 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +MODULE_IMPORT_NS(EFIVAR); + +static bool +validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) +{ + struct efi_generic_dev_path *node; + int offset = 0; + + node = (struct efi_generic_dev_path *)buffer; + + if (len < sizeof(*node)) + return false; + + while (offset <= len - sizeof(*node) && + node->length >= sizeof(*node) && + node->length <= len - offset) { + offset += node->length; + + if ((node->type == EFI_DEV_END_PATH || + node->type == EFI_DEV_END_PATH2) && + node->sub_type == EFI_DEV_END_ENTIRE) + return true; + + node = (struct efi_generic_dev_path *)(buffer + offset); + } + + /* + * If we're here then either node->length pointed past the end + * of the buffer or we reached the end of the buffer without + * finding a device path end node. 
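+	 * (node->length is re-validated on every iteration, so a malformed
+	 * length can never advance offset past the end of the buffer.)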
+ */ + return false; +} + +static bool +validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) +{ + /* An array of 16-bit integers */ + if ((len % 2) != 0) + return false; + + return true; +} + +static bool +validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) +{ + u16 filepathlength; + int i, desclength = 0, namelen; + + namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { + if (var_name[i] > 127 || + hex_to_bin(var_name[i] & 0xff) < 0) + return true; + } + + /* Reject it if there's 4 digits of hex and then further content */ + if (namelen > match + 4) + return false; + + /* A valid entry must be at least 8 bytes */ + if (len < 8) + return false; + + filepathlength = buffer[4] | buffer[5] << 8; + + /* + * There's no stored length for the description, so it has to be + * found by hand + */ + desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; + + /* Each boot entry must have a descriptor */ + if (!desclength) + return false; + + /* + * If the sum of the length of the description, the claimed filepath + * length and the original header are greater than the length of the + * variable, it's malformed + */ + if ((desclength + filepathlength + 6) > len) + return false; + + /* + * And, finally, check the filepath + */ + return validate_device_path(var_name, match, buffer + desclength + 6, + filepathlength); +} + +static bool +validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) +{ + /* A single 16-bit integer */ + if (len != 2) + return false; + + return true; +} + +static bool +validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) +{ + int i; + + for (i = 0; i < len; i++) { + if (buffer[i] > 127) + return false; + + if (buffer[i] == 0) + return true; + } + + return false; +} + +struct variable_validate { + efi_guid_t vendor; + char *name; + bool (*validate)(efi_char16_t *var_name, int match, u8 *data, + unsigned long len); +}; + +/* + * This is the list of variables we need to validate, as well as the + * whitelist for what we think is safe not to default to immutable. + * + * If it has a validate() method that's not NULL, it'll go into the + * validation routine. If not, it is assumed valid, but still used for + * whitelisting. + * + * Note that it's sorted by {vendor,name}, but globbed names must come after + * any other name with the same prefix. 
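+ * ("BootNext" and "BootOrder" are listed before "Boot*" below, so the
+ * exact names are matched before the glob entry swallows them.)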
+ */ +static const struct variable_validate variable_validate[] = { + { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, + { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, + { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, + { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, + { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, + { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, + { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, + { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, + { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, + { LINUX_EFI_CRASH_GUID, "*", NULL }, + { NULL_GUID, "", NULL }, +}; + +/* + * Check if @var_name matches the pattern given in @match_name. + * + * @var_name: an array of @len non-NUL characters. + * @match_name: a NUL-terminated pattern string, optionally ending in "*". A + * final "*" character matches any trailing characters @var_name, + * including the case when there are none left in @var_name. + * @match: on output, the number of non-wildcard characters in @match_name + * that @var_name matches, regardless of the return value. + * @return: whether @var_name fully matches @match_name. + */ +static bool +variable_matches(const char *var_name, size_t len, const char *match_name, + int *match) +{ + for (*match = 0; ; (*match)++) { + char c = match_name[*match]; + + switch (c) { + case '*': + /* Wildcard in @match_name means we've matched. */ + return true; + + case '\0': + /* @match_name has ended. Has @var_name too? */ + return (*match == len); + + default: + /* + * We've reached a non-wildcard char in @match_name. + * Continue only if there's an identical character in + * @var_name. 
+ */ + if (*match < len && c == var_name[*match]) + continue; + return false; + } + } +} + +bool +efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, + unsigned long data_size) +{ + int i; + unsigned long utf8_size; + u8 *utf8_name; + + utf8_size = ucs2_utf8size(var_name); + utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); + if (!utf8_name) + return false; + + ucs2_as_utf8(utf8_name, var_name, utf8_size); + utf8_name[utf8_size] = '\0'; + + for (i = 0; variable_validate[i].name[0] != '\0'; i++) { + const char *name = variable_validate[i].name; + int match = 0; + + if (efi_guidcmp(vendor, variable_validate[i].vendor)) + continue; + + if (variable_matches(utf8_name, utf8_size+1, name, &match)) { + if (variable_validate[i].validate == NULL) + break; + kfree(utf8_name); + return variable_validate[i].validate(var_name, match, + data, data_size); + } + } + kfree(utf8_name); + return true; +} + +bool +efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, + size_t len) +{ + int i; + bool found = false; + int match = 0; + + /* + * Check if our variable is in the validated variables list + */ + for (i = 0; variable_validate[i].name[0] != '\0'; i++) { + if (efi_guidcmp(variable_validate[i].vendor, vendor)) + continue; + + if (variable_matches(var_name, len, + variable_validate[i].name, &match)) { + found = true; + break; + } + } + + /* + * If it's in our list, it is removable. + */ + return found; +} + +static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, + struct list_head *head) +{ + struct efivar_entry *entry, *n; + unsigned long strsize1, strsize2; + bool found = false; + + strsize1 = ucs2_strsize(variable_name, 1024); + list_for_each_entry_safe(entry, n, head, list) { + strsize2 = ucs2_strsize(entry->var.VariableName, 1024); + if (strsize1 == strsize2 && + !memcmp(variable_name, &(entry->var.VariableName), + strsize2) && + !efi_guidcmp(entry->var.VendorGuid, + *vendor)) { + found = true; + break; + } + } + return found; +} + +/* + * Returns the size of variable_name, in bytes, including the + * terminating NULL character, or variable_name_size if no NULL + * character is found among the first variable_name_size bytes. + */ +static unsigned long var_name_strnsize(efi_char16_t *variable_name, + unsigned long variable_name_size) +{ + unsigned long len; + efi_char16_t c; + + /* + * The variable name is, by definition, a NULL-terminated + * string, so make absolutely sure that variable_name_size is + * the value we expect it to be. If not, return the real size. + */ + for (len = 2; len <= variable_name_size; len += sizeof(c)) { + c = variable_name[(len / sizeof(c)) - 1]; + if (!c) + break; + } + + return min(len, variable_name_size); +} + +/* + * Print a warning when duplicate EFI variables are encountered and + * disable the sysfs workqueue since the firmware is buggy. + */ +static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid, + unsigned long len16) +{ + size_t i, len8 = len16 / sizeof(efi_char16_t); + char *str8; + + str8 = kzalloc(len8, GFP_KERNEL); + if (!str8) + return; + + for (i = 0; i < len8; i++) + str8[i] = str16[i]; + + printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", + str8, vendor_guid); + kfree(str8); +} + +/** + * efivar_init - build the initial list of EFI variables + * @func: callback function to invoke for every variable + * @data: function-specific data to pass to @func + * @duplicates: error if we encounter duplicates on @head? 
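+ *              (when true, enumeration is aborted early if the firmware
+ *              reports the same variable name twice; see the loop below)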
+ * @head: initialised head of variable list + * + * Get every EFI variable from the firmware and invoke @func. @func + * should call efivar_entry_add() to build the list of variables. + * + * Returns 0 on success, or a kernel error code on failure. + */ +int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), + void *data, bool duplicates, struct list_head *head) +{ + unsigned long variable_name_size = 1024; + efi_char16_t *variable_name; + efi_status_t status; + efi_guid_t vendor_guid; + int err = 0; + + variable_name = kzalloc(variable_name_size, GFP_KERNEL); + if (!variable_name) { + printk(KERN_ERR "efivars: Memory allocation failed.\n"); + return -ENOMEM; + } + + err = efivar_lock(); + if (err) + goto free; + + /* + * Per EFI spec, the maximum storage allocated for both + * the variable name and variable data is 1024 bytes. + */ + + do { + variable_name_size = 1024; + + status = efivar_get_next_variable(&variable_name_size, + variable_name, + &vendor_guid); + switch (status) { + case EFI_SUCCESS: + variable_name_size = var_name_strnsize(variable_name, + variable_name_size); + + /* + * Some firmware implementations return the + * same variable name on multiple calls to + * get_next_variable(). Terminate the loop + * immediately as there is no guarantee that + * we'll ever see a different variable name, + * and may end up looping here forever. + */ + if (duplicates && + variable_is_present(variable_name, &vendor_guid, + head)) { + dup_variable_bug(variable_name, &vendor_guid, + variable_name_size); + status = EFI_NOT_FOUND; + } else { + err = func(variable_name, vendor_guid, + variable_name_size, data); + if (err) + status = EFI_NOT_FOUND; + } + break; + case EFI_UNSUPPORTED: + err = -EOPNOTSUPP; + status = EFI_NOT_FOUND; + break; + case EFI_NOT_FOUND: + break; + default: + printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n", + status); + status = EFI_NOT_FOUND; + break; + } + + } while (status != EFI_NOT_FOUND); + + efivar_unlock(); +free: + kfree(variable_name); + + return err; +} + +/** + * efivar_entry_add - add entry to variable list + * @entry: entry to add to list + * @head: list head + * + * Returns 0 on success, or a kernel error code on failure. + */ +int efivar_entry_add(struct efivar_entry *entry, struct list_head *head) +{ + int err; + + err = efivar_lock(); + if (err) + return err; + list_add(&entry->list, head); + efivar_unlock(); + + return 0; +} + +/** + * __efivar_entry_add - add entry to variable list + * @entry: entry to add to list + * @head: list head + */ +void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head) +{ + list_add(&entry->list, head); +} + +/** + * efivar_entry_remove - remove entry from variable list + * @entry: entry to remove from list + * + * Returns 0 on success, or a kernel error code on failure. + */ +void efivar_entry_remove(struct efivar_entry *entry) +{ + list_del(&entry->list); +} + +/* + * efivar_entry_list_del_unlock - remove entry from variable list + * @entry: entry to remove + * + * Remove @entry from the variable list and release the list lock. + * + * NOTE: slightly weird locking semantics here - we expect to be + * called with the efivars lock already held, and we release it before + * returning. This is because this function is usually called after + * set_variable() while the lock is still held. 
+ */
+static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+{
+	list_del(&entry->list);
+	efivar_unlock();
+}
+
+/**
+ * efivar_entry_delete - delete variable and remove entry from list
+ * @entry: entry containing variable to delete
+ *
+ * Delete the variable from the firmware and remove @entry from the
+ * variable list. It is the caller's responsibility to free @entry
+ * once we return.
+ *
+ * Returns 0 on success, -EINTR if we can't grab the semaphore,
+ * converted EFI status code if set_variable() fails.
+ */
+int efivar_entry_delete(struct efivar_entry *entry)
+{
+	efi_status_t status;
+	int err;
+
+	err = efivar_lock();
+	if (err)
+		return err;
+
+	status = efivar_set_variable_locked(entry->var.VariableName,
+					    &entry->var.VendorGuid,
+					    0, 0, NULL, false);
+	if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
+		efivar_unlock();
+		return efi_status_to_err(status);
+	}
+
+	efivar_entry_list_del_unlock(entry);
+	return 0;
+}
+
+/**
+ * efivar_entry_size - obtain the size of a variable
+ * @entry: entry for this variable
+ * @size: location to store the variable's size
+ */
+int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+{
+	efi_status_t status;
+	int err;
+
+	*size = 0;
+
+	err = efivar_lock();
+	if (err)
+		return err;
+
+	status = efivar_get_variable(entry->var.VariableName,
+				     &entry->var.VendorGuid, NULL, size, NULL);
+	efivar_unlock();
+
+	if (status != EFI_BUFFER_TOO_SMALL)
+		return efi_status_to_err(status);
+
+	return 0;
+}
+
+/**
+ * __efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ *
+ * The caller MUST hold the efivars lock (efivar_lock()) across the
+ * invocation of this function.
+ */
+int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+		       unsigned long *size, void *data)
+{
+	efi_status_t status;
+
+	status = efivar_get_variable(entry->var.VariableName,
+				     &entry->var.VendorGuid,
+				     attributes, size, data);
+
+	return efi_status_to_err(status);
+}
+
+/**
+ * efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ */
+int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+		     unsigned long *size, void *data)
+{
+	int err;
+
+	err = efivar_lock();
+	if (err)
+		return err;
+	err = __efivar_entry_get(entry, attributes, size, data);
+	efivar_unlock();
+
+	return err;
+}
+
+/**
+ * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
+ * @entry: entry containing variable to set and get
+ * @attributes: attributes of variable to be written
+ * @size: size of data buffer
+ * @data: buffer containing data to write
+ * @set: did the set_variable() call succeed?
+ *
+ * This is a pretty special (complex) function. See efivarfs_file_write().
+ *
+ * Atomically call set_variable() for @entry and if the call is
+ * successful, return the new size of the variable from get_variable()
+ * in @size. The success of set_variable() is indicated by @set.
+ *
+ * Returns 0 on success, -EINVAL if the variable data is invalid,
+ * -ENOSPC if the firmware does not have enough available space, or a
+ * converted EFI status code if either of set_variable() or
+ * get_variable() fail.
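+ * It may also return -EINTR if the wait for the efivars lock is
+ * interrupted before set_variable() is attempted.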
+ * + * If the EFI variable does not exist when calling set_variable() + * (EFI_NOT_FOUND), @entry is removed from the variable list. + */ +int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, + unsigned long *size, void *data, bool *set) +{ + efi_char16_t *name = entry->var.VariableName; + efi_guid_t *vendor = &entry->var.VendorGuid; + efi_status_t status; + int err; + + *set = false; + + if (efivar_validate(*vendor, name, data, *size) == false) + return -EINVAL; + + /* + * The lock here protects the get_variable call, the conditional + * set_variable call, and removal of the variable from the efivars + * list (in the case of an authenticated delete). + */ + err = efivar_lock(); + if (err) + return err; + + /* + * Ensure that the available space hasn't shrunk below the safe level + */ + status = check_var_size(attributes, *size + ucs2_strsize(name, 1024)); + if (status != EFI_SUCCESS) { + if (status != EFI_UNSUPPORTED) { + err = efi_status_to_err(status); + goto out; + } + + if (*size > 65536) { + err = -ENOSPC; + goto out; + } + } + + status = efivar_set_variable_locked(name, vendor, attributes, *size, + data, false); + if (status != EFI_SUCCESS) { + err = efi_status_to_err(status); + goto out; + } + + *set = true; + + /* + * Writing to the variable may have caused a change in size (which + * could either be an append or an overwrite), or the variable to be + * deleted. Perform a GetVariable() so we can tell what actually + * happened. + */ + *size = 0; + status = efivar_get_variable(entry->var.VariableName, + &entry->var.VendorGuid, + NULL, size, NULL); + + if (status == EFI_NOT_FOUND) + efivar_entry_list_del_unlock(entry); + else + efivar_unlock(); + + if (status && status != EFI_BUFFER_TOO_SMALL) + return efi_status_to_err(status); + + return 0; + +out: + efivar_unlock(); + return err; + +} + +/** + * efivar_entry_iter - iterate over variable list + * @func: callback function + * @head: head of variable list + * @data: function-specific data to pass to callback + * + * Iterate over the list of EFI variables and call @func with every + * entry on the list. It is safe for @func to remove entries in the + * list via efivar_entry_delete() while iterating. 
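+ * (The walk uses list_for_each_entry_safe(), which is what makes
+ * removing the current entry safe.)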
+ *
+ * Some notes for the callback function:
+ * - a non-zero return value indicates an error and terminates the loop
+ * - @func is called from atomic context
+ */
+int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+		      struct list_head *head, void *data)
+{
+	struct efivar_entry *entry, *n;
+	int err = 0;
+
+	err = efivar_lock();
+	if (err)
+		return err;
+
+	list_for_each_entry_safe(entry, n, head, list) {
+		err = func(entry, data);
+		if (err)
+			break;
+	}
+	efivar_unlock();
+
+	return err;
+}
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
new file mode 100644
index 0000000000..8e01d89c33
--- /dev/null
+++ b/fs/erofs/fscache.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2022, Alibaba Cloud
+ */
+#include <linux/fscache.h>
+#include "internal.h"
+
+static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
+					     loff_t start, size_t len)
+{
+	struct netfs_io_request *rreq;
+
+	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+	if (!rreq)
+		return ERR_PTR(-ENOMEM);
+
+	rreq->start = start;
+	rreq->len = len;
+	rreq->mapping = mapping;
+	rreq->inode = mapping->host;
+	INIT_LIST_HEAD(&rreq->subrequests);
+	refcount_set(&rreq->ref, 1);
+	return rreq;
+}
+
+static void erofs_fscache_put_request(struct netfs_io_request *rreq)
+{
+	if (!refcount_dec_and_test(&rreq->ref))
+		return;
+	if (rreq->cache_resources.ops)
+		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+	kfree(rreq);
+}
+
+static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
+{
+	if (!refcount_dec_and_test(&subreq->ref))
+		return;
+	erofs_fscache_put_request(subreq->rreq);
+	kfree(subreq);
+}
+
+static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
+{
+	struct netfs_io_subrequest *subreq;
+
+	while (!list_empty(&rreq->subrequests)) {
+		subreq = list_first_entry(&rreq->subrequests,
+				struct netfs_io_subrequest, rreq_link);
+		list_del(&subreq->rreq_link);
+		erofs_fscache_put_subrequest(subreq);
+	}
+}
+
+static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
+{
+	struct netfs_io_subrequest *subreq;
+	struct folio *folio;
+	unsigned int iopos = 0;
+	pgoff_t start_page = rreq->start / PAGE_SIZE;
+	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+	bool subreq_failed = false;
+
+	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+	subreq = list_first_entry(&rreq->subrequests,
+				  struct netfs_io_subrequest, rreq_link);
+	subreq_failed = (subreq->error < 0);
+
+	rcu_read_lock();
+	xas_for_each(&xas, folio, last_page) {
+		unsigned int pgpos =
+			(folio_index(folio) - start_page) * PAGE_SIZE;
+		unsigned int pgend = pgpos + folio_size(folio);
+		bool pg_failed = false;
+
+		for (;;) {
+			if (!subreq) {
+				pg_failed = true;
+				break;
+			}
+
+			pg_failed |= subreq_failed;
+			if (pgend < iopos + subreq->len)
+				break;
+
+			iopos += subreq->len;
+			if (!list_is_last(&subreq->rreq_link,
+					  &rreq->subrequests)) {
+				subreq = list_next_entry(subreq, rreq_link);
+				subreq_failed = (subreq->error < 0);
+			} else {
+				subreq = NULL;
+				subreq_failed = false;
+			}
+			if (pgend == iopos)
+				break;
+		}
+
+		if (!pg_failed)
+			folio_mark_uptodate(folio);
+
+		folio_unlock(folio);
+	}
+	rcu_read_unlock();
+}
+
+static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
+{
+	erofs_fscache_rreq_unlock_folios(rreq);
+	erofs_fscache_clear_subrequests(rreq);
+	erofs_fscache_put_request(rreq);
+}
+
+static void erofs_fscache_subreq_complete(void *priv,
+		ssize_t transferred_or_error, bool was_async)
+{
+	struct netfs_io_subrequest *subreq = priv;
+	struct netfs_io_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error))
+		subreq->error = transferred_or_error;
+
+	if (atomic_dec_and_test(&rreq->nr_outstanding))
+		erofs_fscache_rreq_complete(rreq);
+
+	erofs_fscache_put_subrequest(subreq);
+}
+
+/*
+ * Read data from fscache and fill the read data into page cache described by
+ * @rreq, whose start offset and length must both be PAGE_SIZE-aligned.
+ * @pstart describes the start physical address in the cache file.
+ */
+static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
+		struct netfs_io_request *rreq, loff_t pstart)
+{
+	enum netfs_io_source source;
+	struct super_block *sb = rreq->mapping->host->i_sb;
+	struct netfs_io_subrequest *subreq;
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct iov_iter iter;
+	loff_t start = rreq->start;
+	size_t len = rreq->len;
+	size_t done = 0;
+	int ret;
+
+	atomic_set(&rreq->nr_outstanding, 1);
+
+	ret = fscache_begin_read_operation(cres, cookie);
+	if (ret)
+		goto out;
+
+	while (done < len) {
+		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
+				 GFP_KERNEL);
+		if (subreq) {
+			INIT_LIST_HEAD(&subreq->rreq_link);
+			refcount_set(&subreq->ref, 2);
+			subreq->rreq = rreq;
+			refcount_inc(&rreq->ref);
+		} else {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		subreq->start = pstart + done;
+		subreq->len = len - done;
+		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;
+
+		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+
+		source = cres->ops->prepare_read(subreq, LLONG_MAX);
+		if (WARN_ON(subreq->len == 0))
+			source = NETFS_INVALID_READ;
+		if (source != NETFS_READ_FROM_CACHE) {
+			erofs_err(sb, "failed to fscache prepare_read (source %d)",
+				  source);
+			ret = -EIO;
+			subreq->error = ret;
+			erofs_fscache_put_subrequest(subreq);
+			goto out;
+		}
+
+		atomic_inc(&rreq->nr_outstanding);
+
+		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+				start + done, subreq->len);
+
+		ret = fscache_read(cres, subreq->start, &iter,
+				   NETFS_READ_HOLE_FAIL,
+				   erofs_fscache_subreq_complete, subreq);
+		if (ret == -EIOCBQUEUED)
+			ret = 0;
+		if (ret) {
+			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
+			goto out;
+		}
+
+		done += subreq->len;
+	}
+out:
+	if (atomic_dec_and_test(&rreq->nr_outstanding))
+		erofs_fscache_rreq_complete(rreq);
+
+	return ret;
+}
+
+static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
+{
+	int ret;
+	struct super_block *sb = folio_mapping(folio)->host->i_sb;
+	struct netfs_io_request *rreq;
+	struct erofs_map_dev mdev = {
+		.m_deviceid = 0,
+		.m_pa = folio_pos(folio),
+	};
+
+	ret = erofs_map_dev(sb, &mdev);
+	if (ret)
+		goto out;
+
+	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
+				folio_pos(folio), folio_size(folio));
+	if (IS_ERR(rreq)) {
+		ret = PTR_ERR(rreq);
+		goto out;
+	}
+
+	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+				rreq, mdev.m_pa);
+out:
+	folio_unlock(folio);
+	return ret;
+}
+
+static int erofs_fscache_read_folio_inline(struct folio *folio,
+					   struct erofs_map_blocks *map)
+{
+	struct super_block *sb = folio_mapping(folio)->host->i_sb;
+	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+	erofs_blk_t blknr;
+	size_t offset, len;
+	void *src, *dst;
+
+	/* For the tail-packing layout, the offset may be non-zero.
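+	 * The inline data shares its block with other metadata, so
+	 * erofs_blknr()/erofs_blkoff() below split m_pa into the block
+	 * number and the intra-block offset.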
+	 */
+	offset = erofs_blkoff(map->m_pa);
+	blknr = erofs_blknr(map->m_pa);
+	len = map->m_llen;
+
+	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
+	if (IS_ERR(src))
+		return PTR_ERR(src);
+
+	dst = kmap_local_folio(folio, 0);
+	memcpy(dst, src + offset, len);
+	memset(dst + len, 0, PAGE_SIZE - len);
+	kunmap_local(dst);
+
+	erofs_put_metabuf(&buf);
+	return 0;
+}
+
+static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
+{
+	struct inode *inode = folio_mapping(folio)->host;
+	struct super_block *sb = inode->i_sb;
+	struct erofs_map_blocks map;
+	struct erofs_map_dev mdev;
+	struct netfs_io_request *rreq;
+	erofs_off_t pos;
+	loff_t pstart;
+	int ret;
+
+	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);
+
+	pos = folio_pos(folio);
+	map.m_la = pos;
+
+	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+	if (ret)
+		goto out_unlock;
+
+	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+		folio_zero_range(folio, 0, folio_size(folio));
+		goto out_uptodate;
+	}
+
+	if (map.m_flags & EROFS_MAP_META) {
+		ret = erofs_fscache_read_folio_inline(folio, &map);
+		goto out_uptodate;
+	}
+
+	mdev = (struct erofs_map_dev) {
+		.m_deviceid = map.m_deviceid,
+		.m_pa = map.m_pa,
+	};
+
+	ret = erofs_map_dev(sb, &mdev);
+	if (ret)
+		goto out_unlock;
+
+	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
+				folio_pos(folio), folio_size(folio));
+	if (IS_ERR(rreq)) {
+		ret = PTR_ERR(rreq);
+		goto out_unlock;
+	}
+
+	pstart = mdev.m_pa + (pos - map.m_la);
+	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+			rreq, pstart);
+
+out_uptodate:
+	if (!ret)
+		folio_mark_uptodate(folio);
+out_unlock:
+	folio_unlock(folio);
+	return ret;
+}
+
+static void erofs_fscache_advance_folios(struct readahead_control *rac,
+					 size_t len, bool unlock)
+{
+	while (len) {
+		struct folio *folio = readahead_folio(rac);
+		len -= folio_size(folio);
+		if (unlock) {
+			folio_mark_uptodate(folio);
+			folio_unlock(folio);
+		}
+	}
+}
+
+static void erofs_fscache_readahead(struct readahead_control *rac)
+{
+	struct inode *inode = rac->mapping->host;
+	struct super_block *sb = inode->i_sb;
+	size_t len, count, done = 0;
+	erofs_off_t pos;
+	loff_t start, offset;
+	int ret;
+
+	if (!readahead_count(rac))
+		return;
+
+	start = readahead_pos(rac);
+	len = readahead_length(rac);
+
+	do {
+		struct erofs_map_blocks map;
+		struct erofs_map_dev mdev;
+		struct netfs_io_request *rreq;
+
+		pos = start + done;
+		map.m_la = pos;
+
+		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+		if (ret)
+			return;
+
+		offset = start + done;
+		count = min_t(size_t, map.m_llen - (pos - map.m_la),
+			      len - done);
+
+		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+			struct iov_iter iter;
+
+			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
+					offset, count);
+			iov_iter_zero(count, &iter);
+
+			erofs_fscache_advance_folios(rac, count, true);
+			ret = count;
+			continue;
+		}
+
+		if (map.m_flags & EROFS_MAP_META) {
+			struct folio *folio = readahead_folio(rac);
+
+			ret = erofs_fscache_read_folio_inline(folio, &map);
+			if (!ret) {
+				folio_mark_uptodate(folio);
+				ret = folio_size(folio);
+			}
+
+			folio_unlock(folio);
+			continue;
+		}
+
+		mdev = (struct erofs_map_dev) {
+			.m_deviceid = map.m_deviceid,
+			.m_pa = map.m_pa,
+		};
+		ret = erofs_map_dev(sb, &mdev);
+		if (ret)
+			return;
+
+		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
+		if (IS_ERR(rreq))
+			return;
+		/*
+		 * Drop the folio references here; the folios are unlocked in
+		 * rreq_unlock_folios() when the rreq completes.
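+		 * (With unlock=false, erofs_fscache_advance_folios() only
+		 * consumes the folios from the readahead window and leaves
+		 * them locked; readahead_folio() drops the reference.)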
+ */ + erofs_fscache_advance_folios(rac, count, false); + ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie, + rreq, mdev.m_pa + (pos - map.m_la)); + if (!ret) + ret = count; + } while (ret > 0 && ((done += ret) < len)); +} + +static const struct address_space_operations erofs_fscache_meta_aops = { + .read_folio = erofs_fscache_meta_read_folio, +}; + +const struct address_space_operations erofs_fscache_access_aops = { + .read_folio = erofs_fscache_read_folio, + .readahead = erofs_fscache_readahead, +}; + +int erofs_fscache_register_cookie(struct super_block *sb, + struct erofs_fscache **fscache, + char *name, bool need_inode) +{ + struct fscache_volume *volume = EROFS_SB(sb)->volume; + struct erofs_fscache *ctx; + struct fscache_cookie *cookie; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE, + name, strlen(name), NULL, 0, 0); + if (!cookie) { + erofs_err(sb, "failed to get cookie for %s", name); + ret = -EINVAL; + goto err; + } + + fscache_use_cookie(cookie, false); + ctx->cookie = cookie; + + if (need_inode) { + struct inode *const inode = new_inode(sb); + + if (!inode) { + erofs_err(sb, "failed to get anon inode for %s", name); + ret = -ENOMEM; + goto err_cookie; + } + + set_nlink(inode, 1); + inode->i_size = OFFSET_MAX; + inode->i_mapping->a_ops = &erofs_fscache_meta_aops; + mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + + ctx->inode = inode; + } + + *fscache = ctx; + return 0; + +err_cookie: + fscache_unuse_cookie(ctx->cookie, NULL, NULL); + fscache_relinquish_cookie(ctx->cookie, false); + ctx->cookie = NULL; +err: + kfree(ctx); + return ret; +} + +void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache) +{ + struct erofs_fscache *ctx = *fscache; + + if (!ctx) + return; + + fscache_unuse_cookie(ctx->cookie, NULL, NULL); + fscache_relinquish_cookie(ctx->cookie, false); + ctx->cookie = NULL; + + iput(ctx->inode); + ctx->inode = NULL; + + kfree(ctx); + *fscache = NULL; +} + +int erofs_fscache_register_fs(struct super_block *sb) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + struct fscache_volume *volume; + char *name; + int ret = 0; + + name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid); + if (!name) + return -ENOMEM; + + volume = fscache_acquire_volume(name, NULL, NULL, 0); + if (IS_ERR_OR_NULL(volume)) { + erofs_err(sb, "failed to register volume for %s", name); + ret = volume ? 
PTR_ERR(volume) : -EOPNOTSUPP;
+		volume = NULL;
+	}
+
+	sbi->volume = volume;
+	kfree(name);
+	return ret;
+}
+
+void erofs_fscache_unregister_fs(struct super_block *sb)
+{
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+	fscache_relinquish_volume(sbi->volume, NULL, false);
+	sbi->volume = NULL;
+}
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
new file mode 100644
index 0000000000..e20ac0654b
--- /dev/null
+++ b/fs/ext4/crypto.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/quotaops.h>
+#include <linux/uuid.h>
+
+#include "ext4.h"
+#include "xattr.h"
+#include "ext4_jbd2.h"
+
+static void ext4_fname_from_fscrypt_name(struct ext4_filename *dst,
+					 const struct fscrypt_name *src)
+{
+	memset(dst, 0, sizeof(*dst));
+
+	dst->usr_fname = src->usr_fname;
+	dst->disk_name = src->disk_name;
+	dst->hinfo.hash = src->hash;
+	dst->hinfo.minor_hash = src->minor_hash;
+	dst->crypto_buf = src->crypto_buf;
+}
+
+int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
+			      int lookup, struct ext4_filename *fname)
+{
+	struct fscrypt_name name;
+	int err;
+
+	err = fscrypt_setup_filename(dir, iname, lookup, &name);
+	if (err)
+		return err;
+
+	ext4_fname_from_fscrypt_name(fname, &name);
+
+#if IS_ENABLED(CONFIG_UNICODE)
+	err = ext4_fname_setup_ci_filename(dir, iname, fname);
+#endif
+	return err;
+}
+
+int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
+			      struct ext4_filename *fname)
+{
+	struct fscrypt_name name;
+	int err;
+
+	err = fscrypt_prepare_lookup(dir, dentry, &name);
+	if (err)
+		return err;
+
+	ext4_fname_from_fscrypt_name(fname, &name);
+
+#if IS_ENABLED(CONFIG_UNICODE)
+	err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
+#endif
+	return err;
+}
+
+void ext4_fname_free_filename(struct ext4_filename *fname)
+{
+	struct fscrypt_name name;
+
+	name.crypto_buf = fname->crypto_buf;
+	fscrypt_free_filename(&name);
+
+	fname->crypto_buf.name = NULL;
+	fname->usr_fname = NULL;
+	fname->disk_name.name = NULL;
+
+#if IS_ENABLED(CONFIG_UNICODE)
+	kfree(fname->cf_name.name);
+	fname->cf_name.name = NULL;
+#endif
+}
+
+static bool uuid_is_zero(__u8 u[16])
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		if (u[i])
+			return false;
+	return true;
+}
+
+int ext4_ioctl_get_encryption_pwsalt(struct file *filp, void __user *arg)
+{
+	struct super_block *sb = file_inode(filp)->i_sb;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	int err, err2;
+	handle_t *handle;
+
+	if (!ext4_has_feature_encrypt(sb))
+		return -EOPNOTSUPP;
+
+	if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
+		err = mnt_want_write_file(filp);
+		if (err)
+			return err;
+		handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
+		if (IS_ERR(handle)) {
+			err = PTR_ERR(handle);
+			goto pwsalt_err_exit;
+		}
+		err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
+						    EXT4_JTR_NONE);
+		if (err)
+			goto pwsalt_err_journal;
+		lock_buffer(sbi->s_sbh);
+		generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+		ext4_superblock_csum_set(sb);
+		unlock_buffer(sbi->s_sbh);
+		err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
+pwsalt_err_journal:
+		err2 = ext4_journal_stop(handle);
+		if (err2 && !err)
+			err = err2;
+pwsalt_err_exit:
+		mnt_drop_write_file(filp);
+		if (err)
+			return err;
+	}
+
+	if (copy_to_user(arg, sbi->s_es->s_encrypt_pw_salt, 16))
+		return -EFAULT;
+	return 0;
+}
+
+static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
+{
+	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+			      EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
+}
+
+static int ext4_set_context(struct inode *inode,
const void *ctx, size_t len, + void *fs_data) +{ + handle_t *handle = fs_data; + int res, res2, credits, retries = 0; + + /* + * Encrypting the root directory is not allowed because e2fsck expects + * lost+found to exist and be unencrypted, and encrypting the root + * directory would imply encrypting the lost+found directory as well as + * the filename "lost+found" itself. + */ + if (inode->i_ino == EXT4_ROOT_INO) + return -EPERM; + + if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode))) + return -EINVAL; + + if (ext4_test_inode_flag(inode, EXT4_INODE_DAX)) + return -EOPNOTSUPP; + + res = ext4_convert_inline_data(inode); + if (res) + return res; + + /* + * If a journal handle was specified, then the encryption context is + * being set on a new inode via inheritance and is part of a larger + * transaction to create the inode. Otherwise the encryption context is + * being set on an existing inode in its own transaction. Only in the + * latter case should the "retry on ENOSPC" logic be used. + */ + + if (handle) { + res = ext4_xattr_set_handle(handle, inode, + EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + ctx, len, 0); + if (!res) { + ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); + ext4_clear_inode_state(inode, + EXT4_STATE_MAY_INLINE_DATA); + /* + * Update inode->i_flags - S_ENCRYPTED will be enabled, + * S_DAX may be disabled + */ + ext4_set_inode_flags(inode, false); + } + return res; + } + + res = dquot_initialize(inode); + if (res) + return res; +retry: + res = ext4_xattr_set_credits(inode, len, false /* is_create */, + &credits); + if (res) + return res; + + handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + ctx, len, 0); + if (!res) { + ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); + /* + * Update inode->i_flags - S_ENCRYPTED will be enabled, + * S_DAX may be disabled + */ + ext4_set_inode_flags(inode, false); + res = ext4_mark_inode_dirty(handle, inode); + if (res) + EXT4_ERROR_INODE(inode, "Failed to mark inode dirty"); + } + res2 = ext4_journal_stop(handle); + + if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) + goto retry; + if (!res) + res = res2; + return res; +} + +static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb) +{ + return EXT4_SB(sb)->s_dummy_enc_policy.policy; +} + +static bool ext4_has_stable_inodes(struct super_block *sb) +{ + return ext4_has_feature_stable_inodes(sb); +} + +static void ext4_get_ino_and_lblk_bits(struct super_block *sb, + int *ino_bits_ret, int *lblk_bits_ret) +{ + *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count); + *lblk_bits_ret = 8 * sizeof(ext4_lblk_t); +} + +const struct fscrypt_operations ext4_cryptops = { + .key_prefix = "ext4:", + .get_context = ext4_get_context, + .set_context = ext4_set_context, + .get_dummy_policy = ext4_get_dummy_policy, + .empty_dir = ext4_empty_dir, + .has_stable_inodes = ext4_has_stable_inodes, + .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits, +}; diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c new file mode 100644 index 0000000000..5077a7ad56 --- /dev/null +++ b/fs/xfs/xfs_attr_item.c @@ -0,0 +1,888 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022 Oracle. All Rights Reserved. 
+ * Author: Allison Henderson + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_shared.h" +#include "xfs_mount.h" +#include "xfs_defer.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_bmap_btree.h" +#include "xfs_trans_priv.h" +#include "xfs_log.h" +#include "xfs_inode.h" +#include "xfs_da_format.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_item.h" +#include "xfs_trace.h" +#include "xfs_trans_space.h" +#include "xfs_errortag.h" +#include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" + +struct kmem_cache *xfs_attri_cache; +struct kmem_cache *xfs_attrd_cache; + +static const struct xfs_item_ops xfs_attri_item_ops; +static const struct xfs_item_ops xfs_attrd_item_ops; +static struct xfs_attrd_log_item *xfs_trans_get_attrd(struct xfs_trans *tp, + struct xfs_attri_log_item *attrip); + +static inline struct xfs_attri_log_item *ATTRI_ITEM(struct xfs_log_item *lip) +{ + return container_of(lip, struct xfs_attri_log_item, attri_item); +} + +/* + * Shared xattr name/value buffers for logged extended attribute operations + * + * When logging updates to extended attributes, we can create quite a few + * attribute log intent items for a single xattr update. To avoid cycling the + * memory allocator and memcpy overhead, the name (and value, for setxattr) + * are kept in a refcounted object that is shared across all related log items + * and the upper-level deferred work state structure. The shared buffer has + * a control structure, followed by the name, and then the value. + */ + +static inline struct xfs_attri_log_nameval * +xfs_attri_log_nameval_get( + struct xfs_attri_log_nameval *nv) +{ + if (!refcount_inc_not_zero(&nv->refcount)) + return NULL; + return nv; +} + +static inline void +xfs_attri_log_nameval_put( + struct xfs_attri_log_nameval *nv) +{ + if (!nv) + return; + if (refcount_dec_and_test(&nv->refcount)) + kvfree(nv); +} + +static inline struct xfs_attri_log_nameval * +xfs_attri_log_nameval_alloc( + const void *name, + unsigned int name_len, + const void *value, + unsigned int value_len) +{ + struct xfs_attri_log_nameval *nv; + + /* + * This could be over 64kB in length, so we have to use kvmalloc() for + * this. But kvmalloc() utterly sucks, so we use our own version. + */ + nv = xlog_kvmalloc(sizeof(struct xfs_attri_log_nameval) + + name_len + value_len); + if (!nv) + return nv; + + nv->name.i_addr = nv + 1; + nv->name.i_len = name_len; + nv->name.i_type = XLOG_REG_TYPE_ATTR_NAME; + memcpy(nv->name.i_addr, name, name_len); + + if (value_len) { + nv->value.i_addr = nv->name.i_addr + name_len; + nv->value.i_len = value_len; + memcpy(nv->value.i_addr, value, value_len); + } else { + nv->value.i_addr = NULL; + nv->value.i_len = 0; + } + nv->value.i_type = XLOG_REG_TYPE_ATTR_VALUE; + + refcount_set(&nv->refcount, 1); + return nv; +} + +STATIC void +xfs_attri_item_free( + struct xfs_attri_log_item *attrip) +{ + kmem_free(attrip->attri_item.li_lv_shadow); + xfs_attri_log_nameval_put(attrip->attri_nameval); + kmem_cache_free(xfs_attri_cache, attrip); +} + +/* + * Freeing the attrip requires that we remove it from the AIL if it has already + * been placed there. However, the ATTRI may not yet have been placed in the + * AIL when called by xfs_attri_release() from ATTRD processing due to the + * ordering of committed vs unpin operations in bulk insert operations. Hence + * the reference count to ensure only the last caller frees the ATTRI. 
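+ * (Each ATTRI is created with a reference count of two, one for the
+ * intent side and one for the done side; see xfs_attri_init() below.)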
+ */ +STATIC void +xfs_attri_release( + struct xfs_attri_log_item *attrip) +{ + ASSERT(atomic_read(&attrip->attri_refcount) > 0); + if (!atomic_dec_and_test(&attrip->attri_refcount)) + return; + + xfs_trans_ail_delete(&attrip->attri_item, 0); + xfs_attri_item_free(attrip); +} + +STATIC void +xfs_attri_item_size( + struct xfs_log_item *lip, + int *nvecs, + int *nbytes) +{ + struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip); + struct xfs_attri_log_nameval *nv = attrip->attri_nameval; + + *nvecs += 2; + *nbytes += sizeof(struct xfs_attri_log_format) + + xlog_calc_iovec_len(nv->name.i_len); + + if (!nv->value.i_len) + return; + + *nvecs += 1; + *nbytes += xlog_calc_iovec_len(nv->value.i_len); +} + +/* + * This is called to fill in the log iovecs for the given attri log + * item. We use 1 iovec for the attri_format_item, 1 for the name, and + * another for the value if it is present + */ +STATIC void +xfs_attri_item_format( + struct xfs_log_item *lip, + struct xfs_log_vec *lv) +{ + struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip); + struct xfs_log_iovec *vecp = NULL; + struct xfs_attri_log_nameval *nv = attrip->attri_nameval; + + attrip->attri_format.alfi_type = XFS_LI_ATTRI; + attrip->attri_format.alfi_size = 1; + + /* + * This size accounting must be done before copying the attrip into the + * iovec. If we do it after, the wrong size will be recorded to the log + * and we trip across assertion checks for bad region sizes later during + * the log recovery. + */ + + ASSERT(nv->name.i_len > 0); + attrip->attri_format.alfi_size++; + + if (nv->value.i_len > 0) + attrip->attri_format.alfi_size++; + + xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTRI_FORMAT, + &attrip->attri_format, + sizeof(struct xfs_attri_log_format)); + xlog_copy_from_iovec(lv, &vecp, &nv->name); + if (nv->value.i_len > 0) + xlog_copy_from_iovec(lv, &vecp, &nv->value); +} + +/* + * The unpin operation is the last place an ATTRI is manipulated in the log. It + * is either inserted in the AIL or aborted in the event of a log I/O error. In + * either case, the ATTRI transaction has been successfully committed to make + * it this far. Therefore, we expect whoever committed the ATTRI to either + * construct and commit the ATTRD or drop the ATTRD's reference in the event of + * error. Simply drop the log's ATTRI reference now that the log is done with + * it. + */ +STATIC void +xfs_attri_item_unpin( + struct xfs_log_item *lip, + int remove) +{ + xfs_attri_release(ATTRI_ITEM(lip)); +} + + +STATIC void +xfs_attri_item_release( + struct xfs_log_item *lip) +{ + xfs_attri_release(ATTRI_ITEM(lip)); +} + +/* + * Allocate and initialize an attri item. Caller may allocate an additional + * trailing buffer for name and value + */ +STATIC struct xfs_attri_log_item * +xfs_attri_init( + struct xfs_mount *mp, + struct xfs_attri_log_nameval *nv) +{ + struct xfs_attri_log_item *attrip; + + attrip = kmem_cache_zalloc(xfs_attri_cache, GFP_NOFS | __GFP_NOFAIL); + + /* + * Grab an extra reference to the name/value buffer for this log item. + * The caller retains its own reference! + */ + attrip->attri_nameval = xfs_attri_log_nameval_get(nv); + ASSERT(attrip->attri_nameval); + + xfs_log_item_init(mp, &attrip->attri_item, XFS_LI_ATTRI, + &xfs_attri_item_ops); + attrip->attri_format.alfi_id = (uintptr_t)(void *)attrip; + atomic_set(&attrip->attri_refcount, 2); + + return attrip; +} + +/* + * Copy an attr format buffer from the given buf, and into the destination attr + * format structure. 
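+ * Returns -EFSCORRUPTED if the recovered buffer is not exactly the size
+ * of the log format structure.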
+ */ +STATIC int +xfs_attri_copy_format( + struct xfs_log_iovec *buf, + struct xfs_attri_log_format *dst_attr_fmt) +{ + struct xfs_attri_log_format *src_attr_fmt = buf->i_addr; + size_t len; + + len = sizeof(struct xfs_attri_log_format); + if (buf->i_len != len) { + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); + return -EFSCORRUPTED; + } + + memcpy((char *)dst_attr_fmt, (char *)src_attr_fmt, len); + return 0; +} + +static inline struct xfs_attrd_log_item *ATTRD_ITEM(struct xfs_log_item *lip) +{ + return container_of(lip, struct xfs_attrd_log_item, attrd_item); +} + +STATIC void +xfs_attrd_item_free(struct xfs_attrd_log_item *attrdp) +{ + kmem_free(attrdp->attrd_item.li_lv_shadow); + kmem_cache_free(xfs_attrd_cache, attrdp); +} + +STATIC void +xfs_attrd_item_size( + struct xfs_log_item *lip, + int *nvecs, + int *nbytes) +{ + *nvecs += 1; + *nbytes += sizeof(struct xfs_attrd_log_format); +} + +/* + * This is called to fill in the log iovecs for the given attrd log item. We use + * only 1 iovec for the attrd_format, and we point that at the attr_log_format + * structure embedded in the attrd item. + */ +STATIC void +xfs_attrd_item_format( + struct xfs_log_item *lip, + struct xfs_log_vec *lv) +{ + struct xfs_attrd_log_item *attrdp = ATTRD_ITEM(lip); + struct xfs_log_iovec *vecp = NULL; + + attrdp->attrd_format.alfd_type = XFS_LI_ATTRD; + attrdp->attrd_format.alfd_size = 1; + + xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTRD_FORMAT, + &attrdp->attrd_format, + sizeof(struct xfs_attrd_log_format)); +} + +/* + * The ATTRD is either committed or aborted if the transaction is canceled. If + * the transaction is canceled, drop our reference to the ATTRI and free the + * ATTRD. + */ +STATIC void +xfs_attrd_item_release( + struct xfs_log_item *lip) +{ + struct xfs_attrd_log_item *attrdp = ATTRD_ITEM(lip); + + xfs_attri_release(attrdp->attrd_attrip); + xfs_attrd_item_free(attrdp); +} + +static struct xfs_log_item * +xfs_attrd_item_intent( + struct xfs_log_item *lip) +{ + return &ATTRD_ITEM(lip)->attrd_attrip->attri_item; +} + +/* + * Performs one step of an attribute update intent and marks the attrd item + * dirty.. An attr operation may be a set or a remove. Note that the + * transaction is marked dirty regardless of whether the operation succeeds or + * fails to support the ATTRI/ATTRD lifecycle rules. + */ +STATIC int +xfs_xattri_finish_update( + struct xfs_attr_intent *attr, + struct xfs_attrd_log_item *attrdp) +{ + struct xfs_da_args *args = attr->xattri_da_args; + int error; + + if (XFS_TEST_ERROR(false, args->dp->i_mount, XFS_ERRTAG_LARP)) { + error = -EIO; + goto out; + } + + error = xfs_attr_set_iter(attr); + if (!error && attr->xattri_dela_state != XFS_DAS_DONE) + error = -EAGAIN; +out: + /* + * Mark the transaction dirty, even on error. This ensures the + * transaction is aborted, which: + * + * 1.) releases the ATTRI and frees the ATTRD + * 2.) shuts down the filesystem + */ + args->trans->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE; + + /* + * attr intent/done items are null when logged attributes are disabled + */ + if (attrdp) + set_bit(XFS_LI_DIRTY, &attrdp->attrd_item.li_flags); + + return error; +} + +/* Log an attr to the intent item. 
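+ * This copies the inode number, op flags, name/value lengths and the attr
+ * filter from the xfs_attr_intent into the ATTRI log format structure, and
+ * marks both the transaction and the log item dirty.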
*/ +STATIC void +xfs_attr_log_item( + struct xfs_trans *tp, + struct xfs_attri_log_item *attrip, + const struct xfs_attr_intent *attr) +{ + struct xfs_attri_log_format *attrp; + + tp->t_flags |= XFS_TRANS_DIRTY; + set_bit(XFS_LI_DIRTY, &attrip->attri_item.li_flags); + + /* + * At this point the xfs_attr_intent has been constructed, and we've + * created the log intent. Fill in the attri log item and log format + * structure with fields from this xfs_attr_intent + */ + attrp = &attrip->attri_format; + attrp->alfi_ino = attr->xattri_da_args->dp->i_ino; + ASSERT(!(attr->xattri_op_flags & ~XFS_ATTRI_OP_FLAGS_TYPE_MASK)); + attrp->alfi_op_flags = attr->xattri_op_flags; + attrp->alfi_value_len = attr->xattri_nameval->value.i_len; + attrp->alfi_name_len = attr->xattri_nameval->name.i_len; + ASSERT(!(attr->xattri_da_args->attr_filter & ~XFS_ATTRI_FILTER_MASK)); + attrp->alfi_attr_filter = attr->xattri_da_args->attr_filter; +} + +/* Get an ATTRI. */ +static struct xfs_log_item * +xfs_attr_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_attri_log_item *attrip; + struct xfs_attr_intent *attr; + struct xfs_da_args *args; + + ASSERT(count == 1); + + /* + * Each attr item only performs one attribute operation at a time, so + * this is a list of one + */ + attr = list_first_entry_or_null(items, struct xfs_attr_intent, + xattri_list); + args = attr->xattri_da_args; + + if (!(args->op_flags & XFS_DA_OP_LOGGED)) + return NULL; + + /* + * Create a buffer to store the attribute name and value. This buffer + * will be shared between the higher level deferred xattr work state + * and the lower level xattr log items. + */ + if (!attr->xattri_nameval) { + /* + * Transfer our reference to the name/value buffer to the + * deferred work state structure. + */ + attr->xattri_nameval = xfs_attri_log_nameval_alloc(args->name, + args->namelen, args->value, args->valuelen); + } + if (!attr->xattri_nameval) + return ERR_PTR(-ENOMEM); + + attrip = xfs_attri_init(mp, attr->xattri_nameval); + xfs_trans_add_item(tp, &attrip->attri_item); + xfs_attr_log_item(tp, attrip, attr); + + return &attrip->attri_item; +} + +static inline void +xfs_attr_free_item( + struct xfs_attr_intent *attr) +{ + if (attr->xattri_da_state) + xfs_da_state_free(attr->xattri_da_state); + xfs_attri_log_nameval_put(attr->xattri_nameval); + if (attr->xattri_da_args->op_flags & XFS_DA_OP_RECOVERY) + kmem_free(attr); + else + kmem_cache_free(xfs_attr_intent_cache, attr); +} + +/* Process an attr. */ +STATIC int +xfs_attr_finish_item( + struct xfs_trans *tp, + struct xfs_log_item *done, + struct list_head *item, + struct xfs_btree_cur **state) +{ + struct xfs_attr_intent *attr; + struct xfs_attrd_log_item *done_item = NULL; + int error; + + attr = container_of(item, struct xfs_attr_intent, xattri_list); + if (done) + done_item = ATTRD_ITEM(done); + + /* + * Always reset trans after EAGAIN cycle + * since the transaction is new + */ + attr->xattri_da_args->trans = tp; + + error = xfs_xattri_finish_update(attr, done_item); + if (error != -EAGAIN) + xfs_attr_free_item(attr); + + return error; +} + +/* Abort all pending ATTRs. 
*/ +STATIC void +xfs_attr_abort_intent( + struct xfs_log_item *intent) +{ + xfs_attri_release(ATTRI_ITEM(intent)); +} + +/* Cancel an attr */ +STATIC void +xfs_attr_cancel_item( + struct list_head *item) +{ + struct xfs_attr_intent *attr; + + attr = container_of(item, struct xfs_attr_intent, xattri_list); + xfs_attr_free_item(attr); +} + +STATIC bool +xfs_attri_item_match( + struct xfs_log_item *lip, + uint64_t intent_id) +{ + return ATTRI_ITEM(lip)->attri_format.alfi_id == intent_id; +} + +/* Is this recovered ATTRI format ok? */ +static inline bool +xfs_attri_validate( + struct xfs_mount *mp, + struct xfs_attri_log_format *attrp) +{ + unsigned int op = attrp->alfi_op_flags & + XFS_ATTRI_OP_FLAGS_TYPE_MASK; + + if (attrp->__pad != 0) + return false; + + if (attrp->alfi_op_flags & ~XFS_ATTRI_OP_FLAGS_TYPE_MASK) + return false; + + if (attrp->alfi_attr_filter & ~XFS_ATTRI_FILTER_MASK) + return false; + + /* alfi_op_flags should be either a set or remove */ + switch (op) { + case XFS_ATTRI_OP_FLAGS_SET: + case XFS_ATTRI_OP_FLAGS_REPLACE: + case XFS_ATTRI_OP_FLAGS_REMOVE: + break; + default: + return false; + } + + if (attrp->alfi_value_len > XATTR_SIZE_MAX) + return false; + + if ((attrp->alfi_name_len > XATTR_NAME_MAX) || + (attrp->alfi_name_len == 0)) + return false; + + return xfs_verify_ino(mp, attrp->alfi_ino); +} + +/* + * Process an attr intent item that was recovered from the log. We need to + * delete the attr that it describes. + */ +STATIC int +xfs_attri_item_recover( + struct xfs_log_item *lip, + struct list_head *capture_list) +{ + struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip); + struct xfs_attr_intent *attr; + struct xfs_mount *mp = lip->li_log->l_mp; + struct xfs_inode *ip; + struct xfs_da_args *args; + struct xfs_trans *tp; + struct xfs_trans_res tres; + struct xfs_attri_log_format *attrp; + struct xfs_attri_log_nameval *nv = attrip->attri_nameval; + int error; + int total; + int local; + struct xfs_attrd_log_item *done_item = NULL; + + /* + * First check the validity of the attr described by the ATTRI. If any + * are bad, then assume that all are bad and just toss the ATTRI. + */ + attrp = &attrip->attri_format; + if (!xfs_attri_validate(mp, attrp) || + !xfs_attr_namecheck(nv->name.i_addr, nv->name.i_len)) + return -EFSCORRUPTED; + + error = xlog_recover_iget(mp, attrp->alfi_ino, &ip); + if (error) + return error; + + attr = kmem_zalloc(sizeof(struct xfs_attr_intent) + + sizeof(struct xfs_da_args), KM_NOFS); + args = (struct xfs_da_args *)(attr + 1); + + attr->xattri_da_args = args; + attr->xattri_op_flags = attrp->alfi_op_flags & + XFS_ATTRI_OP_FLAGS_TYPE_MASK; + + /* + * We're reconstructing the deferred work state structure from the + * recovered log item. Grab a reference to the name/value buffer and + * attach it to the new work state. 
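+	 * That reference is dropped again by xfs_attr_free_item() once the
+	 * recovered operation has been processed.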
+ */ + attr->xattri_nameval = xfs_attri_log_nameval_get(nv); + ASSERT(attr->xattri_nameval); + + args->dp = ip; + args->geo = mp->m_attr_geo; + args->whichfork = XFS_ATTR_FORK; + args->name = nv->name.i_addr; + args->namelen = nv->name.i_len; + args->hashval = xfs_da_hashname(args->name, args->namelen); + args->attr_filter = attrp->alfi_attr_filter & XFS_ATTRI_FILTER_MASK; + args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT | + XFS_DA_OP_LOGGED; + + ASSERT(xfs_sb_version_haslogxattrs(&mp->m_sb)); + + switch (attr->xattri_op_flags) { + case XFS_ATTRI_OP_FLAGS_SET: + case XFS_ATTRI_OP_FLAGS_REPLACE: + args->value = nv->value.i_addr; + args->valuelen = nv->value.i_len; + args->total = xfs_attr_calc_size(args, &local); + if (xfs_inode_hasattr(args->dp)) + attr->xattri_dela_state = xfs_attr_init_replace_state(args); + else + attr->xattri_dela_state = xfs_attr_init_add_state(args); + break; + case XFS_ATTRI_OP_FLAGS_REMOVE: + if (!xfs_inode_hasattr(args->dp)) + goto out; + attr->xattri_dela_state = xfs_attr_init_remove_state(args); + break; + default: + ASSERT(0); + error = -EFSCORRUPTED; + goto out; + } + + xfs_init_attr_trans(args, &tres, &total); + error = xfs_trans_alloc(mp, &tres, total, 0, XFS_TRANS_RESERVE, &tp); + if (error) + goto out; + + args->trans = tp; + done_item = xfs_trans_get_attrd(tp, attrip); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, 0); + + error = xfs_xattri_finish_update(attr, done_item); + if (error == -EAGAIN) { + /* + * There's more work to do, so add the intent item to this + * transaction so that we can continue it later. + */ + xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_ATTR, &attr->xattri_list); + error = xfs_defer_ops_capture_and_commit(tp, capture_list); + if (error) + goto out_unlock; + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_irele(ip); + return 0; + } + if (error) { + xfs_trans_cancel(tp); + goto out_unlock; + } + + error = xfs_defer_ops_capture_and_commit(tp, capture_list); +out_unlock: + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_irele(ip); +out: + xfs_attr_free_item(attr); + return error; +} + +/* Re-log an intent item to push the log tail forward. */ +static struct xfs_log_item * +xfs_attri_item_relog( + struct xfs_log_item *intent, + struct xfs_trans *tp) +{ + struct xfs_attrd_log_item *attrdp; + struct xfs_attri_log_item *old_attrip; + struct xfs_attri_log_item *new_attrip; + struct xfs_attri_log_format *new_attrp; + struct xfs_attri_log_format *old_attrp; + + old_attrip = ATTRI_ITEM(intent); + old_attrp = &old_attrip->attri_format; + + tp->t_flags |= XFS_TRANS_DIRTY; + attrdp = xfs_trans_get_attrd(tp, old_attrip); + set_bit(XFS_LI_DIRTY, &attrdp->attrd_item.li_flags); + + /* + * Create a new log item that shares the same name/value buffer as the + * old log item. 
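+ * xfs_attri_init() takes its own reference to that buffer, so the old and
+ * new log items can subsequently be released in any order.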
+ */ + new_attrip = xfs_attri_init(tp->t_mountp, old_attrip->attri_nameval); + new_attrp = &new_attrip->attri_format; + + new_attrp->alfi_ino = old_attrp->alfi_ino; + new_attrp->alfi_op_flags = old_attrp->alfi_op_flags; + new_attrp->alfi_value_len = old_attrp->alfi_value_len; + new_attrp->alfi_name_len = old_attrp->alfi_name_len; + new_attrp->alfi_attr_filter = old_attrp->alfi_attr_filter; + + xfs_trans_add_item(tp, &new_attrip->attri_item); + set_bit(XFS_LI_DIRTY, &new_attrip->attri_item.li_flags); + + return &new_attrip->attri_item; +} + +STATIC int +xlog_recover_attri_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_mount *mp = log->l_mp; + struct xfs_attri_log_item *attrip; + struct xfs_attri_log_format *attri_formatp; + struct xfs_attri_log_nameval *nv; + const void *attr_value = NULL; + const void *attr_name; + int error; + + attri_formatp = item->ri_buf[0].i_addr; + attr_name = item->ri_buf[1].i_addr; + + /* Validate xfs_attri_log_format before the large memory allocation */ + if (!xfs_attri_validate(mp, attri_formatp)) { + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); + return -EFSCORRUPTED; + } + + if (!xfs_attr_namecheck(attr_name, attri_formatp->alfi_name_len)) { + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); + return -EFSCORRUPTED; + } + + if (attri_formatp->alfi_value_len) + attr_value = item->ri_buf[2].i_addr; + + /* + * Memory alloc failure will cause replay to abort. We attach the + * name/value buffer to the recovered incore log item and drop our + * reference. + */ + nv = xfs_attri_log_nameval_alloc(attr_name, + attri_formatp->alfi_name_len, attr_value, + attri_formatp->alfi_value_len); + if (!nv) + return -ENOMEM; + + attrip = xfs_attri_init(mp, nv); + error = xfs_attri_copy_format(&item->ri_buf[0], &attrip->attri_format); + if (error) + goto out; + + /* + * The ATTRI has two references. One for the ATTRD and one for ATTRI to + * ensure it makes it into the AIL. Insert the ATTRI into the AIL + * directly and drop the ATTRI reference. Note that + * xfs_trans_ail_update() drops the AIL lock. + */ + xfs_trans_ail_insert(log->l_ailp, &attrip->attri_item, lsn); + xfs_attri_release(attrip); + xfs_attri_log_nameval_put(nv); + return 0; +out: + xfs_attri_item_free(attrip); + xfs_attri_log_nameval_put(nv); + return error; +} + +/* + * This routine is called to allocate an "attr free done" log item. + */ +static struct xfs_attrd_log_item * +xfs_trans_get_attrd(struct xfs_trans *tp, + struct xfs_attri_log_item *attrip) +{ + struct xfs_attrd_log_item *attrdp; + + ASSERT(tp != NULL); + + attrdp = kmem_cache_zalloc(xfs_attrd_cache, GFP_NOFS | __GFP_NOFAIL); + + xfs_log_item_init(tp->t_mountp, &attrdp->attrd_item, XFS_LI_ATTRD, + &xfs_attrd_item_ops); + attrdp->attrd_attrip = attrip; + attrdp->attrd_format.alfd_alf_id = attrip->attri_format.alfi_id; + + xfs_trans_add_item(tp, &attrdp->attrd_item); + return attrdp; +} + +/* Get an ATTRD so we can process all the attrs. 
*/ +static struct xfs_log_item * +xfs_attr_create_done( + struct xfs_trans *tp, + struct xfs_log_item *intent, + unsigned int count) +{ + if (!intent) + return NULL; + + return &xfs_trans_get_attrd(tp, ATTRI_ITEM(intent))->attrd_item; +} + +const struct xfs_defer_op_type xfs_attr_defer_type = { + .max_items = 1, + .create_intent = xfs_attr_create_intent, + .abort_intent = xfs_attr_abort_intent, + .create_done = xfs_attr_create_done, + .finish_item = xfs_attr_finish_item, + .cancel_item = xfs_attr_cancel_item, +}; + +/* + * This routine is called when an ATTRD format structure is found in a committed + * transaction in the log. Its purpose is to cancel the corresponding ATTRI if + * it was still in the log. To do this it searches the AIL for the ATTRI with + * an id equal to that in the ATTRD format structure. If we find it we drop + * the ATTRD reference, which removes the ATTRI from the AIL and frees it. + */ +STATIC int +xlog_recover_attrd_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_attrd_log_format *attrd_formatp; + + attrd_formatp = item->ri_buf[0].i_addr; + if (item->ri_buf[0].i_len != sizeof(struct xfs_attrd_log_format)) { + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); + return -EFSCORRUPTED; + } + + xlog_recover_release_intent(log, XFS_LI_ATTRI, + attrd_formatp->alfd_alf_id); + return 0; +} + +static const struct xfs_item_ops xfs_attri_item_ops = { + .flags = XFS_ITEM_INTENT, + .iop_size = xfs_attri_item_size, + .iop_format = xfs_attri_item_format, + .iop_unpin = xfs_attri_item_unpin, + .iop_release = xfs_attri_item_release, + .iop_recover = xfs_attri_item_recover, + .iop_match = xfs_attri_item_match, + .iop_relog = xfs_attri_item_relog, +}; + +const struct xlog_recover_item_ops xlog_attri_item_ops = { + .item_type = XFS_LI_ATTRI, + .commit_pass2 = xlog_recover_attri_commit_pass2, +}; + +static const struct xfs_item_ops xfs_attrd_item_ops = { + .flags = XFS_ITEM_RELEASE_WHEN_COMMITTED | + XFS_ITEM_INTENT_DONE, + .iop_size = xfs_attrd_item_size, + .iop_format = xfs_attrd_item_format, + .iop_release = xfs_attrd_item_release, + .iop_intent = xfs_attrd_item_intent, +}; + +const struct xlog_recover_item_ops xlog_attrd_item_ops = { + .item_type = XFS_LI_ATTRD, + .commit_pass2 = xlog_recover_attrd_commit_pass2, +}; diff --git a/fs/xfs/xfs_attr_item.h b/fs/xfs/xfs_attr_item.h new file mode 100644 index 0000000000..3280a79302 --- /dev/null +++ b/fs/xfs/xfs_attr_item.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * + * Copyright (C) 2022 Oracle. All Rights Reserved. + * Author: Allison Henderson + */ +#ifndef __XFS_ATTR_ITEM_H__ +#define __XFS_ATTR_ITEM_H__ + +/* kernel only ATTRI/ATTRD definitions */ + +struct xfs_mount; +struct kmem_zone; + +struct xfs_attri_log_nameval { + struct xfs_log_iovec name; + struct xfs_log_iovec value; + refcount_t refcount; + + /* name and value follow the end of this struct */ +}; + +/* + * This is the "attr intention" log item. It is used to log the fact that some + * extended attribute operations need to be processed. An operation is + * currently either a set or remove. Set or remove operations are described by + * the xfs_attr_intent which may be logged to this intent. + * + * During a normal attr operation, name and value point to the name and value + * fields of the caller's xfs_da_args structure. 
During a recovery, the name + * and value buffers are copied from the log, and stored in a trailing buffer + * attached to the xfs_attr_intent until they are committed. They are freed + * when the xfs_attr_intent itself is freed when the work is done. + */ +struct xfs_attri_log_item { + struct xfs_log_item attri_item; + atomic_t attri_refcount; + struct xfs_attri_log_nameval *attri_nameval; + struct xfs_attri_log_format attri_format; +}; + +/* + * This is the "attr done" log item. It is used to log the fact that some attrs + * earlier mentioned in an attri item have been freed. + */ +struct xfs_attrd_log_item { + struct xfs_log_item attrd_item; + struct xfs_attri_log_item *attrd_attrip; + struct xfs_attrd_log_format attrd_format; +}; + +extern struct kmem_cache *xfs_attri_cache; +extern struct kmem_cache *xfs_attrd_cache; + +#endif /* __XFS_ATTR_ITEM_H__ */ diff --git a/fs/xfs/xfs_iunlink_item.c b/fs/xfs/xfs_iunlink_item.c new file mode 100644 index 0000000000..43005ce8bd --- /dev/null +++ b/fs/xfs/xfs_iunlink_item.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020-2022, Red Hat, Inc. + * All Rights Reserved. + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_inode.h" +#include "xfs_trans.h" +#include "xfs_trans_priv.h" +#include "xfs_ag.h" +#include "xfs_iunlink_item.h" +#include "xfs_trace.h" +#include "xfs_error.h" + +struct kmem_cache *xfs_iunlink_cache; + +static inline struct xfs_iunlink_item *IUL_ITEM(struct xfs_log_item *lip) +{ + return container_of(lip, struct xfs_iunlink_item, item); +} + +static void +xfs_iunlink_item_release( + struct xfs_log_item *lip) +{ + struct xfs_iunlink_item *iup = IUL_ITEM(lip); + + xfs_perag_put(iup->pag); + kmem_cache_free(xfs_iunlink_cache, IUL_ITEM(lip)); +} + + +static uint64_t +xfs_iunlink_item_sort( + struct xfs_log_item *lip) +{ + return IUL_ITEM(lip)->ip->i_ino; +} + +/* + * Look up the inode cluster buffer and log the on-disk unlinked inode change + * we need to make. + */ +static int +xfs_iunlink_log_dinode( + struct xfs_trans *tp, + struct xfs_iunlink_item *iup) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_inode *ip = iup->ip; + struct xfs_dinode *dip; + struct xfs_buf *ibp; + int offset; + int error; + + error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp); + if (error) + return error; + /* + * Don't log the unlinked field on stale buffers as this may be the + * transaction that frees the inode cluster and relogging the buffer + * here will incorrectly remove the stale state. + */ + if (ibp->b_flags & XBF_STALE) + goto out; + + dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset); + + /* Make sure the old pointer isn't garbage. 
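+	 * The on-disk di_next_unlinked must still match the value we cached
+	 * when the inode went onto the unlinked list (iup->old_agino);
+	 * anything else means the list is corrupt.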
+	 */
+	if (be32_to_cpu(dip->di_next_unlinked) != iup->old_agino) {
+		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+					sizeof(*dip), __this_address);
+		error = -EFSCORRUPTED;
+		goto out;
+	}
+
+	trace_xfs_iunlink_update_dinode(mp, iup->pag->pag_agno,
+			XFS_INO_TO_AGINO(mp, ip->i_ino),
+			be32_to_cpu(dip->di_next_unlinked), iup->next_agino);
+
+	dip->di_next_unlinked = cpu_to_be32(iup->next_agino);
+	offset = ip->i_imap.im_boffset +
+			offsetof(struct xfs_dinode, di_next_unlinked);
+
+	xfs_dinode_calc_crc(mp, dip);
+	xfs_trans_inode_buf(tp, ibp);
+	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
+	return 0;
+out:
+	xfs_trans_brelse(tp, ibp);
+	return error;
+}
+
+/*
+ * On precommit, we grab the inode cluster buffer for the inode number we were
+ * passed, then update the next unlinked field for that inode in the buffer and
+ * log the buffer. This ensures that the inode cluster buffer was logged in the
+ * correct order w.r.t. other inode cluster buffers. We can then remove the
+ * iunlink item from the transaction and release it, as it has now served its
+ * purpose.
+ */
+static int
+xfs_iunlink_item_precommit(
+	struct xfs_trans	*tp,
+	struct xfs_log_item	*lip)
+{
+	struct xfs_iunlink_item	*iup = IUL_ITEM(lip);
+	int			error;
+
+	error = xfs_iunlink_log_dinode(tp, iup);
+	list_del(&lip->li_trans);
+	xfs_iunlink_item_release(lip);
+	return error;
+}
+
+static const struct xfs_item_ops xfs_iunlink_item_ops = {
+	.iop_release	= xfs_iunlink_item_release,
+	.iop_sort	= xfs_iunlink_item_sort,
+	.iop_precommit	= xfs_iunlink_item_precommit,
+};
+
+
+/*
+ * Log the change to the inode's on-disk unlinked list pointer: record the
+ * new next_agino value and the old value it replaces.
+ *
+ * This joins the item to the transaction and marks it dirty so
+ * that we don't need a separate call to do this, nor does the
+ * caller need to know anything about the iunlink item.
+ */
+int
+xfs_iunlink_log_inode(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
+	xfs_agino_t		next_agino)
+{
+	struct xfs_mount	*mp = tp->t_mountp;
+	struct xfs_iunlink_item	*iup;
+
+	ASSERT(xfs_verify_agino_or_null(pag, next_agino));
+	ASSERT(xfs_verify_agino_or_null(pag, ip->i_next_unlinked));
+
+	/*
+	 * Since we're updating a linked list, we should never find that the
+	 * current pointer is the same as the new value, unless we're
+	 * terminating the list.
+	 */
+	if (ip->i_next_unlinked == next_agino) {
+		if (next_agino != NULLAGINO)
+			return -EFSCORRUPTED;
+		return 0;
+	}
+
+	iup = kmem_cache_zalloc(xfs_iunlink_cache, GFP_KERNEL | __GFP_NOFAIL);
+	xfs_log_item_init(mp, &iup->item, XFS_LI_IUNLINK,
+			  &xfs_iunlink_item_ops);
+
+	iup->ip = ip;
+	iup->next_agino = next_agino;
+	iup->old_agino = ip->i_next_unlinked;
+
+	atomic_inc(&pag->pag_ref);
+	iup->pag = pag;
+
+	xfs_trans_add_item(tp, &iup->item);
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	set_bit(XFS_LI_DIRTY, &iup->item.li_flags);
+	return 0;
+}
+
diff --git a/fs/xfs/xfs_iunlink_item.h b/fs/xfs/xfs_iunlink_item.h
new file mode 100644
index 0000000000..c793cdcacc
--- /dev/null
+++ b/fs/xfs/xfs_iunlink_item.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020-2022, Red Hat, Inc.
+ * All Rights Reserved.
+ */
+#ifndef XFS_IUNLINK_ITEM_H
+#define XFS_IUNLINK_ITEM_H	1
+
+struct xfs_trans;
+struct xfs_inode;
+struct xfs_perag;
+
+/* in memory log item structure */
+struct xfs_iunlink_item {
+	struct xfs_log_item	item;
+	struct xfs_inode	*ip;
+	struct xfs_perag	*pag;
+	xfs_agino_t		next_agino;
+	xfs_agino_t		old_agino;
+};
+
+extern struct kmem_cache *xfs_iunlink_cache;
+
+int xfs_iunlink_log_inode(struct xfs_trans *tp, struct xfs_inode *ip,
+			struct xfs_perag *pag, xfs_agino_t next_agino);
+
+#endif	/* XFS_IUNLINK_ITEM_H */
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
new file mode 100644
index 0000000000..69d9c83ea4
--- /dev/null
+++ b/fs/xfs/xfs_notify_failure.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Fujitsu. All Rights Reserved.
+ */
+
+#include "xfs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_alloc.h"
+#include "xfs_bit.h"
+#include "xfs_btree.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_rtalloc.h"
+#include "xfs_trans.h"
+#include "xfs_ag.h"
+
+#include <linux/mm.h>
+#include <linux/dax.h>
+
+struct failure_info {
+	xfs_agblock_t		startblock;
+	xfs_extlen_t		blockcount;
+	int			mf_flags;
+};
+
+static pgoff_t
+xfs_failure_pgoff(
+	struct xfs_mount		*mp,
+	const struct xfs_rmap_irec	*rec,
+	const struct failure_info	*notify)
+{
+	loff_t				pos = XFS_FSB_TO_B(mp, rec->rm_offset);
+
+	if (notify->startblock > rec->rm_startblock)
+		pos += XFS_FSB_TO_B(mp,
+				notify->startblock - rec->rm_startblock);
+	return pos >> PAGE_SHIFT;
+}
+
+static unsigned long
+xfs_failure_pgcnt(
+	struct xfs_mount		*mp,
+	const struct xfs_rmap_irec	*rec,
+	const struct failure_info	*notify)
+{
+	xfs_agblock_t			end_rec;
+	xfs_agblock_t			end_notify;
+	xfs_agblock_t			start_cross;
+	xfs_agblock_t			end_cross;
+
+	start_cross = max(rec->rm_startblock, notify->startblock);
+
+	end_rec = rec->rm_startblock + rec->rm_blockcount;
+	end_notify = notify->startblock + notify->blockcount;
+	end_cross = min(end_rec, end_notify);
+
+	return XFS_FSB_TO_B(mp, end_cross - start_cross) >> PAGE_SHIFT;
+}
+
+static int
+xfs_dax_failure_fn(
+	struct xfs_btree_cur		*cur,
+	const struct xfs_rmap_irec	*rec,
+	void				*data)
+{
+	struct xfs_mount		*mp = cur->bc_mp;
+	struct xfs_inode		*ip;
+	struct failure_info		*notify = data;
+	int				error = 0;
+
+	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) ||
+	    (rec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))) {
+		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+		return -EFSCORRUPTED;
+	}
+
+	/*
+	 * Get files that are incore and filter out others that are not
+	 * in use.
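+	 * XFS_IGET_INCORE only looks up inodes that are already cached in
+	 * memory, so there is no mapping to tear down for an uncached owner;
+	 * the -ENODATA case below simply continues the rmap query.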
+	 */
+	error = xfs_iget(mp, cur->bc_tp, rec->rm_owner, XFS_IGET_INCORE,
+			0, &ip);
+	/* Continue the rmap query if the inode isn't incore */
+	if (error == -ENODATA)
+		return 0;
+	if (error)
+		return error;
+
+	error = mf_dax_kill_procs(VFS_I(ip)->i_mapping,
+				  xfs_failure_pgoff(mp, rec, notify),
+				  xfs_failure_pgcnt(mp, rec, notify),
+				  notify->mf_flags);
+	xfs_irele(ip);
+	return error;
+}
+
+static int
+xfs_dax_notify_ddev_failure(
+	struct xfs_mount	*mp,
+	xfs_daddr_t		daddr,
+	xfs_daddr_t		bblen,
+	int			mf_flags)
+{
+	struct xfs_trans	*tp = NULL;
+	struct xfs_btree_cur	*cur = NULL;
+	struct xfs_buf		*agf_bp = NULL;
+	int			error = 0;
+	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, daddr);
+	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, fsbno);
+	xfs_fsblock_t		end_fsbno = XFS_DADDR_TO_FSB(mp, daddr + bblen);
+	xfs_agnumber_t		end_agno = XFS_FSB_TO_AGNO(mp, end_fsbno);
+
+	error = xfs_trans_alloc_empty(mp, &tp);
+	if (error)
+		return error;
+
+	for (; agno <= end_agno; agno++) {
+		struct xfs_rmap_irec	ri_low = { };
+		struct xfs_rmap_irec	ri_high;
+		struct failure_info	notify;
+		struct xfs_agf		*agf;
+		xfs_agblock_t		agend;
+		struct xfs_perag	*pag;
+
+		pag = xfs_perag_get(mp, agno);
+		error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
+		if (error) {
+			xfs_perag_put(pag);
+			break;
+		}
+
+		cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag);
+
+		/*
+		 * Set the rmap range from ri_low to ri_high, which represents
+		 * a [start, end] where we are looking for the files or
+		 * metadata.
+		 */
+		memset(&ri_high, 0xFF, sizeof(ri_high));
+		ri_low.rm_startblock = XFS_FSB_TO_AGBNO(mp, fsbno);
+		if (agno == end_agno)
+			ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno);
+
+		agf = agf_bp->b_addr;
+		agend = min(be32_to_cpu(agf->agf_length),
+				ri_high.rm_startblock);
+		notify.startblock = ri_low.rm_startblock;
+		notify.blockcount = agend - ri_low.rm_startblock;
+
+		error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
+				xfs_dax_failure_fn, &notify);
+		xfs_btree_del_cursor(cur, error);
+		xfs_trans_brelse(tp, agf_bp);
+		xfs_perag_put(pag);
+		if (error)
+			break;
+
+		fsbno = XFS_AGB_TO_FSB(mp, agno + 1, 0);
+	}
+
+	xfs_trans_cancel(tp);
+	return error;
+}
+
+static int
+xfs_dax_notify_failure(
+	struct dax_device	*dax_dev,
+	u64			offset,
+	u64			len,
+	int			mf_flags)
+{
+	struct xfs_mount	*mp = dax_holder(dax_dev);
+	u64			ddev_start;
+	u64			ddev_end;
+
+	if (!(mp->m_sb.sb_flags & SB_BORN)) {
+		xfs_warn(mp, "filesystem is not ready for notify_failure()!");
+		return -EIO;
+	}
+
+	if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) {
+		xfs_warn(mp,
+			 "notify_failure() not supported on realtime device!");
+		return -EOPNOTSUPP;
+	}
+
+	if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev &&
+	    mp->m_logdev_targp != mp->m_ddev_targp) {
+		xfs_err(mp, "ondisk log corrupt, shutting down fs!");
+		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+		return -EFSCORRUPTED;
+	}
+
+	if (!xfs_has_rmapbt(mp)) {
+		xfs_warn(mp, "notify_failure() needs rmapbt enabled!");
+		return -EOPNOTSUPP;
+	}
+
+	ddev_start = mp->m_ddev_targp->bt_dax_part_off;
+	ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
+
+	/* Ignore the range out of filesystem area */
+	if (offset + len - 1 < ddev_start)
+		return -ENXIO;
+	if (offset > ddev_end)
+		return -ENXIO;
+
+	/* Calculate the real range when it touches the boundary */
+	if (offset > ddev_start)
+		offset -= ddev_start;
+	else {
+		len -= ddev_start - offset;
+		offset = 0;
+	}
+	if (offset + len - 1 > ddev_end)
+		len = ddev_end - offset + 1;
+
+	return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), 
BTOBB(len),
+			mf_flags);
+}
+
+const struct dax_holder_operations xfs_dax_holder_operations = {
+	.notify_failure		= xfs_dax_notify_failure,
+};
diff --git a/fs/xfs/xfs_xattr.h b/fs/xfs/xfs_xattr.h
new file mode 100644
index 0000000000..2b09133b1b
--- /dev/null
+++ b/fs/xfs/xfs_xattr.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#ifndef __XFS_XATTR_H__
+#define __XFS_XATTR_H__
+
+int xfs_attr_change(struct xfs_da_args *args);
+
+extern const struct xattr_handler *xfs_xattr_handlers[];
+
+#endif /* __XFS_XATTR_H__ */
diff --git a/fs/zonefs/sysfs.c b/fs/zonefs/sysfs.c
new file mode 100644
index 0000000000..9cb6755ce3
--- /dev/null
+++ b/fs/zonefs/sysfs.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple file system for zoned block devices exposing zones as files.
+ *
+ * Copyright (C) 2022 Western Digital Corporation or its affiliates.
+ */
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+
+#include "zonefs.h"
+
+struct zonefs_sysfs_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct zonefs_sb_info *sbi, char *buf);
+};
+
+static inline struct zonefs_sysfs_attr *to_attr(struct attribute *attr)
+{
+	return container_of(attr, struct zonefs_sysfs_attr, attr);
+}
+
+#define ZONEFS_SYSFS_ATTR_RO(name) \
+static struct zonefs_sysfs_attr zonefs_sysfs_attr_##name = __ATTR_RO(name)
+
+#define ATTR_LIST(name) &zonefs_sysfs_attr_##name.attr
+
+static ssize_t zonefs_sysfs_attr_show(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
+{
+	struct zonefs_sb_info *sbi =
+		container_of(kobj, struct zonefs_sb_info, s_kobj);
+	struct zonefs_sysfs_attr *zonefs_attr =
+		container_of(attr, struct zonefs_sysfs_attr, attr);
+
+	if (!zonefs_attr->show)
+		return 0;
+
+	return zonefs_attr->show(sbi, buf);
+}
+
+static ssize_t max_wro_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", sbi->s_max_wro_seq_files);
+}
+ZONEFS_SYSFS_ATTR_RO(max_wro_seq_files);
+
+static ssize_t nr_wro_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+	return sysfs_emit(buf, "%d\n", atomic_read(&sbi->s_wro_seq_files));
+}
+ZONEFS_SYSFS_ATTR_RO(nr_wro_seq_files);
+
+static ssize_t max_active_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", sbi->s_max_active_seq_files);
+}
+ZONEFS_SYSFS_ATTR_RO(max_active_seq_files);
+
+static ssize_t nr_active_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+	return sysfs_emit(buf, "%d\n", atomic_read(&sbi->s_active_seq_files));
+}
+ZONEFS_SYSFS_ATTR_RO(nr_active_seq_files);
+
+static struct attribute *zonefs_sysfs_attrs[] = {
+	ATTR_LIST(max_wro_seq_files),
+	ATTR_LIST(nr_wro_seq_files),
+	ATTR_LIST(max_active_seq_files),
+	ATTR_LIST(nr_active_seq_files),
+	NULL,
+};
+ATTRIBUTE_GROUPS(zonefs_sysfs);
+
+static void zonefs_sysfs_sb_release(struct kobject *kobj)
+{
+	struct zonefs_sb_info *sbi =
+		container_of(kobj, struct zonefs_sb_info, s_kobj);
+
+	complete(&sbi->s_kobj_unregister);
+}
+
+static const struct sysfs_ops zonefs_sysfs_attr_ops = {
+	.show	= zonefs_sysfs_attr_show,
+};
+
+static struct kobj_type zonefs_sb_ktype = {
+	.default_groups = zonefs_sysfs_groups,
+	.sysfs_ops	= &zonefs_sysfs_attr_ops,
+	.release	= zonefs_sysfs_sb_release,
+};
+
+static struct kobject *zonefs_sysfs_root;
+
+int zonefs_sysfs_register(struct super_block *sb)
+{
+	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+	int ret;
+
+	init_completion(&sbi->s_kobj_unregister);
+	ret = kobject_init_and_add(&sbi->s_kobj, &zonefs_sb_ktype, 
+				   zonefs_sysfs_root, "%s", sb->s_id);
+	if (ret) {
+		kobject_put(&sbi->s_kobj);
+		wait_for_completion(&sbi->s_kobj_unregister);
+		return ret;
+	}
+
+	sbi->s_sysfs_registered = true;
+
+	return 0;
+}
+
+void zonefs_sysfs_unregister(struct super_block *sb)
+{
+	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+	if (!sbi || !sbi->s_sysfs_registered)
+		return;
+
+	kobject_del(&sbi->s_kobj);
+	kobject_put(&sbi->s_kobj);
+	wait_for_completion(&sbi->s_kobj_unregister);
+}
+
+int __init zonefs_sysfs_init(void)
+{
+	zonefs_sysfs_root = kobject_create_and_add("zonefs", fs_kobj);
+	if (!zonefs_sysfs_root)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void zonefs_sysfs_exit(void)
+{
+	kobject_put(zonefs_sysfs_root);
+	zonefs_sysfs_root = NULL;
+}
diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h
new file mode 100644
index 0000000000..3cd7f980cf
--- /dev/null
+++ b/include/asm-generic/archrandom.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ARCHRANDOM_H__
+#define __ASM_GENERIC_ARCHRANDOM_H__
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+	return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+	return 0;
+}
+
+#endif
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 0000000000..564a8c675d
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations, should not be used in regular code
+ * directly.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p |= mask;
+}
+
+/**
+ * generic___clear_bit - Clear a bit in memory
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock. 
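+ *
+ * For illustration, two racing callers can both observe the bit clear
+ * (hypothetical interleaving):
+ *
+ *	CPU0: old = *p;			- sees the bit clear
+ *	CPU1: old = *p;			- also sees the bit clear
+ *	CPU0: *p = old | mask;		- returns "was clear"
+ *	CPU1: *p = old | mask;		- also returns "was clear", update lost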
+ */ +static __always_inline bool +generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * generic___test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __always_inline bool +generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static __always_inline bool +generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * generic_test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static __always_inline bool +generic_test_bit(unsigned long nr, const volatile unsigned long *addr) +{ + /* + * Unlike the bitops with the '__' prefix above, this one *is* atomic, + * so `volatile` must always stay here with no cast-aways. See + * `Documentation/atomic_bitops.txt` for the details. + */ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +/** + * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static __always_inline bool +generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) +{ + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1))); +} + +/* + * const_*() definitions provide good compile-time optimizations when + * the passed arguments can be resolved at compile time. + */ +#define const___set_bit generic___set_bit +#define const___clear_bit generic___clear_bit +#define const___change_bit generic___change_bit +#define const___test_and_set_bit generic___test_and_set_bit +#define const___test_and_clear_bit generic___test_and_clear_bit +#define const___test_and_change_bit generic___test_and_change_bit +#define const_test_bit_acquire generic_test_bit_acquire + +/** + * const_test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + * + * A version of generic_test_bit() which discards the `volatile` qualifier to + * allow a compiler to optimize code harder. Non-atomic and to be called only + * for testing compile-time constants, e.g. by the corresponding macros, not + * directly from "regular" code. 
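+ *
+ * For example, a dispatch wrapper could pick the variant like this
+ * (simplified sketch; the real selection macro lives in linux/bitops.h,
+ * not in this file):
+ *
+ *	#define test_bit(nr, addr)					\
+ *		(__builtin_constant_p(nr) && __builtin_constant_p(addr) ? \
+ *		 const_test_bit(nr, addr) : _test_bit(nr, addr))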
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long val = *p;
+
+	return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h
new file mode 100644
index 0000000000..0ddc78dfc3
--- /dev/null
+++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define ___set_bit		arch___set_bit
+#define ___clear_bit		arch___clear_bit
+#define ___change_bit		arch___change_bit
+
+#define ___test_and_set_bit	arch___test_and_set_bit
+#define ___test_and_clear_bit	arch___test_and_clear_bit
+#define ___test_and_change_bit	arch___test_and_change_bit
+
+#define _test_bit		arch_test_bit
+#define _test_bit_acquire	arch_test_bit_acquire
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
new file mode 100644
index 0000000000..8962bb7309
--- /dev/null
+++ b/include/asm-generic/spinlock_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
+#define __ASM_GENERIC_SPINLOCK_TYPES_H
+
+#include <linux/types.h>
+typedef atomic_t arch_spinlock_t;
+
+/*
+ * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the
+ * include.
+ */
+#include <asm/qrwlock_types.h>
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	ATOMIC_INIT(0)
+
+#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
diff --git a/include/clocksource/timer-goldfish.h b/include/clocksource/timer-goldfish.h
new file mode 100644
index 0000000000..05a3a4f610
--- /dev/null
+++ b/include/clocksource/timer-goldfish.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * goldfish-timer clocksource
+ * Registers definition for the goldfish-timer device
+ */
+
+#ifndef _CLOCKSOURCE_TIMER_GOLDFISH_H
+#define _CLOCKSOURCE_TIMER_GOLDFISH_H
+
+/*
+ * TIMER_TIME_LOW		get low bits of current time and update TIMER_TIME_HIGH
+ * TIMER_TIME_HIGH		get high bits of time at last TIMER_TIME_LOW read
+ * TIMER_ALARM_LOW		set low bits of alarm and activate it
+ * TIMER_ALARM_HIGH		set high bits of next alarm
+ * TIMER_IRQ_ENABLED		enable alarm interrupt
+ * TIMER_CLEAR_ALARM		disarm an existing alarm
+ * TIMER_ALARM_STATUS		alarm status (running or not)
+ * TIMER_CLEAR_INTERRUPT	clear interrupt
+ */
+#define TIMER_TIME_LOW		0x00
+#define TIMER_TIME_HIGH		0x04
+#define TIMER_ALARM_LOW		0x08
+#define TIMER_ALARM_HIGH	0x0c
+#define TIMER_IRQ_ENABLED	0x10
+#define TIMER_CLEAR_ALARM	0x14
+#define TIMER_ALARM_STATUS	0x18
+#define TIMER_CLEAR_INTERRUPT	0x1c
+
+extern int goldfish_timer_init(int irq, void __iomem *base);
+
+#endif /* _CLOCKSOURCE_TIMER_GOLDFISH_H */
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
new file mode 100644
index 0000000000..c0f56fe6d2
--- /dev/null
+++ b/include/clocksource/timer-xilinx.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Sean Anderson
+ */
+
+#ifndef XILINX_TIMER_H
+#define XILINX_TIMER_H
+
+#include <linux/compiler.h>
+
+#define TCSR0	0x00
+#define TLR0	0x04
+#define TCR0	0x08
+#define TCSR1	0x10
+#define TLR1	0x14
+#define TCR1	0x18
+
+#define TCSR_MDT	BIT(0)
+#define 
TCSR_UDT BIT(1) +#define TCSR_GENT BIT(2) +#define TCSR_CAPT BIT(3) +#define TCSR_ARHT BIT(4) +#define TCSR_LOAD BIT(5) +#define TCSR_ENIT BIT(6) +#define TCSR_ENT BIT(7) +#define TCSR_TINT BIT(8) +#define TCSR_PWMA BIT(9) +#define TCSR_ENALL BIT(10) +#define TCSR_CASC BIT(11) + +struct clk; +struct device_node; +struct regmap; + +/** + * struct xilinx_timer_priv - Private data for Xilinx AXI timer drivers + * @map: Regmap of the device, possibly with an offset + * @clk: Parent clock + * @max: Maximum value of the counters + */ +struct xilinx_timer_priv { + struct regmap *map; + struct clk *clk; + u32 max; +}; + +/** + * xilinx_timer_tlr_cycles() - Calculate the TLR for a period specified + * in clock cycles + * @priv: The timer's private data + * @tcsr: The value of the TCSR register for this counter + * @cycles: The number of cycles in this period + * + * Callers of this function MUST ensure that @cycles is representable as + * a TLR. + * + * Return: The calculated value for TLR + */ +u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr, + u64 cycles); + +/** + * xilinx_timer_get_period() - Get the current period of a counter + * @priv: The timer's private data + * @tlr: The value of TLR for this counter + * @tcsr: The value of TCSR for this counter + * + * Return: The period, in ns + */ +unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv, + u32 tlr, u32 tcsr); + +#endif /* XILINX_TIMER_H */ diff --git a/include/crypto/aria.h b/include/crypto/aria.h new file mode 100644 index 0000000000..4a86661788 --- /dev/null +++ b/include/crypto/aria.h @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * ARIA Cipher Algorithm. + * + * Documentation of ARIA can be found in RFC 5794. + * Copyright (c) 2022 Taehee Yoo + * Copyright (c) 2022 Taehee Yoo + * + * Information for ARIA + * http://210.104.33.10/ARIA/index-e.html (English) + * http://seed.kisa.or.kr/ (Korean) + * + * Public domain version is distributed above. 
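+ *
+ * ARIA uses 12, 14 or 16 rounds for 128-, 192- and 256-bit keys
+ * respectively (RFC 5794), and needs rounds + 1 round keys; hence
+ * ARIA_MAX_RD_KEYS below is 17.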
+ */ + +#ifndef _CRYPTO_ARIA_H +#define _CRYPTO_ARIA_H + +#include +#include +#include +#include +#include +#include + +#define ARIA_MIN_KEY_SIZE 16 +#define ARIA_MAX_KEY_SIZE 32 +#define ARIA_BLOCK_SIZE 16 +#define ARIA_MAX_RD_KEYS 17 +#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32)) + +struct aria_ctx { + int key_length; + int rounds; + u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; + u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; +}; + +static const u32 key_rc[5][4] = { + { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, + { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 }, + { 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e }, + { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, + { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 } +}; + +static const u32 s1[256] = { + 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b, + 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5, + 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b, + 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676, + 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d, + 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0, + 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf, + 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0, + 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626, + 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc, + 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1, + 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515, + 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3, + 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a, + 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2, + 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575, + 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a, + 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0, + 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3, + 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484, + 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed, + 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b, + 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939, + 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf, + 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb, + 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585, + 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f, + 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8, + 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f, + 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5, + 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121, + 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2, + 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec, + 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717, + 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d, + 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373, + 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc, + 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888, + 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414, + 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb, + 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a, + 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c, + 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262, + 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979, + 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d, + 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9, + 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea, + 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808, + 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e, + 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6, + 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f, + 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a, + 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666, + 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e, + 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9, + 
0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e, + 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111, + 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494, + 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9, + 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf, + 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d, + 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868, + 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f, + 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616 +}; + +static const u32 s2[256] = { + 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc, + 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc, + 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646, + 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1, + 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb, + 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b, + 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303, + 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1, + 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b, + 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969, + 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae, + 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb, + 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb, + 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4, + 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa, + 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb, + 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929, + 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191, + 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595, + 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd, + 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838, + 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828, + 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7, + 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353, + 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131, + 0x36003636, 0x21002121, 0x58005858, 0x48004848, + 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474, + 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1, + 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7, + 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626, + 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9, + 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040, + 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd, + 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404, + 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f, + 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc, + 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757, + 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565, + 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e, + 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5, + 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717, + 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a, + 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767, + 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343, + 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5, + 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434, + 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a, + 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8, + 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b, + 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6, + 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424, + 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada, + 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef, + 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f, + 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a, + 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c, + 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333, + 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3, + 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0, + 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d, + 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5, + 0x3d003d3d, 
0x22002222, 0xb300b3b3, 0xf800f8f8, + 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a, + 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181 +}; + +static const u32 x1[256] = { + 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5, + 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038, + 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e, + 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb, + 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082, + 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087, + 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044, + 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb, + 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032, + 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d, + 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b, + 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e, + 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066, + 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2, + 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049, + 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025, + 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064, + 0x86860086, 0x68680068, 0x98980098, 0x16160016, + 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc, + 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092, + 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050, + 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da, + 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057, + 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084, + 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000, + 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a, + 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005, + 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006, + 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f, + 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002, + 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003, + 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b, + 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041, + 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea, + 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce, + 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073, + 0x96960096, 0xacac00ac, 0x74740074, 0x22220022, + 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085, + 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8, + 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e, + 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071, + 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089, + 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e, + 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b, + 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b, + 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020, + 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe, + 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4, + 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033, + 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031, + 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059, + 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f, + 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9, + 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d, + 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f, + 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef, + 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d, + 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0, + 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c, + 0x83830083, 0x53530053, 0x99990099, 0x61610061, + 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e, + 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026, + 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063, + 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d +}; + +static const u32 x2[256] = { + 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00, + 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800, + 0x50505000, 0x39393900, 0xdbdbdb00, 
0xe1e1e100, + 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00, + 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00, + 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300, + 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600, + 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00, + 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900, + 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600, + 0x57575700, 0x43434300, 0x56565600, 0x17171700, + 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00, + 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300, + 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00, + 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800, + 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00, + 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00, + 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800, + 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300, + 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00, + 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00, + 0x02020200, 0x24242400, 0x75757500, 0x93939300, + 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200, + 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00, + 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00, + 0x12121200, 0x97979700, 0x32323200, 0xababab00, + 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300, + 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900, + 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100, + 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900, + 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600, + 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100, + 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500, + 0x86868600, 0x36363600, 0xbebebe00, 0x61616100, + 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00, + 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00, + 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00, + 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500, + 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00, + 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700, + 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00, + 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000, + 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100, + 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00, + 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600, + 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000, + 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00, + 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500, + 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500, + 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00, + 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300, + 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500, + 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00, + 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300, + 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900, + 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00, + 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400, + 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00, + 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00, + 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300, + 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700, + 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00, + 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300, + 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000 +}; + +static inline u32 rotl32(u32 v, u32 r) +{ + return ((v << r) | (v >> (32 - r))); +} + +static inline u32 rotr32(u32 v, u32 r) +{ + return ((v >> r) | (v << (32 - r))); +} + +static inline u32 bswap32(u32 v) +{ + return ((v << 24) ^ + (v >> 24) ^ + ((v & 0x0000ff00) << 8) ^ + ((v & 0x00ff0000) >> 8)); +} + +static inline u8 get_u8(u32 x, u32 y) +{ + return (x >> ((3 - y) * 8)); +} + +static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3) +{ + return ((u32)v0 << 24) | ((u32)v1 << 16) 
| ((u32)v2 << 8) | ((u32)v3);
+}
+
+static inline u32 aria_m(u32 t0)
+{
+ return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16);
+}
+
+/* S-Box Layer 1 + M */
+static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = s1[get_u8(*t0, 0)] ^
+ s2[get_u8(*t0, 1)] ^
+ x1[get_u8(*t0, 2)] ^
+ x2[get_u8(*t0, 3)];
+ *t1 = s1[get_u8(*t1, 0)] ^
+ s2[get_u8(*t1, 1)] ^
+ x1[get_u8(*t1, 2)] ^
+ x2[get_u8(*t1, 3)];
+ *t2 = s1[get_u8(*t2, 0)] ^
+ s2[get_u8(*t2, 1)] ^
+ x1[get_u8(*t2, 2)] ^
+ x2[get_u8(*t2, 3)];
+ *t3 = s1[get_u8(*t3, 0)] ^
+ s2[get_u8(*t3, 1)] ^
+ x1[get_u8(*t3, 2)] ^
+ x2[get_u8(*t3, 3)];
+}
+
+/* S-Box Layer 2 + M */
+static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = x1[get_u8(*t0, 0)] ^
+ x2[get_u8(*t0, 1)] ^
+ s1[get_u8(*t0, 2)] ^
+ s2[get_u8(*t0, 3)];
+ *t1 = x1[get_u8(*t1, 0)] ^
+ x2[get_u8(*t1, 1)] ^
+ s1[get_u8(*t1, 2)] ^
+ s2[get_u8(*t1, 3)];
+ *t2 = x1[get_u8(*t2, 0)] ^
+ x2[get_u8(*t2, 1)] ^
+ s1[get_u8(*t2, 2)] ^
+ s2[get_u8(*t2, 3)];
+ *t3 = x1[get_u8(*t3, 0)] ^
+ x2[get_u8(*t3, 1)] ^
+ s1[get_u8(*t3, 2)] ^
+ s2[get_u8(*t3, 3)];
+}
+
+/* Word-level diffusion */
+static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 ^= *t2;
+ *t2 ^= *t3;
+ *t0 ^= *t1;
+
+ *t3 ^= *t1;
+ *t2 ^= *t0;
+ *t1 ^= *t2;
+}
+
+/* Byte-level diffusion */
+static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff);
+ *t2 = rotr32(*t2, 16);
+ *t3 = bswap32(*t3);
+}
+
+/* Key XOR Layer */
+static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 ^= rk[0];
+ *t1 ^= rk[1];
+ *t2 ^= rk[2];
+ *t3 ^= rk[3];
+}
+
+/* Odd round Substitution & Diffusion */
+static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Even round Substitution & Diffusion */
+static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t3, t0, t1);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Q, R Macro expanded ARIA GSRK */
+static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)
+{
+ int q = 4 - (n / 32);
+ int r = n % 32;
+
+ rk[0] = (x[0]) ^
+ ((y[q % 4]) >> r) ^
+ ((y[(q + 3) % 4]) << (32 - r));
+ rk[1] = (x[1]) ^
+ ((y[(q + 1) % 4]) >> r) ^
+ ((y[q % 4]) << (32 - r));
+ rk[2] = (x[2]) ^
+ ((y[(q + 2) % 4]) >> r) ^
+ ((y[(q + 1) % 4]) << (32 - r));
+ rk[3] = (x[3]) ^
+ ((y[(q + 3) % 4]) >> r) ^
+ ((y[(q + 2) % 4]) << (32 - r));
+}
+
+#endif
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
new file mode 100644
index 0000000000..1d630f371f
--- /dev/null
+++ b/include/crypto/polyval.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Polyval hash algorithm
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#ifndef _CRYPTO_POLYVAL_H
+#define _CRYPTO_POLYVAL_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLYVAL_BLOCK_SIZE 16
+#define POLYVAL_DIGEST_SIZE 16
+
+void polyval_mul_non4k(u8 *op1, const u8 *op2);
+
+void polyval_update_non4k(const u8 *key, const u8 *in,
+ size_t nblocks, u8 *accumulator);
+
+#endif
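The POLYVAL interface above is a bare accumulate-in-place primitive: the accumulator is a 16-byte buffer that doubles as the digest and is updated one block at a time. A minimal usage sketch (illustrative only, not part of the patch; the helper name is ours, and we assume a whole-block message and an accumulator that starts zeroed, per the POLYVAL definition):

#include <linux/string.h>
#include <crypto/polyval.h>

/* Hypothetical helper: hash nblocks 16-byte blocks of msg under key. */
static void polyval_hash_sketch(const u8 key[POLYVAL_BLOCK_SIZE],
                                const u8 *msg, size_t nblocks,
                                u8 digest[POLYVAL_DIGEST_SIZE])
{
	/* The accumulator is updated in place and becomes the digest. */
	memset(digest, 0, POLYVAL_DIGEST_SIZE);
	polyval_update_non4k(key, msg, nblocks, digest);
}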
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
new file mode 100644
index 0000000000..9e3aff7e68
--- /dev/null
+++ b/include/drm/display/drm_dp.h
@@ -0,0 +1,1690 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_H_
+#define _DRM_DP_H_
+
+#include <linux/types.h>
+
+/*
+ * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
+ * DP and DPCD versions are independent. Differences from 1.0 are not noted,
+ * 1.0 devices basically don't exist in the wild.
+ *
+ * Abbreviations, in chronological order:
+ *
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
+ * MST: Multistream Transport - part of DP 1.2a
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
+
+/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */
+#define DP_MSA_MISC_SYNC_CLOCK (1 << 0)
+#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8)
+#define DP_MSA_MISC_STEREO_NO_3D (0 << 9)
+#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9)
+#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9)
+/* bits per component for non-RAW */
+#define DP_MSA_MISC_6_BPC (0 << 5)
+#define DP_MSA_MISC_8_BPC (1 << 5)
+#define DP_MSA_MISC_10_BPC (2 << 5)
+#define DP_MSA_MISC_12_BPC (3 << 5)
+#define DP_MSA_MISC_16_BPC (4 << 5)
+/* bits per component for RAW */
+#define DP_MSA_MISC_RAW_6_BPC (1 << 5)
+#define DP_MSA_MISC_RAW_7_BPC (2 << 5)
+#define DP_MSA_MISC_RAW_8_BPC (3 << 5)
+#define DP_MSA_MISC_RAW_10_BPC (4 << 5)
+#define DP_MSA_MISC_RAW_12_BPC (5 << 5)
+#define DP_MSA_MISC_RAW_14_BPC (6 << 5)
+#define DP_MSA_MISC_RAW_16_BPC (7 << 5)
+/* pixel encoding/colorimetry format */
+#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \
+ ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1))
+#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1)
+#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1) +#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0) +#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1) +#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1) +#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0) +#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1) +#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14) + +#define DP_AUX_MAX_PAYLOAD_BYTES 16 + +#define DP_AUX_I2C_WRITE 0x0 +#define DP_AUX_I2C_READ 0x1 +#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2 +#define DP_AUX_I2C_MOT 0x4 +#define DP_AUX_NATIVE_WRITE 0x8 +#define DP_AUX_NATIVE_READ 0x9 + +#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0) +#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0) +#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0) +#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0) + +#define DP_AUX_I2C_REPLY_ACK (0x0 << 2) +#define DP_AUX_I2C_REPLY_NACK (0x1 << 2) +#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2) +#define DP_AUX_I2C_REPLY_MASK (0x3 << 2) + +/* DPCD Field Address Mapping */ + +/* Receiver Capability */ +#define DP_DPCD_REV 0x000 +# define DP_DPCD_REV_10 0x10 +# define DP_DPCD_REV_11 0x11 +# define DP_DPCD_REV_12 0x12 +# define DP_DPCD_REV_13 0x13 +# define DP_DPCD_REV_14 0x14 + +#define DP_MAX_LINK_RATE 0x001 + +#define DP_MAX_LANE_COUNT 0x002 +# define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ +# define DP_ENHANCED_FRAME_CAP (1 << 7) + +#define DP_MAX_DOWNSPREAD 0x003 +# define DP_MAX_DOWNSPREAD_0_5 (1 << 0) +# define DP_STREAM_REGENERATION_STATUS_CAP (1 << 1) /* 2.0 */ +# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) +# define DP_TPS4_SUPPORTED (1 << 7) + +#define DP_NORP 0x004 + +#define DP_DOWNSTREAMPORT_PRESENT 0x005 +# define DP_DWN_STRM_PORT_PRESENT (1 << 0) +# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 +# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1) +# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1) +# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1) +# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1) +# define DP_FORMAT_CONVERSION (1 << 3) +# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ + +#define DP_MAIN_LINK_CHANNEL_CODING 0x006 +# define DP_CAP_ANSI_8B10B (1 << 0) +# define DP_CAP_ANSI_128B132B (1 << 1) /* 2.0 */ + +#define DP_DOWN_STREAM_PORT_COUNT 0x007 +# define DP_PORT_COUNT_MASK 0x0f +# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ +# define DP_OUI_SUPPORT (1 << 7) + +#define DP_RECEIVE_PORT_0_CAP_0 0x008 +# define DP_LOCAL_EDID_PRESENT (1 << 1) +# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) + +#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 + +#define DP_RECEIVE_PORT_1_CAP_0 0x00a +#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b + +#define DP_I2C_SPEED_CAP 0x00c /* DPI */ +# define DP_I2C_SPEED_1K 0x01 +# define DP_I2C_SPEED_5K 0x02 +# define DP_I2C_SPEED_10K 0x04 +# define DP_I2C_SPEED_100K 0x08 +# define DP_I2C_SPEED_400K 0x10 +# define DP_I2C_SPEED_1M 0x20 + +#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ +# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0) +# define DP_FRAMING_CHANGE_CAP (1 << 1) +# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ + +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? 
*/ +# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */ +# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */ + +#define DP_ADAPTER_CAP 0x00f /* 1.2 */ +# define DP_FORCE_LOAD_SENSE_CAP (1 << 0) +# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1) + +#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */ +# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */ + +/* Multiple stream transport */ +#define DP_FAUX_CAP 0x020 /* 1.2 */ +# define DP_FAUX_CAP_1 (1 << 0) + +#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 /* 2.0 */ +# define DP_FALLBACK_1024x768_60HZ_24BPP (1 << 0) +# define DP_FALLBACK_1280x720_60HZ_24BPP (1 << 1) +# define DP_FALLBACK_1920x1080_60HZ_24BPP (1 << 2) + +#define DP_MSTM_CAP 0x021 /* 1.2 */ +# define DP_MST_CAP (1 << 0) +# define DP_SINGLE_STREAM_SIDEBAND_MSG (1 << 1) /* 2.0 */ + +#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */ + +/* AV_SYNC_DATA_BLOCK 1.2 */ +#define DP_AV_GRANULARITY 0x023 +# define DP_AG_FACTOR_MASK (0xf << 0) +# define DP_AG_FACTOR_3MS (0 << 0) +# define DP_AG_FACTOR_2MS (1 << 0) +# define DP_AG_FACTOR_1MS (2 << 0) +# define DP_AG_FACTOR_500US (3 << 0) +# define DP_AG_FACTOR_200US (4 << 0) +# define DP_AG_FACTOR_100US (5 << 0) +# define DP_AG_FACTOR_10US (6 << 0) +# define DP_AG_FACTOR_1US (7 << 0) +# define DP_VG_FACTOR_MASK (0xf << 4) +# define DP_VG_FACTOR_3MS (0 << 4) +# define DP_VG_FACTOR_2MS (1 << 4) +# define DP_VG_FACTOR_1MS (2 << 4) +# define DP_VG_FACTOR_500US (3 << 4) +# define DP_VG_FACTOR_200US (4 << 4) +# define DP_VG_FACTOR_100US (5 << 4) + +#define DP_AUD_DEC_LAT0 0x024 +#define DP_AUD_DEC_LAT1 0x025 + +#define DP_AUD_PP_LAT0 0x026 +#define DP_AUD_PP_LAT1 0x027 + +#define DP_VID_INTER_LAT 0x028 + +#define DP_VID_PROG_LAT 0x029 + +#define DP_REP_LAT 0x02a + +#define DP_AUD_DEL_INS0 0x02b +#define DP_AUD_DEL_INS1 0x02c +#define DP_AUD_DEL_INS2 0x02d +/* End of AV_SYNC_DATA_BLOCK */ + +#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */ +# define DP_ALPM_CAP (1 << 0) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_CAP (1 << 0) + +#define DP_GUID 0x030 /* 1.2 */ + +#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */ +# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0) + +#define DP_DSC_REV 0x061 +# define DP_DSC_MAJOR_MASK (0xf << 0) +# define DP_DSC_MINOR_MASK (0xf << 4) +# define DP_DSC_MAJOR_SHIFT 0 +# define DP_DSC_MINOR_SHIFT 4 + +#define DP_DSC_RC_BUF_BLK_SIZE 0x062 +# define DP_DSC_RC_BUF_BLK_SIZE_1 0x0 +# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1 +# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2 +# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3 + +#define DP_DSC_RC_BUF_SIZE 0x063 + +#define DP_DSC_SLICE_CAP_1 0x064 +# define DP_DSC_1_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_2_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_4_PER_DP_DSC_SINK (1 << 3) +# define DP_DSC_6_PER_DP_DSC_SINK (1 << 4) +# define DP_DSC_8_PER_DP_DSC_SINK (1 << 5) +# define DP_DSC_10_PER_DP_DSC_SINK (1 << 6) +# define DP_DSC_12_PER_DP_DSC_SINK (1 << 7) + +#define DP_DSC_LINE_BUF_BIT_DEPTH 0x065 +# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK (0xf << 0) +# define DP_DSC_LINE_BUF_BIT_DEPTH_9 0x0 +# define DP_DSC_LINE_BUF_BIT_DEPTH_10 0x1 +# define DP_DSC_LINE_BUF_BIT_DEPTH_11 0x2 +# define DP_DSC_LINE_BUF_BIT_DEPTH_12 0x3 +# define DP_DSC_LINE_BUF_BIT_DEPTH_13 0x4 +# define DP_DSC_LINE_BUF_BIT_DEPTH_14 0x5 +# define DP_DSC_LINE_BUF_BIT_DEPTH_15 0x6 +# define DP_DSC_LINE_BUF_BIT_DEPTH_16 0x7 +# define DP_DSC_LINE_BUF_BIT_DEPTH_8 0x8 + +#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066 +# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0) + +#define 
DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */ + +#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */ +# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0) +# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8 + +#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069 +# define DP_DSC_RGB (1 << 0) +# define DP_DSC_YCbCr444 (1 << 1) +# define DP_DSC_YCbCr422_Simple (1 << 2) +# define DP_DSC_YCbCr422_Native (1 << 3) +# define DP_DSC_YCbCr420_Native (1 << 4) + +#define DP_DSC_DEC_COLOR_DEPTH_CAP 0x06A +# define DP_DSC_8_BPC (1 << 1) +# define DP_DSC_10_BPC (1 << 2) +# define DP_DSC_12_BPC (1 << 3) + +#define DP_DSC_PEAK_THROUGHPUT 0x06B +# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0) +# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0 +# define DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED 0 +# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_500 (4 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_550 (5 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_600 (6 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_650 (7 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_700 (8 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_750 (9 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_800 (10 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_850 (11 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */ +# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4) +# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4 +# define DP_DSC_THROUGHPUT_MODE_1_UNSUPPORTED 0 +# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_500 (4 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_550 (5 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_600 (6 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_650 (7 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_700 (8 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_750 (9 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_800 (10 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_850 (11 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4) + +#define DP_DSC_MAX_SLICE_WIDTH 0x06C +#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560 +#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320 + +#define DP_DSC_SLICE_CAP_2 0x06D +# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_20_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2) + +#define DP_DSC_BITS_PER_PIXEL_INC 0x06F +# define DP_DSC_BITS_PER_PIXEL_1_16 0x0 +# define DP_DSC_BITS_PER_PIXEL_1_8 0x1 +# define DP_DSC_BITS_PER_PIXEL_1_4 0x2 +# define DP_DSC_BITS_PER_PIXEL_1_2 0x3 +# define DP_DSC_BITS_PER_PIXEL_1 0x4 + +#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ +# define DP_PSR_IS_SUPPORTED 1 +# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */ +# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */ +# define DP_PSR2_WITH_Y_COORD_ET_SUPPORTED 4 /* eDP 1.5, adopted eDP 1.4b SCR */ + +#define DP_PSR_CAPS 0x071 /* XXX 1.2? 
*/
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */
+# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */
+# define DP_PSR2_SU_AUX_FRAME_SYNC_NOT_NEEDED (1 << 6) /* eDP 1.5, adopted eDP 1.4b SCR */
+
+#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */
+#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */
+
+/*
+ * 0x80-0x8f describe downstream port capabilities, but there are two layouts
+ * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
+ * each port's descriptor is one byte wide. If it was set, each port's is
+ * four bytes wide, starting with the one byte from the base info. As of
+ * DP interop v1.1a only VGA defines additional detail.
+ */
+
+/* offset 0 */
+#define DP_DOWNSTREAM_PORT_0 0x80
+# define DP_DS_PORT_TYPE_MASK (7 << 0)
+# define DP_DS_PORT_TYPE_DP 0
+# define DP_DS_PORT_TYPE_VGA 1
+# define DP_DS_PORT_TYPE_DVI 2
+# define DP_DS_PORT_TYPE_HDMI 3
+# define DP_DS_PORT_TYPE_NON_EDID 4
+# define DP_DS_PORT_TYPE_DP_DUALMODE 5
+# define DP_DS_PORT_TYPE_WIRELESS 6
+# define DP_DS_PORT_HPD (1 << 3)
+# define DP_DS_NON_EDID_MASK (0xf << 4)
+# define DP_DS_NON_EDID_720x480i_60 (1 << 4)
+# define DP_DS_NON_EDID_720x480i_50 (2 << 4)
+# define DP_DS_NON_EDID_1920x1080i_60 (3 << 4)
+# define DP_DS_NON_EDID_1920x1080i_50 (4 << 4)
+# define DP_DS_NON_EDID_1280x720_60 (5 << 4)
+# define DP_DS_NON_EDID_1280x720_50 (7 << 4)
+/* offset 1 for VGA is maximum megapixels per second / 8 */
+/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */
+/* offset 2 for VGA/DVI/HDMI */
+# define DP_DS_MAX_BPC_MASK (3 << 0)
+# define DP_DS_8BPC 0
+# define DP_DS_10BPC 1
+# define DP_DS_12BPC 2
+# define DP_DS_16BPC 3
+/* HDMI2.1 PCON FRL CONFIGURATION */
+# define DP_PCON_MAX_FRL_BW (7 << 2)
+# define DP_PCON_MAX_0GBPS (0 << 2)
+# define DP_PCON_MAX_9GBPS (1 << 2)
+# define DP_PCON_MAX_18GBPS (2 << 2)
+# define DP_PCON_MAX_24GBPS (3 << 2)
+# define DP_PCON_MAX_32GBPS (4 << 2)
+# define DP_PCON_MAX_40GBPS (5 << 2)
+# define DP_PCON_MAX_48GBPS (6 << 2)
+# define DP_PCON_SOURCE_CTL_MODE (1 << 5)
+
+/* offset 3 for DVI */
+# define DP_DS_DVI_DUAL_LINK (1 << 1)
+# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2)
+/* offset 3 for HDMI */
+# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0)
+# define DP_DS_HDMI_YCBCR422_PASS_THROUGH (1 << 1)
+# define DP_DS_HDMI_YCBCR420_PASS_THROUGH (1 << 2)
+# define DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3)
+# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4)
+
+/*
+ * VESA DP-to-HDMI PCON Specification adds caps for colorspace
+ * conversion in DFP cap DPCD 83h. Sec6.1 Table-3.
+ * Based on the available support the source can enable
+ * color conversion by writing into PROTOCOL_CONVERTER_CONTROL_2
+ * DPCD 3052h, as sketched below.
+ */
+# define DP_DS_HDMI_BT601_RGB_YCBCR_CONV (1 << 5)
+# define DP_DS_HDMI_BT709_RGB_YCBCR_CONV (1 << 6)
+# define DP_DS_HDMI_BT2020_RGB_YCBCR_CONV (1 << 7)
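A hedged sketch of how a source driver might act on these caps and turn on BT.709 RGB-to-YCbCr conversion in a PCON. The function name is hypothetical; drm_dp_dpcd_readb()/drm_dp_dpcd_writeb() are assumed from the DRM DP AUX helpers, and DP_PROTOCOL_CONVERTER_CONTROL_2 with its DP_CONVERSION_* bits is defined further down in this header:

#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>

/* Hypothetical: enable BT.709 RGB->YCbCr conversion in the PCON. */
static int pcon_enable_bt709_ycbcr_sketch(struct drm_dp_aux *aux)
{
	u8 ctrl;
	ssize_t ret;

	ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &ctrl);
	if (ret < 0)
		return ret;

	ctrl |= DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;

	ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, ctrl);
	return ret < 0 ? ret : 0;
}

The caller is expected to have checked DP_DS_HDMI_BT709_RGB_YCBCR_CONV in the downstream-port caps first; a read-modify-write keeps any DSC-encoder bits in the same register intact.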
+
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+
+/* DP Forward Error Correction Registers */
+#define DP_FEC_CAPABILITY 0x090 /* 1.4 */
+# define DP_FEC_CAPABLE (1 << 0)
+# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
+# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
+# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
+
+/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
+#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xD /* 0x92 through 0x9E */
+#define DP_PCON_DSC_ENCODER 0x092
+# define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0)
+# define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1)
+
+/* DP-HDMI2.1 PCON DSC Version */
+#define DP_PCON_DSC_VERSION 0x093
+# define DP_PCON_DSC_MAJOR_MASK (0xF << 0)
+# define DP_PCON_DSC_MINOR_MASK (0xF << 4)
+# define DP_PCON_DSC_MAJOR_SHIFT 0
+# define DP_PCON_DSC_MINOR_SHIFT 4
+
+/* DP-HDMI2.1 PCON DSC RC Buffer block size */
+#define DP_PCON_DSC_RC_BUF_BLK_INFO 0x094
+# define DP_PCON_DSC_RC_BUF_BLK_SIZE (0x3 << 0)
+# define DP_PCON_DSC_RC_BUF_BLK_1KB 0
+# define DP_PCON_DSC_RC_BUF_BLK_4KB 1
+# define DP_PCON_DSC_RC_BUF_BLK_16KB 2
+# define DP_PCON_DSC_RC_BUF_BLK_64KB 3
+
+/* DP-HDMI2.1 PCON DSC RC Buffer size */
+#define DP_PCON_DSC_RC_BUF_SIZE 0x095
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-1 */
+#define DP_PCON_DSC_SLICE_CAP_1 0x096
+# define DP_PCON_DSC_1_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_2_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_4_PER_DSC_ENC (0x1 << 3)
+# define DP_PCON_DSC_6_PER_DSC_ENC (0x1 << 4)
+# define DP_PCON_DSC_8_PER_DSC_ENC (0x1 << 5)
+# define DP_PCON_DSC_10_PER_DSC_ENC (0x1 << 6)
+# define DP_PCON_DSC_12_PER_DSC_ENC (0x1 << 7)
+
+#define DP_PCON_DSC_BUF_BIT_DEPTH 0x097
+# define DP_PCON_DSC_BIT_DEPTH_MASK (0xF << 0)
+# define DP_PCON_DSC_DEPTH_9_BITS 0
+# define DP_PCON_DSC_DEPTH_10_BITS 1
+# define DP_PCON_DSC_DEPTH_11_BITS 2
+# define DP_PCON_DSC_DEPTH_12_BITS 3
+# define DP_PCON_DSC_DEPTH_13_BITS 4
+# define DP_PCON_DSC_DEPTH_14_BITS 5
+# define DP_PCON_DSC_DEPTH_15_BITS 6
+# define DP_PCON_DSC_DEPTH_16_BITS 7
+# define DP_PCON_DSC_DEPTH_8_BITS 8
+
+#define DP_PCON_DSC_BLOCK_PREDICTION 0x098
+# define DP_PCON_DSC_BLOCK_PRED_SUPPORT (0x1 << 0)
+
+#define DP_PCON_DSC_ENC_COLOR_FMT_CAP 0x099
+# define DP_PCON_DSC_ENC_RGB (0x1 << 0)
+# define DP_PCON_DSC_ENC_YUV444 (0x1 << 1)
+# define DP_PCON_DSC_ENC_YUV422_S (0x1 << 2)
+# define DP_PCON_DSC_ENC_YUV422_N (0x1 << 3)
+# define DP_PCON_DSC_ENC_YUV420_N (0x1 << 4)
+
+#define DP_PCON_DSC_ENC_COLOR_DEPTH_CAP 0x09A
+# define DP_PCON_DSC_ENC_8BPC (0x1 << 1)
+# define DP_PCON_DSC_ENC_10BPC (0x1 << 2)
+# define DP_PCON_DSC_ENC_12BPC (0x1 << 3)
+
+#define DP_PCON_DSC_MAX_SLICE_WIDTH 0x09B
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-2 */
+#define DP_PCON_DSC_SLICE_CAP_2 0x09C
+# define DP_PCON_DSC_16_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_20_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_24_PER_DSC_ENC (0x1 << 2)
+
+/* DP-HDMI2.1 PCON HDMI TX Encoder Bits/pixel increment */
+#define DP_PCON_DSC_BPP_INCR 0x09E
+# define DP_PCON_DSC_BPP_INCR_MASK (0x7 << 0)
+# define DP_PCON_DSC_ONE_16TH_BPP 0
+# define DP_PCON_DSC_ONE_8TH_BPP 1
+# define DP_PCON_DSC_ONE_4TH_BPP 2
+# define DP_PCON_DSC_ONE_HALF_BPP 3
+# define DP_PCON_DSC_ONE_BPP 4
+
+/* DP Extended DSC Capabilities */
+#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
+#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
+#define 
DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 + +/* DFP Capability Extension */ +#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */ + +/* Link Configuration */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a +# define DP_LINK_BW_5_4 0x14 /* 1.2 */ +# define DP_LINK_BW_8_1 0x1e /* 1.4 */ +# define DP_LINK_BW_10 0x01 /* 2.0 128b/132b Link Layer */ +# define DP_LINK_BW_13_5 0x04 /* 2.0 128b/132b Link Layer */ +# define DP_LINK_BW_20 0x02 /* 2.0 128b/132b Link Layer */ + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */ +# define DP_TRAINING_PATTERN_3 3 /* 1.2 */ +# define DP_TRAINING_PATTERN_4 7 /* 1.4 */ +# define DP_TRAINING_PATTERN_MASK 0x3 +# define DP_TRAINING_PATTERN_MASK_1_4 0xf + +/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */ +# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +# define DP_TX_FFE_PRESET_VALUE_MASK (0xf << 0) /* 2.0 128b/132b Link Layer */ + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) +# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) +# define DP_SET_ANSI_128B132B (1 << 1) + +#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ +/* bitmask as for DP_I2C_SPEED_CAP */ + +#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? 
*/ +# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0) +# define DP_FRAMING_CHANGE_ENABLE (1 << 1) +# define DP_PANEL_SELF_TEST_ENABLE (1 << 7) + +#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */ +#define DP_LINK_QUAL_LANE1_SET 0x10c +#define DP_LINK_QUAL_LANE2_SET 0x10d +#define DP_LINK_QUAL_LANE3_SET 0x10e +# define DP_LINK_QUAL_PATTERN_DISABLE 0 +# define DP_LINK_QUAL_PATTERN_D10_2 1 +# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2 +# define DP_LINK_QUAL_PATTERN_PRBS7 3 +# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4 +# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1 5 +# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2 6 +# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3 7 +/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */ +# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08 +# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10 +# define DP_LINK_QUAL_PATTERN_PRSBS9 0x18 +# define DP_LINK_QUAL_PATTERN_PRSBS11 0x20 +# define DP_LINK_QUAL_PATTERN_PRSBS15 0x28 +# define DP_LINK_QUAL_PATTERN_PRSBS23 0x30 +# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38 +# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40 +# define DP_LINK_QUAL_PATTERN_SQUARE 0x48 + +#define DP_TRAINING_LANE0_1_SET2 0x10f +#define DP_TRAINING_LANE2_3_SET2 0x110 +# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0) +# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2) +# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4) +# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6) + +#define DP_MSTM_CTRL 0x111 /* 1.2 */ +# define DP_MST_EN (1 << 0) +# define DP_UP_REQ_EN (1 << 1) +# define DP_UPSTREAM_IS_SRC (1 << 2) + +#define DP_AUDIO_DELAY0 0x112 /* 1.2 */ +#define DP_AUDIO_DELAY1 0x113 +#define DP_AUDIO_DELAY2 0x114 + +#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */ +# define DP_LINK_RATE_SET_SHIFT 0 +# define DP_LINK_RATE_SET_MASK (7 << 0) + +#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */ +# define DP_ALPM_ENABLE (1 << 0) +# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0) +# define DP_IRQ_HPD_ENABLE (1 << 1) + +#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */ +# define DP_PWR_NOT_NEEDED (1 << 0) + +#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */ +# define DP_FEC_READY (1 << 0) +# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1) +# define DP_FEC_ERR_COUNT_DIS (0 << 1) +# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1) +# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1) +# define DP_FEC_BIT_ERROR_COUNT (3 << 1) +# define DP_FEC_LANE_SELECT_MASK (3 << 4) +# define DP_FEC_LANE_0_SELECT (0 << 4) +# define DP_FEC_LANE_1_SELECT (1 << 4) +# define DP_FEC_LANE_2_SELECT (2 << 4) +# define DP_FEC_LANE_3_SELECT (3 << 4) + +#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_VALID (1 << 0) + +#define DP_DSC_ENABLE 0x160 /* DP 1.4 */ +# define DP_DECOMPRESSION_EN (1 << 0) +#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */ + +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ +# define DP_PSR_ENABLE BIT(0) +# define DP_PSR_MAIN_LINK_ACTIVE BIT(1) +# define DP_PSR_CRC_VERIFICATION BIT(2) +# define DP_PSR_FRAME_CAPTURE BIT(3) +# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */ +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */ +# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */ + +#define DP_ADAPTER_CTRL 0x1a0 +# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) + +#define DP_BRANCH_DEVICE_CTRL 0x1a1 +# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) + +#define DP_PAYLOAD_ALLOCATE_SET 0x1c0 +#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 +#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 + +/* Link/Sink Device Status */ +#define DP_SINK_COUNT 0x200 +/* prior to 1.2 bit 7 was reserved mbz */ +# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) +# define DP_SINK_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 +# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) +# define DP_AUTOMATED_TEST_REQUEST (1 << 1) +# define DP_CP_IRQ (1 << 2) +# define DP_MCCS_IRQ (1 << 3) +# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */ +# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */ +# define DP_SINK_SPECIFIC_IRQ (1 << 6) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED (1 << 2) + +#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \ + DP_LANE_CHANNEL_EQ_DONE | \ + DP_LANE_SYMBOL_LOCKED) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */ +#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */ +#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */ +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 +# define DP_RECEIVE_PORT_0_STATUS (1 << 0) +# define DP_RECEIVE_PORT_1_STATUS (1 << 1) +# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */ +# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */ + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +/* DP 2.0 128b/132b Link Layer */ +# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK (0xf << 0) +# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0 +# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK (0xf << 4) +# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4 + +#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c +# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03 +# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0 +# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c +# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2 +# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30 +# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4 +# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0 +# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6 + +#define DP_TEST_REQUEST 0x218 +# define DP_TEST_LINK_TRAINING (1 << 0) +# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1) +# define DP_TEST_LINK_EDID_READ (1 << 2) +# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ +# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 
1.2 */ +# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */ +# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */ + +#define DP_TEST_LINK_RATE 0x219 +# define DP_LINK_RATE_162 (0x6) +# define DP_LINK_RATE_27 (0xa) + +#define DP_TEST_LANE_COUNT 0x220 + +#define DP_TEST_PATTERN 0x221 +# define DP_NO_TEST_PATTERN 0x0 +# define DP_COLOR_RAMP 0x1 +# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2 +# define DP_COLOR_SQUARE 0x3 + +#define DP_TEST_H_TOTAL_HI 0x222 +#define DP_TEST_H_TOTAL_LO 0x223 + +#define DP_TEST_V_TOTAL_HI 0x224 +#define DP_TEST_V_TOTAL_LO 0x225 + +#define DP_TEST_H_START_HI 0x226 +#define DP_TEST_H_START_LO 0x227 + +#define DP_TEST_V_START_HI 0x228 +#define DP_TEST_V_START_LO 0x229 + +#define DP_TEST_HSYNC_HI 0x22A +# define DP_TEST_HSYNC_POLARITY (1 << 7) +# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_HSYNC_WIDTH_LO 0x22B + +#define DP_TEST_VSYNC_HI 0x22C +# define DP_TEST_VSYNC_POLARITY (1 << 7) +# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_VSYNC_WIDTH_LO 0x22D + +#define DP_TEST_H_WIDTH_HI 0x22E +#define DP_TEST_H_WIDTH_LO 0x22F + +#define DP_TEST_V_HEIGHT_HI 0x230 +#define DP_TEST_V_HEIGHT_LO 0x231 + +#define DP_TEST_MISC0 0x232 +# define DP_TEST_SYNC_CLOCK (1 << 0) +# define DP_TEST_COLOR_FORMAT_MASK (3 << 1) +# define DP_TEST_COLOR_FORMAT_SHIFT 1 +# define DP_COLOR_FORMAT_RGB (0 << 1) +# define DP_COLOR_FORMAT_YCbCr422 (1 << 1) +# define DP_COLOR_FORMAT_YCbCr444 (2 << 1) +# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3) +# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3) +# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4) +# define DP_TEST_BIT_DEPTH_MASK (7 << 5) +# define DP_TEST_BIT_DEPTH_SHIFT 5 +# define DP_TEST_BIT_DEPTH_6 (0 << 5) +# define DP_TEST_BIT_DEPTH_8 (1 << 5) +# define DP_TEST_BIT_DEPTH_10 (2 << 5) +# define DP_TEST_BIT_DEPTH_12 (3 << 5) +# define DP_TEST_BIT_DEPTH_16 (4 << 5) + +#define DP_TEST_MISC1 0x233 +# define DP_TEST_REFRESH_DENOMINATOR (1 << 0) +# define DP_TEST_INTERLACED (1 << 1) + +#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234 + +#define DP_TEST_MISC0 0x232 + +#define DP_TEST_CRC_R_CR 0x240 +#define DP_TEST_CRC_G_Y 0x242 +#define DP_TEST_CRC_B_CB 0x244 + +#define DP_TEST_SINK_MISC 0x246 +# define DP_TEST_CRC_SUPPORTED (1 << 5) +# define DP_TEST_COUNT_MASK 0xf + +#define DP_PHY_TEST_PATTERN 0x248 +# define DP_PHY_TEST_PATTERN_SEL_MASK 0x7 +# define DP_PHY_TEST_PATTERN_NONE 0x0 +# define DP_PHY_TEST_PATTERN_D10_2 0x1 +# define DP_PHY_TEST_PATTERN_ERROR_COUNT 0x2 +# define DP_PHY_TEST_PATTERN_PRBS7 0x3 +# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4 +# define DP_PHY_TEST_PATTERN_CP2520 0x5 + +#define DP_PHY_SQUARE_PATTERN 0x249 + +#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A +#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250 +#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251 +#define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252 +#define DP_TEST_80BIT_CUSTOM_PATTERN_31_24 0x253 +#define DP_TEST_80BIT_CUSTOM_PATTERN_39_32 0x254 +#define DP_TEST_80BIT_CUSTOM_PATTERN_47_40 0x255 +#define DP_TEST_80BIT_CUSTOM_PATTERN_55_48 0x256 +#define DP_TEST_80BIT_CUSTOM_PATTERN_63_56 0x257 +#define DP_TEST_80BIT_CUSTOM_PATTERN_71_64 0x258 +#define DP_TEST_80BIT_CUSTOM_PATTERN_79_72 0x259 + +#define DP_TEST_RESPONSE 0x260 +# define DP_TEST_ACK (1 << 0) +# define DP_TEST_NAK (1 << 1) +# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) + +#define DP_TEST_EDID_CHECKSUM 0x261 + +#define DP_TEST_SINK 0x270 +# define DP_TEST_SINK_START (1 << 0) 
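The test-sink block above is the sink-side CRC facility drivers use to validate the pixel path end to end. A simplified sketch (illustrative only; the function name is ours, the AUX helpers are assumed from <drm/display/drm_dp_helper.h>, and a real driver also waits per frame for the CRC count in DP_TEST_SINK_MISC's DP_TEST_COUNT_MASK to advance before trusting the values, which is omitted here):

#include <linux/errno.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>

/* Hypothetical: start sink CRC generation and read back the three CRCs. */
static int dp_read_sink_crc_sketch(struct drm_dp_aux *aux, u16 crc[3])
{
	u8 buf[6], misc;
	ssize_t ret;

	ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &misc);
	if (ret < 0)
		return ret;
	if (!(misc & DP_TEST_CRC_SUPPORTED))
		return -EOPNOTSUPP;

	/* Arm CRC generation in the sink. */
	ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, DP_TEST_SINK_START);
	if (ret < 0)
		return ret;

	/* DP_TEST_CRC_R_CR..DP_TEST_CRC_B_CB: three 16-bit LE values. */
	ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	crc[0] = buf[0] | (buf[1] << 8); /* R/Cr */
	crc[1] = buf[2] | (buf[3] << 8); /* G/Y */
	crc[2] = buf[4] | (buf[5] << 8); /* B/Cb */
	return 0;
}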
+#define DP_TEST_AUDIO_MODE 0x271 +#define DP_TEST_AUDIO_PATTERN_TYPE 0x272 +#define DP_TEST_AUDIO_PERIOD_CH1 0x273 +#define DP_TEST_AUDIO_PERIOD_CH2 0x274 +#define DP_TEST_AUDIO_PERIOD_CH3 0x275 +#define DP_TEST_AUDIO_PERIOD_CH4 0x276 +#define DP_TEST_AUDIO_PERIOD_CH5 0x277 +#define DP_TEST_AUDIO_PERIOD_CH6 0x278 +#define DP_TEST_AUDIO_PERIOD_CH7 0x279 +#define DP_TEST_AUDIO_PERIOD_CH8 0x27A + +#define DP_FEC_STATUS 0x280 /* 1.4 */ +# define DP_FEC_DECODE_EN_DETECTED (1 << 0) +# define DP_FEC_DECODE_DIS_DETECTED (1 << 1) + +#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */ + +#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */ +# define DP_FEC_ERROR_COUNT_MASK 0x7F +# define DP_FEC_ERR_COUNT_VALID (1 << 7) + +#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ +# define DP_PAYLOAD_TABLE_UPDATED (1 << 0) +# define DP_PAYLOAD_ACT_HANDLED (1 << 1) + +#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */ +/* up to ID_SLOT_63 at 0x2ff */ + +/* Source Device-specific */ +#define DP_SOURCE_OUI 0x300 + +/* Sink Device-specific */ +#define DP_SINK_OUI 0x400 + +/* Branch Device-specific */ +#define DP_BRANCH_OUI 0x500 +#define DP_BRANCH_ID 0x503 +#define DP_BRANCH_REVISION_START 0x509 +#define DP_BRANCH_HW_REV 0x509 +#define DP_BRANCH_SW_REV 0x50A + +/* Link/Sink Device Power Control */ +#define DP_SET_POWER 0x600 +# define DP_SET_POWER_D0 0x1 +# define DP_SET_POWER_D3 0x2 +# define DP_SET_POWER_MASK 0x3 +# define DP_SET_POWER_D3_AUX_ON 0x5 + +/* eDP-specific */ +#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ +# define DP_EDP_11 0x00 +# define DP_EDP_12 0x01 +# define DP_EDP_13 0x02 +# define DP_EDP_14 0x03 +# define DP_EDP_14a 0x04 /* eDP 1.4a */ +# define DP_EDP_14b 0x05 /* eDP 1.4b */ + +#define DP_EDP_GENERAL_CAP_1 0x701 +# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2) +# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3) +# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4) +# define DP_EDP_FRC_ENABLE_CAP (1 << 5) +# define DP_EDP_COLOR_ENGINE_CAP (1 << 6) +# define DP_EDP_SET_POWER_CAP (1 << 7) + +#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 +# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2) +# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5) +# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7) + +#define DP_EDP_GENERAL_CAP_2 0x703 +# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) + +#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ +# define DP_EDP_X_REGION_CAP_MASK (0xf << 0) +# define DP_EDP_X_REGION_CAP_SHIFT 0 +# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4) +# define DP_EDP_Y_REGION_CAP_SHIFT 4 + +#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 +# define DP_EDP_BACKLIGHT_ENABLE (1 << 0) +# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1) +# define DP_EDP_FRC_ENABLE (1 << 2) +# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7) + +#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 +# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0) +# 
define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3) +# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) +# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) +# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ + +#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 +#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 + +#define DP_EDP_PWMGEN_BIT_COUNT 0x724 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726 +# define DP_EDP_PWMGEN_BIT_COUNT_MASK (0x1f << 0) + +#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727 + +#define DP_EDP_BACKLIGHT_FREQ_SET 0x728 +# define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ 27000 + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f + +#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732 +#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733 + +#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ +#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ + +#define DP_EDP_MSO_LINK_CAPABILITIES 0x7a4 /* eDP 1.4 */ +# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0) +# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0 +# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3) + +/* Sideband MSG Buffers */ +#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ + +/* DPRX Event Status Indicator */ +#define DP_SINK_COUNT_ESI 0x2002 /* same as 0x200 */ +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* same as 0x201 */ + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ +# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0) +# define DP_LOCK_ACQUISITION_REQUEST (1 << 1) +# define DP_CEC_IRQ (1 << 2) + +#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */ +# define RX_CAP_CHANGED (1 << 0) +# define LINK_STATUS_CHANGED (1 << 1) +# define STREAM_STATUS_CHANGED (1 << 2) +# define HDMI_LINK_STATUS_CHANGED (1 << 3) +# define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4) + +#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ +# define DP_PSR_LINK_CRC_ERROR (1 << 0) +# define DP_PSR_RFB_STORAGE_ERROR (1 << 1) +# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */ + +#define DP_PSR_ESI 0x2007 /* XXX 1.2? */ +# define DP_PSR_CAPS_CHANGE (1 << 0) + +#define DP_PSR_STATUS 0x2008 /* XXX 1.2? 
*/ +# define DP_PSR_SINK_INACTIVE 0 +# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 +# define DP_PSR_SINK_ACTIVE_RFB 2 +# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3 +# define DP_PSR_SINK_ACTIVE_RESYNC 4 +# define DP_PSR_SINK_INTERNAL_ERROR 7 +# define DP_PSR_SINK_STATE_MASK 0x07 + +#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */ +# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0) +# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0 +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4) +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4 + +#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */ +# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */ +# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */ +# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */ +# define DP_SU_VALID (1 << 3) /* eDP 1.4 */ +# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */ +# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */ +# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */ + +#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ +# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) + +#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */ +#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */ +#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ +#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ + +/* Extended Receiver Capability: See DP_DPCD_REV for definitions */ +#define DP_DP13_DPCD_REV 0x2200 + +#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */ +# define DP_GTC_CAP (1 << 0) /* DP 1.3 */ +# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */ +# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */ +# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */ +# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */ +# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ + +#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */ +# define DP_UHBR10 (1 << 0) +# define DP_UHBR20 (1 << 1) +# define DP_UHBR13_5 (1 << 2) + +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */ +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7) +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS 0x02 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS 0x03 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS 0x04 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06 + +#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230 +#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250 + +/* DSC Extended Capability Branch Total DSC Resources */ +#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */ +# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5) +# define DP_DSC_DECODER_COUNT_SHIFT 5 +#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */ +# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0) +# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1) +# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1 + +/* Protocol Converter Extension */ +/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */ +#define DP_CEC_TUNNELING_CAPABILITY 0x3000 +# define DP_CEC_TUNNELING_CAPABLE (1 << 0) +# define DP_CEC_SNOOPING_CAPABLE (1 << 1) 
+# define DP_CEC_MULTIPLE_LA_CAPABLE (1 << 2) + +#define DP_CEC_TUNNELING_CONTROL 0x3001 +# define DP_CEC_TUNNELING_ENABLE (1 << 0) +# define DP_CEC_SNOOPING_ENABLE (1 << 1) + +#define DP_CEC_RX_MESSAGE_INFO 0x3002 +# define DP_CEC_RX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_RX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_RX_MESSAGE_HPD_STATE (1 << 4) +# define DP_CEC_RX_MESSAGE_HPD_LOST (1 << 5) +# define DP_CEC_RX_MESSAGE_ACKED (1 << 6) +# define DP_CEC_RX_MESSAGE_ENDED (1 << 7) + +#define DP_CEC_TX_MESSAGE_INFO 0x3003 +# define DP_CEC_TX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_TX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_TX_RETRY_COUNT_MASK (0x7 << 4) +# define DP_CEC_TX_RETRY_COUNT_SHIFT 4 +# define DP_CEC_TX_MESSAGE_SEND (1 << 7) + +#define DP_CEC_TUNNELING_IRQ_FLAGS 0x3004 +# define DP_CEC_RX_MESSAGE_INFO_VALID (1 << 0) +# define DP_CEC_RX_MESSAGE_OVERFLOW (1 << 1) +# define DP_CEC_TX_MESSAGE_SENT (1 << 4) +# define DP_CEC_TX_LINE_ERROR (1 << 5) +# define DP_CEC_TX_ADDRESS_NACK_ERROR (1 << 6) +# define DP_CEC_TX_DATA_NACK_ERROR (1 << 7) + +#define DP_CEC_LOGICAL_ADDRESS_MASK 0x300E /* 0x300F word */ +# define DP_CEC_LOGICAL_ADDRESS_0 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_1 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_2 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_3 (1 << 3) +# define DP_CEC_LOGICAL_ADDRESS_4 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_5 (1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_6 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_7 (1 << 7) +#define DP_CEC_LOGICAL_ADDRESS_MASK_2 0x300F /* 0x300E word */ +# define DP_CEC_LOGICAL_ADDRESS_8 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_9 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_10 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_11 (1 << 3) +# define DP_CEC_LOGICAL_ADDRESS_12 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_13 (1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_14 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_15 (1 << 7) + +#define DP_CEC_RX_MESSAGE_BUFFER 0x3010 +#define DP_CEC_TX_MESSAGE_BUFFER 0x3020 +#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10 + +/* PCON CONFIGURE-1 FRL FOR HDMI SINK */ +#define DP_PCON_HDMI_LINK_CONFIG_1 0x305A +# define DP_PCON_ENABLE_MAX_FRL_BW (7 << 0) +# define DP_PCON_ENABLE_MAX_BW_0GBPS 0 +# define DP_PCON_ENABLE_MAX_BW_9GBPS 1 +# define DP_PCON_ENABLE_MAX_BW_18GBPS 2 +# define DP_PCON_ENABLE_MAX_BW_24GBPS 3 +# define DP_PCON_ENABLE_MAX_BW_32GBPS 4 +# define DP_PCON_ENABLE_MAX_BW_40GBPS 5 +# define DP_PCON_ENABLE_MAX_BW_48GBPS 6 +# define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3) +# define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4) +# define DP_PCON_ENABLE_SEQUENTIAL_LINK (0 << 4) +# define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5) +# define DP_PCON_ENABLE_HPD_READY (1 << 6) +# define DP_PCON_ENABLE_HDMI_LINK (1 << 7) + +/* PCON CONFIGURE-2 FRL FOR HDMI SINK */ +#define DP_PCON_HDMI_LINK_CONFIG_2 0x305B +# define DP_PCON_MAX_LINK_BW_MASK (0x3F << 0) +# define DP_PCON_FRL_BW_MASK_9GBPS (1 << 0) +# define DP_PCON_FRL_BW_MASK_18GBPS (1 << 1) +# define DP_PCON_FRL_BW_MASK_24GBPS (1 << 2) +# define DP_PCON_FRL_BW_MASK_32GBPS (1 << 3) +# define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4) +# define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5) +# define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6) +# define DP_PCON_FRL_LINK_TRAIN_NORMAL (0 << 6) + +/* PCON HDMI LINK STATUS */ +#define DP_PCON_HDMI_TX_LINK_STATUS 0x303B +# define DP_PCON_HDMI_TX_LINK_ACTIVE (1 << 0) +# define DP_PCON_FRL_READY (1 << 1) + +/* PCON HDMI POST FRL STATUS */ +#define DP_PCON_HDMI_POST_FRL_STATUS 0x3036 +# define DP_PCON_HDMI_LINK_MODE (1 << 0) +# define DP_PCON_HDMI_MODE_TMDS 0 +# 
define DP_PCON_HDMI_MODE_FRL 1 +# define DP_PCON_HDMI_FRL_TRAINED_BW (0x3F << 1) +# define DP_PCON_FRL_TRAINED_BW_9GBPS (1 << 1) +# define DP_PCON_FRL_TRAINED_BW_18GBPS (1 << 2) +# define DP_PCON_FRL_TRAINED_BW_24GBPS (1 << 3) +# define DP_PCON_FRL_TRAINED_BW_32GBPS (1 << 4) +# define DP_PCON_FRL_TRAINED_BW_40GBPS (1 << 5) +# define DP_PCON_FRL_TRAINED_BW_48GBPS (1 << 6) + +#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */ +# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0) /* DP 1.3 */ +# define DP_HDMI_EDID_PROCESSING_DISABLE (1 << 1) /* DP 1.4 */ +# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */ +# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */ +# define DP_PCON_ENABLE_DSC_ENCODER (1 << 1) +# define DP_PCON_ENCODER_PPS_OVERRIDE_MASK (0x3 << 2) +# define DP_PCON_ENC_PPS_OVERRIDE_DISABLED 0 +# define DP_PCON_ENC_PPS_OVERRIDE_EN_PARAMS 1 +# define DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER 2 +# define DP_CONVERSION_RGB_YCBCR_MASK (7 << 4) +# define DP_CONVERSION_BT601_RGB_YCBCR_ENABLE (1 << 4) +# define DP_CONVERSION_BT709_RGB_YCBCR_ENABLE (1 << 5) +# define DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE (1 << 6) + +/* PCON Downstream HDMI ERROR Status per Lane */ +#define DP_PCON_HDMI_ERROR_STATUS_LN0 0x3037 +#define DP_PCON_HDMI_ERROR_STATUS_LN1 0x3038 +#define DP_PCON_HDMI_ERROR_STATUS_LN2 0x3039 +#define DP_PCON_HDMI_ERROR_STATUS_LN3 0x303A +# define DP_PCON_HDMI_ERROR_COUNT_MASK (0x7 << 0) +# define DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS (1 << 0) +# define DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS (1 << 1) +# define DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS (1 << 2) + +/* PCON HDMI CONFIG PPS Override Buffer + * Valid Offsets to be added to Base : 0-127 + */ +#define DP_PCON_HDMI_PPS_OVERRIDE_BASE 0x3100 + +/* PCON HDMI CONFIG PPS Override Parameter: Slice height + * Offset-0 8LSBs of the Slice height. + * Offset-1 8MSBs of the Slice height. + */ +#define DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT 0x3180 + +/* PCON HDMI CONFIG PPS Override Parameter: Slice width + * Offset-0 8LSBs of the Slice width. + * Offset-1 8MSBs of the Slice width. + */ +#define DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH 0x3182 + +/* PCON HDMI CONFIG PPS Override Parameter: bits_per_pixel + * Offset-0 8LSBs of the bits_per_pixel. + * Offset-1 2MSBs of the bits_per_pixel. 
+ */
+#define DP_PCON_HDMI_PPS_OVRD_BPP 0x3184
+
+/* HDCP 1.3 and HDCP 2.2 */
+#define DP_AUX_HDCP_BKSV 0x68000
+#define DP_AUX_HDCP_RI_PRIME 0x68005
+#define DP_AUX_HDCP_AKSV 0x68007
+#define DP_AUX_HDCP_AN 0x6800C
+#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + (h) * 4)
+#define DP_AUX_HDCP_BCAPS 0x68028
+# define DP_BCAPS_REPEATER_PRESENT BIT(1)
+# define DP_BCAPS_HDCP_CAPABLE BIT(0)
+#define DP_AUX_HDCP_BSTATUS 0x68029
+# define DP_BSTATUS_REAUTH_REQ BIT(3)
+# define DP_BSTATUS_LINK_FAILURE BIT(2)
+# define DP_BSTATUS_R0_PRIME_READY BIT(1)
+# define DP_BSTATUS_READY BIT(0)
+#define DP_AUX_HDCP_BINFO 0x6802A
+#define DP_AUX_HDCP_KSV_FIFO 0x6802C
+#define DP_AUX_HDCP_AINFO 0x6803B
+
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318
+#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+
+/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
+#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
+#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
+#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
+#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */
+#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
+#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
+#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
+# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
+/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
+#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
+#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */
+
+enum drm_dp_phy {
+ DP_PHY_DPRX,
+
+ DP_PHY_LTTPR1,
+ DP_PHY_LTTPR2,
+ DP_PHY_LTTPR3,
+ DP_PHY_LTTPR4,
+ DP_PHY_LTTPR5,
+ DP_PHY_LTTPR6,
+ DP_PHY_LTTPR7,
+ DP_PHY_LTTPR8,
+
+ DP_MAX_LTTPR_COUNT = DP_PHY_LTTPR8,
+};
+
+#define DP_PHY_LTTPR(i) (DP_PHY_LTTPR1 + (i))
+
+#define __DP_LTTPR1_BASE 0xf0010 /* 1.3 */
+#define __DP_LTTPR2_BASE 0xf0060 /* 1.3 */
+#define DP_LTTPR_BASE(dp_phy) \
+ (__DP_LTTPR1_BASE + (__DP_LTTPR2_BASE - __DP_LTTPR1_BASE) * \
+ ((dp_phy) - DP_PHY_LTTPR1))
+
+#define DP_LTTPR_REG(dp_phy, lttpr1_reg) \
+ (DP_LTTPR_BASE(dp_phy) - DP_LTTPR_BASE(DP_PHY_LTTPR1) + (lttpr1_reg))
+
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_TRAINING_PATTERN_SET_PHY_REPEATER1)
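DP_LTTPR_BASE() and DP_LTTPR_REG() reduce per-repeater register lookup to plain address arithmetic: each LTTPR owns an 0x50-byte block starting at 0xf0010, so the LTTPRn copy of a register sits (n - 1) * 0x50 above the LTTPR1 copy. A couple of worked values (illustrative only; static_assert as provided by <linux/build_bug.h> is assumed):

/* LTTPR2 is one 0x50-byte block past LTTPR1. */
static_assert(DP_LTTPR_BASE(DP_PHY_LTTPR2) == 0xf0060);
static_assert(DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) == 0xf0060);
/* LTTPR8 is seven blocks (7 * 0x50 = 0x230) past LTTPR1. */
static_assert(DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) == 0xf0240);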
DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */ +#define DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_TRAINING_LANE0_SET_PHY_REPEATER1) + +#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */ +#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */ +#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */ +#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */ +#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) + +#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */ +# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0) +# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1) + +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0022 /* 2.0 */ +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) +/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */ + +#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */ +#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1) + +#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */ + +#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */ +#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */ +#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ + +#define __DP_FEC1_BASE 0xf0290 /* 1.4 */ +#define __DP_FEC2_BASE 0xf0298 /* 1.4 */ +#define DP_FEC_BASE(dp_phy) \ + (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \ + ((dp_phy) - DP_PHY_LTTPR1))) + +#define DP_FEC_REG(dp_phy, fec1_reg) \ + (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg) + +#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */ +#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \ + DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1) + +#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */ +#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */ + +#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */ + +#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */ + +/* Repeater modes */ +#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */ +#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */ + +/* DP HDCP message start offsets in DPCD address space */ +#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET +#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET +#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET +#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET +#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET +#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \ + DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET +#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET +#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET +#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET +#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET +#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET +#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET +#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET 
DP_HDCP_2_2_REG_MPRIME_OFFSET + +#define HDCP_2_2_DP_RXSTATUS_LEN 1 +#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0)) +#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1)) +#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2)) +#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) +#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4)) + +/* DP 1.2 Sideband message defines */ +/* peer device type - DP 1.2a Table 2-92 */ +#define DP_PEER_DEVICE_NONE 0x0 +#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1 +#define DP_PEER_DEVICE_MST_BRANCHING 0x2 +#define DP_PEER_DEVICE_SST_SINK 0x3 +#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4 + +/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */ +#define DP_GET_MSG_TRANSACTION_VERSION 0x00 /* DP 1.3 */ +#define DP_LINK_ADDRESS 0x01 +#define DP_CONNECTION_STATUS_NOTIFY 0x02 +#define DP_ENUM_PATH_RESOURCES 0x10 +#define DP_ALLOCATE_PAYLOAD 0x11 +#define DP_QUERY_PAYLOAD 0x12 +#define DP_RESOURCE_STATUS_NOTIFY 0x13 +#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14 +#define DP_REMOTE_DPCD_READ 0x20 +#define DP_REMOTE_DPCD_WRITE 0x21 +#define DP_REMOTE_I2C_READ 0x22 +#define DP_REMOTE_I2C_WRITE 0x23 +#define DP_POWER_UP_PHY 0x24 +#define DP_POWER_DOWN_PHY 0x25 +#define DP_SINK_EVENT_NOTIFY 0x30 +#define DP_QUERY_STREAM_ENC_STATUS 0x38 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST 0 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE 1 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE 2 + +/* DP 1.2 MST sideband reply types */ +#define DP_SIDEBAND_REPLY_ACK 0x00 +#define DP_SIDEBAND_REPLY_NAK 0x01 + +/* DP 1.2 MST sideband nak reasons - table 2.84 */ +#define DP_NAK_WRITE_FAILURE 0x01 +#define DP_NAK_INVALID_READ 0x02 +#define DP_NAK_CRC_FAILURE 0x03 +#define DP_NAK_BAD_PARAM 0x04 +#define DP_NAK_DEFER 0x05 +#define DP_NAK_LINK_FAILURE 0x06 +#define DP_NAK_NO_RESOURCES 0x07 +#define DP_NAK_DPCD_FAIL 0x08 +#define DP_NAK_I2C_NAK 0x09 +#define DP_NAK_ALLOCATE_FAIL 0x0a + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + +#define DP_LINK_CONSTANT_N_VALUE 0x8000 +#define DP_LINK_STATUS_SIZE 6 + +#define DP_BRANCH_OUI_HEADER_SIZE 0xc +#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0xf +#define EDP_PSR_RECEIVER_CAP_SIZE 2 +#define EDP_DISPLAY_CTL_CAP_SIZE 3 +#define DP_LTTPR_COMMON_CAP_SIZE 8 +#define DP_LTTPR_PHY_CAP_SIZE 3 + +#define DP_SDP_AUDIO_TIMESTAMP 0x01 +#define DP_SDP_AUDIO_STREAM 0x02 +#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */ +#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */ +#define DP_SDP_ISRC 0x06 /* DP 1.2 */ +#define DP_SDP_VSC 0x07 /* DP 1.2 */ +#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */ +#define DP_SDP_PPS 0x10 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ +/* 0x80+ CEA-861 infoframe types */ + +/** + * struct dp_sdp_header - DP secondary data packet header + * @HB0: Secondary Data Packet ID + * @HB1: Secondary Data Packet Type + * @HB2: Secondary Data Packet Specific header, Byte 0 + * @HB3: Secondary Data packet Specific header, Byte 1 + */ +struct dp_sdp_header { + u8 HB0; + u8 HB1; + u8 HB2; + u8 HB3; +} __packed; + +#define EDP_SDP_HEADER_REVISION_MASK 0x1F +#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F +#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F + +/** + * struct dp_sdp - DP secondary data packet + * @sdp_header: DP secondary data packet header + * 
@db: DP secondary data packet data blocks
+ *	VSC SDP Payload for PSR
+ *	db[0]: Stereo Interface
+ *	db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid
+ *	db[2]: CRC value bits 7:0 of the R or Cr component
+ *	db[3]: CRC value bits 15:8 of the R or Cr component
+ *	db[4]: CRC value bits 7:0 of the G or Y component
+ *	db[5]: CRC value bits 15:8 of the G or Y component
+ *	db[6]: CRC value bits 7:0 of the B or Cb component
+ *	db[7]: CRC value bits 15:8 of the B or Cb component
+ *	db[8] - db[31]: Reserved
+ *	VSC SDP Payload for Pixel Encoding/Colorimetry Format
+ *	db[0] - db[15]: Reserved
+ *	db[16]: Pixel Encoding and Colorimetry Formats
+ *	db[17]: Dynamic Range and Component Bit Depth
+ *	db[18]: Content Type
+ *	db[19] - db[31]: Reserved
+ */
+struct dp_sdp {
+	struct dp_sdp_header sdp_header;
+	u8 db[32];
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE	(1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB		(1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID	(1<<2)
+
+/**
+ * enum dp_pixelformat - drm DP Pixel encoding formats
+ *
+ * This enum is used to indicate DP VSC SDP Pixel encoding formats.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18]
+ *
+ * @DP_PIXELFORMAT_RGB: RGB pixel encoding format
+ * @DP_PIXELFORMAT_YUV444: YCbCr 4:4:4 pixel encoding format
+ * @DP_PIXELFORMAT_YUV422: YCbCr 4:2:2 pixel encoding format
+ * @DP_PIXELFORMAT_YUV420: YCbCr 4:2:0 pixel encoding format
+ * @DP_PIXELFORMAT_Y_ONLY: Y Only pixel encoding format
+ * @DP_PIXELFORMAT_RAW: RAW pixel encoding format
+ * @DP_PIXELFORMAT_RESERVED: Reserved pixel encoding format
+ */
+enum dp_pixelformat {
+	DP_PIXELFORMAT_RGB = 0,
+	DP_PIXELFORMAT_YUV444 = 0x1,
+	DP_PIXELFORMAT_YUV422 = 0x2,
+	DP_PIXELFORMAT_YUV420 = 0x3,
+	DP_PIXELFORMAT_Y_ONLY = 0x4,
+	DP_PIXELFORMAT_RAW = 0x5,
+	DP_PIXELFORMAT_RESERVED = 0x6,
+};
+
+/**
+ * enum dp_colorimetry - drm DP Colorimetry formats
+ *
+ * This enum is used to indicate DP VSC SDP Colorimetry formats.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18], and the enum member names follow the DRM_MODE_COLORIMETRY definitions.
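+ *
+ * Several enumerators below intentionally share a raw value (e.g.
+ * @DP_COLORIMETRY_BT709_YCC aliases @DP_COLORIMETRY_RGB_WIDE_FIXED); which
+ * meaning applies depends on the pixel encoding, since DB16 carries both
+ * fields. A sketch of that packing (assuming the DP 1.4 Table 2-117 layout)::
+ *
+ *	db[16] = (pixelformat & 0xf) << 4 | (colorimetry & 0xf);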
+ * + * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or + * ITU-R BT.601 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FIXED: RGB wide gamut fixed point colorimetry format + * @DP_COLORIMETRY_BT709_YCC: ITU-R BT.709 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FLOAT: RGB wide gamut floating point + * (scRGB (IEC 61966-2-2)) colorimetry format + * @DP_COLORIMETRY_XVYCC_601: xvYCC601 colorimetry format + * @DP_COLORIMETRY_OPRGB: OpRGB colorimetry format + * @DP_COLORIMETRY_XVYCC_709: xvYCC709 colorimetry format + * @DP_COLORIMETRY_DCI_P3_RGB: DCI-P3 (SMPTE RP 431-2) colorimetry format + * @DP_COLORIMETRY_SYCC_601: sYCC601 colorimetry format + * @DP_COLORIMETRY_RGB_CUSTOM: RGB Custom Color Profile colorimetry format + * @DP_COLORIMETRY_OPYCC_601: opYCC601 colorimetry format + * @DP_COLORIMETRY_BT2020_RGB: ITU-R BT.2020 R' G' B' colorimetry format + * @DP_COLORIMETRY_BT2020_CYCC: ITU-R BT.2020 Y'c C'bc C'rc colorimetry format + * @DP_COLORIMETRY_BT2020_YCC: ITU-R BT.2020 Y' C'b C'r colorimetry format + */ +enum dp_colorimetry { + DP_COLORIMETRY_DEFAULT = 0, + DP_COLORIMETRY_RGB_WIDE_FIXED = 0x1, + DP_COLORIMETRY_BT709_YCC = 0x1, + DP_COLORIMETRY_RGB_WIDE_FLOAT = 0x2, + DP_COLORIMETRY_XVYCC_601 = 0x2, + DP_COLORIMETRY_OPRGB = 0x3, + DP_COLORIMETRY_XVYCC_709 = 0x3, + DP_COLORIMETRY_DCI_P3_RGB = 0x4, + DP_COLORIMETRY_SYCC_601 = 0x4, + DP_COLORIMETRY_RGB_CUSTOM = 0x5, + DP_COLORIMETRY_OPYCC_601 = 0x5, + DP_COLORIMETRY_BT2020_RGB = 0x6, + DP_COLORIMETRY_BT2020_CYCC = 0x6, + DP_COLORIMETRY_BT2020_YCC = 0x7, +}; + +/** + * enum dp_dynamic_range - drm DP Dynamic Range + * + * This enum is used to indicate DP VSC SDP Dynamic Range. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * + * @DP_DYNAMIC_RANGE_VESA: VESA range + * @DP_DYNAMIC_RANGE_CTA: CTA range + */ +enum dp_dynamic_range { + DP_DYNAMIC_RANGE_VESA = 0, + DP_DYNAMIC_RANGE_CTA = 1, +}; + +/** + * enum dp_content_type - drm DP Content Type + * + * This enum is used to indicate DP VSC SDP Content Types. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * CTA-861-G defines content types and expected processing by a sink device + * + * @DP_CONTENT_TYPE_NOT_DEFINED: Not defined type + * @DP_CONTENT_TYPE_GRAPHICS: Graphics type + * @DP_CONTENT_TYPE_PHOTO: Photo type + * @DP_CONTENT_TYPE_VIDEO: Video type + * @DP_CONTENT_TYPE_GAME: Game type + */ +enum dp_content_type { + DP_CONTENT_TYPE_NOT_DEFINED = 0x00, + DP_CONTENT_TYPE_GRAPHICS = 0x01, + DP_CONTENT_TYPE_PHOTO = 0x02, + DP_CONTENT_TYPE_VIDEO = 0x03, + DP_CONTENT_TYPE_GAME = 0x04, +}; + +#endif /* _DRM_DP_H_ */ diff --git a/include/drm/display/drm_dp_aux_bus.h b/include/drm/display/drm_dp_aux_bus.h new file mode 100644 index 0000000000..8a0a486383 --- /dev/null +++ b/include/drm/display/drm_dp_aux_bus.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2021 Google Inc. + * + * The DP AUX bus is used for devices that are connected over a DisplayPort + * AUX bus. The devices on the far side of the bus are referred to as + * endpoints in this code. + */ + +#ifndef _DP_AUX_BUS_H_ +#define _DP_AUX_BUS_H_ + +#include +#include + +/** + * struct dp_aux_ep_device - Main dev structure for DP AUX endpoints + * + * This is used to instantiate devices that are connected via a DP AUX + * bus. Usually the device is a panel, but conceivable other devices could + * be hooked up there. 
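+ *
+ * A hedged sketch of how a driver typically populates the bus (the
+ * surrounding probe function is assumed; see also the deprecated wrappers
+ * further down in this header)::
+ *
+ *	ret = devm_of_dp_aux_populate_bus(aux, NULL);
+ *	if (ret && ret != -ENODEV)	/* -ENODEV: no child node, not fatal */
+ *		return ret;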
+ */ +struct dp_aux_ep_device { + /** @dev: The normal dev pointer */ + struct device dev; + /** @aux: Pointer to the aux bus */ + struct drm_dp_aux *aux; +}; + +struct dp_aux_ep_driver { + int (*probe)(struct dp_aux_ep_device *aux_ep); + void (*remove)(struct dp_aux_ep_device *aux_ep); + void (*shutdown)(struct dp_aux_ep_device *aux_ep); + struct device_driver driver; +}; + +static inline struct dp_aux_ep_device *to_dp_aux_ep_dev(struct device *dev) +{ + return container_of(dev, struct dp_aux_ep_device, dev); +} + +static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *drv) +{ + return container_of(drv, struct dp_aux_ep_driver, driver); +} + +int of_dp_aux_populate_bus(struct drm_dp_aux *aux, + int (*done_probing)(struct drm_dp_aux *aux)); +void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux); +int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux, + int (*done_probing)(struct drm_dp_aux *aux)); + +/* Deprecated versions of the above functions. To be removed when no callers. */ +static inline int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + int ret; + + ret = of_dp_aux_populate_bus(aux, NULL); + + /* New API returns -ENODEV for no child case; adapt to old assumption */ + return (ret != -ENODEV) ? ret : 0; +} + +static inline int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + int ret; + + ret = devm_of_dp_aux_populate_bus(aux, NULL); + + /* New API returns -ENODEV for no child case; adapt to old assumption */ + return (ret != -ENODEV) ? ret : 0; +} + +static inline void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux) +{ + of_dp_aux_depopulate_bus(aux); +} + +#define dp_aux_dp_driver_register(aux_ep_drv) \ + __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE) +int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *aux_ep_drv, + struct module *owner); +void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *aux_ep_drv); + +#endif /* _DP_AUX_BUS_H_ */ diff --git a/include/drm/display/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h new file mode 100644 index 0000000000..7ee4822650 --- /dev/null +++ b/include/drm/display/drm_dp_dual_mode_helper.h @@ -0,0 +1,121 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef DRM_DP_DUAL_MODE_HELPER_H +#define DRM_DP_DUAL_MODE_HELPER_H + +#include + +/* + * Optional for type 1 DVI adaptors + * Mandatory for type 1 HDMI and type 2 adaptors + */ +#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */ +#define DP_DUAL_MODE_HDMI_ID_LEN 16 +/* + * Optional for type 1 adaptors + * Mandatory for type 2 adaptors + */ +#define DP_DUAL_MODE_ADAPTOR_ID 0x10 +#define DP_DUAL_MODE_REV_MASK 0x07 +#define DP_DUAL_MODE_REV_TYPE2 0x00 +#define DP_DUAL_MODE_TYPE_MASK 0xf0 +#define DP_DUAL_MODE_TYPE_TYPE2 0xa0 +/* This field is marked reserved in dual mode spec, used in LSPCON */ +#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08 +#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ +#define DP_DUAL_IEEE_OUI_LEN 3 +#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ +#define DP_DUAL_DEVICE_ID_LEN 6 +#define DP_DUAL_MODE_HARDWARE_REV 0x1a +#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b +#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c +#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d +#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e +#define DP_DUAL_MODE_TMDS_OEN 0x20 +#define DP_DUAL_MODE_TMDS_DISABLE 0x01 +#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21 +#define DP_DUAL_MODE_CEC_ENABLE 0x01 +#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 + +/* LSPCON specific registers, defined by MCA */ +#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40 +#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41 +#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1 + +struct drm_device; +struct i2c_adapter; + +ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, + u8 offset, void *buffer, size_t size); +ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, + u8 offset, const void *buffer, size_t size); + +/** + * enum drm_lspcon_mode + * @DRM_LSPCON_MODE_INVALID: No LSPCON. + * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON + * which drives DP++ to HDMI 1.4 conversion. + * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON + * which drives DP++ to HDMI 2.0 active conversion. 
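+ *
+ * An illustrative probe-and-switch sketch (error handling elided; @dev and
+ * @adapter come from the caller)::
+ *
+ *	enum drm_lspcon_mode mode;
+ *
+ *	if (!drm_lspcon_get_mode(dev, adapter, &mode) &&
+ *	    mode != DRM_LSPCON_MODE_PCON)
+ *		drm_lspcon_set_mode(dev, adapter, DRM_LSPCON_MODE_PCON);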
+ */ +enum drm_lspcon_mode { + DRM_LSPCON_MODE_INVALID, + DRM_LSPCON_MODE_LS, + DRM_LSPCON_MODE_PCON, +}; + +/** + * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor + * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor + * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor + * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter + */ +enum drm_dp_dual_mode_type { + DRM_DP_DUAL_MODE_NONE, + DRM_DP_DUAL_MODE_UNKNOWN, + DRM_DP_DUAL_MODE_TYPE1_DVI, + DRM_DP_DUAL_MODE_TYPE1_HDMI, + DRM_DP_DUAL_MODE_TYPE2_DVI, + DRM_DP_DUAL_MODE_TYPE2_HDMI, + DRM_DP_DUAL_MODE_LSPCON, +}; + +enum drm_dp_dual_mode_type +drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter); +int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool *enabled); +int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool enable); +const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); + +int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter, + enum drm_lspcon_mode *current_mode); +int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter, + enum drm_lspcon_mode reqd_mode); +#endif diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h new file mode 100644 index 0000000000..db0fe9f8a6 --- /dev/null +++ b/include/drm/display/drm_dp_helper.h @@ -0,0 +1,764 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef _DRM_DP_HELPER_H_ +#define _DRM_DP_HELPER_H_ + +#include +#include + +#include +#include + +struct drm_device; +struct drm_dp_aux; +struct drm_panel; + +bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); + +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); + +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_clock_recovery_delay(void); +void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); + +int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux); +bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]); + +u8 drm_dp_link_rate_to_bw_code(int link_rate); +int drm_dp_bw_code_to_link_rate(u8 link_bw); + +/** + * struct drm_dp_vsc_sdp - drm DP VSC SDP + * + * This structure represents a DP VSC SDP of drm + * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and + * [Table 2-117: VSC SDP Payload for DB16 through DB18] + * + * @sdp_type: secondary-data packet type + * @revision: revision number + * @length: number of valid data bytes + * @pixelformat: pixel encoding format + * @colorimetry: colorimetry format + * @bpc: bit per color + * @dynamic_range: dynamic range information + * @content_type: CTA-861-G defines content types and expected processing by a sink device + */ +struct drm_dp_vsc_sdp { + unsigned char sdp_type; + unsigned char revision; + unsigned char length; + enum dp_pixelformat pixelformat; + enum dp_colorimetry colorimetry; + int bpc; + enum dp_dynamic_range dynamic_range; + enum dp_content_type content_type; +}; + +void drm_dp_vsc_sdp_log(const char *level, struct device *dev, + const struct drm_dp_vsc_sdp *vsc); + +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); + +static inline int +drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); +} + +static inline u8 +drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + +static inline bool +drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); +} + +static inline bool +drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + 
return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); +} + +static inline bool +drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x12 && + dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; +} + +static inline bool +drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 || + dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5; +} + +static inline bool +drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x14 && + dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED; +} + +static inline u8 +drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 : + DP_TRAINING_PATTERN_MASK; +} + +static inline bool +drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; +} + +/* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + bool is_edp); +u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); +int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], + u8 dsc_bpc[3]); + +static inline bool +drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & + DP_DSC_DECOMPRESSION_IS_SUPPORTED; +} + +static inline u16 +drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | + (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & + DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK << + DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT); +} + +static inline u32 +drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + /* Max Slicewidth = Number of Pixels * 320 */ + return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * + DP_DSC_SLICE_WIDTH_MULTIPLIER; +} + +/* Forward Error Correction Support on DP 1.4 */ +static inline bool +drm_dp_sink_supports_fec(const u8 fec_capable) +{ + return fec_capable & DP_FEC_CAPABLE; +} + +static inline bool +drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B; +} + +static inline bool +drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_EDP_CONFIGURATION_CAP] & + DP_ALTERNATE_SCRAMBLER_RESET_CAP; +} + +/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */ +static inline bool +drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWN_STREAM_PORT_COUNT] & + DP_MSA_TIMING_PAR_IGNORED; +} + +/** + * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support + * @edp_dpcd: The DPCD to check + * + * Note that currently this function will return %false for panels which support various DPCD + * backlight features but which require the brightness be set through PWM, and don't support setting + * the brightness level via the DPCD. 
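+ *
+ * A minimal usage sketch (an assumption, not the only pattern: the cap
+ * bytes are cached from DP_EDP_DPCD_REV before being checked)::
+ *
+ *	u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ *	bool vesa_bl = false;
+ *
+ *	if (drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ *			     sizeof(edp_dpcd)) == sizeof(edp_dpcd))
+ *		vesa_bl = drm_edp_backlight_supported(edp_dpcd);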
+ * + * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false + * otherwise + */ +static inline bool +drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) +{ + return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); +} + +/* + * DisplayPort AUX channel + */ + +/** + * struct drm_dp_aux_msg - DisplayPort AUX channel transaction + * @address: address of the (first) register to access + * @request: contains the type of transaction (see DP_AUX_* macros) + * @reply: upon completion, contains the reply type of the transaction + * @buffer: pointer to a transmission or reception buffer + * @size: size of @buffer + */ +struct drm_dp_aux_msg { + unsigned int address; + u8 request; + u8 reply; + void *buffer; + size_t size; +}; + +struct cec_adapter; +struct edid; +struct drm_connector; + +/** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. + * @connector: the connector this CEC adapter is associated with + * @unregister_work: unregister the CEC adapter + */ +struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; + struct drm_connector *connector; + struct delayed_work unregister_work; +}; + +/** + * struct drm_dp_aux - DisplayPort AUX channel + * + * An AUX channel can also be used to transport I2C messages to a sink. A + * typical application of that is to access an EDID that's present in the sink + * device. The @transfer() function can also be used to execute such + * transactions. The drm_dp_aux_register() function registers an I2C adapter + * that can be passed to drm_probe_ddc(). Upon removal, drivers should call + * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long + * transfers by default; if a partial response is received, the adapter will + * drop down to the size given by the partial response for this transaction + * only. + */ +struct drm_dp_aux { + /** + * @name: user-visible name of this AUX channel and the + * I2C-over-AUX adapter. + * + * It's also used to specify the name of the I2C adapter. If set + * to %NULL, dev_name() of @dev will be used. + */ + const char *name; + + /** + * @ddc: I2C adapter that can be used for I2C-over-AUX + * communication + */ + struct i2c_adapter ddc; + + /** + * @dev: pointer to struct device that is the parent for this + * AUX channel. + */ + struct device *dev; + + /** + * @drm_dev: pointer to the &drm_device that owns this AUX channel. + * Beware, this may be %NULL before drm_dp_aux_register() has been + * called. + * + * It should be set to the &drm_device that will be using this AUX + * channel as early as possible. For many graphics drivers this should + * happen before drm_dp_aux_init(), however it's perfectly fine to set + * this field later so long as it's assigned before calling + * drm_dp_aux_register(). + */ + struct drm_device *drm_dev; + + /** + * @crtc: backpointer to the crtc that is currently using this + * AUX channel + */ + struct drm_crtc *crtc; + + /** + * @hw_mutex: internal mutex used for locking transfers. + * + * Note that if the underlying hardware is shared among multiple + * channels, the driver needs to do additional locking to + * prevent concurrent access. 
+ */ + struct mutex hw_mutex; + + /** + * @crc_work: worker that captures CRCs for each frame + */ + struct work_struct crc_work; + + /** + * @crc_count: counter of captured frame CRCs + */ + u8 crc_count; + + /** + * @transfer: transfers a message representing a single AUX + * transaction. + * + * This is a hardware-specific implementation of how + * transactions are executed that the drivers must provide. + * + * A pointer to a &drm_dp_aux_msg structure describing the + * transaction is passed into this function. Upon success, the + * implementation should return the number of payload bytes that + * were transferred, or a negative error-code on failure. + * + * Helpers will propagate these errors, with the exception of + * the %-EBUSY error, which causes a transaction to be retried. + * On a short, helpers will return %-EPROTO to make it simpler + * to check for failure. + * + * The @transfer() function must only modify the reply field of + * the &drm_dp_aux_msg structure. The retry logic and i2c + * helpers assume this is the case. + * + * Also note that this callback can be called no matter the + * state @dev is in and also no matter what state the panel is + * in. It's expected: + * + * - If the @dev providing the AUX bus is currently unpowered then + * it will power itself up for the transfer. + * + * - If we're on eDP (using a drm_panel) and the panel is not in a + * state where it can respond (it's not powered or it's in a + * low power state) then this function may return an error, but + * not crash. It's up to the caller of this code to make sure that + * the panel is powered on if getting an error back is not OK. If a + * drm_panel driver is initiating a DP AUX transfer it may power + * itself up however it wants. All other code should ensure that + * the pre_enable() bridge chain (which eventually calls the + * drm_panel prepare function) has powered the panel. + */ + ssize_t (*transfer)(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg); + + /** + * @wait_hpd_asserted: wait for HPD to be asserted + * + * This is mainly useful for eDP panels drivers to wait for an eDP + * panel to finish powering on. This is an optional function. + * + * This function will efficiently wait for the HPD signal to be + * asserted. The `wait_us` parameter that is passed in says that we + * know that the HPD signal is expected to be asserted within `wait_us` + * microseconds. This function could wait for longer than `wait_us` if + * the logic in the DP controller has a long debouncing time. The + * important thing is that if this function returns success that the + * DP controller is ready to send AUX transactions. + * + * This function returns 0 if HPD was asserted or -ETIMEDOUT if time + * expired and HPD wasn't asserted. This function should not print + * timeout errors to the log. + * + * The semantics of this function are designed to match the + * readx_poll_timeout() function. That means a `wait_us` of 0 means + * to wait forever. Like readx_poll_timeout(), this function may sleep. + * + * NOTE: this function specifically reports the state of the HPD pin + * that's associated with the DP AUX channel. This is different from + * the HPD concept in much of the rest of DRM which is more about + * physical presence of a display. For eDP, for instance, a display is + * assumed always present even if the HPD pin is deasserted. + */ + int (*wait_hpd_asserted)(struct drm_dp_aux *aux, unsigned long wait_us); + + /** + * @i2c_nack_count: Counts I2C NACKs, used for DP validation. 
+ */ + unsigned i2c_nack_count; + /** + * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. + */ + unsigned i2c_defer_count; + /** + * @cec: struct containing fields used for CEC-Tunneling-over-AUX. + */ + struct drm_dp_aux_cec cec; + /** + * @is_remote: Is this AUX CH actually using sideband messaging. + */ + bool is_remote; +}; + +int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset); +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); + +/** + * drm_dp_dpcd_readb() - read a single byte from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to read + * @valuep: location where the value of the register will be stored + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, + unsigned int offset, u8 *valuep) +{ + return drm_dp_dpcd_read(aux, offset, valuep, 1); +} + +/** + * drm_dp_dpcd_writeb() - write a single byte to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to write + * @value: value to write to the register + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, + unsigned int offset, u8 value) +{ + return drm_dp_dpcd_write(aux, offset, &value, 1); +} + +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]); + +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]); + +int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, + enum drm_dp_phy dp_phy, + u8 link_status[DP_LINK_STATUS_SIZE]); + +bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, + u8 real_edid_checksum); + +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 type); +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); +void drm_dp_downstream_debug(struct seq_file *m, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid, + struct drm_dp_aux *aux); +enum drm_mode_subconnector +drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +void drm_dp_set_subconnector_property(struct drm_connector 
*connector,
+				       enum drm_connector_status status,
+				       const u8 *dpcd,
+				       const u8 port_cap[4]);
+
+struct drm_dp_desc;
+bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
+				const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				const struct drm_dp_desc *desc);
+int drm_dp_read_sink_count(struct drm_dp_aux *aux);
+
+int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
+				  const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				  u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
+			       const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			       enum drm_dp_phy dp_phy,
+			       u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
+void drm_dp_aux_init(struct drm_dp_aux *aux);
+int drm_dp_aux_register(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister(struct drm_dp_aux *aux);
+
+int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
+int drm_dp_stop_crc(struct drm_dp_aux *aux);
+
+struct drm_dp_dpcd_ident {
+	u8 oui[3];
+	u8 device_id[6];
+	u8 hw_rev;
+	u8 sw_major_rev;
+	u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+	struct drm_dp_dpcd_ident ident;
+	u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+		     bool is_branch);
+
+/**
+ * enum drm_dp_quirk - Display Port sink/branch device specific quirks
+ *
+ * Display Port sink and branch devices in the wild have a variety of bugs;
+ * try to collect them here. The quirks are shared, but it's up to the
+ * drivers to implement workarounds for them.
+ */
+enum drm_dp_quirk {
+	/**
+	 * @DP_DPCD_QUIRK_CONSTANT_N:
+	 *
+	 * The device requires main link attributes Mvid and Nvid to be limited
+	 * to 16 bits. So a constant value (0x8000) is used for compatibility.
+	 */
+	DP_DPCD_QUIRK_CONSTANT_N,
+	/**
+	 * @DP_DPCD_QUIRK_NO_PSR:
+	 *
+	 * The device does not support PSR even though it reports that it
+	 * does, or the driver still needs to implement proper handling for
+	 * such a device.
+	 */
+	DP_DPCD_QUIRK_NO_PSR,
+	/**
+	 * @DP_DPCD_QUIRK_NO_SINK_COUNT:
+	 *
+	 * The device does not set SINK_COUNT to a non-zero value.
+	 * The driver should ignore SINK_COUNT during detection. Note that
+	 * drm_dp_read_sink_count_cap() automatically checks for this quirk.
+	 */
+	DP_DPCD_QUIRK_NO_SINK_COUNT,
+	/**
+	 * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+	 *
+	 * The device supports MST DSC despite not supporting Virtual DPCD.
+	 * The DSC caps can be read from the physical aux instead.
+	 */
+	DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+	/**
+	 * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
+	 *
+	 * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
+	 * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
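+	 *
+	 * A driver honouring this quirk might do (sketch; @desc filled by
+	 * drm_dp_read_desc())::
+	 *
+	 *	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS))
+	 *		max_rate = max(max_rate, drm_dp_bw_code_to_link_rate(0xc));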
+	 */
+	DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if the DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+	return desc->quirks & BIT(quirk);
+}
+
+/**
+ * struct drm_edp_backlight_info - Probed eDP backlight info struct
+ * @pwmgen_bit_count: The pwmgen bit count
+ * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any
+ * @max: The maximum backlight level that may be set
+ * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
+ * @aux_enable: Does the panel support the AUX enable cap?
+ * @aux_set: Does the panel support setting the brightness through AUX?
+ *
+ * This structure contains various data about an eDP backlight, which can be populated by using
+ * drm_edp_backlight_init().
+ */
+struct drm_edp_backlight_info {
+	u8 pwmgen_bit_count;
+	u8 pwm_freq_pre_divider;
+	u16 max;
+
+	bool lsb_reg_used : 1;
+	bool aux_enable : 1;
+	bool aux_set : 1;
+};
+
+int
+drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+		       u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
+		       u16 *current_level, u8 *current_mode);
+int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+				u16 level);
+int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+			     u16 level);
+int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
+
+#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+	(IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
+
+int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux);
+
+#else
+
+static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel,
+					     struct drm_dp_aux *aux)
+{
+	return 0;
+}
+
+#endif
+
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+				   struct drm_connector *connector);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void
+drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+			      struct drm_connector *connector)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+					const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
+/**
+ * struct drm_dp_phy_test_params - DP Phy Compliance parameters
+ * @link_rate: Requested Link rate from DPCD 0x219
+ * @num_lanes: Number of lanes requested by the sink through DPCD 0x220
+ * @phy_pattern: DP Phy test pattern from DPCD 0x248
+ * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DPCD 0x24A and 0x24B
+ * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259
+ * @enhanced_frame_cap: flag for enhanced frame capability.
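+ *
+ * A typical compliance flow is to fetch the requested parameters and then
+ * program the pattern (sketch; error handling elided, and the revision
+ * argument is assumed to come from the sink's DP_DPCD_REV)::
+ *
+ *	struct drm_dp_phy_test_params data;
+ *
+ *	if (!drm_dp_get_phy_test_pattern(aux, &data))
+ *		drm_dp_set_phy_test_pattern(aux, &data, dpcd[DP_DPCD_REV]);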
+ */ +struct drm_dp_phy_test_params { + int link_rate; + u8 num_lanes; + u8 phy_pattern; + u8 hbr2_reset[2]; + u8 custom80[10]; + bool enhanced_frame_cap; +}; + +int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data); +int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data, u8 dp_rev); +int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd); +bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux); +int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, + u8 frl_mode); +int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, + u8 frl_type); +int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux); +int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux); + +bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux); +int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask); +void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux, + struct drm_connector *connector); +bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_pps_default(struct drm_dp_aux *aux); +int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]); +int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]); +bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 color_spc); +int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); + +#endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h new file mode 100644 index 0000000000..10adec068b --- /dev/null +++ b/include/drm/display/drm_dp_mst_helper.h @@ -0,0 +1,972 @@ +/* + * Copyright © 2014 Red Hat. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ +#ifndef _DRM_DP_MST_HELPER_H_ +#define _DRM_DP_MST_HELPER_H_ + +#include +#include +#include + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include +#include + +enum drm_dp_mst_topology_ref_type { + DRM_DP_MST_TOPOLOGY_REF_GET, + DRM_DP_MST_TOPOLOGY_REF_PUT, +}; + +struct drm_dp_mst_topology_ref_history { + struct drm_dp_mst_topology_ref_entry { + enum drm_dp_mst_topology_ref_type type; + int count; + ktime_t ts_nsec; + depot_stack_handle_t backtrace; + } *entries; + int len; +}; +#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ + +struct drm_dp_mst_branch; + +/** + * struct drm_dp_vcpi - Virtual Channel Payload Identifier + * @vcpi: Virtual channel ID. + * @pbn: Payload Bandwidth Number for this channel + * @aligned_pbn: PBN aligned with slot size + * @num_slots: number of slots for this PBN + */ +struct drm_dp_vcpi { + int vcpi; + int pbn; + int aligned_pbn; + int num_slots; +}; + +/** + * struct drm_dp_mst_port - MST port + * @port_num: port number + * @input: if this port is an input port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @mcs: message capability status - DP 1.2 spec. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @pdt: Peer Device Type. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @ldps: Legacy Device Plug Status. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @dpcd_rev: DPCD revision of device on this port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @num_sdp_streams: Number of simultaneous streams. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @num_sdp_stream_sinks: Number of stream sinks. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @full_pbn: Max possible bandwidth for this port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @next: link to next port on this branch device + * @aux: i2c aux transport to talk to device connected to this port, protected + * by &drm_dp_mst_topology_mgr.base.lock. + * @parent: branch device parent of this port + * @vcpi: Virtual Channel Payload info for this port. + * @connector: DRM connector this port is connected to. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @mgr: topology manager this port lives under. + * + * This structure represents an MST port endpoint on a device somewhere + * in the MST topology. + */ +struct drm_dp_mst_port { + /** + * @topology_kref: refcount for this port's lifetime in the topology, + * only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_port_malloc() and + * drm_dp_mst_put_port_malloc(). + */ + struct kref malloc_kref; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history: A history of each topology + * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. + */ + struct drm_dp_mst_topology_ref_history topology_ref_history; +#endif + + u8 port_num; + bool input; + bool mcs; + bool ddps; + u8 pdt; + bool ldps; + u8 dpcd_rev; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + uint16_t full_pbn; + struct list_head next; + /** + * @mstb: the branch device connected to this port, if there is one. + * This should be considered protected for reading by + * &drm_dp_mst_topology_mgr.lock. 
There are two exceptions to this: + * &drm_dp_mst_topology_mgr.up_req_work and + * &drm_dp_mst_topology_mgr.work, which do not grab + * &drm_dp_mst_topology_mgr.lock during reads but are the only + * updaters of this list and are protected from writing concurrently + * by &drm_dp_mst_topology_mgr.probe_lock. + */ + struct drm_dp_mst_branch *mstb; + struct drm_dp_aux aux; /* i2c bus for this port? */ + struct drm_dp_mst_branch *parent; + + struct drm_dp_vcpi vcpi; + struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; + + /** + * @cached_edid: for DP logical ports - make tiling work by ensuring + * that the EDID for all connectors is read immediately. + */ + struct edid *cached_edid; + /** + * @has_audio: Tracks whether the sink connector to this port is + * audio-capable. + */ + bool has_audio; + + /** + * @fec_capable: bool indicating if FEC can be supported up to that + * point in the MST topology. + */ + bool fec_capable; +}; + +/* sideband msg header - not bit struct */ +struct drm_dp_sideband_msg_hdr { + u8 lct; + u8 lcr; + u8 rad[8]; + bool broadcast; + bool path_msg; + u8 msg_len; + bool somt; + bool eomt; + bool seqno; +}; + +struct drm_dp_sideband_msg_rx { + u8 chunk[48]; + u8 msg[256]; + u8 curchunk_len; + u8 curchunk_idx; /* chunk we are parsing now */ + u8 curchunk_hdrlen; + u8 curlen; /* total length of the msg */ + bool have_somt; + bool have_eomt; + struct drm_dp_sideband_msg_hdr initial_hdr; +}; + +/** + * struct drm_dp_mst_branch - MST branch device. + * @rad: Relative Address to talk to this branch device. + * @lct: Link count total to talk to this branch device. + * @num_ports: number of ports on the branch. + * @port_parent: pointer to the port parent, NULL if toplevel. + * @mgr: topology manager for this branch device. + * @link_address_sent: if a link address message has been sent to this device yet. + * @guid: guid for DP 1.2 branch device. port under this branch can be + * identified by port #. + * + * This structure represents an MST branch device, there is one + * primary branch device at the root, along with any other branches connected + * to downstream port of parent branches. + */ +struct drm_dp_mst_branch { + /** + * @topology_kref: refcount for this branch device's lifetime in the + * topology, only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_mstb_malloc() and + * drm_dp_mst_put_mstb_malloc(). + */ + struct kref malloc_kref; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history: A history of each topology + * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. + */ + struct drm_dp_mst_topology_ref_history topology_ref_history; +#endif + + /** + * @destroy_next: linked-list entry used by + * drm_dp_delayed_destroy_work() + */ + struct list_head destroy_next; + + u8 rad[8]; + u8 lct; + int num_ports; + + /** + * @ports: the list of ports on this branch device. This should be + * considered protected for reading by &drm_dp_mst_topology_mgr.lock. 
+ * There are two exceptions to this: + * &drm_dp_mst_topology_mgr.up_req_work and + * &drm_dp_mst_topology_mgr.work, which do not grab + * &drm_dp_mst_topology_mgr.lock during reads but are the only + * updaters of this list and are protected from updating the list + * concurrently by @drm_dp_mst_topology_mgr.probe_lock + */ + struct list_head ports; + + struct drm_dp_mst_port *port_parent; + struct drm_dp_mst_topology_mgr *mgr; + + bool link_address_sent; + + /* global unique identifier to identify branch devices */ + u8 guid[16]; +}; + + +struct drm_dp_nak_reply { + u8 guid[16]; + u8 reason; + u8 nak_data; +}; + +struct drm_dp_link_address_ack_reply { + u8 guid[16]; + u8 nports; + struct drm_dp_link_addr_reply_port { + bool input_port; + u8 peer_device_type; + u8 port_number; + bool mcs; + bool ddps; + bool legacy_device_plug_status; + u8 dpcd_revision; + u8 peer_guid[16]; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + } ports[16]; +}; + +struct drm_dp_remote_dpcd_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_dpcd_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_remote_dpcd_write_nak_reply { + u8 port_number; + u8 reason; + u8 bytes_written_before_failure; +}; + +struct drm_dp_remote_i2c_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_i2c_read_nak_reply { + u8 port_number; + u8 nak_reason; + u8 i2c_nak_transaction; +}; + +struct drm_dp_remote_i2c_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_query_stream_enc_status_ack_reply { + /* Bit[23:16]- Stream Id */ + u8 stream_id; + + /* Bit[15]- Signed */ + bool reply_signed; + + /* Bit[10:8]- Stream Output Sink Type */ + bool unauthorizable_device_present; + bool legacy_device_present; + bool query_capable_device_present; + + /* Bit[12:11]- Stream Output CP Type */ + bool hdcp_1x_device_present; + bool hdcp_2x_device_present; + + /* Bit[4]- Stream Authentication */ + bool auth_completed; + + /* Bit[3]- Stream Encryption */ + bool encryption_enabled; + + /* Bit[2]- Stream Repeater Function Present */ + bool repeater_present; + + /* Bit[1:0]- Stream State */ + u8 state; +}; + +#define DRM_DP_MAX_SDP_STREAMS 16 +struct drm_dp_allocate_payload { + u8 port_number; + u8 number_sdp_streams; + u8 vcpi; + u16 pbn; + u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; +}; + +struct drm_dp_allocate_payload_ack_reply { + u8 port_number; + u8 vcpi; + u16 allocated_pbn; +}; + +struct drm_dp_connection_status_notify { + u8 guid[16]; + u8 port_number; + bool legacy_device_plug_status; + bool displayport_device_plug_status; + bool message_capability_status; + bool input_port; + u8 peer_device_type; +}; + +struct drm_dp_remote_dpcd_read { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; +}; + +struct drm_dp_remote_dpcd_write { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; + u8 *bytes; +}; + +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 +struct drm_dp_remote_i2c_read { + u8 num_transactions; + u8 port_number; + struct drm_dp_remote_i2c_read_tx { + u8 i2c_dev_id; + u8 num_bytes; + u8 *bytes; + u8 no_stop_bit; + u8 i2c_transaction_delay; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; + u8 read_i2c_device_id; + u8 num_bytes_read; +}; + +struct drm_dp_remote_i2c_write { + u8 port_number; + u8 write_i2c_device_id; + u8 num_bytes; + u8 *bytes; +}; + +struct drm_dp_query_stream_enc_status { + u8 stream_id; + u8 client_id[7]; /* 56-bit nonce */ + u8 stream_event; + bool valid_stream_event; + u8 stream_behavior; + u8 
valid_stream_behavior; +}; + +/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_req { + u8 port_number; +}; + +struct drm_dp_enum_path_resources_ack_reply { + u8 port_number; + bool fec_capable; + u16 full_payload_bw_number; + u16 avail_payload_bw_number; +}; + +/* covers POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_rep { + u8 port_number; +}; + +struct drm_dp_query_payload { + u8 port_number; + u8 vcpi; +}; + +struct drm_dp_resource_status_notify { + u8 port_number; + u8 guid[16]; + u16 available_pbn; +}; + +struct drm_dp_query_payload_ack_reply { + u8 port_number; + u16 allocated_pbn; +}; + +struct drm_dp_sideband_msg_req_body { + u8 req_type; + union ack_req { + struct drm_dp_connection_status_notify conn_stat; + struct drm_dp_port_number_req port_num; + struct drm_dp_resource_status_notify resource_stat; + + struct drm_dp_query_payload query_payload; + struct drm_dp_allocate_payload allocate_payload; + + struct drm_dp_remote_dpcd_read dpcd_read; + struct drm_dp_remote_dpcd_write dpcd_write; + + struct drm_dp_remote_i2c_read i2c_read; + struct drm_dp_remote_i2c_write i2c_write; + + struct drm_dp_query_stream_enc_status enc_status; + } u; +}; + +struct drm_dp_sideband_msg_reply_body { + u8 reply_type; + u8 req_type; + union ack_replies { + struct drm_dp_nak_reply nak; + struct drm_dp_link_address_ack_reply link_addr; + struct drm_dp_port_number_rep port_number; + + struct drm_dp_enum_path_resources_ack_reply path_resources; + struct drm_dp_allocate_payload_ack_reply allocate_payload; + struct drm_dp_query_payload_ack_reply query_payload; + + struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; + struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; + struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; + + struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; + struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; + struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; + + struct drm_dp_query_stream_enc_status_ack_reply enc_status; + } u; +}; + +/* msg is queued to be put into a slot */ +#define DRM_DP_SIDEBAND_TX_QUEUED 0 +/* msg has started transmitting on a slot - still on msgq */ +#define DRM_DP_SIDEBAND_TX_START_SEND 1 +/* msg has finished transmitting on a slot - removed from msgq only in slot */ +#define DRM_DP_SIDEBAND_TX_SENT 2 +/* msg has received a response - removed from slot */ +#define DRM_DP_SIDEBAND_TX_RX 3 +#define DRM_DP_SIDEBAND_TX_TIMEOUT 4 + +struct drm_dp_sideband_msg_tx { + u8 msg[256]; + u8 chunk[48]; + u8 cur_offset; + u8 cur_len; + struct drm_dp_mst_branch *dst; + struct list_head next; + int seqno; + int state; + bool path_msg; + struct drm_dp_sideband_msg_reply_body reply; +}; + +/* sideband msg handler */ +struct drm_dp_mst_topology_mgr; +struct drm_dp_mst_topology_cbs { + /* create a connector for a port */ + struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + /* + * Checks for any pending MST interrupts, passing them to MST core for + * processing, the same way an HPD IRQ pulse handler would do this. + * If provided MST core calls this callback from a poll-waiting loop + * when waiting for MST down message replies. The driver is expected + * to guard against a race between this callback and the driver's HPD + * IRQ pulse handler. 
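+	 *
+	 * A hedged sketch of such a hook; drm_dp_mst_hpd_irq() is declared
+	 * in this header, while the ESI read and the driver lock are
+	 * assumptions, not requirements:
+	 *
+	 *	static void drv_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+	 *	{
+	 *		struct drv_priv *priv = mgr_to_drv_priv(mgr);
+	 *		u8 esi[DP_DPRX_ESI_LEN] = {};
+	 *		bool handled;
+	 *
+	 *		mutex_lock(&priv->hpd_lock);
+	 *		if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI,
+	 *				     esi, DP_DPRX_ESI_LEN) > 0)
+	 *			drm_dp_mst_hpd_irq(mgr, esi, &handled);
+	 *		mutex_unlock(&priv->hpd_lock);
+	 *	}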
+	 */
+	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
+};
+
+#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
+
+#define DP_PAYLOAD_LOCAL 1
+#define DP_PAYLOAD_REMOTE 2
+#define DP_PAYLOAD_DELETE_LOCAL 3
+
+struct drm_dp_payload {
+	int payload_state;
+	int start_slot;
+	int num_slots;
+	int vcpi;
+};
+
+#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
+
+struct drm_dp_vcpi_allocation {
+	struct drm_dp_mst_port *port;
+	int vcpi;
+	int pbn;
+	bool dsc_enabled;
+	struct list_head next;
+};
+
+struct drm_dp_mst_topology_state {
+	struct drm_private_state base;
+	struct list_head vcpis;
+	struct drm_dp_mst_topology_mgr *mgr;
+	u8 total_avail_slots;
+	u8 start_slot;
+};
+
+#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
+
+/**
+ * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
+ *
+ * This struct represents the toplevel DisplayPort MST topology manager.
+ * There should be one instance of this for every MST capable DP connector
+ * on the GPU.
+ */
+struct drm_dp_mst_topology_mgr {
+	/**
+	 * @base: Base private object for atomic
+	 */
+	struct drm_private_obj base;
+
+	/**
+	 * @dev: device pointer for adding i2c devices etc.
+	 */
+	struct drm_device *dev;
+	/**
+	 * @cbs: callbacks for connector addition and destruction.
+	 */
+	const struct drm_dp_mst_topology_cbs *cbs;
+	/**
+	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
+	 * in one go.
+	 */
+	int max_dpcd_transaction_bytes;
+	/**
+	 * @aux: AUX channel for the DP MST connector this topology mgr is
+	 * controlling.
+	 */
+	struct drm_dp_aux *aux;
+	/**
+	 * @max_payloads: maximum number of payloads the GPU can generate.
+	 */
+	int max_payloads;
+	/**
+	 * @max_lane_count: maximum number of lanes the GPU can drive.
+	 */
+	int max_lane_count;
+	/**
+	 * @max_link_rate: maximum link rate per lane GPU can output, in kHz.
+	 */
+	int max_link_rate;
+	/**
+	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
+	 * to build the MST connector path value.
+	 */
+	int conn_base_id;
+
+	/**
+	 * @up_req_recv: Message receiver state for up requests.
+	 */
+	struct drm_dp_sideband_msg_rx up_req_recv;
+
+	/**
+	 * @down_rep_recv: Message receiver state for replies to down
+	 * requests.
+	 */
+	struct drm_dp_sideband_msg_rx down_rep_recv;
+
+	/**
+	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
+	 * @payload_id_table_cleared.
+	 */
+	struct mutex lock;
+
+	/**
+	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
+	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
+	 * while they update the topology.
+	 */
+	struct mutex probe_lock;
+
+	/**
+	 * @mst_state: If this manager is enabled for an MST capable port.
+	 * False if no MST sink/branch devices are connected.
+	 */
+	bool mst_state : 1;
+
+	/**
+	 * @payload_id_table_cleared: Whether or not we've cleared the payload
+	 * ID table for @mst_primary. Protected by @lock.
+	 */
+	bool payload_id_table_cleared : 1;
+
+	/**
+	 * @mst_primary: Pointer to the primary/first branch device.
+	 */
+	struct drm_dp_mst_branch *mst_primary;
+
+	/**
+	 * @dpcd: Cache of DPCD for primary port.
+	 */
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	/**
+	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+	 */
+	u8 sink_count;
+	/**
+	 * @pbn_div: PBN to slots divisor.
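+	 * A PBN value is converted into time slots by dividing by this
+	 * value; for example, drm_dp_find_vcpi_slots() does, roughly::
+	 *
+	 *	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);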
+ */ + int pbn_div; + + /** + * @funcs: Atomic helper callbacks + */ + const struct drm_private_state_funcs *funcs; + + /** + * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state + */ + struct mutex qlock; + + /** + * @tx_msg_downq: List of pending down requests + */ + struct list_head tx_msg_downq; + + /** + * @payload_lock: Protect payload information. + */ + struct mutex payload_lock; + /** + * @proposed_vcpis: Array of pointers for the new VCPI allocation. The + * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of + * this array is determined by @max_payloads. + */ + struct drm_dp_vcpi **proposed_vcpis; + /** + * @payloads: Array of payloads. The size of this array is determined + * by @max_payloads. + */ + struct drm_dp_payload *payloads; + /** + * @payload_mask: Elements of @payloads actually in use. Since + * reallocation of active outputs isn't possible gaps can be created by + * disabling outputs out of order compared to how they've been enabled. + */ + unsigned long payload_mask; + /** + * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis. + */ + unsigned long vcpi_mask; + + /** + * @tx_waitq: Wait to queue stall for the tx worker. + */ + wait_queue_head_t tx_waitq; + /** + * @work: Probe work. + */ + struct work_struct work; + /** + * @tx_work: Sideband transmit worker. This can nest within the main + * @work worker for each transaction @work launches. + */ + struct work_struct tx_work; + + /** + * @destroy_port_list: List of to be destroyed connectors. + */ + struct list_head destroy_port_list; + /** + * @destroy_branch_device_list: List of to be destroyed branch + * devices. + */ + struct list_head destroy_branch_device_list; + /** + * @delayed_destroy_lock: Protects @destroy_port_list and + * @destroy_branch_device_list. + */ + struct mutex delayed_destroy_lock; + + /** + * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items. + * A dedicated WQ makes it possible to drain any requeued work items + * on it. + */ + struct workqueue_struct *delayed_destroy_wq; + + /** + * @delayed_destroy_work: Work item to destroy MST port and branch + * devices, needed to avoid locking inversion. + */ + struct work_struct delayed_destroy_work; + + /** + * @up_req_list: List of pending up requests from the topology that + * need to be processed, in chronological order. + */ + struct list_head up_req_list; + /** + * @up_req_lock: Protects @up_req_list + */ + struct mutex up_req_lock; + /** + * @up_req_work: Work item to process up requests received from the + * topology. Needed to avoid blocking hotplug handling and sideband + * transmissions. + */ + struct work_struct up_req_work; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history_lock: protects + * &drm_dp_mst_port.topology_ref_history and + * &drm_dp_mst_branch.topology_ref_history. 
+ */ + struct mutex topology_ref_history_lock; +#endif +}; + +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct drm_device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, + int max_lane_count, int max_link_rate, + int conn_base_id); + +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); + +bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); + +int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); + + +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + +int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count); + +int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); + +bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn, int slots); + +int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + +void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); + +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + + +int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, + int pbn); + + +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot); + + +int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr); + +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync); + +ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); +ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); + +int drm_dp_mst_connector_late_register(struct drm_connector *connector, + struct drm_dp_mst_port *port); +void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, + struct drm_dp_mst_port *port); + +struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn, + int pbn_div); +int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, + struct drm_dp_mst_port *port, + int pbn, int pbn_div, + bool enable); +int __must_check +drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, bool power_up); +int 
drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_query_stream_enc_status_ack_reply *status); +int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); + +void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); +void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); + +struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port); + +extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; + +/** + * __drm_dp_mst_state_iter_get - private atomic state iterator function for + * macro-internal use + * @state: &struct drm_atomic_state pointer + * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state + * iteration cursor + * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state + * iteration cursor + * @i: int iteration cursor, for macro-internal use + * + * Used by for_each_oldnew_mst_mgr_in_state(), + * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't + * call this directly. + * + * Returns: + * True if the current &struct drm_private_obj is a &struct + * drm_dp_mst_topology_mgr, false otherwise. + */ +static inline bool +__drm_dp_mst_state_iter_get(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr **mgr, + struct drm_dp_mst_topology_state **old_state, + struct drm_dp_mst_topology_state **new_state, + int i) +{ + struct __drm_private_objs_state *objs_state = &state->private_objs[i]; + + if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) + return false; + + *mgr = to_dp_mst_topology_mgr(objs_state->ptr); + if (old_state) + *old_state = to_dp_mst_topology_state(objs_state->old_state); + if (new_state) + *new_state = to_dp_mst_topology_state(objs_state->new_state); + + return true; +} + +/** + * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology + * managers in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking both old and new state. This is useful in places where the state + * delta needs to be considered, for example in atomic check functions. + */ +#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i))) + +/** + * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the old state. This is useful in disable functions, where we + * need the old state the hardware is still in. 
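+ *
+ * A minimal usage sketch (the teardown helper is hypothetical)::
+ *
+ *	for_each_old_mst_mgr_in_state(state, mgr, old_mst_state, i)
+ *		drv_teardown_payloads(mgr, old_mst_state);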
+ */ +#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i))) + +/** + * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the new state. This is useful in enable functions, where we + * need the new state the hardware should be in when the atomic commit + * operation has completed. + */ +#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i))) + +#endif diff --git a/include/drm/display/drm_dsc.h b/include/drm/display/drm_dsc.h new file mode 100644 index 0000000000..bc90273d06 --- /dev/null +++ b/include/drm/display/drm_dsc.h @@ -0,0 +1,605 @@ +/* SPDX-License-Identifier: MIT + * Copyright (C) 2018 Intel Corp. + * + * Authors: + * Manasi Navare + */ + +#ifndef DRM_DSC_H_ +#define DRM_DSC_H_ + +#include + +/* VESA Display Stream Compression DSC 1.2 constants */ +#define DSC_NUM_BUF_RANGES 15 +#define DSC_MUX_WORD_SIZE_8_10_BPC 48 +#define DSC_MUX_WORD_SIZE_12_BPC 64 +#define DSC_RC_PIXELS_PER_GROUP 3 +#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095 +#define DSC_RANGE_BPG_OFFSET_MASK 0x3f + +/* DSC Rate Control Constants */ +#define DSC_RC_MODEL_SIZE_CONST 8192 +#define DSC_RC_EDGE_FACTOR_CONST 6 +#define DSC_RC_TGT_OFFSET_HI_CONST 3 +#define DSC_RC_TGT_OFFSET_LO_CONST 3 + +/* DSC PPS constants and macros */ +#define DSC_PPS_VERSION_MAJOR_SHIFT 4 +#define DSC_PPS_BPC_SHIFT 4 +#define DSC_PPS_MSB_SHIFT 8 +#define DSC_PPS_LSB_MASK (0xFF << 0) +#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8) +#define DSC_PPS_VBR_EN_SHIFT 2 +#define DSC_PPS_SIMPLE422_SHIFT 3 +#define DSC_PPS_CONVERT_RGB_SHIFT 4 +#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5 +#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8) +#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8) +#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4 +#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11 +#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6 +#define DSC_PPS_NATIVE_420_SHIFT 1 +#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16 +#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0 +#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13 + +/** + * struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters + * + * This defines different rate control parameters used by the DSC engine + * to compress the frame. + */ +struct drm_dsc_rc_range_parameters { + /** + * @range_min_qp: Min Quantization Parameters allowed for this range + */ + u8 range_min_qp; + /** + * @range_max_qp: Max Quantization Parameters allowed for this range + */ + u8 range_max_qp; + /** + * @range_bpg_offset: + * Bits/group offset to apply to target for this group + */ + u8 range_bpg_offset; +}; + +/** + * struct drm_dsc_config - Parameters required to configure DSC + * + * Driver populates this structure with all the parameters required + * to configure the display stream compression on the source. 
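+ *
+ * A typical flow, as a hedged sketch (the version numbers are
+ * placeholders; the two helpers are declared in drm_dsc_helper.h, also
+ * added by this patch)::
+ *
+ *	vdsc_cfg->dsc_version_major = 1;
+ *	vdsc_cfg->dsc_version_minor = 1;
+ *	... fill the remaining drm_dsc_config fields ...
+ *	ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+ *	if (!ret)
+ *		drm_dsc_pps_payload_pack(&pps.pps_payload, vdsc_cfg);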
+ */
+struct drm_dsc_config {
+	/**
+	 * @line_buf_depth:
+	 * Bits per component for previous reconstructed line buffer
+	 */
+	u8 line_buf_depth;
+	/**
+	 * @bits_per_component: Bits per component to code (8/10/12)
+	 */
+	u8 bits_per_component;
+	/**
+	 * @convert_rgb:
+	 * Flag to indicate if RGB - YCoCg conversion is needed
+	 * True if RGB input, False if YCoCg input
+	 */
+	bool convert_rgb;
+	/**
+	 * @slice_count: Number of slices per line used by the DSC encoder
+	 */
+	u8 slice_count;
+	/**
+	 * @slice_width: Width of each slice in pixels
+	 */
+	u16 slice_width;
+	/**
+	 * @slice_height: Slice height in pixels
+	 */
+	u16 slice_height;
+	/**
+	 * @simple_422: True if simple 4_2_2 mode is enabled, else False
+	 */
+	bool simple_422;
+	/**
+	 * @pic_width: Width of the input display frame in pixels
+	 */
+	u16 pic_width;
+	/**
+	 * @pic_height: Vertical height of the input display frame in pixels
+	 */
+	u16 pic_height;
+	/**
+	 * @rc_tgt_offset_high:
+	 * Offset to bits/group used by RC to determine QP adjustment
+	 */
+	u8 rc_tgt_offset_high;
+	/**
+	 * @rc_tgt_offset_low:
+	 * Offset to bits/group used by RC to determine QP adjustment
+	 */
+	u8 rc_tgt_offset_low;
+	/**
+	 * @bits_per_pixel:
+	 * Target bits per pixel with 4 fractional bits, bits_per_pixel << 4
+	 */
+	u16 bits_per_pixel;
+	/**
+	 * @rc_edge_factor:
+	 * Factor to determine if an edge is present based on the bits produced
+	 */
+	u8 rc_edge_factor;
+	/**
+	 * @rc_quant_incr_limit1:
+	 * Slow down incrementing once the range reaches this value
+	 */
+	u8 rc_quant_incr_limit1;
+	/**
+	 * @rc_quant_incr_limit0:
+	 * Slow down incrementing once the range reaches this value
+	 */
+	u8 rc_quant_incr_limit0;
+	/**
+	 * @initial_xmit_delay:
+	 * Number of pixels to delay the initial transmission
+	 */
+	u16 initial_xmit_delay;
+	/**
+	 * @initial_dec_delay:
+	 * Initial decoder delay, number of pixel times that the decoder
+	 * accumulates data in its rate buffer before starting to decode
+	 * and output pixels.
+	 */
+	u16 initial_dec_delay;
+	/**
+	 * @block_pred_enable:
+	 * True if block prediction is used to code any groups within the
+	 * picture. False if BP not used
+	 */
+	bool block_pred_enable;
+	/**
+	 * @first_line_bpg_offset:
+	 * Number of additional bits allocated for each group on the first
+	 * line of slice.
+	 */
+	u8 first_line_bpg_offset;
+	/**
+	 * @initial_offset: Value to use for RC model offset at slice start
+	 */
+	u16 initial_offset;
+	/**
+	 * @rc_buf_thresh: Thresholds defining each of the buffer ranges
+	 */
+	u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+	/**
+	 * @rc_range_params:
+	 * Parameters for each of the RC ranges defined in
+	 * &struct drm_dsc_rc_range_parameters
+	 */
+	struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+	/**
+	 * @rc_model_size: Total size of RC model
+	 */
+	u16 rc_model_size;
+	/**
+	 * @flatness_min_qp: Minimum QP where flatness information is sent
+	 */
+	u8 flatness_min_qp;
+	/**
+	 * @flatness_max_qp: Maximum QP where flatness information is sent
+	 */
+	u8 flatness_max_qp;
+	/**
+	 * @initial_scale_value: Initial value for the scale factor
+	 */
+	u8 initial_scale_value;
+	/**
+	 * @scale_decrement_interval:
+	 * Specifies number of group times between decrementing the scale factor
+	 * at beginning of a slice.
+	 */
+	u16 scale_decrement_interval;
+	/**
+	 * @scale_increment_interval:
+	 * Number of group times between incrementing the scale factor value
+	 * used at the beginning of a slice.
+	 */
+	u16 scale_increment_interval;
+	/**
+	 * @nfl_bpg_offset: Non first line BPG offset to be used
+	 */
+	u16 nfl_bpg_offset;
+	/**
+	 * @slice_bpg_offset: BPG offset used to enforce slice bit
+	 */
+	u16 slice_bpg_offset;
+	/**
+	 * @final_offset: Final RC linear transformation offset value
+	 */
+	u16 final_offset;
+	/**
+	 * @vbr_enable: True if VBR mode is enabled, false if disabled
+	 */
+	bool vbr_enable;
+	/**
+	 * @mux_word_size: Mux word size (in bits) for SSM mode
+	 */
+	u8 mux_word_size;
+	/**
+	 * @slice_chunk_size:
+	 * The (max) size in bytes of the "chunks" that are used in slice
+	 * multiplexing.
+	 */
+	u16 slice_chunk_size;
+	/**
+	 * @rc_bits: Rate control buffer size in bits
+	 */
+	u16 rc_bits;
+	/**
+	 * @dsc_version_minor: DSC minor version
+	 */
+	u8 dsc_version_minor;
+	/**
+	 * @dsc_version_major: DSC major version
+	 */
+	u8 dsc_version_major;
+	/**
+	 * @native_422: True if Native 4:2:2 supported, else false
+	 */
+	bool native_422;
+	/**
+	 * @native_420: True if Native 4:2:0 supported, else false.
+	 */
+	bool native_420;
+	/**
+	 * @second_line_bpg_offset:
+	 * Additional bits/grp for second line of slice for native 4:2:0
+	 */
+	u8 second_line_bpg_offset;
+	/**
+	 * @nsl_bpg_offset:
+	 * Num of bits deallocated for each grp that is not in second line of
+	 * slice
+	 */
+	u16 nsl_bpg_offset;
+	/**
+	 * @second_line_offset_adj:
+	 * Offset adjustment for second line in Native 4:2:0 mode
+	 */
+	u16 second_line_offset_adj;
+};
+
+/**
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of
+ * Picture Parameter Set
+ *
+ * The VESA DSC standard defines the picture parameter set (PPS) which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 in the VESA DSC specification v1.1/v1.2.
+ * The PPS fields that span more than a byte should be stored in big endian
+ * format.
+ */
+struct drm_dsc_picture_parameter_set {
+	/**
+	 * @dsc_version:
+	 * PPS0[3:0] - dsc_version_minor: Contains minor version of DSC
+	 * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+	 */
+	u8 dsc_version;
+	/**
+	 * @pps_identifier:
+	 * PPS1[7:0] - Application specific identifier that can be
+	 * used to differentiate between different PPS tables.
+	 */
+	u8 pps_identifier;
+	/**
+	 * @pps_reserved:
+	 * PPS2[7:0] - Reserved byte
+	 */
+	u8 pps_reserved;
+	/**
+	 * @pps_3:
+	 * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+	 * generate the bitstream (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+	 * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+	 * 0xE - 14 bits for DSC 1.2, 0xF - 14 bits for DSC 1.2).
+	 * PPS3[7:4] - bits_per_component: Bits per component for the original
+	 * pixels of the encoded picture.
+	 * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+	 * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+	 * allowed only when dsc_minor_version = 0x2)
+	 */
+	u8 pps_3;
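+	/*
+	 * A hedged sketch of packing the two PPS3 bitfields above using
+	 * the DSC_PPS_* macros from this header (illustrative, not the
+	 * in-kernel helper verbatim):
+	 *
+	 *	pps->pps_3 = cfg->line_buf_depth |
+	 *		     (cfg->bits_per_component << DSC_PPS_BPC_SHIFT);
+	 */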
+	/**
+	 * @pps_4:
+	 * PPS4[1:0] - These are the most significant 2 bits of
+	 * compressed BPP bits_per_pixel[9:0] syntax element.
+	 * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+	 * PPS4[3] - simple_422: Indicates if decoder drops samples to
+	 * reconstruct the 4:2:2 picture.
+	 * PPS4[4] - convert_rgb: Indicates if DSC color space conversion is
+	 * active.
+	 * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+	 * groups in picture
+	 * PPS4[7:6] - Reserved bits
+	 */
+	u8 pps_4;
+	/**
+	 * @bits_per_pixel_low:
+	 * PPS5[7:0] - This indicates the lower significant 8 bits of
+	 * the compressed BPP bits_per_pixel[9:0] element.
+	 */
+	u8 bits_per_pixel_low;
+	/**
+	 * @pic_height:
+	 * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel
+	 * rows within the raster.
+	 */
+	__be16 pic_height;
+	/**
+	 * @pic_width:
+	 * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+	 * the raster.
+	 */
+	__be16 pic_width;
+	/**
+	 * @slice_height:
+	 * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+	 */
+	__be16 slice_height;
+	/**
+	 * @slice_width:
+	 * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+	 */
+	__be16 slice_width;
+	/**
+	 * @chunk_size:
+	 * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+	 * that are used for slice multiplexing.
+	 */
+	__be16 chunk_size;
+	/**
+	 * @initial_xmit_delay_high:
+	 * PPS16[1:0] - Most significant two bits of initial transmission delay.
+	 * It specifies the number of pixel times that the encoder waits before
+	 * transmitting data from its rate buffer.
+	 * PPS16[7:2] - Reserved
+	 */
+	u8 initial_xmit_delay_high;
+	/**
+	 * @initial_xmit_delay_low:
+	 * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+	 */
+	u8 initial_xmit_delay_low;
+	/**
+	 * @initial_dec_delay:
+	 *
+	 * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+	 * of pixel times that the decoder accumulates data in its rate buffer
+	 * before starting to decode and output pixels.
+	 */
+	__be16 initial_dec_delay;
+	/**
+	 * @pps20_reserved:
+	 *
+	 * PPS20[7:0] - Reserved
+	 */
+	u8 pps20_reserved;
+	/**
+	 * @initial_scale_value:
+	 * PPS21[5:0] - Initial rcXformScale factor used at beginning
+	 * of a slice.
+	 * PPS21[7:6] - Reserved
+	 */
+	u8 initial_scale_value;
+	/**
+	 * @scale_increment_interval:
+	 * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+	 * the rcXformScale factor at end of a slice.
+	 */
+	__be16 scale_increment_interval;
+	/**
+	 * @scale_decrement_interval_high:
+	 * PPS24[3:0] - Higher 4 bits indicating number of group times between
+	 * decrementing the rcXformScale factor at beginning of a slice.
+	 * PPS24[7:4] - Reserved
+	 */
+	u8 scale_decrement_interval_high;
+	/**
+	 * @scale_decrement_interval_low:
+	 * PPS25[7:0] - Lower 8 bits of scale decrement interval
+	 */
+	u8 scale_decrement_interval_low;
+	/**
+	 * @pps26_reserved:
+	 * PPS26[7:0] - Reserved
+	 */
+	u8 pps26_reserved;
+	/**
+	 * @first_line_bpg_offset:
+	 * PPS27[4:0] - Number of additional bits that are allocated
+	 * for each group on first line of a slice.
+	 * PPS27[7:5] - Reserved
+	 */
+	u8 first_line_bpg_offset;
+	/**
+	 * @nfl_bpg_offset:
+	 * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+	 * deallocated for each group for groups after the first line of slice.
+	 */
+	__be16 nfl_bpg_offset;
+	/**
+	 * @slice_bpg_offset:
+	 * PPS30[7:0], PPS31[7:0] - Number of bits that are deallocated for
+	 * each group to enforce the slice constraint.
+	 */
+	__be16 slice_bpg_offset;
+	/**
+	 * @initial_offset:
+	 * PPS32[7:0], PPS33[7:0] - Initial value for rcXformOffset
+	 */
+	__be16 initial_offset;
+	/**
+	 * @final_offset:
+	 * PPS34[7:0], PPS35[7:0] - Maximum end-of-slice value for rcXformOffset
+	 */
+	__be16 final_offset;
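+	/*
+	 * A hedged sketch of splitting the 10-bit bits_per_pixel value
+	 * across PPS4 and PPS5 using the DSC_PPS_* masks from this header
+	 * (illustrative only):
+	 *
+	 *	pps->pps_4 |= (bpp & DSC_PPS_BPP_HIGH_MASK) >> DSC_PPS_MSB_SHIFT;
+	 *	pps->bits_per_pixel_low = bpp & DSC_PPS_LSB_MASK;
+	 */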
+	/**
+	 * @flatness_min_qp:
+	 * PPS36[4:0] - Minimum QP at which flatness is signaled and
+	 * flatness QP adjustment is made.
+	 * PPS36[7:5] - Reserved
+	 */
+	u8 flatness_min_qp;
+	/**
+	 * @flatness_max_qp:
+	 * PPS37[4:0] - Maximum QP at which flatness is signaled and
+	 * the flatness adjustment is made.
+	 * PPS37[7:5] - Reserved
+	 */
+	u8 flatness_max_qp;
+	/**
+	 * @rc_model_size:
+	 * PPS38[7:0], PPS39[7:0] - Number of bits within RC Model.
+	 */
+	__be16 rc_model_size;
+	/**
+	 * @rc_edge_factor:
+	 * PPS40[3:0] - Ratio of current activity vs. previous
+	 * activity to determine presence of edge.
+	 * PPS40[7:4] - Reserved
+	 */
+	u8 rc_edge_factor;
+	/**
+	 * @rc_quant_incr_limit0:
+	 * PPS41[4:0] - QP threshold used in short term RC
+	 * PPS41[7:5] - Reserved
+	 */
+	u8 rc_quant_incr_limit0;
+	/**
+	 * @rc_quant_incr_limit1:
+	 * PPS42[4:0] - QP threshold used in short term RC
+	 * PPS42[7:5] - Reserved
+	 */
+	u8 rc_quant_incr_limit1;
+	/**
+	 * @rc_tgt_offset:
+	 * PPS43[3:0] - Lower end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+	 * PPS43[7:4] - Upper end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+	 */
+	u8 rc_tgt_offset;
+	/**
+	 * @rc_buf_thresh:
+	 * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+	 * the 15 ranges defined by 14 thresholds.
+	 */
+	u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+	/**
+	 * @rc_range_parameters:
+	 * PPS58[7:0] - PPS87[7:0]
+	 * Parameters that correspond to each of the 15 ranges.
+	 */
+	__be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+	/**
+	 * @native_422_420:
+	 * PPS88[0] - 0 = Native 4:2:2 not used
+	 * 1 = Native 4:2:2 used
+	 * PPS88[1] - 0 = Native 4:2:0 not used
+	 * 1 = Native 4:2:0 used
+	 * PPS88[7:2] - Reserved 6 bits
+	 */
+	u8 native_422_420;
+	/**
+	 * @second_line_bpg_offset:
+	 * PPS89[4:0] - Additional bits/group budget for the
+	 * second line of a slice in Native 4:2:0 mode.
+	 * Set to 0 if DSC minor version is 1 or native420 is 0.
+	 * PPS89[7:5] - Reserved
+	 */
+	u8 second_line_bpg_offset;
+	/**
+	 * @nsl_bpg_offset:
+	 * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+	 * for each group that is not in the second line of a slice.
+	 */
+	__be16 nsl_bpg_offset;
+	/**
+	 * @second_line_offset_adj:
+	 * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+	 * line in Native 4:2:0 mode.
+	 */
+	__be16 second_line_offset_adj;
+	/**
+	 * @pps_long_94_reserved:
+	 * PPS 94, 95, 96, 97 - Reserved
+	 */
+	u32 pps_long_94_reserved;
+	/**
+	 * @pps_long_98_reserved:
+	 * PPS 98, 99, 100, 101 - Reserved
+	 */
+	u32 pps_long_98_reserved;
+	/**
+	 * @pps_long_102_reserved:
+	 * PPS 102, 103, 104, 105 - Reserved
+	 */
+	u32 pps_long_102_reserved;
+	/**
+	 * @pps_long_106_reserved:
+	 * PPS 106, 107, 108, 109 - Reserved
+	 */
+	u32 pps_long_106_reserved;
+	/**
+	 * @pps_long_110_reserved:
+	 * PPS 110, 111, 112, 113 - Reserved
+	 */
+	u32 pps_long_110_reserved;
+	/**
+	 * @pps_long_114_reserved:
+	 * PPS 114 - 117 - Reserved
+	 */
+	u32 pps_long_114_reserved;
+	/**
+	 * @pps_long_118_reserved:
+	 * PPS 118 - 121 - Reserved
+	 */
+	u32 pps_long_118_reserved;
+	/**
+	 * @pps_long_122_reserved:
+	 * PPS 122 - 125 - Reserved
+	 */
+	u32 pps_long_122_reserved;
+	/**
+	 * @pps_short_126_reserved:
+	 * PPS 126, 127 - Reserved
+	 */
+	__be16 pps_short_126_reserved;
+} __packed;
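+
+/*
+ * The __be16 members above must be written with explicit byte-order
+ * conversion. A hedged sketch of what a PPS pack helper does for the
+ * frame dimensions:
+ *
+ *	pps->pic_height = cpu_to_be16(dsc_cfg->pic_height);
+ *	pps->pic_width = cpu_to_be16(dsc_cfg->pic_width);
+ */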
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe required to send the Picture
+ * Parameter Set metadata required before enabling VESA Display Stream
+ * Compression. This is based on the DP Secondary Data Packet structure and
+ * comprises the SDP header as defined in &struct dp_sdp_header in
+ * drm_dp_helper.h and the PPS payload defined in
+ * &struct drm_dsc_picture_parameter_set.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format of type
+ * &struct dp_sdp_header
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ * as represented in &struct drm_dsc_picture_parameter_set
+ */
+struct drm_dsc_pps_infoframe {
+	struct dp_sdp_header pps_header;
+	struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+#endif /* DRM_DSC_H_ */
diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h
new file mode 100644
index 0000000000..8b41edbbab
--- /dev/null
+++ b/include/drm/display/drm_dsc_helper.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare
+ */
+
+#ifndef DRM_DSC_HELPER_H_
+#define DRM_DSC_HELPER_H_
+
+#include <drm/display/drm_dsc.h>
+
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
+			      const struct drm_dsc_config *dsc_cfg);
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
+
+#endif /* DRM_DSC_HELPER_H_ */
+
diff --git a/include/drm/display/drm_hdcp.h b/include/drm/display/drm_hdcp.h
new file mode 100644
index 0000000000..96a99b1377
--- /dev/null
+++ b/include/drm/display/drm_hdcp.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul
+ */
+
+#ifndef _DRM_HDCP_H_
+#define _DRM_HDCP_H_
+
+#include <linux/types.h>
+
+/* Period of hdcp checks (to ensure we're still authenticated) */
+#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16)
+#define DRM_HDCP2_CHECK_PERIOD_MS 500
+
+/* Shared lengths/masks between HDMI/DVI/DisplayPort */
+#define DRM_HDCP_AN_LEN 8
+#define DRM_HDCP_BSTATUS_LEN 2
+#define DRM_HDCP_KSV_LEN 5
+#define DRM_HDCP_RI_LEN 2
+#define DRM_HDCP_V_PRIME_PART_LEN 4
+#define DRM_HDCP_V_PRIME_NUM_PARTS 5
+#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f)
+#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3))
+#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7))
+
+/* Slave address for the HDCP registers in the receiver */
+#define DRM_HDCP_DDC_ADDR 0x3A
+
+/* Value to use at the end of the SHA-1 bytestream used for repeaters */
+#define DRM_HDCP_SHA1_TERMINATOR 0x80
+
+/* HDCP register offsets for HDMI/DVI devices */
+#define DRM_HDCP_DDC_BKSV 0x00
+#define DRM_HDCP_DDC_RI_PRIME 0x08
+#define DRM_HDCP_DDC_AKSV 0x10
+#define DRM_HDCP_DDC_AN 0x18
+#define DRM_HDCP_DDC_V_PRIME(h) (0x20 + h * 4)
+#define DRM_HDCP_DDC_BCAPS 0x40
+#define DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT BIT(6)
+#define DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY BIT(5)
+#define DRM_HDCP_DDC_BSTATUS 0x41
+#define DRM_HDCP_DDC_KSV_FIFO 0x43
+
+#define DRM_HDCP_1_4_SRM_ID 0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE 40
+
+/* Protocol message definitions for the HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0 0x00
+#define HDCP_STREAM_TYPE1 0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG 1
+#define HDCP_2_2_AKE_INIT 2
+#define HDCP_2_2_AKE_SEND_CERT 3
+#define HDCP_2_2_AKE_NO_STORED_KM 4
+#define HDCP_2_2_AKE_STORED_KM 5
+#define HDCP_2_2_AKE_SEND_HPRIME 7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8
+#define HDCP_2_2_LC_INIT 9
+#define HDCP_2_2_LC_SEND_LPRIME 10
+#define HDCP_2_2_SKE_SEND_EKS 11
+#define HDCP_2_2_REP_SEND_RECVID_LIST 12
+#define HDCP_2_2_REP_SEND_ACK 15
+#define HDCP_2_2_REP_STREAM_MANAGE 16
+#define HDCP_2_2_REP_STREAM_READY 17
+
+#define HDCP_2_2_RTX_LEN 8
+#define HDCP_2_2_RRX_LEN 8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3
+#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+			       HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN 384
+
+#define HDCP_2_2_E_KPUB_KM_LEN 128
+#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16)
+#define HDCP_2_2_H_PRIME_LEN 32
+#define HDCP_2_2_E_KH_KM_LEN 16
+#define HDCP_2_2_RN_LEN 8
+#define HDCP_2_2_L_PRIME_LEN 32
+#define HDCP_2_2_E_DKEY_KS_LEN 16
+#define HDCP_2_2_RIV_LEN 8
+#define HDCP_2_2_SEQ_NUM_LEN 3
+#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT 31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \
+				       HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN 32
+
+/* The following macros take a byte at a time for bit(s) masking */
+/*
+ * TODO: HDCP_2_2_MAX_CONTENT_STREAMS_CNT is based upon actual
+ * H/W MST streams capacity. It needs to be moved out to a
+ * platform-specific header.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 4
+#define HDCP_2_2_TXCAP_MASK_LEN 2
+#define HDCP_2_2_RXCAPS_LEN 3
+#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN 2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0))
+
+/* HDCP2.0 compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1)
+
+struct hdcp2_cert_rx {
+	u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+	u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+	u8 reserved[2];
+	u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+	u8 stream_id;
+	u8 stream_type;
+} __packed;
+
+/*
+ * The TxCaps field specified in the HDCP HDMI and DP specs.
+ * This field is big endian as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+	/* Transmitter must set this to 0x2 */
+	u8 version;
+
+	/* Reserved for HDCP and DP Spec.
Read as Zero */ + u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN]; +} __packed; + +/* Main structures for HDCP2.2 protocol communication */ +struct hdcp2_ake_init { + u8 msg_id; + u8 r_tx[HDCP_2_2_RTX_LEN]; + struct hdcp2_tx_caps tx_caps; +} __packed; + +struct hdcp2_ake_send_cert { + u8 msg_id; + struct hdcp2_cert_rx cert_rx; + u8 r_rx[HDCP_2_2_RRX_LEN]; + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; +} __packed; + +struct hdcp2_ake_no_stored_km { + u8 msg_id; + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; +} __packed; + +struct hdcp2_ake_stored_km { + u8 msg_id; + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; +} __packed; + +struct hdcp2_ake_send_hprime { + u8 msg_id; + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; +} __packed; + +struct hdcp2_ake_send_pairing_info { + u8 msg_id; + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; +} __packed; + +struct hdcp2_lc_init { + u8 msg_id; + u8 r_n[HDCP_2_2_RN_LEN]; +} __packed; + +struct hdcp2_lc_send_lprime { + u8 msg_id; + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; +} __packed; + +struct hdcp2_ske_send_eks { + u8 msg_id; + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; + u8 riv[HDCP_2_2_RIV_LEN]; +} __packed; + +struct hdcp2_rep_send_receiverid_list { + u8 msg_id; + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; +} __packed; + +struct hdcp2_rep_send_ack { + u8 msg_id; + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; +} __packed; + +struct hdcp2_rep_stream_manage { + u8 msg_id; + u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; + __be16 k; + struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT]; +} __packed; + +struct hdcp2_rep_stream_ready { + u8 msg_id; + u8 m_prime[HDCP_2_2_MPRIME_LEN]; +} __packed; + +/* HDCP2.2 TIMEOUTs in mSec */ +#define HDCP_2_2_CERT_TIMEOUT_MS 100 +#define HDCP_2_2_DP_CERT_READ_TIMEOUT_MS 110 +#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000 +#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200 +#define HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS 7 +#define HDCP_2_2_PAIRING_TIMEOUT_MS 200 +#define HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS 5 +#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20 +#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 16 +#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000 +#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100 + +/* HDMI HDCP2.2 Register Offsets */ +#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50 +#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60 +#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70 +#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80 +#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0 + +#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2) +#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02 +#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF +#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200 + +/* Below macros take a byte at a time and mask the bit(s) */ +#define HDCP_2_2_HDMI_RXSTATUS_LEN 2 +#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3) +#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) +#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) + +/* + * Helper functions to convert 24bit big endian hdcp sequence number to + * host format and back + */ +static inline +u32 drm_hdcp_be24_to_cpu(const u8 seq_num[HDCP_2_2_SEQ_NUM_LEN]) +{ + return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16); +} + +static inline +void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val) +{ + seq_num[0] = val >> 16; + seq_num[1] = val >> 8; + seq_num[2] = val; +} + +#define DRM_HDCP_SRM_GEN1_MAX_BYTES (5 * 1024) +#define DRM_HDCP_1_4_SRM_ID 0x8 +#define DRM_HDCP_SRM_ID_MASK (0xF << 4) +#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3 +#define 
DRM_HDCP_1_4_DCP_SIG_SIZE 40
+#define DRM_HDCP_2_SRM_ID 0x9
+#define DRM_HDCP_2_INDICATOR 0x1
+#define DRM_HDCP_2_INDICATOR_MASK 0xF
+#define DRM_HDCP_2_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_2_DCP_SIG_SIZE 384
+#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4
+#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6)
+
+struct hdcp_srm_header {
+	u8 srm_id;
+	u8 reserved;
+	__be16 srm_version;
+	u8 srm_gen_no;
+} __packed;
+
+/* Content Type classification for HDCP2.2 vs others */
+#define DRM_MODE_HDCP_CONTENT_TYPE0 0
+#define DRM_MODE_HDCP_CONTENT_TYPE1 1
+
+#endif
diff --git a/include/drm/display/drm_hdcp_helper.h b/include/drm/display/drm_hdcp_helper.h
new file mode 100644
index 0000000000..8aaf87bf27
--- /dev/null
+++ b/include/drm/display/drm_hdcp_helper.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul
+ */
+
+#ifndef _DRM_HDCP_HELPER_H_INCLUDED_
+#define _DRM_HDCP_HELPER_H_INCLUDED_
+
+#include <drm/display/drm_hdcp.h>
+
+struct drm_device;
+struct drm_connector;
+
+int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, u8 *ksvs, u32 ksv_count);
+int drm_connector_attach_content_protection_property(struct drm_connector *connector,
+						     bool hdcp_content_type);
+void drm_hdcp_update_content_protection(struct drm_connector *connector, u64 val);
+
+#endif
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
new file mode 100644
index 0000000000..76d234826e
--- /dev/null
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_HDMI_HELPER
+#define DRM_HDMI_HELPER
+
+#include <linux/hdmi.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+
+void
+drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
+				   const struct drm_connector_state *conn_state);
+
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+			    const struct drm_connector_state *conn_state);
+
+int
+drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+				    const struct drm_connector_state *conn_state);
+
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+					 const struct drm_connector_state *conn_state);
+
+#endif
diff --git a/include/drm/display/drm_scdc.h b/include/drm/display/drm_scdc.h
new file mode 100644
index 0000000000..3d58f37e8e
--- /dev/null
+++ b/include/drm/display/drm_scdc.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef DRM_SCDC_H +#define DRM_SCDC_H + +#define SCDC_SINK_VERSION 0x01 + +#define SCDC_SOURCE_VERSION 0x02 + +#define SCDC_UPDATE_0 0x10 +#define SCDC_READ_REQUEST_TEST (1 << 2) +#define SCDC_CED_UPDATE (1 << 1) +#define SCDC_STATUS_UPDATE (1 << 0) + +#define SCDC_UPDATE_1 0x11 + +#define SCDC_TMDS_CONFIG 0x20 +#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 (1 << 1) +#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_10 (0 << 1) +#define SCDC_SCRAMBLING_ENABLE (1 << 0) + +#define SCDC_SCRAMBLER_STATUS 0x21 +#define SCDC_SCRAMBLING_STATUS (1 << 0) + +#define SCDC_CONFIG_0 0x30 +#define SCDC_READ_REQUEST_ENABLE (1 << 0) + +#define SCDC_STATUS_FLAGS_0 0x40 +#define SCDC_CH2_LOCK (1 << 3) +#define SCDC_CH1_LOCK (1 << 2) +#define SCDC_CH0_LOCK (1 << 1) +#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK) +#define SCDC_CLOCK_DETECT (1 << 0) + +#define SCDC_STATUS_FLAGS_1 0x41 + +#define SCDC_ERR_DET_0_L 0x50 +#define SCDC_ERR_DET_0_H 0x51 +#define SCDC_ERR_DET_1_L 0x52 +#define SCDC_ERR_DET_1_H 0x53 +#define SCDC_ERR_DET_2_L 0x54 +#define SCDC_ERR_DET_2_H 0x55 +#define SCDC_CHANNEL_VALID (1 << 7) + +#define SCDC_ERR_DET_CHECKSUM 0x56 + +#define SCDC_TEST_CONFIG_0 0xc0 +#define SCDC_TEST_READ_REQUEST (1 << 7) +#define SCDC_TEST_READ_REQUEST_DELAY(x) ((x) & 0x7f) + +#define SCDC_MANUFACTURER_IEEE_OUI 0xd0 +#define SCDC_MANUFACTURER_IEEE_OUI_SIZE 3 + +#define SCDC_DEVICE_ID 0xd3 +#define SCDC_DEVICE_ID_SIZE 8 + +#define SCDC_DEVICE_HARDWARE_REVISION 0xdb +#define SCDC_GET_DEVICE_HARDWARE_REVISION_MAJOR(x) (((x) >> 4) & 0xf) +#define SCDC_GET_DEVICE_HARDWARE_REVISION_MINOR(x) (((x) >> 0) & 0xf) + +#define SCDC_DEVICE_SOFTWARE_MAJOR_REVISION 0xdc +#define SCDC_DEVICE_SOFTWARE_MINOR_REVISION 0xdd + +#define SCDC_MANUFACTURER_SPECIFIC 0xde +#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34 + +#endif diff --git a/include/drm/display/drm_scdc_helper.h b/include/drm/display/drm_scdc_helper.h new file mode 100644 index 0000000000..ded01fd948 --- /dev/null +++ b/include/drm/display/drm_scdc_helper.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015 NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef DRM_SCDC_HELPER_H
+#define DRM_SCDC_HELPER_H
+
+#include <linux/types.h>
+
+#include <drm/display/drm_scdc.h>
+
+struct i2c_adapter;
+
+ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
+		      size_t size);
+ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
+		       const void *buffer, size_t size);
+
+/**
+ * drm_scdc_readb - read a single byte from SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Reads a single byte from SCDC. This is a convenience wrapper around the
+ * drm_scdc_read() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
+				 u8 *value)
+{
+	return drm_scdc_read(adapter, offset, value, sizeof(*value));
+}
+
+/**
+ * drm_scdc_writeb - write a single byte to SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to write
+ * @value: value to write into the register
+ *
+ * Writes a single byte to SCDC. This is a convenience wrapper around the
+ * drm_scdc_write() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
+				  u8 value)
+{
+	return drm_scdc_write(adapter, offset, &value, sizeof(value));
+}
+
+bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter);
+
+bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable);
+bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set);
+
+#endif
diff --git a/include/dt-bindings/clock/en7523-clk.h b/include/dt-bindings/clock/en7523-clk.h
new file mode 100644
index 0000000000..717d23a5e5
--- /dev/null
+++ b/include/dt-bindings/clock/en7523-clk.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+#define _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+
+#define EN7523_CLK_GSW 0
+#define EN7523_CLK_EMI 1
+#define EN7523_CLK_BUS 2
+#define EN7523_CLK_SLIC 3
+#define EN7523_CLK_SPI 4
+#define EN7523_CLK_NPU 5
+#define EN7523_CLK_CRYPTO 6
+#define EN7523_CLK_PCIE 7
+
+#define EN7523_NUM_CLOCKS 8
+
+#endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
diff --git a/include/dt-bindings/clock/mt8186-clk.h b/include/dt-bindings/clock/mt8186-clk.h
new file mode 100644
index 0000000000..a70bf67af4
--- /dev/null
+++ b/include/dt-bindings/clock/mt8186-clk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen + */ + +#ifndef _DT_BINDINGS_CLK_MT8186_H +#define _DT_BINDINGS_CLK_MT8186_H + +/* MCUSYS */ + +#define CLK_MCU_ARMPLL_LL_SEL 0 +#define CLK_MCU_ARMPLL_BL_SEL 1 +#define CLK_MCU_ARMPLL_BUS_SEL 2 +#define CLK_MCU_NR_CLK 3 + +/* TOPCKGEN */ + +#define CLK_TOP_AXI 0 +#define CLK_TOP_SCP 1 +#define CLK_TOP_MFG 2 +#define CLK_TOP_CAMTG 3 +#define CLK_TOP_CAMTG1 4 +#define CLK_TOP_CAMTG2 5 +#define CLK_TOP_CAMTG3 6 +#define CLK_TOP_CAMTG4 7 +#define CLK_TOP_CAMTG5 8 +#define CLK_TOP_CAMTG6 9 +#define CLK_TOP_UART 10 +#define CLK_TOP_SPI 11 +#define CLK_TOP_MSDC50_0_HCLK 12 +#define CLK_TOP_MSDC50_0 13 +#define CLK_TOP_MSDC30_1 14 +#define CLK_TOP_AUDIO 15 +#define CLK_TOP_AUD_INTBUS 16 +#define CLK_TOP_AUD_1 17 +#define CLK_TOP_AUD_2 18 +#define CLK_TOP_AUD_ENGEN1 19 +#define CLK_TOP_AUD_ENGEN2 20 +#define CLK_TOP_DISP_PWM 21 +#define CLK_TOP_SSPM 22 +#define CLK_TOP_DXCC 23 +#define CLK_TOP_USB_TOP 24 +#define CLK_TOP_SRCK 25 +#define CLK_TOP_SPM 26 +#define CLK_TOP_I2C 27 +#define CLK_TOP_PWM 28 +#define CLK_TOP_SENINF 29 +#define CLK_TOP_SENINF1 30 +#define CLK_TOP_SENINF2 31 +#define CLK_TOP_SENINF3 32 +#define CLK_TOP_AES_MSDCFDE 33 +#define CLK_TOP_PWRAP_ULPOSC 34 +#define CLK_TOP_CAMTM 35 +#define CLK_TOP_VENC 36 +#define CLK_TOP_CAM 37 +#define CLK_TOP_IMG1 38 +#define CLK_TOP_IPE 39 +#define CLK_TOP_DPMAIF 40 +#define CLK_TOP_VDEC 41 +#define CLK_TOP_DISP 42 +#define CLK_TOP_MDP 43 +#define CLK_TOP_AUDIO_H 44 +#define CLK_TOP_UFS 45 +#define CLK_TOP_AES_FDE 46 +#define CLK_TOP_AUDIODSP 47 +#define CLK_TOP_DVFSRC 48 +#define CLK_TOP_DSI_OCC 49 +#define CLK_TOP_SPMI_MST 50 +#define CLK_TOP_SPINOR 51 +#define CLK_TOP_NNA 52 +#define CLK_TOP_NNA1 53 +#define CLK_TOP_NNA2 54 +#define CLK_TOP_SSUSB_XHCI 55 +#define CLK_TOP_SSUSB_TOP_1P 56 +#define CLK_TOP_SSUSB_XHCI_1P 57 +#define CLK_TOP_WPE 58 +#define CLK_TOP_DPI 59 +#define CLK_TOP_U3_OCC_250M 60 +#define CLK_TOP_U3_OCC_500M 61 +#define CLK_TOP_ADSP_BUS 62 +#define CLK_TOP_APLL_I2S0_MCK_SEL 63 +#define CLK_TOP_APLL_I2S1_MCK_SEL 64 +#define CLK_TOP_APLL_I2S2_MCK_SEL 65 +#define CLK_TOP_APLL_I2S4_MCK_SEL 66 +#define CLK_TOP_APLL_TDMOUT_MCK_SEL 67 +#define CLK_TOP_MAINPLL_D2 68 +#define CLK_TOP_MAINPLL_D2_D2 69 +#define CLK_TOP_MAINPLL_D2_D4 70 +#define CLK_TOP_MAINPLL_D2_D16 71 +#define CLK_TOP_MAINPLL_D3 72 +#define CLK_TOP_MAINPLL_D3_D2 73 +#define CLK_TOP_MAINPLL_D3_D4 74 +#define CLK_TOP_MAINPLL_D5 75 +#define CLK_TOP_MAINPLL_D5_D2 76 +#define CLK_TOP_MAINPLL_D5_D4 77 +#define CLK_TOP_MAINPLL_D7 78 +#define CLK_TOP_MAINPLL_D7_D2 79 +#define CLK_TOP_MAINPLL_D7_D4 80 +#define CLK_TOP_UNIVPLL 81 +#define CLK_TOP_UNIVPLL_D2 82 +#define CLK_TOP_UNIVPLL_D2_D2 83 +#define CLK_TOP_UNIVPLL_D2_D4 84 +#define CLK_TOP_UNIVPLL_D3 85 +#define CLK_TOP_UNIVPLL_D3_D2 86 +#define CLK_TOP_UNIVPLL_D3_D4 87 +#define CLK_TOP_UNIVPLL_D3_D8 88 +#define CLK_TOP_UNIVPLL_D3_D32 89 +#define CLK_TOP_UNIVPLL_D5 90 +#define CLK_TOP_UNIVPLL_D5_D2 91 +#define CLK_TOP_UNIVPLL_D5_D4 92 +#define CLK_TOP_UNIVPLL_D7 93 +#define CLK_TOP_UNIVPLL_192M 94 +#define CLK_TOP_UNIVPLL_192M_D4 95 +#define CLK_TOP_UNIVPLL_192M_D8 96 +#define CLK_TOP_UNIVPLL_192M_D16 97 +#define CLK_TOP_UNIVPLL_192M_D32 98 +#define CLK_TOP_APLL1_D2 99 +#define CLK_TOP_APLL1_D4 100 +#define CLK_TOP_APLL1_D8 101 +#define CLK_TOP_APLL2_D2 102 +#define CLK_TOP_APLL2_D4 103 +#define CLK_TOP_APLL2_D8 104 +#define CLK_TOP_MMPLL_D2 105 +#define CLK_TOP_TVDPLL_D2 106 +#define CLK_TOP_TVDPLL_D4 107 +#define CLK_TOP_TVDPLL_D8 108 +#define CLK_TOP_TVDPLL_D16 109 +#define 
CLK_TOP_TVDPLL_D32 110 +#define CLK_TOP_MSDCPLL_D2 111 +#define CLK_TOP_ULPOSC1 112 +#define CLK_TOP_ULPOSC1_D2 113 +#define CLK_TOP_ULPOSC1_D4 114 +#define CLK_TOP_ULPOSC1_D8 115 +#define CLK_TOP_ULPOSC1_D10 116 +#define CLK_TOP_ULPOSC1_D16 117 +#define CLK_TOP_ULPOSC1_D32 118 +#define CLK_TOP_ADSPPLL_D2 119 +#define CLK_TOP_ADSPPLL_D4 120 +#define CLK_TOP_ADSPPLL_D8 121 +#define CLK_TOP_NNAPLL_D2 122 +#define CLK_TOP_NNAPLL_D4 123 +#define CLK_TOP_NNAPLL_D8 124 +#define CLK_TOP_NNA2PLL_D2 125 +#define CLK_TOP_NNA2PLL_D4 126 +#define CLK_TOP_NNA2PLL_D8 127 +#define CLK_TOP_F_BIST2FPC 128 +#define CLK_TOP_466M_FMEM 129 +#define CLK_TOP_MPLL 130 +#define CLK_TOP_APLL12_CK_DIV0 131 +#define CLK_TOP_APLL12_CK_DIV1 132 +#define CLK_TOP_APLL12_CK_DIV2 133 +#define CLK_TOP_APLL12_CK_DIV4 134 +#define CLK_TOP_APLL12_CK_DIV_TDMOUT_M 135 +#define CLK_TOP_NR_CLK 136 + +/* INFRACFG_AO */ + +#define CLK_INFRA_AO_PMIC_TMR 0 +#define CLK_INFRA_AO_PMIC_AP 1 +#define CLK_INFRA_AO_PMIC_MD 2 +#define CLK_INFRA_AO_PMIC_CONN 3 +#define CLK_INFRA_AO_SCP_CORE 4 +#define CLK_INFRA_AO_SEJ 5 +#define CLK_INFRA_AO_APXGPT 6 +#define CLK_INFRA_AO_ICUSB 7 +#define CLK_INFRA_AO_GCE 8 +#define CLK_INFRA_AO_THERM 9 +#define CLK_INFRA_AO_I2C_AP 10 +#define CLK_INFRA_AO_I2C_CCU 11 +#define CLK_INFRA_AO_I2C_SSPM 12 +#define CLK_INFRA_AO_I2C_RSV 13 +#define CLK_INFRA_AO_PWM_HCLK 14 +#define CLK_INFRA_AO_PWM1 15 +#define CLK_INFRA_AO_PWM2 16 +#define CLK_INFRA_AO_PWM3 17 +#define CLK_INFRA_AO_PWM4 18 +#define CLK_INFRA_AO_PWM5 19 +#define CLK_INFRA_AO_PWM 20 +#define CLK_INFRA_AO_UART0 21 +#define CLK_INFRA_AO_UART1 22 +#define CLK_INFRA_AO_UART2 23 +#define CLK_INFRA_AO_GCE_26M 24 +#define CLK_INFRA_AO_CQ_DMA_FPC 25 +#define CLK_INFRA_AO_BTIF 26 +#define CLK_INFRA_AO_SPI0 27 +#define CLK_INFRA_AO_MSDC0 28 +#define CLK_INFRA_AO_MSDCFDE 29 +#define CLK_INFRA_AO_MSDC1 30 +#define CLK_INFRA_AO_DVFSRC 31 +#define CLK_INFRA_AO_GCPU 32 +#define CLK_INFRA_AO_TRNG 33 +#define CLK_INFRA_AO_AUXADC 34 +#define CLK_INFRA_AO_CPUM 35 +#define CLK_INFRA_AO_CCIF1_AP 36 +#define CLK_INFRA_AO_CCIF1_MD 37 +#define CLK_INFRA_AO_AUXADC_MD 38 +#define CLK_INFRA_AO_AP_DMA 39 +#define CLK_INFRA_AO_XIU 40 +#define CLK_INFRA_AO_DEVICE_APC 41 +#define CLK_INFRA_AO_CCIF_AP 42 +#define CLK_INFRA_AO_DEBUGTOP 43 +#define CLK_INFRA_AO_AUDIO 44 +#define CLK_INFRA_AO_CCIF_MD 45 +#define CLK_INFRA_AO_DXCC_SEC_CORE 46 +#define CLK_INFRA_AO_DXCC_AO 47 +#define CLK_INFRA_AO_IMP_IIC 48 +#define CLK_INFRA_AO_DRAMC_F26M 49 +#define CLK_INFRA_AO_RG_PWM_FBCLK6 50 +#define CLK_INFRA_AO_SSUSB_TOP_HCLK 51 +#define CLK_INFRA_AO_DISP_PWM 52 +#define CLK_INFRA_AO_CLDMA_BCLK 53 +#define CLK_INFRA_AO_AUDIO_26M_BCLK 54 +#define CLK_INFRA_AO_SSUSB_TOP_P1_HCLK 55 +#define CLK_INFRA_AO_SPI1 56 +#define CLK_INFRA_AO_I2C4 57 +#define CLK_INFRA_AO_MODEM_TEMP_SHARE 58 +#define CLK_INFRA_AO_SPI2 59 +#define CLK_INFRA_AO_SPI3 60 +#define CLK_INFRA_AO_SSUSB_TOP_REF 61 +#define CLK_INFRA_AO_SSUSB_TOP_XHCI 62 +#define CLK_INFRA_AO_SSUSB_TOP_P1_REF 63 +#define CLK_INFRA_AO_SSUSB_TOP_P1_XHCI 64 +#define CLK_INFRA_AO_SSPM 65 +#define CLK_INFRA_AO_SSUSB_TOP_P1_SYS 66 +#define CLK_INFRA_AO_I2C5 67 +#define CLK_INFRA_AO_I2C5_ARBITER 68 +#define CLK_INFRA_AO_I2C5_IMM 69 +#define CLK_INFRA_AO_I2C1_ARBITER 70 +#define CLK_INFRA_AO_I2C1_IMM 71 +#define CLK_INFRA_AO_I2C2_ARBITER 72 +#define CLK_INFRA_AO_I2C2_IMM 73 +#define CLK_INFRA_AO_SPI4 74 +#define CLK_INFRA_AO_SPI5 75 +#define CLK_INFRA_AO_CQ_DMA 76 +#define CLK_INFRA_AO_BIST2FPC 77 +#define CLK_INFRA_AO_MSDC0_SELF 78 +#define 
CLK_INFRA_AO_SPINOR 79 +#define CLK_INFRA_AO_SSPM_26M_SELF 80 +#define CLK_INFRA_AO_SSPM_32K_SELF 81 +#define CLK_INFRA_AO_I2C6 82 +#define CLK_INFRA_AO_AP_MSDC0 83 +#define CLK_INFRA_AO_MD_MSDC0 84 +#define CLK_INFRA_AO_MSDC0_SRC 85 +#define CLK_INFRA_AO_MSDC1_SRC 86 +#define CLK_INFRA_AO_SEJ_F13M 87 +#define CLK_INFRA_AO_AES_TOP0_BCLK 88 +#define CLK_INFRA_AO_MCU_PM_BCLK 89 +#define CLK_INFRA_AO_CCIF2_AP 90 +#define CLK_INFRA_AO_CCIF2_MD 91 +#define CLK_INFRA_AO_CCIF3_AP 92 +#define CLK_INFRA_AO_CCIF3_MD 93 +#define CLK_INFRA_AO_FADSP_26M 94 +#define CLK_INFRA_AO_FADSP_32K 95 +#define CLK_INFRA_AO_CCIF4_AP 96 +#define CLK_INFRA_AO_CCIF4_MD 97 +#define CLK_INFRA_AO_FADSP 98 +#define CLK_INFRA_AO_FLASHIF_133M 99 +#define CLK_INFRA_AO_FLASHIF_66M 100 +#define CLK_INFRA_AO_NR_CLK 101 + +/* APMIXEDSYS */ + +#define CLK_APMIXED_ARMPLL_LL 0 +#define CLK_APMIXED_ARMPLL_BL 1 +#define CLK_APMIXED_CCIPLL 2 +#define CLK_APMIXED_MAINPLL 3 +#define CLK_APMIXED_UNIV2PLL 4 +#define CLK_APMIXED_MSDCPLL 5 +#define CLK_APMIXED_MMPLL 6 +#define CLK_APMIXED_NNAPLL 7 +#define CLK_APMIXED_NNA2PLL 8 +#define CLK_APMIXED_ADSPPLL 9 +#define CLK_APMIXED_MFGPLL 10 +#define CLK_APMIXED_TVDPLL 11 +#define CLK_APMIXED_APLL1 12 +#define CLK_APMIXED_APLL2 13 +#define CLK_APMIXED_NR_CLK 14 + +/* IMP_IIC_WRAP */ + +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C0 0 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C1 1 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C2 2 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C3 3 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C4 4 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C5 5 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C6 6 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C7 7 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C8 8 +#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C9 9 +#define CLK_IMP_IIC_WRAP_NR_CLK 10 + +/* MFGCFG */ + +#define CLK_MFG_BG3D 0 +#define CLK_MFG_NR_CLK 1 + +/* MMSYS */ + +#define CLK_MM_DISP_MUTEX0 0 +#define CLK_MM_APB_MM_BUS 1 +#define CLK_MM_DISP_OVL0 2 +#define CLK_MM_DISP_RDMA0 3 +#define CLK_MM_DISP_OVL0_2L 4 +#define CLK_MM_DISP_WDMA0 5 +#define CLK_MM_DISP_RSZ0 6 +#define CLK_MM_DISP_AAL0 7 +#define CLK_MM_DISP_CCORR0 8 +#define CLK_MM_DISP_COLOR0 9 +#define CLK_MM_SMI_INFRA 10 +#define CLK_MM_DISP_DSC_WRAP0 11 +#define CLK_MM_DISP_GAMMA0 12 +#define CLK_MM_DISP_POSTMASK0 13 +#define CLK_MM_DISP_DITHER0 14 +#define CLK_MM_SMI_COMMON 15 +#define CLK_MM_DSI0 16 +#define CLK_MM_DISP_FAKE_ENG0 17 +#define CLK_MM_DISP_FAKE_ENG1 18 +#define CLK_MM_SMI_GALS 19 +#define CLK_MM_SMI_IOMMU 20 +#define CLK_MM_DISP_RDMA1 21 +#define CLK_MM_DISP_DPI 22 +#define CLK_MM_DSI0_DSI_CK_DOMAIN 23 +#define CLK_MM_DISP_26M 24 +#define CLK_MM_NR_CLK 25 + +/* WPESYS */ + +#define CLK_WPE_CK_EN 0 +#define CLK_WPE_SMI_LARB8_CK_EN 1 +#define CLK_WPE_SYS_EVENT_TX_CK_EN 2 +#define CLK_WPE_SMI_LARB8_PCLK_EN 3 +#define CLK_WPE_NR_CLK 4 + +/* IMGSYS1 */ + +#define CLK_IMG1_LARB9_IMG1 0 +#define CLK_IMG1_LARB10_IMG1 1 +#define CLK_IMG1_DIP 2 +#define CLK_IMG1_GALS_IMG1 3 +#define CLK_IMG1_NR_CLK 4 + +/* IMGSYS2 */ + +#define CLK_IMG2_LARB9_IMG2 0 +#define CLK_IMG2_LARB10_IMG2 1 +#define CLK_IMG2_MFB 2 +#define CLK_IMG2_WPE 3 +#define CLK_IMG2_MSS 4 +#define CLK_IMG2_GALS_IMG2 5 +#define CLK_IMG2_NR_CLK 6 + +/* VDECSYS */ + +#define CLK_VDEC_LARB1_CKEN 0 +#define CLK_VDEC_LAT_CKEN 1 +#define CLK_VDEC_LAT_ACTIVE 2 +#define CLK_VDEC_LAT_CKEN_ENG 3 +#define CLK_VDEC_MINI_MDP_CKEN_CFG_RG 4 +#define CLK_VDEC_CKEN 5 +#define CLK_VDEC_ACTIVE 6 +#define CLK_VDEC_CKEN_ENG 7 +#define CLK_VDEC_NR_CLK 8 + +/* VENCSYS */ + +#define CLK_VENC_CKE0_LARB 0 +#define CLK_VENC_CKE1_VENC 1 
+#define CLK_VENC_CKE2_JPGENC 2 +#define CLK_VENC_CKE5_GALS 3 +#define CLK_VENC_NR_CLK 4 + +/* CAMSYS */ + +#define CLK_CAM_LARB13 0 +#define CLK_CAM_DFP_VAD 1 +#define CLK_CAM_LARB14 2 +#define CLK_CAM 3 +#define CLK_CAMTG 4 +#define CLK_CAM_SENINF 5 +#define CLK_CAMSV1 6 +#define CLK_CAMSV2 7 +#define CLK_CAMSV3 8 +#define CLK_CAM_CCU0 9 +#define CLK_CAM_CCU1 10 +#define CLK_CAM_MRAW0 11 +#define CLK_CAM_FAKE_ENG 12 +#define CLK_CAM_CCU_GALS 13 +#define CLK_CAM2MM_GALS 14 +#define CLK_CAM_NR_CLK 15 + +/* CAMSYS_RAWA */ + +#define CLK_CAM_RAWA_LARBX_RAWA 0 +#define CLK_CAM_RAWA 1 +#define CLK_CAM_RAWA_CAMTG_RAWA 2 +#define CLK_CAM_RAWA_NR_CLK 3 + +/* CAMSYS_RAWB */ + +#define CLK_CAM_RAWB_LARBX_RAWB 0 +#define CLK_CAM_RAWB 1 +#define CLK_CAM_RAWB_CAMTG_RAWB 2 +#define CLK_CAM_RAWB_NR_CLK 3 + +/* MDPSYS */ + +#define CLK_MDP_RDMA0 0 +#define CLK_MDP_TDSHP0 1 +#define CLK_MDP_IMG_DL_ASYNC0 2 +#define CLK_MDP_IMG_DL_ASYNC1 3 +#define CLK_MDP_DISP_RDMA 4 +#define CLK_MDP_HMS 5 +#define CLK_MDP_SMI0 6 +#define CLK_MDP_APB_BUS 7 +#define CLK_MDP_WROT0 8 +#define CLK_MDP_RSZ0 9 +#define CLK_MDP_HDR0 10 +#define CLK_MDP_MUTEX0 11 +#define CLK_MDP_WROT1 12 +#define CLK_MDP_RSZ1 13 +#define CLK_MDP_FAKE_ENG0 14 +#define CLK_MDP_AAL0 15 +#define CLK_MDP_DISP_WDMA 16 +#define CLK_MDP_COLOR 17 +#define CLK_MDP_IMG_DL_ASYNC2 18 +#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 19 +#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 20 +#define CLK_MDP_IMG_DL_RELAY2_ASYNC2 21 +#define CLK_MDP_NR_CLK 22 + +/* IPESYS */ + +#define CLK_IPE_LARB19 0 +#define CLK_IPE_LARB20 1 +#define CLK_IPE_SMI_SUBCOM 2 +#define CLK_IPE_FD 3 +#define CLK_IPE_FE 4 +#define CLK_IPE_RSC 5 +#define CLK_IPE_DPE 6 +#define CLK_IPE_GALS_IPE 7 +#define CLK_IPE_NR_CLK 8 + +#endif /* _DT_BINDINGS_CLK_MT8186_H */ diff --git a/include/dt-bindings/clock/nuvoton,npcm845-clk.h b/include/dt-bindings/clock/nuvoton,npcm845-clk.h new file mode 100644 index 0000000000..e5cce08b00 --- /dev/null +++ b/include/dt-bindings/clock/nuvoton,npcm845-clk.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2021 Nuvoton Technologies. + * Author: Tomer Maimon + * + * Device Tree binding constants for NPCM8XX clock controller. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_NPCM8XX_H +#define __DT_BINDINGS_CLOCK_NPCM8XX_H + +#define NPCM8XX_CLK_CPU 0 +#define NPCM8XX_CLK_GFX_PIXEL 1 +#define NPCM8XX_CLK_MC 2 +#define NPCM8XX_CLK_ADC 3 +#define NPCM8XX_CLK_AHB 4 +#define NPCM8XX_CLK_TIMER 5 +#define NPCM8XX_CLK_UART 6 +#define NPCM8XX_CLK_UART2 7 +#define NPCM8XX_CLK_MMC 8 +#define NPCM8XX_CLK_SPI3 9 +#define NPCM8XX_CLK_PCI 10 +#define NPCM8XX_CLK_AXI 11 +#define NPCM8XX_CLK_APB4 12 +#define NPCM8XX_CLK_APB3 13 +#define NPCM8XX_CLK_APB2 14 +#define NPCM8XX_CLK_APB1 15 +#define NPCM8XX_CLK_APB5 16 +#define NPCM8XX_CLK_CLKOUT 17 +#define NPCM8XX_CLK_GFX 18 +#define NPCM8XX_CLK_SU 19 +#define NPCM8XX_CLK_SU48 20 +#define NPCM8XX_CLK_SDHC 21 +#define NPCM8XX_CLK_SPI0 22 +#define NPCM8XX_CLK_SPI1 23 +#define NPCM8XX_CLK_SPIX 24 +#define NPCM8XX_CLK_RG 25 +#define NPCM8XX_CLK_RCP 26 +#define NPCM8XX_CLK_PRE_ADC 27 +#define NPCM8XX_CLK_ATB 28 +#define NPCM8XX_CLK_PRE_CLK 29 +#define NPCM8XX_CLK_TH 30 +#define NPCM8XX_CLK_REFCLK 31 +#define NPCM8XX_CLK_SYSBYPCK 32 +#define NPCM8XX_CLK_MCBYPCK 33 + +#define NPCM8XX_NUM_CLOCKS (NPCM8XX_CLK_MCBYPCK + 1) + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8350.h b/include/dt-bindings/clock/qcom,dispcc-sm8350.h new file mode 100644 index 0000000000..e69de29bb2 diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h new file mode 100644 index 0000000000..cb2fb63882 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h @@ -0,0 +1,496 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Linaro Ltd. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H +#define _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H + +/* GCC clocks */ +#define GCC_GPLL0 0 +#define GCC_GPLL0_OUT_EVEN 1 +#define GCC_GPLL2 2 +#define GCC_GPLL4 3 +#define GCC_GPLL7 4 +#define GCC_GPLL8 5 +#define GCC_GPLL9 6 +#define GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK 7 +#define GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK 8 +#define GCC_AGGRE_NOC_PCIE_4_AXI_CLK 9 +#define GCC_AGGRE_NOC_PCIE_SOUTH_SF_AXI_CLK 10 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 11 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 12 +#define GCC_AGGRE_USB3_MP_AXI_CLK 13 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 14 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 15 +#define GCC_AGGRE_USB4_1_AXI_CLK 16 +#define GCC_AGGRE_USB4_AXI_CLK 17 +#define GCC_AGGRE_USB_NOC_AXI_CLK 18 +#define GCC_AGGRE_USB_NOC_NORTH_AXI_CLK 19 +#define GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK 20 +#define GCC_AHB2PHY0_CLK 21 +#define GCC_AHB2PHY2_CLK 22 +#define GCC_BOOT_ROM_AHB_CLK 23 +#define GCC_CAMERA_AHB_CLK 24 +#define GCC_CAMERA_HF_AXI_CLK 25 +#define GCC_CAMERA_SF_AXI_CLK 26 +#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 27 +#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 28 +#define GCC_CAMERA_THROTTLE_XO_CLK 29 +#define GCC_CAMERA_XO_CLK 30 +#define GCC_CFG_NOC_USB3_MP_AXI_CLK 31 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 32 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 33 +#define GCC_CNOC_PCIE0_TUNNEL_CLK 34 +#define GCC_CNOC_PCIE1_TUNNEL_CLK 35 +#define GCC_CNOC_PCIE4_QX_CLK 36 +#define GCC_DDRSS_GPU_AXI_CLK 37 +#define GCC_DDRSS_PCIE_SF_TBU_CLK 38 +#define GCC_DISP1_AHB_CLK 39 +#define GCC_DISP1_HF_AXI_CLK 40 +#define GCC_DISP1_SF_AXI_CLK 41 +#define GCC_DISP1_THROTTLE_NRT_AXI_CLK 42 +#define GCC_DISP1_THROTTLE_RT_AXI_CLK 43 +#define GCC_DISP1_XO_CLK 44 +#define GCC_DISP_AHB_CLK 45 +#define GCC_DISP_HF_AXI_CLK 46 +#define GCC_DISP_SF_AXI_CLK 47 +#define GCC_DISP_THROTTLE_NRT_AXI_CLK 48 
+#define GCC_DISP_THROTTLE_RT_AXI_CLK 49 +#define GCC_DISP_XO_CLK 50 +#define GCC_EMAC0_AXI_CLK 51 +#define GCC_EMAC0_PTP_CLK 52 +#define GCC_EMAC0_PTP_CLK_SRC 53 +#define GCC_EMAC0_RGMII_CLK 54 +#define GCC_EMAC0_RGMII_CLK_SRC 55 +#define GCC_EMAC0_SLV_AHB_CLK 56 +#define GCC_EMAC1_AXI_CLK 57 +#define GCC_EMAC1_PTP_CLK 58 +#define GCC_EMAC1_PTP_CLK_SRC 59 +#define GCC_EMAC1_RGMII_CLK 60 +#define GCC_EMAC1_RGMII_CLK_SRC 61 +#define GCC_EMAC1_SLV_AHB_CLK 62 +#define GCC_GP1_CLK 63 +#define GCC_GP1_CLK_SRC 64 +#define GCC_GP2_CLK 65 +#define GCC_GP2_CLK_SRC 66 +#define GCC_GP3_CLK 67 +#define GCC_GP3_CLK_SRC 68 +#define GCC_GP4_CLK 69 +#define GCC_GP4_CLK_SRC 70 +#define GCC_GP5_CLK 71 +#define GCC_GP5_CLK_SRC 72 +#define GCC_GPU_CFG_AHB_CLK 73 +#define GCC_GPU_GPLL0_CLK_SRC 74 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 75 +#define GCC_GPU_IREF_EN 76 +#define GCC_GPU_MEMNOC_GFX_CLK 77 +#define GCC_GPU_SNOC_DVM_GFX_CLK 78 +#define GCC_GPU_TCU_THROTTLE_AHB_CLK 79 +#define GCC_GPU_TCU_THROTTLE_CLK 80 +#define GCC_PCIE0_PHY_RCHNG_CLK 81 +#define GCC_PCIE1_PHY_RCHNG_CLK 82 +#define GCC_PCIE2A_PHY_RCHNG_CLK 83 +#define GCC_PCIE2B_PHY_RCHNG_CLK 84 +#define GCC_PCIE3A_PHY_RCHNG_CLK 85 +#define GCC_PCIE3B_PHY_RCHNG_CLK 86 +#define GCC_PCIE4_PHY_RCHNG_CLK 87 +#define GCC_PCIE_0_AUX_CLK 88 +#define GCC_PCIE_0_AUX_CLK_SRC 89 +#define GCC_PCIE_0_CFG_AHB_CLK 90 +#define GCC_PCIE_0_MSTR_AXI_CLK 91 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 92 +#define GCC_PCIE_0_PIPE_CLK 93 +#define GCC_PCIE_0_SLV_AXI_CLK 94 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 95 +#define GCC_PCIE_1_AUX_CLK 96 +#define GCC_PCIE_1_AUX_CLK_SRC 97 +#define GCC_PCIE_1_CFG_AHB_CLK 98 +#define GCC_PCIE_1_MSTR_AXI_CLK 99 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 100 +#define GCC_PCIE_1_PIPE_CLK 101 +#define GCC_PCIE_1_SLV_AXI_CLK 102 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 103 +#define GCC_PCIE_2A2B_CLKREF_CLK 104 +#define GCC_PCIE_2A_AUX_CLK 105 +#define GCC_PCIE_2A_AUX_CLK_SRC 106 +#define GCC_PCIE_2A_CFG_AHB_CLK 107 +#define GCC_PCIE_2A_MSTR_AXI_CLK 108 +#define GCC_PCIE_2A_PHY_RCHNG_CLK_SRC 109 +#define GCC_PCIE_2A_PIPE_CLK 110 +#define GCC_PCIE_2A_PIPE_CLK_SRC 111 +#define GCC_PCIE_2A_PIPE_DIV_CLK_SRC 112 +#define GCC_PCIE_2A_PIPEDIV2_CLK 113 +#define GCC_PCIE_2A_SLV_AXI_CLK 114 +#define GCC_PCIE_2A_SLV_Q2A_AXI_CLK 115 +#define GCC_PCIE_2B_AUX_CLK 116 +#define GCC_PCIE_2B_AUX_CLK_SRC 117 +#define GCC_PCIE_2B_CFG_AHB_CLK 118 +#define GCC_PCIE_2B_MSTR_AXI_CLK 119 +#define GCC_PCIE_2B_PHY_RCHNG_CLK_SRC 120 +#define GCC_PCIE_2B_PIPE_CLK 121 +#define GCC_PCIE_2B_PIPE_CLK_SRC 122 +#define GCC_PCIE_2B_PIPE_DIV_CLK_SRC 123 +#define GCC_PCIE_2B_PIPEDIV2_CLK 124 +#define GCC_PCIE_2B_SLV_AXI_CLK 125 +#define GCC_PCIE_2B_SLV_Q2A_AXI_CLK 126 +#define GCC_PCIE_3A3B_CLKREF_CLK 127 +#define GCC_PCIE_3A_AUX_CLK 128 +#define GCC_PCIE_3A_AUX_CLK_SRC 129 +#define GCC_PCIE_3A_CFG_AHB_CLK 130 +#define GCC_PCIE_3A_MSTR_AXI_CLK 131 +#define GCC_PCIE_3A_PHY_RCHNG_CLK_SRC 132 +#define GCC_PCIE_3A_PIPE_CLK 133 +#define GCC_PCIE_3A_PIPE_CLK_SRC 134 +#define GCC_PCIE_3A_PIPE_DIV_CLK_SRC 135 +#define GCC_PCIE_3A_PIPEDIV2_CLK 136 +#define GCC_PCIE_3A_SLV_AXI_CLK 137 +#define GCC_PCIE_3A_SLV_Q2A_AXI_CLK 138 +#define GCC_PCIE_3B_AUX_CLK 139 +#define GCC_PCIE_3B_AUX_CLK_SRC 140 +#define GCC_PCIE_3B_CFG_AHB_CLK 141 +#define GCC_PCIE_3B_MSTR_AXI_CLK 142 +#define GCC_PCIE_3B_PHY_RCHNG_CLK_SRC 143 +#define GCC_PCIE_3B_PIPE_CLK 144 +#define GCC_PCIE_3B_PIPE_CLK_SRC 145 +#define GCC_PCIE_3B_PIPE_DIV_CLK_SRC 146 +#define GCC_PCIE_3B_PIPEDIV2_CLK 147 +#define GCC_PCIE_3B_SLV_AXI_CLK 148 +#define 
GCC_PCIE_3B_SLV_Q2A_AXI_CLK 149 +#define GCC_PCIE_4_AUX_CLK 150 +#define GCC_PCIE_4_AUX_CLK_SRC 151 +#define GCC_PCIE_4_CFG_AHB_CLK 152 +#define GCC_PCIE_4_CLKREF_CLK 153 +#define GCC_PCIE_4_MSTR_AXI_CLK 154 +#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 155 +#define GCC_PCIE_4_PIPE_CLK 156 +#define GCC_PCIE_4_PIPE_CLK_SRC 157 +#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 158 +#define GCC_PCIE_4_PIPEDIV2_CLK 159 +#define GCC_PCIE_4_SLV_AXI_CLK 160 +#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 161 +#define GCC_PCIE_RSCC_AHB_CLK 162 +#define GCC_PCIE_RSCC_XO_CLK 163 +#define GCC_PCIE_RSCC_XO_CLK_SRC 164 +#define GCC_PCIE_THROTTLE_CFG_CLK 165 +#define GCC_PDM2_CLK 166 +#define GCC_PDM2_CLK_SRC 167 +#define GCC_PDM_AHB_CLK 168 +#define GCC_PDM_XO4_CLK 169 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 170 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 171 +#define GCC_QMIP_DISP1_AHB_CLK 172 +#define GCC_QMIP_DISP1_ROT_AHB_CLK 173 +#define GCC_QMIP_DISP_AHB_CLK 174 +#define GCC_QMIP_DISP_ROT_AHB_CLK 175 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 176 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 177 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 178 +#define GCC_QUPV3_WRAP0_CORE_CLK 179 +#define GCC_QUPV3_WRAP0_QSPI0_CLK 180 +#define GCC_QUPV3_WRAP0_S0_CLK 181 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 182 +#define GCC_QUPV3_WRAP0_S1_CLK 183 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 184 +#define GCC_QUPV3_WRAP0_S2_CLK 185 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 186 +#define GCC_QUPV3_WRAP0_S3_CLK 187 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 188 +#define GCC_QUPV3_WRAP0_S4_CLK 189 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 190 +#define GCC_QUPV3_WRAP0_S4_DIV_CLK_SRC 191 +#define GCC_QUPV3_WRAP0_S5_CLK 192 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 193 +#define GCC_QUPV3_WRAP0_S6_CLK 194 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 195 +#define GCC_QUPV3_WRAP0_S7_CLK 196 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 197 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 198 +#define GCC_QUPV3_WRAP1_CORE_CLK 199 +#define GCC_QUPV3_WRAP1_QSPI0_CLK 200 +#define GCC_QUPV3_WRAP1_S0_CLK 201 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 202 +#define GCC_QUPV3_WRAP1_S1_CLK 203 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 204 +#define GCC_QUPV3_WRAP1_S2_CLK 205 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 206 +#define GCC_QUPV3_WRAP1_S3_CLK 207 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 208 +#define GCC_QUPV3_WRAP1_S4_CLK 209 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 210 +#define GCC_QUPV3_WRAP1_S4_DIV_CLK_SRC 211 +#define GCC_QUPV3_WRAP1_S5_CLK 212 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 213 +#define GCC_QUPV3_WRAP1_S6_CLK 214 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 215 +#define GCC_QUPV3_WRAP1_S7_CLK 216 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 217 +#define GCC_QUPV3_WRAP2_CORE_2X_CLK 218 +#define GCC_QUPV3_WRAP2_CORE_CLK 219 +#define GCC_QUPV3_WRAP2_QSPI0_CLK 220 +#define GCC_QUPV3_WRAP2_S0_CLK 221 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 222 +#define GCC_QUPV3_WRAP2_S1_CLK 223 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 224 +#define GCC_QUPV3_WRAP2_S2_CLK 225 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 226 +#define GCC_QUPV3_WRAP2_S3_CLK 227 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 228 +#define GCC_QUPV3_WRAP2_S4_CLK 229 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 230 +#define GCC_QUPV3_WRAP2_S4_DIV_CLK_SRC 231 +#define GCC_QUPV3_WRAP2_S5_CLK 232 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 233 +#define GCC_QUPV3_WRAP2_S6_CLK 234 +#define GCC_QUPV3_WRAP2_S6_CLK_SRC 235 +#define GCC_QUPV3_WRAP2_S7_CLK 236 +#define GCC_QUPV3_WRAP2_S7_CLK_SRC 237 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 238 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 239 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 240 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 241 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 
242 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 243 +#define GCC_SDCC2_AHB_CLK 244 +#define GCC_SDCC2_APPS_CLK 245 +#define GCC_SDCC2_APPS_CLK_SRC 246 +#define GCC_SDCC4_AHB_CLK 247 +#define GCC_SDCC4_APPS_CLK 248 +#define GCC_SDCC4_APPS_CLK_SRC 249 +#define GCC_SYS_NOC_USB_AXI_CLK 250 +#define GCC_UFS_1_CARD_CLKREF_CLK 251 +#define GCC_UFS_CARD_AHB_CLK 252 +#define GCC_UFS_CARD_AXI_CLK 253 +#define GCC_UFS_CARD_AXI_CLK_SRC 254 +#define GCC_UFS_CARD_CLKREF_CLK 255 +#define GCC_UFS_CARD_ICE_CORE_CLK 256 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 257 +#define GCC_UFS_CARD_PHY_AUX_CLK 258 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 259 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 260 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 261 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 262 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 263 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 264 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 265 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 266 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 267 +#define GCC_UFS_PHY_AHB_CLK 268 +#define GCC_UFS_PHY_AXI_CLK 269 +#define GCC_UFS_PHY_AXI_CLK_SRC 270 +#define GCC_UFS_PHY_ICE_CORE_CLK 271 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 272 +#define GCC_UFS_PHY_PHY_AUX_CLK 273 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 274 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 275 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 276 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 277 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 278 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 279 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 280 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 281 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 282 +#define GCC_UFS_REF_CLKREF_CLK 283 +#define GCC_USB2_HS0_CLKREF_CLK 284 +#define GCC_USB2_HS1_CLKREF_CLK 285 +#define GCC_USB2_HS2_CLKREF_CLK 286 +#define GCC_USB2_HS3_CLKREF_CLK 287 +#define GCC_USB30_MP_MASTER_CLK 288 +#define GCC_USB30_MP_MASTER_CLK_SRC 289 +#define GCC_USB30_MP_MOCK_UTMI_CLK 290 +#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 291 +#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 292 +#define GCC_USB30_MP_SLEEP_CLK 293 +#define GCC_USB30_PRIM_MASTER_CLK 294 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 295 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 296 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 297 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 298 +#define GCC_USB30_PRIM_SLEEP_CLK 299 +#define GCC_USB30_SEC_MASTER_CLK 300 +#define GCC_USB30_SEC_MASTER_CLK_SRC 301 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 302 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 303 +#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 304 +#define GCC_USB30_SEC_SLEEP_CLK 305 +#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 306 +#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 307 +#define GCC_USB3_MP0_CLKREF_CLK 308 +#define GCC_USB3_MP1_CLKREF_CLK 309 +#define GCC_USB3_MP_PHY_AUX_CLK 310 +#define GCC_USB3_MP_PHY_AUX_CLK_SRC 311 +#define GCC_USB3_MP_PHY_COM_AUX_CLK 312 +#define GCC_USB3_MP_PHY_PIPE_0_CLK 313 +#define GCC_USB3_MP_PHY_PIPE_0_CLK_SRC 314 +#define GCC_USB3_MP_PHY_PIPE_1_CLK 315 +#define GCC_USB3_MP_PHY_PIPE_1_CLK_SRC 316 +#define GCC_USB3_PRIM_PHY_AUX_CLK 317 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 318 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 319 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 320 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 321 +#define GCC_USB3_SEC_PHY_AUX_CLK 322 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 323 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 324 +#define GCC_USB3_SEC_PHY_PIPE_CLK 325 +#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 326 +#define GCC_USB4_1_CFG_AHB_CLK 327 +#define GCC_USB4_1_DP_CLK 328 +#define GCC_USB4_1_MASTER_CLK 329 +#define GCC_USB4_1_MASTER_CLK_SRC 330 +#define 
GCC_USB4_1_PHY_DP_CLK_SRC 331 +#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 332 +#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 333 +#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 334 +#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 335 +#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 336 +#define GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC 337 +#define GCC_USB4_1_PHY_RX0_CLK 338 +#define GCC_USB4_1_PHY_RX0_CLK_SRC 339 +#define GCC_USB4_1_PHY_RX1_CLK 340 +#define GCC_USB4_1_PHY_RX1_CLK_SRC 341 +#define GCC_USB4_1_PHY_SYS_CLK_SRC 342 +#define GCC_USB4_1_PHY_USB_PIPE_CLK 343 +#define GCC_USB4_1_SB_IF_CLK 344 +#define GCC_USB4_1_SB_IF_CLK_SRC 345 +#define GCC_USB4_1_SYS_CLK 346 +#define GCC_USB4_1_TMU_CLK 347 +#define GCC_USB4_1_TMU_CLK_SRC 348 +#define GCC_USB4_CFG_AHB_CLK 349 +#define GCC_USB4_CLKREF_CLK 350 +#define GCC_USB4_DP_CLK 351 +#define GCC_USB4_EUD_CLKREF_CLK 352 +#define GCC_USB4_MASTER_CLK 353 +#define GCC_USB4_MASTER_CLK_SRC 354 +#define GCC_USB4_PHY_DP_CLK_SRC 355 +#define GCC_USB4_PHY_P2RR2P_PIPE_CLK 356 +#define GCC_USB4_PHY_P2RR2P_PIPE_CLK_SRC 357 +#define GCC_USB4_PHY_PCIE_PIPE_CLK 358 +#define GCC_USB4_PHY_PCIE_PIPE_CLK_SRC 359 +#define GCC_USB4_PHY_PCIE_PIPE_MUX_CLK_SRC 360 +#define GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC 361 +#define GCC_USB4_PHY_RX0_CLK 362 +#define GCC_USB4_PHY_RX0_CLK_SRC 363 +#define GCC_USB4_PHY_RX1_CLK 364 +#define GCC_USB4_PHY_RX1_CLK_SRC 365 +#define GCC_USB4_PHY_SYS_CLK_SRC 366 +#define GCC_USB4_PHY_USB_PIPE_CLK 367 +#define GCC_USB4_SB_IF_CLK 368 +#define GCC_USB4_SB_IF_CLK_SRC 369 +#define GCC_USB4_SYS_CLK 370 +#define GCC_USB4_TMU_CLK 371 +#define GCC_USB4_TMU_CLK_SRC 372 +#define GCC_VIDEO_AHB_CLK 373 +#define GCC_VIDEO_AXI0_CLK 374 +#define GCC_VIDEO_AXI1_CLK 375 +#define GCC_VIDEO_CVP_THROTTLE_CLK 376 +#define GCC_VIDEO_VCODEC_THROTTLE_CLK 377 +#define GCC_VIDEO_XO_CLK 378 +#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 379 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 380 +#define GCC_UFS_CARD_AXI_HW_CTL_CLK 381 +#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 382 +#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 383 +#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 384 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 385 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 386 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 387 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 388 + +/* GCC resets */ +#define GCC_EMAC0_BCR 0 +#define GCC_EMAC1_BCR 1 +#define GCC_PCIE_0_LINK_DOWN_BCR 2 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3 +#define GCC_PCIE_0_PHY_BCR 4 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_0_TUNNEL_BCR 6 +#define GCC_PCIE_1_LINK_DOWN_BCR 7 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8 +#define GCC_PCIE_1_PHY_BCR 9 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10 +#define GCC_PCIE_1_TUNNEL_BCR 11 +#define GCC_PCIE_2A_BCR 12 +#define GCC_PCIE_2A_LINK_DOWN_BCR 13 +#define GCC_PCIE_2A_NOCSR_COM_PHY_BCR 14 +#define GCC_PCIE_2A_PHY_BCR 15 +#define GCC_PCIE_2A_PHY_NOCSR_COM_PHY_BCR 16 +#define GCC_PCIE_2B_BCR 17 +#define GCC_PCIE_2B_LINK_DOWN_BCR 18 +#define GCC_PCIE_2B_NOCSR_COM_PHY_BCR 19 +#define GCC_PCIE_2B_PHY_BCR 20 +#define GCC_PCIE_2B_PHY_NOCSR_COM_PHY_BCR 21 +#define GCC_PCIE_3A_BCR 22 +#define GCC_PCIE_3A_LINK_DOWN_BCR 23 +#define GCC_PCIE_3A_NOCSR_COM_PHY_BCR 24 +#define GCC_PCIE_3A_PHY_BCR 25 +#define GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR 26 +#define GCC_PCIE_3B_BCR 27 +#define GCC_PCIE_3B_LINK_DOWN_BCR 28 +#define GCC_PCIE_3B_NOCSR_COM_PHY_BCR 29 +#define GCC_PCIE_3B_PHY_BCR 30 +#define GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR 31 +#define GCC_PCIE_4_BCR 32 +#define GCC_PCIE_4_LINK_DOWN_BCR 33 +#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 34 
+#define GCC_PCIE_4_PHY_BCR 35 +#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 36 +#define GCC_PCIE_PHY_CFG_AHB_BCR 37 +#define GCC_PCIE_PHY_COM_BCR 38 +#define GCC_PCIE_RSCC_BCR 39 +#define GCC_QUSB2PHY_HS0_MP_BCR 40 +#define GCC_QUSB2PHY_HS1_MP_BCR 41 +#define GCC_QUSB2PHY_HS2_MP_BCR 42 +#define GCC_QUSB2PHY_HS3_MP_BCR 43 +#define GCC_QUSB2PHY_PRIM_BCR 44 +#define GCC_QUSB2PHY_SEC_BCR 45 +#define GCC_SDCC2_BCR 46 +#define GCC_SDCC4_BCR 47 +#define GCC_UFS_CARD_BCR 48 +#define GCC_UFS_PHY_BCR 49 +#define GCC_USB2_PHY_PRIM_BCR 50 +#define GCC_USB2_PHY_SEC_BCR 51 +#define GCC_USB30_MP_BCR 52 +#define GCC_USB30_PRIM_BCR 53 +#define GCC_USB30_SEC_BCR 54 +#define GCC_USB3_DP_PHY_PRIM_BCR 55 +#define GCC_USB3_DP_PHY_SEC_BCR 56 +#define GCC_USB3_PHY_PRIM_BCR 57 +#define GCC_USB3_PHY_SEC_BCR 58 +#define GCC_USB3_UNIPHY_MP0_BCR 59 +#define GCC_USB3_UNIPHY_MP1_BCR 60 +#define GCC_USB3PHY_PHY_PRIM_BCR 61 +#define GCC_USB3PHY_PHY_SEC_BCR 62 +#define GCC_USB3UNIPHY_PHY_MP0_BCR 63 +#define GCC_USB3UNIPHY_PHY_MP1_BCR 64 +#define GCC_USB4_1_BCR 65 +#define GCC_USB4_1_DP_PHY_PRIM_BCR 66 +#define GCC_USB4_1_DPPHY_AUX_BCR 67 +#define GCC_USB4_1_PHY_PRIM_BCR 68 +#define GCC_USB4_BCR 69 +#define GCC_USB4_DP_PHY_PRIM_BCR 70 +#define GCC_USB4_DPPHY_AUX_BCR 71 +#define GCC_USB4_PHY_PRIM_BCR 72 +#define GCC_USB4PHY_1_PHY_PRIM_BCR 73 +#define GCC_USB4PHY_PHY_PRIM_BCR 74 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 75 +#define GCC_VIDEO_BCR 76 +#define GCC_VIDEO_AXI0_CLK_ARES 77 +#define GCC_VIDEO_AXI1_CLK_ARES 78 + +/* GCC GDSCs */ +#define PCIE_0_TUNNEL_GDSC 0 +#define PCIE_1_TUNNEL_GDSC 1 +#define PCIE_2A_GDSC 2 +#define PCIE_2B_GDSC 3 +#define PCIE_3A_GDSC 4 +#define PCIE_3B_GDSC 5 +#define PCIE_4_GDSC 6 +#define UFS_CARD_GDSC 7 +#define UFS_PHY_GDSC 8 +#define USB30_MP_GDSC 9 +#define USB30_PRIM_GDSC 10 +#define USB30_SEC_GDSC 11 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8350.h b/include/dt-bindings/clock/qcom,gpucc-sm8350.h new file mode 100644 index 0000000000..2ca857f5bf --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sm8350.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H + +/* GPU_CC clocks */ +#define GPU_CC_AHB_CLK 0 +#define GPU_CC_CB_CLK 1 +#define GPU_CC_CRC_AHB_CLK 2 +#define GPU_CC_CX_APB_CLK 3 +#define GPU_CC_CX_GMU_CLK 4 +#define GPU_CC_CX_QDSS_AT_CLK 5 +#define GPU_CC_CX_QDSS_TRIG_CLK 6 +#define GPU_CC_CX_QDSS_TSCTR_CLK 7 +#define GPU_CC_CX_SNOC_DVM_CLK 8 +#define GPU_CC_CXO_AON_CLK 9 +#define GPU_CC_CXO_CLK 10 +#define GPU_CC_FREQ_MEASURE_CLK 11 +#define GPU_CC_GMU_CLK_SRC 12 +#define GPU_CC_GX_GMU_CLK 13 +#define GPU_CC_GX_QDSS_TSCTR_CLK 14 +#define GPU_CC_GX_VSENSE_CLK 15 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16 +#define GPU_CC_HUB_AHB_DIV_CLK_SRC 17 +#define GPU_CC_HUB_AON_CLK 18 +#define GPU_CC_HUB_CLK_SRC 19 +#define GPU_CC_HUB_CX_INT_CLK 20 +#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 21 +#define GPU_CC_MND1X_0_GFX3D_CLK 22 +#define GPU_CC_MND1X_1_GFX3D_CLK 23 +#define GPU_CC_PLL0 24 +#define GPU_CC_PLL1 25 +#define GPU_CC_SLEEP_CLK 26 + +/* GPU_CC resets */ +#define GPUCC_GPU_CC_ACD_BCR 0 +#define GPUCC_GPU_CC_CB_BCR 1 +#define GPUCC_GPU_CC_CX_BCR 2 +#define GPUCC_GPU_CC_FAST_HUB_BCR 3 +#define GPUCC_GPU_CC_GFX3D_AON_BCR 4 +#define GPUCC_GPU_CC_GMU_BCR 5 +#define GPUCC_GPU_CC_GX_BCR 6 +#define GPUCC_GPU_CC_XO_BCR 7 + +/* GPU_CC GDSCRs */ +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h new file mode 100644 index 0000000000..20ef2ea673 --- /dev/null +++ b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H +#define _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H + +/* LPASS_AUDIO_CC clocks */ +#define LPASS_AUDIO_CC_PLL 0 +#define LPASS_AUDIO_CC_PLL_OUT_AUX2 1 +#define LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC 2 +#define LPASS_AUDIO_CC_PLL_OUT_MAIN_DIV_CLK_SRC 3 +#define LPASS_AUDIO_CC_CDIV_RX_MCLK_DIV_CLK_SRC 4 +#define LPASS_AUDIO_CC_CODEC_MEM0_CLK 5 +#define LPASS_AUDIO_CC_CODEC_MEM1_CLK 6 +#define LPASS_AUDIO_CC_CODEC_MEM2_CLK 7 +#define LPASS_AUDIO_CC_CODEC_MEM_CLK 8 +#define LPASS_AUDIO_CC_EXT_MCLK0_CLK 9 +#define LPASS_AUDIO_CC_EXT_MCLK0_CLK_SRC 10 +#define LPASS_AUDIO_CC_EXT_MCLK1_CLK 11 +#define LPASS_AUDIO_CC_EXT_MCLK1_CLK_SRC 12 +#define LPASS_AUDIO_CC_RX_MCLK_2X_CLK 13 +#define LPASS_AUDIO_CC_RX_MCLK_CLK 14 +#define LPASS_AUDIO_CC_RX_MCLK_CLK_SRC 15 + +/* LPASS_AON_CC clocks */ +#define LPASS_AON_CC_PLL 0 +#define LPASS_AON_CC_PLL_OUT_EVEN 1 +#define LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC 2 +#define LPASS_AON_CC_PLL_OUT_ODD 3 +#define LPASS_AON_CC_AUDIO_HM_H_CLK 4 +#define LPASS_AON_CC_CDIV_TX_MCLK_DIV_CLK_SRC 5 +#define LPASS_AON_CC_MAIN_RCG_CLK_SRC 6 +#define LPASS_AON_CC_TX_MCLK_2X_CLK 7 +#define LPASS_AON_CC_TX_MCLK_CLK 8 +#define LPASS_AON_CC_TX_MCLK_RCG_CLK_SRC 9 +#define LPASS_AON_CC_VA_MEM0_CLK 10 + +/* LPASS_AON_CC power domains */ +#define LPASS_AON_CC_LPASS_AUDIO_HM_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h new file mode 100644 index 0000000000..28ed2a07aa --- /dev/null +++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H +#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H + +/* LPASS_CORE_CC clocks */ +#define LPASS_CORE_CC_DIG_PLL 0 +#define LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC 1 +#define LPASS_CORE_CC_DIG_PLL_OUT_ODD 2 +#define LPASS_CORE_CC_CORE_CLK 3 +#define LPASS_CORE_CC_CORE_CLK_SRC 4 +#define LPASS_CORE_CC_EXT_IF0_CLK_SRC 5 +#define LPASS_CORE_CC_EXT_IF0_IBIT_CLK 6 +#define LPASS_CORE_CC_EXT_IF1_CLK_SRC 7 +#define LPASS_CORE_CC_EXT_IF1_IBIT_CLK 8 +#define LPASS_CORE_CC_LPM_CORE_CLK 9 +#define LPASS_CORE_CC_LPM_MEM0_CORE_CLK 10 +#define LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK 11 + +/* LPASS_CORE_CC power domains */ +#define LPASS_CORE_CC_LPASS_CORE_HM_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8450-camcc.h b/include/dt-bindings/clock/qcom,sm8450-camcc.h new file mode 100644 index 0000000000..7ff67acf30 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8450-camcc.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H + +/* CAM_CC clocks */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_CLK 1 +#define CAM_CC_BPS_CLK_SRC 2 +#define CAM_CC_BPS_FAST_AHB_CLK 3 +#define CAM_CC_CAMNOC_AXI_CLK 4 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 5 +#define CAM_CC_CAMNOC_DCD_XO_CLK 6 +#define CAM_CC_CCI_0_CLK 7 +#define CAM_CC_CCI_0_CLK_SRC 8 +#define CAM_CC_CCI_1_CLK 9 +#define CAM_CC_CCI_1_CLK_SRC 10 +#define CAM_CC_CORE_AHB_CLK 11 +#define CAM_CC_CPAS_AHB_CLK 12 +#define CAM_CC_CPAS_BPS_CLK 13 +#define CAM_CC_CPAS_FAST_AHB_CLK 14 +#define CAM_CC_CPAS_IFE_0_CLK 15 +#define CAM_CC_CPAS_IFE_1_CLK 16 +#define CAM_CC_CPAS_IFE_2_CLK 17 +#define CAM_CC_CPAS_IFE_LITE_CLK 18 +#define CAM_CC_CPAS_IPE_NPS_CLK 19 +#define CAM_CC_CPAS_SBI_CLK 20 +#define CAM_CC_CPAS_SFE_0_CLK 21 +#define CAM_CC_CPAS_SFE_1_CLK 22 +#define CAM_CC_CPHY_RX_CLK_SRC 23 +#define CAM_CC_CSI0PHYTIMER_CLK 24 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 25 +#define CAM_CC_CSI1PHYTIMER_CLK 26 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 27 +#define CAM_CC_CSI2PHYTIMER_CLK 28 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 29 +#define CAM_CC_CSI3PHYTIMER_CLK 30 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 31 +#define CAM_CC_CSI4PHYTIMER_CLK 32 +#define CAM_CC_CSI4PHYTIMER_CLK_SRC 33 +#define CAM_CC_CSI5PHYTIMER_CLK 34 +#define CAM_CC_CSI5PHYTIMER_CLK_SRC 35 +#define CAM_CC_CSID_CLK 36 +#define CAM_CC_CSID_CLK_SRC 37 +#define CAM_CC_CSID_CSIPHY_RX_CLK 38 +#define CAM_CC_CSIPHY0_CLK 39 +#define CAM_CC_CSIPHY1_CLK 40 +#define CAM_CC_CSIPHY2_CLK 41 +#define CAM_CC_CSIPHY3_CLK 42 +#define CAM_CC_CSIPHY4_CLK 43 +#define CAM_CC_CSIPHY5_CLK 44 +#define CAM_CC_FAST_AHB_CLK_SRC 45 +#define CAM_CC_GDSC_CLK 46 +#define CAM_CC_ICP_AHB_CLK 47 +#define CAM_CC_ICP_CLK 48 +#define CAM_CC_ICP_CLK_SRC 49 +#define CAM_CC_IFE_0_CLK 50 +#define CAM_CC_IFE_0_CLK_SRC 51 +#define CAM_CC_IFE_0_DSP_CLK 52 +#define CAM_CC_IFE_0_FAST_AHB_CLK 53 +#define CAM_CC_IFE_1_CLK 54 +#define CAM_CC_IFE_1_CLK_SRC 55 +#define CAM_CC_IFE_1_DSP_CLK 56 +#define CAM_CC_IFE_1_FAST_AHB_CLK 57 +#define CAM_CC_IFE_2_CLK 58 +#define CAM_CC_IFE_2_CLK_SRC 59 +#define CAM_CC_IFE_2_DSP_CLK 60 +#define CAM_CC_IFE_2_FAST_AHB_CLK 61 +#define CAM_CC_IFE_LITE_AHB_CLK 62 +#define CAM_CC_IFE_LITE_CLK 63 +#define CAM_CC_IFE_LITE_CLK_SRC 64 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 65 +#define CAM_CC_IFE_LITE_CSID_CLK 66 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 67 +#define 
CAM_CC_IPE_NPS_AHB_CLK 68 +#define CAM_CC_IPE_NPS_CLK 69 +#define CAM_CC_IPE_NPS_CLK_SRC 70 +#define CAM_CC_IPE_NPS_FAST_AHB_CLK 71 +#define CAM_CC_IPE_PPS_CLK 72 +#define CAM_CC_IPE_PPS_FAST_AHB_CLK 73 +#define CAM_CC_JPEG_CLK 74 +#define CAM_CC_JPEG_CLK_SRC 75 +#define CAM_CC_MCLK0_CLK 76 +#define CAM_CC_MCLK0_CLK_SRC 77 +#define CAM_CC_MCLK1_CLK 78 +#define CAM_CC_MCLK1_CLK_SRC 79 +#define CAM_CC_MCLK2_CLK 80 +#define CAM_CC_MCLK2_CLK_SRC 81 +#define CAM_CC_MCLK3_CLK 82 +#define CAM_CC_MCLK3_CLK_SRC 83 +#define CAM_CC_MCLK4_CLK 84 +#define CAM_CC_MCLK4_CLK_SRC 85 +#define CAM_CC_MCLK5_CLK 86 +#define CAM_CC_MCLK5_CLK_SRC 87 +#define CAM_CC_MCLK6_CLK 88 +#define CAM_CC_MCLK6_CLK_SRC 89 +#define CAM_CC_MCLK7_CLK 90 +#define CAM_CC_MCLK7_CLK_SRC 91 +#define CAM_CC_PLL0 92 +#define CAM_CC_PLL0_OUT_EVEN 93 +#define CAM_CC_PLL0_OUT_ODD 94 +#define CAM_CC_PLL1 95 +#define CAM_CC_PLL1_OUT_EVEN 96 +#define CAM_CC_PLL2 97 +#define CAM_CC_PLL3 98 +#define CAM_CC_PLL3_OUT_EVEN 99 +#define CAM_CC_PLL4 100 +#define CAM_CC_PLL4_OUT_EVEN 101 +#define CAM_CC_PLL5 102 +#define CAM_CC_PLL5_OUT_EVEN 103 +#define CAM_CC_PLL6 104 +#define CAM_CC_PLL6_OUT_EVEN 105 +#define CAM_CC_PLL7 106 +#define CAM_CC_PLL7_OUT_EVEN 107 +#define CAM_CC_PLL8 108 +#define CAM_CC_PLL8_OUT_EVEN 109 +#define CAM_CC_QDSS_DEBUG_CLK 110 +#define CAM_CC_QDSS_DEBUG_CLK_SRC 111 +#define CAM_CC_QDSS_DEBUG_XO_CLK 112 +#define CAM_CC_SBI_AHB_CLK 113 +#define CAM_CC_SBI_CLK 114 +#define CAM_CC_SFE_0_CLK 115 +#define CAM_CC_SFE_0_CLK_SRC 116 +#define CAM_CC_SFE_0_FAST_AHB_CLK 117 +#define CAM_CC_SFE_1_CLK 118 +#define CAM_CC_SFE_1_CLK_SRC 119 +#define CAM_CC_SFE_1_FAST_AHB_CLK 120 +#define CAM_CC_SLEEP_CLK 121 +#define CAM_CC_SLEEP_CLK_SRC 122 +#define CAM_CC_SLOW_AHB_CLK_SRC 123 +#define CAM_CC_XO_CLK_SRC 124 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_ICP_BCR 1 +#define CAM_CC_IFE_0_BCR 2 +#define CAM_CC_IFE_1_BCR 3 +#define CAM_CC_IFE_2_BCR 4 +#define CAM_CC_IPE_0_BCR 5 +#define CAM_CC_QDSS_DEBUG_BCR 6 +#define CAM_CC_SBI_BCR 7 +#define CAM_CC_SFE_0_BCR 8 +#define CAM_CC_SFE_1_BCR 9 + +/* CAM_CC GDSCRs */ +#define BPS_GDSC 0 +#define IPE_0_GDSC 1 +#define SBI_GDSC 2 +#define IFE_0_GDSC 3 +#define IFE_1_GDSC 4 +#define IFE_2_GDSC 5 +#define SFE_0_GDSC 6 +#define SFE_1_GDSC 7 +#define TITAN_TOP_GDSC 8 + +#endif diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h new file mode 100644 index 0000000000..754c54a6eb --- /dev/null +++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2022 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ + +#include + +/* r8a779g0 CPG Core Clocks */ + +#define R8A779G0_CLK_ZX 0 +#define R8A779G0_CLK_ZS 1 +#define R8A779G0_CLK_ZT 2 +#define R8A779G0_CLK_ZTR 3 +#define R8A779G0_CLK_S0D2 4 +#define R8A779G0_CLK_S0D3 5 +#define R8A779G0_CLK_S0D4 6 +#define R8A779G0_CLK_S0D1_VIO 7 +#define R8A779G0_CLK_S0D2_VIO 8 +#define R8A779G0_CLK_S0D4_VIO 9 +#define R8A779G0_CLK_S0D8_VIO 10 +#define R8A779G0_CLK_S0D1_VC 11 +#define R8A779G0_CLK_S0D2_VC 12 +#define R8A779G0_CLK_S0D4_VC 13 +#define R8A779G0_CLK_S0D2_MM 14 +#define R8A779G0_CLK_S0D4_MM 15 +#define R8A779G0_CLK_S0D2_U3DG 16 +#define R8A779G0_CLK_S0D4_U3DG 17 +#define R8A779G0_CLK_S0D2_RT 18 +#define R8A779G0_CLK_S0D3_RT 19 +#define R8A779G0_CLK_S0D4_RT 20 +#define R8A779G0_CLK_S0D6_RT 21 +#define R8A779G0_CLK_S0D24_RT 22 +#define R8A779G0_CLK_S0D2_PER 23 +#define R8A779G0_CLK_S0D3_PER 24 +#define R8A779G0_CLK_S0D4_PER 25 +#define R8A779G0_CLK_S0D6_PER 26 +#define R8A779G0_CLK_S0D12_PER 27 +#define R8A779G0_CLK_S0D24_PER 28 +#define R8A779G0_CLK_S0D1_HSC 29 +#define R8A779G0_CLK_S0D2_HSC 30 +#define R8A779G0_CLK_S0D4_HSC 31 +#define R8A779G0_CLK_S0D2_CC 32 +#define R8A779G0_CLK_SVD1_IR 33 +#define R8A779G0_CLK_SVD2_IR 34 +#define R8A779G0_CLK_SVD1_VIP 35 +#define R8A779G0_CLK_SVD2_VIP 36 +#define R8A779G0_CLK_CL 37 +#define R8A779G0_CLK_CL16M 38 +#define R8A779G0_CLK_CL16M_MM 39 +#define R8A779G0_CLK_CL16M_RT 40 +#define R8A779G0_CLK_CL16M_PER 41 +#define R8A779G0_CLK_CL16M_HSC 42 +#define R8A779G0_CLK_Z0 43 +#define R8A779G0_CLK_ZB3 44 +#define R8A779G0_CLK_ZB3D2 45 +#define R8A779G0_CLK_ZB3D4 46 +#define R8A779G0_CLK_ZG 47 +#define R8A779G0_CLK_SD0H 48 +#define R8A779G0_CLK_SD0 49 +#define R8A779G0_CLK_RPC 50 +#define R8A779G0_CLK_RPCD2 51 +#define R8A779G0_CLK_MSO 52 +#define R8A779G0_CLK_CANFD 53 +#define R8A779G0_CLK_CSI 54 +#define R8A779G0_CLK_FRAY 55 +#define R8A779G0_CLK_IPC 56 +#define R8A779G0_CLK_SASYNCRT 57 +#define R8A779G0_CLK_SASYNCPERD1 58 +#define R8A779G0_CLK_SASYNCPERD2 59 +#define R8A779G0_CLK_SASYNCPERD4 60 +#define R8A779G0_CLK_VIOBUS 61 +#define R8A779G0_CLK_VIOBUSD2 62 +#define R8A779G0_CLK_VCBUS 63 +#define R8A779G0_CLK_VCBUSD2 64 +#define R8A779G0_CLK_DSIEXT 65 +#define R8A779G0_CLK_DSIREF 66 +#define R8A779G0_CLK_ADGH 67 +#define R8A779G0_CLK_OSC 68 +#define R8A779G0_CLK_ZR0 69 +#define R8A779G0_CLK_ZR1 70 +#define R8A779G0_CLK_ZR2 71 +#define R8A779G0_CLK_IMPA 72 +#define R8A779G0_CLK_IMPAD4 73 +#define R8A779G0_CLK_CPEX 74 +#define R8A779G0_CLK_CBFUSA 75 +#define R8A779G0_CLK_R 76 + +#endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h new file mode 100644 index 0000000000..77cde8effd --- /dev/null +++ b/include/dt-bindings/clock/r9a07g043-cpg.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2022 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ +#define __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ + +#include + +/* R9A07G043 CPG Core Clocks */ +#define R9A07G043_CLK_I 0 +#define R9A07G043_CLK_I2 1 +#define R9A07G043_CLK_S0 2 +#define R9A07G043_CLK_SPI0 3 +#define R9A07G043_CLK_SPI1 4 +#define R9A07G043_CLK_SD0 5 +#define R9A07G043_CLK_SD1 6 +#define R9A07G043_CLK_M0 7 +#define R9A07G043_CLK_M2 8 +#define R9A07G043_CLK_M3 9 +#define R9A07G043_CLK_HP 10 +#define R9A07G043_CLK_TSU 11 +#define R9A07G043_CLK_ZT 12 +#define R9A07G043_CLK_P0 13 +#define R9A07G043_CLK_P1 14 +#define R9A07G043_CLK_P2 15 +#define R9A07G043_CLK_AT 16 +#define R9A07G043_OSCCLK 17 +#define R9A07G043_CLK_P0_DIV2 18 + +/* R9A07G043 Module Clocks */ +#define R9A07G043_CA55_SCLK 0 /* RZ/G2UL Only */ +#define R9A07G043_CA55_PCLK 1 /* RZ/G2UL Only */ +#define R9A07G043_CA55_ATCLK 2 /* RZ/G2UL Only */ +#define R9A07G043_CA55_GICCLK 3 /* RZ/G2UL Only */ +#define R9A07G043_CA55_PERICLK 4 /* RZ/G2UL Only */ +#define R9A07G043_CA55_ACLK 5 /* RZ/G2UL Only */ +#define R9A07G043_CA55_TSCLK 6 /* RZ/G2UL Only */ +#define R9A07G043_GIC600_GICCLK 7 /* RZ/G2UL Only */ +#define R9A07G043_IA55_CLK 8 /* RZ/G2UL Only */ +#define R9A07G043_IA55_PCLK 9 /* RZ/G2UL Only */ +#define R9A07G043_MHU_PCLK 10 /* RZ/G2UL Only */ +#define R9A07G043_SYC_CNT_CLK 11 +#define R9A07G043_DMAC_ACLK 12 +#define R9A07G043_DMAC_PCLK 13 +#define R9A07G043_OSTM0_PCLK 14 +#define R9A07G043_OSTM1_PCLK 15 +#define R9A07G043_OSTM2_PCLK 16 +#define R9A07G043_MTU_X_MCK_MTU3 17 +#define R9A07G043_POE3_CLKM_POE 18 +#define R9A07G043_WDT0_PCLK 19 +#define R9A07G043_WDT0_CLK 20 +#define R9A07G043_WDT2_PCLK 21 /* RZ/G2UL Only */ +#define R9A07G043_WDT2_CLK 22 /* RZ/G2UL Only */ +#define R9A07G043_SPI_CLK2 23 +#define R9A07G043_SPI_CLK 24 +#define R9A07G043_SDHI0_IMCLK 25 +#define R9A07G043_SDHI0_IMCLK2 26 +#define R9A07G043_SDHI0_CLK_HS 27 +#define R9A07G043_SDHI0_ACLK 28 +#define R9A07G043_SDHI1_IMCLK 29 +#define R9A07G043_SDHI1_IMCLK2 30 +#define R9A07G043_SDHI1_CLK_HS 31 +#define R9A07G043_SDHI1_ACLK 32 +#define R9A07G043_ISU_ACLK 33 /* RZ/G2UL Only */ +#define R9A07G043_ISU_PCLK 34 /* RZ/G2UL Only */ +#define R9A07G043_CRU_SYSCLK 35 /* RZ/G2UL Only */ +#define R9A07G043_CRU_VCLK 36 /* RZ/G2UL Only */ +#define R9A07G043_CRU_PCLK 37 /* RZ/G2UL Only */ +#define R9A07G043_CRU_ACLK 38 /* RZ/G2UL Only */ +#define R9A07G043_LCDC_CLK_A 39 /* RZ/G2UL Only */ +#define R9A07G043_LCDC_CLK_P 40 /* RZ/G2UL Only */ +#define R9A07G043_LCDC_CLK_D 41 /* RZ/G2UL Only */ +#define R9A07G043_SSI0_PCLK2 42 +#define R9A07G043_SSI0_PCLK_SFR 43 +#define R9A07G043_SSI1_PCLK2 44 +#define R9A07G043_SSI1_PCLK_SFR 45 +#define R9A07G043_SSI2_PCLK2 46 +#define R9A07G043_SSI2_PCLK_SFR 47 +#define R9A07G043_SSI3_PCLK2 48 +#define R9A07G043_SSI3_PCLK_SFR 49 +#define R9A07G043_SRC_CLKP 50 /* RZ/G2UL Only */ +#define R9A07G043_USB_U2H0_HCLK 51 +#define R9A07G043_USB_U2H1_HCLK 52 +#define R9A07G043_USB_U2P_EXR_CPUCLK 53 +#define R9A07G043_USB_PCLK 54 +#define R9A07G043_ETH0_CLK_AXI 55 +#define R9A07G043_ETH0_CLK_CHI 56 +#define R9A07G043_ETH1_CLK_AXI 57 +#define R9A07G043_ETH1_CLK_CHI 58 +#define R9A07G043_I2C0_PCLK 59 +#define R9A07G043_I2C1_PCLK 60 +#define R9A07G043_I2C2_PCLK 61 +#define R9A07G043_I2C3_PCLK 62 +#define R9A07G043_SCIF0_CLK_PCK 63 +#define R9A07G043_SCIF1_CLK_PCK 64 +#define R9A07G043_SCIF2_CLK_PCK 65 +#define R9A07G043_SCIF3_CLK_PCK 66 +#define R9A07G043_SCIF4_CLK_PCK 67 +#define R9A07G043_SCI0_CLKP 68 +#define R9A07G043_SCI1_CLKP 69 +#define R9A07G043_IRDA_CLKP 70 +#define 
R9A07G043_RSPI0_CLKB 71 +#define R9A07G043_RSPI1_CLKB 72 +#define R9A07G043_RSPI2_CLKB 73 +#define R9A07G043_CANFD_PCLK 74 +#define R9A07G043_GPIO_HCLK 75 +#define R9A07G043_ADC_ADCLK 76 +#define R9A07G043_ADC_PCLK 77 +#define R9A07G043_TSU_PCLK 78 +#define R9A07G043_NCEPLDM_DM_CLK 79 /* RZ/Five Only */ +#define R9A07G043_NCEPLDM_ACLK 80 /* RZ/Five Only */ +#define R9A07G043_NCEPLDM_TCK 81 /* RZ/Five Only */ +#define R9A07G043_NCEPLMT_ACLK 82 /* RZ/Five Only */ +#define R9A07G043_NCEPLIC_ACLK 83 /* RZ/Five Only */ +#define R9A07G043_AX45MP_CORE0_CLK 84 /* RZ/Five Only */ +#define R9A07G043_AX45MP_ACLK 85 /* RZ/Five Only */ +#define R9A07G043_IAX45_CLK 86 /* RZ/Five Only */ +#define R9A07G043_IAX45_PCLK 87 /* RZ/Five Only */ + +/* R9A07G043 Resets */ +#define R9A07G043_CA55_RST_1_0 0 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_1_1 1 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_3_0 2 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_3_1 3 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_4 4 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_5 5 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_6 6 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_7 7 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_8 8 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_9 9 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_10 10 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_11 11 /* RZ/G2UL Only */ +#define R9A07G043_CA55_RST_12 12 /* RZ/G2UL Only */ +#define R9A07G043_GIC600_GICRESET_N 13 /* RZ/G2UL Only */ +#define R9A07G043_GIC600_DBG_GICRESET_N 14 /* RZ/G2UL Only */ +#define R9A07G043_IA55_RESETN 15 /* RZ/G2UL Only */ +#define R9A07G043_MHU_RESETN 16 /* RZ/G2UL Only */ +#define R9A07G043_DMAC_ARESETN 17 +#define R9A07G043_DMAC_RST_ASYNC 18 +#define R9A07G043_SYC_RESETN 19 +#define R9A07G043_OSTM0_PRESETZ 20 +#define R9A07G043_OSTM1_PRESETZ 21 +#define R9A07G043_OSTM2_PRESETZ 22 +#define R9A07G043_MTU_X_PRESET_MTU3 23 +#define R9A07G043_POE3_RST_M_REG 24 +#define R9A07G043_WDT0_PRESETN 25 +#define R9A07G043_WDT2_PRESETN 26 /* RZ/G2UL Only */ +#define R9A07G043_SPI_RST 27 +#define R9A07G043_SDHI0_IXRST 28 +#define R9A07G043_SDHI1_IXRST 29 +#define R9A07G043_ISU_ARESETN 30 /* RZ/G2UL Only */ +#define R9A07G043_ISU_PRESETN 31 /* RZ/G2UL Only */ +#define R9A07G043_CRU_CMN_RSTB 32 /* RZ/G2UL Only */ +#define R9A07G043_CRU_PRESETN 33 /* RZ/G2UL Only */ +#define R9A07G043_CRU_ARESETN 34 /* RZ/G2UL Only */ +#define R9A07G043_LCDC_RESET_N 35 /* RZ/G2UL Only */ +#define R9A07G043_SSI0_RST_M2_REG 36 +#define R9A07G043_SSI1_RST_M2_REG 37 +#define R9A07G043_SSI2_RST_M2_REG 38 +#define R9A07G043_SSI3_RST_M2_REG 39 +#define R9A07G043_SRC_RST 40 /* RZ/G2UL Only */ +#define R9A07G043_USB_U2H0_HRESETN 41 +#define R9A07G043_USB_U2H1_HRESETN 42 +#define R9A07G043_USB_U2P_EXL_SYSRST 43 +#define R9A07G043_USB_PRESETN 44 +#define R9A07G043_ETH0_RST_HW_N 45 +#define R9A07G043_ETH1_RST_HW_N 46 +#define R9A07G043_I2C0_MRST 47 +#define R9A07G043_I2C1_MRST 48 +#define R9A07G043_I2C2_MRST 49 +#define R9A07G043_I2C3_MRST 50 +#define R9A07G043_SCIF0_RST_SYSTEM_N 51 +#define R9A07G043_SCIF1_RST_SYSTEM_N 52 +#define R9A07G043_SCIF2_RST_SYSTEM_N 53 +#define R9A07G043_SCIF3_RST_SYSTEM_N 54 +#define R9A07G043_SCIF4_RST_SYSTEM_N 55 +#define R9A07G043_SCI0_RST 56 +#define R9A07G043_SCI1_RST 57 +#define R9A07G043_IRDA_RST 58 +#define R9A07G043_RSPI0_RST 59 +#define R9A07G043_RSPI1_RST 60 +#define R9A07G043_RSPI2_RST 61 +#define R9A07G043_CANFD_RSTP_N 62 +#define R9A07G043_CANFD_RSTC_N 63 +#define R9A07G043_GPIO_RSTN 64 +#define R9A07G043_GPIO_PORT_RESETN 65 
+#define R9A07G043_GPIO_SPARE_RESETN 66 +#define R9A07G043_ADC_PRESETN 67 +#define R9A07G043_ADC_ADRST_N 68 +#define R9A07G043_TSU_PRESETN 69 +#define R9A07G043_NCEPLDM_DTM_PWR_RST_N 70 /* RZ/Five Only */ +#define R9A07G043_NCEPLDM_ARESETN 71 /* RZ/Five Only */ +#define R9A07G043_NCEPLMT_POR_RSTN 72 /* RZ/Five Only */ +#define R9A07G043_NCEPLMT_ARESETN 73 /* RZ/Five Only */ +#define R9A07G043_NCEPLIC_ARESETN 74 /* RZ/Five Only */ +#define R9A07G043_AX45MP_ARESETNM 75 /* RZ/Five Only */ +#define R9A07G043_AX45MP_ARESETNS 76 /* RZ/Five Only */ +#define R9A07G043_AX45MP_L2_RESETN 77 /* RZ/Five Only */ +#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */ +#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */ + + +#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */ diff --git a/include/dt-bindings/clock/r9a09g011-cpg.h b/include/dt-bindings/clock/r9a09g011-cpg.h new file mode 100644 index 0000000000..41dd585d71 --- /dev/null +++ b/include/dt-bindings/clock/r9a09g011-cpg.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2022 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__ +#define __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__ + +#include + +/* Module Clocks */ +#define R9A09G011_SYS_CLK 0 +#define R9A09G011_PFC_PCLK 1 +#define R9A09G011_PMC_CORE_CLOCK 2 +#define R9A09G011_GIC_CLK 3 +#define R9A09G011_RAMA_ACLK 4 +#define R9A09G011_ROMA_ACLK 5 +#define R9A09G011_SEC_ACLK 6 +#define R9A09G011_SEC_PCLK 7 +#define R9A09G011_SEC_TCLK 8 +#define R9A09G011_DMAA_ACLK 9 +#define R9A09G011_TSU0_PCLK 10 +#define R9A09G011_TSU1_PCLK 11 + +#define R9A09G011_CST_TRACECLK 12 +#define R9A09G011_CST_SB_CLK 13 +#define R9A09G011_CST_AHB_CLK 14 +#define R9A09G011_CST_ATB_SB_CLK 15 +#define R9A09G011_CST_TS_SB_CLK 16 + +#define R9A09G011_SDI0_ACLK 17 +#define R9A09G011_SDI0_IMCLK 18 +#define R9A09G011_SDI0_IMCLK2 19 +#define R9A09G011_SDI0_CLK_HS 20 +#define R9A09G011_SDI1_ACLK 21 +#define R9A09G011_SDI1_IMCLK 22 +#define R9A09G011_SDI1_IMCLK2 23 +#define R9A09G011_SDI1_CLK_HS 24 +#define R9A09G011_EMM_ACLK 25 +#define R9A09G011_EMM_IMCLK 26 +#define R9A09G011_EMM_IMCLK2 27 +#define R9A09G011_EMM_CLK_HS 28 +#define R9A09G011_NFI_ACLK 29 +#define R9A09G011_NFI_NF_CLK 30 + +#define R9A09G011_PCI_ACLK 31 +#define R9A09G011_PCI_CLK_PMU 32 +#define R9A09G011_PCI_APB_CLK 33 +#define R9A09G011_USB_ACLK_H 34 +#define R9A09G011_USB_ACLK_P 35 +#define R9A09G011_USB_PCLK 36 +#define R9A09G011_ETH0_CLK_AXI 37 +#define R9A09G011_ETH0_CLK_CHI 38 +#define R9A09G011_ETH0_GPTP_EXT 39 + +#define R9A09G011_SDT_CLK 40 +#define R9A09G011_SDT_CLKAPB 41 +#define R9A09G011_SDT_CLK48 42 +#define R9A09G011_GRP_CLK 43 +#define R9A09G011_CIF_P0_CLK 44 +#define R9A09G011_CIF_P1_CLK 45 +#define R9A09G011_CIF_APB_CLK 46 +#define R9A09G011_DCI_CLKAXI 47 +#define R9A09G011_DCI_CLKAPB 48 +#define R9A09G011_DCI_CLKDCI2 49 + +#define R9A09G011_HMI_PCLK 50 +#define R9A09G011_LCI_PCLK 51 +#define R9A09G011_LCI_ACLK 52 +#define R9A09G011_LCI_VCLK 53 +#define R9A09G011_LCI_LPCLK 54 + +#define R9A09G011_AUI_CLK 55 +#define R9A09G011_AUI_CLKAXI 56 +#define R9A09G011_AUI_CLKAPB 57 +#define R9A09G011_AUMCLK 58 +#define R9A09G011_GMCLK0 59 +#define R9A09G011_GMCLK1 60 +#define R9A09G011_MTR_CLK0 61 +#define R9A09G011_MTR_CLK1 62 +#define R9A09G011_MTR_CLKAPB 63 +#define R9A09G011_GFT_CLK 64 +#define R9A09G011_GFT_CLKAPB 65 +#define R9A09G011_GFT_MCLK 66 + +#define R9A09G011_ATGA_CLK 67 +#define R9A09G011_ATGA_CLKAPB 68 +#define R9A09G011_ATGB_CLK 69 +#define R9A09G011_ATGB_CLKAPB 
70 +#define R9A09G011_SYC_CNT_CLK 71 + +#define R9A09G011_CPERI_GRPA_PCLK 72 +#define R9A09G011_TIM0_CLK 73 +#define R9A09G011_TIM1_CLK 74 +#define R9A09G011_TIM2_CLK 75 +#define R9A09G011_TIM3_CLK 76 +#define R9A09G011_TIM4_CLK 77 +#define R9A09G011_TIM5_CLK 78 +#define R9A09G011_TIM6_CLK 79 +#define R9A09G011_TIM7_CLK 80 +#define R9A09G011_IIC_PCLK0 81 + +#define R9A09G011_CPERI_GRPB_PCLK 82 +#define R9A09G011_TIM8_CLK 83 +#define R9A09G011_TIM9_CLK 84 +#define R9A09G011_TIM10_CLK 85 +#define R9A09G011_TIM11_CLK 86 +#define R9A09G011_TIM12_CLK 87 +#define R9A09G011_TIM13_CLK 88 +#define R9A09G011_TIM14_CLK 89 +#define R9A09G011_TIM15_CLK 90 +#define R9A09G011_IIC_PCLK1 91 + +#define R9A09G011_CPERI_GRPC_PCLK 92 +#define R9A09G011_TIM16_CLK 93 +#define R9A09G011_TIM17_CLK 94 +#define R9A09G011_TIM18_CLK 95 +#define R9A09G011_TIM19_CLK 96 +#define R9A09G011_TIM20_CLK 97 +#define R9A09G011_TIM21_CLK 98 +#define R9A09G011_TIM22_CLK 99 +#define R9A09G011_TIM23_CLK 100 +#define R9A09G011_WDT0_PCLK 101 +#define R9A09G011_WDT0_CLK 102 +#define R9A09G011_WDT1_PCLK 103 +#define R9A09G011_WDT1_CLK 104 + +#define R9A09G011_CPERI_GRPD_PCLK 105 +#define R9A09G011_TIM24_CLK 106 +#define R9A09G011_TIM25_CLK 107 +#define R9A09G011_TIM26_CLK 108 +#define R9A09G011_TIM27_CLK 109 +#define R9A09G011_TIM28_CLK 110 +#define R9A09G011_TIM29_CLK 111 +#define R9A09G011_TIM30_CLK 112 +#define R9A09G011_TIM31_CLK 113 + +#define R9A09G011_CPERI_GRPE_PCLK 114 +#define R9A09G011_PWM0_CLK 115 +#define R9A09G011_PWM1_CLK 116 +#define R9A09G011_PWM2_CLK 117 +#define R9A09G011_PWM3_CLK 118 +#define R9A09G011_PWM4_CLK 119 +#define R9A09G011_PWM5_CLK 120 +#define R9A09G011_PWM6_CLK 121 +#define R9A09G011_PWM7_CLK 122 + +#define R9A09G011_CPERI_GRPF_PCLK 123 +#define R9A09G011_PWM8_CLK 124 +#define R9A09G011_PWM9_CLK 125 +#define R9A09G011_PWM10_CLK 126 +#define R9A09G011_PWM11_CLK 127 +#define R9A09G011_PWM12_CLK 128 +#define R9A09G011_PWM13_CLK 129 +#define R9A09G011_PWM14_CLK 130 +#define R9A09G011_PWM15_CLK 131 + +#define R9A09G011_CPERI_GRPG_PCLK 132 +#define R9A09G011_CPERI_GRPH_PCLK 133 +#define R9A09G011_URT_PCLK 134 +#define R9A09G011_URT0_CLK 135 +#define R9A09G011_URT1_CLK 136 +#define R9A09G011_CSI0_CLK 137 +#define R9A09G011_CSI1_CLK 138 +#define R9A09G011_CSI2_CLK 139 +#define R9A09G011_CSI3_CLK 140 +#define R9A09G011_CSI4_CLK 141 +#define R9A09G011_CSI5_CLK 142 + +#define R9A09G011_ICB_ACLK1 143 +#define R9A09G011_ICB_GIC_CLK 144 +#define R9A09G011_ICB_MPCLK1 145 +#define R9A09G011_ICB_SPCLK1 146 +#define R9A09G011_ICB_CLK48 147 +#define R9A09G011_ICB_CLK48_2 148 +#define R9A09G011_ICB_CLK48_3 149 +#define R9A09G011_ICB_CLK48_4L 150 +#define R9A09G011_ICB_CLK48_4R 151 +#define R9A09G011_ICB_CLK48_5 152 +#define R9A09G011_ICB_CST_ATB_SB_CLK 153 +#define R9A09G011_ICB_CST_CS_CLK 154 +#define R9A09G011_ICB_CLK100_1 155 +#define R9A09G011_ICB_ETH0_CLK_AXI 156 +#define R9A09G011_ICB_DCI_CLKAXI 157 +#define R9A09G011_ICB_SYC_CNT_CLK 158 + +#define R9A09G011_ICB_DRPA_ACLK 159 +#define R9A09G011_ICB_RFX_ACLK 160 +#define R9A09G011_ICB_RFX_PCLK5 161 +#define R9A09G011_ICB_MMC_ACLK 162 + +#define R9A09G011_ICB_MPCLK3 163 +#define R9A09G011_ICB_CIMA_CLK 164 +#define R9A09G011_ICB_CIMB_CLK 165 +#define R9A09G011_ICB_BIMA_CLK 166 +#define R9A09G011_ICB_FCD_CLKAXI 167 +#define R9A09G011_ICB_VD_ACLK4 168 +#define R9A09G011_ICB_MPCLK4 169 +#define R9A09G011_ICB_VCD_PCLK4 170 + +#define R9A09G011_CA53_CLK 171 +#define R9A09G011_CA53_ACLK 172 +#define R9A09G011_CA53_APCLK_DBG 173 +#define R9A09G011_CST_APB_CA53_CLK 174 +#define 
R9A09G011_CA53_ATCLK 175 +#define R9A09G011_CST_CS_CLK 176 +#define R9A09G011_CA53_TSCLK 177 +#define R9A09G011_CST_TS_CLK 178 +#define R9A09G011_CA53_APCLK_REG 179 + +#define R9A09G011_DRPA_ACLK 180 +#define R9A09G011_DRPA_DCLK 181 +#define R9A09G011_DRPA_INITCLK 182 + +#define R9A09G011_RAMB0_ACLK 183 +#define R9A09G011_RAMB1_ACLK 184 +#define R9A09G011_RAMB2_ACLK 185 +#define R9A09G011_RAMB3_ACLK 186 + +#define R9A09G011_CIMA_CLKAPB 187 +#define R9A09G011_CIMA_CLK 188 +#define R9A09G011_CIMB_CLK 189 +#define R9A09G011_FAFA_CLK 190 +#define R9A09G011_STG_CLKAXI 191 +#define R9A09G011_STG_CLK0 192 + +#define R9A09G011_BIMA_CLKAPB 193 +#define R9A09G011_BIMA_CLK 194 +#define R9A09G011_FAFB_CLK 195 +#define R9A09G011_FCD_CLK 196 +#define R9A09G011_FCD_CLKAXI 197 + +#define R9A09G011_RIM_CLK 198 +#define R9A09G011_VCD_ACLK 199 +#define R9A09G011_VCD_PCLK 200 +#define R9A09G011_JPG0_CLK 201 +#define R9A09G011_JPG0_ACLK 202 + +#define R9A09G011_MMC_CORE_DDRC_CLK 203 +#define R9A09G011_MMC_ACLK 204 +#define R9A09G011_MMC_PCLK 205 +#define R9A09G011_DDI_APBCLK 206 + +/* Resets */ +#define R9A09G011_SYS_RST_N 0 +#define R9A09G011_PFC_PRESETN 1 +#define R9A09G011_RAMA_ARESETN 2 +#define R9A09G011_ROM_ARESETN 3 +#define R9A09G011_DMAA_ARESETN 4 +#define R9A09G011_SEC_ARESETN 5 +#define R9A09G011_SEC_PRESETN 6 +#define R9A09G011_SEC_RSTB 7 +#define R9A09G011_TSU0_RESETN 8 +#define R9A09G011_TSU1_RESETN 9 +#define R9A09G011_PMC_RESET_N 10 + +#define R9A09G011_CST_NTRST 11 +#define R9A09G011_CST_NPOTRST 12 +#define R9A09G011_CST_NTRST2 13 +#define R9A09G011_CST_CS_RESETN 14 +#define R9A09G011_CST_TS_RESETN 15 +#define R9A09G011_CST_TRESETN 16 +#define R9A09G011_CST_SB_RESETN 17 +#define R9A09G011_CST_AHB_RESETN 18 +#define R9A09G011_CST_TS_SB_RESETN 19 +#define R9A09G011_CST_APB_CA53_RESETN 20 +#define R9A09G011_CST_ATB_SB_RESETN 21 + +#define R9A09G011_SDI0_IXRST 22 +#define R9A09G011_SDI1_IXRST 23 +#define R9A09G011_EMM_IXRST 24 +#define R9A09G011_NFI_MARESETN 25 +#define R9A09G011_NFI_REG_RST_N 26 +#define R9A09G011_USB_PRESET_N 27 +#define R9A09G011_USB_DRD_RESET 28 +#define R9A09G011_USB_ARESETN_P 29 +#define R9A09G011_USB_ARESETN_H 30 +#define R9A09G011_ETH0_RST_HW_N 31 +#define R9A09G011_PCI_ARESETN 32 + +#define R9A09G011_SDT_RSTSYSAX 33 +#define R9A09G011_GRP_RESETN 34 +#define R9A09G011_CIF_RST_N 35 +#define R9A09G011_DCU_RSTSYSAX 36 +#define R9A09G011_HMI_RST_N 37 +#define R9A09G011_HMI_PRESETN 38 +#define R9A09G011_LCI_PRESETN 39 +#define R9A09G011_LCI_ARESETN 40 + +#define R9A09G011_AUI_RSTSYSAX 41 +#define R9A09G011_MTR_RSTSYSAX 42 +#define R9A09G011_GFT_RSTSYSAX 43 +#define R9A09G011_ATGA_RSTSYSAX 44 +#define R9A09G011_ATGB_RSTSYSAX 45 +#define R9A09G011_SYC_RST_N 46 + +#define R9A09G011_TIM_GPA_PRESETN 47 +#define R9A09G011_TIM_GPB_PRESETN 48 +#define R9A09G011_TIM_GPC_PRESETN 49 +#define R9A09G011_TIM_GPD_PRESETN 50 +#define R9A09G011_PWM_GPE_PRESETN 51 +#define R9A09G011_PWM_GPF_PRESETN 52 +#define R9A09G011_CSI_GPG_PRESETN 53 +#define R9A09G011_CSI_GPH_PRESETN 54 +#define R9A09G011_IIC_GPA_PRESETN 55 +#define R9A09G011_IIC_GPB_PRESETN 56 +#define R9A09G011_URT_PRESETN 57 +#define R9A09G011_WDT0_PRESETN 58 +#define R9A09G011_WDT1_PRESETN 59 + +#define R9A09G011_ICB_PD_AWO_RST_N 60 +#define R9A09G011_ICB_PD_MMC_RST_N 61 +#define R9A09G011_ICB_PD_VD0_RST_N 62 +#define R9A09G011_ICB_PD_VD1_RST_N 63 +#define R9A09G011_ICB_PD_RFX_RST_N 64 + +#define R9A09G011_CA53_NCPUPORESET0 65 +#define R9A09G011_CA53_NCPUPORESET1 66 +#define R9A09G011_CA53_NCORERESET0 67 +#define 
R9A09G011_CA53_NCORERESET1 68 +#define R9A09G011_CA53_NPRESETDBG 69 +#define R9A09G011_CA53_L2RESET 70 +#define R9A09G011_CA53_NMISCRESET_HM 71 +#define R9A09G011_CA53_NMISCRESET_SM 72 +#define R9A09G011_CA53_NARESET 73 + +#define R9A09G011_DRPA_ARESETN 74 + +#define R9A09G011_RAMB0_ARESETN 75 +#define R9A09G011_RAMB1_ARESETN 76 +#define R9A09G011_RAMB2_ARESETN 77 +#define R9A09G011_RAMB3_ARESETN 78 + +#define R9A09G011_CIMA_RSTSYSAX 79 +#define R9A09G011_CIMB_RSTSYSAX 80 +#define R9A09G011_FAFA_RSTSYSAX 81 +#define R9A09G011_STG_RSTSYSAX 82 + +#define R9A09G011_BIMA_RSTSYSAX 83 +#define R9A09G011_FAFB_RSTSYSAX 84 +#define R9A09G011_FCD_RSTSYSAX 85 +#define R9A09G011_RIM_RSTSYSAX 86 +#define R9A09G011_VCD_RESETN 87 +#define R9A09G011_JPG_XRESET 88 + +#define R9A09G011_MMC_CORE_DDRC_RSTN 89 +#define R9A09G011_MMC_ARESETN_N 90 +#define R9A09G011_MMC_PRESETN 91 +#define R9A09G011_DDI_PWROK 92 +#define R9A09G011_DDI_RESET 93 +#define R9A09G011_DDI_RESETN_APB 94 + +#endif /* __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__ */ diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h new file mode 100644 index 0000000000..ea9f91b4eb --- /dev/null +++ b/include/dt-bindings/clock/samsung,exynosautov9.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * Author: Chanho Park + * + * Device Tree binding constants for Exynos Auto V9 clock controller. + */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H +#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H + +/* CMU_TOP */ +#define FOUT_SHARED0_PLL 1 +#define FOUT_SHARED1_PLL 2 +#define FOUT_SHARED2_PLL 3 +#define FOUT_SHARED3_PLL 4 +#define FOUT_SHARED4_PLL 5 + +/* MUX in CMU_TOP */ +#define MOUT_SHARED0_PLL 6 +#define MOUT_SHARED1_PLL 7 +#define MOUT_SHARED2_PLL 8 +#define MOUT_SHARED3_PLL 9 +#define MOUT_SHARED4_PLL 10 +#define MOUT_CLKCMU_CMU_BOOST 11 +#define MOUT_CLKCMU_CMU_CMUREF 12 +#define MOUT_CLKCMU_ACC_BUS 13 +#define MOUT_CLKCMU_APM_BUS 14 +#define MOUT_CLKCMU_AUD_CPU 15 +#define MOUT_CLKCMU_AUD_BUS 16 +#define MOUT_CLKCMU_BUSC_BUS 17 +#define MOUT_CLKCMU_BUSMC_BUS 19 +#define MOUT_CLKCMU_CORE_BUS 20 +#define MOUT_CLKCMU_CPUCL0_SWITCH 21 +#define MOUT_CLKCMU_CPUCL0_CLUSTER 22 +#define MOUT_CLKCMU_CPUCL1_SWITCH 24 +#define MOUT_CLKCMU_CPUCL1_CLUSTER 25 +#define MOUT_CLKCMU_DPTX_BUS 26 +#define MOUT_CLKCMU_DPTX_DPGTC 27 +#define MOUT_CLKCMU_DPUM_BUS 28 +#define MOUT_CLKCMU_DPUS0_BUS 29 +#define MOUT_CLKCMU_DPUS1_BUS 30 +#define MOUT_CLKCMU_FSYS0_BUS 31 +#define MOUT_CLKCMU_FSYS0_PCIE 32 +#define MOUT_CLKCMU_FSYS1_BUS 33 +#define MOUT_CLKCMU_FSYS1_USBDRD 34 +#define MOUT_CLKCMU_FSYS1_MMC_CARD 35 +#define MOUT_CLKCMU_FSYS2_BUS 36 +#define MOUT_CLKCMU_FSYS2_UFS_EMBD 37 +#define MOUT_CLKCMU_FSYS2_ETHERNET 38 +#define MOUT_CLKCMU_G2D_G2D 39 +#define MOUT_CLKCMU_G2D_MSCL 40 +#define MOUT_CLKCMU_G3D00_SWITCH 41 +#define MOUT_CLKCMU_G3D01_SWITCH 42 +#define MOUT_CLKCMU_G3D1_SWITCH 43 +#define MOUT_CLKCMU_ISPB_BUS 44 +#define MOUT_CLKCMU_MFC_MFC 45 +#define MOUT_CLKCMU_MFC_WFD 46 +#define MOUT_CLKCMU_MIF_SWITCH 47 +#define MOUT_CLKCMU_MIF_BUSP 48 +#define MOUT_CLKCMU_NPU_BUS 49 +#define MOUT_CLKCMU_PERIC0_BUS 50 +#define MOUT_CLKCMU_PERIC0_IP 51 +#define MOUT_CLKCMU_PERIC1_BUS 52 +#define MOUT_CLKCMU_PERIC1_IP 53 +#define MOUT_CLKCMU_PERIS_BUS 54 + +/* DIV in CMU_TOP */ +#define DOUT_SHARED0_DIV3 101 +#define DOUT_SHARED0_DIV2 102 +#define DOUT_SHARED1_DIV3 103 +#define DOUT_SHARED1_DIV2 104 +#define DOUT_SHARED1_DIV4 105 +#define 
DOUT_SHARED2_DIV3 106 +#define DOUT_SHARED2_DIV2 107 +#define DOUT_SHARED2_DIV4 108 +#define DOUT_SHARED4_DIV2 109 +#define DOUT_SHARED4_DIV4 110 +#define DOUT_CLKCMU_CMU_BOOST 111 +#define DOUT_CLKCMU_ACC_BUS 112 +#define DOUT_CLKCMU_APM_BUS 113 +#define DOUT_CLKCMU_AUD_CPU 114 +#define DOUT_CLKCMU_AUD_BUS 115 +#define DOUT_CLKCMU_BUSC_BUS 116 +#define DOUT_CLKCMU_BUSMC_BUS 118 +#define DOUT_CLKCMU_CORE_BUS 119 +#define DOUT_CLKCMU_CPUCL0_SWITCH 120 +#define DOUT_CLKCMU_CPUCL0_CLUSTER 121 +#define DOUT_CLKCMU_CPUCL1_SWITCH 123 +#define DOUT_CLKCMU_CPUCL1_CLUSTER 124 +#define DOUT_CLKCMU_DPTX_BUS 125 +#define DOUT_CLKCMU_DPTX_DPGTC 126 +#define DOUT_CLKCMU_DPUM_BUS 127 +#define DOUT_CLKCMU_DPUS0_BUS 128 +#define DOUT_CLKCMU_DPUS1_BUS 129 +#define DOUT_CLKCMU_FSYS0_BUS 130 +#define DOUT_CLKCMU_FSYS0_PCIE 131 +#define DOUT_CLKCMU_FSYS1_BUS 132 +#define DOUT_CLKCMU_FSYS1_USBDRD 133 +#define DOUT_CLKCMU_FSYS2_BUS 134 +#define DOUT_CLKCMU_FSYS2_UFS_EMBD 135 +#define DOUT_CLKCMU_FSYS2_ETHERNET 136 +#define DOUT_CLKCMU_G2D_G2D 137 +#define DOUT_CLKCMU_G2D_MSCL 138 +#define DOUT_CLKCMU_G3D00_SWITCH 139 +#define DOUT_CLKCMU_G3D01_SWITCH 140 +#define DOUT_CLKCMU_G3D1_SWITCH 141 +#define DOUT_CLKCMU_ISPB_BUS 142 +#define DOUT_CLKCMU_MFC_MFC 143 +#define DOUT_CLKCMU_MFC_WFD 144 +#define DOUT_CLKCMU_MIF_SWITCH 145 +#define DOUT_CLKCMU_MIF_BUSP 146 +#define DOUT_CLKCMU_NPU_BUS 147 +#define DOUT_CLKCMU_PERIC0_BUS 148 +#define DOUT_CLKCMU_PERIC0_IP 149 +#define DOUT_CLKCMU_PERIC1_BUS 150 +#define DOUT_CLKCMU_PERIC1_IP 151 +#define DOUT_CLKCMU_PERIS_BUS 152 + +/* GAT in CMU_TOP */ +#define GOUT_CLKCMU_CMU_BOOST 201 +#define GOUT_CLKCMU_CPUCL0_BOOST 202 +#define GOUT_CLKCMU_CPUCL1_BOOST 203 +#define GOUT_CLKCMU_CORE_BOOST 204 +#define GOUT_CLKCMU_BUSC_BOOST 205 +#define GOUT_CLKCMU_BUSMC_BOOST 206 +#define GOUT_CLKCMU_MIF_BOOST 207 +#define GOUT_CLKCMU_ACC_BUS 208 +#define GOUT_CLKCMU_APM_BUS 209 +#define GOUT_CLKCMU_AUD_CPU 210 +#define GOUT_CLKCMU_AUD_BUS 211 +#define GOUT_CLKCMU_BUSC_BUS 212 +#define GOUT_CLKCMU_BUSMC_BUS 214 +#define GOUT_CLKCMU_CORE_BUS 215 +#define GOUT_CLKCMU_CPUCL0_SWITCH 216 +#define GOUT_CLKCMU_CPUCL0_CLUSTER 217 +#define GOUT_CLKCMU_CPUCL1_SWITCH 219 +#define GOUT_CLKCMU_CPUCL1_CLUSTER 220 +#define GOUT_CLKCMU_DPTX_BUS 221 +#define GOUT_CLKCMU_DPTX_DPGTC 222 +#define GOUT_CLKCMU_DPUM_BUS 223 +#define GOUT_CLKCMU_DPUS0_BUS 224 +#define GOUT_CLKCMU_DPUS1_BUS 225 +#define GOUT_CLKCMU_FSYS0_BUS 226 +#define GOUT_CLKCMU_FSYS0_PCIE 227 +#define GOUT_CLKCMU_FSYS1_BUS 228 +#define GOUT_CLKCMU_FSYS1_USBDRD 229 +#define GOUT_CLKCMU_FSYS1_MMC_CARD 230 +#define GOUT_CLKCMU_FSYS2_BUS 231 +#define GOUT_CLKCMU_FSYS2_UFS_EMBD 232 +#define GOUT_CLKCMU_FSYS2_ETHERNET 233 +#define GOUT_CLKCMU_G2D_G2D 234 +#define GOUT_CLKCMU_G2D_MSCL 235 +#define GOUT_CLKCMU_G3D00_SWITCH 236 +#define GOUT_CLKCMU_G3D01_SWITCH 237 +#define GOUT_CLKCMU_G3D1_SWITCH 238 +#define GOUT_CLKCMU_ISPB_BUS 239 +#define GOUT_CLKCMU_MFC_MFC 240 +#define GOUT_CLKCMU_MFC_WFD 241 +#define GOUT_CLKCMU_MIF_SWITCH 242 +#define GOUT_CLKCMU_MIF_BUSP 243 +#define GOUT_CLKCMU_NPU_BUS 244 +#define GOUT_CLKCMU_PERIC0_BUS 245 +#define GOUT_CLKCMU_PERIC0_IP 246 +#define GOUT_CLKCMU_PERIC1_BUS 247 +#define GOUT_CLKCMU_PERIC1_IP 248 +#define GOUT_CLKCMU_PERIS_BUS 249 + +#define TOP_NR_CLK 250 + +/* CMU_BUSMC */ +#define CLK_MOUT_BUSMC_BUS_USER 1 +#define CLK_DOUT_BUSMC_BUSP 2 +#define CLK_GOUT_BUSMC_PDMA0_PCLK 3 +#define CLK_GOUT_BUSMC_SPDMA_PCLK 4 + +#define BUSMC_NR_CLK 5 + +/* CMU_CORE */ +#define CLK_MOUT_CORE_BUS_USER 1 +#define 
CLK_DOUT_CORE_BUSP 2
+#define CLK_GOUT_CORE_CCI_CLK 3
+#define CLK_GOUT_CORE_CCI_PCLK 4
+#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
+
+#define CORE_NR_CLK 6
+
+/* CMU_FSYS2 */
+#define CLK_MOUT_FSYS2_BUS_USER 1
+#define CLK_MOUT_FSYS2_UFS_EMBD_USER 2
+#define CLK_MOUT_FSYS2_ETHERNET_USER 3
+#define CLK_GOUT_FSYS2_UFS_EMBD0_ACLK 4
+#define CLK_GOUT_FSYS2_UFS_EMBD0_UNIPRO 5
+#define CLK_GOUT_FSYS2_UFS_EMBD1_ACLK 6
+#define CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO 7
+
+#define FSYS2_NR_CLK 8
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_IP_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI_I2C 9
+
+#define CLK_DOUT_PERIC0_USI00_USI 10
+#define CLK_DOUT_PERIC0_USI01_USI 11
+#define CLK_DOUT_PERIC0_USI02_USI 12
+#define CLK_DOUT_PERIC0_USI03_USI 13
+#define CLK_DOUT_PERIC0_USI04_USI 14
+#define CLK_DOUT_PERIC0_USI05_USI 15
+#define CLK_DOUT_PERIC0_USI_I2C 16
+
+#define CLK_GOUT_PERIC0_IPCLK_0 20
+#define CLK_GOUT_PERIC0_IPCLK_1 21
+#define CLK_GOUT_PERIC0_IPCLK_2 22
+#define CLK_GOUT_PERIC0_IPCLK_3 23
+#define CLK_GOUT_PERIC0_IPCLK_4 24
+#define CLK_GOUT_PERIC0_IPCLK_5 25
+#define CLK_GOUT_PERIC0_IPCLK_6 26
+#define CLK_GOUT_PERIC0_IPCLK_7 27
+#define CLK_GOUT_PERIC0_IPCLK_8 28
+#define CLK_GOUT_PERIC0_IPCLK_9 29
+#define CLK_GOUT_PERIC0_IPCLK_10 30
+#define CLK_GOUT_PERIC0_IPCLK_11 31
+#define CLK_GOUT_PERIC0_PCLK_0 32
+#define CLK_GOUT_PERIC0_PCLK_1 33
+#define CLK_GOUT_PERIC0_PCLK_2 34
+#define CLK_GOUT_PERIC0_PCLK_3 35
+#define CLK_GOUT_PERIC0_PCLK_4 36
+#define CLK_GOUT_PERIC0_PCLK_5 37
+#define CLK_GOUT_PERIC0_PCLK_6 38
+#define CLK_GOUT_PERIC0_PCLK_7 39
+#define CLK_GOUT_PERIC0_PCLK_8 40
+#define CLK_GOUT_PERIC0_PCLK_9 41
+#define CLK_GOUT_PERIC0_PCLK_10 42
+#define CLK_GOUT_PERIC0_PCLK_11 43
+
+#define PERIC0_NR_CLK 44
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_IP_USER 2
+#define CLK_MOUT_PERIC1_USI06_USI 3
+#define CLK_MOUT_PERIC1_USI07_USI 4
+#define CLK_MOUT_PERIC1_USI08_USI 5
+#define CLK_MOUT_PERIC1_USI09_USI 6
+#define CLK_MOUT_PERIC1_USI10_USI 7
+#define CLK_MOUT_PERIC1_USI11_USI 8
+#define CLK_MOUT_PERIC1_USI_I2C 9
+
+#define CLK_DOUT_PERIC1_USI06_USI 10
+#define CLK_DOUT_PERIC1_USI07_USI 11
+#define CLK_DOUT_PERIC1_USI08_USI 12
+#define CLK_DOUT_PERIC1_USI09_USI 13
+#define CLK_DOUT_PERIC1_USI10_USI 14
+#define CLK_DOUT_PERIC1_USI11_USI 15
+#define CLK_DOUT_PERIC1_USI_I2C 16
+
+#define CLK_GOUT_PERIC1_IPCLK_0 20
+#define CLK_GOUT_PERIC1_IPCLK_1 21
+#define CLK_GOUT_PERIC1_IPCLK_2 22
+#define CLK_GOUT_PERIC1_IPCLK_3 23
+#define CLK_GOUT_PERIC1_IPCLK_4 24
+#define CLK_GOUT_PERIC1_IPCLK_5 25
+#define CLK_GOUT_PERIC1_IPCLK_6 26
+#define CLK_GOUT_PERIC1_IPCLK_7 27
+#define CLK_GOUT_PERIC1_IPCLK_8 28
+#define CLK_GOUT_PERIC1_IPCLK_9 29
+#define CLK_GOUT_PERIC1_IPCLK_10 30
+#define CLK_GOUT_PERIC1_IPCLK_11 31
+#define CLK_GOUT_PERIC1_PCLK_0 32
+#define CLK_GOUT_PERIC1_PCLK_1 33
+#define CLK_GOUT_PERIC1_PCLK_2 34
+#define CLK_GOUT_PERIC1_PCLK_3 35
+#define CLK_GOUT_PERIC1_PCLK_4 36
+#define CLK_GOUT_PERIC1_PCLK_5 37
+#define CLK_GOUT_PERIC1_PCLK_6 38
+#define CLK_GOUT_PERIC1_PCLK_7 39
+#define CLK_GOUT_PERIC1_PCLK_8 40
+#define CLK_GOUT_PERIC1_PCLK_9 41
+#define CLK_GOUT_PERIC1_PCLK_10 42
+#define CLK_GOUT_PERIC1_PCLK_11 43
+
+#define PERIC1_NR_CLK 44
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
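+/*
+ * Editor's sketch, not part of the original patch: a hypothetical consumer
+ * of the CMU_PERIS watchdog gates defined just below. The &cmu_peris label,
+ * the node name and its address are assumed for illustration only.
+ *
+ *   #include <dt-bindings/clock/samsung,exynosautov9.h>
+ *
+ *   watchdog_cl0: watchdog@10070000 {
+ *           clocks = <&cmu_peris CLK_GOUT_WDT_CLUSTER0>;
+ *           clock-names = "watchdog";
+ *   };
+ */
+#define 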
CLK_GOUT_SYSREG_PERIS_PCLK 2 +#define CLK_GOUT_WDT_CLUSTER0 3 +#define CLK_GOUT_WDT_CLUSTER1 4 + +#define PERIS_NR_CLK 5 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H */ diff --git a/include/dt-bindings/clock/sprd,ums512-clk.h b/include/dt-bindings/clock/sprd,ums512-clk.h new file mode 100644 index 0000000000..4f1d908499 --- /dev/null +++ b/include/dt-bindings/clock/sprd,ums512-clk.h @@ -0,0 +1,397 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Unisoc UMS512 SoC DTS file + * + * Copyright (C) 2022, Unisoc Inc. + */ + +#ifndef _DT_BINDINGS_CLK_UMS512_H_ +#define _DT_BINDINGS_CLK_UMS512_H_ + +#define CLK_26M_AUD 0 +#define CLK_13M 1 +#define CLK_6M5 2 +#define CLK_4M3 3 +#define CLK_2M 4 +#define CLK_1M 5 +#define CLK_250K 6 +#define CLK_RCO_25M 7 +#define CLK_RCO_4M 8 +#define CLK_RCO_2M 9 +#define CLK_ISPPLL_GATE 10 +#define CLK_DPLL0_GATE 11 +#define CLK_DPLL1_GATE 12 +#define CLK_LPLL_GATE 13 +#define CLK_TWPLL_GATE 14 +#define CLK_GPLL_GATE 15 +#define CLK_RPLL_GATE 16 +#define CLK_CPPLL_GATE 17 +#define CLK_MPLL0_GATE 18 +#define CLK_MPLL1_GATE 19 +#define CLK_MPLL2_GATE 20 +#define CLK_PMU_GATE_NUM (CLK_MPLL2_GATE + 1) + +#define CLK_DPLL0 0 +#define CLK_DPLL0_58M31 1 +#define CLK_ANLG_PHY_G0_NUM (CLK_DPLL0_58M31 + 1) + +#define CLK_MPLL1 0 +#define CLK_MPLL1_63M38 1 +#define CLK_ANLG_PHY_G2_NUM (CLK_MPLL1_63M38 + 1) + +#define CLK_RPLL 0 +#define CLK_AUDIO_GATE 1 +#define CLK_MPLL0 2 +#define CLK_MPLL0_56M88 3 +#define CLK_MPLL2 4 +#define CLK_MPLL2_47M13 5 +#define CLK_ANLG_PHY_G3_NUM (CLK_MPLL2_47M13 + 1) + +#define CLK_TWPLL 0 +#define CLK_TWPLL_768M 1 +#define CLK_TWPLL_384M 2 +#define CLK_TWPLL_192M 3 +#define CLK_TWPLL_96M 4 +#define CLK_TWPLL_48M 5 +#define CLK_TWPLL_24M 6 +#define CLK_TWPLL_12M 7 +#define CLK_TWPLL_512M 8 +#define CLK_TWPLL_256M 9 +#define CLK_TWPLL_128M 10 +#define CLK_TWPLL_64M 11 +#define CLK_TWPLL_307M2 12 +#define CLK_TWPLL_219M4 13 +#define CLK_TWPLL_170M6 14 +#define CLK_TWPLL_153M6 15 +#define CLK_TWPLL_76M8 16 +#define CLK_TWPLL_51M2 17 +#define CLK_TWPLL_38M4 18 +#define CLK_TWPLL_19M2 19 +#define CLK_TWPLL_12M29 20 +#define CLK_LPLL 21 +#define CLK_LPLL_614M4 22 +#define CLK_LPLL_409M6 23 +#define CLK_LPLL_245M76 24 +#define CLK_LPLL_30M72 25 +#define CLK_ISPPLL 26 +#define CLK_ISPPLL_468M 27 +#define CLK_ISPPLL_78M 28 +#define CLK_GPLL 29 +#define CLK_GPLL_40M 30 +#define CLK_CPPLL 31 +#define CLK_CPPLL_39M32 32 +#define CLK_ANLG_PHY_GC_NUM (CLK_CPPLL_39M32 + 1) + +#define CLK_AP_APB 0 +#define CLK_IPI 1 +#define CLK_AP_UART0 2 +#define CLK_AP_UART1 3 +#define CLK_AP_UART2 4 +#define CLK_AP_I2C0 5 +#define CLK_AP_I2C1 6 +#define CLK_AP_I2C2 7 +#define CLK_AP_I2C3 8 +#define CLK_AP_I2C4 9 +#define CLK_AP_SPI0 10 +#define CLK_AP_SPI1 11 +#define CLK_AP_SPI2 12 +#define CLK_AP_SPI3 13 +#define CLK_AP_IIS0 14 +#define CLK_AP_IIS1 15 +#define CLK_AP_IIS2 16 +#define CLK_AP_SIM 17 +#define CLK_AP_CE 18 +#define CLK_SDIO0_2X 19 +#define CLK_SDIO1_2X 20 +#define CLK_EMMC_2X 21 +#define CLK_VSP 22 +#define CLK_DISPC0 23 +#define CLK_DISPC0_DPI 24 +#define CLK_DSI_APB 25 +#define CLK_DSI_RXESC 26 +#define CLK_DSI_LANEBYTE 27 +#define CLK_VDSP 28 +#define CLK_VDSP_M 29 +#define CLK_AP_CLK_NUM (CLK_VDSP_M + 1) + +#define CLK_DSI_EB 0 +#define CLK_DISPC_EB 1 +#define CLK_VSP_EB 2 +#define CLK_VDMA_EB 3 +#define CLK_DMA_PUB_EB 4 +#define CLK_DMA_SEC_EB 5 +#define CLK_IPI_EB 6 +#define CLK_AHB_CKG_EB 7 +#define CLK_BM_CLK_EB 8 +#define CLK_AP_AHB_GATE_NUM (CLK_BM_CLK_EB + 1) + +#define CLK_AON_APB 0 +#define CLK_ADI 1 +#define 
CLK_AUX0 2 +#define CLK_AUX1 3 +#define CLK_AUX2 4 +#define CLK_PROBE 5 +#define CLK_PWM0 6 +#define CLK_PWM1 7 +#define CLK_PWM2 8 +#define CLK_PWM3 9 +#define CLK_EFUSE 10 +#define CLK_UART0 11 +#define CLK_UART1 12 +#define CLK_THM0 13 +#define CLK_THM1 14 +#define CLK_THM2 15 +#define CLK_THM3 16 +#define CLK_AON_I2C 17 +#define CLK_AON_IIS 18 +#define CLK_SCC 19 +#define CLK_APCPU_DAP 20 +#define CLK_APCPU_DAP_MTCK 21 +#define CLK_APCPU_TS 22 +#define CLK_DEBUG_TS 23 +#define CLK_DSI_TEST_S 24 +#define CLK_DJTAG_TCK 25 +#define CLK_DJTAG_TCK_HW 26 +#define CLK_AON_TMR 27 +#define CLK_AON_PMU 28 +#define CLK_DEBOUNCE 29 +#define CLK_APCPU_PMU 30 +#define CLK_TOP_DVFS 31 +#define CLK_OTG_UTMI 32 +#define CLK_OTG_REF 33 +#define CLK_CSSYS 34 +#define CLK_CSSYS_PUB 35 +#define CLK_CSSYS_APB 36 +#define CLK_AP_AXI 37 +#define CLK_AP_MM 38 +#define CLK_SDIO2_2X 39 +#define CLK_ANALOG_IO_APB 40 +#define CLK_DMC_REF_CLK 41 +#define CLK_EMC 42 +#define CLK_USB 43 +#define CLK_26M_PMU 44 +#define CLK_AON_APB_NUM (CLK_26M_PMU + 1) + +#define CLK_MM_AHB 0 +#define CLK_MM_MTX 1 +#define CLK_SENSOR0 2 +#define CLK_SENSOR1 3 +#define CLK_SENSOR2 4 +#define CLK_CPP 5 +#define CLK_JPG 6 +#define CLK_FD 7 +#define CLK_DCAM_IF 8 +#define CLK_DCAM_AXI 9 +#define CLK_ISP 10 +#define CLK_MIPI_CSI0 11 +#define CLK_MIPI_CSI1 12 +#define CLK_MIPI_CSI2 13 +#define CLK_MM_CLK_NUM (CLK_MIPI_CSI2 + 1) + +#define CLK_RC100M_CAL_EB 0 +#define CLK_DJTAG_TCK_EB 1 +#define CLK_DJTAG_EB 2 +#define CLK_AUX0_EB 3 +#define CLK_AUX1_EB 4 +#define CLK_AUX2_EB 5 +#define CLK_PROBE_EB 6 +#define CLK_MM_EB 7 +#define CLK_GPU_EB 8 +#define CLK_MSPI_EB 9 +#define CLK_APCPU_DAP_EB 10 +#define CLK_AON_CSSYS_EB 11 +#define CLK_CSSYS_APB_EB 12 +#define CLK_CSSYS_PUB_EB 13 +#define CLK_SDPHY_CFG_EB 14 +#define CLK_SDPHY_REF_EB 15 +#define CLK_EFUSE_EB 16 +#define CLK_GPIO_EB 17 +#define CLK_MBOX_EB 18 +#define CLK_KPD_EB 19 +#define CLK_AON_SYST_EB 20 +#define CLK_AP_SYST_EB 21 +#define CLK_AON_TMR_EB 22 +#define CLK_OTG_UTMI_EB 23 +#define CLK_OTG_PHY_EB 24 +#define CLK_SPLK_EB 25 +#define CLK_PIN_EB 26 +#define CLK_ANA_EB 27 +#define CLK_APCPU_TS0_EB 28 +#define CLK_APB_BUSMON_EB 29 +#define CLK_AON_IIS_EB 30 +#define CLK_SCC_EB 31 +#define CLK_THM0_EB 32 +#define CLK_THM1_EB 33 +#define CLK_THM2_EB 34 +#define CLK_ASIM_TOP_EB 35 +#define CLK_I2C_EB 36 +#define CLK_PMU_EB 37 +#define CLK_ADI_EB 38 +#define CLK_EIC_EB 39 +#define CLK_AP_INTC0_EB 40 +#define CLK_AP_INTC1_EB 41 +#define CLK_AP_INTC2_EB 42 +#define CLK_AP_INTC3_EB 43 +#define CLK_AP_INTC4_EB 44 +#define CLK_AP_INTC5_EB 45 +#define CLK_AUDCP_INTC_EB 46 +#define CLK_AP_TMR0_EB 47 +#define CLK_AP_TMR1_EB 48 +#define CLK_AP_TMR2_EB 49 +#define CLK_PWM0_EB 50 +#define CLK_PWM1_EB 51 +#define CLK_PWM2_EB 52 +#define CLK_PWM3_EB 53 +#define CLK_AP_WDG_EB 54 +#define CLK_APCPU_WDG_EB 55 +#define CLK_SERDES_EB 56 +#define CLK_ARCH_RTC_EB 57 +#define CLK_KPD_RTC_EB 58 +#define CLK_AON_SYST_RTC_EB 59 +#define CLK_AP_SYST_RTC_EB 60 +#define CLK_AON_TMR_RTC_EB 61 +#define CLK_EIC_RTC_EB 62 +#define CLK_EIC_RTCDV5_EB 63 +#define CLK_AP_WDG_RTC_EB 64 +#define CLK_AC_WDG_RTC_EB 65 +#define CLK_AP_TMR0_RTC_EB 66 +#define CLK_AP_TMR1_RTC_EB 67 +#define CLK_AP_TMR2_RTC_EB 68 +#define CLK_DCXO_LC_RTC_EB 69 +#define CLK_BB_CAL_RTC_EB 70 +#define CLK_AP_EMMC_RTC_EB 71 +#define CLK_AP_SDIO0_RTC_EB 72 +#define CLK_AP_SDIO1_RTC_EB 73 +#define CLK_AP_SDIO2_RTC_EB 74 +#define CLK_DSI_CSI_TEST_EB 75 +#define CLK_DJTAG_TCK_EN 76 +#define CLK_DPHY_REF_EB 77 +#define CLK_DMC_REF_EB 78 +#define 
CLK_OTG_REF_EB 79 +#define CLK_TSEN_EB 80 +#define CLK_TMR_EB 81 +#define CLK_RC100M_REF_EB 82 +#define CLK_RC100M_FDK_EB 83 +#define CLK_DEBOUNCE_EB 84 +#define CLK_DET_32K_EB 85 +#define CLK_TOP_CSSYS_EB 86 +#define CLK_AP_AXI_EN 87 +#define CLK_SDIO0_2X_EN 88 +#define CLK_SDIO0_1X_EN 89 +#define CLK_SDIO1_2X_EN 90 +#define CLK_SDIO1_1X_EN 91 +#define CLK_SDIO2_2X_EN 92 +#define CLK_SDIO2_1X_EN 93 +#define CLK_EMMC_2X_EN 94 +#define CLK_EMMC_1X_EN 95 +#define CLK_PLL_TEST_EN 96 +#define CLK_CPHY_CFG_EN 97 +#define CLK_DEBUG_TS_EN 98 +#define CLK_ACCESS_AUD_EN 99 +#define CLK_AON_APB_GATE_NUM (CLK_ACCESS_AUD_EN + 1) + +#define CLK_MM_CPP_EB 0 +#define CLK_MM_JPG_EB 1 +#define CLK_MM_DCAM_EB 2 +#define CLK_MM_ISP_EB 3 +#define CLK_MM_CSI2_EB 4 +#define CLK_MM_CSI1_EB 5 +#define CLK_MM_CSI0_EB 6 +#define CLK_MM_CKG_EB 7 +#define CLK_ISP_AHB_EB 8 +#define CLK_MM_DVFS_EB 9 +#define CLK_MM_FD_EB 10 +#define CLK_MM_SENSOR2_EB 11 +#define CLK_MM_SENSOR1_EB 12 +#define CLK_MM_SENSOR0_EB 13 +#define CLK_MM_MIPI_CSI2_EB 14 +#define CLK_MM_MIPI_CSI1_EB 15 +#define CLK_MM_MIPI_CSI0_EB 16 +#define CLK_DCAM_AXI_EB 17 +#define CLK_ISP_AXI_EB 18 +#define CLK_MM_CPHY_EB 19 +#define CLK_MM_GATE_CLK_NUM (CLK_MM_CPHY_EB + 1) + +#define CLK_SIM0_EB 0 +#define CLK_IIS0_EB 1 +#define CLK_IIS1_EB 2 +#define CLK_IIS2_EB 3 +#define CLK_APB_REG_EB 4 +#define CLK_SPI0_EB 5 +#define CLK_SPI1_EB 6 +#define CLK_SPI2_EB 7 +#define CLK_SPI3_EB 8 +#define CLK_I2C0_EB 9 +#define CLK_I2C1_EB 10 +#define CLK_I2C2_EB 11 +#define CLK_I2C3_EB 12 +#define CLK_I2C4_EB 13 +#define CLK_UART0_EB 14 +#define CLK_UART1_EB 15 +#define CLK_UART2_EB 16 +#define CLK_SIM0_32K_EB 17 +#define CLK_SPI0_LFIN_EB 18 +#define CLK_SPI1_LFIN_EB 19 +#define CLK_SPI2_LFIN_EB 20 +#define CLK_SPI3_LFIN_EB 21 +#define CLK_SDIO0_EB 22 +#define CLK_SDIO1_EB 23 +#define CLK_SDIO2_EB 24 +#define CLK_EMMC_EB 25 +#define CLK_SDIO0_32K_EB 26 +#define CLK_SDIO1_32K_EB 27 +#define CLK_SDIO2_32K_EB 28 +#define CLK_EMMC_32K_EB 29 +#define CLK_AP_APB_GATE_NUM (CLK_EMMC_32K_EB + 1) + +#define CLK_GPU_CORE_EB 0 +#define CLK_GPU_CORE 1 +#define CLK_GPU_MEM_EB 2 +#define CLK_GPU_MEM 3 +#define CLK_GPU_SYS_EB 4 +#define CLK_GPU_SYS 5 +#define CLK_GPU_CLK_NUM (CLK_GPU_SYS + 1) + +#define CLK_AUDCP_IIS0_EB 0 +#define CLK_AUDCP_IIS1_EB 1 +#define CLK_AUDCP_IIS2_EB 2 +#define CLK_AUDCP_UART_EB 3 +#define CLK_AUDCP_DMA_CP_EB 4 +#define CLK_AUDCP_DMA_AP_EB 5 +#define CLK_AUDCP_SRC48K_EB 6 +#define CLK_AUDCP_MCDT_EB 7 +#define CLK_AUDCP_VBCIFD_EB 8 +#define CLK_AUDCP_VBC_EB 9 +#define CLK_AUDCP_SPLK_EB 10 +#define CLK_AUDCP_ICU_EB 11 +#define CLK_AUDCP_DMA_AP_ASHB_EB 12 +#define CLK_AUDCP_DMA_CP_ASHB_EB 13 +#define CLK_AUDCP_AUD_EB 14 +#define CLK_AUDCP_VBC_24M_EB 15 +#define CLK_AUDCP_TMR_26M_EB 16 +#define CLK_AUDCP_DVFS_ASHB_EB 17 +#define CLK_AUDCP_AHB_GATE_NUM (CLK_AUDCP_DVFS_ASHB_EB + 1) + +#define CLK_AUDCP_WDG_EB 0 +#define CLK_AUDCP_RTC_WDG_EB 1 +#define CLK_AUDCP_TMR0_EB 2 +#define CLK_AUDCP_TMR1_EB 3 +#define CLK_AUDCP_APB_GATE_NUM (CLK_AUDCP_TMR1_EB + 1) + +#define CLK_ACORE0 0 +#define CLK_ACORE1 1 +#define CLK_ACORE2 2 +#define CLK_ACORE3 3 +#define CLK_ACORE4 4 +#define CLK_ACORE5 5 +#define CLK_PCORE0 6 +#define CLK_PCORE1 7 +#define CLK_SCU 8 +#define CLK_ACE 9 +#define CLK_PERIPH 10 +#define CLK_GIC 11 +#define CLK_ATB 12 +#define CLK_DEBUG_APB 13 +#define CLK_APCPU_SEC_NUM (CLK_DEBUG_APB + 1) + +#endif /* _DT_BINDINGS_CLK_UMS512_H_ */ diff --git a/include/dt-bindings/clock/ste-db8500-clkout.h b/include/dt-bindings/clock/ste-db8500-clkout.h new file mode 
100644 index 0000000000..ca07cb2bd1 --- /dev/null +++ b/include/dt-bindings/clock/ste-db8500-clkout.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __STE_CLK_DB8500_CLKOUT_H__ +#define __STE_CLK_DB8500_CLKOUT_H__ + +#define DB8500_CLKOUT_1 0 +#define DB8500_CLKOUT_2 1 + +#define DB8500_CLKOUT_SRC_CLK38M 0 +#define DB8500_CLKOUT_SRC_ACLK 1 +#define DB8500_CLKOUT_SRC_SYSCLK 2 +#define DB8500_CLKOUT_SRC_LCDCLK 3 +#define DB8500_CLKOUT_SRC_SDMMCCLK 4 +#define DB8500_CLKOUT_SRC_TVCLK 5 +#define DB8500_CLKOUT_SRC_TIMCLK 6 +#define DB8500_CLKOUT_SRC_CLK009 7 + +#endif diff --git a/include/dt-bindings/clock/stm32mp13-clks.h b/include/dt-bindings/clock/stm32mp13-clks.h new file mode 100644 index 0000000000..02befd25ed --- /dev/null +++ b/include/dt-bindings/clock/stm32mp13-clks.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0+ or BSD-3-Clause */ +/* + * Copyright (C) STMicroelectronics 2020 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. + */ + +#ifndef _DT_BINDINGS_STM32MP13_CLKS_H_ +#define _DT_BINDINGS_STM32MP13_CLKS_H_ + +/* OSCILLATOR clocks */ +#define CK_HSE 0 +#define CK_CSI 1 +#define CK_LSI 2 +#define CK_LSE 3 +#define CK_HSI 4 +#define CK_HSE_DIV2 5 + +/* PLL */ +#define PLL1 6 +#define PLL2 7 +#define PLL3 8 +#define PLL4 9 + +/* ODF */ +#define PLL1_P 10 +#define PLL1_Q 11 +#define PLL1_R 12 +#define PLL2_P 13 +#define PLL2_Q 14 +#define PLL2_R 15 +#define PLL3_P 16 +#define PLL3_Q 17 +#define PLL3_R 18 +#define PLL4_P 19 +#define PLL4_Q 20 +#define PLL4_R 21 + +#define PCLK1 22 +#define PCLK2 23 +#define PCLK3 24 +#define PCLK4 25 +#define PCLK5 26 +#define PCLK6 27 + +/* SYSTEM CLOCK */ +#define CK_PER 28 +#define CK_MPU 29 +#define CK_AXI 30 +#define CK_MLAHB 31 + +/* BASE TIMER */ +#define CK_TIMG1 32 +#define CK_TIMG2 33 +#define CK_TIMG3 34 + +/* AUX */ +#define RTC 35 + +/* TRACE & DEBUG clocks */ +#define CK_DBG 36 +#define CK_TRACE 37 + +/* MCO clocks */ +#define CK_MCO1 38 +#define CK_MCO2 39 + +/* IP clocks */ +#define SYSCFG 40 +#define VREF 41 +#define DTS 42 +#define PMBCTRL 43 +#define HDP 44 +#define IWDG2 45 +#define STGENRO 46 +#define USART1 47 +#define RTCAPB 48 +#define TZC 49 +#define TZPC 50 +#define IWDG1 51 +#define BSEC 52 +#define DMA1 53 +#define DMA2 54 +#define DMAMUX1 55 +#define DMAMUX2 56 +#define GPIOA 57 +#define GPIOB 58 +#define GPIOC 59 +#define GPIOD 60 +#define GPIOE 61 +#define GPIOF 62 +#define GPIOG 63 +#define GPIOH 64 +#define GPIOI 65 +#define CRYP1 66 +#define HASH1 67 +#define BKPSRAM 68 +#define MDMA 69 +#define CRC1 70 +#define USBH 71 +#define DMA3 72 +#define TSC 73 +#define PKA 74 +#define AXIMC 75 +#define MCE 76 +#define ETH1TX 77 +#define ETH2TX 78 +#define ETH1RX 79 +#define ETH2RX 80 +#define ETH1MAC 81 +#define ETH2MAC 82 +#define ETH1STP 83 +#define ETH2STP 84 + +/* IP clocks with parents */ +#define SDMMC1_K 85 +#define SDMMC2_K 86 +#define ADC1_K 87 +#define ADC2_K 88 +#define FMC_K 89 +#define QSPI_K 90 +#define RNG1_K 91 +#define USBPHY_K 92 +#define STGEN_K 93 +#define SPDIF_K 94 +#define SPI1_K 95 +#define SPI2_K 96 +#define SPI3_K 97 +#define SPI4_K 98 +#define SPI5_K 99 +#define I2C1_K 100 +#define I2C2_K 101 +#define I2C3_K 102 +#define I2C4_K 103 +#define I2C5_K 104 +#define TIM2_K 105 +#define TIM3_K 106 +#define TIM4_K 107 +#define TIM5_K 108 +#define TIM6_K 109 +#define TIM7_K 110 +#define TIM12_K 111 +#define TIM13_K 112 +#define TIM14_K 113 +#define TIM1_K 114 +#define TIM8_K 115 +#define TIM15_K 116 +#define TIM16_K 117 +#define TIM17_K 118 +#define 
LPTIM1_K 119 +#define LPTIM2_K 120 +#define LPTIM3_K 121 +#define LPTIM4_K 122 +#define LPTIM5_K 123 +#define USART1_K 124 +#define USART2_K 125 +#define USART3_K 126 +#define UART4_K 127 +#define UART5_K 128 +#define USART6_K 129 +#define UART7_K 130 +#define UART8_K 131 +#define DFSDM_K 132 +#define FDCAN_K 133 +#define SAI1_K 134 +#define SAI2_K 135 +#define ADFSDM_K 136 +#define USBO_K 137 +#define LTDC_PX 138 +#define ETH1CK_K 139 +#define ETH1PTP_K 140 +#define ETH2CK_K 141 +#define ETH2PTP_K 142 +#define DCMIPP_K 143 +#define SAES_K 144 +#define DTS_K 145 + +/* DDR */ +#define DDRC1 146 +#define DDRC1LP 147 +#define DDRC2 148 +#define DDRC2LP 149 +#define DDRPHYC 150 +#define DDRPHYCLP 151 +#define DDRCAPB 152 +#define DDRCAPBLP 153 +#define AXIDCG 154 +#define DDRPHYCAPB 155 +#define DDRPHYCAPBLP 156 +#define DDRPERFM 157 + +#define ADC1 158 +#define ADC2 159 +#define SAI1 160 +#define SAI2 161 + +#define STM32MP1_LAST_CLK 162 + +/* SCMI clock identifiers */ +#define CK_SCMI_HSE 0 +#define CK_SCMI_HSI 1 +#define CK_SCMI_CSI 2 +#define CK_SCMI_LSE 3 +#define CK_SCMI_LSI 4 +#define CK_SCMI_HSE_DIV2 5 +#define CK_SCMI_PLL2_Q 6 +#define CK_SCMI_PLL2_R 7 +#define CK_SCMI_PLL3_P 8 +#define CK_SCMI_PLL3_Q 9 +#define CK_SCMI_PLL3_R 10 +#define CK_SCMI_PLL4_P 11 +#define CK_SCMI_PLL4_Q 12 +#define CK_SCMI_PLL4_R 13 +#define CK_SCMI_MPU 14 +#define CK_SCMI_AXI 15 +#define CK_SCMI_MLAHB 16 +#define CK_SCMI_CKPER 17 +#define CK_SCMI_PCLK1 18 +#define CK_SCMI_PCLK2 19 +#define CK_SCMI_PCLK3 20 +#define CK_SCMI_PCLK4 21 +#define CK_SCMI_PCLK5 22 +#define CK_SCMI_PCLK6 23 +#define CK_SCMI_CKTIMG1 24 +#define CK_SCMI_CKTIMG2 25 +#define CK_SCMI_CKTIMG3 26 +#define CK_SCMI_RTC 27 +#define CK_SCMI_RTCAPB 28 + +#endif /* _DT_BINDINGS_STM32MP13_CLKS_H_ */ diff --git a/include/dt-bindings/clock/sunplus,sp7021-clkc.h b/include/dt-bindings/clock/sunplus,sp7021-clkc.h new file mode 100644 index 0000000000..cd84321eb2 --- /dev/null +++ b/include/dt-bindings/clock/sunplus,sp7021-clkc.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */ +#ifndef _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H +#define _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H + +/* gates */ +#define CLK_RTC 0 +#define CLK_OTPRX 1 +#define CLK_NOC 2 +#define CLK_BR 3 +#define CLK_SPIFL 4 +#define CLK_PERI0 5 +#define CLK_PERI1 6 +#define CLK_STC0 7 +#define CLK_STC_AV0 8 +#define CLK_STC_AV1 9 +#define CLK_STC_AV2 10 +#define CLK_UA0 11 +#define CLK_UA1 12 +#define CLK_UA2 13 +#define CLK_UA3 14 +#define CLK_UA4 15 +#define CLK_HWUA 16 +#define CLK_DDC0 17 +#define CLK_UADMA 18 +#define CLK_CBDMA0 19 +#define CLK_CBDMA1 20 +#define CLK_SPI_COMBO_0 21 +#define CLK_SPI_COMBO_1 22 +#define CLK_SPI_COMBO_2 23 +#define CLK_SPI_COMBO_3 24 +#define CLK_AUD 25 +#define CLK_USBC0 26 +#define CLK_USBC1 27 +#define CLK_UPHY0 28 +#define CLK_UPHY1 29 +#define CLK_I2CM0 30 +#define CLK_I2CM1 31 +#define CLK_I2CM2 32 +#define CLK_I2CM3 33 +#define CLK_PMC 34 +#define CLK_CARD_CTL0 35 +#define CLK_CARD_CTL1 36 +#define CLK_CARD_CTL4 37 +#define CLK_BCH 38 +#define CLK_DDFCH 39 +#define CLK_CSIIW0 40 +#define CLK_CSIIW1 41 +#define CLK_MIPICSI0 42 +#define CLK_MIPICSI1 43 +#define CLK_HDMI_TX 44 +#define CLK_VPOST 45 +#define CLK_TGEN 46 +#define CLK_DMIX 47 +#define CLK_TCON 48 +#define CLK_GPIO 49 +#define CLK_MAILBOX 50 +#define CLK_SPIND 51 +#define CLK_I2C2CBUS 52 +#define CLK_SEC 53 +#define CLK_DVE 54 +#define CLK_GPOST0 55 +#define CLK_OSD0 56 +#define CLK_DISP_PWM 57 +#define CLK_UADBG 58 +#define CLK_FIO_CTL 59 +#define CLK_FPGA 60 +#define CLK_L2SW 61 +#define CLK_ICM 62 +#define CLK_AXI_GLOBAL 63 + +/* plls */ +#define PLL_A 64 +#define PLL_E 65 +#define PLL_E_2P5 66 +#define PLL_E_25 67 +#define PLL_E_112P5 68 +#define PLL_F 69 +#define PLL_TV 70 +#define PLL_TV_A 71 +#define PLL_SYS 72 + +#define CLK_MAX 73 + +#endif diff --git a/include/dt-bindings/gce/mt8186-gce.h b/include/dt-bindings/gce/mt8186-gce.h new file mode 100644 index 0000000000..f12e3cb586 --- /dev/null +++ b/include/dt-bindings/gce/mt8186-gce.h @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2022 MediaTek Inc. 
+ * Author: Yongqiang Niu + */ + +#ifndef _DT_BINDINGS_GCE_MT8186_H +#define _DT_BINDINGS_GCE_MT8186_H + +/* assign timeout 0 also means default */ +#define CMDQ_NO_TIMEOUT 0xffffffff +#define CMDQ_TIMEOUT_DEFAULT 1000 + +/* GCE thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_1 1 +#define CMDQ_THR_PRIO_2 2 +#define CMDQ_THR_PRIO_3 3 +#define CMDQ_THR_PRIO_4 4 +#define CMDQ_THR_PRIO_5 5 +#define CMDQ_THR_PRIO_6 6 +#define CMDQ_THR_PRIO_HIGHEST 7 + +/* CPR count in 32bit register */ +#define GCE_CPR_COUNT 1312 + +/* GCE subsys table */ +#define SUBSYS_1300XXXX 0 +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 +#define SUBSYS_1502XXXX 4 +#define SUBSYS_1582XXXX 5 +#define SUBSYS_1B00XXXX 6 +#define SUBSYS_1C00XXXX 7 +#define SUBSYS_1C10XXXX 8 +#define SUBSYS_1000XXXX 9 +#define SUBSYS_1001XXXX 10 +#define SUBSYS_1020XXXX 11 +#define SUBSYS_1021XXXX 12 +#define SUBSYS_1022XXXX 13 +#define SUBSYS_1023XXXX 14 +#define SUBSYS_1060XXXX 15 +#define SUBSYS_1602XXXX 16 +#define SUBSYS_1608XXXX 17 +#define SUBSYS_1700XXXX 18 +#define SUBSYS_1701XXXX 19 +#define SUBSYS_1702XXXX 20 +#define SUBSYS_1703XXXX 21 +#define SUBSYS_1706XXXX 22 +#define SUBSYS_1A00XXXX 23 +#define SUBSYS_1A01XXXX 24 +#define SUBSYS_1A02XXXX 25 +#define SUBSYS_1A03XXXX 26 +#define SUBSYS_1A04XXXX 27 +#define SUBSYS_1A05XXXX 28 +#define SUBSYS_1A06XXXX 29 +#define SUBSYS_NO_SUPPORT 99 + +/* GCE General Purpose Register (GPR) support + * Leave note for scenario usage here + */ +/* GCE: write mask */ +#define GCE_GPR_R00 0x00 +#define GCE_GPR_R01 0x01 +/* MDP: P1: JPEG dest */ +#define GCE_GPR_R02 0x02 +#define GCE_GPR_R03 0x03 +/* MDP: PQ color */ +#define GCE_GPR_R04 0x04 +/* MDP: 2D sharpness */ +#define GCE_GPR_R05 0x05 +/* DISP: poll esd */ +#define GCE_GPR_R06 0x06 +#define GCE_GPR_R07 0x07 +/* MDP: P4: 2D sharpness dst */ +#define GCE_GPR_R08 0x08 +#define GCE_GPR_R09 0x09 +/* VCU: poll with timeout for GPR timer */ +#define GCE_GPR_R10 0x0A +#define GCE_GPR_R11 0x0B +/* CMDQ: debug */ +#define GCE_GPR_R12 0x0C +#define GCE_GPR_R13 0x0D +/* CMDQ: P7: debug */ +#define GCE_GPR_R14 0x0E +#define GCE_GPR_R15 0x0F + +/* GCE hardware events */ +/* VDEC */ +#define CMDQ_EVENT_LINE_COUNT_THRESHOLD_INTERRUPT 0 +#define CMDQ_EVENT_VDEC_INT 1 +#define CMDQ_EVENT_VDEC_PAUSE 2 +#define CMDQ_EVENT_VDEC_DEC_ERROR 3 +#define CMDQ_EVENT_MDEC_TIMEOUT 4 +#define CMDQ_EVENT_DRAM_ACCESS_DONE 5 +#define CMDQ_EVENT_INI_FETCH_RDY 6 +#define CMDQ_EVENT_PROCESS_FLAG 7 +#define CMDQ_EVENT_SEARCH_START_CODE_DONE 8 +#define CMDQ_EVENT_REF_REORDER_DONE 9 +#define CMDQ_EVENT_WP_TBLE_DONE 10 +#define CMDQ_EVENT_COUNT_SRAM_CLR_DONE 11 +#define CMDQ_EVENT_GCE_CNT_OP_THRESHOLD 15 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_0 16 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_1 17 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_2 18 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_3 19 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_4 20 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_5 21 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_6 22 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_7 23 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_8 24 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_9 25 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_10 26 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_11 27 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_12 28 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_13 29 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_14 30 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_15 31 +#define CMDQ_EVENT_WPE_GCE_FRAME_DONE 32 + +/* CAM */ +#define CMDQ_EVENT_ISP_FRAME_DONE_A 65 +#define 
CMDQ_EVENT_ISP_FRAME_DONE_B 66 +#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70 +#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71 +#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72 +#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73 +#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75 +#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76 +#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77 +#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78 +#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79 +#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80 +#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81 +#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82 +#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83 +#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84 +#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85 +#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86 +#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87 +#define CMDQ_EVENT_TG_OVRUN_A_INT 88 +#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89 +#define CMDQ_EVENT_TG_OVRUN_B_INT 90 +#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91 +#define CMDQ_EVENT_TG_OVRUN_M0_INT 94 +#define CMDQ_EVENT_R1_ERROR_M0_INT 95 +#define CMDQ_EVENT_TG_GRABERR_M0_INT 96 +#define CMDQ_EVENT_TG_GRABERR_A_INT 98 +#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99 +#define CMDQ_EVENT_TG_GRABERR_B_INT 100 +#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101 +/* VENC */ +#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129 +#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130 +#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131 +#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132 +#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133 +#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136 +#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137 +#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138 +/* IPE */ +#define CMDQ_EVENT_FDVT_DONE 161 +#define CMDQ_EVENT_FE_DONE 162 +#define CMDQ_EVENT_RSC_DONE 163 +#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 164 +#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 165 +/* IMG2 */ +#define CMDQ_EVENT_GCE_IMG2_EVENT0 193 +#define CMDQ_EVENT_GCE_IMG2_EVENT1 194 +#define CMDQ_EVENT_GCE_IMG2_EVENT2 195 +#define CMDQ_EVENT_GCE_IMG2_EVENT3 196 +#define CMDQ_EVENT_GCE_IMG2_EVENT4 197 +#define CMDQ_EVENT_GCE_IMG2_EVENT5 198 +#define CMDQ_EVENT_GCE_IMG2_EVENT6 199 +#define CMDQ_EVENT_GCE_IMG2_EVENT7 200 +#define CMDQ_EVENT_GCE_IMG2_EVENT8 201 +#define CMDQ_EVENT_GCE_IMG2_EVENT9 202 +#define CMDQ_EVENT_GCE_IMG2_EVENT10 203 +#define CMDQ_EVENT_GCE_IMG2_EVENT11 204 +#define CMDQ_EVENT_GCE_IMG2_EVENT12 205 +#define CMDQ_EVENT_GCE_IMG2_EVENT13 206 +#define CMDQ_EVENT_GCE_IMG2_EVENT14 207 +#define CMDQ_EVENT_GCE_IMG2_EVENT15 208 +#define CMDQ_EVENT_GCE_IMG2_EVENT16 209 +#define CMDQ_EVENT_GCE_IMG2_EVENT17 210 +#define CMDQ_EVENT_GCE_IMG2_EVENT18 211 +#define CMDQ_EVENT_GCE_IMG2_EVENT19 212 +#define CMDQ_EVENT_GCE_IMG2_EVENT20 213 +#define CMDQ_EVENT_GCE_IMG2_EVENT21 214 +#define CMDQ_EVENT_GCE_IMG2_EVENT22 215 +#define CMDQ_EVENT_GCE_IMG2_EVENT23 216 +/* IMG1 */ +#define CMDQ_EVENT_GCE_IMG1_EVENT0 225 +#define CMDQ_EVENT_GCE_IMG1_EVENT1 226 +#define CMDQ_EVENT_GCE_IMG1_EVENT2 227 +#define CMDQ_EVENT_GCE_IMG1_EVENT3 228 +#define CMDQ_EVENT_GCE_IMG1_EVENT4 229 +#define CMDQ_EVENT_GCE_IMG1_EVENT5 230 +#define CMDQ_EVENT_GCE_IMG1_EVENT6 231 +#define CMDQ_EVENT_GCE_IMG1_EVENT7 232 +#define CMDQ_EVENT_GCE_IMG1_EVENT8 233 +#define CMDQ_EVENT_GCE_IMG1_EVENT9 234 +#define CMDQ_EVENT_GCE_IMG1_EVENT10 235 +#define CMDQ_EVENT_GCE_IMG1_EVENT11 236 +#define CMDQ_EVENT_GCE_IMG1_EVENT12 237 +#define CMDQ_EVENT_GCE_IMG1_EVENT13 238 +#define CMDQ_EVENT_GCE_IMG1_EVENT14 239 +#define CMDQ_EVENT_GCE_IMG1_EVENT15 240 +#define CMDQ_EVENT_GCE_IMG1_EVENT16 241 +#define CMDQ_EVENT_GCE_IMG1_EVENT17 242 +#define CMDQ_EVENT_GCE_IMG1_EVENT18 243 
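+/*
+ * Editor's sketch, not part of the original patch: a hypothetical mailbox
+ * consumer using the GCE thread-priority constants defined near the top of
+ * this header. The &gce label, the node and the two-cell mbox specifier are
+ * assumed for illustration only.
+ *
+ *   #include <dt-bindings/gce/mt8186-gce.h>
+ *
+ *   mmsys: syscon@14000000 {
+ *           mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
+ *                    <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
+ *   };
+ */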
+#define CMDQ_EVENT_GCE_IMG1_EVENT19 244 +#define CMDQ_EVENT_GCE_IMG1_EVENT20 245 +#define CMDQ_EVENT_GCE_IMG1_EVENT21 246 +#define CMDQ_EVENT_GCE_IMG1_EVENT22 247 +#define CMDQ_EVENT_GCE_IMG1_EVENT23 248 +/* MDP */ +#define CMDQ_EVENT_MDP_RDMA0_SOF 256 +#define CMDQ_EVENT_MDP_RDMA1_SOF 257 +#define CMDQ_EVENT_MDP_AAL0_SOF 258 +#define CMDQ_EVENT_MDP_AAL1_SOF 259 +#define CMDQ_EVENT_MDP_HDR0_SOF 260 +#define CMDQ_EVENT_MDP_RSZ0_SOF 261 +#define CMDQ_EVENT_MDP_RSZ1_SOF 262 +#define CMDQ_EVENT_MDP_WROT0_SOF 263 +#define CMDQ_EVENT_MDP_WROT1_SOF 264 +#define CMDQ_EVENT_MDP_TDSHP0_SOF 265 +#define CMDQ_EVENT_MDP_TDSHP1_SOF 266 +#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 267 +#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 268 +#define CMDQ_EVENT_MDP_COLOR0_SOF 269 +#define CMDQ_EVENT_MDP_WROT3_FRAME_DONE 288 +#define CMDQ_EVENT_MDP_WROT2_FRAME_DONE 289 +#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290 +#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291 +#define CMDQ_EVENT_MDP_TDSHP3_FRAME_DONE 292 +#define CMDQ_EVENT_MDP_TDSHP2_FRAME_DONE 293 +#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294 +#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295 +#define CMDQ_EVENT_MDP_RSZ3_FRAME_DONE 296 +#define CMDQ_EVENT_MDP_RSZ2_FRAME_DONE 297 +#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 298 +#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 299 +#define CMDQ_EVENT_MDP_RDMA3_FRAME_DONE 300 +#define CMDQ_EVENT_MDP_RDMA2_FRAME_DONE 301 +#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 302 +#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 303 +#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 304 +#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 305 +#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 306 +#define CMDQ_EVENT_MDP_AAL3_FRAME_DONE 307 +#define CMDQ_EVENT_MDP_AAL2_FRAME_DONE 308 +#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 309 +#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 310 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335 +#define CMDQ_EVENT_MDP_WROT3_SW_RST_DONE_ENG_EVENT 336 +#define CMDQ_EVENT_MDP_WROT2_SW_RST_DONE_ENG_EVENT 337 +#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338 +#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339 +#define CMDQ_EVENT_MDP_RDMA3_SW_RST_DONE_ENG_EVENT 340 +#define CMDQ_EVENT_MDP_RDMA2_SW_RST_DONE_ENG_EVENT 341 +#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342 +#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343 +/* DISP */ +#define CMDQ_EVENT_DISP_OVL0_SOF 384 +#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385 +#define CMDQ_EVENT_DISP_RDMA0_SOF 386 +#define CMDQ_EVENT_DISP_RSZ0_SOF 387 +#define CMDQ_EVENT_DISP_COLOR0_SOF 388 +#define CMDQ_EVENT_DISP_CCORR0_SOF 389 +#define CMDQ_EVENT_DISP_CCORR1_SOF 390 +#define CMDQ_EVENT_DISP_AAL0_SOF 391 +#define CMDQ_EVENT_DISP_GAMMA0_SOF 392 +#define CMDQ_EVENT_DISP_POSTMASK0_SOF 393 +#define CMDQ_EVENT_DISP_DITHER0_SOF 394 +#define CMDQ_EVENT_DISP_CM0_SOF 
395
+#define CMDQ_EVENT_DISP_SPR0_SOF 396
+#define CMDQ_EVENT_DISP_DSC_WRAP0_SOF 397
+#define CMDQ_EVENT_DSI0_SOF 398
+#define CMDQ_EVENT_DISP_WDMA0_SOF 399
+#define CMDQ_EVENT_DISP_PWM0_SOF 400
+#define CMDQ_EVENT_DSI0_FRAME_DONE 410
+#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 411
+#define CMDQ_EVENT_DISP_SPR0_FRAME_DONE 412
+#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 413
+#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 414
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 415
+#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 416
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 417
+#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 418
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 420
+#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 421
+#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 422
+#define CMDQ_EVENT_DISP_CM0_FRAME_DONE 423
+#define CMDQ_EVENT_DISP_CCORR1_FRAME_DONE 424
+#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 425
+#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 426
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449
+#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450
+#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451
+#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453
+#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 456
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 457
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 458
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 459
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 460
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 461
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 462
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 463
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 464
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 465
+#define CMDQ_EVENT_OUT_EVENT_0 898
+
+/* CMDQ sw tokens
+ * The following definitions are GCE sw tokens which clients may use
+ * through the event operation API.
+ * Note that tokens 512 to 639 may be marked as secure.
+ */
+
+/* end of hw events and beginning of sw tokens */
+#define CMDQ_MAX_HW_EVENT 512
+
+/* The config thread notifies the trigger thread */
+#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 640
+/* The trigger thread notifies the config thread */
+#define CMDQ_SYNC_TOKEN_STREAM_EOF 641
+/* Block the trigger thread until the ESD check finishes.
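+ * Editor's note, an assumption drawn from the token names rather than from
+ * this patch: the display trigger loop would wait on CMDQ_SYNC_TOKEN_ESD_EOF
+ * below so that panel ESD recovery does not race a frame trigger.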
*/ +#define CMDQ_SYNC_TOKEN_ESD_EOF 642 +#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 643 +/* check CABC setup finish */ +#define CMDQ_SYNC_TOKEN_CABC_EOF 644 + +/* Notify normal CMDQ there are some secure task done + * MUST NOT CHANGE, this token sync with secure world + */ +#define CMDQ_SYNC_SECURE_THR_EOF 647 + +/* CMDQ use sw token */ +#define CMDQ_SYNC_TOKEN_USER_0 649 +#define CMDQ_SYNC_TOKEN_USER_1 650 +#define CMDQ_SYNC_TOKEN_POLL_MONITOR 651 +#define CMDQ_SYNC_TOKEN_TPR_LOCK 652 + +/* ISP sw token */ +#define CMDQ_SYNC_TOKEN_MSS 665 +#define CMDQ_SYNC_TOKEN_MSF 666 + +/* DISP sw token */ +#define CMDQ_SYNC_TOKEN_SODI 671 + +/* GPR access tokens (for register backup) + * There are 15 32-bit GPR, 3 GPR form a set + * (64-bit for address, 32-bit for value) + * MUST NOT CHANGE, these tokens sync with MDP + */ +#define CMDQ_SYNC_TOKEN_GPR_SET_0 700 +#define CMDQ_SYNC_TOKEN_GPR_SET_1 701 +#define CMDQ_SYNC_TOKEN_GPR_SET_2 702 +#define CMDQ_SYNC_TOKEN_GPR_SET_3 703 +#define CMDQ_SYNC_TOKEN_GPR_SET_4 704 + +/* Resource lock event to control resource in GCE thread */ +#define CMDQ_SYNC_RESOURCE_WROT0 710 +#define CMDQ_SYNC_RESOURCE_WROT1 711 + +/* event for gpr timer, used in sleep and poll with timeout */ +#define CMDQ_TOKEN_GPR_TIMER_R0 994 +#define CMDQ_TOKEN_GPR_TIMER_R1 995 +#define CMDQ_TOKEN_GPR_TIMER_R2 996 +#define CMDQ_TOKEN_GPR_TIMER_R3 997 +#define CMDQ_TOKEN_GPR_TIMER_R4 998 +#define CMDQ_TOKEN_GPR_TIMER_R5 999 +#define CMDQ_TOKEN_GPR_TIMER_R6 1000 +#define CMDQ_TOKEN_GPR_TIMER_R7 1001 +#define CMDQ_TOKEN_GPR_TIMER_R8 1002 +#define CMDQ_TOKEN_GPR_TIMER_R9 1003 +#define CMDQ_TOKEN_GPR_TIMER_R10 1004 +#define CMDQ_TOKEN_GPR_TIMER_R11 1005 +#define CMDQ_TOKEN_GPR_TIMER_R12 1006 +#define CMDQ_TOKEN_GPR_TIMER_R13 1007 +#define CMDQ_TOKEN_GPR_TIMER_R14 1008 +#define CMDQ_TOKEN_GPR_TIMER_R15 1009 + +#define CMDQ_EVENT_MAX 0x3FF +/* CMDQ sw tokens END */ + +#endif diff --git a/include/dt-bindings/interconnect/fsl,imx8mp.h b/include/dt-bindings/interconnect/fsl,imx8mp.h new file mode 100644 index 0000000000..7357d41752 --- /dev/null +++ b/include/dt-bindings/interconnect/fsl,imx8mp.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Interconnect framework driver for i.MX SoC + * + * Copyright 2022 NXP + * Peng Fan + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MP_H +#define __DT_BINDINGS_INTERCONNECT_IMX8MP_H + +#define IMX8MP_ICN_NOC 0 +#define IMX8MP_ICN_MAIN 1 +#define IMX8MP_ICS_DRAM 2 +#define IMX8MP_ICS_OCRAM 3 +#define IMX8MP_ICM_A53 4 +#define IMX8MP_ICM_SUPERMIX 5 +#define IMX8MP_ICM_GIC 6 +#define IMX8MP_ICM_MLMIX 7 + +#define IMX8MP_ICN_AUDIO 8 +#define IMX8MP_ICM_DSP 9 +#define IMX8MP_ICM_SDMA2PER 10 +#define IMX8MP_ICM_SDMA2BURST 11 +#define IMX8MP_ICM_SDMA3PER 12 +#define IMX8MP_ICM_SDMA3BURST 13 +#define IMX8MP_ICM_EDMA 14 + +#define IMX8MP_ICN_GPU 15 +#define IMX8MP_ICM_GPU2D 16 +#define IMX8MP_ICM_GPU3D 17 + +#define IMX8MP_ICN_HDMI 18 +#define IMX8MP_ICM_HRV 19 +#define IMX8MP_ICM_LCDIF_HDMI 20 +#define IMX8MP_ICM_HDCP 21 + +#define IMX8MP_ICN_HSIO 22 +#define IMX8MP_ICM_NOC_PCIE 23 +#define IMX8MP_ICM_USB1 24 +#define IMX8MP_ICM_USB2 25 +#define IMX8MP_ICM_PCIE 26 + +#define IMX8MP_ICN_MEDIA 27 +#define IMX8MP_ICM_LCDIF_RD 28 +#define IMX8MP_ICM_LCDIF_WR 29 +#define IMX8MP_ICM_ISI0 30 +#define IMX8MP_ICM_ISI1 31 +#define IMX8MP_ICM_ISI2 32 +#define IMX8MP_ICM_ISP0 33 +#define IMX8MP_ICM_ISP1 34 +#define IMX8MP_ICM_DWE 35 + +#define IMX8MP_ICN_VIDEO 36 +#define IMX8MP_ICM_VPU_G1 37 +#define IMX8MP_ICM_VPU_G2 38 +#define IMX8MP_ICM_VPU_H1 39 + +#endif /* 
__DT_BINDINGS_INTERCONNECT_IMX8MP_H */ diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h new file mode 100644 index 0000000000..a3e5fda7c1 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sc8280xp.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Linaro Ltd. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H + +/* aggre1_noc */ +#define MASTER_QSPI_0 0 +#define MASTER_QUP_1 1 +#define MASTER_QUP_2 2 +#define MASTER_A1NOC_CFG 3 +#define MASTER_IPA 4 +#define MASTER_EMAC_1 5 +#define MASTER_SDCC_4 6 +#define MASTER_UFS_MEM 7 +#define MASTER_USB3_0 8 +#define MASTER_USB3_1 9 +#define MASTER_USB3_MP 10 +#define MASTER_USB4_0 11 +#define MASTER_USB4_1 12 +#define SLAVE_A1NOC_SNOC 13 +#define SLAVE_USB_NOC_SNOC 14 +#define SLAVE_SERVICE_A1NOC 15 + +/* aggre2_noc */ +#define MASTER_QDSS_BAM 0 +#define MASTER_QUP_0 1 +#define MASTER_A2NOC_CFG 2 +#define MASTER_CRYPTO 3 +#define MASTER_SENSORS_PROC 4 +#define MASTER_SP 5 +#define MASTER_EMAC 6 +#define MASTER_PCIE_0 7 +#define MASTER_PCIE_1 8 +#define MASTER_PCIE_2A 9 +#define MASTER_PCIE_2B 10 +#define MASTER_PCIE_3A 11 +#define MASTER_PCIE_3B 12 +#define MASTER_PCIE_4 13 +#define MASTER_QDSS_ETR 14 +#define MASTER_SDCC_2 15 +#define MASTER_UFS_CARD 16 +#define SLAVE_A2NOC_SNOC 17 +#define SLAVE_ANOC_PCIE_GEM_NOC 18 +#define SLAVE_SERVICE_A2NOC 19 + +/* clk_virt */ +#define MASTER_IPA_CORE 0 +#define MASTER_QUP_CORE_0 1 +#define MASTER_QUP_CORE_1 2 +#define MASTER_QUP_CORE_2 3 +#define SLAVE_IPA_CORE 4 +#define SLAVE_QUP_CORE_0 5 +#define SLAVE_QUP_CORE_1 6 +#define SLAVE_QUP_CORE_2 7 + +/* config_noc */ +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define SLAVE_AHB2PHY_0 2 +#define SLAVE_AHB2PHY_1 3 +#define SLAVE_AHB2PHY_2 4 +#define SLAVE_AOSS 5 +#define SLAVE_APPSS 6 +#define SLAVE_CAMERA_CFG 7 +#define SLAVE_CLK_CTL 8 +#define SLAVE_CDSP_CFG 9 +#define SLAVE_CDSP1_CFG 10 +#define SLAVE_RBCPR_CX_CFG 11 +#define SLAVE_RBCPR_MMCX_CFG 12 +#define SLAVE_RBCPR_MX_CFG 13 +#define SLAVE_CPR_NSPCX 14 +#define SLAVE_CRYPTO_0_CFG 15 +#define SLAVE_CX_RDPM 16 +#define SLAVE_DCC_CFG 17 +#define SLAVE_DISPLAY_CFG 18 +#define SLAVE_DISPLAY1_CFG 19 +#define SLAVE_EMAC_CFG 20 +#define SLAVE_EMAC1_CFG 21 +#define SLAVE_GFX3D_CFG 22 +#define SLAVE_HWKM 23 +#define SLAVE_IMEM_CFG 24 +#define SLAVE_IPA_CFG 25 +#define SLAVE_IPC_ROUTER_CFG 26 +#define SLAVE_LPASS 27 +#define SLAVE_MX_RDPM 28 +#define SLAVE_MXC_RDPM 29 +#define SLAVE_PCIE_0_CFG 30 +#define SLAVE_PCIE_1_CFG 31 +#define SLAVE_PCIE_2A_CFG 32 +#define SLAVE_PCIE_2B_CFG 33 +#define SLAVE_PCIE_3A_CFG 34 +#define SLAVE_PCIE_3B_CFG 35 +#define SLAVE_PCIE_4_CFG 36 +#define SLAVE_PCIE_RSC_CFG 37 +#define SLAVE_PDM 38 +#define SLAVE_PIMEM_CFG 39 +#define SLAVE_PKA_WRAPPER_CFG 40 +#define SLAVE_PMU_WRAPPER_CFG 41 +#define SLAVE_QDSS_CFG 42 +#define SLAVE_QSPI_0 43 +#define SLAVE_QUP_0 44 +#define SLAVE_QUP_1 45 +#define SLAVE_QUP_2 46 +#define SLAVE_SDCC_2 47 +#define SLAVE_SDCC_4 48 +#define SLAVE_SECURITY 49 +#define SLAVE_SMMUV3_CFG 50 +#define SLAVE_SMSS_CFG 51 +#define SLAVE_SPSS_CFG 52 +#define SLAVE_TCSR 53 +#define SLAVE_TLMM 54 +#define SLAVE_UFS_CARD_CFG 55 +#define SLAVE_UFS_MEM_CFG 56 +#define SLAVE_USB3_0 57 +#define SLAVE_USB3_1 58 +#define SLAVE_USB3_MP 59 +#define SLAVE_USB4_0 60 +#define SLAVE_USB4_1 61 +#define 
SLAVE_VENUS_CFG 62 +#define SLAVE_VSENSE_CTRL_CFG 63 +#define SLAVE_VSENSE_CTRL_R_CFG 64 +#define SLAVE_A1NOC_CFG 65 +#define SLAVE_A2NOC_CFG 66 +#define SLAVE_ANOC_PCIE_BRIDGE_CFG 67 +#define SLAVE_DDRSS_CFG 68 +#define SLAVE_CNOC_MNOC_CFG 69 +#define SLAVE_SNOC_CFG 70 +#define SLAVE_SNOC_SF_BRIDGE_CFG 71 +#define SLAVE_IMEM 72 +#define SLAVE_PIMEM 73 +#define SLAVE_SERVICE_CNOC 74 +#define SLAVE_PCIE_0 75 +#define SLAVE_PCIE_1 76 +#define SLAVE_PCIE_2A 77 +#define SLAVE_PCIE_2B 78 +#define SLAVE_PCIE_3A 79 +#define SLAVE_PCIE_3B 80 +#define SLAVE_PCIE_4 81 +#define SLAVE_QDSS_STM 82 +#define SLAVE_SMSS 83 +#define SLAVE_TCU 84 + +/* dc_noc */ +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LLCC_CFG 1 +#define SLAVE_GEM_NOC_CFG 2 + +/* gem_noc */ +#define MASTER_GPU_TCU 0 +#define MASTER_PCIE_TCU 1 +#define MASTER_SYS_TCU 2 +#define MASTER_APPSS_PROC 3 +#define MASTER_COMPUTE_NOC 4 +#define MASTER_COMPUTE_NOC_1 5 +#define MASTER_GEM_NOC_CFG 6 +#define MASTER_GFX3D 7 +#define MASTER_MNOC_HF_MEM_NOC 8 +#define MASTER_MNOC_SF_MEM_NOC 9 +#define MASTER_ANOC_PCIE_GEM_NOC 10 +#define MASTER_SNOC_GC_MEM_NOC 11 +#define MASTER_SNOC_SF_MEM_NOC 12 +#define SLAVE_GEM_NOC_CNOC 13 +#define SLAVE_LLCC 14 +#define SLAVE_GEM_NOC_PCIE_CNOC 15 +#define SLAVE_SERVICE_GEM_NOC_1 16 +#define SLAVE_SERVICE_GEM_NOC_2 17 +#define SLAVE_SERVICE_GEM_NOC 18 + +/* lpass_ag_noc */ +#define MASTER_CNOC_LPASS_AG_NOC 0 +#define MASTER_LPASS_PROC 1 +#define SLAVE_LPASS_CORE_CFG 2 +#define SLAVE_LPASS_LPI_CFG 3 +#define SLAVE_LPASS_MPU_CFG 4 +#define SLAVE_LPASS_TOP_CFG 5 +#define SLAVE_LPASS_SNOC 6 +#define SLAVE_SERVICES_LPASS_AML_NOC 7 +#define SLAVE_SERVICE_LPASS_AG_NOC 8 + +/* mc_virt */ +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +/*mmss_noc */ +#define MASTER_CAMNOC_HF 0 +#define MASTER_MDP0 1 +#define MASTER_MDP1 2 +#define MASTER_MDP_CORE1_0 3 +#define MASTER_MDP_CORE1_1 4 +#define MASTER_CNOC_MNOC_CFG 5 +#define MASTER_ROTATOR 6 +#define MASTER_ROTATOR_1 7 +#define MASTER_VIDEO_P0 8 +#define MASTER_VIDEO_P1 9 +#define MASTER_VIDEO_PROC 10 +#define MASTER_CAMNOC_ICP 11 +#define MASTER_CAMNOC_SF 12 +#define SLAVE_MNOC_HF_MEM_NOC 13 +#define SLAVE_MNOC_SF_MEM_NOC 14 +#define SLAVE_SERVICE_MNOC 15 + +/* nspa_noc */ +#define MASTER_CDSP_NOC_CFG 0 +#define MASTER_CDSP_PROC 1 +#define SLAVE_CDSP_MEM_NOC 2 +#define SLAVE_NSP_XFR 3 +#define SLAVE_SERVICE_NSP_NOC 4 + +/* nspb_noc */ +#define MASTER_CDSPB_NOC_CFG 0 +#define MASTER_CDSP_PROC_B 1 +#define SLAVE_CDSPB_MEM_NOC 2 +#define SLAVE_NSPB_XFR 3 +#define SLAVE_SERVICE_NSPB_NOC 4 + +/* system_noc */ +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 +#define MASTER_USB_NOC_SNOC 2 +#define MASTER_LPASS_ANOC 3 +#define MASTER_SNOC_CFG 4 +#define MASTER_PIMEM 5 +#define MASTER_GIC 6 +#define SLAVE_SNOC_GEM_NOC_GC 7 +#define SLAVE_SNOC_GEM_NOC_SF 8 +#define SLAVE_SERVICE_SNOC 9 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sdx65.h b/include/dt-bindings/interconnect/qcom,sdx65.h new file mode 100644 index 0000000000..b25288aa7d --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdx65.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_TCU_0 0 +#define MASTER_SNOC_GC_MEM_NOC 1 +#define MASTER_APPSS_PROC 2 +#define SLAVE_LLCC 3 +#define SLAVE_MEM_NOC_SNOC 4 +#define SLAVE_MEM_NOC_PCIE_SNOC 5 + +#define MASTER_AUDIO 0 +#define MASTER_BLSP_1 1 +#define MASTER_QDSS_BAM 2 +#define MASTER_QPIC 3 +#define MASTER_SNOC_CFG 4 +#define MASTER_SPMI_FETCHER 5 +#define MASTER_ANOC_SNOC 6 +#define MASTER_IPA 7 +#define MASTER_MEM_NOC_SNOC 8 +#define MASTER_MEM_NOC_PCIE_SNOC 9 +#define MASTER_CRYPTO 10 +#define MASTER_IPA_PCIE 11 +#define MASTER_PCIE_0 12 +#define MASTER_QDSS_ETR 13 +#define MASTER_SDCC_1 14 +#define MASTER_USB3 15 +#define SLAVE_AOSS 16 +#define SLAVE_APPSS 17 +#define SLAVE_AUDIO 18 +#define SLAVE_BLSP_1 19 +#define SLAVE_CLK_CTL 20 +#define SLAVE_CRYPTO_0_CFG 21 +#define SLAVE_CNOC_DDRSS 22 +#define SLAVE_ECC_CFG 23 +#define SLAVE_IMEM_CFG 24 +#define SLAVE_IPA_CFG 25 +#define SLAVE_CNOC_MSS 26 +#define SLAVE_PCIE_PARF 27 +#define SLAVE_PDM 28 +#define SLAVE_PRNG 29 +#define SLAVE_QDSS_CFG 30 +#define SLAVE_QPIC 31 +#define SLAVE_SDCC_1 32 +#define SLAVE_SNOC_CFG 33 +#define SLAVE_SPMI_FETCHER 34 +#define SLAVE_SPMI_VGI_COEX 35 +#define SLAVE_TCSR 36 +#define SLAVE_TLMM 37 +#define SLAVE_USB3 38 +#define SLAVE_USB3_PHY_CFG 39 +#define SLAVE_ANOC_SNOC 40 +#define SLAVE_SNOC_MEM_NOC_GC 41 +#define SLAVE_IMEM 42 +#define SLAVE_SERVICE_SNOC 43 +#define SLAVE_PCIE_0 44 +#define SLAVE_QDSS_STM 45 +#define SLAVE_TCU 46 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sm6350.h b/include/dt-bindings/interconnect/qcom,sm6350.h new file mode 100644 index 0000000000..e662cede9a --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sm6350.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Qualcomm SM6350 interconnect IDs + * + * Copyright (C) 2022 Luca Weiss + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H + +#define MASTER_A1NOC_CFG 0 +#define MASTER_QUP_0 1 +#define MASTER_EMMC 2 +#define MASTER_UFS_MEM 3 +#define A1NOC_SNOC_SLV 4 +#define SLAVE_SERVICE_A1NOC 5 + +#define MASTER_A2NOC_CFG 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_QUP_1 2 +#define MASTER_CRYPTO_CORE_0 3 +#define MASTER_IPA 4 +#define MASTER_QDSS_ETR 5 +#define MASTER_SDCC_2 6 +#define MASTER_USB3 7 +#define A2NOC_SNOC_SLV 8 +#define SLAVE_SERVICE_A2NOC 9 + +#define MASTER_CAMNOC_HF0_UNCOMP 0 +#define MASTER_CAMNOC_ICP_UNCOMP 1 +#define MASTER_CAMNOC_SF_UNCOMP 2 +#define MASTER_QUP_CORE_0 3 +#define MASTER_QUP_CORE_1 4 +#define MASTER_LLCC 5 +#define SLAVE_CAMNOC_UNCOMP 6 +#define SLAVE_QUP_CORE_0 7 +#define SLAVE_QUP_CORE_1 8 +#define SLAVE_EBI_CH0 9 + +#define MASTER_NPU 0 +#define MASTER_NPU_PROC 1 +#define SLAVE_CDSP_GEM_NOC 2 + +#define SNOC_CNOC_MAS 0 +#define MASTER_QDSS_DAP 1 +#define SLAVE_A1NOC_CFG 2 +#define SLAVE_A2NOC_CFG 3 +#define SLAVE_AHB2PHY 4 +#define SLAVE_AHB2PHY_2 5 +#define SLAVE_AOSS 6 +#define SLAVE_BOOT_ROM 7 +#define SLAVE_CAMERA_CFG 8 +#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9 +#define SLAVE_CAMERA_RT_THROTTLE_CFG 10 +#define SLAVE_CLK_CTL 11 +#define SLAVE_RBCPR_CX_CFG 12 +#define SLAVE_RBCPR_MX_CFG 13 +#define SLAVE_CRYPTO_0_CFG 14 +#define SLAVE_DCC_CFG 15 +#define SLAVE_CNOC_DDRSS 16 +#define SLAVE_DISPLAY_CFG 17 +#define SLAVE_DISPLAY_THROTTLE_CFG 18 +#define SLAVE_EMMC_CFG 19 +#define SLAVE_GLM 20 +#define SLAVE_GRAPHICS_3D_CFG 21 +#define SLAVE_IMEM_CFG 22 
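+/*
+ * Editor's sketch, not part of the original patch: a hypothetical
+ * interconnect consumer pairing a MASTER_* id with a SLAVE_* id from the
+ * tables in this file. The provider labels (&aggre1_noc, &clk_virt), the
+ * node and the trailing tag cells are assumed for illustration only.
+ *
+ *   #include <dt-bindings/interconnect/qcom,sm6350.h>
+ *
+ *   ufs@1d84000 {
+ *           interconnects = <&aggre1_noc MASTER_UFS_MEM 0
+ *                            &clk_virt SLAVE_EBI_CH0 0>;
+ *           interconnect-names = "ufs-ddr";
+ *   };
+ */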
+#define SLAVE_IPA_CFG 23
+#define SLAVE_CNOC_MNOC_CFG 24
+#define SLAVE_CNOC_MSS 25
+#define SLAVE_NPU_CFG 26
+#define SLAVE_PDM 27
+#define SLAVE_PIMEM_CFG 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QM_CFG 31
+#define SLAVE_QM_MPU_CFG 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_SDCC_2 35
+#define SLAVE_SECURITY 36
+#define SLAVE_SNOC_CFG 37
+#define SLAVE_TCSR 38
+#define SLAVE_UFS_MEM_CFG 39
+#define SLAVE_USB3 40
+#define SLAVE_VENUS_CFG 41
+#define SLAVE_VENUS_THROTTLE_CFG 42
+#define SLAVE_VSENSE_CTRL_CFG 43
+#define SLAVE_SERVICE_CNOC 44
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_SNOC_GC_MEM_NOC 6
+#define MASTER_SNOC_SF_MEM_NOC 7
+#define MASTER_GRAPHICS_3D 8
+#define SLAVE_MCDMA_MS_MPU_CFG 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_SERVICE_GEM_NOC 13
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP_PORT0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_NOC_CFG 1
+#define SLAVE_NPU_CAL_DP0 2
+#define SLAVE_NPU_CP 3
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 4
+#define SLAVE_NPU_DPM 5
+#define SLAVE_ISENSE_CFG 6
+#define SLAVE_NPU_LLM_CFG 7
+#define SLAVE_NPU_TCM 8
+#define SLAVE_NPU_COMPUTE_NOC 9
+#define SLAVE_SERVICE_NPU_NOC 10
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#endif
diff --git a/include/dt-bindings/memory/mt8186-memory-port.h b/include/dt-bindings/memory/mt8186-memory-port.h
new file mode 100644
index 0000000000..2bc6e44330
--- /dev/null
+++ b/include/dt-bindings/memory/mt8186-memory-port.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ * Author: Anan Sun
+ * Author: Yong Wu
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB dma address space. We separate it into four
+ * ranges: 0 ~ 4G; 4G ~ 8G; 8G ~ 12G; 12G ~ 16G, and each master can be
+ * placed in any of these regions. BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules    dma-address-region           larbs-ports
+ * disp       0 ~ 4G                       larb0/1/2
+ * vcodec     4G ~ 8G                      larb4/7
+ * cam/mdp    8G ~ 12G                     the other larbs.
+ * N/A        12G ~ 16G
+ * CCU0       0x24000_0000 ~ 0x243ff_ffff  larb13: port 9/10
+ * CCU1       0x24400_0000 ~ 0x247ff_ffff  larb14: port 4/5
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- MMSYS */
+#define IOMMU_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
+#define IOMMU_PORT_L0_REVERSED MTK_M4U_ID(0, 1)
+#define IOMMU_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
+#define IOMMU_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 3)
+
+/* LARB 1 -- MMSYS */
+#define IOMMU_PORT_L1_DISP_RDMA1 MTK_M4U_ID(1, 0)
+#define IOMMU_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 1)
+#define IOMMU_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 2)
+#define IOMMU_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 3)
+#define IOMMU_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 4)
+
+/* LARB 2 -- MMSYS */
+#define IOMMU_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define IOMMU_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
+#define IOMMU_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
+#define IOMMU_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
+#define IOMMU_PORT_L2_DISP_FAKE0 MTK_M4U_ID(2, 4)
+
+/* LARB 4 -- VDEC */
+#define IOMMU_PORT_L4_HW_VDEC_MC_EXT MTK_M4U_ID(4, 0)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
+#define IOMMU_PORT_L4_HW_VDEC_PP_EXT MTK_M4U_ID(4, 2)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
+#define IOMMU_PORT_L4_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
+#define IOMMU_PORT_L4_HW_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
+#define IOMMU_PORT_L4_HW_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
+#define IOMMU_PORT_L4_HW_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
+#define IOMMU_PORT_L4_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(4, 10)
+#define IOMMU_PORT_L4_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 11)
+#define IOMMU_PORT_L4_HW_MINI_MDP_R0_EXT MTK_M4U_ID(4, 12)
+#define IOMMU_PORT_L4_HW_MINI_MDP_W0_EXT MTK_M4U_ID(4, 13)
+
+/* LARB 7 -- VENC */
+#define IOMMU_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
+#define IOMMU_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
+#define IOMMU_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
+#define IOMMU_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
+#define IOMMU_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
+#define IOMMU_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
+#define IOMMU_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
+#define IOMMU_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
+#define IOMMU_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
+#define IOMMU_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
+#define IOMMU_PORT_L7_JPGENC_C_RDMA MTK_M4U_ID(7, 10)
+#define IOMMU_PORT_L7_JPGENC_Q_TABLE MTK_M4U_ID(7, 11)
+#define IOMMU_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
+
+/* LARB 8 -- WPE */
+#define IOMMU_PORT_L8_WPE_RDMA_0 MTK_M4U_ID(8, 0)
+#define IOMMU_PORT_L8_WPE_RDMA_1 MTK_M4U_ID(8, 1)
+#define IOMMU_PORT_L8_WPE_WDMA_0 MTK_M4U_ID(8, 2)
+
+/* LARB 9 -- IMG-1 */
+#define IOMMU_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
+#define IOMMU_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
+#define IOMMU_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
+#define IOMMU_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
+#define IOMMU_PORT_L9_IMG_LCE_D1 MTK_M4U_ID(9, 4)
+#define IOMMU_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
+#define IOMMU_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
+#define IOMMU_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
+#define IOMMU_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
+#define IOMMU_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
+#define IOMMU_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
+#define IOMMU_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
+#define IOMMU_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
+#define IOMMU_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
+#define IOMMU_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA1 MTK_M4U_ID(9, 15)
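+/*
+ * Editor's sketch, not part of the original patch: MTK_M4U_ID() comes from
+ * the existing mtk-memory-port.h header included above and packs the larb
+ * index and the port index into a single cell. A hypothetical consumer
+ * (the &iommu_mm label, the node and its address are assumed for
+ * illustration only):
+ *
+ *   #include <dt-bindings/memory/mt8186-memory-port.h>
+ *
+ *   ovl0: ovl@14005000 {
+ *           iommus = <&iommu_mm IOMMU_PORT_L0_OVL_RDMA0>;
+ *   };
+ */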
+#define IOMMU_PORT_L9_IMG_WPE_RDMA0 MTK_M4U_ID(9, 16) +#define IOMMU_PORT_L9_IMG_WPE_WDMA MTK_M4U_ID(9, 17) +#define IOMMU_PORT_L9_IMG_MFB_RDMA0 MTK_M4U_ID(9, 18) +#define IOMMU_PORT_L9_IMG_MFB_RDMA1 MTK_M4U_ID(9, 19) +#define IOMMU_PORT_L9_IMG_MFB_RDMA2 MTK_M4U_ID(9, 20) +#define IOMMU_PORT_L9_IMG_MFB_RDMA3 MTK_M4U_ID(9, 21) +#define IOMMU_PORT_L9_IMG_MFB_RDMA4 MTK_M4U_ID(9, 22) +#define IOMMU_PORT_L9_IMG_MFB_RDMA5 MTK_M4U_ID(9, 23) +#define IOMMU_PORT_L9_IMG_MFB_WDMA0 MTK_M4U_ID(9, 24) +#define IOMMU_PORT_L9_IMG_MFB_WDMA1 MTK_M4U_ID(9, 25) +#define IOMMU_PORT_L9_IMG_RESERVE6 MTK_M4U_ID(9, 26) +#define IOMMU_PORT_L9_IMG_RESERVE7 MTK_M4U_ID(9, 27) +#define IOMMU_PORT_L9_IMG_RESERVE8 MTK_M4U_ID(9, 28) + +/* LARB 11 -- IMG-2 */ +#define IOMMU_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0) +#define IOMMU_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1) +#define IOMMU_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2) +#define IOMMU_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3) +#define IOMMU_PORT_L11_IMG_LCE_D1 MTK_M4U_ID(11, 4) +#define IOMMU_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5) +#define IOMMU_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6) +#define IOMMU_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7) +#define IOMMU_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8) +#define IOMMU_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9) +#define IOMMU_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10) +#define IOMMU_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11) +#define IOMMU_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12) +#define IOMMU_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13) +#define IOMMU_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14) +#define IOMMU_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15) +#define IOMMU_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16) +#define IOMMU_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17) +#define IOMMU_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18) +#define IOMMU_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19) +#define IOMMU_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20) +#define IOMMU_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21) +#define IOMMU_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22) +#define IOMMU_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23) +#define IOMMU_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24) +#define IOMMU_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25) +#define IOMMU_PORT_L11_IMG_RESERVE6 MTK_M4U_ID(11, 26) +#define IOMMU_PORT_L11_IMG_RESERVE7 MTK_M4U_ID(11, 27) +#define IOMMU_PORT_L11_IMG_RESERVE8 MTK_M4U_ID(11, 28) + +/* LARB 13 -- CAM */ +#define IOMMU_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0) +#define IOMMU_PORT_L13_CAM_MRAWO_0 MTK_M4U_ID(13, 1) +#define IOMMU_PORT_L13_CAM_MRAWO_1 MTK_M4U_ID(13, 2) +#define IOMMU_PORT_L13_CAM_CAMSV_4 MTK_M4U_ID(13, 6) +#define IOMMU_PORT_L13_CAM_CAMSV_5 MTK_M4U_ID(13, 7) +#define IOMMU_PORT_L13_CAM_CAMSV_6 MTK_M4U_ID(13, 8) +#define IOMMU_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9) +#define IOMMU_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10) +#define IOMMU_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11) + +/* LARB 14 -- CAM */ +#define IOMMU_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4) +#define IOMMU_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5) + +/* LARB 16 -- RAW-A */ +#define IOMMU_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0) +#define IOMMU_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1) +#define IOMMU_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2) +#define IOMMU_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3) +#define IOMMU_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4) +#define IOMMU_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5) +#define IOMMU_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6) +#define IOMMU_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7) +#define IOMMU_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8) +#define IOMMU_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 
9)
+#define IOMMU_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
+#define IOMMU_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
+#define IOMMU_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
+#define IOMMU_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
+#define IOMMU_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
+#define IOMMU_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
+#define IOMMU_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
+
+/* LARB 17 -- RAW-B */
+#define IOMMU_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
+#define IOMMU_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
+#define IOMMU_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
+#define IOMMU_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
+#define IOMMU_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
+#define IOMMU_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
+#define IOMMU_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
+#define IOMMU_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
+#define IOMMU_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
+#define IOMMU_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
+#define IOMMU_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
+#define IOMMU_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
+#define IOMMU_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
+#define IOMMU_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
+#define IOMMU_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
+#define IOMMU_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
+#define IOMMU_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
+
+/* LARB 19 -- IPE */
+#define IOMMU_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
+#define IOMMU_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
+#define IOMMU_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
+#define IOMMU_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
+
+/* LARB 20 -- IPE */
+#define IOMMU_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
+#define IOMMU_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
+#define IOMMU_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
+#define IOMMU_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
+#define IOMMU_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
+#define IOMMU_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8195-memory-port.h b/include/dt-bindings/memory/mt8195-memory-port.h
new file mode 100644
index 0000000000..70ba9f498e
--- /dev/null
+++ b/include/dt-bindings/memory/mt8195-memory-port.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, which we split into
+ * four ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. The masters can
+ * be placed in any one of these regions, BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules    dma-address-region                 larbs-ports
+ * disp       0 ~ 4G                             larb0/1/2/3
+ * vcodec     4G ~ 8G                            larb19/20/21/22/23/24
+ * cam/mdp    8G ~ 12G                           the other larbs
+ * N/A        12G ~ 16G
+ * CCU0       0x24000_0000 ~ 0x243ff_ffff        larb18: port 0/1
+ * CCU1       0x24400_0000 ~ 0x247ff_ffff        larb18: port 2/3
+ *
+ * This SoC has two MM IOMMU HWs; the larbs are connected as follows:
+ * iommu-vdo: larb0/2/5/7/9/10/11/13/17/19/21/24/25/28
+ * iommu-vpp: larb1/3/4/6/8/12/14/16/18/20/22/23/26/27
+ */
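+/*
+ * Illustrative usage only (node and label names below are placeholders,
+ * not part of this binding): a master must reference the IOMMU HW that
+ * its larb is wired to; per the list above, larb19 (venc) sits behind
+ * iommu-vdo, so a sketch of a consumer node could look like:
+ *
+ *	venc: video-encoder {
+ *		iommus = <&iommu_vdo M4U_PORT_L19_VENC_RCPU>,
+ *			 <&iommu_vdo M4U_PORT_L19_VENC_REC>;
+ *	};
+ */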
+/* MM IOMMU ports */
+/* larb0 */
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_DISP_OVL0_RDMA0 MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_DISP_OVL0_RDMA1 MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_OVL0_HDR MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_FAKE0 MTK_M4U_ID(1, 5)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(2, 4)
+
+/* larb3 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(3, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(3, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(3, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(3, 3)
+#define M4U_PORT_L3_HDR_DS MTK_M4U_ID(3, 4)
+#define M4U_PORT_L3_HDR_ADL MTK_M4U_ID(3, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(3, 6)
+
+/* larb4 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_FAKE MTK_M4U_ID(4, 4)
+
+/* larb5 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_FAKE MTK_M4U_ID(5, 7)
+
+/* larb6 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(6, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(6, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(6, 2)
+#define M4U_PORT_L6_FAKE MTK_M4U_ID(6, 3)
+
+/* larb7 */
+#define M4U_PORT_L7_IMG_WPE_RDMA0 MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_IMG_WPE_RDMA1 MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_IMG_WPE_WDMA0 MTK_M4U_ID(7, 2)
+
+/* larb8 */
+#define M4U_PORT_L8_IMG_WPE_RDMA0 MTK_M4U_ID(8, 0)
+#define M4U_PORT_L8_IMG_WPE_RDMA1 MTK_M4U_ID(8, 1)
+#define M4U_PORT_L8_IMG_WPE_WDMA0 MTK_M4U_ID(8, 2)
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_T1_A MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_T1_A MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_IMGCI_T1_A MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_SMTI_T1_A MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_TNCSTI_T1_A MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_TNCSTI_T4_A MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_YUVO_T1_A MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_TIMGO_T1_A MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_YUVO_T2_A MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMGI_T1_B MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_IMGBI_T1_B MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_IMGCI_T1_B MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_YUVO_T5_A MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_SMTI_T1_B
MTK_M4U_ID(9, 13) +#define M4U_PORT_L9_IMG_TNCSO_T1_A MTK_M4U_ID(9, 14) +#define M4U_PORT_L9_IMG_SMTO_T1_A MTK_M4U_ID(9, 15) +#define M4U_PORT_L9_IMG_TNCSTO_T1_A MTK_M4U_ID(9, 16) +#define M4U_PORT_L9_IMG_YUVO_T2_B MTK_M4U_ID(9, 17) +#define M4U_PORT_L9_IMG_YUVO_T5_B MTK_M4U_ID(9, 18) +#define M4U_PORT_L9_IMG_SMTO_T1_B MTK_M4U_ID(9, 19) + +/* larb10 */ +#define M4U_PORT_L10_IMG_IMGI_D1_A MTK_M4U_ID(10, 0) +#define M4U_PORT_L10_IMG_IMGCI_D1_A MTK_M4U_ID(10, 1) +#define M4U_PORT_L10_IMG_DEPI_D1_A MTK_M4U_ID(10, 2) +#define M4U_PORT_L10_IMG_DMGI_D1_A MTK_M4U_ID(10, 3) +#define M4U_PORT_L10_IMG_VIPI_D1_A MTK_M4U_ID(10, 4) +#define M4U_PORT_L10_IMG_TNRWI_D1_A MTK_M4U_ID(10, 5) +#define M4U_PORT_L10_IMG_RECI_D1_A MTK_M4U_ID(10, 6) +#define M4U_PORT_L10_IMG_SMTI_D1_A MTK_M4U_ID(10, 7) +#define M4U_PORT_L10_IMG_SMTI_D6_A MTK_M4U_ID(10, 8) +#define M4U_PORT_L10_IMG_PIMGI_P1_A MTK_M4U_ID(10, 9) +#define M4U_PORT_L10_IMG_PIMGBI_P1_A MTK_M4U_ID(10, 10) +#define M4U_PORT_L10_IMG_PIMGCI_P1_A MTK_M4U_ID(10, 11) +#define M4U_PORT_L10_IMG_PIMGI_P1_B MTK_M4U_ID(10, 12) +#define M4U_PORT_L10_IMG_PIMGBI_P1_B MTK_M4U_ID(10, 13) +#define M4U_PORT_L10_IMG_PIMGCI_P1_B MTK_M4U_ID(10, 14) +#define M4U_PORT_L10_IMG_IMG3O_D1_A MTK_M4U_ID(10, 15) +#define M4U_PORT_L10_IMG_IMG4O_D1_A MTK_M4U_ID(10, 16) +#define M4U_PORT_L10_IMG_IMG3CO_D1_A MTK_M4U_ID(10, 17) +#define M4U_PORT_L10_IMG_FEO_D1_A MTK_M4U_ID(10, 18) +#define M4U_PORT_L10_IMG_IMG2O_D1_A MTK_M4U_ID(10, 19) +#define M4U_PORT_L10_IMG_TNRWO_D1_A MTK_M4U_ID(10, 20) +#define M4U_PORT_L10_IMG_SMTO_D1_A MTK_M4U_ID(10, 21) +#define M4U_PORT_L10_IMG_WROT_P1_A MTK_M4U_ID(10, 22) +#define M4U_PORT_L10_IMG_WROT_P1_B MTK_M4U_ID(10, 23) + +/* larb11 */ +#define M4U_PORT_L11_IMG_WPE_EIS_RDMA0_A MTK_M4U_ID(11, 0) +#define M4U_PORT_L11_IMG_WPE_EIS_RDMA1_A MTK_M4U_ID(11, 1) +#define M4U_PORT_L11_IMG_WPE_EIS_WDMA0_A MTK_M4U_ID(11, 2) +#define M4U_PORT_L11_IMG_WPE_TNR_RDMA0_A MTK_M4U_ID(11, 3) +#define M4U_PORT_L11_IMG_WPE_TNR_RDMA1_A MTK_M4U_ID(11, 4) +#define M4U_PORT_L11_IMG_WPE_TNR_WDMA0_A MTK_M4U_ID(11, 5) +#define M4U_PORT_L11_IMG_WPE_EIS_CQ0_A MTK_M4U_ID(11, 6) +#define M4U_PORT_L11_IMG_WPE_EIS_CQ1_A MTK_M4U_ID(11, 7) +#define M4U_PORT_L11_IMG_WPE_TNR_CQ0_A MTK_M4U_ID(11, 8) +#define M4U_PORT_L11_IMG_WPE_TNR_CQ1_A MTK_M4U_ID(11, 9) + +/* larb12 */ +#define M4U_PORT_L12_IMG_FDVT_RDA MTK_M4U_ID(12, 0) +#define M4U_PORT_L12_IMG_FDVT_RDB MTK_M4U_ID(12, 1) +#define M4U_PORT_L12_IMG_FDVT_WRA MTK_M4U_ID(12, 2) +#define M4U_PORT_L12_IMG_FDVT_WRB MTK_M4U_ID(12, 3) +#define M4U_PORT_L12_IMG_ME_RDMA MTK_M4U_ID(12, 4) +#define M4U_PORT_L12_IMG_ME_WDMA MTK_M4U_ID(12, 5) +#define M4U_PORT_L12_IMG_DVS_RDMA MTK_M4U_ID(12, 6) +#define M4U_PORT_L12_IMG_DVS_WDMA MTK_M4U_ID(12, 7) +#define M4U_PORT_L12_IMG_DVP_RDMA MTK_M4U_ID(12, 8) +#define M4U_PORT_L12_IMG_DVP_WDMA MTK_M4U_ID(12, 9) + +/* larb13 */ +#define M4U_PORT_L13_CAM_CAMSV_CQI_E1 MTK_M4U_ID(13, 0) +#define M4U_PORT_L13_CAM_CAMSV_CQI_E2 MTK_M4U_ID(13, 1) +#define M4U_PORT_L13_CAM_GCAMSV_A_IMGO_0 MTK_M4U_ID(13, 2) +#define M4U_PORT_L13_CAM_SCAMSV_A_IMGO_0 MTK_M4U_ID(13, 3) +#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(13, 4) +#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(13, 5) +#define M4U_PORT_L13_CAM_GCAMSV_A_UFEO_0 MTK_M4U_ID(13, 6) +#define M4U_PORT_L13_CAM_GCAMSV_B_UFEO_0 MTK_M4U_ID(13, 7) +#define M4U_PORT_L13_CAM_PDAI_0 MTK_M4U_ID(13, 8) +#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 9) + +/* larb14 */ +#define M4U_PORT_L14_CAM_GCAMSV_A_IMGO_1 MTK_M4U_ID(14, 0) +#define M4U_PORT_L14_CAM_SCAMSV_A_IMGO_1 
MTK_M4U_ID(14, 1) +#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(14, 2) +#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(14, 3) +#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_0 MTK_M4U_ID(14, 4) +#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_1 MTK_M4U_ID(14, 5) +#define M4U_PORT_L14_CAM_IPUI MTK_M4U_ID(14, 6) +#define M4U_PORT_L14_CAM_IPU2I MTK_M4U_ID(14, 7) +#define M4U_PORT_L14_CAM_IPUO MTK_M4U_ID(14, 8) +#define M4U_PORT_L14_CAM_IPU2O MTK_M4U_ID(14, 9) +#define M4U_PORT_L14_CAM_IPU3O MTK_M4U_ID(14, 10) +#define M4U_PORT_L14_CAM_GCAMSV_A_UFEO_1 MTK_M4U_ID(14, 11) +#define M4U_PORT_L14_CAM_GCAMSV_B_UFEO_1 MTK_M4U_ID(14, 12) +#define M4U_PORT_L14_CAM_PDAI_1 MTK_M4U_ID(14, 13) +#define M4U_PORT_L14_CAM_PDAO MTK_M4U_ID(14, 14) + +/* larb15: null */ + +/* larb16 */ +#define M4U_PORT_L16_CAM_IMGO_R1 MTK_M4U_ID(16, 0) +#define M4U_PORT_L16_CAM_CQI_R1 MTK_M4U_ID(16, 1) +#define M4U_PORT_L16_CAM_CQI_R2 MTK_M4U_ID(16, 2) +#define M4U_PORT_L16_CAM_BPCI_R1 MTK_M4U_ID(16, 3) +#define M4U_PORT_L16_CAM_LSCI_R1 MTK_M4U_ID(16, 4) +#define M4U_PORT_L16_CAM_RAWI_R2 MTK_M4U_ID(16, 5) +#define M4U_PORT_L16_CAM_RAWI_R3 MTK_M4U_ID(16, 6) +#define M4U_PORT_L16_CAM_UFDI_R2 MTK_M4U_ID(16, 7) +#define M4U_PORT_L16_CAM_UFDI_R3 MTK_M4U_ID(16, 8) +#define M4U_PORT_L16_CAM_RAWI_R4 MTK_M4U_ID(16, 9) +#define M4U_PORT_L16_CAM_RAWI_R5 MTK_M4U_ID(16, 10) +#define M4U_PORT_L16_CAM_AAI_R1 MTK_M4U_ID(16, 11) +#define M4U_PORT_L16_CAM_FHO_R1 MTK_M4U_ID(16, 12) +#define M4U_PORT_L16_CAM_AAO_R1 MTK_M4U_ID(16, 13) +#define M4U_PORT_L16_CAM_TSFSO_R1 MTK_M4U_ID(16, 14) +#define M4U_PORT_L16_CAM_FLKO_R1 MTK_M4U_ID(16, 15) + +/* larb17 */ +#define M4U_PORT_L17_CAM_YUVO_R1 MTK_M4U_ID(17, 0) +#define M4U_PORT_L17_CAM_YUVO_R3 MTK_M4U_ID(17, 1) +#define M4U_PORT_L17_CAM_YUVCO_R1 MTK_M4U_ID(17, 2) +#define M4U_PORT_L17_CAM_YUVO_R2 MTK_M4U_ID(17, 3) +#define M4U_PORT_L17_CAM_RZH1N2TO_R1 MTK_M4U_ID(17, 4) +#define M4U_PORT_L17_CAM_DRZS4NO_R1 MTK_M4U_ID(17, 5) +#define M4U_PORT_L17_CAM_TNCSO_R1 MTK_M4U_ID(17, 6) + +/* larb18 */ +#define M4U_PORT_L18_CAM_CCUI MTK_M4U_ID(18, 0) +#define M4U_PORT_L18_CAM_CCUO MTK_M4U_ID(18, 1) +#define M4U_PORT_L18_CAM_CCUI2 MTK_M4U_ID(18, 2) +#define M4U_PORT_L18_CAM_CCUO2 MTK_M4U_ID(18, 3) + +/* larb19 */ +#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(19, 0) +#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(19, 1) +#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(19, 2) +#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(19, 3) +#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(19, 4) +#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(19, 5) +#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(19, 6) +#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(19, 7) +#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(19, 8) +#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(19, 9) +#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(19, 10) +#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(19, 11) +#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(19, 12) +#define M4U_PORT_L19_JPGDEC_WDMA0 MTK_M4U_ID(19, 13) +#define M4U_PORT_L19_JPGDEC_BSDMA0 MTK_M4U_ID(19, 14) +#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(19, 15) +#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(19, 16) +#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(19, 17) +#define M4U_PORT_L19_JPGDEC_WDMA1 MTK_M4U_ID(19, 18) +#define M4U_PORT_L19_JPGDEC_BSDMA1 MTK_M4U_ID(19, 19) +#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(19, 20) +#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(19, 21) +#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(19, 22) +#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(19, 23) 
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(19, 24) +#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(19, 25) +#define M4U_PORT_L19_VENC_SUB_R_CHROMA MTK_M4U_ID(19, 26) + +/* larb20 */ +#define M4U_PORT_L20_VENC_RCPU MTK_M4U_ID(20, 0) +#define M4U_PORT_L20_VENC_REC MTK_M4U_ID(20, 1) +#define M4U_PORT_L20_VENC_BSDMA MTK_M4U_ID(20, 2) +#define M4U_PORT_L20_VENC_SV_COMV MTK_M4U_ID(20, 3) +#define M4U_PORT_L20_VENC_RD_COMV MTK_M4U_ID(20, 4) +#define M4U_PORT_L20_VENC_NBM_RDMA MTK_M4U_ID(20, 5) +#define M4U_PORT_L20_VENC_NBM_RDMA_LITE MTK_M4U_ID(20, 6) +#define M4U_PORT_L20_JPGENC_Y_RDMA MTK_M4U_ID(20, 7) +#define M4U_PORT_L20_JPGENC_C_RDMA MTK_M4U_ID(20, 8) +#define M4U_PORT_L20_JPGENC_Q_TABLE MTK_M4U_ID(20, 9) +#define M4U_PORT_L20_VENC_SUB_W_LUMA MTK_M4U_ID(20, 10) +#define M4U_PORT_L20_VENC_FCS_NBM_RDMA MTK_M4U_ID(20, 11) +#define M4U_PORT_L20_JPGENC_BSDMA MTK_M4U_ID(20, 12) +#define M4U_PORT_L20_JPGDEC_WDMA0 MTK_M4U_ID(20, 13) +#define M4U_PORT_L20_JPGDEC_BSDMA0 MTK_M4U_ID(20, 14) +#define M4U_PORT_L20_VENC_NBM_WDMA MTK_M4U_ID(20, 15) +#define M4U_PORT_L20_VENC_NBM_WDMA_LITE MTK_M4U_ID(20, 16) +#define M4U_PORT_L20_VENC_FCS_NBM_WDMA MTK_M4U_ID(20, 17) +#define M4U_PORT_L20_JPGDEC_WDMA1 MTK_M4U_ID(20, 18) +#define M4U_PORT_L20_JPGDEC_BSDMA1 MTK_M4U_ID(20, 19) +#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(20, 20) +#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(20, 21) +#define M4U_PORT_L20_VENC_CUR_LUMA MTK_M4U_ID(20, 22) +#define M4U_PORT_L20_VENC_CUR_CHROMA MTK_M4U_ID(20, 23) +#define M4U_PORT_L20_VENC_REF_LUMA MTK_M4U_ID(20, 24) +#define M4U_PORT_L20_VENC_REF_CHROMA MTK_M4U_ID(20, 25) +#define M4U_PORT_L20_VENC_SUB_R_CHROMA MTK_M4U_ID(20, 26) + +/* larb21 */ +#define M4U_PORT_L21_VDEC_MC_EXT MTK_M4U_ID(21, 0) +#define M4U_PORT_L21_VDEC_UFO_EXT MTK_M4U_ID(21, 1) +#define M4U_PORT_L21_VDEC_PP_EXT MTK_M4U_ID(21, 2) +#define M4U_PORT_L21_VDEC_PRED_RD_EXT MTK_M4U_ID(21, 3) +#define M4U_PORT_L21_VDEC_PRED_WR_EXT MTK_M4U_ID(21, 4) +#define M4U_PORT_L21_VDEC_PPWRAP_EXT MTK_M4U_ID(21, 5) +#define M4U_PORT_L21_VDEC_TILE_EXT MTK_M4U_ID(21, 6) +#define M4U_PORT_L21_VDEC_VLD_EXT MTK_M4U_ID(21, 7) +#define M4U_PORT_L21_VDEC_VLD2_EXT MTK_M4U_ID(21, 8) +#define M4U_PORT_L21_VDEC_AVC_MV_EXT MTK_M4U_ID(21, 9) + +/* larb22 */ +#define M4U_PORT_L22_VDEC_MC_EXT MTK_M4U_ID(22, 0) +#define M4U_PORT_L22_VDEC_UFO_EXT MTK_M4U_ID(22, 1) +#define M4U_PORT_L22_VDEC_PP_EXT MTK_M4U_ID(22, 2) +#define M4U_PORT_L22_VDEC_PRED_RD_EXT MTK_M4U_ID(22, 3) +#define M4U_PORT_L22_VDEC_PRED_WR_EXT MTK_M4U_ID(22, 4) +#define M4U_PORT_L22_VDEC_PPWRAP_EXT MTK_M4U_ID(22, 5) +#define M4U_PORT_L22_VDEC_TILE_EXT MTK_M4U_ID(22, 6) +#define M4U_PORT_L22_VDEC_VLD_EXT MTK_M4U_ID(22, 7) +#define M4U_PORT_L22_VDEC_VLD2_EXT MTK_M4U_ID(22, 8) +#define M4U_PORT_L22_VDEC_AVC_MV_EXT MTK_M4U_ID(22, 9) + +/* larb23 */ +#define M4U_PORT_L23_VDEC_UFO_ENC_EXT MTK_M4U_ID(23, 0) +#define M4U_PORT_L23_VDEC_RDMA_EXT MTK_M4U_ID(23, 1) + +/* larb24 */ +#define M4U_PORT_L24_VDEC_LAT0_VLD_EXT MTK_M4U_ID(24, 0) +#define M4U_PORT_L24_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(24, 1) +#define M4U_PORT_L24_VDEC_LAT0_AVC_MC_EXT MTK_M4U_ID(24, 2) +#define M4U_PORT_L24_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(24, 3) +#define M4U_PORT_L24_VDEC_LAT0_TILE_EXT MTK_M4U_ID(24, 4) +#define M4U_PORT_L24_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(24, 5) +#define M4U_PORT_L24_VDEC_LAT1_VLD_EXT MTK_M4U_ID(24, 6) +#define M4U_PORT_L24_VDEC_LAT1_VLD2_EXT MTK_M4U_ID(24, 7) +#define M4U_PORT_L24_VDEC_LAT1_AVC_MC_EXT MTK_M4U_ID(24, 8) +#define M4U_PORT_L24_VDEC_LAT1_PRED_RD_EXT MTK_M4U_ID(24, 9) 
+#define M4U_PORT_L24_VDEC_LAT1_TILE_EXT MTK_M4U_ID(24, 10) +#define M4U_PORT_L24_VDEC_LAT1_WDMA_EXT MTK_M4U_ID(24, 11) + +/* larb25 */ +#define M4U_PORT_L25_CAM_MRAW0_LSCI_M1 MTK_M4U_ID(25, 0) +#define M4U_PORT_L25_CAM_MRAW0_CQI_M1 MTK_M4U_ID(25, 1) +#define M4U_PORT_L25_CAM_MRAW0_CQI_M2 MTK_M4U_ID(25, 2) +#define M4U_PORT_L25_CAM_MRAW0_IMGO_M1 MTK_M4U_ID(25, 3) +#define M4U_PORT_L25_CAM_MRAW0_IMGBO_M1 MTK_M4U_ID(25, 4) +#define M4U_PORT_L25_CAM_MRAW2_LSCI_M1 MTK_M4U_ID(25, 5) +#define M4U_PORT_L25_CAM_MRAW2_CQI_M1 MTK_M4U_ID(25, 6) +#define M4U_PORT_L25_CAM_MRAW2_CQI_M2 MTK_M4U_ID(25, 7) +#define M4U_PORT_L25_CAM_MRAW2_IMGO_M1 MTK_M4U_ID(25, 8) +#define M4U_PORT_L25_CAM_MRAW2_IMGBO_M1 MTK_M4U_ID(25, 9) +#define M4U_PORT_L25_CAM_MRAW0_AFO_M1 MTK_M4U_ID(25, 10) +#define M4U_PORT_L25_CAM_MRAW2_AFO_M1 MTK_M4U_ID(25, 11) + +/* larb26 */ +#define M4U_PORT_L26_CAM_MRAW1_LSCI_M1 MTK_M4U_ID(26, 0) +#define M4U_PORT_L26_CAM_MRAW1_CQI_M1 MTK_M4U_ID(26, 1) +#define M4U_PORT_L26_CAM_MRAW1_CQI_M2 MTK_M4U_ID(26, 2) +#define M4U_PORT_L26_CAM_MRAW1_IMGO_M1 MTK_M4U_ID(26, 3) +#define M4U_PORT_L26_CAM_MRAW1_IMGBO_M1 MTK_M4U_ID(26, 4) +#define M4U_PORT_L26_CAM_MRAW3_LSCI_M1 MTK_M4U_ID(26, 5) +#define M4U_PORT_L26_CAM_MRAW3_CQI_M1 MTK_M4U_ID(26, 6) +#define M4U_PORT_L26_CAM_MRAW3_CQI_M2 MTK_M4U_ID(26, 7) +#define M4U_PORT_L26_CAM_MRAW3_IMGO_M1 MTK_M4U_ID(26, 8) +#define M4U_PORT_L26_CAM_MRAW3_IMGBO_M1 MTK_M4U_ID(26, 9) +#define M4U_PORT_L26_CAM_MRAW1_AFO_M1 MTK_M4U_ID(26, 10) +#define M4U_PORT_L26_CAM_MRAW3_AFO_M1 MTK_M4U_ID(26, 11) + +/* larb27 */ +#define M4U_PORT_L27_CAM_IMGO_R1 MTK_M4U_ID(27, 0) +#define M4U_PORT_L27_CAM_CQI_R1 MTK_M4U_ID(27, 1) +#define M4U_PORT_L27_CAM_CQI_R2 MTK_M4U_ID(27, 2) +#define M4U_PORT_L27_CAM_BPCI_R1 MTK_M4U_ID(27, 3) +#define M4U_PORT_L27_CAM_LSCI_R1 MTK_M4U_ID(27, 4) +#define M4U_PORT_L27_CAM_RAWI_R2 MTK_M4U_ID(27, 5) +#define M4U_PORT_L27_CAM_RAWI_R3 MTK_M4U_ID(27, 6) +#define M4U_PORT_L27_CAM_UFDI_R2 MTK_M4U_ID(27, 7) +#define M4U_PORT_L27_CAM_UFDI_R3 MTK_M4U_ID(27, 8) +#define M4U_PORT_L27_CAM_RAWI_R4 MTK_M4U_ID(27, 9) +#define M4U_PORT_L27_CAM_RAWI_R5 MTK_M4U_ID(27, 10) +#define M4U_PORT_L27_CAM_AAI_R1 MTK_M4U_ID(27, 11) +#define M4U_PORT_L27_CAM_FHO_R1 MTK_M4U_ID(27, 12) +#define M4U_PORT_L27_CAM_AAO_R1 MTK_M4U_ID(27, 13) +#define M4U_PORT_L27_CAM_TSFSO_R1 MTK_M4U_ID(27, 14) +#define M4U_PORT_L27_CAM_FLKO_R1 MTK_M4U_ID(27, 15) + +/* larb28 */ +#define M4U_PORT_L28_CAM_YUVO_R1 MTK_M4U_ID(28, 0) +#define M4U_PORT_L28_CAM_YUVO_R3 MTK_M4U_ID(28, 1) +#define M4U_PORT_L28_CAM_YUVCO_R1 MTK_M4U_ID(28, 2) +#define M4U_PORT_L28_CAM_YUVO_R2 MTK_M4U_ID(28, 3) +#define M4U_PORT_L28_CAM_RZH1N2TO_R1 MTK_M4U_ID(28, 4) +#define M4U_PORT_L28_CAM_DRZS4NO_R1 MTK_M4U_ID(28, 5) +#define M4U_PORT_L28_CAM_TNCSO_R1 MTK_M4U_ID(28, 6) + +/* Infra iommu ports */ +/* PCIe1: read: BIT16; write BIT17. */ +#define IOMMU_PORT_INFRA_PCIE1 MTK_IFAIOMMU_PERI_ID(16) +/* PCIe0: read: BIT18; write BIT19. 
*/
+#define IOMMU_PORT_INFRA_PCIE0 MTK_IFAIOMMU_PERI_ID(18)
+#define IOMMU_PORT_INFRA_SSUSB_P3_R MTK_IFAIOMMU_PERI_ID(20)
+#define IOMMU_PORT_INFRA_SSUSB_P3_W MTK_IFAIOMMU_PERI_ID(21)
+#define IOMMU_PORT_INFRA_SSUSB_P2_R MTK_IFAIOMMU_PERI_ID(22)
+#define IOMMU_PORT_INFRA_SSUSB_P2_W MTK_IFAIOMMU_PERI_ID(23)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_R MTK_IFAIOMMU_PERI_ID(24)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_W MTK_IFAIOMMU_PERI_ID(25)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_R MTK_IFAIOMMU_PERI_ID(26)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_W MTK_IFAIOMMU_PERI_ID(27)
+#define IOMMU_PORT_INFRA_SSUSB2_R MTK_IFAIOMMU_PERI_ID(28)
+#define IOMMU_PORT_INFRA_SSUSB2_W MTK_IFAIOMMU_PERI_ID(29)
+#define IOMMU_PORT_INFRA_SSUSB_R MTK_IFAIOMMU_PERI_ID(30)
+#define IOMMU_PORT_INFRA_SSUSB_W MTK_IFAIOMMU_PERI_ID(31)
+
+#endif
diff --git a/include/dt-bindings/mfd/cros_ec.h b/include/dt-bindings/mfd/cros_ec.h
new file mode 100644
index 0000000000..3b29cd0495
--- /dev/null
+++ b/include/dt-bindings/mfd/cros_ec.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * DTS binding definitions used for the Chromium OS Embedded Controller.
+ *
+ * Copyright (c) 2022 The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_MFD_CROS_EC_H
+#define _DT_BINDINGS_MFD_CROS_EC_H
+
+/* Typed channel for keyboard backlight. */
+#define CROS_EC_PWM_DT_KB_LIGHT 0
+/* Typed channel for display backlight. */
+#define CROS_EC_PWM_DT_DISPLAY_LIGHT 1
+/* Number of typed channels. */
+#define CROS_EC_PWM_DT_COUNT 2
+
+#endif
diff --git a/include/dt-bindings/net/pcs-rzn1-miic.h b/include/dt-bindings/net/pcs-rzn1-miic.h
new file mode 100644
index 0000000000..784782eaec
--- /dev/null
+++ b/include/dt-bindings/net/pcs-rzn1-miic.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef _DT_BINDINGS_PCS_RZN1_MIIC
+#define _DT_BINDINGS_PCS_RZN1_MIIC
+
+/*
+ * Refer to the datasheet [1], section 8.2.1 "Internal Connection of Ethernet
+ * Ports", to check the available combinations.
+ *
+ * [1] REN_r01uh0750ej0140-rzn1-introduction_MAT_20210228.pdf
+ */
+
+#define MIIC_GMAC1_PORT 0
+#define MIIC_GMAC2_PORT 1
+#define MIIC_RTOS_PORT 2
+#define MIIC_SERCOS_PORTA 3
+#define MIIC_SERCOS_PORTB 4
+#define MIIC_ETHERCAT_PORTA 5
+#define MIIC_ETHERCAT_PORTB 6
+#define MIIC_ETHERCAT_PORTC 7
+#define MIIC_SWITCH_PORTA 8
+#define MIIC_SWITCH_PORTB 9
+#define MIIC_SWITCH_PORTC 10
+#define MIIC_SWITCH_PORTD 11
+#define MIIC_HSR_PORTA 12
+#define MIIC_HSR_PORTB 13
+
+#endif
diff --git a/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
new file mode 100644
index 0000000000..f570b23165
--- /dev/null
+++ b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+#define _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+
+/*
+ * This needs to be a multiple of 4, as the NVMEM memory is registered
+ * with stride = 4.
+ */
+#define OTP_PKT(id) ((id) * 4)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/mt6795-pinfunc.h b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
new file mode 100644
index 0000000000..bd1c5a9fad
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
@@ -0,0 +1,908 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DTS_MT6795_PINFUNC_H
+#define __DTS_MT6795_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
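+/*
+ * Each PINMUX_GPIOx__FUNC_y value below encodes the pin number and the mux
+ * mode in one cell: MTK_PIN_NO(x) is ((x) << 8) in
+ * <dt-bindings/pinctrl/mt65xx.h>, and the low bits select the function.
+ * Illustrative usage in a pin configuration node (the node names here are
+ * examples only, not part of this binding):
+ *
+ *	uart0_pins: uart0-pins {
+ *		pins-rx-tx {
+ *			pinmux = <PINMUX_GPIO113__FUNC_URXD0>,
+ *				 <PINMUX_GPIO114__FUNC_UTXD0>;
+ *		};
+ *	};
+ */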
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_IRDA_PDN (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S1_WS (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDD_TMS (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_UTXD0 (MTK_PIN_NO(0) | 5)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_IRDA_RXD (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S1_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_SDA4 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_TDD_TCK (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_URXD0 (MTK_PIN_NO(1) | 5)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_IRDA_TXD (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S1_MCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_SCL4 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_TDD_TDI (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_UTXD3 (MTK_PIN_NO(2) | 5)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_DSI1_TE (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S1_DO_1 (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_SDA3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_TDD_TDO (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_URXD3 (MTK_PIN_NO(3) | 5)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_DISP_PWM1 (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO_2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SCL3 (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TDD_TRSTN (MTK_PIN_NO(4) | 4)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_WS (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SPI_CK_3 (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(5) | 5)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_BCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_SPI_MI_3 (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(6) | 5)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_PCM1_DI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI_1 (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SPI_MO_3 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(7) | 5)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PCM1_DO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI_2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SPI_CS_3 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(8) | 5)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_I2S2_MCK (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(9) | 5)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_WS (MTK_PIN_NO(10) | 2)
+
+#define
PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 2) + +#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define PINMUX_GPIO12__FUNC_I2S0_MCK (MTK_PIN_NO(12) | 2) + +#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define PINMUX_GPIO13__FUNC_I2S0_DO (MTK_PIN_NO(13) | 2) + +#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define PINMUX_GPIO14__FUNC_I2S0_DI (MTK_PIN_NO(14) | 2) +#define PINMUX_GPIO14__FUNC_DISP_PWM1 (MTK_PIN_NO(14) | 3) +#define PINMUX_GPIO14__FUNC_PWM4 (MTK_PIN_NO(14) | 4) +#define PINMUX_GPIO14__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5) +#define PINMUX_GPIO14__FUNC_I2S1_BCK (MTK_PIN_NO(14) | 6) + +#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define PINMUX_GPIO15__FUNC_DSI1_TE (MTK_PIN_NO(15) | 2) +#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3) +#define PINMUX_GPIO15__FUNC_PWM5 (MTK_PIN_NO(15) | 4) +#define PINMUX_GPIO15__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5) +#define PINMUX_GPIO15__FUNC_I2S1_MCK (MTK_PIN_NO(15) | 6) + +#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define PINMUX_GPIO16__FUNC_IDDIG (MTK_PIN_NO(16) | 1) +#define PINMUX_GPIO16__FUNC_FLASH (MTK_PIN_NO(16) | 2) +#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 3) +#define PINMUX_GPIO16__FUNC_PWM5 (MTK_PIN_NO(16) | 4) + +#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define PINMUX_GPIO17__FUNC_SIM1_SCLK (MTK_PIN_NO(17) | 1) +#define PINMUX_GPIO17__FUNC_SIM2_SCLK (MTK_PIN_NO(17) | 2) + +#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define PINMUX_GPIO18__FUNC_SIM1_SRST (MTK_PIN_NO(18) | 1) +#define PINMUX_GPIO18__FUNC_SIM2_SRST (MTK_PIN_NO(18) | 2) + +#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define PINMUX_GPIO19__FUNC_SIM1_SDAT (MTK_PIN_NO(19) | 1) +#define PINMUX_GPIO19__FUNC_SIM2_SDAT (MTK_PIN_NO(19) | 2) + +#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define PINMUX_GPIO20__FUNC_SIM2_SCLK (MTK_PIN_NO(20) | 1) +#define PINMUX_GPIO20__FUNC_SIM1_SCLK (MTK_PIN_NO(20) | 2) + +#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define PINMUX_GPIO21__FUNC_SIM2_SRST (MTK_PIN_NO(21) | 1) +#define PINMUX_GPIO21__FUNC_SIM1_SRST (MTK_PIN_NO(21) | 2) + +#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define PINMUX_GPIO22__FUNC_SIM2_SDAT (MTK_PIN_NO(22) | 1) +#define PINMUX_GPIO22__FUNC_SIM1_SDAT (MTK_PIN_NO(22) | 2) + +#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define PINMUX_GPIO23__FUNC_MSDC3_DAT0 (MTK_PIN_NO(23) | 1) + +#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define PINMUX_GPIO24__FUNC_MSDC3_DAT1 (MTK_PIN_NO(24) | 1) + +#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define PINMUX_GPIO25__FUNC_MSDC3_DAT2 (MTK_PIN_NO(25) | 1) + +#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define PINMUX_GPIO26__FUNC_MSDC3_DAT3 (MTK_PIN_NO(26) | 1) + +#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define PINMUX_GPIO27__FUNC_MSDC3_CLK (MTK_PIN_NO(27) | 1) + +#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define PINMUX_GPIO28__FUNC_MSDC3_CMD (MTK_PIN_NO(28) | 1) + +#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define PINMUX_GPIO29__FUNC_PTA_RXD (MTK_PIN_NO(29) | 1) +#define PINMUX_GPIO29__FUNC_UCTS2 (MTK_PIN_NO(29) | 2) + +#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define PINMUX_GPIO30__FUNC_PTA_TXD (MTK_PIN_NO(30) | 1) +#define PINMUX_GPIO30__FUNC_URTS2 (MTK_PIN_NO(30) | 2) + +#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define 
PINMUX_GPIO31__FUNC_URXD2 (MTK_PIN_NO(31) | 1) +#define PINMUX_GPIO31__FUNC_UTXD2 (MTK_PIN_NO(31) | 2) + +#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define PINMUX_GPIO32__FUNC_UTXD2 (MTK_PIN_NO(32) | 1) +#define PINMUX_GPIO32__FUNC_URXD2 (MTK_PIN_NO(32) | 2) + +#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define PINMUX_GPIO33__FUNC_MRG_CLK (MTK_PIN_NO(33) | 1) +#define PINMUX_GPIO33__FUNC_PCM0_CLK (MTK_PIN_NO(33) | 2) + +#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define PINMUX_GPIO34__FUNC_MRG_DI (MTK_PIN_NO(34) | 1) +#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 2) + +#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define PINMUX_GPIO35__FUNC_MRG_DO (MTK_PIN_NO(35) | 1) +#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 2) + +#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define PINMUX_GPIO36__FUNC_MRG_SYNC (MTK_PIN_NO(36) | 1) +#define PINMUX_GPIO36__FUNC_PCM0_SYNC (MTK_PIN_NO(36) | 2) + +#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define PINMUX_GPIO37__FUNC_GPS_SYNC (MTK_PIN_NO(37) | 1) + +#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define PINMUX_GPIO38__FUNC_DAIRSTB (MTK_PIN_NO(38) | 1) + +#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define PINMUX_GPIO39__FUNC_CM2MCLK (MTK_PIN_NO(39) | 1) + +#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define PINMUX_GPIO40__FUNC_CM3MCLK (MTK_PIN_NO(40) | 1) +#define PINMUX_GPIO40__FUNC_IRDA_PDN (MTK_PIN_NO(40) | 2) +#define PINMUX_GPIO40__FUNC_PWM6 (MTK_PIN_NO(40) | 3) +#define PINMUX_GPIO40__FUNC_I2S1_WS (MTK_PIN_NO(40) | 4) + +#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define PINMUX_GPIO41__FUNC_CMPCLK (MTK_PIN_NO(41) | 1) +#define PINMUX_GPIO41__FUNC_CMCSK (MTK_PIN_NO(41) | 2) +#define PINMUX_GPIO41__FUNC_FLASH (MTK_PIN_NO(41) | 3) + +#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define PINMUX_GPIO42__FUNC_CMMCLK (MTK_PIN_NO(42) | 1) + +#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define PINMUX_GPIO43__FUNC_SDA2 (MTK_PIN_NO(43) | 1) + +#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define PINMUX_GPIO44__FUNC_SCL2 (MTK_PIN_NO(44) | 1) + +#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define PINMUX_GPIO45__FUNC_SDA0 (MTK_PIN_NO(45) | 1) + +#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define PINMUX_GPIO46__FUNC_SCL0 (MTK_PIN_NO(46) | 1) + +#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define PINMUX_GPIO47__FUNC_BPI_BUS0 (MTK_PIN_NO(47) | 1) + +#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define PINMUX_GPIO48__FUNC_BPI_BUS1 (MTK_PIN_NO(48) | 1) + +#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define PINMUX_GPIO49__FUNC_BPI_BUS2 (MTK_PIN_NO(49) | 1) + +#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define PINMUX_GPIO50__FUNC_BPI_BUS3 (MTK_PIN_NO(50) | 1) + +#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define PINMUX_GPIO51__FUNC_BPI_BUS4 (MTK_PIN_NO(51) | 1) + +#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define PINMUX_GPIO52__FUNC_BPI_BUS5 (MTK_PIN_NO(52) | 1) + +#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define PINMUX_GPIO53__FUNC_BPI_BUS6 (MTK_PIN_NO(53) | 1) + +#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define PINMUX_GPIO54__FUNC_BPI_BUS7 (MTK_PIN_NO(54) | 1) + +#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1) + +#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define 
PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1) + +#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1) + +#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define PINMUX_GPIO58__FUNC_BPI_BUS11 (MTK_PIN_NO(58) | 1) + +#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define PINMUX_GPIO59__FUNC_BPI_BUS12 (MTK_PIN_NO(59) | 1) + +#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define PINMUX_GPIO60__FUNC_BPI_BUS13 (MTK_PIN_NO(60) | 1) + +#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define PINMUX_GPIO61__FUNC_BPI_BUS14 (MTK_PIN_NO(61) | 1) + +#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define PINMUX_GPIO62__FUNC_RFIC1_BSI_CK (MTK_PIN_NO(62) | 1) + +#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define PINMUX_GPIO63__FUNC_RFIC1_BSI_D0 (MTK_PIN_NO(63) | 1) + +#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define PINMUX_GPIO64__FUNC_RFIC1_BSI_D1 (MTK_PIN_NO(64) | 1) + +#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define PINMUX_GPIO65__FUNC_RFIC1_BSI_D2 (MTK_PIN_NO(65) | 1) + +#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define PINMUX_GPIO66__FUNC_RFIC1_BSI_CS (MTK_PIN_NO(66) | 1) + +#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define PINMUX_GPIO67__FUNC_TD_TXBPI (MTK_PIN_NO(67) | 1) + +#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define PINMUX_GPIO68__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(68) | 1) + +#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define PINMUX_GPIO69__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(69) | 1) + +#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define PINMUX_GPIO70__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(70) | 1) + +#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define PINMUX_GPIO71__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(71) | 1) + +#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define PINMUX_GPIO72__FUNC_RFIC0_BSI_CS (MTK_PIN_NO(72) | 1) + +#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define PINMUX_GPIO73__FUNC_MISC_BSI_DO (MTK_PIN_NO(73) | 1) + +#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define PINMUX_GPIO74__FUNC_MISC_BSI_CK (MTK_PIN_NO(74) | 1) + +#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define PINMUX_GPIO75__FUNC_MISC_BSI_CS0B (MTK_PIN_NO(75) | 1) +#define PINMUX_GPIO75__FUNC_MIPI1_SCLK (MTK_PIN_NO(75) | 2) + +#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define PINMUX_GPIO76__FUNC_MISC_BSI_CS1B (MTK_PIN_NO(76) | 1) + +#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define PINMUX_GPIO77__FUNC_MISC_BSI_DI (MTK_PIN_NO(77) | 1) +#define PINMUX_GPIO77__FUNC_MIPI1_SDATA (MTK_PIN_NO(77) | 2) + +#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define PINMUX_GPIO78__FUNC_LTE_TXBPI (MTK_PIN_NO(78) | 1) + +#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define PINMUX_GPIO79__FUNC_BPI_BUS15 (MTK_PIN_NO(79) | 1) + +#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define PINMUX_GPIO80__FUNC_BPI_BUS16 (MTK_PIN_NO(80) | 1) + +#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define PINMUX_GPIO81__FUNC_BPI_BUS17 (MTK_PIN_NO(81) | 1) + +#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define PINMUX_GPIO82__FUNC_BPI_BUS18 (MTK_PIN_NO(82) | 1) + +#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define PINMUX_GPIO83__FUNC_BPI_BUS19 (MTK_PIN_NO(83) | 1) + +#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define PINMUX_GPIO84__FUNC_BPI_BUS20 (MTK_PIN_NO(84) | 1) + 
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define PINMUX_GPIO85__FUNC_BPI_BUS21 (MTK_PIN_NO(85) | 1) + +#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define PINMUX_GPIO86__FUNC_BPI_BUS22 (MTK_PIN_NO(86) | 1) + +#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define PINMUX_GPIO87__FUNC_BPI_BUS23 (MTK_PIN_NO(87) | 1) + +#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define PINMUX_GPIO88__FUNC_BPI_BUS24 (MTK_PIN_NO(88) | 1) + +#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define PINMUX_GPIO89__FUNC_BPI_BUS25 (MTK_PIN_NO(89) | 1) + +#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define PINMUX_GPIO90__FUNC_BPI_BUS26 (MTK_PIN_NO(90) | 1) + +#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define PINMUX_GPIO91__FUNC_BPI_BUS27 (MTK_PIN_NO(91) | 1) + +#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define PINMUX_GPIO92__FUNC_PCM1_CLK (MTK_PIN_NO(92) | 1) +#define PINMUX_GPIO92__FUNC_I2S0_BCK (MTK_PIN_NO(92) | 2) +#define PINMUX_GPIO92__FUNC_NLD6 (MTK_PIN_NO(92) | 3) + +#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define PINMUX_GPIO93__FUNC_PCM1_SYNC (MTK_PIN_NO(93) | 1) +#define PINMUX_GPIO93__FUNC_I2S0_WS (MTK_PIN_NO(93) | 2) +#define PINMUX_GPIO93__FUNC_NLD7 (MTK_PIN_NO(93) | 3) + +#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define PINMUX_GPIO94__FUNC_PCM1_DI (MTK_PIN_NO(94) | 1) +#define PINMUX_GPIO94__FUNC_I2S0_DI (MTK_PIN_NO(94) | 2) +#define PINMUX_GPIO94__FUNC_NREB (MTK_PIN_NO(94) | 3) + +#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define PINMUX_GPIO95__FUNC_PCM1_DO (MTK_PIN_NO(95) | 1) +#define PINMUX_GPIO95__FUNC_I2S0_DO (MTK_PIN_NO(95) | 2) +#define PINMUX_GPIO95__FUNC_NRNB0 (MTK_PIN_NO(95) | 3) + +#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define PINMUX_GPIO96__FUNC_URXD1 (MTK_PIN_NO(96) | 1) +#define PINMUX_GPIO96__FUNC_UTXD1 (MTK_PIN_NO(96) | 2) +#define PINMUX_GPIO96__FUNC_NWEB (MTK_PIN_NO(96) | 3) + +#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define PINMUX_GPIO97__FUNC_UTXD1 (MTK_PIN_NO(97) | 1) +#define PINMUX_GPIO97__FUNC_URXD1 (MTK_PIN_NO(97) | 2) +#define PINMUX_GPIO97__FUNC_NCEB0 (MTK_PIN_NO(97) | 3) + +#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define PINMUX_GPIO98__FUNC_URTS1 (MTK_PIN_NO(98) | 1) +#define PINMUX_GPIO98__FUNC_UCTS1 (MTK_PIN_NO(98) | 2) +#define PINMUX_GPIO98__FUNC_NALE (MTK_PIN_NO(98) | 3) + +#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define PINMUX_GPIO99__FUNC_UCTS1 (MTK_PIN_NO(99) | 1) +#define PINMUX_GPIO99__FUNC_URTS1 (MTK_PIN_NO(99) | 2) +#define PINMUX_GPIO99__FUNC_NCLE (MTK_PIN_NO(99) | 3) + +#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define PINMUX_GPIO100__FUNC_MSDC2_DAT0 (MTK_PIN_NO(100) | 1) +#define PINMUX_GPIO100__FUNC_URXD1 (MTK_PIN_NO(100) | 2) +#define PINMUX_GPIO100__FUNC_USB_DRVVBUS (MTK_PIN_NO(100) | 3) +#define PINMUX_GPIO100__FUNC_SDA4 (MTK_PIN_NO(100) | 4) + +#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define PINMUX_GPIO101__FUNC_MSDC2_DAT1 (MTK_PIN_NO(101) | 1) +#define PINMUX_GPIO101__FUNC_UTXD1 (MTK_PIN_NO(101) | 2) +#define PINMUX_GPIO101__FUNC_SCL4 (MTK_PIN_NO(101) | 4) + +#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define PINMUX_GPIO102__FUNC_MSDC2_DAT2 (MTK_PIN_NO(102) | 1) +#define PINMUX_GPIO102__FUNC_URTS1 (MTK_PIN_NO(102) | 2) +#define PINMUX_GPIO102__FUNC_UTXD0 (MTK_PIN_NO(102) | 3) +#define PINMUX_GPIO102__FUNC_PWM0 (MTK_PIN_NO(102) | 5) +#define PINMUX_GPIO102__FUNC_SPI_CK_1 
(MTK_PIN_NO(102) | 6) + +#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define PINMUX_GPIO103__FUNC_MSDC2_DAT3 (MTK_PIN_NO(103) | 1) +#define PINMUX_GPIO103__FUNC_UCTS1 (MTK_PIN_NO(103) | 2) +#define PINMUX_GPIO103__FUNC_URXD0 (MTK_PIN_NO(103) | 3) +#define PINMUX_GPIO103__FUNC_PWM1 (MTK_PIN_NO(103) | 5) +#define PINMUX_GPIO103__FUNC_SPI_MI_1 (MTK_PIN_NO(103) | 6) + +#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define PINMUX_GPIO104__FUNC_MSDC2_CLK (MTK_PIN_NO(104) | 1) +#define PINMUX_GPIO104__FUNC_NLD4 (MTK_PIN_NO(104) | 2) +#define PINMUX_GPIO104__FUNC_UTXD3 (MTK_PIN_NO(104) | 3) +#define PINMUX_GPIO104__FUNC_SDA3 (MTK_PIN_NO(104) | 4) +#define PINMUX_GPIO104__FUNC_PWM2 (MTK_PIN_NO(104) | 5) +#define PINMUX_GPIO104__FUNC_SPI_MO_1 (MTK_PIN_NO(104) | 6) + +#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define PINMUX_GPIO105__FUNC_MSDC2_CMD (MTK_PIN_NO(105) | 1) +#define PINMUX_GPIO105__FUNC_NLD5 (MTK_PIN_NO(105) | 2) +#define PINMUX_GPIO105__FUNC_URXD3 (MTK_PIN_NO(105) | 3) +#define PINMUX_GPIO105__FUNC_SCL3 (MTK_PIN_NO(105) | 4) +#define PINMUX_GPIO105__FUNC_PWM3 (MTK_PIN_NO(105) | 5) +#define PINMUX_GPIO105__FUNC_SPI_CS_1 (MTK_PIN_NO(105) | 6) + +#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define PINMUX_GPIO106__FUNC_LCM_RST (MTK_PIN_NO(106) | 1) + +#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define PINMUX_GPIO107__FUNC_DSI_TE (MTK_PIN_NO(107) | 1) + +#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define PINMUX_GPIO108__FUNC_JTMS (MTK_PIN_NO(108) | 1) +#define PINMUX_GPIO108__FUNC_MFG_JTAG_TMS (MTK_PIN_NO(108) | 2) +#define PINMUX_GPIO108__FUNC_TDD_TMS (MTK_PIN_NO(108) | 3) +#define PINMUX_GPIO108__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(108) | 4) +#define PINMUX_GPIO108__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(108) | 5) +#define PINMUX_GPIO108__FUNC_DFD_TMS (MTK_PIN_NO(108) | 6) + +#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define PINMUX_GPIO109__FUNC_JTCK (MTK_PIN_NO(109) | 1) +#define PINMUX_GPIO109__FUNC_MFG_JTAG_TCK (MTK_PIN_NO(109) | 2) +#define PINMUX_GPIO109__FUNC_TDD_TCK (MTK_PIN_NO(109) | 3) +#define PINMUX_GPIO109__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(109) | 4) +#define PINMUX_GPIO109__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(109) | 5) +#define PINMUX_GPIO109__FUNC_DFD_TCK (MTK_PIN_NO(109) | 6) + +#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define PINMUX_GPIO110__FUNC_JTDI (MTK_PIN_NO(110) | 1) +#define PINMUX_GPIO110__FUNC_MFG_JTAG_TDI (MTK_PIN_NO(110) | 2) +#define PINMUX_GPIO110__FUNC_TDD_TDI (MTK_PIN_NO(110) | 3) +#define PINMUX_GPIO110__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(110) | 4) +#define PINMUX_GPIO110__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(110) | 5) +#define PINMUX_GPIO110__FUNC_DFD_TDI (MTK_PIN_NO(110) | 6) + +#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define PINMUX_GPIO111__FUNC_JTDO (MTK_PIN_NO(111) | 1) +#define PINMUX_GPIO111__FUNC_MFG_JTAG_TDO (MTK_PIN_NO(111) | 2) +#define PINMUX_GPIO111__FUNC_TDD_TDO (MTK_PIN_NO(111) | 3) +#define PINMUX_GPIO111__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(111) | 4) +#define PINMUX_GPIO111__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(111) | 5) +#define PINMUX_GPIO111__FUNC_DFD_TDO (MTK_PIN_NO(111) | 6) + +#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define PINMUX_GPIO112__FUNC_JTRST_B (MTK_PIN_NO(112) | 1) +#define PINMUX_GPIO112__FUNC_MFG_JTAG_TRSTN (MTK_PIN_NO(112) | 2) +#define PINMUX_GPIO112__FUNC_TDD_TRSTN (MTK_PIN_NO(112) | 3) +#define PINMUX_GPIO112__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(112) | 4) +#define 
PINMUX_GPIO112__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(112) | 5) +#define PINMUX_GPIO112__FUNC_DFD_NTRST (MTK_PIN_NO(112) | 6) + +#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define PINMUX_GPIO113__FUNC_URXD0 (MTK_PIN_NO(113) | 1) +#define PINMUX_GPIO113__FUNC_UTXD0 (MTK_PIN_NO(113) | 2) +#define PINMUX_GPIO113__FUNC_MD_URXD (MTK_PIN_NO(113) | 3) +#define PINMUX_GPIO113__FUNC_LTE_URXD (MTK_PIN_NO(113) | 4) +#define PINMUX_GPIO113__FUNC_TDD_TXD (MTK_PIN_NO(113) | 5) +#define PINMUX_GPIO113__FUNC_I2S2_WS (MTK_PIN_NO(113) | 6) + +#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define PINMUX_GPIO114__FUNC_UTXD0 (MTK_PIN_NO(114) | 1) +#define PINMUX_GPIO114__FUNC_URXD0 (MTK_PIN_NO(114) | 2) +#define PINMUX_GPIO114__FUNC_MD_UTXD (MTK_PIN_NO(114) | 3) +#define PINMUX_GPIO114__FUNC_LTE_UTXD (MTK_PIN_NO(114) | 4) +#define PINMUX_GPIO114__FUNC_TDD_TXD (MTK_PIN_NO(114) | 5) +#define PINMUX_GPIO114__FUNC_I2S2_BCK (MTK_PIN_NO(114) | 6) + +#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define PINMUX_GPIO115__FUNC_URTS0 (MTK_PIN_NO(115) | 1) +#define PINMUX_GPIO115__FUNC_UCTS0 (MTK_PIN_NO(115) | 2) +#define PINMUX_GPIO115__FUNC_MD_URXD (MTK_PIN_NO(115) | 3) +#define PINMUX_GPIO115__FUNC_LTE_URXD (MTK_PIN_NO(115) | 4) +#define PINMUX_GPIO115__FUNC_TDD_TXD (MTK_PIN_NO(115) | 5) +#define PINMUX_GPIO115__FUNC_I2S2_MCK (MTK_PIN_NO(115) | 6) + +#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define PINMUX_GPIO116__FUNC_UCTS0 (MTK_PIN_NO(116) | 1) +#define PINMUX_GPIO116__FUNC_URTS0 (MTK_PIN_NO(116) | 2) +#define PINMUX_GPIO116__FUNC_MD_UTXD (MTK_PIN_NO(116) | 3) +#define PINMUX_GPIO116__FUNC_LTE_UTXD (MTK_PIN_NO(116) | 4) +#define PINMUX_GPIO116__FUNC_TDD_TXD (MTK_PIN_NO(116) | 5) +#define PINMUX_GPIO116__FUNC_I2S2_DI_1 (MTK_PIN_NO(116) | 6) + +#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define PINMUX_GPIO117__FUNC_URXD3 (MTK_PIN_NO(117) | 1) +#define PINMUX_GPIO117__FUNC_UTXD3 (MTK_PIN_NO(117) | 2) +#define PINMUX_GPIO117__FUNC_MD_URXD (MTK_PIN_NO(117) | 3) +#define PINMUX_GPIO117__FUNC_LTE_URXD (MTK_PIN_NO(117) | 4) +#define PINMUX_GPIO117__FUNC_TDD_TXD (MTK_PIN_NO(117) | 5) + +#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define PINMUX_GPIO118__FUNC_UTXD3 (MTK_PIN_NO(118) | 1) +#define PINMUX_GPIO118__FUNC_URXD3 (MTK_PIN_NO(118) | 2) +#define PINMUX_GPIO118__FUNC_MD_UTXD (MTK_PIN_NO(118) | 3) +#define PINMUX_GPIO118__FUNC_LTE_UTXD (MTK_PIN_NO(118) | 4) +#define PINMUX_GPIO118__FUNC_TDD_TXD (MTK_PIN_NO(118) | 5) + +#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define PINMUX_GPIO119__FUNC_KROW0 (MTK_PIN_NO(119) | 1) + +#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define PINMUX_GPIO120__FUNC_KROW1 (MTK_PIN_NO(120) | 1) +#define PINMUX_GPIO120__FUNC_PWM6 (MTK_PIN_NO(120) | 3) + +#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define PINMUX_GPIO121__FUNC_KROW2 (MTK_PIN_NO(121) | 1) +#define PINMUX_GPIO121__FUNC_IRDA_PDN (MTK_PIN_NO(121) | 2) +#define PINMUX_GPIO121__FUNC_I2S1_DO_1 (MTK_PIN_NO(121) | 3) +#define PINMUX_GPIO121__FUNC_USB_DRVVBUS (MTK_PIN_NO(121) | 4) +#define PINMUX_GPIO121__FUNC_SPI_CK_2 (MTK_PIN_NO(121) | 5) +#define PINMUX_GPIO121__FUNC_PWM4 (MTK_PIN_NO(121) | 6) + +#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define PINMUX_GPIO122__FUNC_KCOL0 (MTK_PIN_NO(122) | 1) + +#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define PINMUX_GPIO123__FUNC_KCOL1 (MTK_PIN_NO(123) | 1) +#define PINMUX_GPIO123__FUNC_IRDA_RXD (MTK_PIN_NO(123) | 2) +#define 
PINMUX_GPIO123__FUNC_I2S2_DI_2 (MTK_PIN_NO(123) | 3) +#define PINMUX_GPIO123__FUNC_PWM5 (MTK_PIN_NO(123) | 4) + +#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define PINMUX_GPIO124__FUNC_KCOL2 (MTK_PIN_NO(124) | 1) +#define PINMUX_GPIO124__FUNC_IRDA_TXD (MTK_PIN_NO(124) | 2) +#define PINMUX_GPIO124__FUNC_I2S1_DO_2 (MTK_PIN_NO(124) | 3) +#define PINMUX_GPIO124__FUNC_USB_DRVVBUS (MTK_PIN_NO(124) | 4) +#define PINMUX_GPIO124__FUNC_SPI_MI_2 (MTK_PIN_NO(124) | 5) +#define PINMUX_GPIO124__FUNC_PWM3 (MTK_PIN_NO(124) | 6) + +#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define PINMUX_GPIO125__FUNC_SDA1 (MTK_PIN_NO(125) | 1) + +#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define PINMUX_GPIO126__FUNC_SCL1 (MTK_PIN_NO(126) | 1) + +#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define PINMUX_GPIO127__FUNC_MD_EINT1 (MTK_PIN_NO(127) | 1) +#define PINMUX_GPIO127__FUNC_DISP_PWM1 (MTK_PIN_NO(127) | 2) +#define PINMUX_GPIO127__FUNC_SPI_MO_2 (MTK_PIN_NO(127) | 3) + +#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define PINMUX_GPIO128__FUNC_MD_EINT2 (MTK_PIN_NO(128) | 1) +#define PINMUX_GPIO128__FUNC_DSI1_TE (MTK_PIN_NO(128) | 2) +#define PINMUX_GPIO128__FUNC_SPI_CS_2 (MTK_PIN_NO(128) | 3) + +#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define PINMUX_GPIO129__FUNC_I2S3_WS (MTK_PIN_NO(129) | 1) +#define PINMUX_GPIO129__FUNC_I2S2_WS (MTK_PIN_NO(129) | 2) +#define PINMUX_GPIO129__FUNC_PWM0 (MTK_PIN_NO(129) | 3) + +#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define PINMUX_GPIO130__FUNC_I2S3_BCK (MTK_PIN_NO(130) | 1) +#define PINMUX_GPIO130__FUNC_I2S2_BCK (MTK_PIN_NO(130) | 2) +#define PINMUX_GPIO130__FUNC_PWM1 (MTK_PIN_NO(130) | 3) + +#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define PINMUX_GPIO131__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 1) +#define PINMUX_GPIO131__FUNC_I2S2_MCK (MTK_PIN_NO(131) | 2) +#define PINMUX_GPIO131__FUNC_PWM2 (MTK_PIN_NO(131) | 3) + +#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define PINMUX_GPIO132__FUNC_I2S3_DO_1 (MTK_PIN_NO(132) | 1) +#define PINMUX_GPIO132__FUNC_I2S2_DI_1 (MTK_PIN_NO(132) | 2) +#define PINMUX_GPIO132__FUNC_PWM3 (MTK_PIN_NO(132) | 3) + +#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define PINMUX_GPIO133__FUNC_I2S3_DO_2 (MTK_PIN_NO(133) | 1) +#define PINMUX_GPIO133__FUNC_I2S2_DI_2 (MTK_PIN_NO(133) | 2) +#define PINMUX_GPIO133__FUNC_PWM4 (MTK_PIN_NO(133) | 3) + +#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define PINMUX_GPIO134__FUNC_I2S3_DO_3 (MTK_PIN_NO(134) | 1) +#define PINMUX_GPIO134__FUNC_DISP_PWM1 (MTK_PIN_NO(134) | 2) +#define PINMUX_GPIO134__FUNC_I2S1_DO_1 (MTK_PIN_NO(134) | 3) +#define PINMUX_GPIO134__FUNC_PWM5 (MTK_PIN_NO(134) | 4) + +#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define PINMUX_GPIO135__FUNC_I2S3_DO_4 (MTK_PIN_NO(135) | 1) +#define PINMUX_GPIO135__FUNC_DSI1_TE (MTK_PIN_NO(135) | 2) +#define PINMUX_GPIO135__FUNC_I2S1_DO_2 (MTK_PIN_NO(135) | 3) +#define PINMUX_GPIO135__FUNC_PWM6 (MTK_PIN_NO(135) | 4) + +#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define PINMUX_GPIO136__FUNC_SDA3 (MTK_PIN_NO(136) | 1) + +#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define PINMUX_GPIO137__FUNC_SCL3 (MTK_PIN_NO(137) | 1) + +#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define PINMUX_GPIO138__FUNC_DPI_CK (MTK_PIN_NO(138) | 1) +#define PINMUX_GPIO138__FUNC_NLD6 (MTK_PIN_NO(138) | 2) +#define PINMUX_GPIO138__FUNC_UTXD0 (MTK_PIN_NO(138) | 3) 
+#define PINMUX_GPIO138__FUNC_USB_DRVVBUS (MTK_PIN_NO(138) | 4) +#define PINMUX_GPIO138__FUNC_IRDA_PDN (MTK_PIN_NO(138) | 5) + +#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define PINMUX_GPIO139__FUNC_DPI_DE (MTK_PIN_NO(139) | 1) +#define PINMUX_GPIO139__FUNC_NLD7 (MTK_PIN_NO(139) | 2) +#define PINMUX_GPIO139__FUNC_URXD0 (MTK_PIN_NO(139) | 3) +#define PINMUX_GPIO139__FUNC_MD_UTXD (MTK_PIN_NO(139) | 4) +#define PINMUX_GPIO139__FUNC_IRDA_RXD (MTK_PIN_NO(139) | 5) + +#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define PINMUX_GPIO140__FUNC_DPI_D0 (MTK_PIN_NO(140) | 1) +#define PINMUX_GPIO140__FUNC_NREB (MTK_PIN_NO(140) | 2) +#define PINMUX_GPIO140__FUNC_UCTS0 (MTK_PIN_NO(140) | 3) +#define PINMUX_GPIO140__FUNC_MD_URXD (MTK_PIN_NO(140) | 4) +#define PINMUX_GPIO140__FUNC_IRDA_TXD (MTK_PIN_NO(140) | 5) + +#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define PINMUX_GPIO141__FUNC_DPI_D1 (MTK_PIN_NO(141) | 1) +#define PINMUX_GPIO141__FUNC_NRNB0 (MTK_PIN_NO(141) | 2) +#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 3) +#define PINMUX_GPIO141__FUNC_LTE_UTXD (MTK_PIN_NO(141) | 4) +#define PINMUX_GPIO141__FUNC_I2S2_WS (MTK_PIN_NO(141) | 5) + +#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define PINMUX_GPIO142__FUNC_DPI_D2 (MTK_PIN_NO(142) | 1) +#define PINMUX_GPIO142__FUNC_NWEB (MTK_PIN_NO(142) | 2) +#define PINMUX_GPIO142__FUNC_UTXD1 (MTK_PIN_NO(142) | 3) +#define PINMUX_GPIO142__FUNC_LTE_URXD (MTK_PIN_NO(142) | 4) +#define PINMUX_GPIO142__FUNC_I2S2_BCK (MTK_PIN_NO(142) | 5) + +#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define PINMUX_GPIO143__FUNC_DPI_D3 (MTK_PIN_NO(143) | 1) +#define PINMUX_GPIO143__FUNC_NCEB0 (MTK_PIN_NO(143) | 2) +#define PINMUX_GPIO143__FUNC_URXD1 (MTK_PIN_NO(143) | 3) +#define PINMUX_GPIO143__FUNC_TDD_TXD (MTK_PIN_NO(143) | 4) +#define PINMUX_GPIO143__FUNC_I2S2_MCK (MTK_PIN_NO(143) | 5) + +#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define PINMUX_GPIO144__FUNC_DPI_D4 (MTK_PIN_NO(144) | 1) +#define PINMUX_GPIO144__FUNC_NALE (MTK_PIN_NO(144) | 2) +#define PINMUX_GPIO144__FUNC_UCTS1 (MTK_PIN_NO(144) | 3) +#define PINMUX_GPIO144__FUNC_TDD_TMS (MTK_PIN_NO(144) | 4) +#define PINMUX_GPIO144__FUNC_I2S2_DI_1 (MTK_PIN_NO(144) | 5) + +#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0) +#define PINMUX_GPIO145__FUNC_DPI_D5 (MTK_PIN_NO(145) | 1) +#define PINMUX_GPIO145__FUNC_NCLE (MTK_PIN_NO(145) | 2) +#define PINMUX_GPIO145__FUNC_URTS1 (MTK_PIN_NO(145) | 3) +#define PINMUX_GPIO145__FUNC_TDD_TCK (MTK_PIN_NO(145) | 4) +#define PINMUX_GPIO145__FUNC_I2S2_DI_2 (MTK_PIN_NO(145) | 5) + +#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0) +#define PINMUX_GPIO146__FUNC_DPI_D6 (MTK_PIN_NO(146) | 1) +#define PINMUX_GPIO146__FUNC_NLD8 (MTK_PIN_NO(146) | 2) +#define PINMUX_GPIO146__FUNC_UTXD2 (MTK_PIN_NO(146) | 3) +#define PINMUX_GPIO146__FUNC_TDD_TDI (MTK_PIN_NO(146) | 4) + +#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0) +#define PINMUX_GPIO147__FUNC_DPI_D7 (MTK_PIN_NO(147) | 1) +#define PINMUX_GPIO147__FUNC_NLD9 (MTK_PIN_NO(147) | 2) +#define PINMUX_GPIO147__FUNC_URXD2 (MTK_PIN_NO(147) | 3) +#define PINMUX_GPIO147__FUNC_TDD_TDO (MTK_PIN_NO(147) | 4) +#define PINMUX_GPIO147__FUNC_I2S1_WS (MTK_PIN_NO(147) | 5) + +#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0) +#define PINMUX_GPIO148__FUNC_DPI_D8 (MTK_PIN_NO(148) | 1) +#define PINMUX_GPIO148__FUNC_NLD10 (MTK_PIN_NO(148) | 2) +#define PINMUX_GPIO148__FUNC_UCTS2 (MTK_PIN_NO(148) | 3) +#define 
PINMUX_GPIO148__FUNC_TDD_TRSTN (MTK_PIN_NO(148) | 4) +#define PINMUX_GPIO148__FUNC_I2S1_BCK (MTK_PIN_NO(148) | 5) + +#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0) +#define PINMUX_GPIO149__FUNC_DPI_D9 (MTK_PIN_NO(149) | 1) +#define PINMUX_GPIO149__FUNC_NLD11 (MTK_PIN_NO(149) | 2) +#define PINMUX_GPIO149__FUNC_URTS2 (MTK_PIN_NO(149) | 3) +#define PINMUX_GPIO149__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(149) | 4) +#define PINMUX_GPIO149__FUNC_I2S1_MCK (MTK_PIN_NO(149) | 5) + +#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0) +#define PINMUX_GPIO150__FUNC_DPI_D10 (MTK_PIN_NO(150) | 1) +#define PINMUX_GPIO150__FUNC_NLD12 (MTK_PIN_NO(150) | 2) +#define PINMUX_GPIO150__FUNC_UTXD3 (MTK_PIN_NO(150) | 3) +#define PINMUX_GPIO150__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(150) | 4) +#define PINMUX_GPIO150__FUNC_I2S1_DO_1 (MTK_PIN_NO(150) | 5) + +#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0) +#define PINMUX_GPIO151__FUNC_DPI_D11 (MTK_PIN_NO(151) | 1) +#define PINMUX_GPIO151__FUNC_NLD13 (MTK_PIN_NO(151) | 2) +#define PINMUX_GPIO151__FUNC_URXD3 (MTK_PIN_NO(151) | 3) +#define PINMUX_GPIO151__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(151) | 4) +#define PINMUX_GPIO151__FUNC_I2S1_DO_2 (MTK_PIN_NO(151) | 5) + +#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0) +#define PINMUX_GPIO152__FUNC_DPI_HSYNC (MTK_PIN_NO(152) | 1) +#define PINMUX_GPIO152__FUNC_NLD14 (MTK_PIN_NO(152) | 2) +#define PINMUX_GPIO152__FUNC_UCTS3 (MTK_PIN_NO(152) | 3) +#define PINMUX_GPIO152__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(152) | 4) +#define PINMUX_GPIO152__FUNC_DSI1_TE (MTK_PIN_NO(152) | 5) + +#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0) +#define PINMUX_GPIO153__FUNC_DPI_VSYNC (MTK_PIN_NO(153) | 1) +#define PINMUX_GPIO153__FUNC_NLD15 (MTK_PIN_NO(153) | 2) +#define PINMUX_GPIO153__FUNC_URTS3 (MTK_PIN_NO(153) | 3) +#define PINMUX_GPIO153__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(153) | 4) +#define PINMUX_GPIO153__FUNC_DISP_PWM1 (MTK_PIN_NO(153) | 5) + +#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0) +#define PINMUX_GPIO154__FUNC_MSDC0_DAT0 (MTK_PIN_NO(154) | 1) +#define PINMUX_GPIO154__FUNC_NLD8 (MTK_PIN_NO(154) | 2) + +#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0) +#define PINMUX_GPIO155__FUNC_MSDC0_DAT1 (MTK_PIN_NO(155) | 1) +#define PINMUX_GPIO155__FUNC_NLD9 (MTK_PIN_NO(155) | 2) + +#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0) +#define PINMUX_GPIO156__FUNC_MSDC0_DAT2 (MTK_PIN_NO(156) | 1) +#define PINMUX_GPIO156__FUNC_NLD10 (MTK_PIN_NO(156) | 2) + +#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0) +#define PINMUX_GPIO157__FUNC_MSDC0_DAT3 (MTK_PIN_NO(157) | 1) +#define PINMUX_GPIO157__FUNC_NLD11 (MTK_PIN_NO(157) | 2) + +#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0) +#define PINMUX_GPIO158__FUNC_MSDC0_DAT4 (MTK_PIN_NO(158) | 1) +#define PINMUX_GPIO158__FUNC_NLD12 (MTK_PIN_NO(158) | 2) + +#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0) +#define PINMUX_GPIO159__FUNC_MSDC0_DAT5 (MTK_PIN_NO(159) | 1) +#define PINMUX_GPIO159__FUNC_NLD13 (MTK_PIN_NO(159) | 2) + +#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0) +#define PINMUX_GPIO160__FUNC_MSDC0_DAT6 (MTK_PIN_NO(160) | 1) +#define PINMUX_GPIO160__FUNC_NLD14 (MTK_PIN_NO(160) | 2) + +#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0) +#define PINMUX_GPIO161__FUNC_MSDC0_DAT7 (MTK_PIN_NO(161) | 1) +#define PINMUX_GPIO161__FUNC_NLD15 (MTK_PIN_NO(161) | 2) + +#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0) +#define PINMUX_GPIO162__FUNC_MSDC0_CMD (MTK_PIN_NO(162) | 1) + 
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0) +#define PINMUX_GPIO163__FUNC_MSDC0_CLK (MTK_PIN_NO(163) | 1) + +#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0) +#define PINMUX_GPIO164__FUNC_MSDC0_DSL (MTK_PIN_NO(164) | 1) + +#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0) +#define PINMUX_GPIO165__FUNC_MSDC0_RSTB (MTK_PIN_NO(165) | 1) + +#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0) +#define PINMUX_GPIO166__FUNC_SPI_CK_0 (MTK_PIN_NO(166) | 1) +#define PINMUX_GPIO166__FUNC_PWM0 (MTK_PIN_NO(166) | 3) + +#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0) +#define PINMUX_GPIO167__FUNC_SPI_MI_0 (MTK_PIN_NO(167) | 1) +#define PINMUX_GPIO167__FUNC_PWM1 (MTK_PIN_NO(167) | 3) +#define PINMUX_GPIO167__FUNC_SPI_MO_0 (MTK_PIN_NO(167) | 4) + +#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0) +#define PINMUX_GPIO168__FUNC_SPI_MO_0 (MTK_PIN_NO(168) | 1) +#define PINMUX_GPIO168__FUNC_MD_EINT3 (MTK_PIN_NO(168) | 2) +#define PINMUX_GPIO168__FUNC_PWM2 (MTK_PIN_NO(168) | 3) +#define PINMUX_GPIO168__FUNC_SPI_MI_0 (MTK_PIN_NO(168) | 4) + +#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0) +#define PINMUX_GPIO169__FUNC_SPI_CS_0 (MTK_PIN_NO(169) | 1) +#define PINMUX_GPIO169__FUNC_MD_EINT4 (MTK_PIN_NO(169) | 2) +#define PINMUX_GPIO169__FUNC_PWM3 (MTK_PIN_NO(169) | 3) + +#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0) +#define PINMUX_GPIO170__FUNC_MSDC1_CMD (MTK_PIN_NO(170) | 1) + +#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0) +#define PINMUX_GPIO171__FUNC_MSDC1_DAT0 (MTK_PIN_NO(171) | 1) + +#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0) +#define PINMUX_GPIO172__FUNC_MSDC1_DAT1 (MTK_PIN_NO(172) | 1) + +#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0) +#define PINMUX_GPIO173__FUNC_MSDC1_DAT2 (MTK_PIN_NO(173) | 1) + +#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0) +#define PINMUX_GPIO174__FUNC_MSDC1_DAT3 (MTK_PIN_NO(174) | 1) + +#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0) +#define PINMUX_GPIO175__FUNC_MSDC1_CLK (MTK_PIN_NO(175) | 1) + +#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0) +#define PINMUX_GPIO176__FUNC_PWRAP_SPIMI (MTK_PIN_NO(176) | 1) +#define PINMUX_GPIO176__FUNC_PWRAP_SPIMO (MTK_PIN_NO(176) | 2) + +#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0) +#define PINMUX_GPIO177__FUNC_PWRAP_SPIMO (MTK_PIN_NO(177) | 1) +#define PINMUX_GPIO177__FUNC_PWRAP_SPIMI (MTK_PIN_NO(177) | 2) + +#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0) +#define PINMUX_GPIO178__FUNC_PWRAP_SPICK (MTK_PIN_NO(178) | 1) + +#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0) +#define PINMUX_GPIO179__FUNC_PWRAP_SPICS (MTK_PIN_NO(179) | 1) + +#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0) +#define PINMUX_GPIO180__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(180) | 1) +#define PINMUX_GPIO180__FUNC_I2S1_WS (MTK_PIN_NO(180) | 2) +#define PINMUX_GPIO180__FUNC_I2S2_WS (MTK_PIN_NO(180) | 3) +#define PINMUX_GPIO180__FUNC_I2S0_WS (MTK_PIN_NO(180) | 4) + +#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0) +#define PINMUX_GPIO181__FUNC_AUD_DAT_MISO_1 (MTK_PIN_NO(181) | 1) +#define PINMUX_GPIO181__FUNC_I2S1_BCK (MTK_PIN_NO(181) | 2) +#define PINMUX_GPIO181__FUNC_I2S2_BCK (MTK_PIN_NO(181) | 3) +#define PINMUX_GPIO181__FUNC_I2S0_BCK (MTK_PIN_NO(181) | 4) + +#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0) +#define PINMUX_GPIO182__FUNC_AUD_DAT_MOSI_1 (MTK_PIN_NO(182) | 1) +#define PINMUX_GPIO182__FUNC_I2S1_MCK (MTK_PIN_NO(182) | 2) +#define 
PINMUX_GPIO182__FUNC_I2S2_MCK (MTK_PIN_NO(182) | 3) +#define PINMUX_GPIO182__FUNC_I2S0_MCK (MTK_PIN_NO(182) | 4) + +#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0) +#define PINMUX_GPIO183__FUNC_AUD_DAT_MISO_2 (MTK_PIN_NO(183) | 1) +#define PINMUX_GPIO183__FUNC_I2S1_DO_1 (MTK_PIN_NO(183) | 2) +#define PINMUX_GPIO183__FUNC_I2S2_DI_1 (MTK_PIN_NO(183) | 3) +#define PINMUX_GPIO183__FUNC_I2S0_DO (MTK_PIN_NO(183) | 4) + +#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0) +#define PINMUX_GPIO184__FUNC_AUD_DAT_MOSI_2 (MTK_PIN_NO(184) | 1) +#define PINMUX_GPIO184__FUNC_I2S1_DO_2 (MTK_PIN_NO(184) | 2) +#define PINMUX_GPIO184__FUNC_I2S2_DI_2 (MTK_PIN_NO(184) | 3) +#define PINMUX_GPIO184__FUNC_I2S0_DI (MTK_PIN_NO(184) | 4) + +#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0) +#define PINMUX_GPIO185__FUNC_RTC32K_CK (MTK_PIN_NO(185) | 1) + +#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0) +#define PINMUX_GPIO186__FUNC_DISP_PWM0 (MTK_PIN_NO(186) | 1) +#define PINMUX_GPIO186__FUNC_DISP_PWM1 (MTK_PIN_NO(186) | 2) + +#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0) +#define PINMUX_GPIO187__FUNC_SRCLKENAI (MTK_PIN_NO(187) | 1) + +#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0) +#define PINMUX_GPIO188__FUNC_SRCLKENAI2 (MTK_PIN_NO(188) | 1) + +#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0) +#define PINMUX_GPIO189__FUNC_SRCLKENA0 (MTK_PIN_NO(189) | 1) + +#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0) +#define PINMUX_GPIO190__FUNC_SRCLKENA1 (MTK_PIN_NO(190) | 1) + +#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0) +#define PINMUX_GPIO191__FUNC_WATCHDOG_AO (MTK_PIN_NO(191) | 1) + +#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0) +#define PINMUX_GPIO192__FUNC_I2S0_WS (MTK_PIN_NO(192) | 1) +#define PINMUX_GPIO192__FUNC_I2S1_WS (MTK_PIN_NO(192) | 2) +#define PINMUX_GPIO192__FUNC_I2S2_WS (MTK_PIN_NO(192) | 3) +#define PINMUX_GPIO192__FUNC_NCEB1 (MTK_PIN_NO(192) | 4) + +#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0) +#define PINMUX_GPIO193__FUNC_I2S0_BCK (MTK_PIN_NO(193) | 1) +#define PINMUX_GPIO193__FUNC_I2S1_BCK (MTK_PIN_NO(193) | 2) +#define PINMUX_GPIO193__FUNC_I2S2_BCK (MTK_PIN_NO(193) | 3) +#define PINMUX_GPIO193__FUNC_NRNB1 (MTK_PIN_NO(193) | 4) + +#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0) +#define PINMUX_GPIO194__FUNC_I2S0_MCK (MTK_PIN_NO(194) | 1) +#define PINMUX_GPIO194__FUNC_I2S1_MCK (MTK_PIN_NO(194) | 2) +#define PINMUX_GPIO194__FUNC_I2S2_MCK (MTK_PIN_NO(194) | 3) + +#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0) +#define PINMUX_GPIO195__FUNC_I2S0_DO (MTK_PIN_NO(195) | 1) +#define PINMUX_GPIO195__FUNC_I2S1_DO_1 (MTK_PIN_NO(195) | 2) +#define PINMUX_GPIO195__FUNC_I2S2_DI_1 (MTK_PIN_NO(195) | 3) + +#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0) +#define PINMUX_GPIO196__FUNC_I2S0_DI (MTK_PIN_NO(196) | 1) +#define PINMUX_GPIO196__FUNC_I2S1_DO_2 (MTK_PIN_NO(196) | 2) +#define PINMUX_GPIO196__FUNC_I2S2_DI_2 (MTK_PIN_NO(196) | 3) + + +#endif diff --git a/include/dt-bindings/pinctrl/rzv2m-pinctrl.h b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h new file mode 100644 index 0000000000..525532cd15 --- /dev/null +++ b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for Renesas RZ/V2M pinctrl bindings. + * + * Copyright (C) 2022 Renesas Electronics Corp. 
+ * + */ + +#ifndef __DT_BINDINGS_RZV2M_PINCTRL_H +#define __DT_BINDINGS_RZV2M_PINCTRL_H + +#define RZV2M_PINS_PER_PORT 16 + +/* + * Create the pin index from its bank and position numbers and store in + * the upper 16 bits the alternate function identifier + */ +#define RZV2M_PORT_PINMUX(b, p, f) ((b) * RZV2M_PINS_PER_PORT + (p) | ((f) << 16)) + +/* Convert a port and pin label to its global pin index */ +#define RZV2M_GPIO(port, pin) ((port) * RZV2M_PINS_PER_PORT + (pin)) + +#endif /* __DT_BINDINGS_RZV2M_PINCTRL_H */ diff --git a/include/dt-bindings/power/mt6795-power.h b/include/dt-bindings/power/mt6795-power.h new file mode 100644 index 0000000000..b0fc26cb1d --- /dev/null +++ b/include/dt-bindings/power/mt6795-power.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +#ifndef _DT_BINDINGS_POWER_MT6795_POWER_H +#define _DT_BINDINGS_POWER_MT6795_POWER_H + +#define MT6795_POWER_DOMAIN_MM 0 +#define MT6795_POWER_DOMAIN_VDEC 1 +#define MT6795_POWER_DOMAIN_VENC 2 +#define MT6795_POWER_DOMAIN_ISP 3 +#define MT6795_POWER_DOMAIN_MJC 4 +#define MT6795_POWER_DOMAIN_AUDIO 5 +#define MT6795_POWER_DOMAIN_MFG_ASYNC 6 +#define MT6795_POWER_DOMAIN_MFG_2D 7 +#define MT6795_POWER_DOMAIN_MFG 8 +#define MT6795_POWER_DOMAIN_MODEM 9 + +#endif /* _DT_BINDINGS_POWER_MT6795_POWER_H */ diff --git a/include/dt-bindings/power/r8a779g0-sysc.h b/include/dt-bindings/power/r8a779g0-sysc.h new file mode 100644 index 0000000000..7daa70f181 --- /dev/null +++ b/include/dt-bindings/power/r8a779g0-sysc.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2022 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_POWER_R8A779G0_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A779G0_SYSC_H__ + +/* + * These power domain indices match the Power Domain Register Numbers (PDR) + */ + +#define R8A779G0_PD_A1E0D0C0 0 +#define R8A779G0_PD_A1E0D0C1 1 +#define R8A779G0_PD_A1E0D1C0 2 +#define R8A779G0_PD_A1E0D1C1 3 +#define R8A779G0_PD_A2E0D0 16 +#define R8A779G0_PD_A2E0D1 17 +#define R8A779G0_PD_A3E0 20 +#define R8A779G0_PD_A33DGA 24 +#define R8A779G0_PD_A23DGB 25 +#define R8A779G0_PD_A1DSP0 33 +#define R8A779G0_PD_A2IMP01 34 +#define R8A779G0_PD_A2PSC 35 +#define R8A779G0_PD_A2CV0 36 +#define R8A779G0_PD_A2CV1 37 +#define R8A779G0_PD_A1CNN0 41 +#define R8A779G0_PD_A2CN0 42 +#define R8A779G0_PD_A3IR 43 +#define R8A779G0_PD_A1DSP1 45 +#define R8A779G0_PD_A2IMP23 46 +#define R8A779G0_PD_A2DMA 47 +#define R8A779G0_PD_A2CV2 48 +#define R8A779G0_PD_A2CV3 49 +#define R8A779G0_PD_A1DSP2 53 +#define R8A779G0_PD_A1DSP3 54 +#define R8A779G0_PD_A3VIP0 56 +#define R8A779G0_PD_A3VIP1 57 +#define R8A779G0_PD_A3VIP2 58 +#define R8A779G0_PD_A3ISP0 60 +#define R8A779G0_PD_A3ISP1 61 + +/* Always-on power area */ +#define R8A779G0_PD_ALWAYS_ON 64 + +#endif /* __DT_BINDINGS_POWER_R8A779G0_SYSC_H__*/ diff --git a/include/dt-bindings/reset/amlogic,meson-s4-reset.h b/include/dt-bindings/reset/amlogic,meson-s4-reset.h new file mode 100644 index 0000000000..eab428eb8a --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-s4-reset.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* + * Copyright (c) 2021 Amlogic, Inc. All rights reserved. 
+ * Author: Zelong Dong + * + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H + +/* RESET0 */ +#define RESET_USB_DDR0 0 +#define RESET_USB_DDR1 1 +#define RESET_USB_DDR2 2 +#define RESET_USB_DDR3 3 +#define RESET_USBCTRL 4 +/* 5-7 */ +#define RESET_USBPHY20 8 +#define RESET_USBPHY21 9 +/* 10-15 */ +#define RESET_HDMITX_APB 16 +#define RESET_BRG_VCBUS_DEC 17 +#define RESET_VCBUS 18 +#define RESET_VID_PLL_DIV 19 +#define RESET_VDI6 20 +#define RESET_GE2D 21 +#define RESET_HDMITXPHY 22 +#define RESET_VID_LOCK 23 +#define RESET_VENCL 24 +#define RESET_VDAC 25 +#define RESET_VENCP 26 +#define RESET_VENCI 27 +#define RESET_RDMA 28 +#define RESET_HDMI_TX 29 +#define RESET_VIU 30 +#define RESET_VENC 31 + +/* RESET1 */ +#define RESET_AUDIO 32 +#define RESET_MALI_APB 33 +#define RESET_MALI 34 +#define RESET_DDR_APB 35 +#define RESET_DDR 36 +#define RESET_DOS_APB 37 +#define RESET_DOS 38 +/* 39-47 */ +#define RESET_ETH 48 +/* 49-51 */ +#define RESET_DEMOD 52 +/* 53-63 */ + +/* RESET2 */ +#define RESET_ABUS_ARB 64 +#define RESET_IR_CTRL 65 +#define RESET_TEMPSENSOR_DDR 66 +#define RESET_TEMPSENSOR_PLL 67 +/* 68-71 */ +#define RESET_SMART_CARD 72 +#define RESET_SPICC0 73 +/* 74 */ +#define RESET_RSA 75 +/* 76-79 */ +#define RESET_MSR_CLK 80 +#define RESET_SPIFC 81 +#define RESET_SARADC 82 +/* 83-87 */ +#define RESET_ACODEC 88 +#define RESET_CEC 89 +#define RESET_AFIFO 90 +#define RESET_WATCHDOG 91 +/* 92-95 */ + +/* RESET3 */ +/* 96-127 */ + +/* RESET4 */ +/* 128-131 */ +#define RESET_PWM_AB 132 +#define RESET_PWM_CD 133 +#define RESET_PWM_EF 134 +#define RESET_PWM_GH 135 +#define RESET_PWM_IJ 136 +/* 137 */ +#define RESET_UART_A 138 +#define RESET_UART_B 139 +#define RESET_UART_C 140 +#define RESET_UART_D 141 +#define RESET_UART_E 142 +/* 143 */ +#define RESET_I2C_S_A 144 +#define RESET_I2C_M_A 145 +#define RESET_I2C_M_B 146 +#define RESET_I2C_M_C 147 +#define RESET_I2C_M_D 148 +#define RESET_I2C_M_E 149 +/* 150-151 */ +#define RESET_SD_EMMC_A 152 +#define RESET_SD_EMMC_B 153 +#define RESET_NAND_EMMC 154 +/* 155-159 */ + +/* RESET5 */ +#define RESET_BRG_VDEC_PIPL0 160 +#define RESET_BRG_HEVCF_PIPL0 161 +/* 162 */ +#define RESET_BRG_HCODEC_PIPL0 163 +#define RESET_BRG_GE2D_PIPL0 164 +#define RESET_BRG_VPU_PIPL0 165 +#define RESET_BRG_CPU_PIPL0 166 +#define RESET_BRG_MALI_PIPL0 167 +/* 168 */ +#define RESET_BRG_MALI_PIPL1 169 +/* 170-171 */ +#define RESET_BRG_HEVCF_PIPL1 172 +#define RESET_BRG_HEVCB_PIPL1 173 +/* 174-183 */ +#define RESET_RAMA 184 +/* 185-186 */ +#define RESET_BRG_NIC_VAPB 187 +#define RESET_BRG_NIC_DSU 188 +#define RESET_BRG_NIC_SYSCLK 189 +#define RESET_BRG_NIC_MAIN 190 +#define RESET_BRG_NIC_ALL 191 + +#endif diff --git a/include/dt-bindings/reset/mt7986-resets.h b/include/dt-bindings/reset/mt7986-resets.h new file mode 100644 index 0000000000..af3d16c811 --- /dev/null +++ b/include/dt-bindings/reset/mt7986-resets.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2022 MediaTek Inc. 
+ * Author: Sam Shih + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7986 +#define _DT_BINDINGS_RESET_CONTROLLER_MT7986 + +/* INFRACFG resets */ +#define MT7986_INFRACFG_PEXTP_MAC_SW_RST 6 +#define MT7986_INFRACFG_SSUSB_SW_RST 7 +#define MT7986_INFRACFG_EIP97_SW_RST 8 +#define MT7986_INFRACFG_AUDIO_SW_RST 13 +#define MT7986_INFRACFG_CQ_DMA_SW_RST 14 + +#define MT7986_INFRACFG_TRNG_SW_RST 17 +#define MT7986_INFRACFG_AP_DMA_SW_RST 32 +#define MT7986_INFRACFG_I2C_SW_RST 33 +#define MT7986_INFRACFG_NFI_SW_RST 34 +#define MT7986_INFRACFG_SPI0_SW_RST 35 +#define MT7986_INFRACFG_SPI1_SW_RST 36 +#define MT7986_INFRACFG_UART0_SW_RST 37 +#define MT7986_INFRACFG_UART1_SW_RST 38 +#define MT7986_INFRACFG_UART2_SW_RST 39 +#define MT7986_INFRACFG_AUXADC_SW_RST 43 + +#define MT7986_INFRACFG_APXGPT_SW_RST 66 +#define MT7986_INFRACFG_PWM_SW_RST 68 + +#define MT7986_INFRACFG_SW_RST_NUM 69 + +/* TOPRGU resets */ +#define MT7986_TOPRGU_APMIXEDSYS_SW_RST 0 +#define MT7986_TOPRGU_SGMII0_SW_RST 1 +#define MT7986_TOPRGU_SGMII1_SW_RST 2 +#define MT7986_TOPRGU_INFRA_SW_RST 3 +#define MT7986_TOPRGU_U2PHY_SW_RST 5 +#define MT7986_TOPRGU_PCIE_SW_RST 6 +#define MT7986_TOPRGU_SSUSB_SW_RST 7 +#define MT7986_TOPRGU_ETHDMA_SW_RST 20 +#define MT7986_TOPRGU_CONSYS_SW_RST 23 + +#define MT7986_TOPRGU_SW_RST_NUM 24 + +/* ETHSYS Subsystem resets */ +#define MT7986_ETHSYS_FE_SW_RST 6 +#define MT7986_ETHSYS_PMTR_SW_RST 8 +#define MT7986_ETHSYS_GMAC_SW_RST 23 +#define MT7986_ETHSYS_PPE0_SW_RST 30 +#define MT7986_ETHSYS_PPE1_SW_RST 31 + +#define MT7986_ETHSYS_SW_RST_NUM 32 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7986 */ diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h new file mode 100644 index 0000000000..2e9029c22f --- /dev/null +++ b/include/dt-bindings/reset/mt8186-resets.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2022 MediaTek Inc. 
+ * Author: Runyang Chen + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8186 + +/* TOPRGU resets */ +#define MT8186_TOPRGU_INFRA_SW_RST 0 +#define MT8186_TOPRGU_MM_SW_RST 1 +#define MT8186_TOPRGU_MFG_SW_RST 2 +#define MT8186_TOPRGU_VENC_SW_RST 3 +#define MT8186_TOPRGU_VDEC_SW_RST 4 +#define MT8186_TOPRGU_IMG_SW_RST 5 +#define MT8186_TOPRGU_DDR_SW_RST 6 +#define MT8186_TOPRGU_INFRA_AO_SW_RST 8 +#define MT8186_TOPRGU_CONNSYS_SW_RST 9 +#define MT8186_TOPRGU_APMIXED_SW_RST 10 +#define MT8186_TOPRGU_PWRAP_SW_RST 11 +#define MT8186_TOPRGU_CONN_MCU_SW_RST 12 +#define MT8186_TOPRGU_IPNNA_SW_RST 13 +#define MT8186_TOPRGU_WPE_SW_RST 14 +#define MT8186_TOPRGU_ADSP_SW_RST 15 +#define MT8186_TOPRGU_AUDIO_SW_RST 17 +#define MT8186_TOPRGU_CAM_MAIN_SW_RST 18 +#define MT8186_TOPRGU_CAM_RAWA_SW_RST 19 +#define MT8186_TOPRGU_CAM_RAWB_SW_RST 20 +#define MT8186_TOPRGU_IPE_SW_RST 21 +#define MT8186_TOPRGU_IMG2_SW_RST 22 +#define MT8186_TOPRGU_SW_RST_NUM 23 + +/* MMSYS resets */ +#define MT8186_MMSYS_SW0_RST_B_DISP_DSI0 19 + +/* INFRA resets */ +#define MT8186_INFRA_THERMAL_CTRL_RST 0 +#define MT8186_INFRA_PTP_CTRL_RST 1 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */ diff --git a/include/dt-bindings/reset/sama7g5-reset.h b/include/dt-bindings/reset/sama7g5-reset.h new file mode 100644 index 0000000000..2116f41d04 --- /dev/null +++ b/include/dt-bindings/reset/sama7g5-reset.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef __DT_BINDINGS_RESET_SAMA7G5_H +#define __DT_BINDINGS_RESET_SAMA7G5_H + +#define SAMA7G5_RESET_USB_PHY1 4 +#define SAMA7G5_RESET_USB_PHY2 5 +#define SAMA7G5_RESET_USB_PHY3 6 + +#endif /* __DT_BINDINGS_RESET_SAMA7G5_H */ diff --git a/include/dt-bindings/reset/stm32mp13-resets.h b/include/dt-bindings/reset/stm32mp13-resets.h new file mode 100644 index 0000000000..934864e90d --- /dev/null +++ b/include/dt-bindings/reset/stm32mp13-resets.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. 
+ */ + +#ifndef _DT_BINDINGS_STM32MP13_RESET_H_ +#define _DT_BINDINGS_STM32MP13_RESET_H_ + +#define TIM2_R 13568 +#define TIM3_R 13569 +#define TIM4_R 13570 +#define TIM5_R 13571 +#define TIM6_R 13572 +#define TIM7_R 13573 +#define LPTIM1_R 13577 +#define SPI2_R 13579 +#define SPI3_R 13580 +#define USART3_R 13583 +#define UART4_R 13584 +#define UART5_R 13585 +#define UART7_R 13586 +#define UART8_R 13587 +#define I2C1_R 13589 +#define I2C2_R 13590 +#define SPDIF_R 13594 +#define TIM1_R 13632 +#define TIM8_R 13633 +#define SPI1_R 13640 +#define USART6_R 13645 +#define SAI1_R 13648 +#define SAI2_R 13649 +#define DFSDM_R 13652 +#define FDCAN_R 13656 +#define LPTIM2_R 13696 +#define LPTIM3_R 13697 +#define LPTIM4_R 13698 +#define LPTIM5_R 13699 +#define SYSCFG_R 13707 +#define VREF_R 13709 +#define DTS_R 13712 +#define PMBCTRL_R 13713 +#define LTDC_R 13760 +#define DCMIPP_R 13761 +#define DDRPERFM_R 13768 +#define USBPHY_R 13776 +#define STGEN_R 13844 +#define USART1_R 13888 +#define USART2_R 13889 +#define SPI4_R 13890 +#define SPI5_R 13891 +#define I2C3_R 13892 +#define I2C4_R 13893 +#define I2C5_R 13894 +#define TIM12_R 13895 +#define TIM13_R 13896 +#define TIM14_R 13897 +#define TIM15_R 13898 +#define TIM16_R 13899 +#define TIM17_R 13900 +#define DMA1_R 13952 +#define DMA2_R 13953 +#define DMAMUX1_R 13954 +#define DMA3_R 13955 +#define DMAMUX2_R 13956 +#define ADC1_R 13957 +#define ADC2_R 13958 +#define USBO_R 13960 +#define GPIOA_R 14080 +#define GPIOB_R 14081 +#define GPIOC_R 14082 +#define GPIOD_R 14083 +#define GPIOE_R 14084 +#define GPIOF_R 14085 +#define GPIOG_R 14086 +#define GPIOH_R 14087 +#define GPIOI_R 14088 +#define TSC_R 14095 +#define PKA_R 14146 +#define SAES_R 14147 +#define CRYP1_R 14148 +#define HASH1_R 14149 +#define RNG1_R 14150 +#define AXIMC_R 14160 +#define MDMA_R 14208 +#define MCE_R 14209 +#define ETH1MAC_R 14218 +#define FMC_R 14220 +#define QSPI_R 14222 +#define SDMMC1_R 14224 +#define SDMMC2_R 14225 +#define CRC1_R 14228 +#define USBH_R 14232 +#define ETH2MAC_R 14238 + +/* SCMI reset domain identifiers */ +#define RST_SCMI_LTDC 0 +#define RST_SCMI_MDMA 1 + +#endif /* _DT_BINDINGS_STM32MP13_RESET_H_ */ diff --git a/include/dt-bindings/reset/sunplus,sp7021-reset.h b/include/dt-bindings/reset/sunplus,sp7021-reset.h new file mode 100644 index 0000000000..ab48670738 --- /dev/null +++ b/include/dt-bindings/reset/sunplus,sp7021-reset.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. 
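+ *
+ * These identifiers are meant to be referenced from device tree
+ * consumers, e.g. (an illustrative sketch; the phandle name is
+ * hypothetical):
+ *
+ *	resets = <&rstc RST_UA0>;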
+ */ +#ifndef _DT_BINDINGS_RST_SUNPLUS_SP7021_H +#define _DT_BINDINGS_RST_SUNPLUS_SP7021_H + +#define RST_SYSTEM 0 +#define RST_RTC 1 +#define RST_IOCTL 2 +#define RST_IOP 3 +#define RST_OTPRX 4 +#define RST_NOC 5 +#define RST_BR 6 +#define RST_RBUS_L00 7 +#define RST_SPIFL 8 +#define RST_SDCTRL0 9 +#define RST_PERI0 10 +#define RST_A926 11 +#define RST_UMCTL2 12 +#define RST_PERI1 13 +#define RST_DDR_PHY0 14 +#define RST_ACHIP 15 +#define RST_STC0 16 +#define RST_STC_AV0 17 +#define RST_STC_AV1 18 +#define RST_STC_AV2 19 +#define RST_UA0 20 +#define RST_UA1 21 +#define RST_UA2 22 +#define RST_UA3 23 +#define RST_UA4 24 +#define RST_HWUA 25 +#define RST_DDC0 26 +#define RST_UADMA 27 +#define RST_CBDMA0 28 +#define RST_CBDMA1 29 +#define RST_SPI_COMBO_0 30 +#define RST_SPI_COMBO_1 31 +#define RST_SPI_COMBO_2 32 +#define RST_SPI_COMBO_3 33 +#define RST_AUD 34 +#define RST_USBC0 35 +#define RST_USBC1 36 +#define RST_UPHY0 37 +#define RST_UPHY1 38 +#define RST_I2CM0 39 +#define RST_I2CM1 40 +#define RST_I2CM2 41 +#define RST_I2CM3 42 +#define RST_PMC 43 +#define RST_CARD_CTL0 44 +#define RST_CARD_CTL1 45 +#define RST_CARD_CTL4 46 +#define RST_BCH 47 +#define RST_DDFCH 48 +#define RST_CSIIW0 49 +#define RST_CSIIW1 50 +#define RST_MIPICSI0 51 +#define RST_MIPICSI1 52 +#define RST_HDMI_TX 53 +#define RST_VPOST 54 +#define RST_TGEN 55 +#define RST_DMIX 56 +#define RST_TCON 57 +#define RST_INTERRUPT 58 +#define RST_RGST 59 +#define RST_GPIO 60 +#define RST_RBUS_TOP 61 +#define RST_MAILBOX 62 +#define RST_SPIND 63 +#define RST_I2C2CBUS 64 +#define RST_SEC 65 +#define RST_DVE 66 +#define RST_GPOST0 67 +#define RST_OSD0 68 +#define RST_DISP_PWM 69 +#define RST_UADBG 70 +#define RST_DUMMY_MASTER 71 +#define RST_FIO_CTL 72 +#define RST_FPGA 73 +#define RST_L2SW 74 +#define RST_ICM 75 +#define RST_AXI_GLOBAL 76 + +#endif diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h new file mode 100644 index 0000000000..6e66a802b9 --- /dev/null +++ b/include/dt-bindings/soc/rockchip,vop2.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ + +#ifndef __DT_BINDINGS_ROCKCHIP_VOP2_H +#define __DT_BINDINGS_ROCKCHIP_VOP2_H + +#define ROCKCHIP_VOP2_EP_RGB0 1 +#define ROCKCHIP_VOP2_EP_HDMI0 2 +#define ROCKCHIP_VOP2_EP_EDP0 3 +#define ROCKCHIP_VOP2_EP_MIPI0 4 +#define ROCKCHIP_VOP2_EP_LVDS0 5 +#define ROCKCHIP_VOP2_EP_MIPI1 6 +#define ROCKCHIP_VOP2_EP_LVDS1 7 + +#endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */ diff --git a/include/dt-bindings/soc/samsung,boot-mode.h b/include/dt-bindings/soc/samsung,boot-mode.h new file mode 100644 index 0000000000..47ef1cdd39 --- /dev/null +++ b/include/dt-bindings/soc/samsung,boot-mode.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * Author: Chanho Park + * + * Device Tree bindings for Samsung Boot Mode. 
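+ *
+ * A reboot-mode style node would typically map these values to mode
+ * names, e.g. (an illustrative sketch; the property names are
+ * assumptions):
+ *
+ *	mode-fastboot = <EXYNOSAUTOV9_BOOT_FASTBOOT>;
+ *	mode-recovery = <EXYNOSAUTOV9_BOOT_RECOVERY>;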
+ */
+
+#ifndef __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+#define __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+
+/* Boot mode definitions for Exynos Auto v9 SoC */
+
+#define EXYNOSAUTOV9_BOOT_FASTBOOT 0xfa
+#define EXYNOSAUTOV9_BOOT_BOOTLOADER 0xfc
+#define EXYNOSAUTOV9_BOOT_RECOVERY 0xff
+
+#endif /* __DT_BINDINGS_SAMSUNG_BOOT_MODE_H */
diff --git a/include/dt-bindings/sound/cs35l45.h b/include/dt-bindings/sound/cs35l45.h
new file mode 100644
index 0000000000..076da4b2c2
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l45.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * cs35l45.h -- CS35L45 ALSA SoC audio driver DT bindings header
+ *
+ * Copyright 2022 Cirrus Logic, Inc.
+ */
+
+#ifndef DT_CS35L45_H
+#define DT_CS35L45_H
+
+/*
+ * cirrus,asp-sdout-hiz-ctrl
+ *
+ * TX_HIZ_UNUSED: TX pin high-impedance during unused slots.
+ * TX_HIZ_DISABLED: TX pin high-impedance when all channels disabled.
+ */
+#define CS35L45_ASP_TX_HIZ_UNUSED 0x1
+#define CS35L45_ASP_TX_HIZ_DISABLED 0x2
+
+#endif /* DT_CS35L45_H */
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
new file mode 100644
index 0000000000..f5e9f1db09
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_SOUND_QCOM_WCD9335_H
+#define __DT_SOUND_QCOM_WCD9335_H
+
+#define AIF1_PB 0
+#define AIF1_CAP 1
+#define AIF2_PB 2
+#define AIF2_CAP 3
+#define AIF3_PB 4
+#define AIF3_CAP 5
+#define AIF4_PB 6
+#define NUM_CODEC_DAIS 7
+
+#endif
diff --git a/include/keys/trusted_caam.h b/include/keys/trusted_caam.h
new file mode 100644
index 0000000000..73fe2f32f6
--- /dev/null
+++ b/include/keys/trusted_caam.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Pengutronix, Ahmad Fatoum
+ */
+
+#ifndef __CAAM_TRUSTED_KEY_H
+#define __CAAM_TRUSTED_KEY_H
+
+extern struct trusted_key_ops trusted_key_caam_ops;
+
+#endif
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
new file mode 100644
index 0000000000..09c2b34d1c
--- /dev/null
+++ b/include/kunit/resource.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource API for test managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov
+ */
+
+#ifndef _KUNIT_RESOURCE_H
+#define _KUNIT_RESOURCE_H
+
+#include <kunit/test.h>
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct kunit_resource;
+
+typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
+typedef void (*kunit_resource_free_t)(struct kunit_resource *);
+
+/**
+ * struct kunit_resource - represents a *test managed resource*
+ * @data: for the user to store arbitrary data.
+ * @name: optional name
+ * @free: a user supplied function to free the resource.
+ *
+ * Represents a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. This cleanup is performed by the 'free'
+ * function. The struct kunit_resource itself is freed automatically with
+ * kfree() if it was allocated by KUnit (e.g., by kunit_alloc_resource()), but
+ * must be freed by the user otherwise.
+ *
+ * Resources are reference counted so if a resource is retrieved via
+ * kunit_alloc_and_get_resource() or kunit_find_resource(), we need
+ * to call kunit_put_resource() to reduce the resource reference count
+ * when finished with it.
Note that kunit_alloc_resource() does not require a
+ * kunit_put_resource() because it does not retrieve the resource itself.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_kmalloc_params {
+ * size_t size;
+ * gfp_t gfp;
+ * };
+ *
+ * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+ * {
+ * struct kunit_kmalloc_params *params = context;
+ * res->data = kmalloc(params->size, params->gfp);
+ *
+ * if (!res->data)
+ * return -ENOMEM;
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_kmalloc_free(struct kunit_resource *res)
+ * {
+ * kfree(res->data);
+ * }
+ *
+ * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+ * {
+ * struct kunit_kmalloc_params params;
+ *
+ * params.size = size;
+ * params.gfp = gfp;
+ *
+ * return kunit_alloc_resource(test, kunit_kmalloc_init,
+ * kunit_kmalloc_free, &params);
+ * }
+ *
+ * Resources can also be named, with lookup/removal done on a name
+ * basis also. kunit_add_named_resource(), kunit_find_named_resource()
+ * and kunit_destroy_named_resource(). Resource names must be
+ * unique within the test instance.
+ */
+struct kunit_resource {
+ void *data;
+ const char *name;
+ kunit_resource_free_t free;
+
+ /* private: internal use only. */
+ struct kref refcount;
+ struct list_head node;
+ bool should_kfree;
+};
+
+/**
+ * kunit_get_resource() - Hold resource for use. Should not need to be used
+ * by most users as we automatically get resources
+ * retrieved by kunit_find_resource*().
+ * @res: resource
+ */
+static inline void kunit_get_resource(struct kunit_resource *res)
+{
+ kref_get(&res->refcount);
+}
+
+/*
+ * Called when refcount reaches zero via kunit_put_resource();
+ * should not be called directly.
+ */
+static inline void kunit_release_resource(struct kref *kref)
+{
+ struct kunit_resource *res = container_of(kref, struct kunit_resource,
+ refcount);
+
+ if (res->free)
+ res->free(res);
+
+ /* 'res' is still valid here: res->free only frees res->data, never
+ * 'res' itself, which is freed below when should_kfree is set.
+ */
+ if (res->should_kfree)
+ kfree(res);
+}
+
+/**
+ * kunit_put_resource() - When caller is done with retrieved resource,
+ * kunit_put_resource() should be called to drop
+ * reference count. The resource list maintains
+ * a reference count on resources, so if no users
+ * are utilizing a resource and it is removed from
+ * the resource list, it will be freed via the
+ * associated free function (if any). Only
+ * needs to be used if we alloc_and_get() or
+ * find() resource.
+ * @res: resource
+ */
+static inline void kunit_put_resource(struct kunit_resource *res)
+{
+ kref_put(&res->refcount, kunit_release_resource);
+}
+
+/**
+ * __kunit_add_resource() - Internal helper to add a resource.
+ *
+ * res->should_kfree is not initialised.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the result (if needed). If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data);
+
+/**
+ * kunit_add_resource() - Add a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the result (if needed).
If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+static inline int kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ res->should_kfree = false;
+ return __kunit_add_resource(test, init, free, res, data);
+}
+
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test, const char *name);
+
+/**
+ * kunit_add_named_resource() - Add a named *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource data, if needed.
+ * @free: a user-supplied function to free the resource data, if needed.
+ * @res: The resource.
+ * @name: name to be set for resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+static inline int kunit_add_named_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ const char *name,
+ void *data)
+{
+ struct kunit_resource *existing;
+
+ if (!name)
+ return -EINVAL;
+
+ existing = kunit_find_named_resource(test, name);
+ if (existing) {
+ kunit_put_resource(existing);
+ return -EEXIST;
+ }
+
+ res->name = name;
+ res->should_kfree = false;
+
+ return __kunit_add_resource(test, init, free, res, data);
+}
+
+/**
+ * kunit_alloc_and_get_resource() - Allocates and returns a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * This is effectively identical to kunit_alloc_resource(), but returns the
+ * struct kunit_resource pointer, not just the 'data' pointer. It therefore
+ * also increments the resource's refcount, so kunit_put_resource() should be
+ * called when you've finished with it.
+ *
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline struct kunit_resource *
+kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ res->should_kfree = true;
+
+ ret = __kunit_add_resource(test, init, free, res, context);
+ if (!ret) {
+ /*
+ * bump refcount for get; kunit_put_resource() should be called
+ * when done.
+ */
+ kunit_get_resource(res);
+ return res;
+ }
+ return NULL;
+}
+
+/**
+ * kunit_alloc_resource() - Allocates a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL + * @context: for the user to pass in arbitrary data to the init function. + * + * Allocates a *test managed resource*, a resource which will automatically be + * cleaned up at the end of a test case. See &struct kunit_resource for an + * example. + * + * Note: KUnit needs to allocate memory for a kunit_resource object. You must + * specify an @internal_gfp that is compatible with the use context of your + * resource. + */ +static inline void *kunit_alloc_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + gfp_t internal_gfp, + void *context) +{ + struct kunit_resource *res; + + res = kzalloc(sizeof(*res), internal_gfp); + if (!res) + return NULL; + + res->should_kfree = true; + if (!__kunit_add_resource(test, init, free, res, context)) + return res->data; + + return NULL; +} + +typedef bool (*kunit_resource_match_t)(struct kunit *test, + struct kunit_resource *res, + void *match_data); + +/** + * kunit_resource_instance_match() - Match a resource with the same instance. + * @test: Test case to which the resource belongs. + * @res: The resource. + * @match_data: The resource pointer to match against. + * + * An instance of kunit_resource_match_t that matches a resource whose + * allocation matches @match_data. + */ +static inline bool kunit_resource_instance_match(struct kunit *test, + struct kunit_resource *res, + void *match_data) +{ + return res->data == match_data; +} + +/** + * kunit_resource_name_match() - Match a resource with the same name. + * @test: Test case to which the resource belongs. + * @res: The resource. + * @match_name: The name to match against. + */ +static inline bool kunit_resource_name_match(struct kunit *test, + struct kunit_resource *res, + void *match_name) +{ + return res->name && strcmp(res->name, match_name) == 0; +} + +/** + * kunit_find_resource() - Find a resource using match function/data. + * @test: Test case to which the resource belongs. + * @match: match function to be applied to resources/match data. + * @match_data: data to be used in matching. + */ +static inline struct kunit_resource * +kunit_find_resource(struct kunit *test, + kunit_resource_match_t match, + void *match_data) +{ + struct kunit_resource *res, *found = NULL; + unsigned long flags; + + spin_lock_irqsave(&test->lock, flags); + + list_for_each_entry_reverse(res, &test->resources, node) { + if (match(test, res, (void *)match_data)) { + found = res; + kunit_get_resource(found); + break; + } + } + + spin_unlock_irqrestore(&test->lock, flags); + + return found; +} + +/** + * kunit_find_named_resource() - Find a resource using match name. + * @test: Test case to which the resource belongs. + * @name: match name. + */ +static inline struct kunit_resource * +kunit_find_named_resource(struct kunit *test, + const char *name) +{ + return kunit_find_resource(test, kunit_resource_name_match, + (void *)name); +} + +/** + * kunit_destroy_resource() - Find a kunit_resource and destroy it. + * @test: Test case to which the resource belongs. + * @match: Match function. Returns whether a given resource matches @match_data. + * @match_data: Data passed into @match. + * + * RETURNS: + * 0 if kunit_resource is found and freed, -ENOENT if not found. 
+ */
+int kunit_destroy_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data);
+
+static inline int kunit_destroy_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_destroy_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_remove_resource() - remove resource from resource list associated with
+ * test.
+ * @test: The test context object.
+ * @res: The resource to be removed.
+ *
+ * Note that the resource will not be immediately freed since it is likely
+ * the caller has a reference to it via alloc_and_get() or find();
+ * in this case a final call to kunit_put_resource() is required.
+ */
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+
+#endif /* _KUNIT_RESOURCE_H */
diff --git a/include/linux/aperture.h b/include/linux/aperture.h
new file mode 100644
index 0000000000..442f15a57c
--- /dev/null
+++ b/include/linux/aperture.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _LINUX_APERTURE_H_
+#define _LINUX_APERTURE_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct platform_device;
+
+#if defined(CONFIG_APERTURE_HELPERS)
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size);
+
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ bool primary, const char *name);
+
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
+#else
+static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ bool primary, const char *name)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ return 0;
+}
+#endif
+
+/**
+ * aperture_remove_all_conflicting_devices - remove all existing framebuffers
+ * @primary: also kick vga16fb if present; only relevant for VGA devices
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int aperture_remove_all_conflicting_devices(bool primary, const char *name)
+{
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, primary, name);
}
+
+#endif
diff --git a/include/linux/base64.h b/include/linux/base64.h
new file mode 100644
index 0000000000..660d4cb1ef
--- /dev/null
+++ b/include/linux/base64.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64 encoding, lifted from fs/crypto/fname.c.
+ */
+
+#ifndef _LINUX_BASE64_H
+#define _LINUX_BASE64_H
+
+#include <linux/types.h>
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int base64_encode(const u8 *src, int len, char *dst);
+int base64_decode(const char *src, int len, u8 *dst);
+
+#endif /* _LINUX_BASE64_H */
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644
index 0000000000..736b8bb91b
--- /dev/null
+++ b/include/linux/clk/pxa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x) (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
new file mode 100644
index 0000000000..c50b5670c4
--- /dev/null
+++ b/include/linux/context_tracking_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H
+#define _LINUX_CONTEXT_TRACKING_IRQ_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+void ct_irq_enter(void);
+void ct_irq_exit(void);
+void ct_irq_enter_irqson(void);
+void ct_irq_exit_irqson(void);
+void ct_nmi_enter(void);
+void ct_nmi_exit(void);
+#else
+static inline void ct_irq_enter(void) { }
+static inline void ct_irq_exit(void) { }
+static inline void ct_irq_enter_irqson(void) { }
+static inline void ct_irq_exit_irqson(void) { }
+static inline void ct_nmi_enter(void) { }
+static inline void ct_nmi_exit(void) { }
+#endif
+
+#endif
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
new file mode 100644
index 0000000000..552b817ab1
--- /dev/null
+++ b/include/linux/dm-verity-loadpin.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_DM_VERITY_LOADPIN_H
+#define __LINUX_DM_VERITY_LOADPIN_H
+
+#include <linux/list.h>
+
+struct block_device;
+
+extern struct list_head dm_verity_loadpin_trusted_root_digests;
+
+struct dm_verity_loadpin_trusted_root_digest {
+ struct list_head node;
+ unsigned int len;
+ u8 data[];
+};
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
+bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev);
+#else
+static inline bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+{
+ return false;
+}
+#endif
+
+#endif /* __LINUX_DM_VERITY_LOADPIN_H */
diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h
new file mode 100644
index 0000000000..f487a4fa10
--- /dev/null
+++ b/include/linux/dma/imx-dma.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+#ifndef __LINUX_DMA_IMX_H
+#define __LINUX_DMA_IMX_H
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+ IMX_DMATYPE_SSI, /* MCU domain SSI */
+ IMX_DMATYPE_SSI_SP, /* Shared SSI */
+ IMX_DMATYPE_MMC, /* MMC */
+ IMX_DMATYPE_SDHC, /* SDHC */
+ IMX_DMATYPE_UART, /* MCU domain UART */
+ IMX_DMATYPE_UART_SP, /* Shared UART */
+ IMX_DMATYPE_FIRI, /* FIRI */
+ IMX_DMATYPE_CSPI, /* MCU domain CSPI */
+ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
+ IMX_DMATYPE_SIM, /* SIM */
+ IMX_DMATYPE_ATA, /* ATA */
+ IMX_DMATYPE_CCM, /* CCM */
+ IMX_DMATYPE_EXT, /* External peripheral */
+ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
+ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
+ IMX_DMATYPE_DSP, /* DSP */
+ IMX_DMATYPE_MEMORY, /* Memory */
+ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+ IMX_DMATYPE_SPDIF, /* SPDIF */
+ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
+ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
+ IMX_DMATYPE_SAI, /* SAI */
+ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+};
+
+enum imx_dma_prio {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+ int dma_request; /* DMA request line */
+ int dma_request2; /* secondary DMA request line */
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+ return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||
+ !strcmp(chan->device->dev->driver->name, "imx-dma");
+}
+
+/**
+ * struct sdma_peripheral_config - SDMA config for audio
+ * @n_fifos_src: Number of FIFOs for recording
+ * @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording, 0 means all FIFOs are
+ * continuous, 1 means one word of stride between FIFOs. The
+ * stride must be the same between all FIFOs.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: number of words per FIFO fetch/fill, 1 means
+ * one channel per FIFO, 2 means two channels per FIFO.
+ * If 'n_fifos_src = 4' and 'words_per_fifo = 2', the
+ * first two words (channels) are fetched from FIFO0,
+ * the next two from FIFO1, and so on; after the last
+ * FIFO3 is fetched, it wraps back to FIFO0.
+ * @sw_done: Use software done. Needed for PDM (micfil)
+ *
+ * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
+ * registers. For multichannel recording/playback the SAI/micfil have
+ * one FIFO register per channel and the SDMA engine has to read/write
+ * the next channel from/to the next register and wrap around to the
+ * first register when all channels are handled. The number of active
+ * channels must be communicated to the SDMA engine using this struct.
+ */
+struct sdma_peripheral_config {
+ int n_fifos_src;
+ int n_fifos_dst;
+ int stride_fifos_src;
+ int stride_fifos_dst;
+ int words_per_fifo;
+ bool sw_done;
+};
+
+#endif /* __LINUX_DMA_IMX_H */
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
new file mode 100644
index 0000000000..c2b1d4fd59
--- /dev/null
+++ b/include/linux/export-internal.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Please do not include this explicitly.
+ * This is used by C files generated by modpost.
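+ *
+ * For illustration, a generated *.mod.c file is expected to contain
+ * lines of the form (symbol name and CRC value hypothetical):
+ *
+ *	SYMBOL_CRC(some_export, 0x12345678, "_gpl");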
+ */
+
+#ifndef __LINUX_EXPORT_INTERNAL_H__
+#define __LINUX_EXPORT_INTERNAL_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* __used is needed to keep __crc_* for LTO */
+#define SYMBOL_CRC(sym, crc, sec) \
+ u32 __section("___kcrctab" sec "+" #sym) __used __crc_##sym = crc
+
+#endif /* __LINUX_EXPORT_INTERNAL_H__ */
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
new file mode 100644
index 0000000000..28fd313340
--- /dev/null
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef MTK_ADSP_IPC_H
+#define MTK_ADSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MTK_ADSP_IPC_REQ 0
+#define MTK_ADSP_IPC_RSP 1
+#define MTK_ADSP_IPC_OP_REQ 0x1
+#define MTK_ADSP_IPC_OP_RSP 0x2
+
+enum {
+ MTK_ADSP_MBOX_REPLY,
+ MTK_ADSP_MBOX_REQUEST,
+ MTK_ADSP_MBOX_NUM,
+};
+
+struct mtk_adsp_ipc;
+
+struct mtk_adsp_ipc_ops {
+ void (*handle_reply)(struct mtk_adsp_ipc *ipc);
+ void (*handle_request)(struct mtk_adsp_ipc *ipc);
+};
+
+struct mtk_adsp_chan {
+ struct mtk_adsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct mtk_adsp_ipc {
+ struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM];
+ struct device *dev;
+ struct mtk_adsp_ipc_ops *ops;
+ void *private_data;
+};
+
+static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
+{
+ if (!ipc)
+ return;
+
+ ipc->private_data = data;
+}
+
+static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
+{
+ if (!ipc)
+ return NULL;
+
+ return ipc->private_data;
+}
+
+int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t op);
+
+#endif /* MTK_ADSP_IPC_H */
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
new file mode 100644
index 0000000000..d88c46ca82
--- /dev/null
+++ b/include/linux/gfp_types.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+/* Plain integer GFP bitmasks. Do not use this directly.
*/ +#define ___GFP_DMA 0x01u +#define ___GFP_HIGHMEM 0x02u +#define ___GFP_DMA32 0x04u +#define ___GFP_MOVABLE 0x08u +#define ___GFP_RECLAIMABLE 0x10u +#define ___GFP_HIGH 0x20u +#define ___GFP_IO 0x40u +#define ___GFP_FS 0x80u +#define ___GFP_ZERO 0x100u +#define ___GFP_ATOMIC 0x200u +#define ___GFP_DIRECT_RECLAIM 0x400u +#define ___GFP_KSWAPD_RECLAIM 0x800u +#define ___GFP_WRITE 0x1000u +#define ___GFP_NOWARN 0x2000u +#define ___GFP_RETRY_MAYFAIL 0x4000u +#define ___GFP_NOFAIL 0x8000u +#define ___GFP_NORETRY 0x10000u +#define ___GFP_MEMALLOC 0x20000u +#define ___GFP_COMP 0x40000u +#define ___GFP_NOMEMALLOC 0x80000u +#define ___GFP_HARDWALL 0x100000u +#define ___GFP_THISNODE 0x200000u +#define ___GFP_ACCOUNT 0x400000u +#define ___GFP_ZEROTAGS 0x800000u +#ifdef CONFIG_KASAN_HW_TAGS +#define ___GFP_SKIP_ZERO 0x1000000u +#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u +#define ___GFP_SKIP_KASAN_POISON 0x4000000u +#else +#define ___GFP_SKIP_ZERO 0 +#define ___GFP_SKIP_KASAN_UNPOISON 0 +#define ___GFP_SKIP_KASAN_POISON 0 +#endif +#ifdef CONFIG_LOCKDEP +#define ___GFP_NOLOCKDEP 0x8000000u +#else +#define ___GFP_NOLOCKDEP 0 +#endif +/* If the above are modified, __GFP_BITS_SHIFT may need updating */ + +/* + * Physical address zone modifiers (see linux/mmzone.h - low four bits) + * + * Do not put any conditional on these. If necessary modify the definitions + * without the underscores and use them consistently. The definitions here may + * be used in bit comparisons. + */ +#define __GFP_DMA ((__force gfp_t)___GFP_DMA) +#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) +#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ +#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) + +/** + * DOC: Page mobility and placement hints + * + * Page mobility and placement hints + * --------------------------------- + * + * These flags provide hints about how mobile the page is. Pages with similar + * mobility are placed within the same pageblocks to minimise problems due + * to external fragmentation. + * + * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be + * moved by page migration during memory compaction or can be reclaimed. + * + * %__GFP_RECLAIMABLE is used for slab allocations that specify + * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. + * + * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible, + * these pages will be spread between local zones to avoid all the dirty + * pages being in one zone (fair zone allocation policy). + * + * %__GFP_HARDWALL enforces the cpuset memory allocation policy. + * + * %__GFP_THISNODE forces the allocation to be satisfied from the requested + * node with no fallbacks or placement policy enforcements. + * + * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) +#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) + +/** + * DOC: Watermark modifiers + * + * Watermark modifiers -- controls access to emergency reserves + * ------------------------------------------------------------ + * + * %__GFP_HIGH indicates that the caller is high-priority and that granting + * the request is necessary before the system can make forward progress. 
+ * For example, creating an IO context to clean pages.
+ *
+ * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with %__GFP_HIGH.
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly e.g. process exiting or swapping. Users should either be
+ * the MM or co-ordinate closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should always be considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail so they are implicitly
+ * non-failing by default (with some exceptions like OOM victims might fail so
+ * the caller still has to check for failures) while costly requests try to be
+ * not disruptive and back off even without invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules:
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like the OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure.
The flag is suitable when failure can easily be + * handled at small cost, such as reduced throughput + * + * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim + * procedures that have previously failed if there is some indication + * that progress has been made else where. It can wait for other + * tasks to attempt high level approaches to freeing memory such as + * compaction (which removes fragmentation) and page-out. + * There is still a definite limit to the number of retries, but it is + * a larger limit than with %__GFP_NORETRY. + * Allocations with this flag may fail, but only when there is + * genuinely little unused memory. While these allocations do not + * directly trigger the OOM killer, their failure indicates that + * the system is likely to need to use the OOM killer soon. The + * caller must handle failure, but can reasonably do so by failing + * a higher-level request, or completing it only in a much less + * efficient manner. + * If the allocation does fail, and the caller is in a position to + * free some non-essential memory, doing so could benefit the system + * as a whole. + * + * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller + * cannot handle allocation failures. The allocation could block + * indefinitely but will never return with failure. Testing for + * failure is pointless. + * New users should be evaluated carefully (and the flag should be + * used only when there is no reasonable failure policy) but it is + * definitely preferable to use the flag rather than opencode endless + * loop around allocator. + * Using this flag for costly allocations is _highly_ discouraged. + */ +#define __GFP_IO ((__force gfp_t)___GFP_IO) +#define __GFP_FS ((__force gfp_t)___GFP_FS) +#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ +#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ +#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) +#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) + +/** + * DOC: Action modifiers + * + * Action modifiers + * ---------------- + * + * %__GFP_NOWARN suppresses allocation failure reports. + * + * %__GFP_COMP address compound page metadata. + * + * %__GFP_ZERO returns a zeroed page on success. + * + * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself + * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that + * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting + * memory tags at the same time as zeroing memory has minimal additional + * performace impact. + * + * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation. + * Only effective in HW_TAGS mode. + * + * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation. + * Typically, used for userspace pages. Only effective in HW_TAGS mode. 
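+ *
+ * As an illustrative sketch only (not an API defined by this header; the
+ * "page" and "order" variables are assumed to be supplied by the caller):
+ * a driver that wants a zeroed, physically contiguous buffer, can tolerate
+ * failure and wants no failure report might combine the modifiers above as::
+ *
+ *	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
+ *			   __GFP_NORETRY, order);
+ *	if (!page)
+ *		return -ENOMEM;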
+ */ +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) +#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO) +#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON) +#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) + +/* Disable lockdep for GFP context tracking */ +#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) + +/* Room for N __GFP_FOO bits */ +#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) + +/** + * DOC: Useful GFP flag combinations + * + * Useful GFP flag combinations + * ---------------------------- + * + * Useful GFP flag combinations that are commonly used. It is recommended + * that subsystems start with one of these combinations and then set/clear + * %__GFP_FOO flags as necessary. + * + * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower + * watermark is applied to allow access to "atomic reserves". + * The current implementation doesn't support NMI and a few other strict + * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT. + * + * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires + * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. + * + * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is + * accounted to kmemcg. + * + * %GFP_NOWAIT is for kernel allocations that should not stall for direct + * reclaim, start physical IO or use any filesystem callback. + * + * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages + * that do not require the starting of any physical IO. + * Please try to avoid using this flag directly and instead use + * memalloc_noio_{save,restore} to mark the whole scope which cannot + * perform any IO with a short explanation why. All allocation requests + * will inherit GFP_NOIO implicitly. + * + * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. + * Please try to avoid using this flag directly and instead use + * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't + * recurse into the FS layer with a short explanation why. All allocation + * requests will inherit GFP_NOFS implicitly. + * + * %GFP_USER is for userspace allocations that also need to be directly + * accessible by the kernel or hardware. It is typically used by hardware + * for buffers that are mapped to userspace (e.g. graphics) that hardware + * still must DMA to. cpuset limits are enforced for these allocations. + * + * %GFP_DMA exists for historical reasons and should be avoided where possible. + * The flag indicates that the caller requires that the lowest zone be + * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but + * it would require careful auditing as some users really require it and + * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the + * lowest zone as a type of emergency reserve. + * + * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit + * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory + * because the DMA32 kmalloc cache array is not implemented. + * (Reason: there is no such user in the kernel).
+ * + * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, + * do not need to be directly accessible by the kernel but that cannot + * move once in use. An example may be a hardware allocation that maps + * data directly into userspace but has no addressing limitations. + * + * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not + * need direct access to but can use kmap() when access is required. They + * are expected to be movable via page reclaim or page migration. Typically, + * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE. + * + * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They + * are compound allocations that will generally fail quickly if memory is not + * available and will not wake kswapd/kcompactd on failure. The _LIGHT + * version does not attempt reclaim/compaction at all and is by default used + * in the page fault path, while the non-light is used by khugepaged. + */ +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) +#define GFP_NOIO (__GFP_RECLAIM) +#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) +#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_DMA __GFP_DMA +#define GFP_DMA32 __GFP_DMA32 +#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ + __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON) +#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) +#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) + +#endif /* __LINUX_GFP_TYPES_H */ diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h new file mode 100644 index 0000000000..72462737a6 --- /dev/null +++ b/include/linux/host1x_context_bus.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021, NVIDIA Corporation. All rights reserved. + */ + +#ifndef __LINUX_HOST1X_CONTEXT_BUS_H +#define __LINUX_HOST1X_CONTEXT_BUS_H + +#include <linux/device.h> + +#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS +extern struct bus_type host1x_context_device_bus_type; +#endif + +#endif diff --git a/include/linux/hte.h b/include/linux/hte.h new file mode 100644 index 0000000000..8289055061 --- /dev/null +++ b/include/linux/hte.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_HTE_H +#define __LINUX_HTE_H + +#include <linux/errno.h> + +struct hte_chip; +struct hte_device; +struct of_phandle_args; + +/** + * enum hte_edge - HTE line edge flags. + * + * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer will set up + * edges, for example during the request irq call. + * @HTE_RISING_EDGE_TS: Rising edge. + * @HTE_FALLING_EDGE_TS: Falling edge. + * + */ +enum hte_edge { + HTE_EDGE_NO_SETUP = 1U << 0, + HTE_RISING_EDGE_TS = 1U << 1, + HTE_FALLING_EDGE_TS = 1U << 2, +}; + +/** + * enum hte_return - HTE subsystem return values used during callback. + * + * @HTE_CB_HANDLED: The consumer handled the data. + * @HTE_RUN_SECOND_CB: The consumer needs further processing, in that case + * the HTE subsystem calls the secondary callback provided by the consumer, + * where it is allowed to sleep. + */ +enum hte_return { + HTE_CB_HANDLED, + HTE_RUN_SECOND_CB, +}; + +/** + * struct hte_ts_data - HTE timestamp data. + * + * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps. + * @raw_level: Level of the line at the timestamp if the provider supports it, + * -1 otherwise. + */ +struct hte_ts_data { + u64 tsc; + u64 seq; + int raw_level; +}; + +/** + * struct hte_clk_info - Clock source info that the HTE provider uses to timestamp. + * + * @hz: Supported clock rate in Hz, for example a 1 kHz clock = 1000. + * @type: Supported clock type. + */ +struct hte_clk_info { + u64 hz; + clockid_t type; +}; + +/** + * typedef hte_ts_cb_t - HTE timestamp data processing primary callback. + * + * The callback is used to push timestamp data to the client and it is + * not allowed to sleep. + * + * @ts: HW timestamp data. + * @data: Client supplied data. + */ +typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data); + +/** + * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback. + * + * This is used when the client needs further processing where it is + * allowed to sleep. + * + * @data: Client supplied data. + * + */ +typedef enum hte_return (*hte_ts_sec_cb_t)(void *data); + +/** + * struct hte_line_attr - Line attributes. + * + * @line_id: The logical ID understood by the consumers and providers. + * @line_data: Line data related to line_id. + * @edge_flags: Edge setup flags. + * @name: Descriptive name of the entity that is being monitored for the + * hardware timestamping. If null, the HTE core will construct the name. + * + */ +struct hte_line_attr { + u32 line_id; + void *line_data; + unsigned long edge_flags; + const char *name; +}; + +/** + * struct hte_ts_desc - HTE timestamp descriptor. + * + * This structure is a communication token between the consumers and the + * subsystem, and between the subsystem and the providers. + * + * @attr: The line attributes. + * @hte_data: Subsystem's private data, set by the HTE subsystem. + */ +struct hte_ts_desc { + struct hte_line_attr attr; + void *hte_data; +}; + +/** + * struct hte_ops - HTE operations set by providers. + * + * @request: Hook for requesting a HTE timestamp. Returns 0 on success, + * non-zero for failures. + * @release: Hook for releasing a HTE timestamp. Returns 0 on success, + * non-zero for failures. + * @enable: Hook to enable the specified timestamp. Returns 0 on success, + * non-zero for failures. + * @disable: Hook to disable the specified timestamp. Returns 0 on success, + * non-zero for failures. + * @get_clk_src_info: Hook to get the clock information the provider uses + * to timestamp. Returns 0 for success and negative error code for failure. On + * success the HTE subsystem fills up the provided struct hte_clk_info. + * + * The xlated_id parameter is used to communicate between the HTE subsystem and + * the providers and is translated by the provider. + */ +struct hte_ops { + int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc, + u32 xlated_id); + int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc, + u32 xlated_id); + int (*enable)(struct hte_chip *chip, u32 xlated_id); + int (*disable)(struct hte_chip *chip, u32 xlated_id); + int (*get_clk_src_info)(struct hte_chip *chip, + struct hte_clk_info *ci); +}; + +/** + * struct hte_chip - Abstract HTE chip. + * + * @name: functional name of the HTE IP block. + * @dev: device providing the HTE. + * @ops: callbacks for this HTE. + * @nlines: number of lines/signals supported by this chip. + * @xlate_of: Callback which translates consumer supplied logical ids to + * physical ids; returns 0 for success and negative for failures. + * On success it stores the physical id (between 0 and @nlines) in the + * xlated_id parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node. + * @match_from_linedata: Match HTE device using the line_data. + * @of_hte_n_cells: Number of cells used to form the HTE specifier. + * @gdev: HTE subsystem abstract device, internal to the HTE subsystem. + * @data: chip specific private data. + */ +struct hte_chip { + const char *name; + struct device *dev; + const struct hte_ops *ops; + u32 nlines; + int (*xlate_of)(struct hte_chip *gc, + const struct of_phandle_args *args, + struct hte_ts_desc *desc, u32 *xlated_id); + int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc, + u32 *xlated_id); + bool (*match_from_linedata)(const struct hte_chip *chip, + const struct hte_ts_desc *hdesc); + u8 of_hte_n_cells; + + struct hte_device *gdev; + void *data; +}; + +#if IS_ENABLED(CONFIG_HTE) +/* HTE APIs for the providers */ +int devm_hte_register_chip(struct hte_chip *chip); +int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id, + struct hte_ts_data *data); + +/* HTE APIs for the consumers */ +int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id, + unsigned long edge_flags, const char *name, + void *data); +int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index); +int hte_ts_put(struct hte_ts_desc *desc); +int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb, + hte_ts_sec_cb_t tcb, void *data); +int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc, + hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data); +int of_hte_req_count(struct device *dev); +int hte_enable_ts(struct hte_ts_desc *desc); +int hte_disable_ts(struct hte_ts_desc *desc); +int hte_get_clk_src_info(const struct hte_ts_desc *desc, + struct hte_clk_info *ci); + +#else /* !CONFIG_HTE */ +static inline int devm_hte_register_chip(struct hte_chip *chip) +{ + return -EOPNOTSUPP; +} + +static inline int hte_push_ts_ns(const struct hte_chip *chip, + u32 xlated_id, + struct hte_ts_data *data) +{ + return -EOPNOTSUPP; +} + +static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id, + unsigned long edge_flags, + const char *name, void *data) +{ + return -EOPNOTSUPP; +} + +static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, + int index) +{ + return -EOPNOTSUPP; +} + +static inline int hte_ts_put(struct hte_ts_desc *desc) +{ + return -EOPNOTSUPP; +} + +static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb, + hte_ts_sec_cb_t tcb, void *data) +{ + return -EOPNOTSUPP; +} + +static inline int devm_hte_request_ts_ns(struct device *dev, + struct hte_ts_desc *desc, + hte_ts_cb_t cb, + hte_ts_sec_cb_t tcb, + void *data) +{ + return -EOPNOTSUPP; +} + +static inline int of_hte_req_count(struct device *dev) +{ + return -EOPNOTSUPP; +} + +static inline int hte_enable_ts(struct hte_ts_desc *desc) +{ + return -EOPNOTSUPP; +} + +static inline int hte_disable_ts(struct hte_ts_desc *desc) +{ + return -EOPNOTSUPP; +} + +static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc, + struct hte_clk_info *ci) +{ + return -EOPNOTSUPP; +} +#endif /* !CONFIG_HTE */ + +#endif diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h new file mode 100644 index 0000000000..677a25d44d --- /dev/null +++ b/include/linux/io_uring_types.h @@ -0,0 +1,581 @@ +#ifndef IO_URING_TYPES_H +#define IO_URING_TYPES_H + +#include <linux/blkdev.h> +#include <linux/task_work.h> +#include <linux/bitmap.h> +#include <linux/llist.h> +#include <uapi/linux/io_uring.h> + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct 
io_wq_work_node *last; +}; + +struct io_wq_work { + struct io_wq_work_node list; + unsigned flags; + /* place it here instead of io_kiocb as it fills padding and saves 4B */ + int cancel_seq; +}; + +struct io_fixed_file { + /* file * with additional FFS_* flags */ + unsigned long file_ptr; +}; + +struct io_file_table { + struct io_fixed_file *files; + unsigned long *bitmap; + unsigned int alloc_hint; +}; + +struct io_notif; +struct io_notif_slot; + +struct io_hash_bucket { + spinlock_t lock; + struct hlist_head list; +} ____cacheline_aligned_in_smp; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned hash_bits; +}; + +/* + * Arbitrary limit, can be raised if need be + */ +#define IO_RINGFD_REG_MAX 16 + +struct io_uring_task { + /* submission side */ + int cached_refs; + const struct io_ring_ctx *last; + struct io_wq *io_wq; + struct file *registered_rings[IO_RINGFD_REG_MAX]; + + struct xarray xa; + struct wait_queue_head wait; + atomic_t in_idle; + atomic_t inflight_tracked; + struct percpu_counter inflight; + + struct { /* task_work */ + struct llist_head task_list; + struct callback_head task_work; + } ____cacheline_aligned_in_smp; +}; + +struct io_uring { + u32 head ____cacheline_aligned_in_smp; + u32 tail ____cacheline_aligned_in_smp; +}; + +/* + * This data is shared with the application through the mmap at offsets + * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING. + * + * The offsets to the member fields are published through struct + * io_sqring_offsets when calling io_uring_setup. + */ +struct io_rings { + /* + * Head and tail offsets into the ring; the offsets need to be + * masked to get valid indices. + * + * The kernel controls head of the sq ring and the tail of the cq ring, + * and the application controls tail of the sq ring and the head of the + * cq ring. + */ + struct io_uring sq, cq; + /* + * Bitmasks to apply to head and tail offsets (constant, equals + * ring_entries - 1) + */ + u32 sq_ring_mask, cq_ring_mask; + /* Ring sizes (constant, power of 2) */ + u32 sq_ring_entries, cq_ring_entries; + /* + * Number of invalid entries dropped by the kernel due to + * invalid index stored in array + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * After a new SQ head value was read by the application this + * counter includes all submissions that were dropped reaching + * the new SQ head (and possibly more). + */ + u32 sq_dropped; + /* + * Runtime SQ flags + * + * Written by the kernel, shouldn't be modified by the + * application. + * + * The application needs a full memory barrier before checking + * for IORING_SQ_NEED_WAKEUP after updating the sq tail. + */ + atomic_t sq_flags; + /* + * Runtime CQ flags + * + * Written by the application, shouldn't be modified by the + * kernel. + */ + u32 cq_flags; + /* + * Number of completion events lost because the queue was full; + * this should be avoided by the application by making sure + * there are not more requests pending than there is space in + * the completion queue. + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * As completion events come in out of order this counter is not + * ordered with any other data. + */ + u32 cq_overflow; + /* + * Ring buffer of completion events. + * + * The kernel writes completion events fresh every time they are + * produced, so the application is allowed to modify pending + * entries. 
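+ *
+ * A minimal consumer sketch (assuming direct access to this struct and
+ * reusing the kernel barrier macro names; handle() is a hypothetical
+ * consumer hook, and a real application works through the mmap'ed
+ * IORING_OFF_CQ_RING offsets instead)::
+ *
+ *	u32 head = ring->cq.head;
+ *
+ *	while (head != smp_load_acquire(&ring->cq.tail)) {
+ *		struct io_uring_cqe *cqe = &ring->cqes[head & ring->cq_ring_mask];
+ *
+ *		handle(cqe);
+ *		head++;
+ *	}
+ *	smp_store_release(&ring->cq.head, head);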
+ */ + struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; +}; + +struct io_restriction { + DECLARE_BITMAP(register_op, IORING_REGISTER_LAST); + DECLARE_BITMAP(sqe_op, IORING_OP_LAST); + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + /* inline/task_work completion list, under ->uring_lock */ + struct io_wq_work_node free_list; + /* batch completion logic */ + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + + bool plug_started; + bool need_plug; + unsigned short submit_nr; + struct blk_plug plug; +}; + +struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async: 1; + struct rcu_head rcu; +}; + +struct io_alloc_cache { + struct hlist_head list; + unsigned int nr_cached; +}; + +struct io_ring_ctx { + /* const or read-mostly hot data */ + struct { + struct percpu_ref refs; + + struct io_rings *rings; + unsigned int flags; + enum task_work_notify_mode notify_method; + unsigned int compat: 1; + unsigned int drain_next: 1; + unsigned int restricted: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + unsigned int drain_disabled: 1; + unsigned int has_evfd: 1; + unsigned int syscall_iopoll: 1; + } ____cacheline_aligned_in_smp; + + /* submission data */ + struct { + struct mutex uring_lock; + + /* + * Ring buffer of indices into array of io_uring_sqe, which is + * mmapped by the application using the IORING_OFF_SQES offset. + * + * This indirection could e.g. be used to assign fixed + * io_uring_sqe entries to operations and only submit them to + * the queue when needed. + * + * The kernel modifies neither the indices array nor the entries + * array. + */ + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned cached_sq_head; + unsigned sq_entries; + + /* + * Fixed resources fast path, should be accessed only under + * uring_lock, and updated through io_uring_register(2) + */ + struct io_rsrc_node *rsrc_node; + int rsrc_cached_refs; + atomic_t cancel_seq; + struct io_file_table file_table; + unsigned nr_user_files; + unsigned nr_user_bufs; + struct io_mapped_ubuf **user_bufs; + struct io_notif_slot *notif_slots; + unsigned nr_notif_slots; + + struct io_submit_state submit_state; + + struct io_buffer_list *io_bl; + struct xarray io_bl_xa; + struct list_head io_buffers_cache; + + struct io_hash_table cancel_table_locked; + struct list_head cq_overflow_list; + struct io_alloc_cache apoll_cache; + struct io_alloc_cache netmsg_cache; + } ____cacheline_aligned_in_smp; + + /* IRQ completion list, under ->completion_lock */ + struct io_wq_work_list locked_free_list; + unsigned int locked_free_nr; + + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ + struct io_sq_data *sq_data; /* if using sq thread polling */ + + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + + unsigned long check_cq; + + unsigned int file_alloc_start; + unsigned int file_alloc_end; + + struct xarray personalities; + u32 pers_next; + + struct { + /* + * We cache a range of free CQEs we can use, once exhausted it + * should go through a slower range setup, see __io_get_cqe() + */ + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + + unsigned cached_cq_tail; + unsigned cq_entries; + struct io_ev_fd __rcu *io_ev_fd; + struct wait_queue_head cq_wait; + unsigned cq_extra; + } ____cacheline_aligned_in_smp; + + struct { + spinlock_t completion_lock; + + /* + * ->iopoll_list is protected 
by the ctx->uring_lock for + * io_uring instances that don't use IORING_SETUP_SQPOLL. + * For SQPOLL, only the single threaded io_sq_thread() will + * manipulate the list, hence no extra locking is needed there. + */ + struct io_wq_work_list iopoll_list; + struct io_hash_table cancel_table; + bool poll_multi_queue; + + struct list_head io_buffers_comp; + } ____cacheline_aligned_in_smp; + + /* timeouts */ + struct { + spinlock_t timeout_lock; + atomic_t cq_timeouts; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned cq_last_tm_flush; + } ____cacheline_aligned_in_smp; + + /* Keep this last, we don't need it for the fast path */ + + struct io_restriction restrictions; + struct task_struct *submitter_task; + + /* slow path rsrc auxiliary data, used by update/register */ + struct io_rsrc_node *rsrc_backup_node; + struct io_mapped_ubuf *dummy_ubuf; + struct io_rsrc_data *file_data; + struct io_rsrc_data *buf_data; + + struct delayed_work rsrc_put_work; + struct llist_head rsrc_put_llist; + struct list_head rsrc_ref_list; + spinlock_t rsrc_ref_lock; + + struct list_head io_buffers_pages; + + #if defined(CONFIG_UNIX) + struct socket *ring_sock; + #endif + /* hashed buffered write serialization */ + struct io_wq_hash *hash_map; + + /* Only used for accounting purposes */ + struct user_struct *user; + struct mm_struct *mm_account; + + /* ctx exit and cancellation */ + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + + /* io-wq management, e.g. thread count */ + u32 iowq_limits[2]; + bool iowq_limits_set; + + struct list_head defer_list; + unsigned sq_thread_idle; + /* protected by ->completion_lock */ + unsigned evfd_last_cq_tail; +}; + +enum { + REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, + REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, + REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT, + REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT, + REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, + REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, + REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT, + + /* first byte is taken by user flags, shift it to not overlap */ + REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT, + REQ_F_CUR_POS_BIT, + REQ_F_NOWAIT_BIT, + REQ_F_LINK_TIMEOUT_BIT, + REQ_F_NEED_CLEANUP_BIT, + REQ_F_POLLED_BIT, + REQ_F_BUFFER_SELECTED_BIT, + REQ_F_BUFFER_RING_BIT, + REQ_F_REISSUE_BIT, + REQ_F_CREDS_BIT, + REQ_F_REFCOUNT_BIT, + REQ_F_ARM_LTIMEOUT_BIT, + REQ_F_ASYNC_DATA_BIT, + REQ_F_SKIP_LINK_CQES_BIT, + REQ_F_SINGLE_POLL_BIT, + REQ_F_DOUBLE_POLL_BIT, + REQ_F_PARTIAL_IO_BIT, + REQ_F_CQE32_INIT_BIT, + REQ_F_APOLL_MULTISHOT_BIT, + REQ_F_CLEAR_POLLIN_BIT, + REQ_F_HASH_LOCKED_BIT, + /* keep async read/write and isreg together and in order */ + REQ_F_SUPPORT_NOWAIT_BIT, + REQ_F_ISREG_BIT, + + /* not a real bit, just to check we're not overflowing the space */ + __REQ_F_LAST_BIT, +}; + +enum { + /* ctx owns file */ + REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT), + /* drain existing IO first */ + REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT), + /* linked sqes */ + REQ_F_LINK = BIT(REQ_F_LINK_BIT), + /* doesn't sever on completion < 0 */ + REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT), + /* IOSQE_ASYNC */ + REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT), + /* IOSQE_BUFFER_SELECT */ + REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), + /* IOSQE_CQE_SKIP_SUCCESS */ + REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT), + + /* fail rest of links */ + REQ_F_FAIL = BIT(REQ_F_FAIL_BIT), + /* on inflight list, should be 
cancelled and waited on exit reliably */ + REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), + /* read/write uses file position */ + REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), + /* must not punt to workers */ + REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), + /* has or had linked timeout */ + REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), + /* needs cleanup */ + REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), + /* already went through poll handler */ + REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), + /* buffer already selected */ + REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), + /* buffer selected from ring, needs commit */ + REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT), + /* caller should reissue async */ + REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), + /* supports async reads/writes */ + REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT), + /* regular file */ + REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), + /* has creds assigned */ + REQ_F_CREDS = BIT(REQ_F_CREDS_BIT), + /* skip refcounting if not set */ + REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), + /* there is a linked timeout that has to be armed */ + REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), + /* ->async_data allocated */ + REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT), + /* don't post CQEs while failing linked requests */ + REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT), + /* single poll may be active */ + REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT), + /* double poll may be active */ + REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT), + /* request has already done partial IO */ + REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), + /* fast poll multishot mode */ + REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT), + /* ->extra1 and ->extra2 are initialised */ + REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT), + /* recvmsg special flag, clear EPOLLIN */ + REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT), + /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ + REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT), +}; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_cqe { + __u64 user_data; + __s32 res; + /* fd initially, then cflags for completion */ + union { + __u32 flags; + int fd; + }; +}; + +/* + * Each request type overlays its private data structure on top of this one. + * They must not exceed this one in size. + */ +struct io_cmd_data { + struct file *file; + /* each command gets 56 bytes of data */ + __u8 data[56]; +}; + +static inline void io_kiocb_cmd_sz_check(size_t cmd_sz) +{ + BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data)); +} +#define io_kiocb_to_cmd(req, cmd_type) ( \ + io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \ + ((cmd_type *)&(req)->cmd) \ +) +#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr) + +struct io_kiocb { + union { + /* + * NOTE! Each of the io_kiocb union members has the file pointer + * as the first entry in their struct definition. So you can + * access the file pointer through any of the sub-structs, + * or directly as just 'file' in this struct. + */ + struct file *file; + struct io_cmd_data cmd; + }; + + u8 opcode; + /* polled IO has completed */ + u8 iopoll_completed; + /* + * Can be either a fixed buffer index, or used with provided buffers. + * For the latter, before issue it points to the buffer group ID, + * and after selection it points to the buffer ID itself. 
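+ *
+ * A hedged usage sketch for the provided-buffer case: the submitter
+ * sets sqe->buf_group before issue, and after completion userspace
+ * recovers the selected buffer ID from the CQE roughly as::
+ *
+ *	if (cqe->flags & IORING_CQE_F_BUFFER)
+ *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;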
+ */ + u16 buf_index; + unsigned int flags; + + struct io_cqe cqe; + + struct io_ring_ctx *ctx; + struct task_struct *task; + + struct io_rsrc_node *rsrc_node; + + union { + /* store used ubuf, so we can prevent reloading */ + struct io_mapped_ubuf *imu; + + /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ + struct io_buffer *kbuf; + + /* + * stores buffer ID for ring provided buffers, valid IFF + * REQ_F_BUFFER_RING is set. + */ + struct io_buffer_list *buf_list; + }; + + union { + /* used by request caches, completion batching and iopoll */ + struct io_wq_work_node comp_list; + /* cache ->apoll->events */ + __poll_t apoll_events; + }; + atomic_t refs; + atomic_t poll_refs; + struct io_task_work io_task_work; + /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ + union { + struct hlist_node hash_node; + struct { + u64 extra1; + u64 extra2; + }; + }; + /* internal polling, see IORING_FEAT_FAST_POLL */ + struct async_poll *apoll; + /* opcode allocated if it needs to store data for async defer */ + void *async_data; + /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */ + struct io_kiocb *link; + /* custom credentials, valid IFF REQ_F_CREDS is set */ + const struct cred *creds; + struct io_wq_work work; +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +#endif diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h new file mode 100644 index 0000000000..61504a8c1b --- /dev/null +++ b/include/linux/isa-dma.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_ISA_DMA_H +#define __LINUX_ISA_DMA_H + +#include + +#if defined(CONFIG_PCI) && defined(CONFIG_X86_32) +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* __LINUX_ISA_DMA_H */ diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h new file mode 100644 index 0000000000..587f251288 --- /dev/null +++ b/include/linux/mei_aux.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Intel Corporation. All rights reserved. 
+ */ +#ifndef _LINUX_MEI_AUX_H +#define _LINUX_MEI_AUX_H + +#include <linux/auxiliary_bus.h> + +struct mei_aux_device { + struct auxiliary_device aux_dev; + int irq; + struct resource bar; +}; + +#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \ + container_of(auxiliary_dev, struct mei_aux_device, aux_dev) + +#endif /* _LINUX_MEI_AUX_H */ diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h new file mode 100644 index 0000000000..df8e6b1e4b --- /dev/null +++ b/include/linux/mfd/mt6331/core.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 AngeloGioacchino Del Regno + */ + +#ifndef __MFD_MT6331_CORE_H__ +#define __MFD_MT6331_CORE_H__ + +enum mt6331_irq_status_numbers { + MT6331_IRQ_STATUS_PWRKEY = 0, + MT6331_IRQ_STATUS_HOMEKEY, + MT6331_IRQ_STATUS_CHRDET, + MT6331_IRQ_STATUS_THR_H, + MT6331_IRQ_STATUS_THR_L, + MT6331_IRQ_STATUS_BAT_H, + MT6331_IRQ_STATUS_BAT_L, + MT6331_IRQ_STATUS_RTC, + MT6331_IRQ_STATUS_AUDIO, + MT6331_IRQ_STATUS_MAD, + MT6331_IRQ_STATUS_ACCDET, + MT6331_IRQ_STATUS_ACCDET_EINT, + MT6331_IRQ_STATUS_ACCDET_NEGV = 12, + MT6331_IRQ_STATUS_VDVFS11_OC = 16, + MT6331_IRQ_STATUS_VDVFS12_OC, + MT6331_IRQ_STATUS_VDVFS13_OC, + MT6331_IRQ_STATUS_VDVFS14_OC, + MT6331_IRQ_STATUS_GPU_OC, + MT6331_IRQ_STATUS_VCORE1_OC, + MT6331_IRQ_STATUS_VCORE2_OC, + MT6331_IRQ_STATUS_VIO18_OC, + MT6331_IRQ_STATUS_LDO_OC, + MT6331_IRQ_STATUS_NR, +}; + +#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY +#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1) +#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC +#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDVFS11_OC + 1) + +#endif /* __MFD_MT6331_CORE_H__ */ diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h new file mode 100644 index 0000000000..e2be6bccd1 --- /dev/null +++ b/include/linux/mfd/mt6331/registers.h @@ -0,0 +1,584 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 AngeloGioacchino Del Regno + */ + +#ifndef __MFD_MT6331_REGISTERS_H__ +#define __MFD_MT6331_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6331_STRUP_CON0 0x0 +#define MT6331_STRUP_CON2 0x2 +#define MT6331_STRUP_CON3 0x4 +#define MT6331_STRUP_CON4 0x6 +#define MT6331_STRUP_CON5 0x8 +#define MT6331_STRUP_CON6 0xA +#define MT6331_STRUP_CON7 0xC +#define MT6331_STRUP_CON8 0xE +#define MT6331_STRUP_CON9 0x10 +#define MT6331_STRUP_CON10 0x12 +#define MT6331_STRUP_CON11 0x14 +#define MT6331_STRUP_CON12 0x16 +#define MT6331_STRUP_CON13 0x18 +#define MT6331_STRUP_CON14 0x1A +#define MT6331_STRUP_CON15 0x1C +#define MT6331_STRUP_CON16 0x1E +#define MT6331_STRUP_CON17 0x20 +#define MT6331_STRUP_CON18 0x22 +#define MT6331_HWCID 0x100 +#define MT6331_SWCID 0x102 +#define MT6331_EXT_PMIC_STATUS 0x104 +#define MT6331_TOP_CON 0x106 +#define MT6331_TEST_OUT 0x108 +#define MT6331_TEST_CON0 0x10A +#define MT6331_TEST_CON1 0x10C +#define MT6331_TESTMODE_SW 0x10E +#define MT6331_EN_STATUS0 0x110 +#define MT6331_EN_STATUS1 0x112 +#define MT6331_EN_STATUS2 0x114 +#define MT6331_OCSTATUS0 0x116 +#define MT6331_OCSTATUS1 0x118 +#define MT6331_OCSTATUS2 0x11A +#define MT6331_PGSTATUS 0x11C +#define MT6331_TOPSTATUS 0x11E +#define MT6331_TDSEL_CON 0x120 +#define MT6331_RDSEL_CON 0x122 +#define MT6331_SMT_CON0 0x124 +#define MT6331_SMT_CON1 0x126 +#define MT6331_SMT_CON2 0x128 +#define MT6331_DRV_CON0 0x12A +#define MT6331_DRV_CON1 0x12C +#define MT6331_DRV_CON2 0x12E +#define MT6331_DRV_CON3 0x130 +#define MT6331_TOP_STATUS 0x132 +#define 
MT6331_TOP_STATUS_SET 0x134 +#define MT6331_TOP_STATUS_CLR 0x136 +#define MT6331_TOP_CKPDN_CON0 0x138 +#define MT6331_TOP_CKPDN_CON0_SET 0x13A +#define MT6331_TOP_CKPDN_CON0_CLR 0x13C +#define MT6331_TOP_CKPDN_CON1 0x13E +#define MT6331_TOP_CKPDN_CON1_SET 0x140 +#define MT6331_TOP_CKPDN_CON1_CLR 0x142 +#define MT6331_TOP_CKPDN_CON2 0x144 +#define MT6331_TOP_CKPDN_CON2_SET 0x146 +#define MT6331_TOP_CKPDN_CON2_CLR 0x148 +#define MT6331_TOP_CKSEL_CON 0x14A +#define MT6331_TOP_CKSEL_CON_SET 0x14C +#define MT6331_TOP_CKSEL_CON_CLR 0x14E +#define MT6331_TOP_CKHWEN_CON 0x150 +#define MT6331_TOP_CKHWEN_CON_SET 0x152 +#define MT6331_TOP_CKHWEN_CON_CLR 0x154 +#define MT6331_TOP_CKTST_CON0 0x156 +#define MT6331_TOP_CKTST_CON1 0x158 +#define MT6331_TOP_CLKSQ 0x15A +#define MT6331_TOP_CLKSQ_SET 0x15C +#define MT6331_TOP_CLKSQ_CLR 0x15E +#define MT6331_TOP_RST_CON 0x160 +#define MT6331_TOP_RST_CON_SET 0x162 +#define MT6331_TOP_RST_CON_CLR 0x164 +#define MT6331_TOP_RST_MISC 0x166 +#define MT6331_TOP_RST_MISC_SET 0x168 +#define MT6331_TOP_RST_MISC_CLR 0x16A +#define MT6331_INT_CON0 0x16C +#define MT6331_INT_CON0_SET 0x16E +#define MT6331_INT_CON0_CLR 0x170 +#define MT6331_INT_CON1 0x172 +#define MT6331_INT_CON1_SET 0x174 +#define MT6331_INT_CON1_CLR 0x176 +#define MT6331_INT_MISC_CON 0x178 +#define MT6331_INT_MISC_CON_SET 0x17A +#define MT6331_INT_MISC_CON_CLR 0x17C +#define MT6331_INT_STATUS_CON0 0x17E +#define MT6331_INT_STATUS_CON1 0x180 +#define MT6331_OC_GEAR_0 0x182 +#define MT6331_FQMTR_CON0 0x184 +#define MT6331_FQMTR_CON1 0x186 +#define MT6331_FQMTR_CON2 0x188 +#define MT6331_RG_SPI_CON 0x18A +#define MT6331_DEW_DIO_EN 0x18C +#define MT6331_DEW_READ_TEST 0x18E +#define MT6331_DEW_WRITE_TEST 0x190 +#define MT6331_DEW_CRC_SWRST 0x192 +#define MT6331_DEW_CRC_EN 0x194 +#define MT6331_DEW_CRC_VAL 0x196 +#define MT6331_DEW_DBG_MON_SEL 0x198 +#define MT6331_DEW_CIPHER_KEY_SEL 0x19A +#define MT6331_DEW_CIPHER_IV_SEL 0x19C +#define MT6331_DEW_CIPHER_EN 0x19E +#define MT6331_DEW_CIPHER_RDY 0x1A0 +#define MT6331_DEW_CIPHER_MODE 0x1A2 +#define MT6331_DEW_CIPHER_SWRST 0x1A4 +#define MT6331_DEW_RDDMY_NO 0x1A6 +#define MT6331_INT_TYPE_CON0 0x1A8 +#define MT6331_INT_TYPE_CON0_SET 0x1AA +#define MT6331_INT_TYPE_CON0_CLR 0x1AC +#define MT6331_INT_TYPE_CON1 0x1AE +#define MT6331_INT_TYPE_CON1_SET 0x1B0 +#define MT6331_INT_TYPE_CON1_CLR 0x1B2 +#define MT6331_INT_STA 0x1B4 +#define MT6331_BUCK_ALL_CON0 0x200 +#define MT6331_BUCK_ALL_CON1 0x202 +#define MT6331_BUCK_ALL_CON2 0x204 +#define MT6331_BUCK_ALL_CON3 0x206 +#define MT6331_BUCK_ALL_CON4 0x208 +#define MT6331_BUCK_ALL_CON5 0x20A +#define MT6331_BUCK_ALL_CON6 0x20C +#define MT6331_BUCK_ALL_CON7 0x20E +#define MT6331_BUCK_ALL_CON8 0x210 +#define MT6331_BUCK_ALL_CON9 0x212 +#define MT6331_BUCK_ALL_CON10 0x214 +#define MT6331_BUCK_ALL_CON11 0x216 +#define MT6331_BUCK_ALL_CON12 0x218 +#define MT6331_BUCK_ALL_CON13 0x21A +#define MT6331_BUCK_ALL_CON14 0x21C +#define MT6331_BUCK_ALL_CON15 0x21E +#define MT6331_BUCK_ALL_CON16 0x220 +#define MT6331_BUCK_ALL_CON17 0x222 +#define MT6331_BUCK_ALL_CON18 0x224 +#define MT6331_BUCK_ALL_CON19 0x226 +#define MT6331_BUCK_ALL_CON20 0x228 +#define MT6331_BUCK_ALL_CON21 0x22A +#define MT6331_BUCK_ALL_CON22 0x22C +#define MT6331_BUCK_ALL_CON23 0x22E +#define MT6331_BUCK_ALL_CON24 0x230 +#define MT6331_BUCK_ALL_CON25 0x232 +#define MT6331_BUCK_ALL_CON26 0x234 +#define MT6331_VDVFS11_CON0 0x236 +#define MT6331_VDVFS11_CON1 0x238 +#define MT6331_VDVFS11_CON2 0x23A +#define MT6331_VDVFS11_CON3 0x23C +#define MT6331_VDVFS11_CON4 0x23E 
+#define MT6331_VDVFS11_CON5 0x240 +#define MT6331_VDVFS11_CON6 0x242 +#define MT6331_VDVFS11_CON7 0x244 +#define MT6331_VDVFS11_CON8 0x246 +#define MT6331_VDVFS11_CON9 0x248 +#define MT6331_VDVFS11_CON10 0x24A +#define MT6331_VDVFS11_CON11 0x24C +#define MT6331_VDVFS11_CON12 0x24E +#define MT6331_VDVFS11_CON13 0x250 +#define MT6331_VDVFS11_CON14 0x252 +#define MT6331_VDVFS11_CON18 0x25A +#define MT6331_VDVFS11_CON19 0x25C +#define MT6331_VDVFS11_CON20 0x25E +#define MT6331_VDVFS11_CON21 0x260 +#define MT6331_VDVFS11_CON22 0x262 +#define MT6331_VDVFS11_CON23 0x264 +#define MT6331_VDVFS11_CON24 0x266 +#define MT6331_VDVFS11_CON25 0x268 +#define MT6331_VDVFS11_CON26 0x26A +#define MT6331_VDVFS11_CON27 0x26C +#define MT6331_VDVFS12_CON0 0x26E +#define MT6331_VDVFS12_CON1 0x270 +#define MT6331_VDVFS12_CON2 0x272 +#define MT6331_VDVFS12_CON3 0x274 +#define MT6331_VDVFS12_CON4 0x276 +#define MT6331_VDVFS12_CON5 0x278 +#define MT6331_VDVFS12_CON6 0x27A +#define MT6331_VDVFS12_CON7 0x27C +#define MT6331_VDVFS12_CON8 0x27E +#define MT6331_VDVFS12_CON9 0x280 +#define MT6331_VDVFS12_CON10 0x282 +#define MT6331_VDVFS12_CON11 0x284 +#define MT6331_VDVFS12_CON12 0x286 +#define MT6331_VDVFS12_CON13 0x288 +#define MT6331_VDVFS12_CON14 0x28A +#define MT6331_VDVFS12_CON18 0x292 +#define MT6331_VDVFS12_CON19 0x294 +#define MT6331_VDVFS12_CON20 0x296 +#define MT6331_VDVFS13_CON0 0x298 +#define MT6331_VDVFS13_CON1 0x29A +#define MT6331_VDVFS13_CON2 0x29C +#define MT6331_VDVFS13_CON3 0x29E +#define MT6331_VDVFS13_CON4 0x2A0 +#define MT6331_VDVFS13_CON5 0x2A2 +#define MT6331_VDVFS13_CON6 0x2A4 +#define MT6331_VDVFS13_CON7 0x2A6 +#define MT6331_VDVFS13_CON8 0x2A8 +#define MT6331_VDVFS13_CON9 0x2AA +#define MT6331_VDVFS13_CON10 0x2AC +#define MT6331_VDVFS13_CON11 0x2AE +#define MT6331_VDVFS13_CON12 0x2B0 +#define MT6331_VDVFS13_CON13 0x2B2 +#define MT6331_VDVFS13_CON14 0x2B4 +#define MT6331_VDVFS13_CON18 0x2BC +#define MT6331_VDVFS13_CON19 0x2BE +#define MT6331_VDVFS13_CON20 0x2C0 +#define MT6331_VDVFS14_CON0 0x2C2 +#define MT6331_VDVFS14_CON1 0x2C4 +#define MT6331_VDVFS14_CON2 0x2C6 +#define MT6331_VDVFS14_CON3 0x2C8 +#define MT6331_VDVFS14_CON4 0x2CA +#define MT6331_VDVFS14_CON5 0x2CC +#define MT6331_VDVFS14_CON6 0x2CE +#define MT6331_VDVFS14_CON7 0x2D0 +#define MT6331_VDVFS14_CON8 0x2D2 +#define MT6331_VDVFS14_CON9 0x2D4 +#define MT6331_VDVFS14_CON10 0x2D6 +#define MT6331_VDVFS14_CON11 0x2D8 +#define MT6331_VDVFS14_CON12 0x2DA +#define MT6331_VDVFS14_CON13 0x2DC +#define MT6331_VDVFS14_CON14 0x2DE +#define MT6331_VDVFS14_CON18 0x2E6 +#define MT6331_VDVFS14_CON19 0x2E8 +#define MT6331_VDVFS14_CON20 0x2EA +#define MT6331_VGPU_CON0 0x300 +#define MT6331_VGPU_CON1 0x302 +#define MT6331_VGPU_CON2 0x304 +#define MT6331_VGPU_CON3 0x306 +#define MT6331_VGPU_CON4 0x308 +#define MT6331_VGPU_CON5 0x30A +#define MT6331_VGPU_CON6 0x30C +#define MT6331_VGPU_CON7 0x30E +#define MT6331_VGPU_CON8 0x310 +#define MT6331_VGPU_CON9 0x312 +#define MT6331_VGPU_CON10 0x314 +#define MT6331_VGPU_CON11 0x316 +#define MT6331_VGPU_CON12 0x318 +#define MT6331_VGPU_CON13 0x31A +#define MT6331_VGPU_CON14 0x31C +#define MT6331_VGPU_CON15 0x31E +#define MT6331_VGPU_CON16 0x320 +#define MT6331_VGPU_CON17 0x322 +#define MT6331_VGPU_CON18 0x324 +#define MT6331_VGPU_CON19 0x326 +#define MT6331_VGPU_CON20 0x328 +#define MT6331_VCORE1_CON0 0x32A +#define MT6331_VCORE1_CON1 0x32C +#define MT6331_VCORE1_CON2 0x32E +#define MT6331_VCORE1_CON3 0x330 +#define MT6331_VCORE1_CON4 0x332 +#define MT6331_VCORE1_CON5 0x334 +#define MT6331_VCORE1_CON6 0x336 
+#define MT6331_VCORE1_CON7 0x338 +#define MT6331_VCORE1_CON8 0x33A +#define MT6331_VCORE1_CON9 0x33C +#define MT6331_VCORE1_CON10 0x33E +#define MT6331_VCORE1_CON11 0x340 +#define MT6331_VCORE1_CON12 0x342 +#define MT6331_VCORE1_CON13 0x344 +#define MT6331_VCORE1_CON14 0x346 +#define MT6331_VCORE1_CON15 0x348 +#define MT6331_VCORE1_CON16 0x34A +#define MT6331_VCORE1_CON17 0x34C +#define MT6331_VCORE1_CON18 0x34E +#define MT6331_VCORE1_CON19 0x350 +#define MT6331_VCORE1_CON20 0x352 +#define MT6331_VCORE2_CON0 0x354 +#define MT6331_VCORE2_CON1 0x356 +#define MT6331_VCORE2_CON2 0x358 +#define MT6331_VCORE2_CON3 0x35A +#define MT6331_VCORE2_CON4 0x35C +#define MT6331_VCORE2_CON5 0x35E +#define MT6331_VCORE2_CON6 0x360 +#define MT6331_VCORE2_CON7 0x362 +#define MT6331_VCORE2_CON8 0x364 +#define MT6331_VCORE2_CON9 0x366 +#define MT6331_VCORE2_CON10 0x368 +#define MT6331_VCORE2_CON11 0x36A +#define MT6331_VCORE2_CON12 0x36C +#define MT6331_VCORE2_CON13 0x36E +#define MT6331_VCORE2_CON14 0x370 +#define MT6331_VCORE2_CON15 0x372 +#define MT6331_VCORE2_CON16 0x374 +#define MT6331_VCORE2_CON17 0x376 +#define MT6331_VCORE2_CON18 0x378 +#define MT6331_VCORE2_CON19 0x37A +#define MT6331_VCORE2_CON20 0x37C +#define MT6331_VCORE2_CON21 0x37E +#define MT6331_VIO18_CON0 0x380 +#define MT6331_VIO18_CON1 0x382 +#define MT6331_VIO18_CON2 0x384 +#define MT6331_VIO18_CON3 0x386 +#define MT6331_VIO18_CON4 0x388 +#define MT6331_VIO18_CON5 0x38A +#define MT6331_VIO18_CON6 0x38C +#define MT6331_VIO18_CON7 0x38E +#define MT6331_VIO18_CON8 0x390 +#define MT6331_VIO18_CON9 0x392 +#define MT6331_VIO18_CON10 0x394 +#define MT6331_VIO18_CON11 0x396 +#define MT6331_VIO18_CON12 0x398 +#define MT6331_VIO18_CON13 0x39A +#define MT6331_VIO18_CON14 0x39C +#define MT6331_VIO18_CON15 0x39E +#define MT6331_VIO18_CON16 0x3A0 +#define MT6331_VIO18_CON17 0x3A2 +#define MT6331_VIO18_CON18 0x3A4 +#define MT6331_VIO18_CON19 0x3A6 +#define MT6331_VIO18_CON20 0x3A8 +#define MT6331_BUCK_K_CON0 0x3AA +#define MT6331_BUCK_K_CON1 0x3AC +#define MT6331_BUCK_K_CON2 0x3AE +#define MT6331_BUCK_K_CON3 0x3B0 +#define MT6331_ZCD_CON0 0x400 +#define MT6331_ZCD_CON1 0x402 +#define MT6331_ZCD_CON2 0x404 +#define MT6331_ZCD_CON3 0x406 +#define MT6331_ZCD_CON4 0x408 +#define MT6331_ZCD_CON5 0x40A +#define MT6331_ISINK0_CON0 0x40C +#define MT6331_ISINK0_CON1 0x40E +#define MT6331_ISINK0_CON2 0x410 +#define MT6331_ISINK0_CON3 0x412 +#define MT6331_ISINK0_CON4 0x414 +#define MT6331_ISINK1_CON0 0x416 +#define MT6331_ISINK1_CON1 0x418 +#define MT6331_ISINK1_CON2 0x41A +#define MT6331_ISINK1_CON3 0x41C +#define MT6331_ISINK1_CON4 0x41E +#define MT6331_ISINK2_CON0 0x420 +#define MT6331_ISINK2_CON1 0x422 +#define MT6331_ISINK2_CON2 0x424 +#define MT6331_ISINK2_CON3 0x426 +#define MT6331_ISINK2_CON4 0x428 +#define MT6331_ISINK3_CON0 0x42A +#define MT6331_ISINK3_CON1 0x42C +#define MT6331_ISINK3_CON2 0x42E +#define MT6331_ISINK3_CON3 0x430 +#define MT6331_ISINK3_CON4 0x432 +#define MT6331_ISINK_ANA0 0x434 +#define MT6331_ISINK_ANA1 0x436 +#define MT6331_ISINK_PHASE_DLY 0x438 +#define MT6331_ISINK_EN_CTRL 0x43A +#define MT6331_ANALDO_CON0 0x500 +#define MT6331_ANALDO_CON1 0x502 +#define MT6331_ANALDO_CON2 0x504 +#define MT6331_ANALDO_CON3 0x506 +#define MT6331_ANALDO_CON4 0x508 +#define MT6331_ANALDO_CON5 0x50A +#define MT6331_ANALDO_CON6 0x50C +#define MT6331_ANALDO_CON7 0x50E +#define MT6331_ANALDO_CON8 0x510 +#define MT6331_ANALDO_CON9 0x512 +#define MT6331_ANALDO_CON10 0x514 +#define MT6331_ANALDO_CON11 0x516 +#define MT6331_ANALDO_CON12 0x518 +#define 
MT6331_ANALDO_CON13 0x51A +#define MT6331_SYSLDO_CON0 0x51C +#define MT6331_SYSLDO_CON1 0x51E +#define MT6331_SYSLDO_CON2 0x520 +#define MT6331_SYSLDO_CON3 0x522 +#define MT6331_SYSLDO_CON4 0x524 +#define MT6331_SYSLDO_CON5 0x526 +#define MT6331_SYSLDO_CON6 0x528 +#define MT6331_SYSLDO_CON7 0x52A +#define MT6331_SYSLDO_CON8 0x52C +#define MT6331_SYSLDO_CON9 0x52E +#define MT6331_SYSLDO_CON10 0x530 +#define MT6331_SYSLDO_CON11 0x532 +#define MT6331_SYSLDO_CON12 0x534 +#define MT6331_SYSLDO_CON13 0x536 +#define MT6331_SYSLDO_CON14 0x538 +#define MT6331_SYSLDO_CON15 0x53A +#define MT6331_SYSLDO_CON16 0x53C +#define MT6331_SYSLDO_CON17 0x53E +#define MT6331_SYSLDO_CON18 0x540 +#define MT6331_SYSLDO_CON19 0x542 +#define MT6331_SYSLDO_CON20 0x544 +#define MT6331_SYSLDO_CON21 0x546 +#define MT6331_DIGLDO_CON0 0x548 +#define MT6331_DIGLDO_CON1 0x54A +#define MT6331_DIGLDO_CON2 0x54C +#define MT6331_DIGLDO_CON3 0x54E +#define MT6331_DIGLDO_CON4 0x550 +#define MT6331_DIGLDO_CON5 0x552 +#define MT6331_DIGLDO_CON6 0x554 +#define MT6331_DIGLDO_CON7 0x556 +#define MT6331_DIGLDO_CON8 0x558 +#define MT6331_DIGLDO_CON9 0x55A +#define MT6331_DIGLDO_CON10 0x55C +#define MT6331_DIGLDO_CON11 0x55E +#define MT6331_DIGLDO_CON12 0x560 +#define MT6331_DIGLDO_CON13 0x562 +#define MT6331_DIGLDO_CON14 0x564 +#define MT6331_DIGLDO_CON15 0x566 +#define MT6331_DIGLDO_CON16 0x568 +#define MT6331_DIGLDO_CON17 0x56A +#define MT6331_DIGLDO_CON18 0x56C +#define MT6331_DIGLDO_CON19 0x56E +#define MT6331_DIGLDO_CON20 0x570 +#define MT6331_DIGLDO_CON21 0x572 +#define MT6331_DIGLDO_CON22 0x574 +#define MT6331_DIGLDO_CON23 0x576 +#define MT6331_DIGLDO_CON24 0x578 +#define MT6331_DIGLDO_CON25 0x57A +#define MT6331_DIGLDO_CON26 0x57C +#define MT6331_DIGLDO_CON27 0x57E +#define MT6331_DIGLDO_CON28 0x580 +#define MT6331_OTP_CON0 0x600 +#define MT6331_OTP_CON1 0x602 +#define MT6331_OTP_CON2 0x604 +#define MT6331_OTP_CON3 0x606 +#define MT6331_OTP_CON4 0x608 +#define MT6331_OTP_CON5 0x60A +#define MT6331_OTP_CON6 0x60C +#define MT6331_OTP_CON7 0x60E +#define MT6331_OTP_CON8 0x610 +#define MT6331_OTP_CON9 0x612 +#define MT6331_OTP_CON10 0x614 +#define MT6331_OTP_CON11 0x616 +#define MT6331_OTP_CON12 0x618 +#define MT6331_OTP_CON13 0x61A +#define MT6331_OTP_CON14 0x61C +#define MT6331_OTP_DOUT_0_15 0x61E +#define MT6331_OTP_DOUT_16_31 0x620 +#define MT6331_OTP_DOUT_32_47 0x622 +#define MT6331_OTP_DOUT_48_63 0x624 +#define MT6331_OTP_DOUT_64_79 0x626 +#define MT6331_OTP_DOUT_80_95 0x628 +#define MT6331_OTP_DOUT_96_111 0x62A +#define MT6331_OTP_DOUT_112_127 0x62C +#define MT6331_OTP_DOUT_128_143 0x62E +#define MT6331_OTP_DOUT_144_159 0x630 +#define MT6331_OTP_DOUT_160_175 0x632 +#define MT6331_OTP_DOUT_176_191 0x634 +#define MT6331_OTP_DOUT_192_207 0x636 +#define MT6331_OTP_DOUT_208_223 0x638 +#define MT6331_OTP_DOUT_224_239 0x63A +#define MT6331_OTP_DOUT_240_255 0x63C +#define MT6331_OTP_VAL_0_15 0x63E +#define MT6331_OTP_VAL_16_31 0x640 +#define MT6331_OTP_VAL_32_47 0x642 +#define MT6331_OTP_VAL_48_63 0x644 +#define MT6331_OTP_VAL_64_79 0x646 +#define MT6331_OTP_VAL_80_95 0x648 +#define MT6331_OTP_VAL_96_111 0x64A +#define MT6331_OTP_VAL_112_127 0x64C +#define MT6331_OTP_VAL_128_143 0x64E +#define MT6331_OTP_VAL_144_159 0x650 +#define MT6331_OTP_VAL_160_175 0x652 +#define MT6331_OTP_VAL_176_191 0x654 +#define MT6331_OTP_VAL_192_207 0x656 +#define MT6331_OTP_VAL_208_223 0x658 +#define MT6331_OTP_VAL_224_239 0x65A +#define MT6331_OTP_VAL_240_255 0x65C +#define MT6331_RTC_MIX_CON0 0x65E +#define MT6331_RTC_MIX_CON1 0x660 +#define 
MT6331_AUDDAC_CFG0 0x662 +#define MT6331_AUDBUF_CFG0 0x664 +#define MT6331_AUDBUF_CFG1 0x666 +#define MT6331_AUDBUF_CFG2 0x668 +#define MT6331_AUDBUF_CFG3 0x66A +#define MT6331_AUDBUF_CFG4 0x66C +#define MT6331_AUDBUF_CFG5 0x66E +#define MT6331_AUDBUF_CFG6 0x670 +#define MT6331_AUDBUF_CFG7 0x672 +#define MT6331_AUDBUF_CFG8 0x674 +#define MT6331_IBIASDIST_CFG0 0x676 +#define MT6331_AUDCLKGEN_CFG0 0x678 +#define MT6331_AUDLDO_CFG0 0x67A +#define MT6331_AUDDCDC_CFG0 0x67C +#define MT6331_AUDDCDC_CFG1 0x67E +#define MT6331_AUDNVREGGLB_CFG0 0x680 +#define MT6331_AUD_NCP0 0x682 +#define MT6331_AUD_ZCD_CFG0 0x684 +#define MT6331_AUDPREAMP_CFG0 0x686 +#define MT6331_AUDPREAMP_CFG1 0x688 +#define MT6331_AUDPREAMP_CFG2 0x68A +#define MT6331_AUDADC_CFG0 0x68C +#define MT6331_AUDADC_CFG1 0x68E +#define MT6331_AUDADC_CFG2 0x690 +#define MT6331_AUDADC_CFG3 0x692 +#define MT6331_AUDADC_CFG4 0x694 +#define MT6331_AUDADC_CFG5 0x696 +#define MT6331_AUDDIGMI_CFG0 0x698 +#define MT6331_AUDDIGMI_CFG1 0x69A +#define MT6331_AUDMICBIAS_CFG0 0x69C +#define MT6331_AUDMICBIAS_CFG1 0x69E +#define MT6331_AUDENCSPARE_CFG0 0x6A0 +#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2 +#define MT6331_AUDMADPLL_CFG0 0x6A4 +#define MT6331_AUDMADPLL_CFG1 0x6A6 +#define MT6331_AUDMADPLL_CFG2 0x6A8 +#define MT6331_AUDLDO_NVREG_CFG0 0x6AA +#define MT6331_AUDLDO_NVREG_CFG1 0x6AC +#define MT6331_AUDLDO_NVREG_CFG2 0x6AE +#define MT6331_AUXADC_ADC0 0x700 +#define MT6331_AUXADC_ADC1 0x702 +#define MT6331_AUXADC_ADC2 0x704 +#define MT6331_AUXADC_ADC3 0x706 +#define MT6331_AUXADC_ADC4 0x708 +#define MT6331_AUXADC_ADC5 0x70A +#define MT6331_AUXADC_ADC6 0x70C +#define MT6331_AUXADC_ADC7 0x70E +#define MT6331_AUXADC_ADC8 0x710 +#define MT6331_AUXADC_ADC9 0x712 +#define MT6331_AUXADC_ADC10 0x714 +#define MT6331_AUXADC_ADC11 0x716 +#define MT6331_AUXADC_ADC12 0x718 +#define MT6331_AUXADC_ADC13 0x71A +#define MT6331_AUXADC_ADC14 0x71C +#define MT6331_AUXADC_ADC15 0x71E +#define MT6331_AUXADC_ADC16 0x720 +#define MT6331_AUXADC_ADC17 0x722 +#define MT6331_AUXADC_ADC18 0x724 +#define MT6331_AUXADC_ADC19 0x726 +#define MT6331_AUXADC_STA0 0x728 +#define MT6331_AUXADC_STA1 0x72A +#define MT6331_AUXADC_RQST0 0x72C +#define MT6331_AUXADC_RQST0_SET 0x72E +#define MT6331_AUXADC_RQST0_CLR 0x730 +#define MT6331_AUXADC_RQST1 0x732 +#define MT6331_AUXADC_RQST1_SET 0x734 +#define MT6331_AUXADC_RQST1_CLR 0x736 +#define MT6331_AUXADC_CON0 0x738 +#define MT6331_AUXADC_CON1 0x73A +#define MT6331_AUXADC_CON2 0x73C +#define MT6331_AUXADC_CON3 0x73E +#define MT6331_AUXADC_CON4 0x740 +#define MT6331_AUXADC_CON5 0x742 +#define MT6331_AUXADC_CON6 0x744 +#define MT6331_AUXADC_CON7 0x746 +#define MT6331_AUXADC_CON8 0x748 +#define MT6331_AUXADC_CON9 0x74A +#define MT6331_AUXADC_CON10 0x74C +#define MT6331_AUXADC_CON11 0x74E +#define MT6331_AUXADC_CON12 0x750 +#define MT6331_AUXADC_CON13 0x752 +#define MT6331_AUXADC_CON14 0x754 +#define MT6331_AUXADC_CON15 0x756 +#define MT6331_AUXADC_CON16 0x758 +#define MT6331_AUXADC_CON17 0x75A +#define MT6331_AUXADC_CON18 0x75C +#define MT6331_AUXADC_CON19 0x75E +#define MT6331_AUXADC_CON20 0x760 +#define MT6331_AUXADC_CON21 0x762 +#define MT6331_AUXADC_CON22 0x764 +#define MT6331_AUXADC_CON23 0x766 +#define MT6331_AUXADC_CON24 0x768 +#define MT6331_AUXADC_CON25 0x76A +#define MT6331_AUXADC_CON26 0x76C +#define MT6331_AUXADC_CON27 0x76E +#define MT6331_AUXADC_CON28 0x770 +#define MT6331_AUXADC_CON29 0x772 +#define MT6331_AUXADC_CON30 0x774 +#define MT6331_AUXADC_CON31 0x776 +#define MT6331_AUXADC_CON32 0x778 +#define MT6331_ACCDET_CON0 0x77A 
+#define MT6331_ACCDET_CON1 0x77C +#define MT6331_ACCDET_CON2 0x77E +#define MT6331_ACCDET_CON3 0x780 +#define MT6331_ACCDET_CON4 0x782 +#define MT6331_ACCDET_CON5 0x784 +#define MT6331_ACCDET_CON6 0x786 +#define MT6331_ACCDET_CON7 0x788 +#define MT6331_ACCDET_CON8 0x78A +#define MT6331_ACCDET_CON9 0x78C +#define MT6331_ACCDET_CON10 0x78E +#define MT6331_ACCDET_CON11 0x790 +#define MT6331_ACCDET_CON12 0x792 +#define MT6331_ACCDET_CON13 0x794 +#define MT6331_ACCDET_CON14 0x796 +#define MT6331_ACCDET_CON15 0x798 +#define MT6331_ACCDET_CON16 0x79A +#define MT6331_ACCDET_CON17 0x79C +#define MT6331_ACCDET_CON18 0x79E +#define MT6331_ACCDET_CON19 0x7A0 +#define MT6331_ACCDET_CON20 0x7A2 +#define MT6331_ACCDET_CON21 0x7A4 +#define MT6331_ACCDET_CON22 0x7A6 +#define MT6331_ACCDET_CON23 0x7A8 +#define MT6331_ACCDET_CON24 0x7AA + +#endif /* __MFD_MT6331_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h new file mode 100644 index 0000000000..cd6013eb82 --- /dev/null +++ b/include/linux/mfd/mt6332/core.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 AngeloGioacchino Del Regno + */ + +#ifndef __MFD_MT6332_CORE_H__ +#define __MFD_MT6332_CORE_H__ + +enum mt6332_irq_status_numbers { + MT6332_IRQ_STATUS_CHR_COMPLETE = 0, + MT6332_IRQ_STATUS_THERMAL_SD, + MT6332_IRQ_STATUS_THERMAL_REG_IN, + MT6332_IRQ_STATUS_THERMAL_REG_OUT, + MT6332_IRQ_STATUS_OTG_OC, + MT6332_IRQ_STATUS_CHR_OC, + MT6332_IRQ_STATUS_OTG_THERMAL, + MT6332_IRQ_STATUS_CHRIN_SHORT, + MT6332_IRQ_STATUS_DRVCDT_SHORT, + MT6332_IRQ_STATUS_PLUG_IN_FLASH, + MT6332_IRQ_STATUS_CHRWDT_FLAG, + MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT, + MT6332_IRQ_STATUS_FLASH_VLED1_SHORT, + MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13, + MT6332_IRQ_STATUS_OV = 16, + MT6332_IRQ_STATUS_BVALID_DET, + MT6332_IRQ_STATUS_VBATON_UNDET, + MT6332_IRQ_STATUS_CHR_PLUG_IN, + MT6332_IRQ_STATUS_CHR_PLUG_OUT, + MT6332_IRQ_STATUS_BC11_TIMEOUT, + MT6332_IRQ_STATUS_FLASH_VLED2_SHORT, + MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23, + MT6332_IRQ_STATUS_THR_H = 32, + MT6332_IRQ_STATUS_THR_L, + MT6332_IRQ_STATUS_BAT_H, + MT6332_IRQ_STATUS_BAT_L, + MT6332_IRQ_STATUS_M3_H, + MT6332_IRQ_STATUS_M3_L, + MT6332_IRQ_STATUS_FG_BAT_H, + MT6332_IRQ_STATUS_FG_BAT_L, + MT6332_IRQ_STATUS_FG_CUR_H, + MT6332_IRQ_STATUS_FG_CUR_L, + MT6332_IRQ_STATUS_SPKL_D, + MT6332_IRQ_STATUS_SPKL_AB, + MT6332_IRQ_STATUS_BIF, + MT6332_IRQ_STATUS_VWLED_OC = 45, + MT6332_IRQ_STATUS_VDRAM_OC = 48, + MT6332_IRQ_STATUS_VDVFS2_OC, + MT6332_IRQ_STATUS_VRF1_OC, + MT6332_IRQ_STATUS_VRF2_OC, + MT6332_IRQ_STATUS_VPA_OC, + MT6332_IRQ_STATUS_VSBST_OC, + MT6332_IRQ_STATUS_LDO_OC, + MT6332_IRQ_STATUS_NR, +}; + +#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE +#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1) +#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV +#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1) +#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H +#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1) +#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC +#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1) + +#endif /* __MFD_MT6332_CORE_H__ */ diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h new file mode 100644 index 0000000000..65e0b86fce --- /dev/null +++ b/include/linux/mfd/mt6332/registers.h @@ -0,0 +1,642 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * 
Copyright (c) 2022 AngeloGioacchino Del Regno + */ + +#ifndef __MFD_MT6332_REGISTERS_H__ +#define __MFD_MT6332_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6332_HWCID 0x8000 +#define MT6332_SWCID 0x8002 +#define MT6332_TOP_CON 0x8004 +#define MT6332_DDR_VREF_AP_CON 0x8006 +#define MT6332_DDR_VREF_DQ_CON 0x8008 +#define MT6332_DDR_VREF_CA_CON 0x800A +#define MT6332_TEST_OUT 0x800C +#define MT6332_TEST_CON0 0x800E +#define MT6332_TEST_CON1 0x8010 +#define MT6332_TESTMODE_SW 0x8012 +#define MT6332_TESTMODE_ANA 0x8014 +#define MT6332_TDSEL_CON 0x8016 +#define MT6332_RDSEL_CON 0x8018 +#define MT6332_SMT_CON0 0x801A +#define MT6332_SMT_CON1 0x801C +#define MT6332_DRV_CON0 0x801E +#define MT6332_DRV_CON1 0x8020 +#define MT6332_DRV_CON2 0x8022 +#define MT6332_EN_STATUS0 0x8024 +#define MT6332_OCSTATUS0 0x8026 +#define MT6332_TOP_STATUS 0x8028 +#define MT6332_TOP_STATUS_SET 0x802A +#define MT6332_TOP_STATUS_CLR 0x802C +#define MT6332_FLASH_CON0 0x802E +#define MT6332_FLASH_CON1 0x8030 +#define MT6332_FLASH_CON2 0x8032 +#define MT6332_CORE_CON0 0x8034 +#define MT6332_CORE_CON1 0x8036 +#define MT6332_CORE_CON2 0x8038 +#define MT6332_CORE_CON3 0x803A +#define MT6332_CORE_CON4 0x803C +#define MT6332_CORE_CON5 0x803E +#define MT6332_CORE_CON6 0x8040 +#define MT6332_CORE_CON7 0x8042 +#define MT6332_CORE_CON8 0x8044 +#define MT6332_CORE_CON9 0x8046 +#define MT6332_CORE_CON10 0x8048 +#define MT6332_CORE_CON11 0x804A +#define MT6332_CORE_CON12 0x804C +#define MT6332_CORE_CON13 0x804E +#define MT6332_CORE_CON14 0x8050 +#define MT6332_CORE_CON15 0x8052 +#define MT6332_STA_CON0 0x8054 +#define MT6332_STA_CON1 0x8056 +#define MT6332_STA_CON2 0x8058 +#define MT6332_STA_CON3 0x805A +#define MT6332_STA_CON4 0x805C +#define MT6332_STA_CON5 0x805E +#define MT6332_STA_CON6 0x8060 +#define MT6332_STA_CON7 0x8062 +#define MT6332_CHR_CON0 0x8064 +#define MT6332_CHR_CON1 0x8066 +#define MT6332_CHR_CON2 0x8068 +#define MT6332_CHR_CON3 0x806A +#define MT6332_CHR_CON4 0x806C +#define MT6332_CHR_CON5 0x806E +#define MT6332_CHR_CON6 0x8070 +#define MT6332_CHR_CON7 0x8072 +#define MT6332_CHR_CON8 0x8074 +#define MT6332_CHR_CON9 0x8076 +#define MT6332_CHR_CON10 0x8078 +#define MT6332_CHR_CON11 0x807A +#define MT6332_CHR_CON12 0x807C +#define MT6332_CHR_CON13 0x807E +#define MT6332_CHR_CON14 0x8080 +#define MT6332_CHR_CON15 0x8082 +#define MT6332_BOOST_CON0 0x8084 +#define MT6332_BOOST_CON1 0x8086 +#define MT6332_BOOST_CON2 0x8088 +#define MT6332_BOOST_CON3 0x808A +#define MT6332_BOOST_CON4 0x808C +#define MT6332_BOOST_CON5 0x808E +#define MT6332_BOOST_CON6 0x8090 +#define MT6332_BOOST_CON7 0x8092 +#define MT6332_TOP_CKPDN_CON0 0x8094 +#define MT6332_TOP_CKPDN_CON0_SET 0x8096 +#define MT6332_TOP_CKPDN_CON0_CLR 0x8098 +#define MT6332_TOP_CKPDN_CON1 0x809A +#define MT6332_TOP_CKPDN_CON1_SET 0x809C +#define MT6332_TOP_CKPDN_CON1_CLR 0x809E +#define MT6332_TOP_CKPDN_CON2 0x80A0 +#define MT6332_TOP_CKPDN_CON2_SET 0x80A2 +#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4 +#define MT6332_TOP_CKSEL_CON0 0x80A6 +#define MT6332_TOP_CKSEL_CON0_SET 0x80A8 +#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA +#define MT6332_TOP_CKSEL_CON1 0x80AC +#define MT6332_TOP_CKSEL_CON1_SET 0x80AE +#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0 +#define MT6332_TOP_CKHWEN_CON 0x80B2 +#define MT6332_TOP_CKHWEN_CON_SET 0x80B4 +#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6 +#define MT6332_TOP_CKTST_CON0 0x80B8 +#define MT6332_TOP_CKTST_CON1 0x80BA +#define MT6332_TOP_RST_CON 0x80BC +#define MT6332_TOP_RST_CON_SET 0x80BE +#define MT6332_TOP_RST_CON_CLR 0x80C0 +#define 
MT6332_TOP_RST_MISC 0x80C2 +#define MT6332_TOP_RST_MISC_SET 0x80C4 +#define MT6332_TOP_RST_MISC_CLR 0x80C6 +#define MT6332_INT_CON0 0x80C8 +#define MT6332_INT_CON0_SET 0x80CA +#define MT6332_INT_CON0_CLR 0x80CC +#define MT6332_INT_CON1 0x80CE +#define MT6332_INT_CON1_SET 0x80D0 +#define MT6332_INT_CON1_CLR 0x80D2 +#define MT6332_INT_CON2 0x80D4 +#define MT6332_INT_CON2_SET 0x80D6 +#define MT6332_INT_CON2_CLR 0x80D8 +#define MT6332_INT_CON3 0x80DA +#define MT6332_INT_CON3_SET 0x80DC +#define MT6332_INT_CON3_CLR 0x80DE +#define MT6332_CHRWDT_CON0 0x80E0 +#define MT6332_CHRWDT_STATUS0 0x80E2 +#define MT6332_INT_STATUS0 0x80E4 +#define MT6332_INT_STATUS1 0x80E6 +#define MT6332_INT_STATUS2 0x80E8 +#define MT6332_INT_STATUS3 0x80EA +#define MT6332_OC_GEAR_0 0x80EC +#define MT6332_OC_GEAR_1 0x80EE +#define MT6332_OC_GEAR_2 0x80F0 +#define MT6332_INT_MISC_CON 0x80F2 +#define MT6332_RG_SPI_CON 0x80F4 +#define MT6332_DEW_DIO_EN 0x80F6 +#define MT6332_DEW_READ_TEST 0x80F8 +#define MT6332_DEW_WRITE_TEST 0x80FA +#define MT6332_DEW_CRC_SWRST 0x80FC +#define MT6332_DEW_CRC_EN 0x80FE +#define MT6332_DEW_CRC_VAL 0x8100 +#define MT6332_DEW_DBG_MON_SEL 0x8102 +#define MT6332_DEW_CIPHER_KEY_SEL 0x8104 +#define MT6332_DEW_CIPHER_IV_SEL 0x8106 +#define MT6332_DEW_CIPHER_EN 0x8108 +#define MT6332_DEW_CIPHER_RDY 0x810A +#define MT6332_DEW_CIPHER_MODE 0x810C +#define MT6332_DEW_CIPHER_SWRST 0x810E +#define MT6332_DEW_RDDMY_NO 0x8110 +#define MT6332_INT_STA 0x8112 +#define MT6332_BIF_CON0 0x8114 +#define MT6332_BIF_CON1 0x8116 +#define MT6332_BIF_CON2 0x8118 +#define MT6332_BIF_CON3 0x811A +#define MT6332_BIF_CON4 0x811C +#define MT6332_BIF_CON5 0x811E +#define MT6332_BIF_CON6 0x8120 +#define MT6332_BIF_CON7 0x8122 +#define MT6332_BIF_CON8 0x8124 +#define MT6332_BIF_CON9 0x8126 +#define MT6332_BIF_CON10 0x8128 +#define MT6332_BIF_CON11 0x812A +#define MT6332_BIF_CON12 0x812C +#define MT6332_BIF_CON13 0x812E +#define MT6332_BIF_CON14 0x8130 +#define MT6332_BIF_CON15 0x8132 +#define MT6332_BIF_CON16 0x8134 +#define MT6332_BIF_CON17 0x8136 +#define MT6332_BIF_CON18 0x8138 +#define MT6332_BIF_CON19 0x813A +#define MT6332_BIF_CON20 0x813C +#define MT6332_BIF_CON21 0x813E +#define MT6332_BIF_CON22 0x8140 +#define MT6332_BIF_CON23 0x8142 +#define MT6332_BIF_CON24 0x8144 +#define MT6332_BIF_CON25 0x8146 +#define MT6332_BIF_CON26 0x8148 +#define MT6332_BIF_CON27 0x814A +#define MT6332_BIF_CON28 0x814C +#define MT6332_BIF_CON29 0x814E +#define MT6332_BIF_CON30 0x8150 +#define MT6332_BIF_CON31 0x8152 +#define MT6332_BIF_CON32 0x8154 +#define MT6332_BIF_CON33 0x8156 +#define MT6332_BIF_CON34 0x8158 +#define MT6332_BIF_CON35 0x815A +#define MT6332_BIF_CON36 0x815C +#define MT6332_BATON_CON0 0x815E +#define MT6332_BIF_CON37 0x8160 +#define MT6332_BIF_CON38 0x8162 +#define MT6332_CHR_CON16 0x8164 +#define MT6332_CHR_CON17 0x8166 +#define MT6332_CHR_CON18 0x8168 +#define MT6332_CHR_CON19 0x816A +#define MT6332_CHR_CON20 0x816C +#define MT6332_CHR_CON21 0x816E +#define MT6332_CHR_CON22 0x8170 +#define MT6332_CHR_CON23 0x8172 +#define MT6332_CHR_CON24 0x8174 +#define MT6332_CHR_CON25 0x8176 +#define MT6332_STA_CON8 0x8178 +#define MT6332_BUCK_ALL_CON0 0x8400 +#define MT6332_BUCK_ALL_CON1 0x8402 +#define MT6332_BUCK_ALL_CON2 0x8404 +#define MT6332_BUCK_ALL_CON3 0x8406 +#define MT6332_BUCK_ALL_CON4 0x8408 +#define MT6332_BUCK_ALL_CON5 0x840A +#define MT6332_BUCK_ALL_CON6 0x840C +#define MT6332_BUCK_ALL_CON7 0x840E +#define MT6332_BUCK_ALL_CON8 0x8410 +#define MT6332_BUCK_ALL_CON9 0x8412 +#define MT6332_BUCK_ALL_CON10 0x8414 +#define 
MT6332_BUCK_ALL_CON11 0x8416 +#define MT6332_BUCK_ALL_CON12 0x8418 +#define MT6332_BUCK_ALL_CON13 0x841A +#define MT6332_BUCK_ALL_CON14 0x841C +#define MT6332_BUCK_ALL_CON15 0x841E +#define MT6332_BUCK_ALL_CON16 0x8420 +#define MT6332_BUCK_ALL_CON17 0x8422 +#define MT6332_BUCK_ALL_CON18 0x8424 +#define MT6332_BUCK_ALL_CON19 0x8426 +#define MT6332_BUCK_ALL_CON20 0x8428 +#define MT6332_BUCK_ALL_CON21 0x842A +#define MT6332_BUCK_ALL_CON22 0x842C +#define MT6332_BUCK_ALL_CON23 0x842E +#define MT6332_BUCK_ALL_CON24 0x8430 +#define MT6332_BUCK_ALL_CON25 0x8432 +#define MT6332_BUCK_ALL_CON26 0x8434 +#define MT6332_BUCK_ALL_CON27 0x8436 +#define MT6332_VDRAM_CON0 0x8438 +#define MT6332_VDRAM_CON1 0x843A +#define MT6332_VDRAM_CON2 0x843C +#define MT6332_VDRAM_CON3 0x843E +#define MT6332_VDRAM_CON4 0x8440 +#define MT6332_VDRAM_CON5 0x8442 +#define MT6332_VDRAM_CON6 0x8444 +#define MT6332_VDRAM_CON7 0x8446 +#define MT6332_VDRAM_CON8 0x8448 +#define MT6332_VDRAM_CON9 0x844A +#define MT6332_VDRAM_CON10 0x844C +#define MT6332_VDRAM_CON11 0x844E +#define MT6332_VDRAM_CON12 0x8450 +#define MT6332_VDRAM_CON13 0x8452 +#define MT6332_VDRAM_CON14 0x8454 +#define MT6332_VDRAM_CON15 0x8456 +#define MT6332_VDRAM_CON16 0x8458 +#define MT6332_VDRAM_CON17 0x845A +#define MT6332_VDRAM_CON18 0x845C +#define MT6332_VDRAM_CON19 0x845E +#define MT6332_VDRAM_CON20 0x8460 +#define MT6332_VDRAM_CON21 0x8462 +#define MT6332_VDVFS2_CON0 0x8464 +#define MT6332_VDVFS2_CON1 0x8466 +#define MT6332_VDVFS2_CON2 0x8468 +#define MT6332_VDVFS2_CON3 0x846A +#define MT6332_VDVFS2_CON4 0x846C +#define MT6332_VDVFS2_CON5 0x846E +#define MT6332_VDVFS2_CON6 0x8470 +#define MT6332_VDVFS2_CON7 0x8472 +#define MT6332_VDVFS2_CON8 0x8474 +#define MT6332_VDVFS2_CON9 0x8476 +#define MT6332_VDVFS2_CON10 0x8478 +#define MT6332_VDVFS2_CON11 0x847A +#define MT6332_VDVFS2_CON12 0x847C +#define MT6332_VDVFS2_CON13 0x847E +#define MT6332_VDVFS2_CON14 0x8480 +#define MT6332_VDVFS2_CON15 0x8482 +#define MT6332_VDVFS2_CON16 0x8484 +#define MT6332_VDVFS2_CON17 0x8486 +#define MT6332_VDVFS2_CON18 0x8488 +#define MT6332_VDVFS2_CON19 0x848A +#define MT6332_VDVFS2_CON20 0x848C +#define MT6332_VDVFS2_CON21 0x848E +#define MT6332_VDVFS2_CON22 0x8490 +#define MT6332_VDVFS2_CON23 0x8492 +#define MT6332_VDVFS2_CON24 0x8494 +#define MT6332_VDVFS2_CON25 0x8496 +#define MT6332_VDVFS2_CON26 0x8498 +#define MT6332_VDVFS2_CON27 0x849A +#define MT6332_VRF1_CON0 0x849C +#define MT6332_VRF1_CON1 0x849E +#define MT6332_VRF1_CON2 0x84A0 +#define MT6332_VRF1_CON3 0x84A2 +#define MT6332_VRF1_CON4 0x84A4 +#define MT6332_VRF1_CON5 0x84A6 +#define MT6332_VRF1_CON6 0x84A8 +#define MT6332_VRF1_CON7 0x84AA +#define MT6332_VRF1_CON8 0x84AC +#define MT6332_VRF1_CON9 0x84AE +#define MT6332_VRF1_CON10 0x84B0 +#define MT6332_VRF1_CON11 0x84B2 +#define MT6332_VRF1_CON12 0x84B4 +#define MT6332_VRF1_CON13 0x84B6 +#define MT6332_VRF1_CON14 0x84B8 +#define MT6332_VRF1_CON15 0x84BA +#define MT6332_VRF1_CON16 0x84BC +#define MT6332_VRF1_CON17 0x84BE +#define MT6332_VRF1_CON18 0x84C0 +#define MT6332_VRF1_CON19 0x84C2 +#define MT6332_VRF1_CON20 0x84C4 +#define MT6332_VRF1_CON21 0x84C6 +#define MT6332_VRF2_CON0 0x84C8 +#define MT6332_VRF2_CON1 0x84CA +#define MT6332_VRF2_CON2 0x84CC +#define MT6332_VRF2_CON3 0x84CE +#define MT6332_VRF2_CON4 0x84D0 +#define MT6332_VRF2_CON5 0x84D2 +#define MT6332_VRF2_CON6 0x84D4 +#define MT6332_VRF2_CON7 0x84D6 +#define MT6332_VRF2_CON8 0x84D8 +#define MT6332_VRF2_CON9 0x84DA +#define MT6332_VRF2_CON10 0x84DC +#define MT6332_VRF2_CON11 0x84DE +#define 
MT6332_VRF2_CON12 0x84E0 +#define MT6332_VRF2_CON13 0x84E2 +#define MT6332_VRF2_CON14 0x84E4 +#define MT6332_VRF2_CON15 0x84E6 +#define MT6332_VRF2_CON16 0x84E8 +#define MT6332_VRF2_CON17 0x84EA +#define MT6332_VRF2_CON18 0x84EC +#define MT6332_VRF2_CON19 0x84EE +#define MT6332_VRF2_CON20 0x84F0 +#define MT6332_VRF2_CON21 0x84F2 +#define MT6332_VPA_CON0 0x84F4 +#define MT6332_VPA_CON1 0x84F6 +#define MT6332_VPA_CON2 0x84F8 +#define MT6332_VPA_CON3 0x84FC +#define MT6332_VPA_CON4 0x84FE +#define MT6332_VPA_CON5 0x8500 +#define MT6332_VPA_CON6 0x8502 +#define MT6332_VPA_CON7 0x8504 +#define MT6332_VPA_CON8 0x8506 +#define MT6332_VPA_CON9 0x8508 +#define MT6332_VPA_CON10 0x850A +#define MT6332_VPA_CON11 0x850C +#define MT6332_VPA_CON12 0x850E +#define MT6332_VPA_CON13 0x8510 +#define MT6332_VPA_CON14 0x8512 +#define MT6332_VPA_CON15 0x8514 +#define MT6332_VPA_CON16 0x8516 +#define MT6332_VPA_CON17 0x8518 +#define MT6332_VPA_CON18 0x851A +#define MT6332_VPA_CON19 0x851C +#define MT6332_VPA_CON20 0x851E +#define MT6332_VPA_CON21 0x8520 +#define MT6332_VPA_CON22 0x8522 +#define MT6332_VPA_CON23 0x8524 +#define MT6332_VPA_CON24 0x8526 +#define MT6332_VPA_CON25 0x8528 +#define MT6332_VSBST_CON0 0x852A +#define MT6332_VSBST_CON1 0x852C +#define MT6332_VSBST_CON2 0x852E +#define MT6332_VSBST_CON3 0x8530 +#define MT6332_VSBST_CON4 0x8532 +#define MT6332_VSBST_CON5 0x8534 +#define MT6332_VSBST_CON6 0x8536 +#define MT6332_VSBST_CON7 0x8538 +#define MT6332_VSBST_CON8 0x853A +#define MT6332_VSBST_CON9 0x853C +#define MT6332_VSBST_CON10 0x853E +#define MT6332_VSBST_CON11 0x8540 +#define MT6332_VSBST_CON12 0x8542 +#define MT6332_VSBST_CON13 0x8544 +#define MT6332_VSBST_CON14 0x8546 +#define MT6332_VSBST_CON15 0x8548 +#define MT6332_VSBST_CON16 0x854A +#define MT6332_VSBST_CON17 0x854C +#define MT6332_VSBST_CON18 0x854E +#define MT6332_VSBST_CON19 0x8550 +#define MT6332_VSBST_CON20 0x8552 +#define MT6332_VSBST_CON21 0x8554 +#define MT6332_BUCK_K_CON0 0x8556 +#define MT6332_BUCK_K_CON1 0x8558 +#define MT6332_BUCK_K_CON2 0x855A +#define MT6332_BUCK_K_CON3 0x855C +#define MT6332_BUCK_K_CON4 0x855E +#define MT6332_BUCK_K_CON5 0x8560 +#define MT6332_AUXADC_ADC0 0x8800 +#define MT6332_AUXADC_ADC1 0x8802 +#define MT6332_AUXADC_ADC2 0x8804 +#define MT6332_AUXADC_ADC3 0x8806 +#define MT6332_AUXADC_ADC4 0x8808 +#define MT6332_AUXADC_ADC5 0x880A +#define MT6332_AUXADC_ADC6 0x880C +#define MT6332_AUXADC_ADC7 0x880E +#define MT6332_AUXADC_ADC8 0x8810 +#define MT6332_AUXADC_ADC9 0x8812 +#define MT6332_AUXADC_ADC10 0x8814 +#define MT6332_AUXADC_ADC11 0x8816 +#define MT6332_AUXADC_ADC12 0x8818 +#define MT6332_AUXADC_ADC13 0x881A +#define MT6332_AUXADC_ADC14 0x881C +#define MT6332_AUXADC_ADC15 0x881E +#define MT6332_AUXADC_ADC16 0x8820 +#define MT6332_AUXADC_ADC17 0x8822 +#define MT6332_AUXADC_ADC18 0x8824 +#define MT6332_AUXADC_ADC19 0x8826 +#define MT6332_AUXADC_ADC20 0x8828 +#define MT6332_AUXADC_ADC21 0x882A +#define MT6332_AUXADC_ADC22 0x882C +#define MT6332_AUXADC_ADC23 0x882E +#define MT6332_AUXADC_ADC24 0x8830 +#define MT6332_AUXADC_ADC25 0x8832 +#define MT6332_AUXADC_ADC26 0x8834 +#define MT6332_AUXADC_ADC27 0x8836 +#define MT6332_AUXADC_ADC28 0x8838 +#define MT6332_AUXADC_ADC29 0x883A +#define MT6332_AUXADC_ADC30 0x883C +#define MT6332_AUXADC_ADC31 0x883E +#define MT6332_AUXADC_ADC32 0x8840 +#define MT6332_AUXADC_ADC33 0x8842 +#define MT6332_AUXADC_ADC34 0x8844 +#define MT6332_AUXADC_ADC35 0x8846 +#define MT6332_AUXADC_ADC36 0x8848 +#define MT6332_AUXADC_ADC37 0x884A +#define MT6332_AUXADC_ADC38 0x884C +#define 
MT6332_AUXADC_ADC39 0x884E +#define MT6332_AUXADC_ADC40 0x8850 +#define MT6332_AUXADC_ADC41 0x8852 +#define MT6332_AUXADC_ADC42 0x8854 +#define MT6332_AUXADC_ADC43 0x8856 +#define MT6332_AUXADC_STA0 0x8858 +#define MT6332_AUXADC_STA1 0x885A +#define MT6332_AUXADC_RQST0 0x885C +#define MT6332_AUXADC_RQST0_SET 0x885E +#define MT6332_AUXADC_RQST0_CLR 0x8860 +#define MT6332_AUXADC_RQST1 0x8862 +#define MT6332_AUXADC_RQST1_SET 0x8864 +#define MT6332_AUXADC_RQST1_CLR 0x8866 +#define MT6332_AUXADC_CON0 0x8868 +#define MT6332_AUXADC_CON1 0x886A +#define MT6332_AUXADC_CON2 0x886C +#define MT6332_AUXADC_CON3 0x886E +#define MT6332_AUXADC_CON4 0x8870 +#define MT6332_AUXADC_CON5 0x8872 +#define MT6332_AUXADC_CON6 0x8874 +#define MT6332_AUXADC_CON7 0x8876 +#define MT6332_AUXADC_CON8 0x8878 +#define MT6332_AUXADC_CON9 0x887A +#define MT6332_AUXADC_CON10 0x887C +#define MT6332_AUXADC_CON11 0x887E +#define MT6332_AUXADC_CON12 0x8880 +#define MT6332_AUXADC_CON13 0x8882 +#define MT6332_AUXADC_CON14 0x8884 +#define MT6332_AUXADC_CON15 0x8886 +#define MT6332_AUXADC_CON16 0x8888 +#define MT6332_AUXADC_CON17 0x888A +#define MT6332_AUXADC_CON18 0x888C +#define MT6332_AUXADC_CON19 0x888E +#define MT6332_AUXADC_CON20 0x8890 +#define MT6332_AUXADC_CON21 0x8892 +#define MT6332_AUXADC_CON22 0x8894 +#define MT6332_AUXADC_CON23 0x8896 +#define MT6332_AUXADC_CON24 0x8898 +#define MT6332_AUXADC_CON25 0x889A +#define MT6332_AUXADC_CON26 0x889C +#define MT6332_AUXADC_CON27 0x889E +#define MT6332_AUXADC_CON28 0x88A0 +#define MT6332_AUXADC_CON29 0x88A2 +#define MT6332_AUXADC_CON30 0x88A4 +#define MT6332_AUXADC_CON31 0x88A6 +#define MT6332_AUXADC_CON32 0x88A8 +#define MT6332_AUXADC_CON33 0x88AA +#define MT6332_AUXADC_CON34 0x88AC +#define MT6332_AUXADC_CON35 0x88AE +#define MT6332_AUXADC_CON36 0x88B0 +#define MT6332_AUXADC_CON37 0x88B2 +#define MT6332_AUXADC_CON38 0x88B4 +#define MT6332_AUXADC_CON39 0x88B6 +#define MT6332_AUXADC_CON40 0x88B8 +#define MT6332_AUXADC_CON41 0x88BA +#define MT6332_AUXADC_CON42 0x88BC +#define MT6332_AUXADC_CON43 0x88BE +#define MT6332_AUXADC_CON44 0x88C0 +#define MT6332_AUXADC_CON45 0x88C2 +#define MT6332_AUXADC_CON46 0x88C4 +#define MT6332_AUXADC_CON47 0x88C6 +#define MT6332_STRUP_CONA0 0x8C00 +#define MT6332_STRUP_CONA1 0x8C02 +#define MT6332_STRUP_CONA2 0x8C04 +#define MT6332_STRUP_CON0 0x8C06 +#define MT6332_STRUP_CON2 0x8C08 +#define MT6332_STRUP_CON3 0x8C0A +#define MT6332_STRUP_CON4 0x8C0C +#define MT6332_STRUP_CON5 0x8C0E +#define MT6332_STRUP_CON6 0x8C10 +#define MT6332_STRUP_CON7 0x8C12 +#define MT6332_STRUP_CON8 0x8C14 +#define MT6332_STRUP_CON9 0x8C16 +#define MT6332_STRUP_CON10 0x8C18 +#define MT6332_STRUP_CON11 0x8C1A +#define MT6332_STRUP_CON12 0x8C1C +#define MT6332_STRUP_CON13 0x8C1E +#define MT6332_STRUP_CON14 0x8C20 +#define MT6332_STRUP_CON15 0x8C22 +#define MT6332_STRUP_CON16 0x8C24 +#define MT6332_STRUP_CON17 0x8C26 +#define MT6332_FGADC_CON0 0x8C28 +#define MT6332_FGADC_CON1 0x8C2A +#define MT6332_FGADC_CON2 0x8C2C +#define MT6332_FGADC_CON3 0x8C2E +#define MT6332_FGADC_CON4 0x8C30 +#define MT6332_FGADC_CON5 0x8C32 +#define MT6332_FGADC_CON6 0x8C34 +#define MT6332_FGADC_CON7 0x8C36 +#define MT6332_FGADC_CON8 0x8C38 +#define MT6332_FGADC_CON9 0x8C3A +#define MT6332_FGADC_CON10 0x8C3C +#define MT6332_FGADC_CON11 0x8C3E +#define MT6332_FGADC_CON12 0x8C40 +#define MT6332_FGADC_CON13 0x8C42 +#define MT6332_FGADC_CON14 0x8C44 +#define MT6332_FGADC_CON15 0x8C46 +#define MT6332_FGADC_CON16 0x8C48 +#define MT6332_FGADC_CON17 0x8C4A +#define MT6332_FGADC_CON18 0x8C4C +#define 
MT6332_FGADC_CON19 0x8C4E +#define MT6332_FGADC_CON20 0x8C50 +#define MT6332_FGADC_CON21 0x8C52 +#define MT6332_FGADC_CON22 0x8C54 +#define MT6332_OTP_CON0 0x8C56 +#define MT6332_OTP_CON1 0x8C58 +#define MT6332_OTP_CON2 0x8C5A +#define MT6332_OTP_CON3 0x8C5C +#define MT6332_OTP_CON4 0x8C5E +#define MT6332_OTP_CON5 0x8C60 +#define MT6332_OTP_CON6 0x8C62 +#define MT6332_OTP_CON7 0x8C64 +#define MT6332_OTP_CON8 0x8C66 +#define MT6332_OTP_CON9 0x8C68 +#define MT6332_OTP_CON10 0x8C6A +#define MT6332_OTP_CON11 0x8C6C +#define MT6332_OTP_CON12 0x8C6E +#define MT6332_OTP_CON13 0x8C70 +#define MT6332_OTP_CON14 0x8C72 +#define MT6332_OTP_DOUT_0_15 0x8C74 +#define MT6332_OTP_DOUT_16_31 0x8C76 +#define MT6332_OTP_DOUT_32_47 0x8C78 +#define MT6332_OTP_DOUT_48_63 0x8C7A +#define MT6332_OTP_DOUT_64_79 0x8C7C +#define MT6332_OTP_DOUT_80_95 0x8C7E +#define MT6332_OTP_DOUT_96_111 0x8C80 +#define MT6332_OTP_DOUT_112_127 0x8C82 +#define MT6332_OTP_DOUT_128_143 0x8C84 +#define MT6332_OTP_DOUT_144_159 0x8C86 +#define MT6332_OTP_DOUT_160_175 0x8C88 +#define MT6332_OTP_DOUT_176_191 0x8C8A +#define MT6332_OTP_DOUT_192_207 0x8C8C +#define MT6332_OTP_DOUT_208_223 0x8C8E +#define MT6332_OTP_DOUT_224_239 0x8C90 +#define MT6332_OTP_DOUT_240_255 0x8C92 +#define MT6332_OTP_VAL_0_15 0x8C94 +#define MT6332_OTP_VAL_16_31 0x8C96 +#define MT6332_OTP_VAL_32_47 0x8C98 +#define MT6332_OTP_VAL_48_63 0x8C9A +#define MT6332_OTP_VAL_64_79 0x8C9C +#define MT6332_OTP_VAL_80_95 0x8C9E +#define MT6332_OTP_VAL_96_111 0x8CA0 +#define MT6332_OTP_VAL_112_127 0x8CA2 +#define MT6332_OTP_VAL_128_143 0x8CA4 +#define MT6332_OTP_VAL_144_159 0x8CA6 +#define MT6332_OTP_VAL_160_175 0x8CA8 +#define MT6332_OTP_VAL_176_191 0x8CAA +#define MT6332_OTP_VAL_192_207 0x8CAC +#define MT6332_OTP_VAL_208_223 0x8CAE +#define MT6332_OTP_VAL_224_239 0x8CB0 +#define MT6332_OTP_VAL_240_255 0x8CB2 +#define MT6332_LDO_CON0 0x8CB4 +#define MT6332_LDO_CON1 0x8CB6 +#define MT6332_LDO_CON2 0x8CB8 +#define MT6332_LDO_CON3 0x8CBA +#define MT6332_LDO_CON5 0x8CBC +#define MT6332_LDO_CON6 0x8CBE +#define MT6332_LDO_CON7 0x8CC0 +#define MT6332_LDO_CON8 0x8CC2 +#define MT6332_LDO_CON9 0x8CC4 +#define MT6332_LDO_CON10 0x8CC6 +#define MT6332_LDO_CON11 0x8CC8 +#define MT6332_LDO_CON12 0x8CCA +#define MT6332_LDO_CON13 0x8CCC +#define MT6332_FQMTR_CON0 0x8CCE +#define MT6332_FQMTR_CON1 0x8CD0 +#define MT6332_FQMTR_CON2 0x8CD2 +#define MT6332_IWLED_CON0 0x8CD4 +#define MT6332_IWLED_DEG 0x8CD6 +#define MT6332_IWLED_STATUS 0x8CD8 +#define MT6332_IWLED_EN_CTRL 0x8CDA +#define MT6332_IWLED_CON1 0x8CDC +#define MT6332_IWLED_CON2 0x8CDE +#define MT6332_IWLED_TRIM0 0x8CE0 +#define MT6332_IWLED_TRIM1 0x8CE2 +#define MT6332_IWLED_CON3 0x8CE4 +#define MT6332_IWLED_CON4 0x8CE6 +#define MT6332_IWLED_CON5 0x8CE8 +#define MT6332_IWLED_CON6 0x8CEA +#define MT6332_IWLED_CON7 0x8CEC +#define MT6332_IWLED_CON8 0x8CEE +#define MT6332_IWLED_CON9 0x8CF0 +#define MT6332_SPK_CON0 0x8CF2 +#define MT6332_SPK_CON1 0x8CF4 +#define MT6332_SPK_CON2 0x8CF6 +#define MT6332_SPK_CON3 0x8CF8 +#define MT6332_SPK_CON4 0x8CFA +#define MT6332_SPK_CON5 0x8CFC +#define MT6332_SPK_CON6 0x8CFE +#define MT6332_SPK_CON7 0x8D00 +#define MT6332_SPK_CON8 0x8D02 +#define MT6332_SPK_CON9 0x8D04 +#define MT6332_SPK_CON10 0x8D06 +#define MT6332_SPK_CON11 0x8D08 +#define MT6332_SPK_CON12 0x8D0A +#define MT6332_SPK_CON13 0x8D0C +#define MT6332_SPK_CON14 0x8D0E +#define MT6332_SPK_CON15 0x8D10 +#define MT6332_SPK_CON16 0x8D12 +#define MT6332_TESTI_CON0 0x8D14 +#define MT6332_TESTI_CON1 0x8D16 +#define MT6332_TESTI_CON2 0x8D18 +#define 
MT6332_TESTI_CON3 0x8D1A +#define MT6332_TESTI_CON4 0x8D1C +#define MT6332_TESTI_CON5 0x8D1E +#define MT6332_TESTI_CON6 0x8D20 +#define MT6332_TESTI_MUX_CON0 0x8D22 +#define MT6332_TESTI_MUX_CON1 0x8D24 +#define MT6332_TESTI_MUX_CON2 0x8D26 +#define MT6332_TESTI_MUX_CON3 0x8D28 +#define MT6332_TESTI_MUX_CON4 0x8D2A +#define MT6332_TESTI_MUX_CON5 0x8D2C +#define MT6332_TESTI_MUX_CON6 0x8D2E +#define MT6332_TESTO_CON0 0x8D30 +#define MT6332_TESTO_CON1 0x8D32 +#define MT6332_TEST_OMUX_CON0 0x8D34 +#define MT6332_TEST_OMUX_CON1 0x8D36 +#define MT6332_DEBUG_CON0 0x8D38 +#define MT6332_DEBUG_CON1 0x8D3A +#define MT6332_DEBUG_CON2 0x8D3C +#define MT6332_FGADC_CON23 0x8D3E +#define MT6332_FGADC_CON24 0x8D40 +#define MT6332_FGADC_CON25 0x8D42 +#define MT6332_TOP_RST_STATUS 0x8D44 +#define MT6332_TOP_RST_STATUS_SET 0x8D46 +#define MT6332_TOP_RST_STATUS_CLR 0x8D48 +#define MT6332_VDVFS2_CON28 0x8D4A + +#endif /* __MFD_MT6332_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h new file mode 100644 index 0000000000..2441611264 --- /dev/null +++ b/include/linux/mfd/mt6357/core.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 BayLibre, SAS + * Author: Fabien Parent + */ + +#ifndef __MFD_MT6357_CORE_H__ +#define __MFD_MT6357_CORE_H__ + +enum mt6357_irq_top_status_shift { + MT6357_BUCK_TOP = 0, + MT6357_LDO_TOP, + MT6357_PSC_TOP, + MT6357_SCK_TOP, + MT6357_BM_TOP, + MT6357_HK_TOP, + MT6357_XPP_TOP, + MT6357_AUD_TOP, + MT6357_MISC_TOP, +}; + +enum mt6357_irq_numbers { + MT6357_IRQ_VPROC_OC = 0, + MT6357_IRQ_VCORE_OC, + MT6357_IRQ_VMODEM_OC, + MT6357_IRQ_VS1_OC, + MT6357_IRQ_VPA_OC, + MT6357_IRQ_VCORE_PREOC, + MT6357_IRQ_VFE28_OC = 16, + MT6357_IRQ_VXO22_OC, + MT6357_IRQ_VRF18_OC, + MT6357_IRQ_VRF12_OC, + MT6357_IRQ_VEFUSE_OC, + MT6357_IRQ_VCN33_OC, + MT6357_IRQ_VCN28_OC, + MT6357_IRQ_VCN18_OC, + MT6357_IRQ_VCAMA_OC, + MT6357_IRQ_VCAMD_OC, + MT6357_IRQ_VCAMIO_OC, + MT6357_IRQ_VLDO28_OC, + MT6357_IRQ_VUSB33_OC, + MT6357_IRQ_VAUX18_OC, + MT6357_IRQ_VAUD28_OC, + MT6357_IRQ_VIO28_OC, + MT6357_IRQ_VIO18_OC, + MT6357_IRQ_VSRAM_PROC_OC, + MT6357_IRQ_VSRAM_OTHERS_OC, + MT6357_IRQ_VIBR_OC, + MT6357_IRQ_VDRAM_OC, + MT6357_IRQ_VMC_OC, + MT6357_IRQ_VMCH_OC, + MT6357_IRQ_VEMC_OC, + MT6357_IRQ_VSIM1_OC, + MT6357_IRQ_VSIM2_OC, + MT6357_IRQ_PWRKEY = 48, + MT6357_IRQ_HOMEKEY, + MT6357_IRQ_PWRKEY_R, + MT6357_IRQ_HOMEKEY_R, + MT6357_IRQ_NI_LBAT_INT, + MT6357_IRQ_CHRDET, + MT6357_IRQ_CHRDET_EDGE, + MT6357_IRQ_VCDT_HV_DET, + MT6357_IRQ_WATCHDOG, + MT6357_IRQ_VBATON_UNDET, + MT6357_IRQ_BVALID_DET, + MT6357_IRQ_OV, + MT6357_IRQ_RTC = 64, + MT6357_IRQ_FG_BAT0_H = 80, + MT6357_IRQ_FG_BAT0_L, + MT6357_IRQ_FG_CUR_H, + MT6357_IRQ_FG_CUR_L, + MT6357_IRQ_FG_ZCV, + MT6357_IRQ_BATON_LV = 96, + MT6357_IRQ_BATON_HT, + MT6357_IRQ_BAT_H = 112, + MT6357_IRQ_BAT_L, + MT6357_IRQ_AUXADC_IMP, + MT6357_IRQ_NAG_C_DLTV, + MT6357_IRQ_AUDIO = 128, + MT6357_IRQ_ACCDET = 133, + MT6357_IRQ_ACCDET_EINT0, + MT6357_IRQ_ACCDET_EINT1, + MT6357_IRQ_SPI_CMD_ALERT = 144, + MT6357_IRQ_NR, +}; + +#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC +#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC +#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY +#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC +#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H +#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H +#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO +#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT + +#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 1) +#define 
MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1) +#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1) +#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1) +#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1) +#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1) +#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1) +#define MT6357_IRQ_MISC_BITS \ + (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1) + +#define MT6357_TOP_GEN(sp) \ +{ \ + .hwirq_base = MT6357_IRQ_##sp##_BASE, \ + .num_int_regs = \ + ((MT6357_IRQ_##sp##_BITS - 1) / \ + MTK_PMIC_REG_WIDTH) + 1, \ + .en_reg = MT6357_##sp##_TOP_INT_CON0, \ + .en_reg_shift = 0x6, \ + .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \ + .sta_reg_shift = 0x2, \ + .top_offset = MT6357_##sp##_TOP, \ +} + +#endif /* __MFD_MT6357_CORE_H__ */ diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h new file mode 100644 index 0000000000..e24af83b61 --- /dev/null +++ b/include/linux/mfd/mt6357/registers.h @@ -0,0 +1,1574 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 MediaTek Inc. + */ + +#ifndef __MFD_MT6357_REGISTERS_H__ +#define __MFD_MT6357_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6357_TOP0_ID 0x0 +#define MT6357_TOP0_REV0 0x2 +#define MT6357_TOP0_DSN_DBI 0x4 +#define MT6357_TOP0_DSN_DXI 0x6 +#define MT6357_HWCID 0x8 +#define MT6357_SWCID 0xa +#define MT6357_PONSTS 0xc +#define MT6357_POFFSTS 0xe +#define MT6357_PSTSCTL 0x10 +#define MT6357_PG_DEB_STS0 0x12 +#define MT6357_PG_SDN_STS0 0x14 +#define MT6357_OC_SDN_STS0 0x16 +#define MT6357_THERMALSTATUS 0x18 +#define MT6357_TOP_CON 0x1a +#define MT6357_TEST_OUT 0x1c +#define MT6357_TEST_CON0 0x1e +#define MT6357_TEST_CON1 0x20 +#define MT6357_TESTMODE_SW 0x22 +#define MT6357_TOPSTATUS 0x24 +#define MT6357_TDSEL_CON 0x26 +#define MT6357_RDSEL_CON 0x28 +#define MT6357_SMT_CON0 0x2a +#define MT6357_SMT_CON1 0x2c +#define MT6357_TOP_RSV0 0x2e +#define MT6357_TOP_RSV1 0x30 +#define MT6357_DRV_CON0 0x32 +#define MT6357_DRV_CON1 0x34 +#define MT6357_DRV_CON2 0x36 +#define MT6357_DRV_CON3 0x38 +#define MT6357_FILTER_CON0 0x3a +#define MT6357_FILTER_CON1 0x3c +#define MT6357_FILTER_CON2 0x3e +#define MT6357_FILTER_CON3 0x40 +#define MT6357_TOP_STATUS 0x42 +#define MT6357_TOP_STATUS_SET 0x44 +#define MT6357_TOP_STATUS_CLR 0x46 +#define MT6357_TOP_TRAP 0x48 +#define MT6357_TOP1_ID 0x80 +#define MT6357_TOP1_REV0 0x82 +#define MT6357_TOP1_DSN_DBI 0x84 +#define MT6357_TOP1_DSN_DXI 0x86 +#define MT6357_GPIO_DIR0 0x88 +#define MT6357_GPIO_DIR0_SET 0x8a +#define MT6357_GPIO_DIR0_CLR 0x8c +#define MT6357_GPIO_PULLEN0 0x8e +#define MT6357_GPIO_PULLEN0_SET 0x90 +#define MT6357_GPIO_PULLEN0_CLR 0x92 +#define MT6357_GPIO_PULLSEL0 0x94 +#define MT6357_GPIO_PULLSEL0_SET 0x96 +#define MT6357_GPIO_PULLSEL0_CLR 0x98 +#define MT6357_GPIO_DINV0 0x9a +#define MT6357_GPIO_DINV0_SET 0x9c +#define MT6357_GPIO_DINV0_CLR 0x9e +#define MT6357_GPIO_DOUT0 0xa0 +#define MT6357_GPIO_DOUT0_SET 0xa2 +#define MT6357_GPIO_DOUT0_CLR 0xa4 +#define MT6357_GPIO_PI0 0xa6 +#define MT6357_GPIO_POE0 0xa8 +#define MT6357_GPIO_MODE0 0xaa +#define MT6357_GPIO_MODE0_SET 0xac +#define MT6357_GPIO_MODE0_CLR 0xae +#define MT6357_GPIO_MODE1 0xb0 +#define MT6357_GPIO_MODE1_SET 0xb2 +#define MT6357_GPIO_MODE1_CLR 0xb4 +#define MT6357_GPIO_MODE2 0xb6 +#define MT6357_GPIO_MODE2_SET 0xb8 +#define MT6357_GPIO_MODE2_CLR 0xba +#define MT6357_GPIO_MODE3 0xbc +#define 
MT6357_GPIO_MODE3_SET 0xbe +#define MT6357_GPIO_MODE3_CLR 0xc0 +#define MT6357_GPIO_RSV 0xc2 +#define MT6357_TOP2_ID 0x100 +#define MT6357_TOP2_REV0 0x102 +#define MT6357_TOP2_DSN_DBI 0x104 +#define MT6357_TOP2_DSN_DXI 0x106 +#define MT6357_TOP_PAM0 0x108 +#define MT6357_TOP_PAM1 0x10a +#define MT6357_TOP_CKPDN_CON0 0x10c +#define MT6357_TOP_CKPDN_CON0_SET 0x10e +#define MT6357_TOP_CKPDN_CON0_CLR 0x110 +#define MT6357_TOP_CKPDN_CON1 0x112 +#define MT6357_TOP_CKPDN_CON1_SET 0x114 +#define MT6357_TOP_CKPDN_CON1_CLR 0x116 +#define MT6357_TOP_CKSEL_CON0 0x118 +#define MT6357_TOP_CKSEL_CON0_SET 0x11a +#define MT6357_TOP_CKSEL_CON0_CLR 0x11c +#define MT6357_TOP_CKSEL_CON1 0x11e +#define MT6357_TOP_CKSEL_CON1_SET 0x120 +#define MT6357_TOP_CKSEL_CON1_CLR 0x122 +#define MT6357_TOP_CKDIVSEL_CON0 0x124 +#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126 +#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128 +#define MT6357_TOP_CKHWEN_CON0 0x12a +#define MT6357_TOP_CKHWEN_CON0_SET 0x12c +#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e +#define MT6357_TOP_CKTST_CON0 0x130 +#define MT6357_TOP_CKTST_CON1 0x132 +#define MT6357_TOP_CLK_CON0 0x134 +#define MT6357_TOP_CLK_CON0_SET 0x136 +#define MT6357_TOP_CLK_CON0_CLR 0x138 +#define MT6357_TOP_DCM_CON0 0x13a +#define MT6357_TOP_HANDOVER_DEBUG0 0x13c +#define MT6357_TOP_RST_CON0 0x13e +#define MT6357_TOP_RST_CON0_SET 0x140 +#define MT6357_TOP_RST_CON0_CLR 0x142 +#define MT6357_TOP_RST_CON1 0x144 +#define MT6357_TOP_RST_CON1_SET 0x146 +#define MT6357_TOP_RST_CON1_CLR 0x148 +#define MT6357_TOP_RST_CON2 0x14a +#define MT6357_TOP_RST_MISC 0x14c +#define MT6357_TOP_RST_MISC_SET 0x14e +#define MT6357_TOP_RST_MISC_CLR 0x150 +#define MT6357_TOP_RST_STATUS 0x152 +#define MT6357_TOP_RST_STATUS_SET 0x154 +#define MT6357_TOP_RST_STATUS_CLR 0x156 +#define MT6357_TOP2_ELR_NUM 0x158 +#define MT6357_TOP2_ELR0 0x15a +#define MT6357_TOP2_ELR1 0x15c +#define MT6357_TOP3_ID 0x180 +#define MT6357_TOP3_REV0 0x182 +#define MT6357_TOP3_DSN_DBI 0x184 +#define MT6357_TOP3_DSN_DXI 0x186 +#define MT6357_MISC_TOP_INT_CON0 0x188 +#define MT6357_MISC_TOP_INT_CON0_SET 0x18a +#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c +#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e +#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190 +#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192 +#define MT6357_MISC_TOP_INT_STATUS0 0x194 +#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196 +#define MT6357_TOP_INT_MASK_CON0 0x198 +#define MT6357_TOP_INT_MASK_CON0_SET 0x19a +#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c +#define MT6357_TOP_INT_STATUS0 0x19e +#define MT6357_TOP_INT_RAW_STATUS0 0x1a0 +#define MT6357_TOP_INT_CON0 0x1a2 +#define MT6357_PLT0_ID 0x380 +#define MT6357_PLT0_REV0 0x382 +#define MT6357_PLT0_REV1 0x384 +#define MT6357_PLT0_DSN_DXI 0x386 +#define MT6357_FQMTR_CON0 0x388 +#define MT6357_FQMTR_CON1 0x38a +#define MT6357_FQMTR_CON2 0x38c +#define MT6357_TOP_CLK_TRIM 0x38e +#define MT6357_OTP_CON0 0x390 +#define MT6357_OTP_CON1 0x392 +#define MT6357_OTP_CON2 0x394 +#define MT6357_OTP_CON3 0x396 +#define MT6357_OTP_CON4 0x398 +#define MT6357_OTP_CON5 0x39a +#define MT6357_OTP_CON6 0x39c +#define MT6357_OTP_CON7 0x39e +#define MT6357_OTP_CON8 0x3a0 +#define MT6357_OTP_CON9 0x3a2 +#define MT6357_OTP_CON10 0x3a4 +#define MT6357_OTP_CON11 0x3a6 +#define MT6357_OTP_CON12 0x3a8 +#define MT6357_OTP_CON13 0x3aa +#define MT6357_OTP_CON14 0x3ac +#define MT6357_TOP_TMA_KEY 0x3ae +#define MT6357_TOP_MDB_CONF0 0x3b0 +#define MT6357_TOP_MDB_CONF1 0x3b2 +#define MT6357_TOP_MDB_CONF2 0x3b4 +#define MT6357_PLT0_ELR_NUM 0x3b6 +#define MT6357_PLT0_ELR0 0x3b8 
+#define MT6357_PLT0_ELR1 0x3ba +#define MT6357_SPISLV_ID 0x400 +#define MT6357_SPISLV_REV0 0x402 +#define MT6357_SPISLV_REV1 0x404 +#define MT6357_SPISLV_DSN_DXI 0x406 +#define MT6357_RG_SPI_CON0 0x408 +#define MT6357_DEW_DIO_EN 0x40a +#define MT6357_DEW_READ_TEST 0x40c +#define MT6357_DEW_WRITE_TEST 0x40e +#define MT6357_DEW_CRC_SWRST 0x410 +#define MT6357_DEW_CRC_EN 0x412 +#define MT6357_DEW_CRC_VAL 0x414 +#define MT6357_DEW_DBG_MON_SEL 0x416 +#define MT6357_DEW_CIPHER_KEY_SEL 0x418 +#define MT6357_DEW_CIPHER_IV_SEL 0x41a +#define MT6357_DEW_CIPHER_EN 0x41c +#define MT6357_DEW_CIPHER_RDY 0x41e +#define MT6357_DEW_CIPHER_MODE 0x420 +#define MT6357_DEW_CIPHER_SWRST 0x422 +#define MT6357_DEW_RDDMY_NO 0x424 +#define MT6357_INT_TYPE_CON0 0x426 +#define MT6357_INT_TYPE_CON0_SET 0x428 +#define MT6357_INT_TYPE_CON0_CLR 0x42a +#define MT6357_INT_STA 0x42c +#define MT6357_RG_SPI_CON1 0x42e +#define MT6357_RG_SPI_CON2 0x430 +#define MT6357_RG_SPI_CON3 0x432 +#define MT6357_RG_SPI_CON4 0x434 +#define MT6357_RG_SPI_CON5 0x436 +#define MT6357_RG_SPI_CON6 0x438 +#define MT6357_RG_SPI_CON7 0x43a +#define MT6357_RG_SPI_CON8 0x43c +#define MT6357_RG_SPI_CON9 0x43e +#define MT6357_RG_SPI_CON10 0x440 +#define MT6357_RG_SPI_CON11 0x442 +#define MT6357_RG_SPI_CON12 0x444 +#define MT6357_RG_SPI_CON13 0x446 +#define MT6357_TOP_SPI_CON0 0x448 +#define MT6357_TOP_SPI_CON1 0x44a +#define MT6357_SCK_TOP_DSN_ID 0x500 +#define MT6357_SCK_TOP_DSN_REV0 0x502 +#define MT6357_SCK_TOP_DBI 0x504 +#define MT6357_SCK_TOP_DXI 0x506 +#define MT6357_SCK_TOP_TPM0 0x508 +#define MT6357_SCK_TOP_TPM1 0x50a +#define MT6357_SCK_TOP_CON0 0x50c +#define MT6357_SCK_TOP_CON1 0x50e +#define MT6357_SCK_TOP_TEST_OUT 0x510 +#define MT6357_SCK_TOP_TEST_CON0 0x512 +#define MT6357_SCK_TOP_CKPDN_CON0 0x514 +#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516 +#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518 +#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a +#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c +#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e +#define MT6357_SCK_TOP_CKTST_CON 0x520 +#define MT6357_SCK_TOP_RST_CON0 0x522 +#define MT6357_SCK_TOP_RST_CON0_SET 0x524 +#define MT6357_SCK_TOP_RST_CON0_CLR 0x526 +#define MT6357_SCK_TOP_INT_CON0 0x528 +#define MT6357_SCK_TOP_INT_CON0_SET 0x52a +#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c +#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e +#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530 +#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532 +#define MT6357_SCK_TOP_INT_STATUS0 0x534 +#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536 +#define MT6357_SCK_TOP_INT_MISC_CON 0x538 +#define MT6357_EOSC_CALI_CON0 0x53a +#define MT6357_EOSC_CALI_CON1 0x53c +#define MT6357_RTC_MIX_CON0 0x53e +#define MT6357_RTC_MIX_CON1 0x540 +#define MT6357_RTC_MIX_CON2 0x542 +#define MT6357_RTC_DSN_ID 0x580 +#define MT6357_RTC_DSN_REV0 0x582 +#define MT6357_RTC_DBI 0x584 +#define MT6357_RTC_DXI 0x586 +#define MT6357_RTC_BBPU 0x588 +#define MT6357_RTC_IRQ_STA 0x58a +#define MT6357_RTC_IRQ_EN 0x58c +#define MT6357_RTC_CII_EN 0x58e +#define MT6357_RTC_AL_MASK 0x590 +#define MT6357_RTC_TC_SEC 0x592 +#define MT6357_RTC_TC_MIN 0x594 +#define MT6357_RTC_TC_HOU 0x596 +#define MT6357_RTC_TC_DOM 0x598 +#define MT6357_RTC_TC_DOW 0x59a +#define MT6357_RTC_TC_MTH 0x59c +#define MT6357_RTC_TC_YEA 0x59e +#define MT6357_RTC_AL_SEC 0x5a0 +#define MT6357_RTC_AL_MIN 0x5a2 +#define MT6357_RTC_AL_HOU 0x5a4 +#define MT6357_RTC_AL_DOM 0x5a6 +#define MT6357_RTC_AL_DOW 0x5a8 +#define MT6357_RTC_AL_MTH 0x5aa +#define MT6357_RTC_AL_YEA 0x5ac +#define MT6357_RTC_OSC32CON 0x5ae +#define 
MT6357_RTC_POWERKEY1 0x5b0 +#define MT6357_RTC_POWERKEY2 0x5b2 +#define MT6357_RTC_PDN1 0x5b4 +#define MT6357_RTC_PDN2 0x5b6 +#define MT6357_RTC_SPAR0 0x5b8 +#define MT6357_RTC_SPAR1 0x5ba +#define MT6357_RTC_PROT 0x5bc +#define MT6357_RTC_DIFF 0x5be +#define MT6357_RTC_CALI 0x5c0 +#define MT6357_RTC_WRTGR 0x5c2 +#define MT6357_RTC_CON 0x5c4 +#define MT6357_RTC_SEC_CTRL 0x5c6 +#define MT6357_RTC_INT_CNT 0x5c8 +#define MT6357_RTC_SEC_DAT0 0x5ca +#define MT6357_RTC_SEC_DAT1 0x5cc +#define MT6357_RTC_SEC_DAT2 0x5ce +#define MT6357_RTC_SEC_DSN_ID 0x600 +#define MT6357_RTC_SEC_DSN_REV0 0x602 +#define MT6357_RTC_SEC_DBI 0x604 +#define MT6357_RTC_SEC_DXI 0x606 +#define MT6357_RTC_TC_SEC_SEC 0x608 +#define MT6357_RTC_TC_MIN_SEC 0x60a +#define MT6357_RTC_TC_HOU_SEC 0x60c +#define MT6357_RTC_TC_DOM_SEC 0x60e +#define MT6357_RTC_TC_DOW_SEC 0x610 +#define MT6357_RTC_TC_MTH_SEC 0x612 +#define MT6357_RTC_TC_YEA_SEC 0x614 +#define MT6357_RTC_SEC_CK_PDN 0x616 +#define MT6357_RTC_SEC_WRTGR 0x618 +#define MT6357_DCXO_DSN_ID 0x780 +#define MT6357_DCXO_DSN_REV0 0x782 +#define MT6357_DCXO_DSN_DBI 0x784 +#define MT6357_DCXO_DSN_DXI 0x786 +#define MT6357_DCXO_CW00 0x788 +#define MT6357_DCXO_CW00_SET 0x78a +#define MT6357_DCXO_CW00_CLR 0x78c +#define MT6357_DCXO_CW01 0x78e +#define MT6357_DCXO_CW02 0x790 +#define MT6357_DCXO_CW03 0x792 +#define MT6357_DCXO_CW04 0x794 +#define MT6357_DCXO_CW05 0x796 +#define MT6357_DCXO_CW06 0x798 +#define MT6357_DCXO_CW07 0x79a +#define MT6357_DCXO_CW08 0x79c +#define MT6357_DCXO_CW09 0x79e +#define MT6357_DCXO_CW10 0x7a0 +#define MT6357_DCXO_CW11 0x7a2 +#define MT6357_DCXO_CW11_SET 0x7a4 +#define MT6357_DCXO_CW11_CLR 0x7a6 +#define MT6357_DCXO_CW12 0x7a8 +#define MT6357_DCXO_CW13 0x7aa +#define MT6357_DCXO_CW14 0x7ac +#define MT6357_DCXO_CW15 0x7ae +#define MT6357_DCXO_CW16 0x7b0 +#define MT6357_DCXO_CW17 0x7b2 +#define MT6357_DCXO_CW18 0x7b4 +#define MT6357_DCXO_CW19 0x7b6 +#define MT6357_DCXO_CW20 0x7b8 +#define MT6357_DCXO_CW21 0x7ba +#define MT6357_DCXO_CW22 0x7bc +#define MT6357_DCXO_ELR_NUM 0x7be +#define MT6357_DCXO_ELR0 0x7c0 +#define MT6357_PSC_TOP_ID 0x900 +#define MT6357_PSC_TOP_REV0 0x902 +#define MT6357_PSC_TOP_DBI 0x904 +#define MT6357_PSC_TOP_DXI 0x906 +#define MT6357_PSC_TPM0 0x908 +#define MT6357_PSC_TPM1 0x90a +#define MT6357_PSC_TOP_RSTCTL_0 0x90c +#define MT6357_PSC_TOP_INT_CON0 0x90e +#define MT6357_PSC_TOP_INT_CON0_SET 0x910 +#define MT6357_PSC_TOP_INT_CON0_CLR 0x912 +#define MT6357_PSC_TOP_INT_MASK_CON0 0x914 +#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916 +#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918 +#define MT6357_PSC_TOP_INT_STATUS0 0x91a +#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c +#define MT6357_PSC_TOP_INT_MISC_CON 0x91e +#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920 +#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922 +#define MT6357_PSC_TOP_MON_CTL 0x924 +#define MT6357_STRUP_ID 0x980 +#define MT6357_STRUP_REV0 0x982 +#define MT6357_STRUP_DBI 0x984 +#define MT6357_STRUP_DXI 0x986 +#define MT6357_STRUP_ANA_CON0 0x988 +#define MT6357_STRUP_ANA_CON1 0x98a +#define MT6357_STRUP_ANA_CON2 0x98c +#define MT6357_STRUP_ELR_NUM 0x98e +#define MT6357_STRUP_ELR_0 0x990 +#define MT6357_PSEQ_ID 0xa00 +#define MT6357_PSEQ_REV0 0xa02 +#define MT6357_PSEQ_DBI 0xa04 +#define MT6357_PSEQ_DXI 0xa06 +#define MT6357_PPCCTL0 0xa08 +#define MT6357_PPCCTL1 0xa0a +#define MT6357_PPCCTL2 0xa0c +#define MT6357_PPCCFG0 0xa0e +#define MT6357_PPCTST0 0xa10 +#define MT6357_PORFLAG 0xa12 +#define MT6357_STRUP_CON0 0xa14 +#define MT6357_STRUP_CON1 0xa16 +#define MT6357_STRUP_CON2 
0xa18 +#define MT6357_STRUP_CON3 0xa1a +#define MT6357_STRUP_CON4 0xa1c +#define MT6357_STRUP_CON5 0xa1e +#define MT6357_STRUP_CON6 0xa20 +#define MT6357_STRUP_CON7 0xa22 +#define MT6357_CPSCFG0 0xa24 +#define MT6357_STRUP_CON9 0xa26 +#define MT6357_STRUP_CON10 0xa28 +#define MT6357_STRUP_CON11 0xa2a +#define MT6357_STRUP_CON12 0xa2c +#define MT6357_STRUP_CON13 0xa2e +#define MT6357_STRUP_CON14 0xa30 +#define MT6357_STRUP_CON15 0xa32 +#define MT6357_STRUP_CON16 0xa34 +#define MT6357_STRUP_CON19 0xa36 +#define MT6357_PSEQ_ELR_NUM 0xa38 +#define MT6357_PSEQ_ELR7 0xa3a +#define MT6357_PSEQ_ELR8 0xa3c +#define MT6357_PCHR_DIG_DSN_ID 0xa80 +#define MT6357_PCHR_DIG_DSN_REV0 0xa82 +#define MT6357_PCHR_DIG_DSN_DBI 0xa84 +#define MT6357_PCHR_DIG_DSN_DXI 0xa86 +#define MT6357_CHR_TOP_CON0 0xa88 +#define MT6357_CHR_TOP_CON1 0xa8a +#define MT6357_CHR_TOP_CON2 0xa8c +#define MT6357_CHR_TOP_CON3 0xa8e +#define MT6357_CHR_TOP_CON4 0xa90 +#define MT6357_CHR_TOP_CON5 0xa92 +#define MT6357_CHR_TOP_CON6 0xa94 +#define MT6357_PCHR_DIG_ELR_NUM 0xa96 +#define MT6357_PCHR_ELR0 0xa98 +#define MT6357_PCHR_ELR1 0xa9a +#define MT6357_PCHR_MACRO_DSN_ID 0xb80 +#define MT6357_PCHR_MACRO_DSN_REV0 0xb82 +#define MT6357_PCHR_MACRO_DSN_DBI 0xb84 +#define MT6357_PCHR_MACRO_DSN_DXI 0xb86 +#define MT6357_CHR_CON0 0xb88 +#define MT6357_CHR_CON1 0xb8a +#define MT6357_CHR_CON2 0xb8c +#define MT6357_CHR_CON3 0xb8e +#define MT6357_CHR_CON4 0xb90 +#define MT6357_CHR_CON5 0xb92 +#define MT6357_CHR_CON6 0xb94 +#define MT6357_CHR_CON7 0xb96 +#define MT6357_CHR_CON8 0xb98 +#define MT6357_CHR_CON9 0xb9a +#define MT6357_BM_TOP_DSN_ID 0xc00 +#define MT6357_BM_TOP_DSN_REV0 0xc02 +#define MT6357_BM_TOP_DBI 0xc04 +#define MT6357_BM_TOP_DXI 0xc06 +#define MT6357_BM_TPM0 0xc08 +#define MT6357_BM_TPM1 0xc0a +#define MT6357_BM_TOP_CKPDN_CON0 0xc0c +#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e +#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10 +#define MT6357_BM_TOP_CKSEL_CON0 0xc12 +#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14 +#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16 +#define MT6357_BM_TOP_CKTST_CON0 0xc18 +#define MT6357_BM_TOP_RST_CON0 0xc1a +#define MT6357_BM_TOP_RST_CON0_SET 0xc1c +#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e +#define MT6357_BM_TOP_INT_CON0 0xc20 +#define MT6357_BM_TOP_INT_CON0_SET 0xc22 +#define MT6357_BM_TOP_INT_CON0_CLR 0xc24 +#define MT6357_BM_TOP_INT_CON1 0xc26 +#define MT6357_BM_TOP_INT_CON1_SET 0xc28 +#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a +#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c +#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e +#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30 +#define MT6357_BM_TOP_INT_MASK_CON1 0xc32 +#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34 +#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36 +#define MT6357_BM_TOP_INT_STATUS0 0xc38 +#define MT6357_BM_TOP_INT_STATUS1 0xc3a +#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c +#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e +#define MT6357_BM_TOP_INT_MISC_CON 0xc40 +#define MT6357_BM_TOP_DBG_CON 0xc42 +#define MT6357_BM_TOP_RSV0 0xc44 +#define MT6357_FGADC_ANA_DSN_ID 0xc80 +#define MT6357_FGADC_ANA_DSN_REV0 0xc82 +#define MT6357_FGADC_ANA_DSN_DBI 0xc84 +#define MT6357_FGADC_ANA_DSN_DXI 0xc86 +#define MT6357_FGADC_ANA_CON0 0xc88 +#define MT6357_FGADC_ANA_TEST_CON0 0xc8a +#define MT6357_FGADC_ANA_ELR_NUM 0xc8c +#define MT6357_FGADC_ANA_ELR0 0xc8e +#define MT6357_FGADC_ANA_ELR1 0xc90 +#define MT6357_FGADC0_DSN_ID 0xd00 +#define MT6357_FGADC0_DSN_REV0 0xd02 +#define MT6357_FGADC0_DSN_DBI 0xd04 +#define MT6357_FGADC0_DSN_DXI 0xd06 +#define MT6357_FGADC_CON0 0xd08 +#define 
MT6357_FGADC_CON1 0xd0a +#define MT6357_FGADC_CON2 0xd0c +#define MT6357_FGADC_CON3 0xd0e +#define MT6357_FGADC_CON4 0xd10 +#define MT6357_FGADC_CAR_CON0 0xd12 +#define MT6357_FGADC_CAR_CON1 0xd14 +#define MT6357_FGADC_CAR_CON2 0xd16 +#define MT6357_FGADC_CARTH_CON0 0xd18 +#define MT6357_FGADC_CARTH_CON1 0xd1a +#define MT6357_FGADC_CARTH_CON2 0xd1c +#define MT6357_FGADC_CARTH_CON3 0xd1e +#define MT6357_FGADC_NTER_CON0 0xd20 +#define MT6357_FGADC_NTER_CON1 0xd22 +#define MT6357_FGADC_NTER_CON2 0xd24 +#define MT6357_FGADC_SON_CON0 0xd26 +#define MT6357_FGADC_SON_CON1 0xd28 +#define MT6357_FGADC_SON_CON2 0xd2a +#define MT6357_FGADC_SON_CON3 0xd2c +#define MT6357_FGADC_ZCV_CON0 0xd2e +#define MT6357_FGADC_ZCV_CON1 0xd30 +#define MT6357_FGADC_ZCV_CON2 0xd32 +#define MT6357_FGADC_ZCV_CON3 0xd34 +#define MT6357_FGADC_ZCV_CON4 0xd36 +#define MT6357_FGADC_ZCVTH_CON0 0xd38 +#define MT6357_FGADC_ZCVTH_CON1 0xd3a +#define MT6357_FGADC_ZCVTH_CON2 0xd3c +#define MT6357_FGADC1_DSN_ID 0xd80 +#define MT6357_FGADC1_DSN_REV0 0xd82 +#define MT6357_FGADC1_DSN_DBI 0xd84 +#define MT6357_FGADC1_DSN_DXI 0xd86 +#define MT6357_FGADC_R_CON0 0xd88 +#define MT6357_FGADC_CUR_CON0 0xd8a +#define MT6357_FGADC_CUR_CON1 0xd8c +#define MT6357_FGADC_CUR_CON2 0xd8e +#define MT6357_FGADC_CUR_CON3 0xd90 +#define MT6357_FGADC_OFFSET_CON0 0xd92 +#define MT6357_FGADC_OFFSET_CON1 0xd94 +#define MT6357_FGADC_GAIN_CON0 0xd96 +#define MT6357_FGADC_TEST_CON0 0xd98 +#define MT6357_SYSTEM_INFO_CON0 0xd9a +#define MT6357_SYSTEM_INFO_CON1 0xd9c +#define MT6357_SYSTEM_INFO_CON2 0xd9e +#define MT6357_SYSTEM_INFO_CON3 0xda0 +#define MT6357_SYSTEM_INFO_CON4 0xda2 +#define MT6357_BATON_ANA_DSN_ID 0xe00 +#define MT6357_BATON_ANA_DSN_REV0 0xe02 +#define MT6357_BATON_ANA_DSN_DBI 0xe04 +#define MT6357_BATON_ANA_DSN_DXI 0xe06 +#define MT6357_BATON_ANA_CON0 0xe08 +#define MT6357_BATON_ANA_ELR_NUM 0xe0a +#define MT6357_BATON_ANA_ELR0 0xe0c +#define MT6357_HK_TOP_ID 0xf80 +#define MT6357_HK_TOP_REV0 0xf82 +#define MT6357_HK_TOP_DBI 0xf84 +#define MT6357_HK_TOP_DXI 0xf86 +#define MT6357_HK_TPM0 0xf88 +#define MT6357_HK_TPM1 0xf8a +#define MT6357_HK_TOP_CLK_CON0 0xf8c +#define MT6357_HK_TOP_CLK_CON1 0xf8e +#define MT6357_HK_TOP_RST_CON0 0xf90 +#define MT6357_HK_TOP_INT_CON0 0xf92 +#define MT6357_HK_TOP_INT_CON0_SET 0xf94 +#define MT6357_HK_TOP_INT_CON0_CLR 0xf96 +#define MT6357_HK_TOP_INT_MASK_CON0 0xf98 +#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a +#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c +#define MT6357_HK_TOP_INT_STATUS0 0xf9e +#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0 +#define MT6357_HK_TOP_MON_CON0 0xfa2 +#define MT6357_HK_TOP_MON_CON1 0xfa4 +#define MT6357_HK_TOP_MON_CON2 0xfa6 +#define MT6357_AUXADC_DSN_ID 0x1000 +#define MT6357_AUXADC_DSN_REV0 0x1002 +#define MT6357_AUXADC_DSN_DBI 0x1004 +#define MT6357_AUXADC_DSN_DXI 0x1006 +#define MT6357_AUXADC_ANA_CON0 0x1008 +#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080 +#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082 +#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084 +#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086 +#define MT6357_AUXADC_ADC0 0x1088 +#define MT6357_AUXADC_ADC1 0x108a +#define MT6357_AUXADC_ADC2 0x108c +#define MT6357_AUXADC_ADC3 0x108e +#define MT6357_AUXADC_ADC4 0x1090 +#define MT6357_AUXADC_ADC5 0x1092 +#define MT6357_AUXADC_ADC6 0x1094 +#define MT6357_AUXADC_ADC7 0x1096 +#define MT6357_AUXADC_ADC8 0x1098 +#define MT6357_AUXADC_ADC9 0x109a +#define MT6357_AUXADC_ADC10 0x109c +#define MT6357_AUXADC_ADC11 0x109e +#define MT6357_AUXADC_ADC12 0x10a0 +#define MT6357_AUXADC_ADC14 0x10a2 +#define 
MT6357_AUXADC_ADC16 0x10a4 +#define MT6357_AUXADC_ADC17 0x10a6 +#define MT6357_AUXADC_ADC18 0x10a8 +#define MT6357_AUXADC_ADC19 0x10aa +#define MT6357_AUXADC_ADC20 0x10ac +#define MT6357_AUXADC_ADC21 0x10ae +#define MT6357_AUXADC_ADC22 0x10b0 +#define MT6357_AUXADC_ADC23 0x10b2 +#define MT6357_AUXADC_ADC24 0x10b4 +#define MT6357_AUXADC_ADC25 0x10b6 +#define MT6357_AUXADC_ADC26 0x10b8 +#define MT6357_AUXADC_ADC27 0x10ba +#define MT6357_AUXADC_ADC29 0x10bc +#define MT6357_AUXADC_ADC30 0x10be +#define MT6357_AUXADC_ADC31 0x10c0 +#define MT6357_AUXADC_ADC32 0x10c2 +#define MT6357_AUXADC_ADC33 0x10c4 +#define MT6357_AUXADC_ADC34 0x10c6 +#define MT6357_AUXADC_ADC35 0x10c8 +#define MT6357_AUXADC_ADC36 0x10ca +#define MT6357_AUXADC_ADC38 0x10cc +#define MT6357_AUXADC_ADC39 0x10ce +#define MT6357_AUXADC_ADC40 0x10d0 +#define MT6357_AUXADC_ADC41 0x10d2 +#define MT6357_AUXADC_ADC42 0x10d4 +#define MT6357_AUXADC_ADC43 0x10d6 +#define MT6357_AUXADC_ADC46 0x10d8 +#define MT6357_AUXADC_ADC47 0x10da +#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc +#define MT6357_AUXADC_DIG_1_ELR0 0x10de +#define MT6357_AUXADC_DIG_1_ELR1 0x10e0 +#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100 +#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102 +#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104 +#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106 +#define MT6357_AUXADC_STA0 0x1108 +#define MT6357_AUXADC_STA1 0x110a +#define MT6357_AUXADC_STA2 0x110c +#define MT6357_AUXADC_RQST0 0x110e +#define MT6357_AUXADC_RQST0_SET 0x1110 +#define MT6357_AUXADC_RQST0_CLR 0x1112 +#define MT6357_AUXADC_RQST2 0x1114 +#define MT6357_AUXADC_RQST2_SET 0x1116 +#define MT6357_AUXADC_RQST2_CLR 0x1118 +#define MT6357_AUXADC_RQST1 0x111a +#define MT6357_AUXADC_RQST1_SET 0x111c +#define MT6357_AUXADC_RQST1_CLR 0x111e +#define MT6357_AUXADC_CON0 0x1120 +#define MT6357_AUXADC_CON0_SET 0x1122 +#define MT6357_AUXADC_CON0_CLR 0x1124 +#define MT6357_AUXADC_CON1 0x1126 +#define MT6357_AUXADC_CON2 0x1128 +#define MT6357_AUXADC_CON3 0x112a +#define MT6357_AUXADC_CON4 0x112c +#define MT6357_AUXADC_CON5 0x112e +#define MT6357_AUXADC_CON6 0x1130 +#define MT6357_AUXADC_CON7 0x1132 +#define MT6357_AUXADC_CON8 0x1134 +#define MT6357_AUXADC_CON9 0x1136 +#define MT6357_AUXADC_CON10 0x1138 +#define MT6357_AUXADC_CON11 0x113a +#define MT6357_AUXADC_CON12 0x113c +#define MT6357_AUXADC_CON13 0x113e +#define MT6357_AUXADC_CON14 0x1140 +#define MT6357_AUXADC_CON15 0x1142 +#define MT6357_AUXADC_CON16 0x1144 +#define MT6357_AUXADC_CON17 0x1146 +#define MT6357_AUXADC_CON18 0x1148 +#define MT6357_AUXADC_CON19 0x114a +#define MT6357_AUXADC_CON20 0x114c +#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180 +#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182 +#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184 +#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186 +#define MT6357_AUXADC_AUTORPT0 0x1188 +#define MT6357_AUXADC_LBAT0 0x118a +#define MT6357_AUXADC_LBAT1 0x118c +#define MT6357_AUXADC_LBAT2 0x118e +#define MT6357_AUXADC_LBAT3 0x1190 +#define MT6357_AUXADC_LBAT4 0x1192 +#define MT6357_AUXADC_LBAT5 0x1194 +#define MT6357_AUXADC_LBAT6 0x1196 +#define MT6357_AUXADC_ACCDET 0x1198 +#define MT6357_AUXADC_DBG0 0x119a +#define MT6357_AUXADC_IMP0 0x119c +#define MT6357_AUXADC_IMP1 0x119e +#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0 +#define MT6357_AUXADC_DIG_3_ELR0 0x11a2 +#define MT6357_AUXADC_DIG_3_ELR1 0x11a4 +#define MT6357_AUXADC_DIG_3_ELR2 0x11a6 +#define MT6357_AUXADC_DIG_3_ELR3 0x11a8 +#define MT6357_AUXADC_DIG_3_ELR4 0x11aa +#define MT6357_AUXADC_DIG_3_ELR5 0x11ac +#define MT6357_AUXADC_DIG_3_ELR6 0x11ae +#define MT6357_AUXADC_DIG_3_ELR7 
0x11b0 +#define MT6357_AUXADC_DIG_3_ELR8 0x11b2 +#define MT6357_AUXADC_DIG_3_ELR9 0x11b4 +#define MT6357_AUXADC_DIG_3_ELR10 0x11b6 +#define MT6357_AUXADC_DIG_3_ELR11 0x11b8 +#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200 +#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202 +#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204 +#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206 +#define MT6357_AUXADC_MDRT_0 0x1208 +#define MT6357_AUXADC_MDRT_1 0x120a +#define MT6357_AUXADC_MDRT_2 0x120c +#define MT6357_AUXADC_MDRT_3 0x120e +#define MT6357_AUXADC_MDRT_4 0x1210 +#define MT6357_AUXADC_DCXO_MDRT_0 0x1212 +#define MT6357_AUXADC_DCXO_MDRT_1 0x1214 +#define MT6357_AUXADC_DCXO_MDRT_2 0x1216 +#define MT6357_AUXADC_NAG_0 0x1218 +#define MT6357_AUXADC_NAG_1 0x121a +#define MT6357_AUXADC_NAG_2 0x121c +#define MT6357_AUXADC_NAG_3 0x121e +#define MT6357_AUXADC_NAG_4 0x1220 +#define MT6357_AUXADC_NAG_5 0x1222 +#define MT6357_AUXADC_NAG_6 0x1224 +#define MT6357_AUXADC_NAG_7 0x1226 +#define MT6357_AUXADC_NAG_8 0x1228 +#define MT6357_AUXADC_RSV_1 0x122a +#define MT6357_AUXADC_ANA_0 0x122c +#define MT6357_AUXADC_IMP_CG0 0x122e +#define MT6357_AUXADC_LBAT_CG0 0x1230 +#define MT6357_AUXADC_NAG_CG0 0x1232 +#define MT6357_AUXADC_PRI_NEW 0x1234 +#define MT6357_AUXADC_CHR_TOP_CON2 0x1236 +#define MT6357_BUCK_TOP_DSN_ID 0x1400 +#define MT6357_BUCK_TOP_DSN_REV0 0x1402 +#define MT6357_BUCK_TOP_DBI 0x1404 +#define MT6357_BUCK_TOP_DXI 0x1406 +#define MT6357_BUCK_TOP_PAM0 0x1408 +#define MT6357_BUCK_TOP_PAM1 0x140a +#define MT6357_BUCK_TOP_CLK_CON0 0x140c +#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e +#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410 +#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412 +#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414 +#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416 +#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418 +#define MT6357_BUCK_TOP_INT_CON0 0x141a +#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c +#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e +#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420 +#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422 +#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424 +#define MT6357_BUCK_TOP_INT_STATUS0 0x1426 +#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428 +#define MT6357_BUCK_TOP_STB_CON 0x142a +#define MT6357_BUCK_TOP_SLP_CON0 0x142c +#define MT6357_BUCK_TOP_SLP_CON1 0x142e +#define MT6357_BUCK_TOP_SLP_CON2 0x1430 +#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432 +#define MT6357_BUCK_TOP_OC_CON0 0x1434 +#define MT6357_BUCK_TOP_K_CON0 0x1436 +#define MT6357_BUCK_TOP_K_CON1 0x1438 +#define MT6357_BUCK_TOP_K_CON2 0x143a +#define MT6357_BUCK_TOP_WDTDBG0 0x143c +#define MT6357_BUCK_TOP_WDTDBG1 0x143e +#define MT6357_BUCK_TOP_WDTDBG2 0x1440 +#define MT6357_BUCK_TOP_ELR_NUM 0x1442 +#define MT6357_BUCK_TOP_ELR0 0x1444 +#define MT6357_BUCK_TOP_ELR1 0x1446 +#define MT6357_BUCK_VPROC_DSN_ID 0x1480 +#define MT6357_BUCK_VPROC_DSN_REV0 0x1482 +#define MT6357_BUCK_VPROC_DSN_DBI 0x1484 +#define MT6357_BUCK_VPROC_DSN_DXI 0x1486 +#define MT6357_BUCK_VPROC_CON0 0x1488 +#define MT6357_BUCK_VPROC_CON1 0x148a +#define MT6357_BUCK_VPROC_CFG0 0x148c +#define MT6357_BUCK_VPROC_CFG1 0x148e +#define MT6357_BUCK_VPROC_OP_EN 0x1490 +#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492 +#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494 +#define MT6357_BUCK_VPROC_OP_CFG 0x1496 +#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498 +#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a +#define MT6357_BUCK_VPROC_SP_CON 0x149c +#define MT6357_BUCK_VPROC_SP_CFG 0x149e +#define MT6357_BUCK_VPROC_OC_CFG 0x14a0 +#define MT6357_BUCK_VPROC_DBG0 0x14a2 +#define MT6357_BUCK_VPROC_DBG1 0x14a4 
+#define MT6357_BUCK_VPROC_DBG2 0x14a6 +#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8 +#define MT6357_BUCK_VPROC_ELR0 0x14aa +#define MT6357_BUCK_VCORE_DSN_ID 0x1500 +#define MT6357_BUCK_VCORE_DSN_REV0 0x1502 +#define MT6357_BUCK_VCORE_DSN_DBI 0x1504 +#define MT6357_BUCK_VCORE_DSN_DXI 0x1506 +#define MT6357_BUCK_VCORE_CON0 0x1508 +#define MT6357_BUCK_VCORE_CON1 0x150a +#define MT6357_BUCK_VCORE_CFG0 0x150c +#define MT6357_BUCK_VCORE_CFG1 0x150e +#define MT6357_BUCK_VCORE_OP_EN 0x1510 +#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512 +#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514 +#define MT6357_BUCK_VCORE_OP_CFG 0x1516 +#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518 +#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a +#define MT6357_BUCK_VCORE_SP_CON 0x151c +#define MT6357_BUCK_VCORE_SP_CFG 0x151e +#define MT6357_BUCK_VCORE_OC_CFG 0x1520 +#define MT6357_BUCK_VCORE_DBG0 0x1522 +#define MT6357_BUCK_VCORE_DBG1 0x1524 +#define MT6357_BUCK_VCORE_DBG2 0x1526 +#define MT6357_BUCK_VCORE_ELR_NUM 0x1528 +#define MT6357_BUCK_VCORE_ELR0 0x152a +#define MT6357_BUCK_VMODEM_DSN_ID 0x1580 +#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582 +#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584 +#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586 +#define MT6357_BUCK_VMODEM_CON0 0x1588 +#define MT6357_BUCK_VMODEM_CON1 0x158a +#define MT6357_BUCK_VMODEM_CFG0 0x158c +#define MT6357_BUCK_VMODEM_CFG1 0x158e +#define MT6357_BUCK_VMODEM_OP_EN 0x1590 +#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592 +#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594 +#define MT6357_BUCK_VMODEM_OP_CFG 0x1596 +#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598 +#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a +#define MT6357_BUCK_VMODEM_SP_CON 0x159c +#define MT6357_BUCK_VMODEM_SP_CFG 0x159e +#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0 +#define MT6357_BUCK_VMODEM_DBG0 0x15a2 +#define MT6357_BUCK_VMODEM_DBG1 0x15a4 +#define MT6357_BUCK_VMODEM_DBG2 0x15a6 +#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8 +#define MT6357_BUCK_VMODEM_ELR0 0x15aa +#define MT6357_BUCK_VS1_DSN_ID 0x1600 +#define MT6357_BUCK_VS1_DSN_REV0 0x1602 +#define MT6357_BUCK_VS1_DSN_DBI 0x1604 +#define MT6357_BUCK_VS1_DSN_DXI 0x1606 +#define MT6357_BUCK_VS1_CON0 0x1608 +#define MT6357_BUCK_VS1_CON1 0x160a +#define MT6357_BUCK_VS1_CFG0 0x160c +#define MT6357_BUCK_VS1_CFG1 0x160e +#define MT6357_BUCK_VS1_OP_EN 0x1610 +#define MT6357_BUCK_VS1_OP_EN_SET 0x1612 +#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614 +#define MT6357_BUCK_VS1_OP_CFG 0x1616 +#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618 +#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a +#define MT6357_BUCK_VS1_SP_CON 0x161c +#define MT6357_BUCK_VS1_SP_CFG 0x161e +#define MT6357_BUCK_VS1_OC_CFG 0x1620 +#define MT6357_BUCK_VS1_DBG0 0x1622 +#define MT6357_BUCK_VS1_DBG1 0x1624 +#define MT6357_BUCK_VS1_DBG2 0x1626 +#define MT6357_BUCK_VS1_VOTER 0x1628 +#define MT6357_BUCK_VS1_VOTER_SET 0x162a +#define MT6357_BUCK_VS1_VOTER_CLR 0x162c +#define MT6357_BUCK_VS1_VOTER_CFG 0x162e +#define MT6357_BUCK_VS1_ELR_NUM 0x1630 +#define MT6357_BUCK_VS1_ELR0 0x1632 +#define MT6357_BUCK_VPA_DSN_ID 0x1680 +#define MT6357_BUCK_VPA_DSN_REV0 0x1682 +#define MT6357_BUCK_VPA_DSN_DBI 0x1684 +#define MT6357_BUCK_VPA_DSN_DXI 0x1686 +#define MT6357_BUCK_VPA_CON0 0x1688 +#define MT6357_BUCK_VPA_CON1 0x168a +#define MT6357_BUCK_VPA_CFG0 0x168c +#define MT6357_BUCK_VPA_CFG1 0x168e +#define MT6357_BUCK_VPA_OC_CFG 0x1690 +#define MT6357_BUCK_VPA_DBG0 0x1692 +#define MT6357_BUCK_VPA_DBG1 0x1694 +#define MT6357_BUCK_VPA_DBG2 0x1696 +#define MT6357_BUCK_VPA_DLC_CON0 0x1698 +#define MT6357_BUCK_VPA_DLC_CON1 0x169a +#define MT6357_BUCK_VPA_DLC_CON2 0x169c 
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e +#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0 +#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2 +#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4 +#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6 +#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8 +#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa +#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac +#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae +#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0 +#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2 +#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4 +#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6 +#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8 +#define MT6357_BUCK_ANA_DSN_ID 0x1700 +#define MT6357_BUCK_ANA_DSN_REV0 0x1702 +#define MT6357_BUCK_ANA_DSN_DBI 0x1704 +#define MT6357_BUCK_ANA_DSN_FPI 0x1706 +#define MT6357_SMPS_ANA_CON0 0x1708 +#define MT6357_SMPS_ANA_CON1 0x170a +#define MT6357_SMPS_ANA_CON2 0x170c +#define MT6357_VCORE_VPROC_ANA_CON0 0x170e +#define MT6357_VCORE_VPROC_ANA_CON1 0x1710 +#define MT6357_VCORE_VPROC_ANA_CON2 0x1712 +#define MT6357_VCORE_VPROC_ANA_CON3 0x1714 +#define MT6357_VCORE_VPROC_ANA_CON4 0x1716 +#define MT6357_VCORE_VPROC_ANA_CON5 0x1718 +#define MT6357_VCORE_VPROC_ANA_CON6 0x171a +#define MT6357_VCORE_VPROC_ANA_CON7 0x171c +#define MT6357_VCORE_VPROC_ANA_CON8 0x171e +#define MT6357_VCORE_VPROC_ANA_CON9 0x1720 +#define MT6357_VCORE_VPROC_ANA_CON10 0x1722 +#define MT6357_VCORE_VPROC_ANA_CON11 0x1724 +#define MT6357_VMODEM_ANA_CON0 0x1726 +#define MT6357_VMODEM_ANA_CON1 0x1728 +#define MT6357_VMODEM_ANA_CON2 0x172a +#define MT6357_VMODEM_ANA_CON3 0x172c +#define MT6357_VMODEM_ANA_CON4 0x172e +#define MT6357_VMODEM_ANA_CON5 0x1730 +#define MT6357_VS1_ANA_CON0 0x1732 +#define MT6357_VS1_ANA_CON1 0x1734 +#define MT6357_VS1_ANA_CON2 0x1736 +#define MT6357_VS1_ANA_CON3 0x1738 +#define MT6357_VS1_ANA_CON4 0x173a +#define MT6357_VS1_ANA_CON5 0x173c +#define MT6357_VPA_ANA_CON0 0x173e +#define MT6357_VPA_ANA_CON1 0x1740 +#define MT6357_VPA_ANA_CON2 0x1742 +#define MT6357_VPA_ANA_CON3 0x1744 +#define MT6357_VPA_ANA_CON4 0x1746 +#define MT6357_VPA_ANA_CON5 0x1748 +#define MT6357_BUCK_ANA_ELR_NUM 0x174a +#define MT6357_SMPS_ELR_0 0x174c +#define MT6357_SMPS_ELR_1 0x174e +#define MT6357_SMPS_ELR_2 0x1750 +#define MT6357_SMPS_ELR_3 0x1752 +#define MT6357_SMPS_ELR_4 0x1754 +#define MT6357_SMPS_ELR_5 0x1756 +#define MT6357_VCORE_VPROC_ELR_0 0x1758 +#define MT6357_VCORE_VPROC_ELR_1 0x175a +#define MT6357_VCORE_VPROC_ELR_2 0x175c +#define MT6357_VCORE_VPROC_ELR_3 0x175e +#define MT6357_VCORE_VPROC_ELR_4 0x1760 +#define MT6357_VMODEM_ELR_0 0x1762 +#define MT6357_VMODEM_ELR_1 0x1764 +#define MT6357_VMODEM_ELR_2 0x1766 +#define MT6357_VS1_ELR_0 0x1768 +#define MT6357_VS1_ELR_1 0x176a +#define MT6357_VPA_ELR_0 0x176c +#define MT6357_LDO_TOP_ID 0x1880 +#define MT6357_LDO_TOP_REV0 0x1882 +#define MT6357_LDO_TOP_DBI 0x1884 +#define MT6357_LDO_TOP_DXI 0x1886 +#define MT6357_LDO_TPM0 0x1888 +#define MT6357_LDO_TPM1 0x188a +#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c +#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e +#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890 +#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892 +#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894 +#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896 +#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898 +#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a +#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c +#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e +#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0 +#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2 +#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4 +#define 
MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6 +#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8 +#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa +#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac +#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae +#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0 +#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2 +#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4 +#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6 +#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8 +#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba +#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc +#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be +#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0 +#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2 +#define MT6357_LDO_TOP_INT_CON0 0x18c4 +#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6 +#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8 +#define MT6357_LDO_TOP_INT_CON1 0x18ca +#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc +#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce +#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0 +#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2 +#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4 +#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6 +#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8 +#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da +#define MT6357_LDO_TOP_INT_STATUS0 0x18dc +#define MT6357_LDO_TOP_INT_STATUS1 0x18de +#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0 +#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2 +#define MT6357_LDO_TEST_CON0 0x18e4 +#define MT6357_LDO_TOP_WDT_CON0 0x18e6 +#define MT6357_LDO_TOP_RSV_CON0 0x18e8 +#define MT6357_LDO_TOP_RSV_CON1 0x18ea +#define MT6357_LDO_OCFB0 0x18ec +#define MT6357_LDO_LP_PROTECTION 0x18ee +#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0 +#define MT6357_LDO_GON0_DSN_ID 0x1900 +#define MT6357_LDO_GON0_DSN_REV0 0x1902 +#define MT6357_LDO_GON0_DSN_DBI 0x1904 +#define MT6357_LDO_GON0_DSN_DXI 0x1906 +#define MT6357_LDO_VXO22_CON0 0x1908 +#define MT6357_LDO_VXO22_OP_EN 0x190a +#define MT6357_LDO_VXO22_OP_EN_SET 0x190c +#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e +#define MT6357_LDO_VXO22_OP_CFG 0x1910 +#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912 +#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914 +#define MT6357_LDO_VXO22_CON1 0x1916 +#define MT6357_LDO_VXO22_CON2 0x1918 +#define MT6357_LDO_VXO22_CON3 0x191a +#define MT6357_LDO_VAUX18_CON0 0x191c +#define MT6357_LDO_VAUX18_OP_EN 0x191e +#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920 +#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922 +#define MT6357_LDO_VAUX18_OP_CFG 0x1924 +#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926 +#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928 +#define MT6357_LDO_VAUX18_CON1 0x192a +#define MT6357_LDO_VAUX18_CON2 0x192c +#define MT6357_LDO_VAUX18_CON3 0x192e +#define MT6357_LDO_VAUD28_CON0 0x1930 +#define MT6357_LDO_VAUD28_OP_EN 0x1932 +#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934 +#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936 +#define MT6357_LDO_VAUD28_OP_CFG 0x1938 +#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a +#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c +#define MT6357_LDO_VAUD28_CON1 0x193e +#define MT6357_LDO_VAUD28_CON2 0x1940 +#define MT6357_LDO_VAUD28_CON3 0x1942 +#define MT6357_LDO_VIO28_CON0 0x1944 +#define MT6357_LDO_VIO28_OP_EN 0x1946 +#define MT6357_LDO_VIO28_OP_EN_SET 0x1948 +#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a +#define MT6357_LDO_VIO28_OP_CFG 0x194c +#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e +#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950 +#define MT6357_LDO_VIO28_CON1 0x1952 +#define MT6357_LDO_VIO28_CON2 0x1954 +#define MT6357_LDO_VIO28_CON3 0x1956 +#define MT6357_LDO_VIO18_CON0 0x1958 +#define MT6357_LDO_VIO18_OP_EN 0x195a 
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c +#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e +#define MT6357_LDO_VIO18_OP_CFG 0x1960 +#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962 +#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964 +#define MT6357_LDO_VIO18_CON1 0x1966 +#define MT6357_LDO_VIO18_CON2 0x1968 +#define MT6357_LDO_VIO18_CON3 0x196a +#define MT6357_LDO_VDRAM_CON0 0x196c +#define MT6357_LDO_VDRAM_OP_EN 0x196e +#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970 +#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972 +#define MT6357_LDO_VDRAM_OP_CFG 0x1974 +#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976 +#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978 +#define MT6357_LDO_VDRAM_CON1 0x197a +#define MT6357_LDO_VDRAM_CON2 0x197c +#define MT6357_LDO_VDRAM_CON3 0x197e +#define MT6357_LDO_GON1_DSN_ID 0x1980 +#define MT6357_LDO_GON1_DSN_REV0 0x1982 +#define MT6357_LDO_GON1_DSN_DBI 0x1984 +#define MT6357_LDO_GON1_DSN_DXI 0x1986 +#define MT6357_LDO_VEMC_CON0 0x1988 +#define MT6357_LDO_VEMC_OP_EN 0x198a +#define MT6357_LDO_VEMC_OP_EN_SET 0x198c +#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e +#define MT6357_LDO_VEMC_OP_CFG 0x1990 +#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992 +#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994 +#define MT6357_LDO_VEMC_CON1 0x1996 +#define MT6357_LDO_VEMC_CON2 0x1998 +#define MT6357_LDO_VEMC_CON3 0x199a +#define MT6357_LDO_VUSB33_CON0_0 0x199c +#define MT6357_LDO_VUSB33_OP_EN 0x199e +#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0 +#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2 +#define MT6357_LDO_VUSB33_OP_CFG 0x19a4 +#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6 +#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8 +#define MT6357_LDO_VUSB33_CON0_1 0x19aa +#define MT6357_LDO_VUSB33_CON1 0x19ac +#define MT6357_LDO_VUSB33_CON2 0x19ae +#define MT6357_LDO_VUSB33_CON3 0x19b0 +#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2 +#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4 +#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6 +#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8 +#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba +#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc +#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be +#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0 +#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2 +#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4 +#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6 +#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8 +#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca +#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc +#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce +#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0 +#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2 +#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4 +#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6 +#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8 +#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da +#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc +#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de +#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0 +#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2 +#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4 +#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6 +#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8 +#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea +#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec +#define MT6357_LDO_VSRAM_PROC_SP 0x19ee +#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0 +#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2 +#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4 +#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6 +#define MT6357_LDO_GON1_ELR_NUM 0x19f8 +#define MT6357_LDO_VSRAM_CON0 0x19fa +#define MT6357_LDO_VSRAM_CON1 0x19fc +#define MT6357_LDO_VSRAM_CON2 0x19fe +#define MT6357_LDO_GOFF0_DSN_ID 0x1a00 +#define 
MT6357_LDO_GOFF0_DSN_REV0 0x1a02 +#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04 +#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06 +#define MT6357_LDO_VFE28_CON0 0x1a08 +#define MT6357_LDO_VFE28_OP_EN 0x1a0a +#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c +#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e +#define MT6357_LDO_VFE28_OP_CFG 0x1a10 +#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12 +#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14 +#define MT6357_LDO_VFE28_CON1 0x1a16 +#define MT6357_LDO_VFE28_CON2 0x1a18 +#define MT6357_LDO_VFE28_CON3 0x1a1a +#define MT6357_LDO_VRF18_CON0 0x1a1c +#define MT6357_LDO_VRF18_OP_EN 0x1a1e +#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20 +#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22 +#define MT6357_LDO_VRF18_OP_CFG 0x1a24 +#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26 +#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28 +#define MT6357_LDO_VRF18_CON1 0x1a2a +#define MT6357_LDO_VRF18_CON2 0x1a2c +#define MT6357_LDO_VRF18_CON3 0x1a2e +#define MT6357_LDO_VRF12_CON0 0x1a30 +#define MT6357_LDO_VRF12_OP_EN 0x1a32 +#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34 +#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36 +#define MT6357_LDO_VRF12_OP_CFG 0x1a38 +#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a +#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c +#define MT6357_LDO_VRF12_CON1 0x1a3e +#define MT6357_LDO_VRF12_CON2 0x1a40 +#define MT6357_LDO_VRF12_CON3 0x1a42 +#define MT6357_LDO_VEFUSE_CON0 0x1a44 +#define MT6357_LDO_VEFUSE_OP_EN 0x1a46 +#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48 +#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a +#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c +#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e +#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50 +#define MT6357_LDO_VEFUSE_CON1 0x1a52 +#define MT6357_LDO_VEFUSE_CON2 0x1a54 +#define MT6357_LDO_VEFUSE_CON3 0x1a56 +#define MT6357_LDO_VCN18_CON0 0x1a58 +#define MT6357_LDO_VCN18_OP_EN 0x1a5a +#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c +#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e +#define MT6357_LDO_VCN18_OP_CFG 0x1a60 +#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62 +#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64 +#define MT6357_LDO_VCN18_CON1 0x1a66 +#define MT6357_LDO_VCN18_CON2 0x1a68 +#define MT6357_LDO_VCN18_CON3 0x1a6a +#define MT6357_LDO_VCAMA_CON0 0x1a6c +#define MT6357_LDO_VCAMA_OP_EN 0x1a6e +#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70 +#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72 +#define MT6357_LDO_VCAMA_OP_CFG 0x1a74 +#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76 +#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78 +#define MT6357_LDO_VCAMA_CON1 0x1a7a +#define MT6357_LDO_VCAMA_CON2 0x1a7c +#define MT6357_LDO_VCAMA_CON3 0x1a7e +#define MT6357_LDO_GOFF1_DSN_ID 0x1a80 +#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82 +#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84 +#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86 +#define MT6357_LDO_VCAMD_CON0 0x1a88 +#define MT6357_LDO_VCAMD_OP_EN 0x1a8a +#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c +#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e +#define MT6357_LDO_VCAMD_OP_CFG 0x1a90 +#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92 +#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94 +#define MT6357_LDO_VCAMD_CON1 0x1a96 +#define MT6357_LDO_VCAMD_CON2 0x1a98 +#define MT6357_LDO_VCAMD_CON3 0x1a9a +#define MT6357_LDO_VCAMIO_CON0 0x1a9c +#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e +#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0 +#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2 +#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4 +#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6 +#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8 +#define MT6357_LDO_VCAMIO_CON1 0x1aaa +#define MT6357_LDO_VCAMIO_CON2 0x1aac +#define MT6357_LDO_VCAMIO_CON3 0x1aae +#define 
MT6357_LDO_VMC_CON0 0x1ab0 +#define MT6357_LDO_VMC_OP_EN 0x1ab2 +#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4 +#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6 +#define MT6357_LDO_VMC_OP_CFG 0x1ab8 +#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba +#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc +#define MT6357_LDO_VMC_CON1 0x1abe +#define MT6357_LDO_VMC_CON2 0x1ac0 +#define MT6357_LDO_VMC_CON3 0x1ac2 +#define MT6357_LDO_VMCH_CON0 0x1ac4 +#define MT6357_LDO_VMCH_OP_EN 0x1ac6 +#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8 +#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca +#define MT6357_LDO_VMCH_OP_CFG 0x1acc +#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace +#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0 +#define MT6357_LDO_VMCH_CON1 0x1ad2 +#define MT6357_LDO_VMCH_CON2 0x1ad4 +#define MT6357_LDO_VMCH_CON3 0x1ad6 +#define MT6357_LDO_VSIM1_CON0 0x1ad8 +#define MT6357_LDO_VSIM1_OP_EN 0x1ada +#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc +#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade +#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0 +#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2 +#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4 +#define MT6357_LDO_VSIM1_CON1 0x1ae6 +#define MT6357_LDO_VSIM1_CON2 0x1ae8 +#define MT6357_LDO_VSIM1_CON3 0x1aea +#define MT6357_LDO_VSIM2_CON0 0x1aec +#define MT6357_LDO_VSIM2_OP_EN 0x1aee +#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0 +#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2 +#define MT6357_LDO_VSIM2_OP_CFG 0x1af4 +#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6 +#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8 +#define MT6357_LDO_VSIM2_CON1 0x1afa +#define MT6357_LDO_VSIM2_CON2 0x1afc +#define MT6357_LDO_VSIM2_CON3 0x1afe +#define MT6357_LDO_GOFF2_DSN_ID 0x1b00 +#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02 +#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04 +#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06 +#define MT6357_LDO_VIBR_CON0 0x1b08 +#define MT6357_LDO_VIBR_OP_EN 0x1b0a +#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c +#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e +#define MT6357_LDO_VIBR_OP_CFG 0x1b10 +#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12 +#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14 +#define MT6357_LDO_VIBR_CON1 0x1b16 +#define MT6357_LDO_VIBR_CON2 0x1b18 +#define MT6357_LDO_VIBR_CON3 0x1b1a +#define MT6357_LDO_VCN33_CON0_0 0x1b1c +#define MT6357_LDO_VCN33_OP_EN 0x1b1e +#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20 +#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22 +#define MT6357_LDO_VCN33_OP_CFG 0x1b24 +#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26 +#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28 +#define MT6357_LDO_VCN33_CON0_1 0x1b2a +#define MT6357_LDO_VCN33_CON1 0x1b2c +#define MT6357_LDO_VCN33_CON2 0x1b2e +#define MT6357_LDO_VCN33_CON3 0x1b30 +#define MT6357_LDO_VLDO28_CON0_0 0x1b32 +#define MT6357_LDO_VLDO28_OP_EN 0x1b34 +#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36 +#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38 +#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a +#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c +#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e +#define MT6357_LDO_VLDO28_CON0_1 0x1b40 +#define MT6357_LDO_VLDO28_CON1 0x1b42 +#define MT6357_LDO_VLDO28_CON2 0x1b44 +#define MT6357_LDO_VLDO28_CON3 0x1b46 +#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48 +#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a +#define MT6357_LDO_GOFF3_DSN_ID 0x1b80 +#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82 +#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84 +#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86 +#define MT6357_LDO_VCN28_CON0 0x1b88 +#define MT6357_LDO_VCN28_OP_EN 0x1b8a +#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c +#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e +#define MT6357_LDO_VCN28_OP_CFG 0x1b90 +#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92 +#define 
MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94 +#define MT6357_LDO_VCN28_CON1 0x1b96 +#define MT6357_LDO_VCN28_CON2 0x1b98 +#define MT6357_LDO_VCN28_CON3 0x1b9a +#define MT6357_VRTC_CON0 0x1b9c +#define MT6357_LDO_TREF_CON0 0x1b9e +#define MT6357_LDO_TREF_OP_EN 0x1ba0 +#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2 +#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4 +#define MT6357_LDO_TREF_OP_CFG 0x1ba6 +#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8 +#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa +#define MT6357_LDO_TREF_CON1 0x1bac +#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae +#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0 +#define MT6357_LDO_ANA0_DSN_ID 0x1c00 +#define MT6357_LDO_ANA0_DSN_REV0 0x1c02 +#define MT6357_LDO_ANA0_DSN_DBI 0x1c04 +#define MT6357_LDO_ANA0_DSN_DXI 0x1c06 +#define MT6357_VFE28_ANA_CON0 0x1c08 +#define MT6357_VFE28_ANA_CON1 0x1c0a +#define MT6357_VCN28_ANA_CON0 0x1c0c +#define MT6357_VCN28_ANA_CON1 0x1c0e +#define MT6357_VAUD28_ANA_CON0 0x1c10 +#define MT6357_VAUD28_ANA_CON1 0x1c12 +#define MT6357_VAUX18_ANA_CON0 0x1c14 +#define MT6357_VAUX18_ANA_CON1 0x1c16 +#define MT6357_VXO22_ANA_CON0 0x1c18 +#define MT6357_VXO22_ANA_CON1 0x1c1a +#define MT6357_VCN33_ANA_CON0 0x1c1c +#define MT6357_VCN33_ANA_CON1 0x1c1e +#define MT6357_VEMC_ANA_CON0 0x1c20 +#define MT6357_VEMC_ANA_CON1 0x1c22 +#define MT6357_VLDO28_ANA_CON0 0x1c24 +#define MT6357_VLDO28_ANA_CON1 0x1c26 +#define MT6357_VIO28_ANA_CON0 0x1c28 +#define MT6357_VIO28_ANA_CON1 0x1c2a +#define MT6357_VIBR_ANA_CON0 0x1c2c +#define MT6357_VIBR_ANA_CON1 0x1c2e +#define MT6357_VSIM1_ANA_CON0 0x1c30 +#define MT6357_VSIM1_ANA_CON1 0x1c32 +#define MT6357_VSIM2_ANA_CON0 0x1c34 +#define MT6357_VSIM2_ANA_CON1 0x1c36 +#define MT6357_VMCH_ANA_CON0 0x1c38 +#define MT6357_VMCH_ANA_CON1 0x1c3a +#define MT6357_VMC_ANA_CON0 0x1c3c +#define MT6357_VMC_ANA_CON1 0x1c3e +#define MT6357_VCAMIO_ANA_CON0 0x1c40 +#define MT6357_VCAMIO_ANA_CON1 0x1c42 +#define MT6357_VCN18_ANA_CON0 0x1c44 +#define MT6357_VCN18_ANA_CON1 0x1c46 +#define MT6357_VRF18_ANA_CON0 0x1c48 +#define MT6357_VRF18_ANA_CON1 0x1c4a +#define MT6357_VIO18_ANA_CON0 0x1c4c +#define MT6357_VIO18_ANA_CON1 0x1c4e +#define MT6357_VDRAM_ANA_CON1 0x1c50 +#define MT6357_VRF12_ANA_CON0 0x1c52 +#define MT6357_VRF12_ANA_CON1 0x1c54 +#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56 +#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58 +#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a +#define MT6357_VFE28_ELR_0 0x1c5c +#define MT6357_VCN28_ELR_0 0x1c5e +#define MT6357_VAUD28_ELR_0 0x1c60 +#define MT6357_VAUX18_ELR_0 0x1c62 +#define MT6357_VXO22_ELR_0 0x1c64 +#define MT6357_VCN33_ELR_0 0x1c66 +#define MT6357_VEMC_ELR_0 0x1c68 +#define MT6357_VLDO28_ELR_0 0x1c6a +#define MT6357_VIO28_ELR_0 0x1c6c +#define MT6357_VIBR_ELR_0 0x1c6e +#define MT6357_VSIM1_ELR_0 0x1c70 +#define MT6357_VSIM2_ELR_0 0x1c72 +#define MT6357_VMCH_ELR_0 0x1c74 +#define MT6357_VMC_ELR_0 0x1c76 +#define MT6357_VCAMIO_ELR_0 0x1c78 +#define MT6357_VCN18_ELR_0 0x1c7a +#define MT6357_VRF18_ELR_0 0x1c7c +#define MT6357_LDO_ANA1_DSN_ID 0x1c80 +#define MT6357_LDO_ANA1_DSN_REV0 0x1c82 +#define MT6357_LDO_ANA1_DSN_DBI 0x1c84 +#define MT6357_LDO_ANA1_DSN_DXI 0x1c86 +#define MT6357_VUSB33_ANA_CON0 0x1c88 +#define MT6357_VUSB33_ANA_CON1 0x1c8a +#define MT6357_VCAMA_ANA_CON0 0x1c8c +#define MT6357_VCAMA_ANA_CON1 0x1c8e +#define MT6357_VEFUSE_ANA_CON0 0x1c90 +#define MT6357_VEFUSE_ANA_CON1 0x1c92 +#define MT6357_VCAMD_ANA_CON0 0x1c94 +#define MT6357_VCAMD_ANA_CON1 0x1c96 +#define MT6357_LDO_ANA1_ELR_NUM 0x1c98 +#define MT6357_VUSB33_ELR_0 0x1c9a +#define MT6357_VCAMA_ELR_0 0x1c9c +#define 
MT6357_VEFUSE_ELR_0 0x1c9e +#define MT6357_VCAMD_ELR_0 0x1ca0 +#define MT6357_VIO18_ELR_0 0x1ca2 +#define MT6357_VDRAM_ELR_0 0x1ca4 +#define MT6357_VRF12_ELR_0 0x1ca6 +#define MT6357_VRTC_ELR_0 0x1ca8 +#define MT6357_VDRAM_ELR_1 0x1caa +#define MT6357_VDRAM_ELR_2 0x1cac +#define MT6357_XPP_TOP_ID 0x1e00 +#define MT6357_XPP_TOP_REV0 0x1e02 +#define MT6357_XPP_TOP_DBI 0x1e04 +#define MT6357_XPP_TOP_DXI 0x1e06 +#define MT6357_XPP_TPM0 0x1e08 +#define MT6357_XPP_TPM1 0x1e0a +#define MT6357_XPP_TOP_TEST_OUT 0x1e0c +#define MT6357_XPP_TOP_TEST_CON0 0x1e0e +#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10 +#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12 +#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14 +#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16 +#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18 +#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a +#define MT6357_XPP_TOP_RST_CON0 0x1e1c +#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e +#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20 +#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22 +#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24 +#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26 +#define MT6357_DRIVER_BL_DSN_ID 0x1e80 +#define MT6357_DRIVER_BL_DSN_REV0 0x1e82 +#define MT6357_DRIVER_BL_DSN_DBI 0x1e84 +#define MT6357_DRIVER_BL_DSN_DXI 0x1e86 +#define MT6357_ISINK1_CON0 0x1e88 +#define MT6357_ISINK1_CON1 0x1e8a +#define MT6357_ISINK1_CON2 0x1e8c +#define MT6357_ISINK1_CON3 0x1e8e +#define MT6357_ISINK_ANA1 0x1e90 +#define MT6357_ISINK_PHASE_DLY 0x1e92 +#define MT6357_ISINK_SFSTR 0x1e94 +#define MT6357_ISINK_EN_CTRL 0x1e96 +#define MT6357_ISINK_MODE_CTRL 0x1e98 +#define MT6357_DRIVER_ANA_CON0 0x1e9a +#define MT6357_ISINK_ANA_CON0 0x1e9c +#define MT6357_ISINK_ANA_CON1 0x1e9e +#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0 +#define MT6357_DRIVER_BL_ELR_0 0x1ea2 +#define MT6357_DRIVER_CI_DSN_ID 0x1f00 +#define MT6357_DRIVER_CI_DSN_REV0 0x1f02 +#define MT6357_DRIVER_CI_DSN_DBI 0x1f04 +#define MT6357_DRIVER_CI_DSN_DXI 0x1f06 +#define MT6357_CHRIND_CON0 0x1f08 +#define MT6357_CHRIND_CON1 0x1f0a +#define MT6357_CHRIND_CON2 0x1f0c +#define MT6357_CHRIND_CON3 0x1f0e +#define MT6357_CHRIND_CON4 0x1f10 +#define MT6357_CHRIND_EN_CTRL 0x1f12 +#define MT6357_CHRIND_ANA_CON0 0x1f14 +#define MT6357_DRIVER_DL_DSN_ID 0x1f80 +#define MT6357_DRIVER_DL_DSN_REV0 0x1f82 +#define MT6357_DRIVER_DL_DSN_DBI 0x1f84 +#define MT6357_DRIVER_DL_DSN_DXI 0x1f86 +#define MT6357_ISINK2_CON0 0x1f88 +#define MT6357_ISINK3_CON0 0x1f8a +#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c +#define MT6357_AUD_TOP_ID 0x2080 +#define MT6357_AUD_TOP_REV0 0x2082 +#define MT6357_AUD_TOP_DBI 0x2084 +#define MT6357_AUD_TOP_DXI 0x2086 +#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088 +#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a +#define MT6357_AUD_TOP_CKPDN_CON0 0x208c +#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e +#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090 +#define MT6357_AUD_TOP_CKSEL_CON0 0x2092 +#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094 +#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096 +#define MT6357_AUD_TOP_CKTST_CON0 0x2098 +#define MT6357_AUD_TOP_RST_CON0 0x209a +#define MT6357_AUD_TOP_RST_CON0_SET 0x209c +#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e +#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0 +#define MT6357_AUD_TOP_INT_CON0 0x20a2 +#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4 +#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6 +#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8 +#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa +#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac +#define MT6357_AUD_TOP_INT_STATUS0 0x20ae +#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0 +#define 
MT6357_AUD_TOP_INT_MISC_CON0 0x20b2 +#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4 +#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6 +#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8 +#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba +#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc +#define MT6357_AUD_TOP_MON_CON0 0x20be +#define MT6357_AUDIO_DIG_DSN_ID 0x2100 +#define MT6357_AUDIO_DIG_DSN_REV0 0x2102 +#define MT6357_AUDIO_DIG_DSN_DBI 0x2104 +#define MT6357_AUDIO_DIG_DSN_DXI 0x2106 +#define MT6357_AFE_UL_DL_CON0 0x2108 +#define MT6357_AFE_DL_SRC2_CON0_L 0x210a +#define MT6357_AFE_UL_SRC_CON0_H 0x210c +#define MT6357_AFE_UL_SRC_CON0_L 0x210e +#define MT6357_AFE_TOP_CON0 0x2110 +#define MT6357_AUDIO_TOP_CON0 0x2112 +#define MT6357_AFE_MON_DEBUG0 0x2114 +#define MT6357_AFUNC_AUD_CON0 0x2116 +#define MT6357_AFUNC_AUD_CON1 0x2118 +#define MT6357_AFUNC_AUD_CON2 0x211a +#define MT6357_AFUNC_AUD_CON3 0x211c +#define MT6357_AFUNC_AUD_CON4 0x211e +#define MT6357_AFUNC_AUD_CON5 0x2120 +#define MT6357_AFUNC_AUD_CON6 0x2122 +#define MT6357_AFUNC_AUD_MON0 0x2124 +#define MT6357_AUDRC_TUNE_MON0 0x2126 +#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128 +#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a +#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c +#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e +#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130 +#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132 +#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134 +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136 +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138 +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c +#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e +#define MT6357_AFE_SGEN_CFG0 0x2140 +#define MT6357_AFE_SGEN_CFG1 0x2142 +#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144 +#define MT6357_AFE_DCCLK_CFG0 0x2146 +#define MT6357_AFE_DCCLK_CFG1 0x2148 +#define MT6357_AUDIO_DIG_CFG 0x214a +#define MT6357_AFE_AUD_PAD_TOP 0x214c +#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e +#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150 +#define MT6357_AUDENC_DSN_ID 0x2180 +#define MT6357_AUDENC_DSN_REV0 0x2182 +#define MT6357_AUDENC_DSN_DBI 0x2184 +#define MT6357_AUDENC_DSN_FPI 0x2186 +#define MT6357_AUDENC_ANA_CON0 0x2188 +#define MT6357_AUDENC_ANA_CON1 0x218a +#define MT6357_AUDENC_ANA_CON2 0x218c +#define MT6357_AUDENC_ANA_CON3 0x218e +#define MT6357_AUDENC_ANA_CON4 0x2190 +#define MT6357_AUDENC_ANA_CON5 0x2192 +#define MT6357_AUDENC_ANA_CON6 0x2194 +#define MT6357_AUDENC_ANA_CON7 0x2196 +#define MT6357_AUDENC_ANA_CON8 0x2198 +#define MT6357_AUDENC_ANA_CON9 0x219a +#define MT6357_AUDENC_ANA_CON10 0x219c +#define MT6357_AUDENC_ANA_CON11 0x219e +#define MT6357_AUDDEC_DSN_ID 0x2200 +#define MT6357_AUDDEC_DSN_REV0 0x2202 +#define MT6357_AUDDEC_DSN_DBI 0x2204 +#define MT6357_AUDDEC_DSN_FPI 0x2206 +#define MT6357_AUDDEC_ANA_CON0 0x2208 +#define MT6357_AUDDEC_ANA_CON1 0x220a +#define MT6357_AUDDEC_ANA_CON2 0x220c +#define MT6357_AUDDEC_ANA_CON3 0x220e +#define MT6357_AUDDEC_ANA_CON4 0x2210 +#define MT6357_AUDDEC_ANA_CON5 0x2212 +#define MT6357_AUDDEC_ANA_CON6 0x2214 +#define MT6357_AUDDEC_ANA_CON7 0x2216 +#define MT6357_AUDDEC_ANA_CON8 0x2218 +#define MT6357_AUDDEC_ANA_CON9 0x221a +#define MT6357_AUDDEC_ANA_CON10 0x221c +#define MT6357_AUDDEC_ANA_CON11 0x221e +#define MT6357_AUDDEC_ANA_CON12 0x2220 +#define MT6357_AUDDEC_ANA_CON13 0x2222 +#define MT6357_AUDDEC_ELR_NUM 0x2224 +#define MT6357_AUDDEC_ELR_0 0x2226 +#define MT6357_AUDZCD_DSN_ID 0x2280 +#define MT6357_AUDZCD_DSN_REV0 0x2282 +#define MT6357_AUDZCD_DSN_DBI 0x2284 +#define MT6357_AUDZCD_DSN_FPI 0x2286 +#define 
MT6357_ZCD_CON0 0x2288 +#define MT6357_ZCD_CON1 0x228a +#define MT6357_ZCD_CON2 0x228c +#define MT6357_ZCD_CON3 0x228e +#define MT6357_ZCD_CON4 0x2290 +#define MT6357_ZCD_CON5 0x2292 +#define MT6357_ACCDET_DSN_DIG_ID 0x2300 +#define MT6357_ACCDET_DSN_DIG_REV0 0x2302 +#define MT6357_ACCDET_DSN_DBI 0x2304 +#define MT6357_ACCDET_DSN_FPI 0x2306 +#define MT6357_ACCDET_CON0 0x2308 +#define MT6357_ACCDET_CON1 0x230a +#define MT6357_ACCDET_CON2 0x230c +#define MT6357_ACCDET_CON3 0x230e +#define MT6357_ACCDET_CON4 0x2310 +#define MT6357_ACCDET_CON5 0x2312 +#define MT6357_ACCDET_CON6 0x2314 +#define MT6357_ACCDET_CON7 0x2316 +#define MT6357_ACCDET_CON8 0x2318 +#define MT6357_ACCDET_CON9 0x231a +#define MT6357_ACCDET_CON10 0x231c +#define MT6357_ACCDET_CON11 0x231e +#define MT6357_ACCDET_CON12 0x2320 +#define MT6357_ACCDET_CON13 0x2322 +#define MT6357_ACCDET_CON14 0x2324 +#define MT6357_ACCDET_CON15 0x2326 +#define MT6357_ACCDET_CON16 0x2328 +#define MT6357_ACCDET_CON17 0x232a +#define MT6357_ACCDET_CON18 0x232c +#define MT6357_ACCDET_CON19 0x232e +#define MT6357_ACCDET_CON20 0x2330 +#define MT6357_ACCDET_CON21 0x2332 +#define MT6357_ACCDET_CON22 0x2334 +#define MT6357_ACCDET_CON23 0x2336 +#define MT6357_ACCDET_CON24 0x2338 +#define MT6357_ACCDET_CON25 0x233a +#define MT6357_ACCDET_CON26 0x233c +#define MT6357_ACCDET_CON27 0x233e +#define MT6357_ACCDET_CON28 0x2340 + +#endif /* __MFD_MT6357_REGISTERS_H__ */ diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h new file mode 100644 index 0000000000..478aece170 --- /dev/null +++ b/include/linux/mhi_ep.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + * + */ +#ifndef _MHI_EP_H_ +#define _MHI_EP_H_ + +#include +#include + +#define MHI_EP_DEFAULT_MTU 0x8000 + +/** + * struct mhi_ep_channel_config - Channel configuration structure for controller + * @name: The name of this channel + * @num: The number assigned to this channel + * @num_elements: The number of elements that can be queued to this channel + * @dir: Direction that data may flow on this channel + */ +struct mhi_ep_channel_config { + char *name; + u32 num; + u32 num_elements; + enum dma_data_direction dir; +}; + +/** + * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration + * @mhi_version: MHI spec version supported by the controller + * @max_channels: Maximum number of channels supported + * @num_channels: Number of channels defined in @ch_cfg + * @ch_cfg: Array of defined channels + */ +struct mhi_ep_cntrl_config { + u32 mhi_version; + u32 max_channels; + u32 num_channels; + const struct mhi_ep_channel_config *ch_cfg; +}; + +/** + * struct mhi_ep_db_info - MHI Endpoint doorbell info + * @mask: Mask of the doorbell interrupt + * @status: Status of the doorbell interrupt + */ +struct mhi_ep_db_info { + u32 mask; + u32 status; +}; + +/** + * struct mhi_ep_cntrl - MHI Endpoint controller structure + * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI + * Endpoint controller + * @mhi_dev: MHI Endpoint device instance for the controller + * @mmio: MMIO region containing the MHI registers + * @mhi_chan: Points to the channel configuration table + * @mhi_event: Points to the event ring configurations table + * @mhi_cmd: Points to the command ring configurations table + * @sm: MHI Endpoint state machine + * @ch_ctx_cache: Cache of host channel context data structure + * @ev_ctx_cache: Cache of host event context data structure + * @cmd_ctx_cache: Cache of host command context data structure + * 
@ch_ctx_host_pa: Physical address of host channel context data structure + * @ev_ctx_host_pa: Physical address of host event context data structure + * @cmd_ctx_host_pa: Physical address of host command context data structure + * @ch_ctx_cache_phys: Physical address of the host channel context cache + * @ev_ctx_cache_phys: Physical address of the host event context cache + * @cmd_ctx_cache_phys: Physical address of the host command context cache + * @chdb: Array of channel doorbell interrupt info + * @event_lock: Lock for protecting event rings + * @list_lock: Lock for protecting state transition and channel doorbell lists + * @state_lock: Lock for protecting state transitions + * @st_transition_list: List of state transitions + * @ch_db_list: List of queued channel doorbells + * @wq: Dedicated workqueue for handling rings and state changes + * @state_work: State transition worker + * @reset_work: Worker for MHI Endpoint reset + * @cmd_ring_work: Worker for processing command rings + * @ch_ring_work: Worker for processing channel rings + * @raise_irq: CB function for raising IRQ to the host + * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it + * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context + * @read_from_host: CB function for reading from host memory from endpoint + * @write_to_host: CB function for writing to host memory from endpoint + * @mhi_state: MHI Endpoint state + * @max_chan: Maximum channels supported by the endpoint controller + * @mru: MRU (Maximum Receive Unit) value of the endpoint controller + * @event_rings: Number of event rings supported by the endpoint controller + * @hw_event_rings: Number of hardware event rings supported by the endpoint controller + * @chdb_offset: Channel doorbell offset set by the host + * @erdb_offset: Event ring doorbell offset set by the host + * @index: MHI Endpoint controller index + * @irq: IRQ used by the endpoint controller + * @enabled: Check if the endpoint controller is enabled or not + */ +struct mhi_ep_cntrl { + struct device *cntrl_dev; + struct mhi_ep_device *mhi_dev; + void __iomem *mmio; + + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_event *mhi_event; + struct mhi_ep_cmd *mhi_cmd; + struct mhi_ep_sm *sm; + + struct mhi_chan_ctxt *ch_ctx_cache; + struct mhi_event_ctxt *ev_ctx_cache; + struct mhi_cmd_ctxt *cmd_ctx_cache; + u64 ch_ctx_host_pa; + u64 ev_ctx_host_pa; + u64 cmd_ctx_host_pa; + phys_addr_t ch_ctx_cache_phys; + phys_addr_t ev_ctx_cache_phys; + phys_addr_t cmd_ctx_cache_phys; + + struct mhi_ep_db_info chdb[4]; + struct mutex event_lock; + spinlock_t list_lock; + spinlock_t state_lock; + + struct list_head st_transition_list; + struct list_head ch_db_list; + + struct workqueue_struct *wq; + struct work_struct state_work; + struct work_struct reset_work; + struct work_struct cmd_ring_work; + struct work_struct ch_ring_work; + + void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); + int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, + void __iomem **virt, size_t size); + void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys, + void __iomem *virt, size_t size); + int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size); + int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size); + + enum mhi_state mhi_state; + + u32 max_chan; + u32 mru; + u32 event_rings; + u32 hw_event_rings; + u32 
chdb_offset;
+	u32 erdb_offset;
+	u32 index;
+	int irq;
+	bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+	struct device dev;
+	struct mhi_ep_cntrl *mhi_cntrl;
+	const struct mhi_device_id *id;
+	const char *name;
+	struct mhi_ep_chan *ul_chan;
+	struct mhi_ep_chan *dl_chan;
+	enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+	const struct mhi_device_id *id_table;
+	struct device_driver driver;
+	int (*probe)(struct mhi_ep_device *mhi_ep,
+		     const struct mhi_device_id *id);
+	void (*remove)(struct mhi_ep_device *mhi_ep);
+	void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+			   struct mhi_result *result);
+	void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+			   struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+	module_driver(mhi_drv, mhi_ep_driver_register, \
+		      mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+	__mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with the MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from the MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
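+ *
+ * A minimal registration sketch (illustrative only: the foo_* names, the
+ * channel layout and the version value below are placeholders, not taken
+ * from any real controller driver):
+ *
+ *	static const struct mhi_ep_channel_config foo_channels[] = {
+ *		{ .name = "LOOPBACK", .num = 0, .num_elements = 64, .dir = DMA_TO_DEVICE },
+ *		{ .name = "LOOPBACK", .num = 1, .num_elements = 64, .dir = DMA_FROM_DEVICE },
+ *	};
+ *
+ *	static const struct mhi_ep_cntrl_config foo_config = {
+ *		.mhi_version = 0x1000000,
+ *		.max_channels = 128,
+ *		.num_channels = ARRAY_SIZE(foo_channels),
+ *		.ch_cfg = foo_channels,
+ *	};
+ *
+ *	ret = mhi_ep_register_controller(&foo_cntrl, &foo_config);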
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+			       const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send an SKB to the host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
diff --git a/include/linux/mtd/nand-ecc-mtk.h b/include/linux/mtd/nand-ecc-mtk.h
new file mode 100644
index 0000000000..0e48c36e6c
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mtk.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li
+ *	    Jorge Ramirez-Ortiz
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+	u32 corrected;
+	u32 bitflips;
+	u32 failed;
+};
+
+struct mtk_ecc_config {
+	enum mtk_ecc_operation op;
+	enum mtk_ecc_mode mode;
+	dma_addr_t addr;
+	u32 strength;
+	u32 sectors;
+	u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
new file mode 100644
index 0000000000..dcb8030062
--- /dev/null
+++ b/include/linux/nvme-auth.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
+ */
+
+#ifndef _NVME_AUTH_H
+#define _NVME_AUTH_H
+
+#include
+
+struct nvme_dhchap_key {
+	u8 *key;
+	size_t len;
+	u8 hash;
+};
+
+u32 nvme_auth_get_seqnum(void);
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id);
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
+
+const char *nvme_auth_hmac_name(u8 hmac_id);
+const char *nvme_auth_digest_name(u8 hmac_id);
+size_t nvme_auth_hmac_hash_len(u8 hmac_id);
+u8 nvme_auth_hmac_id(const char *hmac_name);
+
+struct
nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, + u8 key_hash); +void nvme_auth_free_key(struct nvme_dhchap_key *key); +u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn); +int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key); +int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, + u8 *challenge, u8 *aug, size_t hlen); +int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid); +int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, + u8 *host_key, size_t host_key_len); +int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, + u8 *ctrl_key, size_t ctrl_key_len, + u8 *sess_key, size_t sess_key_len); + +#endif /* _NVME_AUTH_H */ diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h new file mode 100644 index 0000000000..ed9b4df792 --- /dev/null +++ b/include/linux/pci-doe.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Data Object Exchange + * PCIe r6.0, sec 6.30 DOE + * + * Copyright (C) 2021 Huawei + * Jonathan Cameron + * + * Copyright (C) 2022 Intel Corporation + * Ira Weiny + */ + +#ifndef LINUX_PCI_DOE_H +#define LINUX_PCI_DOE_H + +struct pci_doe_protocol { + u16 vid; + u8 type; +}; + +struct pci_doe_mb; + +/** + * struct pci_doe_task - represents a single query/response + * + * @prot: DOE Protocol + * @request_pl: The request payload + * @request_pl_sz: Size of the request payload (bytes) + * @response_pl: The response payload + * @response_pl_sz: Size of the response payload (bytes) + * @rv: Return value. Length of received response or error (bytes) + * @complete: Called when task is complete + * @private: Private data for the consumer + * @work: Used internally by the mailbox + * @doe_mb: Used internally by the mailbox + * + * The payload sizes and rv are specified in bytes with the following + * restrictions concerning the protocol. + * + * 1) The request_pl_sz must be a multiple of double words (4 bytes) + * 2) The response_pl_sz must be >= a single double word (4 bytes) + * 3) rv is returned as bytes but it will be a multiple of double words + * + * NOTE there is no need for the caller to initialize work or doe_mb. 
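+ *
+ * As an illustrative sketch only (the vendor ID, object type and
+ * completion callback below are made up, not taken from a real DOE
+ * protocol), a task honouring restrictions 1 and 2 could look like:
+ *
+ *	u32 request[2] = { 0 };		8-byte request, a multiple of 4 (rule 1)
+ *	u32 response[32];		128-byte response, >= 4 bytes (rule 2)
+ *
+ *	struct pci_doe_task task = {
+ *		.prot = { .vid = 0x1234, .type = 1 },
+ *		.request_pl = request,
+ *		.request_pl_sz = sizeof(request),
+ *		.response_pl = response,
+ *		.response_pl_sz = sizeof(response),
+ *		.complete = my_doe_complete,
+ *	};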
+ */
+struct pci_doe_task {
+	struct pci_doe_protocol prot;
+	u32 *request_pl;
+	size_t request_pl_sz;
+	u32 *response_pl;
+	size_t response_pl_sz;
+	int rv;
+	void (*complete)(struct pci_doe_task *task);
+	void *private;
+
+	/* No need for the user to initialize these fields */
+	struct work_struct work;
+	struct pci_doe_mb *doe_mb;
+};
+
+/**
+ * pci_doe_for_each_off - Iterate each DOE capability
+ * @pdev: struct pci_dev to iterate
+ * @off: u16 of config space offset of each mailbox capability found
+ */
+#define pci_doe_for_each_off(pdev, off) \
+	for (off = pci_find_next_ext_capability(pdev, off, \
+					PCI_EXT_CAP_ID_DOE); \
+		off > 0; \
+		off = pci_find_next_ext_capability(pdev, off, \
+					PCI_EXT_CAP_ID_DOE))
+
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task);
+
+#endif
diff --git a/include/linux/pcs-rzn1-miic.h b/include/linux/pcs-rzn1-miic.h
new file mode 100644
index 0000000000..56d12b2136
--- /dev/null
+++ b/include/linux/pcs-rzn1-miic.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger
+ */
+
+#ifndef __LINUX_PCS_MIIC_H
+#define __LINUX_PCS_MIIC_H
+
+struct phylink;
+struct device_node;
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np);
+
+void miic_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_MIIC_H */
diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h
new file mode 100644
index 0000000000..09931d080a
--- /dev/null
+++ b/include/linux/phy/phy-lvds.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#ifndef __PHY_LVDS_H_
+#define __PHY_LVDS_H_
+
+/**
+ * struct phy_configure_opts_lvds - LVDS configuration set
+ * @bits_per_lane_and_dclk_cycle: Number of bits per lane per differential
+ *                                clock cycle.
+ * @differential_clk_rate: Clock rate, in Hertz, of the LVDS
+ *                         differential clock.
+ * @lanes: Number of active, consecutive,
+ *         data lanes, starting from lane 0,
+ *         used for the transmissions.
+ * @is_slave: Boolean, true if the phy is a slave
+ *            which works together with a master
+ *            phy to support dual link transmission,
+ *            otherwise a regular phy or a master phy.
+ *
+ * This structure is used to represent the configuration state of an LVDS phy.
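+ *
+ * For example (the values are illustrative, not taken from any particular
+ * SoC), a classic 7:1 single-link LVDS interface driving four data lanes
+ * from a 74.25 MHz differential clock would be described as:
+ *
+ *	struct phy_configure_opts_lvds opts = {
+ *		.bits_per_lane_and_dclk_cycle = 7,
+ *		.differential_clk_rate = 74250000,
+ *		.lanes = 4,
+ *		.is_slave = false,
+ *	};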
+ */ +struct phy_configure_opts_lvds { + unsigned int bits_per_lane_and_dclk_cycle; + unsigned long differential_clk_rate; + unsigned int lanes; + bool is_slave; +}; + +#endif /* __PHY_LVDS_H_ */ diff --git a/include/linux/platform_data/asoc-poodle.h b/include/linux/platform_data/asoc-poodle.h new file mode 100644 index 0000000000..2052fad55c --- /dev/null +++ b/include/linux/platform_data/asoc-poodle.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PLATFORM_DATA_POODLE_AUDIO +#define __LINUX_PLATFORM_DATA_POODLE_AUDIO + +/* locomo is not a proper gpio driver, and uses its own api */ +struct poodle_audio_platform_data { + struct device *locomo_dev; + + int gpio_amp_on; + int gpio_mute_l; + int gpio_mute_r; + int gpio_232vcc_on; + int gpio_jk_b; +}; + +#endif diff --git a/include/linux/platform_data/asoc-pxa.h b/include/linux/platform_data/asoc-pxa.h new file mode 100644 index 0000000000..327454cd82 --- /dev/null +++ b/include/linux/platform_data/asoc-pxa.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SOC_PXA_AUDIO_H__ +#define __SOC_PXA_AUDIO_H__ + +#include +#include +#include + +/* + * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95) + * a -1 value means no gpio will be used for reset + * @codec_pdata: AC97 codec platform_data + + * reset_gpio should only be specified for pxa27x CPUs where a silicon + * bug prevents correct operation of the reset line. If not specified, + * the default behaviour on these CPUs is to consider gpio 113 as the + * AC97 reset line, which is the default on most boards. + */ +typedef struct { + int (*startup)(struct snd_pcm_substream *, void *); + void (*shutdown)(struct snd_pcm_substream *, void *); + void (*suspend)(void *); + void (*resume)(void *); + void *priv; + int reset_gpio; + void *codec_pdata[AC97_BUS_MAX_DEVICES]; +} pxa2xx_audio_ops_t; + +extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops); + +#endif diff --git a/include/linux/platform_data/sh_mmcif.h b/include/linux/platform_data/sh_mmcif.h new file mode 100644 index 0000000000..6eb914f958 --- /dev/null +++ b/include/linux/platform_data/sh_mmcif.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * platform data for eMMC driver + * + * Copyright (C) 2010 Renesas Solutions Corp. 
+ */ + +#ifndef LINUX_MMC_SH_MMCIF_H +#define LINUX_MMC_SH_MMCIF_H + +#include +#include + +/* + * MMCIF : CE_CLK_CTRL [19:16] + * 1000 : Peripheral clock / 512 + * 0111 : Peripheral clock / 256 + * 0110 : Peripheral clock / 128 + * 0101 : Peripheral clock / 64 + * 0100 : Peripheral clock / 32 + * 0011 : Peripheral clock / 16 + * 0010 : Peripheral clock / 8 + * 0001 : Peripheral clock / 4 + * 0000 : Peripheral clock / 2 + * 1111 : Peripheral clock (sup_pclk set '1') + */ + +struct sh_mmcif_plat_data { + unsigned int slave_id_tx; /* embedded slave_id_[tr]x */ + unsigned int slave_id_rx; + u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ + unsigned long caps; + u32 ocr; +}; + +#define MMCIF_CE_CMD_SET 0x00000000 +#define MMCIF_CE_ARG 0x00000008 +#define MMCIF_CE_ARG_CMD12 0x0000000C +#define MMCIF_CE_CMD_CTRL 0x00000010 +#define MMCIF_CE_BLOCK_SET 0x00000014 +#define MMCIF_CE_CLK_CTRL 0x00000018 +#define MMCIF_CE_BUF_ACC 0x0000001C +#define MMCIF_CE_RESP3 0x00000020 +#define MMCIF_CE_RESP2 0x00000024 +#define MMCIF_CE_RESP1 0x00000028 +#define MMCIF_CE_RESP0 0x0000002C +#define MMCIF_CE_RESP_CMD12 0x00000030 +#define MMCIF_CE_DATA 0x00000034 +#define MMCIF_CE_INT 0x00000040 +#define MMCIF_CE_INT_MASK 0x00000044 +#define MMCIF_CE_HOST_STS1 0x00000048 +#define MMCIF_CE_HOST_STS2 0x0000004C +#define MMCIF_CE_CLK_CTRL2 0x00000070 +#define MMCIF_CE_VERSION 0x0000007C + +/* CE_BUF_ACC */ +#define BUF_ACC_DMAWEN (1 << 25) +#define BUF_ACC_DMAREN (1 << 24) +#define BUF_ACC_BUSW_32 (0 << 17) +#define BUF_ACC_BUSW_16 (1 << 17) +#define BUF_ACC_ATYP (1 << 16) + +/* CE_CLK_CTRL */ +#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */ +#define CLK_CLEAR (0xf << 16) +#define CLK_SUP_PCLK (0xf << 16) +#define CLKDIV_4 (1 << 16) /* mmc clock frequency. + * n: bus clock/(2^(n+1)) */ +#define CLKDIV_256 (7 << 16) /* mmc clock frequency. 
(see above) */
+#define SRSPTO_256	(2 << 12)	/* resp timeout */
+#define SRBSYTO_29	(0xf << 8)	/* resp busy timeout */
+#define SRWDTO_29	(0xf << 4)	/* read/write timeout */
+#define SCCSTO_29	(0xf << 0)	/* ccs timeout */
+
+/* CE_VERSION */
+#define SOFT_RST_ON	(1 << 31)
+#define SOFT_RST_OFF	0
+
+static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
+{
+	return __raw_readl(addr + reg);
+}
+
+static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
+{
+	__raw_writel(val, addr + reg);
+}
+
+#define SH_MMCIF_BBS 512 /* boot block size */
+
+static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
+					  unsigned long cmd, unsigned long arg)
+{
+	sh_mmcif_writel(base, MMCIF_CE_INT, 0);
+	sh_mmcif_writel(base, MMCIF_CE_ARG, arg);
+	sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd);
+}
+
+static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
+{
+	unsigned long tmp;
+	int cnt;
+
+	for (cnt = 0; cnt < 1000000; cnt++) {
+		tmp = sh_mmcif_readl(base, MMCIF_CE_INT);
+		if (tmp & mask) {
+			sh_mmcif_writel(base, MMCIF_CE_INT, tmp & ~mask);
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+static inline int sh_mmcif_boot_cmd(void __iomem *base,
+				    unsigned long cmd, unsigned long arg)
+{
+	sh_mmcif_boot_cmd_send(base, cmd, arg);
+	return sh_mmcif_boot_cmd_poll(base, 0x00010000);
+}
+
+static inline int sh_mmcif_boot_do_read_single(void __iomem *base,
+					       unsigned int block_nr,
+					       unsigned long *buf)
+{
+	int k;
+
+	/* CMD13 - Status */
+	sh_mmcif_boot_cmd(base, 0x0d400000, 0x00010000);
+
+	if (sh_mmcif_readl(base, MMCIF_CE_RESP0) != 0x0900)
+		return -1;
+
+	/* CMD17 - Read */
+	sh_mmcif_boot_cmd(base, 0x11480000, block_nr * SH_MMCIF_BBS);
+	if (sh_mmcif_boot_cmd_poll(base, 0x00100000) < 0)
+		return -1;
+
+	for (k = 0; k < (SH_MMCIF_BBS / 4); k++)
+		buf[k] = sh_mmcif_readl(base, MMCIF_CE_DATA);
+
+	return 0;
+}
+
+static inline int sh_mmcif_boot_do_read(void __iomem *base,
+					unsigned long first_block,
+					unsigned long nr_blocks,
+					void *buf)
+{
+	unsigned long k;
+	int ret = 0;
+
+	/* In data transfer mode: Set clock to Bus clock/4 (about 20 MHz) */
+	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
+			CLK_ENABLE | CLKDIV_4 | SRSPTO_256 |
+			SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+
+	/* CMD9 - Get CSD */
+	sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000);
+
+	/* CMD7 - Select the card */
+	sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000);
+
+	/* CMD16 - Set the block size */
+	sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS);
+
+	for (k = 0; !ret && k < nr_blocks; k++)
+		ret = sh_mmcif_boot_do_read_single(base, first_block + k,
+						   buf + (k * SH_MMCIF_BBS));
+
+	return ret;
+}
+
+static inline void sh_mmcif_boot_init(void __iomem *base)
+{
+	/* reset */
+	sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_ON);
+	sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_OFF);
+
+	/* byte swap */
+	sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
+
+	/* Set block size in MMCIF hardware */
+	sh_mmcif_writel(base, MMCIF_CE_BLOCK_SET, SH_MMCIF_BBS);
+
+	/* Enable the clock, set it to Bus clock/256 (about 325 kHz). */
+	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
+			CLK_ENABLE | CLKDIV_256 | SRSPTO_256 |
+			SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+
+	/* CMD0 */
+	sh_mmcif_boot_cmd(base, 0x00000040, 0);
+
+	/* CMD1 - Get OCR */
+	do {
+		sh_mmcif_boot_cmd(base, 0x01405040, 0x40300000); /* CMD1 */
+	} while ((sh_mmcif_readl(base, MMCIF_CE_RESP0) & 0x80000000)
+		 != 0x80000000);
+
+	/* CMD2 - Get CID */
+	sh_mmcif_boot_cmd(base, 0x02806040, 0);
+
+	/* CMD3 - Set card relative address */
+	sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
+}
+
+#endif /* LINUX_MMC_SH_MMCIF_H */
diff --git a/include/linux/platform_data/x86/p2sb.h b/include/linux/platform_data/x86/p2sb.h
new file mode 100644
index 0000000000..a1d5fddc8f
--- /dev/null
+++ b/include/linux/platform_data/x86/p2sb.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ */
+
+#ifndef _PLATFORM_DATA_X86_P2SB_H
+#define _PLATFORM_DATA_X86_P2SB_H
+
+#include
+#include
+
+struct pci_bus;
+struct resource;
+
+#if IS_BUILTIN(CONFIG_P2SB)
+
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem);
+
+#else /* CONFIG_P2SB */
+
+static inline int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_P2SB is not set */
+
+#endif /* _PLATFORM_DATA_X86_P2SB_H */
diff --git a/include/linux/polynomial.h b/include/linux/polynomial.h
new file mode 100644
index 0000000000..9e074a0bb6
--- /dev/null
+++ b/include/linux/polynomial.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ */
+
+#ifndef _POLYNOMIAL_H
+#define _POLYNOMIAL_H
+
+/*
+ * struct polynomial_term - one term descriptor of a polynomial
+ * @deg: degree of the term.
+ * @coef: multiplication factor of the term.
+ * @divider: distributed divider per each degree.
+ * @divider_leftover: divider leftover, which couldn't be redistributed.
+ */
+struct polynomial_term {
+	unsigned int deg;
+	long coef;
+	long divider;
+	long divider_leftover;
+};
+
+/*
+ * struct polynomial - a polynomial descriptor
+ * @total_divider: total data divider.
+ * @terms: polynomial terms, last term must have degree of 0
+ */
+struct polynomial {
+	long total_divider;
+	struct polynomial_term terms[];
+};
+
+long polynomial_calc(const struct polynomial *poly, long data);
+
+#endif
diff --git a/include/linux/rv.h b/include/linux/rv.h
new file mode 100644
index 0000000000..8883b41d88
--- /dev/null
+++ b/include/linux/rv.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime Verification.
+ *
+ * For further information, see: kernel/trace/rv/rv.c.
+ */
+#ifndef _LINUX_RV_H
+#define _LINUX_RV_H
+
+#define MAX_DA_NAME_LEN 24
+
+#ifdef CONFIG_RV
+/*
+ * Deterministic automaton per-object variables.
+ */
+struct da_monitor {
+	bool monitoring;
+	unsigned int curr_state;
+};
+
+/*
+ * Per-task RV monitors count. Currently fixed at RV_PER_TASK_MONITORS.
+ * If we find justification for more monitors, we can think about
+ * adding more or developing a dynamic method. So far, none of
+ * these are justified.
+ */
+#define RV_PER_TASK_MONITORS		1
+#define RV_PER_TASK_MONITOR_INIT	(RV_PER_TASK_MONITORS)
+
+/*
+ * Further monitor types are expected, so make this a union.
+ */
+union rv_task_monitor {
+	struct da_monitor da_mon;
+};
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor {
+	const char *name;
+	const char *description;
+	void (*react)(char *msg);
+};
+#endif
+
+struct rv_monitor {
+	const char *name;
+	const char *description;
+	bool enabled;
+	int (*enable)(void);
+	void (*disable)(void);
+	void (*reset)(void);
+#ifdef CONFIG_RV_REACTORS
+	void (*react)(char *msg);
+#endif
+};
+
+bool rv_monitoring_on(void);
+int rv_unregister_monitor(struct rv_monitor *monitor);
+int rv_register_monitor(struct rv_monitor *monitor);
+int rv_get_task_monitor_slot(void);
+void rv_put_task_monitor_slot(int slot);
+
+#ifdef CONFIG_RV_REACTORS
+bool rv_reacting_on(void);
+int rv_unregister_reactor(struct rv_reactor *reactor);
+int rv_register_reactor(struct rv_reactor *reactor);
+#endif /* CONFIG_RV_REACTORS */
+
+#endif /* CONFIG_RV */
+#endif /* _LINUX_RV_H */
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
new file mode 100644
index 0000000000..88eb832eac
--- /dev/null
+++ b/include/linux/soc/apple/rtkit.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC Library
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple's SoCs come with various co-processors running their RTKit operating
+ * system. This protocol library is used by client drivers to use the
+ * features provided by them.
+ */
+#ifndef _LINUX_APPLE_RTKIT_H_
+#define _LINUX_APPLE_RTKIT_H_
+
+#include
+#include
+#include
+
+/*
+ * Struct to represent a shared memory buffer used for communication with the
+ * co-processor.
+ *
+ * @buffer: Shared memory buffer allocated inside normal RAM.
+ * @iomem: Shared memory buffer controlled by the co-processor.
+ * @size: Size of the shared memory buffer.
+ * @iova: Device VA of shared memory buffer.
+ * @is_mapped: Shared memory buffer is managed by the co-processor.
+ */
+
+struct apple_rtkit_shmem {
+	void *buffer;
+	void __iomem *iomem;
+	size_t size;
+	dma_addr_t iova;
+	bool is_mapped;
+};
+
+/*
+ * Struct to represent implementation-specific RTKit operations.
+ *
+ * @crashed: Called when the co-processor has crashed. Runs in process
+ *           context.
+ * @recv_message: Function called when a message from RTKit is received
+ *                on a non-system endpoint. Called from a worker thread.
+ * @recv_message_early:
+ *                Like recv_message, but called from atomic context. It
+ *                should return true if it handled the message. If it
+ *                returns false, the message will be passed on to the
+ *                worker thread.
+ * @shmem_setup: Setup shared memory buffer. If bfr.is_mapped is true the
+ *               buffer is managed by the co-processor and needs to be mapped.
+ *               Otherwise the buffer is managed by Linux and needs to be
+ *               allocated. If not specified dma_alloc_coherent is used.
+ *               Called in process context.
+ * @shmem_destroy: Undo the shared memory buffer setup in shmem_setup. If not
+ *                 specified dma_free_coherent is used. Called in process
+ *                 context.
+ */
+struct apple_rtkit_ops {
+	void (*crashed)(void *cookie);
+	void (*recv_message)(void *cookie, u8 endpoint, u64 message);
+	bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
+	int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
+	void (*shmem_destroy)(void *cookie, struct apple_rtkit_shmem *bfr);
+};
+
+struct apple_rtkit;
+
+/*
+ * Initializes the internal state required to handle RTKit. This
+ * should usually be called within _probe.
+ *
+ * @dev: Pointer to the device node this co-processor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+					  const char *mbox_name, int mbox_idx,
+					  const struct apple_rtkit_ops *ops);
+
+/*
+ * Reinitialize internal structures. Must only be called while the
+ * co-processor is held in reset.
+ */
+int apple_rtkit_reinit(struct apple_rtkit *rtk);
+
+/*
+ * Handle RTKit's boot process. Should be called after the CPU of the
+ * co-processor has been started.
+ */
+int apple_rtkit_boot(struct apple_rtkit *rtk);
+
+/*
+ * Quiesce the co-processor.
+ */
+int apple_rtkit_quiesce(struct apple_rtkit *rtk);
+
+/*
+ * Wake the co-processor up from hibernation mode.
+ */
+int apple_rtkit_wake(struct apple_rtkit *rtk);
+
+/*
+ * Shut down the co-processor.
+ */
+int apple_rtkit_shutdown(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit is running and ready to handle messages.
+ */
+bool apple_rtkit_is_running(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit has crashed.
+ */
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk);
+
+/*
+ * Starts an endpoint. Must be called after boot but before any messages can be
+ * sent or received from that endpoint.
+ */
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint);
+
+/*
+ * Send a message to the given endpoint.
+ *
+ * @rtk: RTKit reference
+ * @ep: target endpoint
+ * @message: message to be sent
+ * @completion: will be completed once the message has been submitted
+ *              to the hardware FIFO. Can be NULL.
+ * @atomic: if set to true this function can be called from atomic
+ *          context.
+ */
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+			     struct completion *completion, bool atomic);
+
+/*
+ * Send a message to the given endpoint and wait until it has been submitted
+ * to the hardware FIFO.
+ * Will return zero on success and a negative error code on failure
+ * (e.g. -ETIME when the message couldn't be written within the given
+ * timeout)
+ *
+ * @rtk: RTKit reference
+ * @ep: target endpoint
+ * @message: message to be sent
+ * @timeout: timeout in milliseconds to allow the message transmission
+ *           to be completed
+ * @atomic: if set to true this function can be called from atomic
+ *          context.
+ */
+int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
+				  unsigned long timeout, bool atomic);
+
+#endif /* _LINUX_APPLE_RTKIT_H_ */
diff --git a/include/linux/soc/apple/sart.h b/include/linux/soc/apple/sart.h
new file mode 100644
index 0000000000..2249bf6cde
--- /dev/null
+++ b/include/linux/soc/apple/sart.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target these. Unlike a proper
+ * IOMMU, no remapping can be done.
+ */
+
+#ifndef _LINUX_SOC_APPLE_SART_H_
+#define _LINUX_SOC_APPLE_SART_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct apple_sart;
+
+/*
+ * Get a reference to the SART attached to dev.
+ *
+ * Looks for the phandle reference in apple,sart and returns a pointer
+ * to the corresponding apple_sart struct to be used with
+ * apple_sart_add_allowed_region and apple_sart_remove_allowed_region.
+ */
+struct apple_sart *devm_apple_sart_get(struct device *dev);
+
+/*
+ * Adds the region [paddr, paddr+size] to the DMA allow list.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region to be used for DMA
+ * @size: Size of the region to be used for DMA.
+ */
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+				  size_t size);
+
+/*
+ * Removes the region [paddr, paddr+size] from the DMA allow list.
+ *
+ * Note that the exact same paddr and size that were passed to
+ * apple_sart_add_allowed_region have to be passed here.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region no longer used for DMA
+ * @size: Size of the region no longer used for DMA.
+ */
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+				     size_t size);
+
+#endif /* _LINUX_SOC_APPLE_SART_H_ */
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
new file mode 100644
index 0000000000..7e00cca067
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -0,0 +1,131 @@
+#ifndef __MTK_WED_H
+#define __MTK_WED_H
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/regmap.h>
+#include <linux/pci.h>
+
+#define MTK_WED_TX_QUEUES		2
+
+struct mtk_wed_hw;
+struct mtk_wdma_desc;
+
+struct mtk_wed_ring {
+	struct mtk_wdma_desc *desc;
+	dma_addr_t desc_phys;
+	int size;
+
+	u32 reg_base;
+	void __iomem *wpdma;
+};
+
+struct mtk_wed_device {
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	const struct mtk_wed_ops *ops;
+	struct device *dev;
+	struct mtk_wed_hw *hw;
+	bool init_done, running;
+	int wdma_idx;
+	int irq;
+
+	struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
+	struct mtk_wed_ring txfree_ring;
+	struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+
+	struct {
+		int size;
+		void **pages;
+		struct mtk_wdma_desc *desc;
+		dma_addr_t desc_phys;
+	} buf_ring;
+
+	/* filled by driver: */
+	struct {
+		struct pci_dev *pci_dev;
+
+		u32 wpdma_phys;
+
+		u16 token_start;
+		unsigned int nbuf;
+
+		u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+		int (*offload_enable)(struct mtk_wed_device *wed);
+		void (*offload_disable)(struct mtk_wed_device *wed);
+	} wlan;
+#endif
+};
+
+struct mtk_wed_ops {
+	int (*attach)(struct mtk_wed_device *dev);
+	int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
+			     void __iomem *regs);
+	int (*txfree_ring_setup)(struct mtk_wed_device *dev,
+				 void __iomem *regs);
+	void (*detach)(struct mtk_wed_device *dev);
+
+	void (*stop)(struct mtk_wed_device *dev);
+	void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
+	void (*reset_dma)(struct mtk_wed_device *dev);
+
+	u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
+	void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
+
+	u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
+	void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+};
+
+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+
+static inline int
+mtk_wed_device_attach(struct mtk_wed_device *dev)
+{
+	int ret = -ENODEV;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	rcu_read_lock();
+	dev->ops = rcu_dereference(mtk_soc_wed_ops);
+	if (dev->ops)
+		ret = dev->ops->attach(dev);
+	else
+		rcu_read_unlock();
+
+	if (ret)
+		dev->ops = NULL;
+#endif
+
+	return ret;
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+#define mtk_wed_device_active(_dev) !!(_dev)->ops
+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
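+/*
+ * Illustrative use of the attach/start flow above from a hypothetical
+ * WLAN driver; the wlan fields must be filled in before attaching, and
+ * my_init_buf is a placeholder for the driver's buffer-init callback:
+ *
+ *	wed->wlan.pci_dev = pdev;
+ *	wed->wlan.wpdma_phys = wpdma_phys;
+ *	wed->wlan.token_start = token_start;
+ *	wed->wlan.nbuf = 1024;
+ *	wed->wlan.init_buf = my_init_buf;
+ *	if (mtk_wed_device_attach(wed) == 0)
+ *		mtk_wed_device_start(wed, irq_mask);
+ */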
+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
+	(_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
+	(_dev)->ops->txfree_ring_setup(_dev, _regs)
+#define mtk_wed_device_reg_read(_dev, _reg) \
+	(_dev)->ops->reg_read(_dev, _reg)
+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
+	(_dev)->ops->reg_write(_dev, _reg, _val)
+#define mtk_wed_device_irq_get(_dev, _mask) \
+	(_dev)->ops->irq_get(_dev, _mask)
+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
+	(_dev)->ops->irq_set_mask(_dev, _mask)
+#else
+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+{
+	return false;
+}
+#define mtk_wed_device_detach(_dev) do {} while (0)
+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
+#define mtk_wed_device_reg_read(_dev, _reg) 0
+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
+#define mtk_wed_device_irq_get(_dev, _mask) 0
+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#endif
+
+#endif
diff --git a/include/linux/soc/pxa/cpu.h b/include/linux/soc/pxa/cpu.h
new file mode 100644
index 0000000000..5782450ee4
--- /dev/null
+++ b/include/linux/soc/pxa/cpu.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author:	Nicolas Pitre
+ * Created:	Jun 15, 2001
+ * Copyright:	MontaVista Software Inc.
+ */
+
+#ifndef __SOC_PXA_CPU_H
+#define __SOC_PXA_CPU_H
+
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/*
+ *  CPU     Stepping  CPU_ID      JTAG_ID
+ *
+ *  PXA210  B0        0x69052922  0x2926C013
+ *  PXA210  B1        0x69052923  0x3926C013
+ *  PXA210  B2        0x69052924  0x4926C013
+ *  PXA210  C0        0x69052D25  0x5926C013
+ *
+ *  PXA250  A0        0x69052100  0x09264013
+ *  PXA250  A1        0x69052101  0x19264013
+ *  PXA250  B0        0x69052902  0x29264013
+ *  PXA250  B1        0x69052903  0x39264013
+ *  PXA250  B2        0x69052904  0x49264013
+ *  PXA250  C0        0x69052D05  0x59264013
+ *
+ *  PXA255  A0        0x69052D06  0x69264013
+ *
+ *  PXA26x  A0        0x69052903  0x39264013
+ *  PXA26x  B0        0x69052D05  0x59264013
+ *
+ *  PXA27x  A0        0x69054110  0x09265013
+ *  PXA27x  A1        0x69054111  0x19265013
+ *  PXA27x  B0        0x69054112  0x29265013
+ *  PXA27x  B1        0x69054113  0x39265013
+ *  PXA27x  C0        0x69054114  0x49265013
+ *  PXA27x  C5        0x69054117  0x79265013
+ *
+ *  PXA30x  A0        0x69056880  0x0E648013
+ *  PXA30x  A1        0x69056881  0x1E648013
+ *  PXA31x  A0        0x69056890  0x0E649013
+ *  PXA31x  A1        0x69056891  0x1E649013
+ *  PXA31x  A2        0x69056892  0x2E649013
+ *  PXA32x  B1        0x69056825  0x5E642013
+ *  PXA32x  B2        0x69056826  0x6E642013
+ *
+ *  PXA930  B0        0x69056835  0x5E643013
+ *  PXA930  B1        0x69056837  0x7E643013
+ *  PXA930  B2        0x69056838  0x8E643013
+ *
+ *  PXA935  A0        0x56056931  0x1E653013
+ *  PXA935  B0        0x56056936  0x6E653013
+ *  PXA935  B1        0x56056938  0x8E653013
+ */
+#ifdef CONFIG_PXA25x
+#define __cpu_is_pxa210(id)				\
+	({						\
+		unsigned int _id = (id) & 0xf3f0;	\
+		_id == 0x2120;				\
+	})
+
+#define __cpu_is_pxa250(id)				\
+	({						\
+		unsigned int _id = (id) & 0xf3ff;	\
+		_id <= 0x2105;				\
+	})
+
+#define __cpu_is_pxa255(id)				\
+	({						\
+		unsigned int _id = (id) & 0xffff;	\
+		_id == 0x2d06;				\
+	})
+
+#define __cpu_is_pxa25x(id)				\
+	({						\
+		unsigned int _id = (id) & 0xf300;	\
+		_id == 0x2100;				\
+	})
+#else
+#define __cpu_is_pxa210(id)	(0)
+#define __cpu_is_pxa250(id)	(0)
+#define __cpu_is_pxa255(id)	(0)
+#define __cpu_is_pxa25x(id)	(0)
+#endif
+
+#ifdef CONFIG_PXA27x
+#define __cpu_is_pxa27x(id)				\
+	({						\
+		unsigned int _id = (id) >> 4
& 0xfff; \ + _id == 0x411; \ + }) +#else +#define __cpu_is_pxa27x(id) (0) +#endif + +#ifdef CONFIG_CPU_PXA300 +#define __cpu_is_pxa300(id) \ + ({ \ + unsigned int _id = (id) >> 4 & 0xfff; \ + _id == 0x688; \ + }) +#else +#define __cpu_is_pxa300(id) (0) +#endif + +#ifdef CONFIG_CPU_PXA310 +#define __cpu_is_pxa310(id) \ + ({ \ + unsigned int _id = (id) >> 4 & 0xfff; \ + _id == 0x689; \ + }) +#else +#define __cpu_is_pxa310(id) (0) +#endif + +#ifdef CONFIG_CPU_PXA320 +#define __cpu_is_pxa320(id) \ + ({ \ + unsigned int _id = (id) >> 4 & 0xfff; \ + _id == 0x603 || _id == 0x682; \ + }) +#else +#define __cpu_is_pxa320(id) (0) +#endif + +#ifdef CONFIG_CPU_PXA930 +#define __cpu_is_pxa930(id) \ + ({ \ + unsigned int _id = (id) >> 4 & 0xfff; \ + _id == 0x683; \ + }) +#else +#define __cpu_is_pxa930(id) (0) +#endif + +#ifdef CONFIG_CPU_PXA935 +#define __cpu_is_pxa935(id) \ + ({ \ + unsigned int _id = (id) >> 4 & 0xfff; \ + _id == 0x693; \ + }) +#else +#define __cpu_is_pxa935(id) (0) +#endif + +#define cpu_is_pxa210() \ + ({ \ + __cpu_is_pxa210(read_cpuid_id()); \ + }) + +#define cpu_is_pxa250() \ + ({ \ + __cpu_is_pxa250(read_cpuid_id()); \ + }) + +#define cpu_is_pxa255() \ + ({ \ + __cpu_is_pxa255(read_cpuid_id()); \ + }) + +#define cpu_is_pxa25x() \ + ({ \ + __cpu_is_pxa25x(read_cpuid_id()); \ + }) + +#define cpu_is_pxa27x() \ + ({ \ + __cpu_is_pxa27x(read_cpuid_id()); \ + }) + +#define cpu_is_pxa300() \ + ({ \ + __cpu_is_pxa300(read_cpuid_id()); \ + }) + +#define cpu_is_pxa310() \ + ({ \ + __cpu_is_pxa310(read_cpuid_id()); \ + }) + +#define cpu_is_pxa320() \ + ({ \ + __cpu_is_pxa320(read_cpuid_id()); \ + }) + +#define cpu_is_pxa930() \ + ({ \ + __cpu_is_pxa930(read_cpuid_id()); \ + }) + +#define cpu_is_pxa935() \ + ({ \ + __cpu_is_pxa935(read_cpuid_id()); \ + }) + + + +/* + * CPUID Core Generation Bit + * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x + */ +#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) +#define __cpu_is_pxa2xx(id) \ + ({ \ + unsigned int _id = (id) >> 13 & 0x7; \ + _id <= 0x2; \ + }) +#else +#define __cpu_is_pxa2xx(id) (0) +#endif + +#ifdef CONFIG_PXA3xx +#define __cpu_is_pxa3xx(id) \ + ({ \ + __cpu_is_pxa300(id) \ + || __cpu_is_pxa310(id) \ + || __cpu_is_pxa320(id) \ + || __cpu_is_pxa93x(id); \ + }) +#else +#define __cpu_is_pxa3xx(id) (0) +#endif + +#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935) +#define __cpu_is_pxa93x(id) \ + ({ \ + __cpu_is_pxa930(id) \ + || __cpu_is_pxa935(id); \ + }) +#else +#define __cpu_is_pxa93x(id) (0) +#endif + +#define cpu_is_pxa2xx() \ + ({ \ + __cpu_is_pxa2xx(read_cpuid_id()); \ + }) + +#define cpu_is_pxa3xx() \ + ({ \ + __cpu_is_pxa3xx(read_cpuid_id()); \ + }) + +#define cpu_is_pxa93x() \ + ({ \ + __cpu_is_pxa93x(read_cpuid_id()); \ + }) + +#endif diff --git a/include/linux/soc/pxa/mfp.h b/include/linux/soc/pxa/mfp.h new file mode 100644 index 0000000000..39779cbed0 --- /dev/null +++ b/include/linux/soc/pxa/mfp.h @@ -0,0 +1,470 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Common Multi-Function Pin Definitions + * + * Copyright (C) 2007 Marvell International Ltd. 
+ * + * 2007-8-21: eric miao + * initial version + */ + +#ifndef __ASM_PLAT_MFP_H +#define __ASM_PLAT_MFP_H + +#define mfp_to_gpio(m) ((m) % 256) + +/* list of all the configurable MFP pins */ +enum { + MFP_PIN_INVALID = -1, + + MFP_PIN_GPIO0 = 0, + MFP_PIN_GPIO1, + MFP_PIN_GPIO2, + MFP_PIN_GPIO3, + MFP_PIN_GPIO4, + MFP_PIN_GPIO5, + MFP_PIN_GPIO6, + MFP_PIN_GPIO7, + MFP_PIN_GPIO8, + MFP_PIN_GPIO9, + MFP_PIN_GPIO10, + MFP_PIN_GPIO11, + MFP_PIN_GPIO12, + MFP_PIN_GPIO13, + MFP_PIN_GPIO14, + MFP_PIN_GPIO15, + MFP_PIN_GPIO16, + MFP_PIN_GPIO17, + MFP_PIN_GPIO18, + MFP_PIN_GPIO19, + MFP_PIN_GPIO20, + MFP_PIN_GPIO21, + MFP_PIN_GPIO22, + MFP_PIN_GPIO23, + MFP_PIN_GPIO24, + MFP_PIN_GPIO25, + MFP_PIN_GPIO26, + MFP_PIN_GPIO27, + MFP_PIN_GPIO28, + MFP_PIN_GPIO29, + MFP_PIN_GPIO30, + MFP_PIN_GPIO31, + MFP_PIN_GPIO32, + MFP_PIN_GPIO33, + MFP_PIN_GPIO34, + MFP_PIN_GPIO35, + MFP_PIN_GPIO36, + MFP_PIN_GPIO37, + MFP_PIN_GPIO38, + MFP_PIN_GPIO39, + MFP_PIN_GPIO40, + MFP_PIN_GPIO41, + MFP_PIN_GPIO42, + MFP_PIN_GPIO43, + MFP_PIN_GPIO44, + MFP_PIN_GPIO45, + MFP_PIN_GPIO46, + MFP_PIN_GPIO47, + MFP_PIN_GPIO48, + MFP_PIN_GPIO49, + MFP_PIN_GPIO50, + MFP_PIN_GPIO51, + MFP_PIN_GPIO52, + MFP_PIN_GPIO53, + MFP_PIN_GPIO54, + MFP_PIN_GPIO55, + MFP_PIN_GPIO56, + MFP_PIN_GPIO57, + MFP_PIN_GPIO58, + MFP_PIN_GPIO59, + MFP_PIN_GPIO60, + MFP_PIN_GPIO61, + MFP_PIN_GPIO62, + MFP_PIN_GPIO63, + MFP_PIN_GPIO64, + MFP_PIN_GPIO65, + MFP_PIN_GPIO66, + MFP_PIN_GPIO67, + MFP_PIN_GPIO68, + MFP_PIN_GPIO69, + MFP_PIN_GPIO70, + MFP_PIN_GPIO71, + MFP_PIN_GPIO72, + MFP_PIN_GPIO73, + MFP_PIN_GPIO74, + MFP_PIN_GPIO75, + MFP_PIN_GPIO76, + MFP_PIN_GPIO77, + MFP_PIN_GPIO78, + MFP_PIN_GPIO79, + MFP_PIN_GPIO80, + MFP_PIN_GPIO81, + MFP_PIN_GPIO82, + MFP_PIN_GPIO83, + MFP_PIN_GPIO84, + MFP_PIN_GPIO85, + MFP_PIN_GPIO86, + MFP_PIN_GPIO87, + MFP_PIN_GPIO88, + MFP_PIN_GPIO89, + MFP_PIN_GPIO90, + MFP_PIN_GPIO91, + MFP_PIN_GPIO92, + MFP_PIN_GPIO93, + MFP_PIN_GPIO94, + MFP_PIN_GPIO95, + MFP_PIN_GPIO96, + MFP_PIN_GPIO97, + MFP_PIN_GPIO98, + MFP_PIN_GPIO99, + MFP_PIN_GPIO100, + MFP_PIN_GPIO101, + MFP_PIN_GPIO102, + MFP_PIN_GPIO103, + MFP_PIN_GPIO104, + MFP_PIN_GPIO105, + MFP_PIN_GPIO106, + MFP_PIN_GPIO107, + MFP_PIN_GPIO108, + MFP_PIN_GPIO109, + MFP_PIN_GPIO110, + MFP_PIN_GPIO111, + MFP_PIN_GPIO112, + MFP_PIN_GPIO113, + MFP_PIN_GPIO114, + MFP_PIN_GPIO115, + MFP_PIN_GPIO116, + MFP_PIN_GPIO117, + MFP_PIN_GPIO118, + MFP_PIN_GPIO119, + MFP_PIN_GPIO120, + MFP_PIN_GPIO121, + MFP_PIN_GPIO122, + MFP_PIN_GPIO123, + MFP_PIN_GPIO124, + MFP_PIN_GPIO125, + MFP_PIN_GPIO126, + MFP_PIN_GPIO127, + + MFP_PIN_GPIO128, + MFP_PIN_GPIO129, + MFP_PIN_GPIO130, + MFP_PIN_GPIO131, + MFP_PIN_GPIO132, + MFP_PIN_GPIO133, + MFP_PIN_GPIO134, + MFP_PIN_GPIO135, + MFP_PIN_GPIO136, + MFP_PIN_GPIO137, + MFP_PIN_GPIO138, + MFP_PIN_GPIO139, + MFP_PIN_GPIO140, + MFP_PIN_GPIO141, + MFP_PIN_GPIO142, + MFP_PIN_GPIO143, + MFP_PIN_GPIO144, + MFP_PIN_GPIO145, + MFP_PIN_GPIO146, + MFP_PIN_GPIO147, + MFP_PIN_GPIO148, + MFP_PIN_GPIO149, + MFP_PIN_GPIO150, + MFP_PIN_GPIO151, + MFP_PIN_GPIO152, + MFP_PIN_GPIO153, + MFP_PIN_GPIO154, + MFP_PIN_GPIO155, + MFP_PIN_GPIO156, + MFP_PIN_GPIO157, + MFP_PIN_GPIO158, + MFP_PIN_GPIO159, + MFP_PIN_GPIO160, + MFP_PIN_GPIO161, + MFP_PIN_GPIO162, + MFP_PIN_GPIO163, + MFP_PIN_GPIO164, + MFP_PIN_GPIO165, + MFP_PIN_GPIO166, + MFP_PIN_GPIO167, + MFP_PIN_GPIO168, + MFP_PIN_GPIO169, + MFP_PIN_GPIO170, + MFP_PIN_GPIO171, + MFP_PIN_GPIO172, + MFP_PIN_GPIO173, + MFP_PIN_GPIO174, + MFP_PIN_GPIO175, + MFP_PIN_GPIO176, + MFP_PIN_GPIO177, + MFP_PIN_GPIO178, + MFP_PIN_GPIO179, + 
MFP_PIN_GPIO180, + MFP_PIN_GPIO181, + MFP_PIN_GPIO182, + MFP_PIN_GPIO183, + MFP_PIN_GPIO184, + MFP_PIN_GPIO185, + MFP_PIN_GPIO186, + MFP_PIN_GPIO187, + MFP_PIN_GPIO188, + MFP_PIN_GPIO189, + MFP_PIN_GPIO190, + MFP_PIN_GPIO191, + + MFP_PIN_GPIO255 = 255, + + MFP_PIN_GPIO0_2, + MFP_PIN_GPIO1_2, + MFP_PIN_GPIO2_2, + MFP_PIN_GPIO3_2, + MFP_PIN_GPIO4_2, + MFP_PIN_GPIO5_2, + MFP_PIN_GPIO6_2, + MFP_PIN_GPIO7_2, + MFP_PIN_GPIO8_2, + MFP_PIN_GPIO9_2, + MFP_PIN_GPIO10_2, + MFP_PIN_GPIO11_2, + MFP_PIN_GPIO12_2, + MFP_PIN_GPIO13_2, + MFP_PIN_GPIO14_2, + MFP_PIN_GPIO15_2, + MFP_PIN_GPIO16_2, + MFP_PIN_GPIO17_2, + + MFP_PIN_ULPI_STP, + MFP_PIN_ULPI_NXT, + MFP_PIN_ULPI_DIR, + + MFP_PIN_nXCVREN, + MFP_PIN_DF_CLE_nOE, + MFP_PIN_DF_nADV1_ALE, + MFP_PIN_DF_SCLK_E, + MFP_PIN_DF_SCLK_S, + MFP_PIN_nBE0, + MFP_PIN_nBE1, + MFP_PIN_DF_nADV2_ALE, + MFP_PIN_DF_INT_RnB, + MFP_PIN_DF_nCS0, + MFP_PIN_DF_nCS1, + MFP_PIN_nLUA, + MFP_PIN_nLLA, + MFP_PIN_DF_nWE, + MFP_PIN_DF_ALE_nWE, + MFP_PIN_DF_nRE_nOE, + MFP_PIN_DF_ADDR0, + MFP_PIN_DF_ADDR1, + MFP_PIN_DF_ADDR2, + MFP_PIN_DF_ADDR3, + MFP_PIN_DF_IO0, + MFP_PIN_DF_IO1, + MFP_PIN_DF_IO2, + MFP_PIN_DF_IO3, + MFP_PIN_DF_IO4, + MFP_PIN_DF_IO5, + MFP_PIN_DF_IO6, + MFP_PIN_DF_IO7, + MFP_PIN_DF_IO8, + MFP_PIN_DF_IO9, + MFP_PIN_DF_IO10, + MFP_PIN_DF_IO11, + MFP_PIN_DF_IO12, + MFP_PIN_DF_IO13, + MFP_PIN_DF_IO14, + MFP_PIN_DF_IO15, + MFP_PIN_DF_nCS0_SM_nCS2, + MFP_PIN_DF_nCS1_SM_nCS3, + MFP_PIN_SM_nCS0, + MFP_PIN_SM_nCS1, + MFP_PIN_DF_WEn, + MFP_PIN_DF_REn, + MFP_PIN_DF_CLE_SM_OEn, + MFP_PIN_DF_ALE_SM_WEn, + MFP_PIN_DF_RDY0, + MFP_PIN_DF_RDY1, + + MFP_PIN_SM_SCLK, + MFP_PIN_SM_BE0, + MFP_PIN_SM_BE1, + MFP_PIN_SM_ADV, + MFP_PIN_SM_ADVMUX, + MFP_PIN_SM_RDY, + + MFP_PIN_MMC1_DAT7, + MFP_PIN_MMC1_DAT6, + MFP_PIN_MMC1_DAT5, + MFP_PIN_MMC1_DAT4, + MFP_PIN_MMC1_DAT3, + MFP_PIN_MMC1_DAT2, + MFP_PIN_MMC1_DAT1, + MFP_PIN_MMC1_DAT0, + MFP_PIN_MMC1_CMD, + MFP_PIN_MMC1_CLK, + MFP_PIN_MMC1_CD, + MFP_PIN_MMC1_WP, + + /* additional pins on PXA930 */ + MFP_PIN_GSIM_UIO, + MFP_PIN_GSIM_UCLK, + MFP_PIN_GSIM_UDET, + MFP_PIN_GSIM_nURST, + MFP_PIN_PMIC_INT, + MFP_PIN_RDY, + + /* additional pins on MMP2 */ + MFP_PIN_TWSI1_SCL, + MFP_PIN_TWSI1_SDA, + MFP_PIN_TWSI4_SCL, + MFP_PIN_TWSI4_SDA, + MFP_PIN_CLK_REQ, + + MFP_PIN_MAX, +}; + +/* + * a possible MFP configuration is represented by a 32-bit integer + * + * bit 0.. 
9 - MFP Pin Number (1024 Pins Maximum) + * bit 10..12 - Alternate Function Selection + * bit 13..15 - Drive Strength + * bit 16..18 - Low Power Mode State + * bit 19..20 - Low Power Mode Edge Detection + * bit 21..22 - Run Mode Pull State + * + * to facilitate the definition, the following macros are provided + * + * MFP_CFG_DEFAULT - default MFP configuration value, with + * alternate function = 0, + * drive strength = fast 3mA (MFP_DS03X) + * low power mode = default + * edge detection = none + * + * MFP_CFG - default MFPR value with alternate function + * MFP_CFG_DRV - default MFPR value with alternate function and + * pin drive strength + * MFP_CFG_LPM - default MFPR value with alternate function and + * low power mode + * MFP_CFG_X - default MFPR value with alternate function, + * pin drive strength and low power mode + */ + +typedef unsigned long mfp_cfg_t; + +#define MFP_PIN(x) ((x) & 0x3ff) + +#define MFP_AF0 (0x0 << 10) +#define MFP_AF1 (0x1 << 10) +#define MFP_AF2 (0x2 << 10) +#define MFP_AF3 (0x3 << 10) +#define MFP_AF4 (0x4 << 10) +#define MFP_AF5 (0x5 << 10) +#define MFP_AF6 (0x6 << 10) +#define MFP_AF7 (0x7 << 10) +#define MFP_AF_MASK (0x7 << 10) +#define MFP_AF(x) (((x) >> 10) & 0x7) + +#define MFP_DS01X (0x0 << 13) +#define MFP_DS02X (0x1 << 13) +#define MFP_DS03X (0x2 << 13) +#define MFP_DS04X (0x3 << 13) +#define MFP_DS06X (0x4 << 13) +#define MFP_DS08X (0x5 << 13) +#define MFP_DS10X (0x6 << 13) +#define MFP_DS13X (0x7 << 13) +#define MFP_DS_MASK (0x7 << 13) +#define MFP_DS(x) (((x) >> 13) & 0x7) + +#define MFP_LPM_DEFAULT (0x0 << 16) +#define MFP_LPM_DRIVE_LOW (0x1 << 16) +#define MFP_LPM_DRIVE_HIGH (0x2 << 16) +#define MFP_LPM_PULL_LOW (0x3 << 16) +#define MFP_LPM_PULL_HIGH (0x4 << 16) +#define MFP_LPM_FLOAT (0x5 << 16) +#define MFP_LPM_INPUT (0x6 << 16) +#define MFP_LPM_STATE_MASK (0x7 << 16) +#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7) + +#define MFP_LPM_EDGE_NONE (0x0 << 19) +#define MFP_LPM_EDGE_RISE (0x1 << 19) +#define MFP_LPM_EDGE_FALL (0x2 << 19) +#define MFP_LPM_EDGE_BOTH (0x3 << 19) +#define MFP_LPM_EDGE_MASK (0x3 << 19) +#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3) + +#define MFP_PULL_NONE (0x0 << 21) +#define MFP_PULL_LOW (0x1 << 21) +#define MFP_PULL_HIGH (0x2 << 21) +#define MFP_PULL_BOTH (0x3 << 21) +#define MFP_PULL_FLOAT (0x4 << 21) +#define MFP_PULL_MASK (0x7 << 21) +#define MFP_PULL(x) (((x) >> 21) & 0x7) + +#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\ + MFP_LPM_EDGE_NONE | MFP_PULL_NONE) + +#define MFP_CFG(pin, af) \ + ((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af)) + +#define MFP_CFG_DRV(pin, af, drv) \ + ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv)) + +#define MFP_CFG_LPM(pin, af, lpm) \ + ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm)) + +#define MFP_CFG_X(pin, af, drv, lpm) \ + ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm)) + +#if defined(CONFIG_PXA3xx) || defined(CONFIG_ARCH_MMP) +/* + * each MFP pin will have a MFPR register, since the offset of the + * register varies between processors, the processor specific code + * should initialize the pin offsets by mfp_init() + * + * mfp_init_base() - accepts a virtual base for all MFPR registers and + * initialize the MFP table to a default state + * + * mfp_init_addr() - accepts a table of "mfp_addr_map" structure, which + * represents a 
range of MFP pins from "start" to "end", with the offset + * beginning at "offset", to define a single pin, let "end" = -1. + * + * use + * + * MFP_ADDR_X() to define a range of pins + * MFP_ADDR() to define a single pin + * MFP_ADDR_END to signal the end of pin offset definitions + */ +struct mfp_addr_map { + unsigned int start; + unsigned int end; + unsigned long offset; +}; + +#define MFP_ADDR_X(start, end, offset) \ + { MFP_PIN_##start, MFP_PIN_##end, offset } + +#define MFP_ADDR(pin, offset) \ + { MFP_PIN_##pin, -1, offset } + +#define MFP_ADDR_END { MFP_PIN_INVALID, 0 } + +void mfp_init_base(void __iomem *mfpr_base); +void mfp_init_addr(struct mfp_addr_map *map); + +/* + * mfp_{read, write}() - for direct read/write access to the MFPR register + * mfp_config() - for configuring a group of MFPR registers + * mfp_config_lpm() - configuring all low power MFPR registers for suspend + * mfp_config_run() - configuring all run time MFPR registers after resume + */ +unsigned long mfp_read(int mfp); +void mfp_write(int mfp, unsigned long mfpr_val); +void mfp_config(unsigned long *mfp_cfgs, int num); +void mfp_config_run(void); +void mfp_config_lpm(void); +#endif /* CONFIG_PXA3xx || CONFIG_ARCH_MMP */ + +#endif /* __ASM_PLAT_MFP_H */ diff --git a/include/linux/soc/pxa/smemc.h b/include/linux/soc/pxa/smemc.h new file mode 100644 index 0000000000..f1ffea236c --- /dev/null +++ b/include/linux/soc/pxa/smemc.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __PXA_REGS_H +#define __PXA_REGS_H + +#include + +void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio); +void pxa_smemc_set_pcmcia_socket(int nr); +int pxa2xx_smemc_get_sdram_rows(void); +unsigned int pxa3xx_smemc_get_memclkdiv(void); +void __iomem *pxa_smemc_get_mdrefr(void); + +#endif diff --git a/include/linux/soc/renesas/r9a06g032-sysctrl.h b/include/linux/soc/renesas/r9a06g032-sysctrl.h new file mode 100644 index 0000000000..066dfb15cb --- /dev/null +++ b/include/linux/soc/renesas/r9a06g032-sysctrl.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ +#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ + +#ifdef CONFIG_CLK_R9A06G032 +int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val); +#else +static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; } +#endif + +#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */ diff --git a/include/linux/soc/ti/omap1-io.h b/include/linux/soc/ti/omap1-io.h new file mode 100644 index 0000000000..f7f12728d4 --- /dev/null +++ b/include/linux/soc/ti/omap1-io.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_ARCH_OMAP_IO_H +#define __ASM_ARCH_OMAP_IO_H + +#ifndef __ASSEMBLER__ +#include + +#ifdef CONFIG_ARCH_OMAP1_ANY +/* + * NOTE: Please use ioremap + __raw_read/write where possible instead of these + */ +extern u8 omap_readb(u32 pa); +extern u16 omap_readw(u32 pa); +extern u32 omap_readl(u32 pa); +extern void omap_writeb(u8 v, u32 pa); +extern void omap_writew(u16 v, u32 pa); +extern void omap_writel(u32 v, u32 pa); +#else +static inline u8 omap_readb(u32 pa) { return 0; } +static inline u16 omap_readw(u32 pa) { return 0; } +static inline u32 omap_readl(u32 pa) { return 0; } +static inline void omap_writeb(u8 v, u32 pa) { } +static inline void omap_writew(u16 v, u32 pa) { } +static inline void omap_writel(u32 v, u32 pa) { } +#endif +#endif + +/* + * ---------------------------------------------------------------------------- + * System control 
registers + * ---------------------------------------------------------------------------- + */ +#define MOD_CONF_CTRL_0 0xfffe1080 +#define MOD_CONF_CTRL_1 0xfffe1110 + +/* + * --------------------------------------------------------------------------- + * UPLD + * --------------------------------------------------------------------------- + */ +#define ULPD_REG_BASE (0xfffe0800) +#define ULPD_IT_STATUS (ULPD_REG_BASE + 0x14) +#define ULPD_SETUP_ANALOG_CELL_3 (ULPD_REG_BASE + 0x24) +#define ULPD_CLOCK_CTRL (ULPD_REG_BASE + 0x30) +# define DIS_USB_PVCI_CLK (1 << 5) /* no USB/FAC synch */ +# define USB_MCLK_EN (1 << 4) /* enable W4_USB_CLKO */ +#define ULPD_SOFT_REQ (ULPD_REG_BASE + 0x34) +# define SOFT_UDC_REQ (1 << 4) +# define SOFT_USB_CLK_REQ (1 << 3) +# define SOFT_DPLL_REQ (1 << 0) +#define ULPD_DPLL_CTRL (ULPD_REG_BASE + 0x3c) +#define ULPD_STATUS_REQ (ULPD_REG_BASE + 0x40) +#define ULPD_APLL_CTRL (ULPD_REG_BASE + 0x4c) +#define ULPD_POWER_CTRL (ULPD_REG_BASE + 0x50) +#define ULPD_SOFT_DISABLE_REQ_REG (ULPD_REG_BASE + 0x68) +# define DIS_MMC2_DPLL_REQ (1 << 11) +# define DIS_MMC1_DPLL_REQ (1 << 10) +# define DIS_UART3_DPLL_REQ (1 << 9) +# define DIS_UART2_DPLL_REQ (1 << 8) +# define DIS_UART1_DPLL_REQ (1 << 7) +# define DIS_USB_HOST_DPLL_REQ (1 << 6) +#define ULPD_SDW_CLK_DIV_CTRL_SEL (ULPD_REG_BASE + 0x74) +#define ULPD_CAM_CLK_CTRL (ULPD_REG_BASE + 0x7c) + +/* + * ---------------------------------------------------------------------------- + * Clocks + * ---------------------------------------------------------------------------- + */ +#define CLKGEN_REG_BASE (0xfffece00) +#define ARM_CKCTL (CLKGEN_REG_BASE + 0x0) +#define ARM_IDLECT1 (CLKGEN_REG_BASE + 0x4) +#define ARM_IDLECT2 (CLKGEN_REG_BASE + 0x8) +#define ARM_EWUPCT (CLKGEN_REG_BASE + 0xC) +#define ARM_RSTCT1 (CLKGEN_REG_BASE + 0x10) +#define ARM_RSTCT2 (CLKGEN_REG_BASE + 0x14) +#define ARM_SYSST (CLKGEN_REG_BASE + 0x18) +#define ARM_IDLECT3 (CLKGEN_REG_BASE + 0x24) + +#define CK_RATEF 1 +#define CK_IDLEF 2 +#define CK_ENABLEF 4 +#define CK_SELECTF 8 +#define SETARM_IDLE_SHIFT + +/* DPLL control registers */ +#define DPLL_CTL (0xfffecf00) + +/* DSP clock control. 
Must use __raw_readw() and __raw_writew() with these */ +#define DSP_CONFIG_REG_BASE IOMEM(0xe1008000) +#define DSP_CKCTL (DSP_CONFIG_REG_BASE + 0x0) +#define DSP_IDLECT1 (DSP_CONFIG_REG_BASE + 0x4) +#define DSP_IDLECT2 (DSP_CONFIG_REG_BASE + 0x8) +#define DSP_RSTCT2 (DSP_CONFIG_REG_BASE + 0x14) + +/* + * ---------------------------------------------------------------------------- + * Pulse-Width Light + * ---------------------------------------------------------------------------- + */ +#define OMAP_PWL_BASE 0xfffb5800 +#define OMAP_PWL_ENABLE (OMAP_PWL_BASE + 0x00) +#define OMAP_PWL_CLK_ENABLE (OMAP_PWL_BASE + 0x04) + +/* + * ---------------------------------------------------------------------------- + * Pin multiplexing registers + * ---------------------------------------------------------------------------- + */ +#define FUNC_MUX_CTRL_0 0xfffe1000 +#define FUNC_MUX_CTRL_1 0xfffe1004 +#define FUNC_MUX_CTRL_2 0xfffe1008 +#define COMP_MODE_CTRL_0 0xfffe100c +#define FUNC_MUX_CTRL_3 0xfffe1010 +#define FUNC_MUX_CTRL_4 0xfffe1014 +#define FUNC_MUX_CTRL_5 0xfffe1018 +#define FUNC_MUX_CTRL_6 0xfffe101C +#define FUNC_MUX_CTRL_7 0xfffe1020 +#define FUNC_MUX_CTRL_8 0xfffe1024 +#define FUNC_MUX_CTRL_9 0xfffe1028 +#define FUNC_MUX_CTRL_A 0xfffe102C +#define FUNC_MUX_CTRL_B 0xfffe1030 +#define FUNC_MUX_CTRL_C 0xfffe1034 +#define FUNC_MUX_CTRL_D 0xfffe1038 +#define PULL_DWN_CTRL_0 0xfffe1040 +#define PULL_DWN_CTRL_1 0xfffe1044 +#define PULL_DWN_CTRL_2 0xfffe1048 +#define PULL_DWN_CTRL_3 0xfffe104c +#define PULL_DWN_CTRL_4 0xfffe10ac + +/* OMAP-1610 specific multiplexing registers */ +#define FUNC_MUX_CTRL_E 0xfffe1090 +#define FUNC_MUX_CTRL_F 0xfffe1094 +#define FUNC_MUX_CTRL_10 0xfffe1098 +#define FUNC_MUX_CTRL_11 0xfffe109c +#define FUNC_MUX_CTRL_12 0xfffe10a0 +#define PU_PD_SEL_0 0xfffe10b4 +#define PU_PD_SEL_1 0xfffe10b8 +#define PU_PD_SEL_2 0xfffe10bc +#define PU_PD_SEL_3 0xfffe10c0 +#define PU_PD_SEL_4 0xfffe10c4 + +#endif diff --git a/include/linux/soc/ti/omap1-mux.h b/include/linux/soc/ti/omap1-mux.h new file mode 100644 index 0000000000..59c239b556 --- /dev/null +++ b/include/linux/soc/ti/omap1-mux.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __SOC_TI_OMAP1_MUX_H +#define __SOC_TI_OMAP1_MUX_H +/* + * This should not really be a global header, it reflects the + * traditional way that omap1 does pin muxing without the + * pinctrl subsystem. 
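+ *
+ * As a hedged illustration of that traditional style, board code
+ * configures pins by passing one of the enum values below to
+ * omap_cfg_reg(), declared at the end of this header:
+ *
+ *	omap_cfg_reg(UART1_TX);
+ *	omap_cfg_reg(UART1_RTS);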
+ */ + +enum omap7xx_index { + /* OMAP 730 keyboard */ + E2_7XX_KBR0, + J7_7XX_KBR1, + E1_7XX_KBR2, + F3_7XX_KBR3, + D2_7XX_KBR4, + C2_7XX_KBC0, + D3_7XX_KBC1, + E4_7XX_KBC2, + F4_7XX_KBC3, + E3_7XX_KBC4, + + /* USB */ + AA17_7XX_USB_DM, + W16_7XX_USB_PU_EN, + W17_7XX_USB_VBUSI, + W18_7XX_USB_DMCK_OUT, + W19_7XX_USB_DCRST, + + /* MMC */ + MMC_7XX_CMD, + MMC_7XX_CLK, + MMC_7XX_DAT0, + + /* I2C */ + I2C_7XX_SCL, + I2C_7XX_SDA, + + /* SPI */ + SPI_7XX_1, + SPI_7XX_2, + SPI_7XX_3, + SPI_7XX_4, + SPI_7XX_5, + SPI_7XX_6, + + /* UART */ + UART_7XX_1, + UART_7XX_2, +}; + +enum omap1xxx_index { + /* UART1 (BT_UART_GATING)*/ + UART1_TX = 0, + UART1_RTS, + + /* UART2 (COM_UART_GATING)*/ + UART2_TX, + UART2_RX, + UART2_CTS, + UART2_RTS, + + /* UART3 (GIGA_UART_GATING) */ + UART3_TX, + UART3_RX, + UART3_CTS, + UART3_RTS, + UART3_CLKREQ, + UART3_BCLK, /* 12MHz clock out */ + Y15_1610_UART3_RTS, + + /* PWT & PWL */ + PWT, + PWL, + + /* USB master generic */ + R18_USB_VBUS, + R18_1510_USB_GPIO0, + W4_USB_PUEN, + W4_USB_CLKO, + W4_USB_HIGHZ, + W4_GPIO58, + + /* USB1 master */ + USB1_SUSP, + USB1_SEO, + W13_1610_USB1_SE0, + USB1_TXEN, + USB1_TXD, + USB1_VP, + USB1_VM, + USB1_RCV, + USB1_SPEED, + R13_1610_USB1_SPEED, + R13_1710_USB1_SE0, + + /* USB2 master */ + USB2_SUSP, + USB2_VP, + USB2_TXEN, + USB2_VM, + USB2_RCV, + USB2_SEO, + USB2_TXD, + + /* OMAP-1510 GPIO */ + R18_1510_GPIO0, + R19_1510_GPIO1, + M14_1510_GPIO2, + + /* OMAP1610 GPIO */ + P18_1610_GPIO3, + Y15_1610_GPIO17, + + /* OMAP-1710 GPIO */ + R18_1710_GPIO0, + V2_1710_GPIO10, + N21_1710_GPIO14, + W15_1710_GPIO40, + + /* MPUIO */ + MPUIO2, + N15_1610_MPUIO2, + MPUIO4, + MPUIO5, + T20_1610_MPUIO5, + W11_1610_MPUIO6, + V10_1610_MPUIO7, + W11_1610_MPUIO9, + V10_1610_MPUIO10, + W10_1610_MPUIO11, + E20_1610_MPUIO13, + U20_1610_MPUIO14, + E19_1610_MPUIO15, + + /* MCBSP2 */ + MCBSP2_CLKR, + MCBSP2_CLKX, + MCBSP2_DR, + MCBSP2_DX, + MCBSP2_FSR, + MCBSP2_FSX, + + /* MCBSP3 */ + MCBSP3_CLKX, + + /* Misc ballouts */ + BALLOUT_V8_ARMIO3, + N20_HDQ, + + /* OMAP-1610 MMC2 */ + W8_1610_MMC2_DAT0, + V8_1610_MMC2_DAT1, + W15_1610_MMC2_DAT2, + R10_1610_MMC2_DAT3, + Y10_1610_MMC2_CLK, + Y8_1610_MMC2_CMD, + V9_1610_MMC2_CMDDIR, + V5_1610_MMC2_DATDIR0, + W19_1610_MMC2_DATDIR1, + R18_1610_MMC2_CLKIN, + + /* OMAP-1610 External Trace Interface */ + M19_1610_ETM_PSTAT0, + L15_1610_ETM_PSTAT1, + L18_1610_ETM_PSTAT2, + L19_1610_ETM_D0, + J19_1610_ETM_D6, + J18_1610_ETM_D7, + + /* OMAP16XX GPIO */ + P20_1610_GPIO4, + V9_1610_GPIO7, + W8_1610_GPIO9, + N20_1610_GPIO11, + N19_1610_GPIO13, + P10_1610_GPIO22, + V5_1610_GPIO24, + AA20_1610_GPIO_41, + W19_1610_GPIO48, + M7_1610_GPIO62, + V14_16XX_GPIO37, + R9_16XX_GPIO18, + L14_16XX_GPIO49, + + /* OMAP-1610 uWire */ + V19_1610_UWIRE_SCLK, + U18_1610_UWIRE_SDI, + W21_1610_UWIRE_SDO, + N14_1610_UWIRE_CS0, + P15_1610_UWIRE_CS3, + N15_1610_UWIRE_CS1, + + /* OMAP-1610 SPI */ + U19_1610_SPIF_SCK, + U18_1610_SPIF_DIN, + P20_1610_SPIF_DIN, + W21_1610_SPIF_DOUT, + R18_1610_SPIF_DOUT, + N14_1610_SPIF_CS0, + N15_1610_SPIF_CS1, + T19_1610_SPIF_CS2, + P15_1610_SPIF_CS3, + + /* OMAP-1610 Flash */ + L3_1610_FLASH_CS2B_OE, + M8_1610_FLASH_CS2B_WE, + + /* First MMC */ + MMC_CMD, + MMC_DAT1, + MMC_DAT2, + MMC_DAT0, + MMC_CLK, + MMC_DAT3, + + /* OMAP-1710 MMC CMDDIR and DATDIR0 */ + M15_1710_MMC_CLKI, + P19_1710_MMC_CMDDIR, + P20_1710_MMC_DATDIR0, + + /* OMAP-1610 USB0 alternate pin configuration */ + W9_USB0_TXEN, + AA9_USB0_VP, + Y5_USB0_RCV, + R9_USB0_VM, + V6_USB0_TXD, + W5_USB0_SE0, + V9_USB0_SPEED, + V9_USB0_SUSP, + + /* USB2 */ + W9_USB2_TXEN, 
+ AA9_USB2_VP, + Y5_USB2_RCV, + R9_USB2_VM, + V6_USB2_TXD, + W5_USB2_SE0, + + /* 16XX UART */ + R13_1610_UART1_TX, + V14_16XX_UART1_RX, + R14_1610_UART1_CTS, + AA15_1610_UART1_RTS, + R9_16XX_UART2_RX, + L14_16XX_UART3_RX, + + /* I2C OMAP-1610 */ + I2C_SCL, + I2C_SDA, + + /* Keypad */ + F18_1610_KBC0, + D20_1610_KBC1, + D19_1610_KBC2, + E18_1610_KBC3, + C21_1610_KBC4, + G18_1610_KBR0, + F19_1610_KBR1, + H14_1610_KBR2, + E20_1610_KBR3, + E19_1610_KBR4, + N19_1610_KBR5, + + /* Power management */ + T20_1610_LOW_PWR, + + /* MCLK Settings */ + V5_1710_MCLK_ON, + V5_1710_MCLK_OFF, + R10_1610_MCLK_ON, + R10_1610_MCLK_OFF, + + /* CompactFlash controller */ + P11_1610_CF_CD2, + R11_1610_CF_IOIS16, + V10_1610_CF_IREQ, + W10_1610_CF_RESET, + W11_1610_CF_CD1, + + /* parallel camera */ + J15_1610_CAM_LCLK, + J18_1610_CAM_D7, + J19_1610_CAM_D6, + J14_1610_CAM_D5, + K18_1610_CAM_D4, + K19_1610_CAM_D3, + K15_1610_CAM_D2, + K14_1610_CAM_D1, + L19_1610_CAM_D0, + L18_1610_CAM_VS, + L15_1610_CAM_HS, + M19_1610_CAM_RSTZ, + Y15_1610_CAM_OUTCLK, + + /* serial camera */ + H19_1610_CAM_EXCLK, + Y12_1610_CCP_CLKP, + W13_1610_CCP_CLKM, + W14_1610_CCP_DATAP, + Y14_1610_CCP_DATAM, + +}; + +#ifdef CONFIG_OMAP_MUX +extern int omap_cfg_reg(unsigned long reg_cfg); +#else +static inline int omap_cfg_reg(unsigned long reg_cfg) { return 0; } +#endif + +#endif diff --git a/include/linux/soc/ti/omap1-soc.h b/include/linux/soc/ti/omap1-soc.h new file mode 100644 index 0000000000..81008d400b --- /dev/null +++ b/include/linux/soc/ti/omap1-soc.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * OMAP cpu type detection + * + * Copyright (C) 2004, 2008 Nokia Corporation + * + * Copyright (C) 2009-11 Texas Instruments. + * + * Written by Tony Lindgren + * + * Added OMAP4/5 specific defines - Santosh Shilimkar + */ + +#ifndef __ASM_ARCH_OMAP_CPU_H +#define __ASM_ARCH_OMAP_CPU_H + +/* + * Test if multicore OMAP support is needed + */ +#undef MULTI_OMAP1 +#undef OMAP_NAME + +#ifdef CONFIG_ARCH_OMAP730 +# ifdef OMAP_NAME +# undef MULTI_OMAP1 +# define MULTI_OMAP1 +# else +# define OMAP_NAME omap730 +# endif +#endif +#ifdef CONFIG_ARCH_OMAP850 +# ifdef OMAP_NAME +# undef MULTI_OMAP1 +# define MULTI_OMAP1 +# else +# define OMAP_NAME omap850 +# endif +#endif +#ifdef CONFIG_ARCH_OMAP15XX +# ifdef OMAP_NAME +# undef MULTI_OMAP1 +# define MULTI_OMAP1 +# else +# define OMAP_NAME omap1510 +# endif +#endif +#ifdef CONFIG_ARCH_OMAP16XX +# ifdef OMAP_NAME +# undef MULTI_OMAP1 +# define MULTI_OMAP1 +# else +# define OMAP_NAME omap16xx +# endif +#endif + +/* + * omap_rev bits: + * CPU id bits (0730, 1510, 1710, 2422...) [31:16] + * CPU revision (See _REV_ defined in cpu.h) [15:08] + * CPU class bits (15xx, 16xx, 24xx, 34xx...) [07:00] + */ +unsigned int omap_rev(void); + +/* + * Get the CPU revision for OMAP devices + */ +#define GET_OMAP_REVISION() ((omap_rev() >> 8) & 0xff) + +/* + * Macros to group OMAP into cpu classes. + * These can be used in most places. + * cpu_is_omap7xx(): True for OMAP730, OMAP850 + * cpu_is_omap15xx(): True for OMAP1510, OMAP5910 and OMAP310 + * cpu_is_omap16xx(): True for OMAP1610, OMAP5912 and OMAP1710 + */ +#define GET_OMAP_CLASS (omap_rev() & 0xff) + +#define IS_OMAP_CLASS(class, id) \ +static inline int is_omap ##class (void) \ +{ \ + return (GET_OMAP_CLASS == (id)) ? 1 : 0; \ +} + +#define GET_OMAP_SUBCLASS ((omap_rev() >> 20) & 0x0fff) + +#define IS_OMAP_SUBCLASS(subclass, id) \ +static inline int is_omap ##subclass (void) \ +{ \ + return (GET_OMAP_SUBCLASS == (id)) ? 
1 : 0; \ +} + +IS_OMAP_CLASS(7xx, 0x07) +IS_OMAP_CLASS(15xx, 0x15) +IS_OMAP_CLASS(16xx, 0x16) + +#define cpu_is_omap7xx() 0 +#define cpu_is_omap15xx() 0 +#define cpu_is_omap16xx() 0 + +#if defined(MULTI_OMAP1) +# if defined(CONFIG_ARCH_OMAP730) +# undef cpu_is_omap7xx +# define cpu_is_omap7xx() is_omap7xx() +# endif +# if defined(CONFIG_ARCH_OMAP850) +# undef cpu_is_omap7xx +# define cpu_is_omap7xx() is_omap7xx() +# endif +# if defined(CONFIG_ARCH_OMAP15XX) +# undef cpu_is_omap15xx +# define cpu_is_omap15xx() is_omap15xx() +# endif +# if defined(CONFIG_ARCH_OMAP16XX) +# undef cpu_is_omap16xx +# define cpu_is_omap16xx() is_omap16xx() +# endif +#else +# if defined(CONFIG_ARCH_OMAP730) +# undef cpu_is_omap7xx +# define cpu_is_omap7xx() 1 +# endif +# if defined(CONFIG_ARCH_OMAP850) +# undef cpu_is_omap7xx +# define cpu_is_omap7xx() 1 +# endif +# if defined(CONFIG_ARCH_OMAP15XX) +# undef cpu_is_omap15xx +# define cpu_is_omap15xx() 1 +# endif +# if defined(CONFIG_ARCH_OMAP16XX) +# undef cpu_is_omap16xx +# define cpu_is_omap16xx() 1 +# endif +#endif + +/* + * Macros to detect individual cpu types. + * These are only rarely needed. + * cpu_is_omap310(): True for OMAP310 + * cpu_is_omap1510(): True for OMAP1510 + * cpu_is_omap1610(): True for OMAP1610 + * cpu_is_omap1611(): True for OMAP1611 + * cpu_is_omap5912(): True for OMAP5912 + * cpu_is_omap1621(): True for OMAP1621 + * cpu_is_omap1710(): True for OMAP1710 + */ +#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff) + +#define IS_OMAP_TYPE(type, id) \ +static inline int is_omap ##type (void) \ +{ \ + return (GET_OMAP_TYPE == (id)) ? 1 : 0; \ +} + +IS_OMAP_TYPE(310, 0x0310) +IS_OMAP_TYPE(1510, 0x1510) +IS_OMAP_TYPE(1610, 0x1610) +IS_OMAP_TYPE(1611, 0x1611) +IS_OMAP_TYPE(5912, 0x1611) +IS_OMAP_TYPE(1621, 0x1621) +IS_OMAP_TYPE(1710, 0x1710) + +#define cpu_is_omap310() 0 +#define cpu_is_omap1510() 0 +#define cpu_is_omap1610() 0 +#define cpu_is_omap5912() 0 +#define cpu_is_omap1611() 0 +#define cpu_is_omap1621() 0 +#define cpu_is_omap1710() 0 + +#define cpu_class_is_omap1() 1 + +/* + * Whether we have MULTI_OMAP1 or not, we still need to distinguish + * between 310 vs. 1510 and 1611B/5912 vs. 1710. + */ + +#if defined(CONFIG_ARCH_OMAP15XX) +# undef cpu_is_omap310 +# undef cpu_is_omap1510 +# define cpu_is_omap310() is_omap310() +# define cpu_is_omap1510() is_omap1510() +#endif + +#if defined(CONFIG_ARCH_OMAP16XX) +# undef cpu_is_omap1610 +# undef cpu_is_omap1611 +# undef cpu_is_omap5912 +# undef cpu_is_omap1621 +# undef cpu_is_omap1710 +# define cpu_is_omap1610() is_omap1610() +# define cpu_is_omap1611() is_omap1611() +# define cpu_is_omap5912() is_omap5912() +# define cpu_is_omap1621() is_omap1621() +# define cpu_is_omap1710() is_omap1710() +#endif + +#endif diff --git a/include/linux/soc/ti/omap1-usb.h b/include/linux/soc/ti/omap1-usb.h new file mode 100644 index 0000000000..6748869860 --- /dev/null +++ b/include/linux/soc/ti/omap1-usb.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SOC_TI_OMAP1_USB +#define __SOC_TI_OMAP1_USB +/* + * Constants in this file are used all over the place, in platform + * code, as well as the udc, phy and ohci drivers. + * This is not a great design, but unlikely to get fixed after + * such a long time. Don't do this elsewhere. 
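+ *
+ * As an illustration of that pattern (a sketch only, not a recommended
+ * idiom), platform and driver code access these registers directly:
+ *
+ *	u32 syscon = omap_readl(OTG_SYSCON_1);
+ *	omap_writel(syscon | OTG_IDLE_EN, OTG_SYSCON_1);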
+ */ + +#define OMAP1_OTG_BASE 0xfffb0400 +#define OMAP1_UDC_BASE 0xfffb4000 + +#define OMAP2_UDC_BASE 0x4805e200 +#define OMAP2_OTG_BASE 0x4805e300 +#define OTG_BASE OMAP1_OTG_BASE +#define UDC_BASE OMAP1_UDC_BASE + +/* + * OTG and transceiver registers, for OMAPs starting with ARM926 + */ +#define OTG_REV (OTG_BASE + 0x00) +#define OTG_SYSCON_1 (OTG_BASE + 0x04) +# define USB2_TRX_MODE(w) (((w)>>24)&0x07) +# define USB1_TRX_MODE(w) (((w)>>20)&0x07) +# define USB0_TRX_MODE(w) (((w)>>16)&0x07) +# define OTG_IDLE_EN (1 << 15) +# define HST_IDLE_EN (1 << 14) +# define DEV_IDLE_EN (1 << 13) +# define OTG_RESET_DONE (1 << 2) +# define OTG_SOFT_RESET (1 << 1) +#define OTG_SYSCON_2 (OTG_BASE + 0x08) +# define OTG_EN (1 << 31) +# define USBX_SYNCHRO (1 << 30) +# define OTG_MST16 (1 << 29) +# define SRP_GPDATA (1 << 28) +# define SRP_GPDVBUS (1 << 27) +# define SRP_GPUVBUS(w) (((w)>>24)&0x07) +# define A_WAIT_VRISE(w) (((w)>>20)&0x07) +# define B_ASE_BRST(w) (((w)>>16)&0x07) +# define SRP_DPW (1 << 14) +# define SRP_DATA (1 << 13) +# define SRP_VBUS (1 << 12) +# define OTG_PADEN (1 << 10) +# define HMC_PADEN (1 << 9) +# define UHOST_EN (1 << 8) +# define HMC_TLLSPEED (1 << 7) +# define HMC_TLLATTACH (1 << 6) +# define OTG_HMC(w) (((w)>>0)&0x3f) +#define OTG_CTRL (OTG_BASE + 0x0c) +# define OTG_USB2_EN (1 << 29) +# define OTG_USB2_DP (1 << 28) +# define OTG_USB2_DM (1 << 27) +# define OTG_USB1_EN (1 << 26) +# define OTG_USB1_DP (1 << 25) +# define OTG_USB1_DM (1 << 24) +# define OTG_USB0_EN (1 << 23) +# define OTG_USB0_DP (1 << 22) +# define OTG_USB0_DM (1 << 21) +# define OTG_ASESSVLD (1 << 20) +# define OTG_BSESSEND (1 << 19) +# define OTG_BSESSVLD (1 << 18) +# define OTG_VBUSVLD (1 << 17) +# define OTG_ID (1 << 16) +# define OTG_DRIVER_SEL (1 << 15) +# define OTG_A_SETB_HNPEN (1 << 12) +# define OTG_A_BUSREQ (1 << 11) +# define OTG_B_HNPEN (1 << 9) +# define OTG_B_BUSREQ (1 << 8) +# define OTG_BUSDROP (1 << 7) +# define OTG_PULLDOWN (1 << 5) +# define OTG_PULLUP (1 << 4) +# define OTG_DRV_VBUS (1 << 3) +# define OTG_PD_VBUS (1 << 2) +# define OTG_PU_VBUS (1 << 1) +# define OTG_PU_ID (1 << 0) +#define OTG_IRQ_EN (OTG_BASE + 0x10) /* 16-bit */ +# define DRIVER_SWITCH (1 << 15) +# define A_VBUS_ERR (1 << 13) +# define A_REQ_TMROUT (1 << 12) +# define A_SRP_DETECT (1 << 11) +# define B_HNP_FAIL (1 << 10) +# define B_SRP_TMROUT (1 << 9) +# define B_SRP_DONE (1 << 8) +# define B_SRP_STARTED (1 << 7) +# define OPRT_CHG (1 << 0) +#define OTG_IRQ_SRC (OTG_BASE + 0x14) /* 16-bit */ + // same bits as in IRQ_EN +#define OTG_OUTCTRL (OTG_BASE + 0x18) /* 16-bit */ +# define OTGVPD (1 << 14) +# define OTGVPU (1 << 13) +# define OTGPUID (1 << 12) +# define USB2VDR (1 << 10) +# define USB2PDEN (1 << 9) +# define USB2PUEN (1 << 8) +# define USB1VDR (1 << 6) +# define USB1PDEN (1 << 5) +# define USB1PUEN (1 << 4) +# define USB0VDR (1 << 2) +# define USB0PDEN (1 << 1) +# define USB0PUEN (1 << 0) +#define OTG_TEST (OTG_BASE + 0x20) /* 16-bit */ +#define OTG_VENDOR_CODE (OTG_BASE + 0xfc) /* 16-bit */ + +/*-------------------------------------------------------------------------*/ + +/* OMAP1 */ +#define USB_TRANSCEIVER_CTRL (0xfffe1000 + 0x0064) +# define CONF_USB2_UNI_R (1 << 8) +# define CONF_USB1_UNI_R (1 << 7) +# define CONF_USB_PORT0_R(x) (((x)>>4)&0x7) +# define CONF_USB0_ISOLATE_R (1 << 3) +# define CONF_USB_PWRDN_DM_R (1 << 2) +# define CONF_USB_PWRDN_DP_R (1 << 1) + +#endif diff --git a/include/linux/usb/onboard_hub.h b/include/linux/usb/onboard_hub.h new file mode 100644 index 0000000000..d937323055 --- 
/dev/null +++ b/include/linux/usb/onboard_hub.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_USB_ONBOARD_HUB_H +#define __LINUX_USB_ONBOARD_HUB_H + +struct usb_device; +struct list_head; + +#if IS_ENABLED(CONFIG_USB_ONBOARD_HUB) +void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list); +void onboard_hub_destroy_pdevs(struct list_head *pdev_list); +#else +static inline void onboard_hub_create_pdevs(struct usb_device *parent_hub, + struct list_head *pdev_list) {} +static inline void onboard_hub_destroy_pdevs(struct list_head *pdev_list) {} +#endif + +#endif /* __LINUX_USB_ONBOARD_HUB_H */ diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h new file mode 100644 index 0000000000..20c0bedb8e --- /dev/null +++ b/include/linux/usb/tcpci.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2015-2017 Google, Inc + * + * USB Type-C Port Controller Interface. + */ + +#ifndef __LINUX_USB_TCPCI_H +#define __LINUX_USB_TCPCI_H + +#include +#include + +#define TCPC_VENDOR_ID 0x0 +#define TCPC_PRODUCT_ID 0x2 +#define TCPC_BCD_DEV 0x4 +#define TCPC_TC_REV 0x6 +#define TCPC_PD_REV 0x8 +#define TCPC_PD_INT_REV 0xa + +#define TCPC_ALERT 0x10 +#define TCPC_ALERT_EXTND BIT(14) +#define TCPC_ALERT_EXTENDED_STATUS BIT(13) +#define TCPC_ALERT_VBUS_DISCNCT BIT(11) +#define TCPC_ALERT_RX_BUF_OVF BIT(10) +#define TCPC_ALERT_FAULT BIT(9) +#define TCPC_ALERT_V_ALARM_LO BIT(8) +#define TCPC_ALERT_V_ALARM_HI BIT(7) +#define TCPC_ALERT_TX_SUCCESS BIT(6) +#define TCPC_ALERT_TX_DISCARDED BIT(5) +#define TCPC_ALERT_TX_FAILED BIT(4) +#define TCPC_ALERT_RX_HARD_RST BIT(3) +#define TCPC_ALERT_RX_STATUS BIT(2) +#define TCPC_ALERT_POWER_STATUS BIT(1) +#define TCPC_ALERT_CC_STATUS BIT(0) + +#define TCPC_ALERT_MASK 0x12 +#define TCPC_POWER_STATUS_MASK 0x14 +#define TCPC_FAULT_STATUS_MASK 0x15 + +#define TCPC_EXTENDED_STATUS_MASK 0x16 +#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0) + +#define TCPC_ALERT_EXTENDED_MASK 0x17 +#define TCPC_SINK_FAST_ROLE_SWAP BIT(0) + +#define TCPC_CONFIG_STD_OUTPUT 0x18 + +#define TCPC_TCPC_CTRL 0x19 +#define TCPC_TCPC_CTRL_ORIENTATION BIT(0) +#define PLUG_ORNT_CC1 0 +#define PLUG_ORNT_CC2 1 +#define TCPC_TCPC_CTRL_BIST_TM BIT(1) +#define TCPC_TCPC_CTRL_EN_LK4CONN_ALRT BIT(6) + +#define TCPC_EXTENDED_STATUS 0x20 +#define TCPC_EXTENDED_STATUS_VSAFE0V BIT(0) + +#define TCPC_ROLE_CTRL 0x1a +#define TCPC_ROLE_CTRL_DRP BIT(6) +#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4 +#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3 +#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0 +#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1 +#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2 +#define TCPC_ROLE_CTRL_CC2_SHIFT 2 +#define TCPC_ROLE_CTRL_CC2_MASK 0x3 +#define TCPC_ROLE_CTRL_CC1_SHIFT 0 +#define TCPC_ROLE_CTRL_CC1_MASK 0x3 +#define TCPC_ROLE_CTRL_CC_RA 0x0 +#define TCPC_ROLE_CTRL_CC_RP 0x1 +#define TCPC_ROLE_CTRL_CC_RD 0x2 +#define TCPC_ROLE_CTRL_CC_OPEN 0x3 + +#define TCPC_FAULT_CTRL 0x1b + +#define TCPC_POWER_CTRL 0x1c +#define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0) +#define TCPC_POWER_CTRL_BLEED_DISCHARGE BIT(3) +#define TCPC_POWER_CTRL_AUTO_DISCHARGE BIT(4) +#define TCPC_DIS_VOLT_ALRM BIT(5) +#define TCPC_POWER_CTRL_VBUS_VOLT_MON BIT(6) +#define TCPC_FAST_ROLE_SWAP_EN BIT(7) + +#define TCPC_CC_STATUS 0x1d +#define TCPC_CC_STATUS_TOGGLING BIT(5) +#define TCPC_CC_STATUS_TERM BIT(4) +#define TCPC_CC_STATUS_TERM_RP 0 +#define TCPC_CC_STATUS_TERM_RD 1 +#define TCPC_CC_STATE_SRC_OPEN 0 +#define TCPC_CC_STATUS_CC2_SHIFT 2 +#define TCPC_CC_STATUS_CC2_MASK 0x3 +#define 
TCPC_CC_STATUS_CC1_SHIFT 0 +#define TCPC_CC_STATUS_CC1_MASK 0x3 + +#define TCPC_POWER_STATUS 0x1e +#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7) +#define TCPC_POWER_STATUS_UNINIT BIT(6) +#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4) +#define TCPC_POWER_STATUS_VBUS_DET BIT(3) +#define TCPC_POWER_STATUS_VBUS_PRES BIT(2) +#define TCPC_POWER_STATUS_VCONN_PRES BIT(1) +#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0) + +#define TCPC_FAULT_STATUS 0x1f + +#define TCPC_ALERT_EXTENDED 0x21 + +#define TCPC_COMMAND 0x23 +#define TCPC_CMD_WAKE_I2C 0x11 +#define TCPC_CMD_DISABLE_VBUS_DETECT 0x22 +#define TCPC_CMD_ENABLE_VBUS_DETECT 0x33 +#define TCPC_CMD_DISABLE_SINK_VBUS 0x44 +#define TCPC_CMD_SINK_VBUS 0x55 +#define TCPC_CMD_DISABLE_SRC_VBUS 0x66 +#define TCPC_CMD_SRC_VBUS_DEFAULT 0x77 +#define TCPC_CMD_SRC_VBUS_HIGH 0x88 +#define TCPC_CMD_LOOK4CONNECTION 0x99 +#define TCPC_CMD_RXONEMORE 0xAA +#define TCPC_CMD_I2C_IDLE 0xFF + +#define TCPC_DEV_CAP_1 0x24 +#define TCPC_DEV_CAP_2 0x26 +#define TCPC_STD_INPUT_CAP 0x28 +#define TCPC_STD_OUTPUT_CAP 0x29 + +#define TCPC_MSG_HDR_INFO 0x2e +#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3) +#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0) +#define TCPC_MSG_HDR_INFO_REV_SHIFT 1 +#define TCPC_MSG_HDR_INFO_REV_MASK 0x3 + +#define TCPC_RX_DETECT 0x2f +#define TCPC_RX_DETECT_HARD_RESET BIT(5) +#define TCPC_RX_DETECT_SOP BIT(0) +#define TCPC_RX_DETECT_SOP1 BIT(1) +#define TCPC_RX_DETECT_SOP2 BIT(2) +#define TCPC_RX_DETECT_DBG1 BIT(3) +#define TCPC_RX_DETECT_DBG2 BIT(4) + +#define TCPC_RX_BYTE_CNT 0x30 +#define TCPC_RX_BUF_FRAME_TYPE 0x31 +#define TCPC_RX_BUF_FRAME_TYPE_SOP 0 +#define TCPC_RX_HDR 0x32 +#define TCPC_RX_DATA 0x34 /* through 0x4f */ + +#define TCPC_TRANSMIT 0x50 +#define TCPC_TRANSMIT_RETRY_SHIFT 4 +#define TCPC_TRANSMIT_RETRY_MASK 0x3 +#define TCPC_TRANSMIT_TYPE_SHIFT 0 +#define TCPC_TRANSMIT_TYPE_MASK 0x7 + +#define TCPC_TX_BYTE_CNT 0x51 +#define TCPC_TX_HDR 0x52 +#define TCPC_TX_DATA 0x54 /* through 0x6f */ + +#define TCPC_VBUS_VOLTAGE 0x70 +#define TCPC_VBUS_VOLTAGE_MASK 0x3ff +#define TCPC_VBUS_VOLTAGE_LSB_MV 25 +#define TCPC_VBUS_SINK_DISCONNECT_THRESH 0x72 +#define TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV 25 +#define TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX 0x3ff +#define TCPC_VBUS_STOP_DISCHARGE_THRESH 0x74 +#define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76 +#define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78 + +/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible I2C_WRITE_BYTE_COUNT */ +#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31 + +struct tcpci; + +/* + * @TX_BUF_BYTE_x_hidden: + * optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT. + * @frs_sourcing_vbus: + * Optional; Callback to perform chip specific operations when FRS + * is sourcing vbus. + * @auto_discharge_disconnect: + * Optional; Enables TCPC to autonously discharge vbus on disconnect. + * @vbus_vsafe0v: + * optional; Set when TCPC can detect whether vbus is at VSAFE0V. + * @set_partner_usb_comm_capable: + * Optional; The USB Communications Capable bit indicates if port + * partner is capable of communication over the USB data lines + * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit. 
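+ *
+ * A vendor TCPC driver normally embeds this struct, fills in the regmap
+ * and whichever callbacks it needs, then registers the port. A sketch,
+ * where chip and my_set_vconn are hypothetical driver names:
+ *
+ *	chip->data.regmap = chip->regmap;
+ *	chip->data.set_vconn = my_set_vconn;
+ *	chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
+ *	if (IS_ERR(chip->tcpci))
+ *		return PTR_ERR(chip->tcpci);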
+ */ +struct tcpci_data { + struct regmap *regmap; + unsigned char TX_BUF_BYTE_x_hidden:1; + unsigned char auto_discharge_disconnect:1; + unsigned char vbus_vsafe0v:1; + + int (*init)(struct tcpci *tcpci, struct tcpci_data *data); + int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data, + bool enable); + int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data, + enum typec_cc_status cc); + int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink); + void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data); + void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data, + bool capable); +}; + +struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data); +void tcpci_unregister_port(struct tcpci *tcpci); +irqreturn_t tcpci_irq(struct tcpci *tcpci); + +struct tcpm_port; +struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci); +#endif /* __LINUX_USB_TCPCI_H */ diff --git a/include/linux/usb/typec_retimer.h b/include/linux/usb/typec_retimer.h new file mode 100644 index 0000000000..5e036b3360 --- /dev/null +++ b/include/linux/usb/typec_retimer.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __USB_TYPEC_RETIMER +#define __USB_TYPEC_RETIMER + +#include +#include + +struct device; +struct typec_retimer; +struct typec_altmode; +struct fwnode_handle; + +struct typec_retimer_state { + struct typec_altmode *alt; + unsigned long mode; + void *data; +}; + +typedef int (*typec_retimer_set_fn_t)(struct typec_retimer *retimer, + struct typec_retimer_state *state); + +struct typec_retimer_desc { + struct fwnode_handle *fwnode; + typec_retimer_set_fn_t set; + const char *name; + void *drvdata; +}; + +struct typec_retimer *fwnode_typec_retimer_get(struct fwnode_handle *fwnode); +void typec_retimer_put(struct typec_retimer *retimer); +int typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state); + +static inline struct typec_retimer *typec_retimer_get(struct device *dev) +{ + return fwnode_typec_retimer_get(dev_fwnode(dev)); +} + +struct typec_retimer * +typec_retimer_register(struct device *parent, const struct typec_retimer_desc *desc); +void typec_retimer_unregister(struct typec_retimer *retimer); + +void *typec_retimer_get_drvdata(struct typec_retimer *retimer); + +#endif /* __USB_TYPEC_RETIMER */ diff --git a/include/linux/virtio_anchor.h b/include/linux/virtio_anchor.h new file mode 100644 index 0000000000..432e6c00b3 --- /dev/null +++ b/include/linux/virtio_anchor.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VIRTIO_ANCHOR_H +#define _LINUX_VIRTIO_ANCHOR_H + +#ifdef CONFIG_VIRTIO_ANCHOR +struct virtio_device; + +bool virtio_require_restricted_mem_acc(struct virtio_device *dev); +extern bool (*virtio_check_mem_acc_cb)(struct virtio_device *dev); + +static inline void virtio_set_mem_acc_cb(bool (*func)(struct virtio_device *)) +{ + virtio_check_mem_acc_cb = func; +} +#else +#define virtio_set_mem_acc_cb(func) do { } while (0) +#endif + +#endif /* _LINUX_VIRTIO_ANCHOR_H */ diff --git a/include/net/bluetooth/iso.h b/include/net/bluetooth/iso.h new file mode 100644 index 0000000000..3f4fe8b78e --- /dev/null +++ b/include/net/bluetooth/iso.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2022 Intel Corporation + */ + +#ifndef __ISO_H +#define __ISO_H + +/* ISO defaults */ +#define ISO_DEFAULT_MTU 251 +#define ISO_MAX_NUM_BIS 0x1f + +/* ISO 
socket broadcast address */
+struct sockaddr_iso_bc {
+	bdaddr_t	bc_bdaddr;
+	__u8		bc_bdaddr_type;
+	__u8		bc_sid;
+	__u8		bc_num_bis;
+	__u8		bc_bis[ISO_MAX_NUM_BIS];
+};
+
+/* ISO socket address */
+struct sockaddr_iso {
+	sa_family_t	iso_family;
+	bdaddr_t	iso_bdaddr;
+	__u8		iso_bdaddr_type;
+	struct sockaddr_iso_bc iso_bc[];
+};
+
+#endif /* __ISO_H */
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
new file mode 100644
index 0000000000..fae9b40e54
--- /dev/null
+++ b/include/net/dropreason.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_DROPREASON_H
+#define _LINUX_DROPREASON_H
+
+/**
+ * enum skb_drop_reason - the reasons of skb drops
+ *
+ * The reason of skb drop, which is used in kfree_skb_reason().
+ */
+enum skb_drop_reason {
+	/**
+	 * @SKB_NOT_DROPPED_YET: skb is not dropped yet (used for no-drop case)
+	 */
+	SKB_NOT_DROPPED_YET = 0,
+	/** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */
+	SKB_DROP_REASON_NOT_SPECIFIED,
+	/** @SKB_DROP_REASON_NO_SOCKET: socket not found */
+	SKB_DROP_REASON_NO_SOCKET,
+	/** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
+	SKB_DROP_REASON_PKT_TOO_SMALL,
+	/** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */
+	SKB_DROP_REASON_TCP_CSUM,
+	/** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
+	SKB_DROP_REASON_SOCKET_FILTER,
+	/** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */
+	SKB_DROP_REASON_UDP_CSUM,
+	/** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */
+	SKB_DROP_REASON_NETFILTER_DROP,
+	/**
+	 * @SKB_DROP_REASON_OTHERHOST: the packet doesn't belong to the
+	 * current host (interface is in promisc mode)
+	 */
+	SKB_DROP_REASON_OTHERHOST,
+	/** @SKB_DROP_REASON_IP_CSUM: IP checksum error */
+	SKB_DROP_REASON_IP_CSUM,
+	/**
+	 * @SKB_DROP_REASON_IP_INHDR: there is something wrong with the IP
+	 * header (see IPSTATS_MIB_INHDRERRORS)
+	 */
+	SKB_DROP_REASON_IP_INHDR,
+	/**
+	 * @SKB_DROP_REASON_IP_RPFILTER: IP rpfilter validation failed. See the
+	 * documentation for rp_filter in ip-sysctl.rst for more information
+	 */
+	SKB_DROP_REASON_IP_RPFILTER,
+	/**
+	 * @SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST: destination address of L2
+	 * is multicast, but L3 is unicast.
+	 */
+	SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST,
+	/** @SKB_DROP_REASON_XFRM_POLICY: xfrm policy check failed */
+	SKB_DROP_REASON_XFRM_POLICY,
+	/** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */
+	SKB_DROP_REASON_IP_NOPROTO,
+	/** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buffer is full */
+	SKB_DROP_REASON_SOCKET_RCVBUFF,
+	/**
+	 * @SKB_DROP_REASON_PROTO_MEM: protocol memory limitation, such as
+	 * a udp packet dropped because udp_memory_allocated is exceeded.
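+	 * Such drops are reported by passing this value to the
+	 * kfree_skb_reason() helper named above, for example
+	 * kfree_skb_reason(skb, SKB_DROP_REASON_PROTO_MEM).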
+	/**
+	 * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash where one was
+	 * expected, corresponding to LINUX_MIB_TCPMD5NOTFOUND
+	 */
+	SKB_DROP_REASON_TCP_MD5NOTFOUND,
+	/**
+	 * @SKB_DROP_REASON_TCP_MD5UNEXPECTED: an MD5 hash where none was
+	 * expected, corresponding to LINUX_MIB_TCPMD5UNEXPECTED
+	 */
+	SKB_DROP_REASON_TCP_MD5UNEXPECTED,
+	/**
+	 * @SKB_DROP_REASON_TCP_MD5FAILURE: an MD5 hash that is wrong,
+	 * corresponding to LINUX_MIB_TCPMD5FAILURE
+	 */
+	SKB_DROP_REASON_TCP_MD5FAILURE,
+	/**
+	 * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add the skb to the socket
+	 * backlog (see LINUX_MIB_TCPBACKLOGDROP)
+	 */
+	SKB_DROP_REASON_SOCKET_BACKLOG,
+	/** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
+	SKB_DROP_REASON_TCP_FLAGS,
+	/**
+	 * @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
+	 * see LINUX_MIB_TCPZEROWINDOWDROP
+	 */
+	SKB_DROP_REASON_TCP_ZEROWINDOW,
+	/**
+	 * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received has already
+	 * been received before (a spurious retransmission may have happened),
+	 * see LINUX_MIB_DELAYEDACKLOST
+	 */
+	SKB_DROP_REASON_TCP_OLD_DATA,
+	/**
+	 * @SKB_DROP_REASON_TCP_OVERWINDOW: the TCP data is out of window;
+	 * the sequence number of the first byte exceeds the right edge of
+	 * the receive window
+	 */
+	SKB_DROP_REASON_TCP_OVERWINDOW,
+	/**
+	 * @SKB_DROP_REASON_TCP_OFOMERGE: the data of the skb is already in
+	 * the ofo queue, corresponding to LINUX_MIB_TCPOFOMERGE
+	 */
+	SKB_DROP_REASON_TCP_OFOMERGE,
+	/**
+	 * @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check failed, corresponding
+	 * to LINUX_MIB_PAWSESTABREJECTED
+	 */
+	SKB_DROP_REASON_TCP_RFC7323_PAWS,
+	/** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: SEQ field not acceptable */
+	SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
+	/** @SKB_DROP_REASON_TCP_RESET: invalid RST packet */
+	SKB_DROP_REASON_TCP_RESET,
+	/**
+	 * @SKB_DROP_REASON_TCP_INVALID_SYN: incoming packet has an unexpected
+	 * SYN flag
+	 */
+	SKB_DROP_REASON_TCP_INVALID_SYN,
+	/** @SKB_DROP_REASON_TCP_CLOSE: TCP socket is in CLOSE state */
+	SKB_DROP_REASON_TCP_CLOSE,
+	/** @SKB_DROP_REASON_TCP_FASTOPEN: dropped by a FASTOPEN request socket */
+	SKB_DROP_REASON_TCP_FASTOPEN,
+	/** @SKB_DROP_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */
+	SKB_DROP_REASON_TCP_OLD_ACK,
+	/** @SKB_DROP_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */
+	SKB_DROP_REASON_TCP_TOO_OLD_ACK,
+	/**
+	 * @SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't
+	 * sent yet
+	 */
+	SKB_DROP_REASON_TCP_ACK_UNSENT_DATA,
+	/** @SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE: pruned from the TCP OFO queue */
+	SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE,
+	/** @SKB_DROP_REASON_TCP_OFO_DROP: data already in the receive queue */
+	SKB_DROP_REASON_TCP_OFO_DROP,
+	/** @SKB_DROP_REASON_IP_OUTNOROUTES: route lookup failed */
+	SKB_DROP_REASON_IP_OUTNOROUTES,
+	/**
+	 * @SKB_DROP_REASON_BPF_CGROUP_EGRESS: dropped by a
+	 * BPF_PROG_TYPE_CGROUP_SKB eBPF program
+	 */
+	SKB_DROP_REASON_BPF_CGROUP_EGRESS,
+	/** @SKB_DROP_REASON_IPV6DISABLED: IPv6 is disabled on the device */
+	SKB_DROP_REASON_IPV6DISABLED,
+	/** @SKB_DROP_REASON_NEIGH_CREATEFAIL: failed to create a neigh entry */
+	SKB_DROP_REASON_NEIGH_CREATEFAIL,
+	/** @SKB_DROP_REASON_NEIGH_FAILED: neigh entry is in failed state */
+	SKB_DROP_REASON_NEIGH_FAILED,
+	/** @SKB_DROP_REASON_NEIGH_QUEUEFULL: arp_queue for the neigh entry is full */
+	SKB_DROP_REASON_NEIGH_QUEUEFULL,
+	/** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */
+	SKB_DROP_REASON_NEIGH_DEAD,
+	/** @SKB_DROP_REASON_TC_EGRESS: dropped by the TC egress hook */
+	SKB_DROP_REASON_TC_EGRESS,
+	/**
+	 * @SKB_DROP_REASON_QDISC_DROP: dropped by the qdisc on the output path
+	 * (failed to enqueue to the current qdisc)
+	 */
+	SKB_DROP_REASON_QDISC_DROP,
+	/**
+	 * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the
+	 * per-CPU backlog queue. This can be caused by the backlog queue
+	 * being full (see netdev_max_backlog in net.rst) or by the RPS
+	 * flow limit
+	 */
+	SKB_DROP_REASON_CPU_BACKLOG,
+	/** @SKB_DROP_REASON_XDP: dropped by XDP in the input path */
+	SKB_DROP_REASON_XDP,
+	/** @SKB_DROP_REASON_TC_INGRESS: dropped by the TC ingress hook */
+	SKB_DROP_REASON_TC_INGRESS,
+	/** @SKB_DROP_REASON_UNHANDLED_PROTO: protocol not implemented or not supported */
+	SKB_DROP_REASON_UNHANDLED_PROTO,
+	/** @SKB_DROP_REASON_SKB_CSUM: sk_buff checksum computation error */
+	SKB_DROP_REASON_SKB_CSUM,
+	/** @SKB_DROP_REASON_SKB_GSO_SEG: GSO segmentation error */
+	SKB_DROP_REASON_SKB_GSO_SEG,
+	/**
+	 * @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space,
+	 * e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx()
+	 */
+	SKB_DROP_REASON_SKB_UCOPY_FAULT,
+	/** @SKB_DROP_REASON_DEV_HDR: device-driver-specific header/metadata is invalid */
+	SKB_DROP_REASON_DEV_HDR,
+	/**
+	 * @SKB_DROP_REASON_DEV_READY: the device is not ready to xmit/recv
+	 * because one of its data structures is not up/ready/initialized,
+	 * e.g., IFF_UP is not set, or the driver-specific tun->tfiles[txq]
+	 * is not initialized
+	 */
+	SKB_DROP_REASON_DEV_READY,
+	/** @SKB_DROP_REASON_FULL_RING: ring buffer is full */
+	SKB_DROP_REASON_FULL_RING,
+	/** @SKB_DROP_REASON_NOMEM: error due to OOM */
+	SKB_DROP_REASON_NOMEM,
+	/**
+	 * @SKB_DROP_REASON_HDR_TRUNC: failed to truncate/extract the header
+	 * from the networking data, e.g., failed to pull the protocol header
+	 * from the frags via pskb_may_pull()
+	 */
+	SKB_DROP_REASON_HDR_TRUNC,
+	/**
+	 * @SKB_DROP_REASON_TAP_FILTER: dropped by the (eBPF) filter directly
+	 * attached to tun/tap, e.g., via TUNSETFILTEREBPF
+	 */
+	SKB_DROP_REASON_TAP_FILTER,
+	/**
+	 * @SKB_DROP_REASON_TAP_TXFILTER: dropped by the tx filter implemented
+	 * at tun/tap, e.g., check_filter()
+	 */
+	SKB_DROP_REASON_TAP_TXFILTER,
+	/** @SKB_DROP_REASON_ICMP_CSUM: ICMP checksum error */
+	SKB_DROP_REASON_ICMP_CSUM,
+	/**
+	 * @SKB_DROP_REASON_INVALID_PROTO: the packet doesn't follow RFC 1122,
+	 * such as an ICMP_TIMESTAMP sent to a broadcast address
+	 */
+	SKB_DROP_REASON_INVALID_PROTO,
+	/**
+	 * @SKB_DROP_REASON_IP_INADDRERRORS: host unreachable, corresponding to
+	 * IPSTATS_MIB_INADDRERRORS
+	 */
+	SKB_DROP_REASON_IP_INADDRERRORS,
+	/**
+	 * @SKB_DROP_REASON_IP_INNOROUTES: network unreachable, corresponding to
+	 * IPSTATS_MIB_INNOROUTES
+	 */
+	SKB_DROP_REASON_IP_INNOROUTES,
+	/**
+	 * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (it may exceed
+	 * the MTU)
+	 */
+	SKB_DROP_REASON_PKT_TOO_BIG,
+	/**
+	 * @SKB_DROP_REASON_MAX: the maximum drop reason value, which shouldn't
+	 * be used as a real 'reason'
+	 */
+	SKB_DROP_REASON_MAX,
+};
+
+#define SKB_DR_INIT(name, reason)				\
+	enum skb_drop_reason name = SKB_DROP_REASON_##reason
+#define SKB_DR(name)						\
+	SKB_DR_INIT(name, NOT_SPECIFIED)
+#define SKB_DR_SET(name, reason)				\
+	(name = SKB_DROP_REASON_##reason)
+#define SKB_DR_OR(name, reason)					\
+	do {							\
+		if (name == SKB_DROP_REASON_NOT_SPECIFIED ||	\
+		    name == SKB_NOT_DROPPED_YET)		\
+			SKB_DR_SET(name, reason);		\
+	} while (0)
+
+extern const char * const drop_reasons[];
+
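+/*
+ * Usage sketch (illustrative): a receive path can track a drop reason in
+ * a local variable, defaulting to NOT_SPECIFIED, and hand it to
+ * kfree_skb_reason() on its error path. foo_rcv() and struct foohdr are
+ * hypothetical:
+ *
+ *	static void foo_rcv(struct sk_buff *skb)
+ *	{
+ *		SKB_DR(reason);
+ *
+ *		if (!pskb_may_pull(skb, sizeof(struct foohdr))) {
+ *			SKB_DR_SET(reason, HDR_TRUNC);
+ *			goto drop;
+ *		}
+ *		...
+ *	drop:
+ *		kfree_skb_reason(skb, reason);
+ *	}
+ */
+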
+#endif /* _LINUX_DROPREASON_H */
diff --git a/include/net/net_debug.h b/include/net/net_debug.h
new file mode 100644
index 0000000000..1e74684cbb
--- /dev/null
+++ b/include/net/net_debug.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
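+
+/*
+ * Usage sketch (illustrative): a driver that keeps a msg_enable bitmap in
+ * its private structure can gate its messages with the netif_* helpers
+ * declared below. foo_priv and foo_reset() are hypothetical:
+ *
+ *	struct foo_priv {
+ *		struct net_device *dev;
+ *		u32 msg_enable;
+ *	};
+ *
+ *	static void foo_reset(struct foo_priv *priv)
+ *	{
+ *		netif_warn(priv, hw, priv->dev, "resetting the chip\n");
+ *	}
+ */
+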
+#ifndef _LINUX_NET_DEBUG_H +#define _LINUX_NET_DEBUG_H + +#include +#include + +struct net_device; + +__printf(3, 4) __cold +void netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...); +__printf(2, 3) __cold +void netdev_emerg(const struct net_device *dev, const char *format, ...); +__printf(2, 3) __cold +void netdev_alert(const struct net_device *dev, const char *format, ...); +__printf(2, 3) __cold +void netdev_crit(const struct net_device *dev, const char *format, ...); +__printf(2, 3) __cold +void netdev_err(const struct net_device *dev, const char *format, ...); +__printf(2, 3) __cold +void netdev_warn(const struct net_device *dev, const char *format, ...); +__printf(2, 3) __cold +void netdev_notice(const struct net_device *dev, const char *format, ...); +__printf(2, 3) __cold +void netdev_info(const struct net_device *dev, const char *format, ...); + +#define netdev_level_once(level, dev, fmt, ...) \ +do { \ + static bool __section(".data.once") __print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) + +#define netdev_emerg_once(dev, fmt, ...) \ + netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) +#define netdev_alert_once(dev, fmt, ...) \ + netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) +#define netdev_crit_once(dev, fmt, ...) \ + netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) +#define netdev_err_once(dev, fmt, ...) \ + netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) +#define netdev_warn_once(dev, fmt, ...) \ + netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) +#define netdev_notice_once(dev, fmt, ...) \ + netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) +#define netdev_info_once(dev, fmt, ...) \ + netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) + +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_netdev_dbg(__dev, format, ##args); \ +} while (0) +#elif defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#else +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ +}) +#endif + +#if defined(VERBOSE_DEBUG) +#define netdev_vdbg netdev_dbg +#else + +#define netdev_vdbg(dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +/* netif printk helpers, similar to netdev_printk */ + +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#define netif_level(level, priv, type, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#define netif_notice(priv, type, dev, fmt, args...) 
\ + netif_level(notice, priv, type, dev, fmt, ##args) +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) + +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#define netif_dbg(priv, type, netdev, format, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + dynamic_netdev_dbg(netdev, format, ##args); \ +} while (0) +#elif defined(DEBUG) +#define netif_dbg(priv, type, dev, format, args...) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) +#else +#define netif_dbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +/* if @cond then downgrade to debug, else print at @level */ +#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ + do { \ + if (cond) \ + netif_dbg(priv, type, netdev, fmt, ##args); \ + else \ + netif_ ## level(priv, type, netdev, fmt, ##args); \ + } while (0) + +#if defined(VERBOSE_DEBUG) +#define netif_vdbg netif_dbg +#else +#define netif_vdbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + + +#if defined(CONFIG_DEBUG_NET) +#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) +#else +#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) +#endif + +#endif /* _LINUX_NET_DEBUG_H */ diff --git a/include/net/netns/flow_table.h b/include/net/netns/flow_table.h new file mode 100644 index 0000000000..1c5fc657e2 --- /dev/null +++ b/include/net/netns/flow_table.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NETNS_FLOW_TABLE_H +#define __NETNS_FLOW_TABLE_H + +struct nf_flow_table_stat { + unsigned int count_wq_add; + unsigned int count_wq_del; + unsigned int count_wq_stats; +}; + +struct netns_ft { + struct nf_flow_table_stat __percpu *stat; +}; +#endif diff --git a/include/pcmcia/soc_common.h b/include/pcmcia/soc_common.h new file mode 100644 index 0000000000..d4f18f4679 --- /dev/null +++ b/include/pcmcia/soc_common.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include + +struct module; +struct cpufreq_freqs; + +struct soc_pcmcia_regulator { + struct regulator *reg; + bool on; +}; + +struct pcmcia_state { + unsigned detect: 1, + ready: 1, + bvd1: 1, + bvd2: 1, + wrprot: 1, + vs_3v: 1, + vs_Xv: 1; +}; + +/* + * This structure encapsulates per-socket state which we might need to + * use when responding to a Card Services query of some kind. 
+ */ +struct soc_pcmcia_socket { + struct pcmcia_socket socket; + + /* + * Info from low level handler + */ + unsigned int nr; + struct clk *clk; + + /* + * Core PCMCIA state + */ + const struct pcmcia_low_level *ops; + + unsigned int status; + socket_state_t cs_state; + + unsigned short spd_io[MAX_IO_WIN]; + unsigned short spd_mem[MAX_WIN]; + unsigned short spd_attr[MAX_WIN]; + + struct resource res_skt; + struct resource res_io; + struct resource res_io_io; + struct resource res_mem; + struct resource res_attr; + + struct { + int gpio; + struct gpio_desc *desc; + unsigned int irq; + const char *name; + } stat[6]; +#define SOC_STAT_CD 0 /* Card detect */ +#define SOC_STAT_BVD1 1 /* BATDEAD / IOSTSCHG */ +#define SOC_STAT_BVD2 2 /* BATWARN / IOSPKR */ +#define SOC_STAT_RDY 3 /* Ready / Interrupt */ +#define SOC_STAT_VS1 4 /* Voltage sense 1 */ +#define SOC_STAT_VS2 5 /* Voltage sense 2 */ + + struct gpio_desc *gpio_reset; + struct gpio_desc *gpio_bus_enable; + struct soc_pcmcia_regulator vcc; + struct soc_pcmcia_regulator vpp; + + unsigned int irq_state; + +#ifdef CONFIG_CPU_FREQ + struct notifier_block cpufreq_nb; +#endif + struct timer_list poll_timer; + struct list_head node; + void *driver_data; +}; + + +struct pcmcia_low_level { + struct module *owner; + + /* first socket in system */ + int first; + /* nr of sockets */ + int nr; + + int (*hw_init)(struct soc_pcmcia_socket *); + void (*hw_shutdown)(struct soc_pcmcia_socket *); + + void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *); + int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *); + + /* + * Enable card status IRQs on (re-)initialisation. This can + * be called at initialisation, power management event, or + * pcmcia event. + */ + void (*socket_init)(struct soc_pcmcia_socket *); + + /* + * Disable card status IRQs and PCMCIA bus on suspend. + */ + void (*socket_suspend)(struct soc_pcmcia_socket *); + + /* + * Hardware specific timing routines. + * If provided, the get_timing routine overrides the SOC default. + */ + unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int); + int (*set_timing)(struct soc_pcmcia_socket *); + int (*show_timing)(struct soc_pcmcia_socket *, char *); + +#ifdef CONFIG_CPU_FREQ + /* + * CPUFREQ support. + */ + int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *); +#endif +}; + + + diff --git a/include/rv/automata.h b/include/rv/automata.h new file mode 100644 index 0000000000..eb9e636809 --- /dev/null +++ b/include/rv/automata.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira + * + * Deterministic automata helper functions, to be used with the automata + * models in C generated by the dot2k tool. + */ + +/* + * DECLARE_AUTOMATA_HELPERS - define a set of helper functions for automata + * + * Define a set of helper functions for automata. The 'name' argument is used + * as suffix for the functions and data. These functions will handle automaton + * with data type 'type'. 
+ */
+#define DECLARE_AUTOMATA_HELPERS(name, type)				\
+									\
+/*									\
+ * model_get_state_name_##name - return the (string) name of the given state \
+ */									\
+static char *model_get_state_name_##name(enum states_##name state)	\
+{									\
+	if ((state < 0) || (state >= state_max_##name))			\
+		return "INVALID";					\
+									\
+	return automaton_##name.state_names[state];			\
+}									\
+									\
+/*									\
+ * model_get_event_name_##name - return the (string) name of the given event \
+ */									\
+static char *model_get_event_name_##name(enum events_##name event)	\
+{									\
+	if ((event < 0) || (event >= event_max_##name))			\
+		return "INVALID";					\
+									\
+	return automaton_##name.event_names[event];			\
+}									\
+									\
+/*									\
+ * model_get_initial_state_##name - return the automaton's initial state \
+ */									\
+static inline type model_get_initial_state_##name(void)		\
+{									\
+	return automaton_##name.initial_state;				\
+}									\
+									\
+/*									\
+ * model_get_next_state_##name - process an automaton event occurrence	\
+ *									\
+ * Given the current state (curr_state) and the event (event), returns	\
+ * the next state, or INVALID_STATE in case of error.			\
+ */									\
+static inline type model_get_next_state_##name(enum states_##name curr_state, \
+					       enum events_##name event) \
+{									\
+	if ((curr_state < 0) || (curr_state >= state_max_##name))	\
+		return INVALID_STATE;					\
+									\
+	if ((event < 0) || (event >= event_max_##name))			\
+		return INVALID_STATE;					\
+									\
+	return automaton_##name.function[curr_state][event];		\
+}									\
+									\
+/*									\
+ * model_is_final_state_##name - check if the given state is a final state \
+ */									\
+static inline bool model_is_final_state_##name(enum states_##name state) \
+{									\
+	if ((state < 0) || (state >= state_max_##name))			\
+		return 0;						\
+									\
+	return automaton_##name.final_states[state];			\
+}
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
new file mode 100644
index 0000000000..9eb75683e0
--- /dev/null
+++ b/include/rv/da_monitor.h
@@ -0,0 +1,544 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira
+ *
+ * Deterministic automata (DA) monitor functions, to be used together
+ * with automata models in C generated by the dot2k tool.
+ *
+ * The dot2k tool is available at tools/verification/dot2k/
+ *
+ * For further information, see:
+ * Documentation/trace/rv/da_monitor_synthesis.rst
+ */
+
+#include <rv/automata.h>
+#include <linux/rv.h>
+#include <linux/bug.h>
+
+#ifdef CONFIG_RV_REACTORS
+
+#define DECLARE_RV_REACTING_HELPERS(name, type)				\
+static char REACT_MSG_##name[1024];					\
+									\
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{									\
+	snprintf(REACT_MSG_##name, 1024,				\
+		 "rv: monitor %s does not allow event %s on state %s\n", \
+		 #name,							\
+		 model_get_event_name_##name(event),			\
+		 model_get_state_name_##name(curr_state));		\
+	return REACT_MSG_##name;					\
+}									\
+									\
+static void cond_react_##name(char *msg)				\
+{									\
+	if (rv_##name.react)						\
+		rv_##name.react(msg);					\
+}									\
+									\
+static bool rv_reacting_on_##name(void)					\
+{									\
+	return rv_reacting_on();					\
+}
+
+#else /* CONFIG_RV_REACTORS */
+
+#define DECLARE_RV_REACTING_HELPERS(name, type)				\
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{									\
+	return NULL;							\
+}									\
+									\
+static void cond_react_##name(char *msg)				\
+{									\
+	return;								\
+}									\
+									\
+static bool rv_reacting_on_##name(void)					\
+{									\
+	return 0;							\
+}
+#endif
+
+/*
+ * Generic helpers for all types of deterministic automata monitors.
+ */
+#define DECLARE_DA_MON_GENERIC_HELPERS(name, type)			\
+									\
+DECLARE_RV_REACTING_HELPERS(name, type)					\
+									\
+/*									\
+ * da_monitor_reset_##name - reset a monitor, setting it to the initial state \
+ */									\
+static inline void da_monitor_reset_##name(struct da_monitor *da_mon)	\
+{									\
+	da_mon->monitoring = 0;						\
+	da_mon->curr_state = model_get_initial_state_##name();		\
+}									\
+									\
+/*									\
+ * da_monitor_curr_state_##name - return the current state		\
+ */									\
+static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
+{									\
+	return da_mon->curr_state;					\
+}									\
+									\
+/*									\
+ * da_monitor_set_state_##name - set the new current state		\
+ */									\
+static inline void							\
+da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
+{									\
+	da_mon->curr_state = state;					\
+}									\
+									\
+/*									\
+ * da_monitor_start_##name - start monitoring				\
+ *									\
+ * The monitor will ignore all events until monitoring is set to true. This \
+ * function needs to be called to tell the monitor to start monitoring. \
+ */									\
+static inline void da_monitor_start_##name(struct da_monitor *da_mon)	\
+{									\
+	da_mon->curr_state = model_get_initial_state_##name();		\
+	da_mon->monitoring = 1;						\
+}									\
+									\
+/*									\
+ * da_monitoring_##name - returns true if the monitor is processing events \
+ */									\
+static inline bool da_monitoring_##name(struct da_monitor *da_mon)	\
+{									\
+	return da_mon->monitoring;					\
+}									\
+									\
+/*									\
+ * da_monitor_enabled_##name - checks if the monitor is enabled	\
+ */									\
+static inline bool da_monitor_enabled_##name(void)			\
+{									\
+	/* global switch */						\
+	if (unlikely(!rv_monitoring_on()))				\
+		return 0;						\
+									\
+	/* monitor enabled */						\
+	if (unlikely(!rv_##name.enabled))				\
+		return 0;						\
+									\
+	return 1;							\
+}									\
+									\
+/*									\
+ * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \
+ */									\
+static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \
+{									\
+	if (!da_monitor_enabled_##name())				\
+		return 0;						\
+									\
+	/* monitor is actually monitoring */				\
+	if (unlikely(!da_monitoring_##name(da_mon)))			\
+		return 0;						\
+									\
+	return 1;							\
+}
+
+/*
+ * Event handler for implicit monitors. An implicit monitor is one for which
+ * the handler does not need to specify which da_monitor to manipulate.
+ * Examples of implicit monitors are the per-cpu and the global ones.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type)		\
+									\
+static inline bool							\
+da_event_##name(struct da_monitor *da_mon, enum events_##name event)	\
+{									\
+	type curr_state = da_monitor_curr_state_##name(da_mon);	\
+	type next_state = model_get_next_state_##name(curr_state, event); \
+									\
+	if (next_state != INVALID_STATE) {				\
+		da_monitor_set_state_##name(da_mon, next_state);	\
+									\
+		trace_event_##name(model_get_state_name_##name(curr_state), \
+				   model_get_event_name_##name(event),	\
+				   model_get_state_name_##name(next_state), \
+				   model_is_final_state_##name(next_state)); \
+									\
+		return true;						\
+	}								\
+									\
+	if (rv_reacting_on_##name())					\
+		cond_react_##name(format_react_msg_##name(curr_state, event)); \
+									\
+	trace_error_##name(model_get_state_name_##name(curr_state),	\
+			   model_get_event_name_##name(event));		\
+									\
+	return false;							\
+}
+
+/*
+ * Event handler for per-task monitors.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type)		\
+									\
+static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+				   enum events_##name event)		\
+{									\
+	type curr_state = da_monitor_curr_state_##name(da_mon);	\
+	type next_state = model_get_next_state_##name(curr_state, event); \
+									\
+	if (next_state != INVALID_STATE) {				\
+		da_monitor_set_state_##name(da_mon, next_state);	\
+									\
+		trace_event_##name(tsk->pid,				\
+				   model_get_state_name_##name(curr_state), \
+				   model_get_event_name_##name(event),	\
+				   model_get_state_name_##name(next_state), \
+				   model_is_final_state_##name(next_state)); \
+									\
+		return true;						\
+	}								\
+									\
+	if (rv_reacting_on_##name())					\
+		cond_react_##name(format_react_msg_##name(curr_state, event)); \
+									\
+	trace_error_##name(tsk->pid,					\
+			   model_get_state_name_##name(curr_state),	\
+			   model_get_event_name_##name(event));		\
+									\
+	return false;							\
+}
+
+/*
+ * Functions to define, init and get a global monitor.
+ */
+#define DECLARE_DA_MON_INIT_GLOBAL(name, type)				\
+									\
+/*									\
+ * global monitor (a single variable)					\
+ */									\
+static struct da_monitor da_mon_##name;					\
+									\
+/*									\
+ * da_get_monitor_##name - return the global monitor address		\
+ */									\
+static struct da_monitor *da_get_monitor_##name(void)			\
+{									\
+	return &da_mon_##name;						\
+}									\
+									\
+/*									\
+ * da_monitor_reset_all_##name - reset the single monitor		\
+ */									\
+static void da_monitor_reset_all_##name(void)				\
+{									\
+	da_monitor_reset_##name(da_get_monitor_##name());		\
+}									\
+									\
+/*									\
+ * da_monitor_init_##name - initialize a monitor			\
+ */									\
+static inline int da_monitor_init_##name(void)				\
+{									\
+	da_monitor_reset_all_##name();					\
+	return 0;							\
+}									\
+									\
+/*									\
+ * da_monitor_destroy_##name - destroy the monitor			\
+ */									\
+static inline void da_monitor_destroy_##name(void)			\
+{									\
+	return;								\
+}
+
+/*
+ * Functions to define, init and get a per-cpu monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_CPU(name, type)				\
+									\
+/*									\
+ * per-cpu monitor variables						\
+ */									\
+DEFINE_PER_CPU(struct da_monitor, da_mon_##name);			\
+									\
+/*									\
+ * da_get_monitor_##name - return the current CPU's monitor address	\
+ */									\
+static struct da_monitor *da_get_monitor_##name(void)			\
+{									\
+	return this_cpu_ptr(&da_mon_##name);				\
+}									\
+									\
+/*									\
+ * da_monitor_reset_all_##name - reset all CPUs' monitors		\
+ */									\
+static void da_monitor_reset_all_##name(void)				\
+{									\
+	struct da_monitor *da_mon;					\
+	int cpu;							\
+	for_each_cpu(cpu, cpu_online_mask) {				\
+		da_mon = per_cpu_ptr(&da_mon_##name, cpu);		\
+		da_monitor_reset_##name(da_mon);			\
+	}								\
+}									\
+									\
+/*									\
+ * da_monitor_init_##name - initialize all CPUs' monitors		\
+ */									\
+static inline int da_monitor_init_##name(void)				\
+{									\
+	da_monitor_reset_all_##name();					\
+	return 0;							\
+}									\
+									\
+/*									\
+ * da_monitor_destroy_##name - destroy the monitor			\
+ */									\
+static inline void da_monitor_destroy_##name(void)			\
+{									\
+	return;								\
+}
+
+/*
+ * Functions to define, init and get a per-task monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_TASK(name, type)			\
+									\
+/*									\
+ * The per-task monitor is stored in a vector in the task struct. This	\
+ * variable stores the position in that vector reserved for this monitor. \
+ */									\
+static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT;		\
+									\
+/*									\
+ * da_get_monitor_##name - return the monitor in the allocated slot for tsk \
+ */									\
+static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \
+{									\
+	return &tsk->rv[task_mon_slot_##name].da_mon;			\
+}									\
+									\
+static void da_monitor_reset_all_##name(void)				\
+{									\
+	struct task_struct *g, *p;					\
+									\
+	read_lock(&tasklist_lock);					\
+	for_each_process_thread(g, p)					\
+		da_monitor_reset_##name(da_get_monitor_##name(p));	\
+	read_unlock(&tasklist_lock);					\
+}									\
+									\
+/*									\
+ * da_monitor_init_##name - initialize the per-task monitor		\
+ *									\
+ * Try to allocate a slot in the per-task vector of monitors. If a	\
+ * slot is available, use it and reset all tasks' monitors.		\
+ */									\
+static int da_monitor_init_##name(void)				\
+{									\
+	int slot;							\
+									\
+	slot = rv_get_task_monitor_slot();				\
+	if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT)		\
+		return slot;						\
+									\
+	task_mon_slot_##name = slot;					\
+									\
+	da_monitor_reset_all_##name();					\
+	return 0;							\
+}									\
+									\
+/*									\
+ * da_monitor_destroy_##name - return the allocated slot		\
+ */									\
+static inline void da_monitor_destroy_##name(void)			\
+{									\
+	if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) {	\
+		WARN_ONCE(1, "Disabling a disabled monitor: " #name);	\
+		return;							\
+	}								\
+	rv_put_task_monitor_slot(task_mon_slot_##name);			\
+	task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT;		\
+	return;								\
+}
+
+/*
+ * Handle events for an implicit monitor: da_get_monitor_##name() will
+ * figure out the monitor.
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)		\
+									\
+static inline void __da_handle_event_##name(struct da_monitor *da_mon,	\
+					    enum events_##name event)	\
+{									\
+	bool retval;							\
+									\
+	retval = da_event_##name(da_mon, event);			\
+	if (!retval)							\
+		da_monitor_reset_##name(da_mon);			\
+}									\
+									\
+/*									\
+ * da_handle_event_##name - handle an event				\
+ */									\
+static inline void da_handle_event_##name(enum events_##name event)	\
+{									\
+	struct da_monitor *da_mon = da_get_monitor_##name();		\
+	bool retval;							\
+									\
+	retval = da_monitor_handling_event_##name(da_mon);		\
+	if (!retval)							\
+		return;							\
+									\
+	__da_handle_event_##name(da_mon, event);			\
+}									\
+									\
+/*									\
+ * da_handle_start_event_##name - start monitoring or handle event	\
+ *									\
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring at the next event. \
+ * Thus:								\
+ *									\
+ * If the monitor already started, handle the event.			\
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */									\
+static inline bool da_handle_start_event_##name(enum events_##name event) \
+{									\
+	struct da_monitor *da_mon;					\
+									\
+	if (!da_monitor_enabled_##name())				\
+		return 0;						\
+									\
+	da_mon = da_get_monitor_##name();				\
+									\
+	if (unlikely(!da_monitoring_##name(da_mon))) {			\
+		da_monitor_start_##name(da_mon);			\
+		return 0;						\
+	}								\
+									\
+	__da_handle_event_##name(da_mon, event);			\
+									\
+	return 1;							\
+}									\
+									\
+/*									\
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ *									\
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handling the event. \
+ */									\
+static inline bool da_handle_start_run_event_##name(enum events_##name event) \
+{									\
+	struct da_monitor *da_mon;					\
+									\
+	if (!da_monitor_enabled_##name())				\
+		return 0;						\
+									\
+	da_mon = da_get_monitor_##name();				\
+									\
+	if (unlikely(!da_monitoring_##name(da_mon)))			\
+		da_monitor_start_##name(da_mon);			\
+									\
+	__da_handle_event_##name(da_mon, event);			\
+									\
+	return 1;							\
+}
+
+/*
+ * Handle events for per-task monitors.
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type)		\
+									\
+static inline void							\
+__da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+			 enum events_##name event)			\
+{									\
+	bool retval;							\
+									\
+	retval = da_event_##name(da_mon, tsk, event);			\
+	if (!retval)							\
+		da_monitor_reset_##name(da_mon);			\
+}									\
+									\
+/*									\
+ * da_handle_event_##name - handle an event				\
+ */									\
+static inline void							\
+da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \
+{									\
+	struct da_monitor *da_mon = da_get_monitor_##name(tsk);	\
+	bool retval;							\
+									\
+	retval = da_monitor_handling_event_##name(da_mon);		\
+	if (!retval)							\
+		return;							\
+									\
+	__da_handle_event_##name(da_mon, tsk, event);			\
+}									\
+									\
+/*									\
+ * da_handle_start_event_##name - start monitoring or handle event	\
+ *									\
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring at the next event. \
+ * Thus:								\
+ *									\
+ * If the monitor already started, handle the event.			\
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */									\
+static inline bool							\
+da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \
+{									\
+	struct da_monitor *da_mon;					\
+									\
+	if (!da_monitor_enabled_##name())				\
+		return 0;						\
+									\
+	da_mon = da_get_monitor_##name(tsk);				\
+									\
+	if (unlikely(!da_monitoring_##name(da_mon))) {			\
+		da_monitor_start_##name(da_mon);			\
+		return 0;						\
+	}								\
+									\
+	__da_handle_event_##name(da_mon, tsk, event);			\
+									\
+	return 1;							\
+}
+
+/*
+ * Entry point for the global monitor.
+ */
+#define DECLARE_DA_MON_GLOBAL(name, type)				\
+									\
+DECLARE_AUTOMATA_HELPERS(name, type)					\
+DECLARE_DA_MON_GENERIC_HELPERS(name, type)				\
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type)			\
+DECLARE_DA_MON_INIT_GLOBAL(name, type)					\
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-cpu monitor.
+ */
+#define DECLARE_DA_MON_PER_CPU(name, type)				\
+									\
+DECLARE_AUTOMATA_HELPERS(name, type)					\
+DECLARE_DA_MON_GENERIC_HELPERS(name, type)				\
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type)			\
+DECLARE_DA_MON_INIT_PER_CPU(name, type)					\
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-task monitor.
+ */
+#define DECLARE_DA_MON_PER_TASK(name, type)				\
+									\
+DECLARE_AUTOMATA_HELPERS(name, type)					\
+DECLARE_DA_MON_GENERIC_HELPERS(name, type)				\
+DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type)			\
+DECLARE_DA_MON_INIT_PER_TASK(name, type)				\
+DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type)
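+
+/*
+ * Usage sketch (illustrative): a dot2k-generated monitor named "foo"
+ * would instantiate one of the entry points above and then feed events
+ * to the generated handlers from its tracepoint callbacks. The names
+ * foo, handle_bar and the generated event bar_foo are hypothetical:
+ *
+ *	DECLARE_DA_MON_GLOBAL(foo, unsigned char);
+ *
+ *	static void handle_bar(void *data, bool state)
+ *	{
+ *		da_handle_event_foo(bar_foo);
+ *	}
+ */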
diff --git a/include/rv/instrumentation.h b/include/rv/instrumentation.h
new file mode 100644
index 0000000000..d4e7a02ede
--- /dev/null
+++ b/include/rv/instrumentation.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira
+ *
+ * Helper functions to facilitate the instrumentation of auto-generated
+ * RV monitors created by dot2k.
+ *
+ * The dot2k tool is available at tools/verification/dot2/
+ */
+
+#include <linux/ftrace.h>
+
+/*
+ * rv_attach_trace_probe - check and attach a handler function to a tracepoint
+ */
+#define rv_attach_trace_probe(monitor, tp, rv_handler)			\
+	do {								\
+		check_trace_callback_type_##tp(rv_handler);		\
+		WARN_ONCE(register_trace_##tp(rv_handler, NULL),	\
+			  "fail attaching " #monitor " " #tp " handler"); \
+	} while (0)
+
+/*
+ * rv_detach_trace_probe - detach a handler function from a tracepoint
+ */
+#define rv_detach_trace_probe(monitor, tp, rv_handler)			\
+	do {								\
+		unregister_trace_##tp(rv_handler, NULL);		\
+	} while (0)
diff --git a/include/soc/bcm2835/raspberrypi-clocks.h b/include/soc/bcm2835/raspberrypi-clocks.h
new file mode 100644
index 0000000000..6275358779
--- /dev/null
+++ b/include/soc/bcm2835/raspberrypi-clocks.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SOC_RASPBERRY_CLOCKS_H__
+#define __SOC_RASPBERRY_CLOCKS_H__
+
+#if IS_ENABLED(CONFIG_CLK_RASPBERRYPI)
+unsigned long rpi_firmware_clk_get_max_rate(struct clk *clk);
+unsigned long rpi_firmware_clk_get_min_rate(struct clk *clk);
+#else
+static inline unsigned long rpi_firmware_clk_get_max_rate(struct clk *clk)
+{
+	return ULONG_MAX;
+}
+
+static inline unsigned long rpi_firmware_clk_get_min_rate(struct clk *clk)
+{
+	return 0;
+}
+#endif
+
+#endif /* __SOC_RASPBERRY_CLOCKS_H__ */
diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h
new file mode 100644
index 0000000000..937cac52f3
--- /dev/null
+++ b/include/soc/fsl/caam-blob.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Pengutronix, Ahmad Fatoum
+ */
+
+#ifndef __CAAM_BLOB_GEN
+#define __CAAM_BLOB_GEN
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define CAAM_BLOB_KEYMOD_LENGTH		16
+#define CAAM_BLOB_OVERHEAD		(32 + 16)
+#define CAAM_BLOB_MAX_LEN		4096
+
+struct caam_blob_priv;
+
+/**
+ * struct caam_blob_info - information for CAAM blobbing
+ * @input:       pointer to input buffer (must be DMAable)
+ * @input_len:   length of @input buffer in bytes.
+ * @output:      pointer to output buffer (must be DMAable)
+ * @output_len:  length of @output buffer in bytes.
+ * @key_mod:     key modifier
+ * @key_mod_len: length of @key_mod in bytes.
+ *	         May not exceed %CAAM_BLOB_KEYMOD_LENGTH
+ */
+struct caam_blob_info {
+	void *input;
+	size_t input_len;
+
+	void *output;
+	size_t output_len;
+
+	const void *key_mod;
+	size_t key_mod_len;
+};
+
+/**
+ * caam_blob_gen_init - initialize blob generation
+ * Return: pointer to new &struct caam_blob_priv instance on success
+ * and ``ERR_PTR(-ENODEV)`` if CAAM has no hardware blobbing support
+ * or no job ring could be allocated.
+ */
+struct caam_blob_priv *caam_blob_gen_init(void);
+
+/**
+ * caam_blob_gen_exit - free blob generation resources
+ * @priv: instance returned by caam_blob_gen_init()
+ */
+void caam_blob_gen_exit(struct caam_blob_priv *priv);
+
+/**
+ * caam_process_blob - encapsulate or decapsulate blob
+ * @priv:  instance returned by caam_blob_gen_init()
+ * @info:  pointer to blobbing info describing key, blob and
+ *         key modifier buffers.
+ * @encap: true for encapsulation, false for decapsulation
+ *
+ * Return: %0 and sets ``info->output_len`` on success and a negative
+ * error code otherwise.
+ */
+int caam_process_blob(struct caam_blob_priv *priv,
+		      struct caam_blob_info *info, bool encap);
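+
+/*
+ * Usage sketch (illustrative): sealing a 32-byte key into a blob. Error
+ * handling is elided; key and blob must be DMA-able buffers and the
+ * names are hypothetical:
+ *
+ *	struct caam_blob_priv *priv = caam_blob_gen_init();
+ *	struct caam_blob_info info = {
+ *		.input = key,
+ *		.input_len = 32,
+ *		.output = blob,
+ *		.output_len = 32 + CAAM_BLOB_OVERHEAD,
+ *		.key_mod = "example-keymod",
+ *		.key_mod_len = 14,
+ *	};
+ *	int err = caam_encap_blob(priv, &info);
+ */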
+/**
+ * caam_encap_blob - encapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing input key,
+ *        output blob and key modifier buffers.
+ *
+ * Return: %0 and sets ``info->output_len`` on success and
+ * a negative error code otherwise.
+ */
+static inline int caam_encap_blob(struct caam_blob_priv *priv,
+				  struct caam_blob_info *info)
+{
+	if (info->output_len < info->input_len + CAAM_BLOB_OVERHEAD)
+		return -EINVAL;
+
+	return caam_process_blob(priv, info, true);
+}
+
+/**
+ * caam_decap_blob - decapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing output key,
+ *        input blob and key modifier buffers.
+ *
+ * Return: %0 and sets ``info->output_len`` on success and
+ * a negative error code otherwise.
+ */
+static inline int caam_decap_blob(struct caam_blob_priv *priv,
+				  struct caam_blob_info *info)
+{
+	if (info->input_len < CAAM_BLOB_OVERHEAD ||
+	    info->output_len < info->input_len - CAAM_BLOB_OVERHEAD)
+		return -EINVAL;
+
+	return caam_process_blob(priv, info, false);
+}
+
+#endif
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
new file mode 100644
index 0000000000..72398ff447
--- /dev/null
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Linaro. All rights reserved.
+ * Author: Caleb Connolly
+ */
+
+#ifndef __QCOM_SPMI_PMIC_H__
+#define __QCOM_SPMI_PMIC_H__
+
+#include <linux/device.h>
+
+#define COMMON_SUBTYPE		0x00
+#define PM8941_SUBTYPE		0x01
+#define PM8841_SUBTYPE		0x02
+#define PM8019_SUBTYPE		0x03
+#define PM8226_SUBTYPE		0x04
+#define PM8110_SUBTYPE		0x05
+#define PMA8084_SUBTYPE		0x06
+#define PMI8962_SUBTYPE		0x07
+#define PMD9635_SUBTYPE		0x08
+#define PM8994_SUBTYPE		0x09
+#define PMI8994_SUBTYPE		0x0a
+#define PM8916_SUBTYPE		0x0b
+#define PM8004_SUBTYPE		0x0c
+#define PM8909_SUBTYPE		0x0d
+#define PM8028_SUBTYPE		0x0e
+#define PM8901_SUBTYPE		0x0f
+#define PM8950_SUBTYPE		0x10
+#define PMI8950_SUBTYPE		0x11
+#define PM8998_SUBTYPE		0x14
+#define PMI8998_SUBTYPE		0x15
+#define PM8005_SUBTYPE		0x18
+#define PM660L_SUBTYPE		0x1A
+#define PM660_SUBTYPE		0x1B
+#define PM8150_SUBTYPE		0x1E
+#define PM8150L_SUBTYPE		0x1f
+#define PM8150B_SUBTYPE		0x20
+#define PMK8002_SUBTYPE		0x21
+#define PM8009_SUBTYPE		0x24
+#define PM8150C_SUBTYPE		0x26
+#define SMB2351_SUBTYPE		0x29
+
+#define PMI8998_FAB_ID_SMIC	0x11
+#define PMI8998_FAB_ID_GF	0x30
+
+#define PM660_FAB_ID_GF		0x0
+#define PM660_FAB_ID_TSMC	0x2
+#define PM660_FAB_ID_MX		0x3
+
+struct qcom_spmi_pmic {
+	unsigned int type;
+	unsigned int subtype;
+	unsigned int major;
+	unsigned int minor;
+	unsigned int rev2;
+	unsigned int fab_id;
+	const char *name;
+};
+
+const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev);
+
+#endif /* __QCOM_SPMI_PMIC_H__ */
diff --git a/include/soc/rockchip/pm_domains.h b/include/soc/rockchip/pm_domains.h
new file mode 100644
index 0000000000..7dbd941fc9
--- /dev/null
+++ b/include/soc/rockchip/pm_domains.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2022, The Chromium OS Authors. All rights reserved.
+ */ + +#ifndef __SOC_ROCKCHIP_PM_DOMAINS_H__ +#define __SOC_ROCKCHIP_PM_DOMAINS_H__ + +#ifdef CONFIG_ROCKCHIP_PM_DOMAINS + +int rockchip_pmu_block(void); +void rockchip_pmu_unblock(void); + +#else /* CONFIG_ROCKCHIP_PM_DOMAINS */ + +static inline int rockchip_pmu_block(void) +{ + return 0; +} + +static inline void rockchip_pmu_unblock(void) { } + +#endif /* CONFIG_ROCKCHIP_PM_DOMAINS */ + +#endif /* __SOC_ROCKCHIP_PM_DOMAINS_H__ */ diff --git a/include/sound/cs42l42.h b/include/sound/cs42l42.h new file mode 100644 index 0000000000..a55d522f17 --- /dev/null +++ b/include/sound/cs42l42.h @@ -0,0 +1,810 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/sound/cs42l42.h -- Platform data for CS42L42 ALSA SoC audio driver header + * + * Copyright 2016-2022 Cirrus Logic, Inc. + * + * Author: James Schulman + * Author: Brian Austin + * Author: Michael White + */ + +#ifndef __CS42L42_H +#define __CS42L42_H + +#define CS42L42_PAGE_REGISTER 0x00 /* Page Select Register */ +#define CS42L42_WIN_START 0x00 +#define CS42L42_WIN_LEN 0x100 +#define CS42L42_RANGE_MIN 0x00 +#define CS42L42_RANGE_MAX 0x7F + +#define CS42L42_PAGE_10 0x1000 +#define CS42L42_PAGE_11 0x1100 +#define CS42L42_PAGE_12 0x1200 +#define CS42L42_PAGE_13 0x1300 +#define CS42L42_PAGE_15 0x1500 +#define CS42L42_PAGE_19 0x1900 +#define CS42L42_PAGE_1B 0x1B00 +#define CS42L42_PAGE_1C 0x1C00 +#define CS42L42_PAGE_1D 0x1D00 +#define CS42L42_PAGE_1F 0x1F00 +#define CS42L42_PAGE_20 0x2000 +#define CS42L42_PAGE_21 0x2100 +#define CS42L42_PAGE_23 0x2300 +#define CS42L42_PAGE_24 0x2400 +#define CS42L42_PAGE_25 0x2500 +#define CS42L42_PAGE_26 0x2600 +#define CS42L42_PAGE_28 0x2800 +#define CS42L42_PAGE_29 0x2900 +#define CS42L42_PAGE_2A 0x2A00 +#define CS42L42_PAGE_30 0x3000 + +#define CS42L42_CHIP_ID 0x42A42 + +/* Page 0x10 Global Registers */ +#define CS42L42_DEVID_AB (CS42L42_PAGE_10 + 0x01) +#define CS42L42_DEVID_CD (CS42L42_PAGE_10 + 0x02) +#define CS42L42_DEVID_E (CS42L42_PAGE_10 + 0x03) +#define CS42L42_FABID (CS42L42_PAGE_10 + 0x04) +#define CS42L42_REVID (CS42L42_PAGE_10 + 0x05) +#define CS42L42_FRZ_CTL (CS42L42_PAGE_10 + 0x06) + +#define CS42L42_SRC_CTL (CS42L42_PAGE_10 + 0x07) +#define CS42L42_SRC_BYPASS_DAC_SHIFT 1 +#define CS42L42_SRC_BYPASS_DAC_MASK (1 << CS42L42_SRC_BYPASS_DAC_SHIFT) + +#define CS42L42_MCLK_STATUS (CS42L42_PAGE_10 + 0x08) + +#define CS42L42_MCLK_CTL (CS42L42_PAGE_10 + 0x09) +#define CS42L42_INTERNAL_FS_SHIFT 1 +#define CS42L42_INTERNAL_FS_MASK (1 << CS42L42_INTERNAL_FS_SHIFT) + +#define CS42L42_SFTRAMP_RATE (CS42L42_PAGE_10 + 0x0A) +#define CS42L42_SLOW_START_ENABLE (CS42L42_PAGE_10 + 0x0B) +#define CS42L42_SLOW_START_EN_MASK GENMASK(6, 4) +#define CS42L42_SLOW_START_EN_SHIFT 4 +#define CS42L42_I2C_DEBOUNCE (CS42L42_PAGE_10 + 0x0E) +#define CS42L42_I2C_STRETCH (CS42L42_PAGE_10 + 0x0F) +#define CS42L42_I2C_TIMEOUT (CS42L42_PAGE_10 + 0x10) + +/* Page 0x11 Power and Headset Detect Registers */ +#define CS42L42_PWR_CTL1 (CS42L42_PAGE_11 + 0x01) +#define CS42L42_ASP_DAO_PDN_SHIFT 7 +#define CS42L42_ASP_DAO_PDN_MASK (1 << CS42L42_ASP_DAO_PDN_SHIFT) +#define CS42L42_ASP_DAI_PDN_SHIFT 6 +#define CS42L42_ASP_DAI_PDN_MASK (1 << CS42L42_ASP_DAI_PDN_SHIFT) +#define CS42L42_MIXER_PDN_SHIFT 5 +#define CS42L42_MIXER_PDN_MASK (1 << CS42L42_MIXER_PDN_SHIFT) +#define CS42L42_EQ_PDN_SHIFT 4 +#define CS42L42_EQ_PDN_MASK (1 << CS42L42_EQ_PDN_SHIFT) +#define CS42L42_HP_PDN_SHIFT 3 +#define CS42L42_HP_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT) +#define CS42L42_ADC_PDN_SHIFT 2 +#define CS42L42_ADC_PDN_MASK (1 << CS42L42_ADC_PDN_SHIFT) 
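+
+/*
+ * Usage sketch (illustrative): the PWR_CTL1 power-down fields above are
+ * typically driven through regmap, e.g., powering up the ADC by clearing
+ * its PDN bit, assuming the driver holds a regmap for the device:
+ *
+ *	regmap_update_bits(cs42l42->regmap, CS42L42_PWR_CTL1,
+ *			   CS42L42_ADC_PDN_MASK, 0);
+ */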
+#define CS42L42_PDN_ALL_SHIFT 0 +#define CS42L42_PDN_ALL_MASK (1 << CS42L42_PDN_ALL_SHIFT) + +#define CS42L42_PWR_CTL2 (CS42L42_PAGE_11 + 0x02) +#define CS42L42_ADC_SRC_PDNB_SHIFT 0 +#define CS42L42_ADC_SRC_PDNB_MASK (1 << CS42L42_ADC_SRC_PDNB_SHIFT) +#define CS42L42_DAC_SRC_PDNB_SHIFT 1 +#define CS42L42_DAC_SRC_PDNB_MASK (1 << CS42L42_DAC_SRC_PDNB_SHIFT) +#define CS42L42_ASP_DAI1_PDN_SHIFT 2 +#define CS42L42_ASP_DAI1_PDN_MASK (1 << CS42L42_ASP_DAI1_PDN_SHIFT) +#define CS42L42_SRC_PDN_OVERRIDE_SHIFT 3 +#define CS42L42_SRC_PDN_OVERRIDE_MASK (1 << CS42L42_SRC_PDN_OVERRIDE_SHIFT) +#define CS42L42_DISCHARGE_FILT_SHIFT 4 +#define CS42L42_DISCHARGE_FILT_MASK (1 << CS42L42_DISCHARGE_FILT_SHIFT) + +#define CS42L42_PWR_CTL3 (CS42L42_PAGE_11 + 0x03) +#define CS42L42_RING_SENSE_PDNB_SHIFT 1 +#define CS42L42_RING_SENSE_PDNB_MASK (1 << CS42L42_RING_SENSE_PDNB_SHIFT) +#define CS42L42_VPMON_PDNB_SHIFT 2 +#define CS42L42_VPMON_PDNB_MASK (1 << CS42L42_VPMON_PDNB_SHIFT) +#define CS42L42_SW_CLK_STP_STAT_SEL_SHIFT 5 +#define CS42L42_SW_CLK_STP_STAT_SEL_MASK (3 << CS42L42_SW_CLK_STP_STAT_SEL_SHIFT) + +#define CS42L42_RSENSE_CTL1 (CS42L42_PAGE_11 + 0x04) +#define CS42L42_RS_TRIM_R_SHIFT 0 +#define CS42L42_RS_TRIM_R_MASK (1 << CS42L42_RS_TRIM_R_SHIFT) +#define CS42L42_RS_TRIM_T_SHIFT 1 +#define CS42L42_RS_TRIM_T_MASK (1 << CS42L42_RS_TRIM_T_SHIFT) +#define CS42L42_HPREF_RS_SHIFT 2 +#define CS42L42_HPREF_RS_MASK (1 << CS42L42_HPREF_RS_SHIFT) +#define CS42L42_HSBIAS_FILT_REF_RS_SHIFT 3 +#define CS42L42_HSBIAS_FILT_REF_RS_MASK (1 << CS42L42_HSBIAS_FILT_REF_RS_SHIFT) +#define CS42L42_RING_SENSE_PU_HIZ_SHIFT 6 +#define CS42L42_RING_SENSE_PU_HIZ_MASK (1 << CS42L42_RING_SENSE_PU_HIZ_SHIFT) + +#define CS42L42_RSENSE_CTL2 (CS42L42_PAGE_11 + 0x05) +#define CS42L42_TS_RS_GATE_SHIFT 7 +#define CS42L42_TS_RS_GATE_MAS (1 << CS42L42_TS_RS_GATE_SHIFT) + +#define CS42L42_OSC_SWITCH (CS42L42_PAGE_11 + 0x07) +#define CS42L42_SCLK_PRESENT_SHIFT 0 +#define CS42L42_SCLK_PRESENT_MASK (1 << CS42L42_SCLK_PRESENT_SHIFT) + +#define CS42L42_OSC_SWITCH_STATUS (CS42L42_PAGE_11 + 0x09) +#define CS42L42_OSC_SW_SEL_STAT_SHIFT 0 +#define CS42L42_OSC_SW_SEL_STAT_MASK (3 << CS42L42_OSC_SW_SEL_STAT_SHIFT) +#define CS42L42_OSC_PDNB_STAT_SHIFT 2 +#define CS42L42_OSC_PDNB_STAT_MASK (1 << CS42L42_OSC_SW_SEL_STAT_SHIFT) + +#define CS42L42_RSENSE_CTL3 (CS42L42_PAGE_11 + 0x12) +#define CS42L42_RS_RISE_DBNCE_TIME_SHIFT 0 +#define CS42L42_RS_RISE_DBNCE_TIME_MASK (7 << CS42L42_RS_RISE_DBNCE_TIME_SHIFT) +#define CS42L42_RS_FALL_DBNCE_TIME_SHIFT 3 +#define CS42L42_RS_FALL_DBNCE_TIME_MASK (7 << CS42L42_RS_FALL_DBNCE_TIME_SHIFT) +#define CS42L42_RS_PU_EN_SHIFT 6 +#define CS42L42_RS_PU_EN_MASK (1 << CS42L42_RS_PU_EN_SHIFT) +#define CS42L42_RS_INV_SHIFT 7 +#define CS42L42_RS_INV_MASK (1 << CS42L42_RS_INV_SHIFT) + +#define CS42L42_TSENSE_CTL (CS42L42_PAGE_11 + 0x13) +#define CS42L42_TS_RISE_DBNCE_TIME_SHIFT 0 +#define CS42L42_TS_RISE_DBNCE_TIME_MASK (7 << CS42L42_TS_RISE_DBNCE_TIME_SHIFT) +#define CS42L42_TS_FALL_DBNCE_TIME_SHIFT 3 +#define CS42L42_TS_FALL_DBNCE_TIME_MASK (7 << CS42L42_TS_FALL_DBNCE_TIME_SHIFT) +#define CS42L42_TS_INV_SHIFT 7 +#define CS42L42_TS_INV_MASK (1 << CS42L42_TS_INV_SHIFT) + +#define CS42L42_TSRS_INT_DISABLE (CS42L42_PAGE_11 + 0x14) +#define CS42L42_D_RS_PLUG_DBNC_SHIFT 0 +#define CS42L42_D_RS_PLUG_DBNC_MASK (1 << CS42L42_D_RS_PLUG_DBNC_SHIFT) +#define CS42L42_D_RS_UNPLUG_DBNC_SHIFT 1 +#define CS42L42_D_RS_UNPLUG_DBNC_MASK (1 << CS42L42_D_RS_UNPLUG_DBNC_SHIFT) +#define CS42L42_D_TS_PLUG_DBNC_SHIFT 2 +#define CS42L42_D_TS_PLUG_DBNC_MASK (1 
<< CS42L42_D_TS_PLUG_DBNC_SHIFT) +#define CS42L42_D_TS_UNPLUG_DBNC_SHIFT 3 +#define CS42L42_D_TS_UNPLUG_DBNC_MASK (1 << CS42L42_D_TS_UNPLUG_DBNC_SHIFT) + +#define CS42L42_TRSENSE_STATUS (CS42L42_PAGE_11 + 0x15) +#define CS42L42_RS_PLUG_DBNC_SHIFT 0 +#define CS42L42_RS_PLUG_DBNC_MASK (1 << CS42L42_RS_PLUG_DBNC_SHIFT) +#define CS42L42_RS_UNPLUG_DBNC_SHIFT 1 +#define CS42L42_RS_UNPLUG_DBNC_MASK (1 << CS42L42_RS_UNPLUG_DBNC_SHIFT) +#define CS42L42_TS_PLUG_DBNC_SHIFT 2 +#define CS42L42_TS_PLUG_DBNC_MASK (1 << CS42L42_TS_PLUG_DBNC_SHIFT) +#define CS42L42_TS_UNPLUG_DBNC_SHIFT 3 +#define CS42L42_TS_UNPLUG_DBNC_MASK (1 << CS42L42_TS_UNPLUG_DBNC_SHIFT) + +#define CS42L42_HSDET_CTL1 (CS42L42_PAGE_11 + 0x1F) +#define CS42L42_HSDET_COMP1_LVL_SHIFT 0 +#define CS42L42_HSDET_COMP1_LVL_MASK (15 << CS42L42_HSDET_COMP1_LVL_SHIFT) +#define CS42L42_HSDET_COMP2_LVL_SHIFT 4 +#define CS42L42_HSDET_COMP2_LVL_MASK (15 << CS42L42_HSDET_COMP2_LVL_SHIFT) + +#define CS42L42_HSDET_COMP1_LVL_VAL 12 /* 1.25V Comparator */ +#define CS42L42_HSDET_COMP2_LVL_VAL 2 /* 1.75V Comparator */ +#define CS42L42_HSDET_COMP1_LVL_DEFAULT 7 /* 1V Comparator */ +#define CS42L42_HSDET_COMP2_LVL_DEFAULT 7 /* 2V Comparator */ + +#define CS42L42_HSDET_CTL2 (CS42L42_PAGE_11 + 0x20) +#define CS42L42_HSDET_AUTO_TIME_SHIFT 0 +#define CS42L42_HSDET_AUTO_TIME_MASK (3 << CS42L42_HSDET_AUTO_TIME_SHIFT) +#define CS42L42_HSBIAS_REF_SHIFT 3 +#define CS42L42_HSBIAS_REF_MASK (1 << CS42L42_HSBIAS_REF_SHIFT) +#define CS42L42_HSDET_SET_SHIFT 4 +#define CS42L42_HSDET_SET_MASK (3 << CS42L42_HSDET_SET_SHIFT) +#define CS42L42_HSDET_CTRL_SHIFT 6 +#define CS42L42_HSDET_CTRL_MASK (3 << CS42L42_HSDET_CTRL_SHIFT) + +#define CS42L42_HS_SWITCH_CTL (CS42L42_PAGE_11 + 0x21) +#define CS42L42_SW_GNDHS_HS4_SHIFT 0 +#define CS42L42_SW_GNDHS_HS4_MASK (1 << CS42L42_SW_GNDHS_HS4_SHIFT) +#define CS42L42_SW_GNDHS_HS3_SHIFT 1 +#define CS42L42_SW_GNDHS_HS3_MASK (1 << CS42L42_SW_GNDHS_HS3_SHIFT) +#define CS42L42_SW_HSB_HS4_SHIFT 2 +#define CS42L42_SW_HSB_HS4_MASK (1 << CS42L42_SW_HSB_HS4_SHIFT) +#define CS42L42_SW_HSB_HS3_SHIFT 3 +#define CS42L42_SW_HSB_HS3_MASK (1 << CS42L42_SW_HSB_HS3_SHIFT) +#define CS42L42_SW_HSB_FILT_HS4_SHIFT 4 +#define CS42L42_SW_HSB_FILT_HS4_MASK (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) +#define CS42L42_SW_HSB_FILT_HS3_SHIFT 5 +#define CS42L42_SW_HSB_FILT_HS3_MASK (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) +#define CS42L42_SW_REF_HS4_SHIFT 6 +#define CS42L42_SW_REF_HS4_MASK (1 << CS42L42_SW_REF_HS4_SHIFT) +#define CS42L42_SW_REF_HS3_SHIFT 7 +#define CS42L42_SW_REF_HS3_MASK (1 << CS42L42_SW_REF_HS3_SHIFT) + +#define CS42L42_HS_DET_STATUS (CS42L42_PAGE_11 + 0x24) +#define CS42L42_HSDET_TYPE_SHIFT 0 +#define CS42L42_HSDET_TYPE_MASK (3 << CS42L42_HSDET_TYPE_SHIFT) +#define CS42L42_HSDET_COMP1_OUT_SHIFT 6 +#define CS42L42_HSDET_COMP1_OUT_MASK (1 << CS42L42_HSDET_COMP1_OUT_SHIFT) +#define CS42L42_HSDET_COMP2_OUT_SHIFT 7 +#define CS42L42_HSDET_COMP2_OUT_MASK (1 << CS42L42_HSDET_COMP2_OUT_SHIFT) +#define CS42L42_PLUG_CTIA 0 +#define CS42L42_PLUG_OMTP 1 +#define CS42L42_PLUG_HEADPHONE 2 +#define CS42L42_PLUG_INVALID 3 + +#define CS42L42_HSDET_SW_COMP1 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \ + (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \ + (1 << CS42L42_SW_HSB_HS4_SHIFT) | \ + (0 << CS42L42_SW_HSB_HS3_SHIFT) | \ + (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \ + (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \ + (0 << CS42L42_SW_REF_HS4_SHIFT) | \ + (1 << CS42L42_SW_REF_HS3_SHIFT)) +#define CS42L42_HSDET_SW_COMP2 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \ + (0 << CS42L42_SW_GNDHS_HS3_SHIFT) | \ + (0 << 
CS42L42_SW_HSB_HS4_SHIFT) | \ + (1 << CS42L42_SW_HSB_HS3_SHIFT) | \ + (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \ + (0 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \ + (1 << CS42L42_SW_REF_HS4_SHIFT) | \ + (0 << CS42L42_SW_REF_HS3_SHIFT)) +#define CS42L42_HSDET_SW_TYPE1 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \ + (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \ + (1 << CS42L42_SW_HSB_HS4_SHIFT) | \ + (0 << CS42L42_SW_HSB_HS3_SHIFT) | \ + (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \ + (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \ + (0 << CS42L42_SW_REF_HS4_SHIFT) | \ + (1 << CS42L42_SW_REF_HS3_SHIFT)) +#define CS42L42_HSDET_SW_TYPE2 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \ + (0 << CS42L42_SW_GNDHS_HS3_SHIFT) | \ + (0 << CS42L42_SW_HSB_HS4_SHIFT) | \ + (1 << CS42L42_SW_HSB_HS3_SHIFT) | \ + (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \ + (0 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \ + (1 << CS42L42_SW_REF_HS4_SHIFT) | \ + (0 << CS42L42_SW_REF_HS3_SHIFT)) +#define CS42L42_HSDET_SW_TYPE3 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \ + (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \ + (0 << CS42L42_SW_HSB_HS4_SHIFT) | \ + (0 << CS42L42_SW_HSB_HS3_SHIFT) | \ + (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \ + (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \ + (1 << CS42L42_SW_REF_HS4_SHIFT) | \ + (1 << CS42L42_SW_REF_HS3_SHIFT)) +#define CS42L42_HSDET_SW_TYPE4 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \ + (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \ + (1 << CS42L42_SW_HSB_HS4_SHIFT) | \ + (0 << CS42L42_SW_HSB_HS3_SHIFT) | \ + (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \ + (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \ + (0 << CS42L42_SW_REF_HS4_SHIFT) | \ + (1 << CS42L42_SW_REF_HS3_SHIFT)) + +#define CS42L42_HSDET_COMP_TYPE1 1 +#define CS42L42_HSDET_COMP_TYPE2 2 +#define CS42L42_HSDET_COMP_TYPE3 0 +#define CS42L42_HSDET_COMP_TYPE4 3 + +#define CS42L42_HS_CLAMP_DISABLE (CS42L42_PAGE_11 + 0x29) +#define CS42L42_HS_CLAMP_DISABLE_SHIFT 0 +#define CS42L42_HS_CLAMP_DISABLE_MASK (1 << CS42L42_HS_CLAMP_DISABLE_SHIFT) + +/* Page 0x12 Clocking Registers */ +#define CS42L42_MCLK_SRC_SEL (CS42L42_PAGE_12 + 0x01) +#define CS42L42_MCLKDIV_SHIFT 1 +#define CS42L42_MCLKDIV_MASK (1 << CS42L42_MCLKDIV_SHIFT) +#define CS42L42_MCLK_SRC_SEL_SHIFT 0 +#define CS42L42_MCLK_SRC_SEL_MASK (1 << CS42L42_MCLK_SRC_SEL_SHIFT) + +#define CS42L42_SPDIF_CLK_CFG (CS42L42_PAGE_12 + 0x02) +#define CS42L42_FSYNC_PW_LOWER (CS42L42_PAGE_12 + 0x03) + +#define CS42L42_FSYNC_PW_UPPER (CS42L42_PAGE_12 + 0x04) +#define CS42L42_FSYNC_PULSE_WIDTH_SHIFT 0 +#define CS42L42_FSYNC_PULSE_WIDTH_MASK (0xff << \ + CS42L42_FSYNC_PULSE_WIDTH_SHIFT) + +#define CS42L42_FSYNC_P_LOWER (CS42L42_PAGE_12 + 0x05) + +#define CS42L42_FSYNC_P_UPPER (CS42L42_PAGE_12 + 0x06) +#define CS42L42_FSYNC_PERIOD_SHIFT 0 +#define CS42L42_FSYNC_PERIOD_MASK (0xff << CS42L42_FSYNC_PERIOD_SHIFT) + +#define CS42L42_ASP_CLK_CFG (CS42L42_PAGE_12 + 0x07) +#define CS42L42_ASP_SCLK_EN_SHIFT 5 +#define CS42L42_ASP_SCLK_EN_MASK (1 << CS42L42_ASP_SCLK_EN_SHIFT) +#define CS42L42_ASP_MASTER_MODE 0x01 +#define CS42L42_ASP_SLAVE_MODE 0x00 +#define CS42L42_ASP_MODE_SHIFT 4 +#define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT) +#define CS42L42_ASP_SCPOL_SHIFT 2 +#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT) +#define CS42L42_ASP_SCPOL_NOR 3 +#define CS42L42_ASP_LCPOL_SHIFT 0 +#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT) +#define CS42L42_ASP_LCPOL_INV 3 + +#define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08) +#define CS42L42_ASP_STP_SHIFT 4 +#define CS42L42_ASP_STP_MASK (1 << CS42L42_ASP_STP_SHIFT) +#define CS42L42_ASP_5050_SHIFT 3 +#define 
CS42L42_ASP_5050_MASK (1 << CS42L42_ASP_5050_SHIFT) +#define CS42L42_ASP_FSD_SHIFT 0 +#define CS42L42_ASP_FSD_MASK (7 << CS42L42_ASP_FSD_SHIFT) +#define CS42L42_ASP_FSD_0_5 1 +#define CS42L42_ASP_FSD_1_0 2 +#define CS42L42_ASP_FSD_1_5 3 +#define CS42L42_ASP_FSD_2_0 4 + +#define CS42L42_FS_RATE_EN (CS42L42_PAGE_12 + 0x09) +#define CS42L42_FS_EN_SHIFT 0 +#define CS42L42_FS_EN_MASK (0xf << CS42L42_FS_EN_SHIFT) +#define CS42L42_FS_EN_IASRC_96K 0x1 +#define CS42L42_FS_EN_OASRC_96K 0x2 + +#define CS42L42_IN_ASRC_CLK (CS42L42_PAGE_12 + 0x0A) +#define CS42L42_CLK_IASRC_SEL_SHIFT 0 +#define CS42L42_CLK_IASRC_SEL_MASK (1 << CS42L42_CLK_IASRC_SEL_SHIFT) +#define CS42L42_CLK_IASRC_SEL_6 0 +#define CS42L42_CLK_IASRC_SEL_12 1 + +#define CS42L42_OUT_ASRC_CLK (CS42L42_PAGE_12 + 0x0B) +#define CS42L42_CLK_OASRC_SEL_SHIFT 0 +#define CS42L42_CLK_OASRC_SEL_MASK (1 << CS42L42_CLK_OASRC_SEL_SHIFT) +#define CS42L42_CLK_OASRC_SEL_12 1 + +#define CS42L42_PLL_DIV_CFG1 (CS42L42_PAGE_12 + 0x0C) +#define CS42L42_SCLK_PREDIV_SHIFT 0 +#define CS42L42_SCLK_PREDIV_MASK (3 << CS42L42_SCLK_PREDIV_SHIFT) + +/* Page 0x13 Interrupt Registers */ +/* Interrupts */ +#define CS42L42_ADC_OVFL_STATUS (CS42L42_PAGE_13 + 0x01) +#define CS42L42_MIXER_STATUS (CS42L42_PAGE_13 + 0x02) +#define CS42L42_SRC_STATUS (CS42L42_PAGE_13 + 0x03) +#define CS42L42_ASP_RX_STATUS (CS42L42_PAGE_13 + 0x04) +#define CS42L42_ASP_TX_STATUS (CS42L42_PAGE_13 + 0x05) +#define CS42L42_CODEC_STATUS (CS42L42_PAGE_13 + 0x08) +#define CS42L42_DET_INT_STATUS1 (CS42L42_PAGE_13 + 0x09) +#define CS42L42_DET_INT_STATUS2 (CS42L42_PAGE_13 + 0x0A) +#define CS42L42_SRCPL_INT_STATUS (CS42L42_PAGE_13 + 0x0B) +#define CS42L42_VPMON_STATUS (CS42L42_PAGE_13 + 0x0D) +#define CS42L42_PLL_LOCK_STATUS (CS42L42_PAGE_13 + 0x0E) +#define CS42L42_TSRS_PLUG_STATUS (CS42L42_PAGE_13 + 0x0F) +/* Masks */ +#define CS42L42_ADC_OVFL_INT_MASK (CS42L42_PAGE_13 + 0x16) +#define CS42L42_ADC_OVFL_SHIFT 0 +#define CS42L42_ADC_OVFL_MASK (1 << CS42L42_ADC_OVFL_SHIFT) +#define CS42L42_ADC_OVFL_VAL_MASK CS42L42_ADC_OVFL_MASK + +#define CS42L42_MIXER_INT_MASK (CS42L42_PAGE_13 + 0x17) +#define CS42L42_MIX_CHB_OVFL_SHIFT 0 +#define CS42L42_MIX_CHB_OVFL_MASK (1 << CS42L42_MIX_CHB_OVFL_SHIFT) +#define CS42L42_MIX_CHA_OVFL_SHIFT 1 +#define CS42L42_MIX_CHA_OVFL_MASK (1 << CS42L42_MIX_CHA_OVFL_SHIFT) +#define CS42L42_EQ_OVFL_SHIFT 2 +#define CS42L42_EQ_OVFL_MASK (1 << CS42L42_EQ_OVFL_SHIFT) +#define CS42L42_EQ_BIQUAD_OVFL_SHIFT 3 +#define CS42L42_EQ_BIQUAD_OVFL_MASK (1 << CS42L42_EQ_BIQUAD_OVFL_SHIFT) +#define CS42L42_MIXER_VAL_MASK (CS42L42_MIX_CHB_OVFL_MASK | \ + CS42L42_MIX_CHA_OVFL_MASK | \ + CS42L42_EQ_OVFL_MASK | \ + CS42L42_EQ_BIQUAD_OVFL_MASK) + +#define CS42L42_SRC_INT_MASK (CS42L42_PAGE_13 + 0x18) +#define CS42L42_SRC_ILK_SHIFT 0 +#define CS42L42_SRC_ILK_MASK (1 << CS42L42_SRC_ILK_SHIFT) +#define CS42L42_SRC_OLK_SHIFT 1 +#define CS42L42_SRC_OLK_MASK (1 << CS42L42_SRC_OLK_SHIFT) +#define CS42L42_SRC_IUNLK_SHIFT 2 +#define CS42L42_SRC_IUNLK_MASK (1 << CS42L42_SRC_IUNLK_SHIFT) +#define CS42L42_SRC_OUNLK_SHIFT 3 +#define CS42L42_SRC_OUNLK_MASK (1 << CS42L42_SRC_OUNLK_SHIFT) +#define CS42L42_SRC_VAL_MASK (CS42L42_SRC_ILK_MASK | \ + CS42L42_SRC_OLK_MASK | \ + CS42L42_SRC_IUNLK_MASK | \ + CS42L42_SRC_OUNLK_MASK) + +#define CS42L42_ASP_RX_INT_MASK (CS42L42_PAGE_13 + 0x19) +#define CS42L42_ASPRX_NOLRCK_SHIFT 0 +#define CS42L42_ASPRX_NOLRCK_MASK (1 << CS42L42_ASPRX_NOLRCK_SHIFT) +#define CS42L42_ASPRX_EARLY_SHIFT 1 +#define CS42L42_ASPRX_EARLY_MASK (1 << CS42L42_ASPRX_EARLY_SHIFT) +#define 
CS42L42_ASPRX_LATE_SHIFT 2 +#define CS42L42_ASPRX_LATE_MASK (1 << CS42L42_ASPRX_LATE_SHIFT) +#define CS42L42_ASPRX_ERROR_SHIFT 3 +#define CS42L42_ASPRX_ERROR_MASK (1 << CS42L42_ASPRX_ERROR_SHIFT) +#define CS42L42_ASPRX_OVLD_SHIFT 4 +#define CS42L42_ASPRX_OVLD_MASK (1 << CS42L42_ASPRX_OVLD_SHIFT) +#define CS42L42_ASP_RX_VAL_MASK (CS42L42_ASPRX_NOLRCK_MASK | \ + CS42L42_ASPRX_EARLY_MASK | \ + CS42L42_ASPRX_LATE_MASK | \ + CS42L42_ASPRX_ERROR_MASK | \ + CS42L42_ASPRX_OVLD_MASK) + +#define CS42L42_ASP_TX_INT_MASK (CS42L42_PAGE_13 + 0x1A) +#define CS42L42_ASPTX_NOLRCK_SHIFT 0 +#define CS42L42_ASPTX_NOLRCK_MASK (1 << CS42L42_ASPTX_NOLRCK_SHIFT) +#define CS42L42_ASPTX_EARLY_SHIFT 1 +#define CS42L42_ASPTX_EARLY_MASK (1 << CS42L42_ASPTX_EARLY_SHIFT) +#define CS42L42_ASPTX_LATE_SHIFT 2 +#define CS42L42_ASPTX_LATE_MASK (1 << CS42L42_ASPTX_LATE_SHIFT) +#define CS42L42_ASPTX_SMERROR_SHIFT 3 +#define CS42L42_ASPTX_SMERROR_MASK (1 << CS42L42_ASPTX_SMERROR_SHIFT) +#define CS42L42_ASP_TX_VAL_MASK (CS42L42_ASPTX_NOLRCK_MASK | \ + CS42L42_ASPTX_EARLY_MASK | \ + CS42L42_ASPTX_LATE_MASK | \ + CS42L42_ASPTX_SMERROR_MASK) + +#define CS42L42_CODEC_INT_MASK (CS42L42_PAGE_13 + 0x1B) +#define CS42L42_PDN_DONE_SHIFT 0 +#define CS42L42_PDN_DONE_MASK (1 << CS42L42_PDN_DONE_SHIFT) +#define CS42L42_HSDET_AUTO_DONE_SHIFT 1 +#define CS42L42_HSDET_AUTO_DONE_MASK (1 << CS42L42_HSDET_AUTO_DONE_SHIFT) +#define CS42L42_CODEC_VAL_MASK (CS42L42_PDN_DONE_MASK | \ + CS42L42_HSDET_AUTO_DONE_MASK) + +#define CS42L42_SRCPL_INT_MASK (CS42L42_PAGE_13 + 0x1C) +#define CS42L42_SRCPL_ADC_LK_SHIFT 0 +#define CS42L42_SRCPL_ADC_LK_MASK (1 << CS42L42_SRCPL_ADC_LK_SHIFT) +#define CS42L42_SRCPL_DAC_LK_SHIFT 2 +#define CS42L42_SRCPL_DAC_LK_MASK (1 << CS42L42_SRCPL_DAC_LK_SHIFT) +#define CS42L42_SRCPL_ADC_UNLK_SHIFT 5 +#define CS42L42_SRCPL_ADC_UNLK_MASK (1 << CS42L42_SRCPL_ADC_UNLK_SHIFT) +#define CS42L42_SRCPL_DAC_UNLK_SHIFT 6 +#define CS42L42_SRCPL_DAC_UNLK_MASK (1 << CS42L42_SRCPL_DAC_UNLK_SHIFT) +#define CS42L42_SRCPL_VAL_MASK (CS42L42_SRCPL_ADC_LK_MASK | \ + CS42L42_SRCPL_DAC_LK_MASK | \ + CS42L42_SRCPL_ADC_UNLK_MASK | \ + CS42L42_SRCPL_DAC_UNLK_MASK) + +#define CS42L42_VPMON_INT_MASK (CS42L42_PAGE_13 + 0x1E) +#define CS42L42_VPMON_SHIFT 0 +#define CS42L42_VPMON_MASK (1 << CS42L42_VPMON_SHIFT) +#define CS42L42_VPMON_VAL_MASK CS42L42_VPMON_MASK + +#define CS42L42_PLL_LOCK_INT_MASK (CS42L42_PAGE_13 + 0x1F) +#define CS42L42_PLL_LOCK_SHIFT 0 +#define CS42L42_PLL_LOCK_MASK (1 << CS42L42_PLL_LOCK_SHIFT) +#define CS42L42_PLL_LOCK_VAL_MASK CS42L42_PLL_LOCK_MASK + +#define CS42L42_TSRS_PLUG_INT_MASK (CS42L42_PAGE_13 + 0x20) +#define CS42L42_RS_PLUG_SHIFT 0 +#define CS42L42_RS_PLUG_MASK (1 << CS42L42_RS_PLUG_SHIFT) +#define CS42L42_RS_UNPLUG_SHIFT 1 +#define CS42L42_RS_UNPLUG_MASK (1 << CS42L42_RS_UNPLUG_SHIFT) +#define CS42L42_TS_PLUG_SHIFT 2 +#define CS42L42_TS_PLUG_MASK (1 << CS42L42_TS_PLUG_SHIFT) +#define CS42L42_TS_UNPLUG_SHIFT 3 +#define CS42L42_TS_UNPLUG_MASK (1 << CS42L42_TS_UNPLUG_SHIFT) +#define CS42L42_TSRS_PLUG_VAL_MASK (CS42L42_RS_PLUG_MASK | \ + CS42L42_RS_UNPLUG_MASK | \ + CS42L42_TS_PLUG_MASK | \ + CS42L42_TS_UNPLUG_MASK) +#define CS42L42_TS_PLUG 3 +#define CS42L42_TS_UNPLUG 0 +#define CS42L42_TS_TRANS 1 + +/* + * NOTE: PLL_START must be 0 while both ADC_PDN=1 and HP_PDN=1. + * Otherwise it will prevent FILT+ from charging properly. 
+ */ +#define CS42L42_PLL_CTL1 (CS42L42_PAGE_15 + 0x01) +#define CS42L42_PLL_START_SHIFT 0 +#define CS42L42_PLL_START_MASK (1 << CS42L42_PLL_START_SHIFT) + +#define CS42L42_PLL_DIV_FRAC0 (CS42L42_PAGE_15 + 0x02) +#define CS42L42_PLL_DIV_FRAC_SHIFT 0 +#define CS42L42_PLL_DIV_FRAC_MASK (0xff << CS42L42_PLL_DIV_FRAC_SHIFT) + +#define CS42L42_PLL_DIV_FRAC1 (CS42L42_PAGE_15 + 0x03) +#define CS42L42_PLL_DIV_FRAC2 (CS42L42_PAGE_15 + 0x04) + +#define CS42L42_PLL_DIV_INT (CS42L42_PAGE_15 + 0x05) +#define CS42L42_PLL_DIV_INT_SHIFT 0 +#define CS42L42_PLL_DIV_INT_MASK (0xff << CS42L42_PLL_DIV_INT_SHIFT) + +#define CS42L42_PLL_CTL3 (CS42L42_PAGE_15 + 0x08) +#define CS42L42_PLL_DIVOUT_SHIFT 0 +#define CS42L42_PLL_DIVOUT_MASK (0xff << CS42L42_PLL_DIVOUT_SHIFT) + +#define CS42L42_PLL_CAL_RATIO (CS42L42_PAGE_15 + 0x0A) +#define CS42L42_PLL_CAL_RATIO_SHIFT 0 +#define CS42L42_PLL_CAL_RATIO_MASK (0xff << CS42L42_PLL_CAL_RATIO_SHIFT) + +#define CS42L42_PLL_CTL4 (CS42L42_PAGE_15 + 0x1B) +#define CS42L42_PLL_MODE_SHIFT 0 +#define CS42L42_PLL_MODE_MASK (3 << CS42L42_PLL_MODE_SHIFT) + +/* Page 0x19 HP Load Detect Registers */ +#define CS42L42_LOAD_DET_RCSTAT (CS42L42_PAGE_19 + 0x25) +#define CS42L42_RLA_STAT_SHIFT 0 +#define CS42L42_RLA_STAT_MASK (3 << CS42L42_RLA_STAT_SHIFT) +#define CS42L42_RLA_STAT_15_OHM 0 + +#define CS42L42_LOAD_DET_DONE (CS42L42_PAGE_19 + 0x26) +#define CS42L42_HPLOAD_DET_DONE_SHIFT 0 +#define CS42L42_HPLOAD_DET_DONE_MASK (1 << CS42L42_HPLOAD_DET_DONE_SHIFT) + +#define CS42L42_LOAD_DET_EN (CS42L42_PAGE_19 + 0x27) +#define CS42L42_HP_LD_EN_SHIFT 0 +#define CS42L42_HP_LD_EN_MASK (1 << CS42L42_HP_LD_EN_SHIFT) + +/* Page 0x1B Headset Interface Registers */ +#define CS42L42_HSBIAS_SC_AUTOCTL (CS42L42_PAGE_1B + 0x70) +#define CS42L42_HSBIAS_SENSE_TRIP_SHIFT 0 +#define CS42L42_HSBIAS_SENSE_TRIP_MASK (7 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT) +#define CS42L42_TIP_SENSE_EN_SHIFT 5 +#define CS42L42_TIP_SENSE_EN_MASK (1 << CS42L42_TIP_SENSE_EN_SHIFT) +#define CS42L42_AUTO_HSBIAS_HIZ_SHIFT 6 +#define CS42L42_AUTO_HSBIAS_HIZ_MASK (1 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) +#define CS42L42_HSBIAS_SENSE_EN_SHIFT 7 +#define CS42L42_HSBIAS_SENSE_EN_MASK (1 << CS42L42_HSBIAS_SENSE_EN_SHIFT) + +#define CS42L42_WAKE_CTL (CS42L42_PAGE_1B + 0x71) +#define CS42L42_WAKEB_CLEAR_SHIFT 0 +#define CS42L42_WAKEB_CLEAR_MASK (1 << CS42L42_WAKEB_CLEAR_SHIFT) +#define CS42L42_WAKEB_MODE_SHIFT 5 +#define CS42L42_WAKEB_MODE_MASK (1 << CS42L42_WAKEB_MODE_SHIFT) +#define CS42L42_M_HP_WAKE_SHIFT 6 +#define CS42L42_M_HP_WAKE_MASK (1 << CS42L42_M_HP_WAKE_SHIFT) +#define CS42L42_M_MIC_WAKE_SHIFT 7 +#define CS42L42_M_MIC_WAKE_MASK (1 << CS42L42_M_MIC_WAKE_SHIFT) + +#define CS42L42_ADC_DISABLE_MUTE (CS42L42_PAGE_1B + 0x72) +#define CS42L42_ADC_DISABLE_S0_MUTE_SHIFT 7 +#define CS42L42_ADC_DISABLE_S0_MUTE_MASK (1 << CS42L42_ADC_DISABLE_S0_MUTE_SHIFT) + +#define CS42L42_TIPSENSE_CTL (CS42L42_PAGE_1B + 0x73) +#define CS42L42_TIP_SENSE_DEBOUNCE_SHIFT 0 +#define CS42L42_TIP_SENSE_DEBOUNCE_MASK (3 << CS42L42_TIP_SENSE_DEBOUNCE_SHIFT) +#define CS42L42_TIP_SENSE_INV_SHIFT 5 +#define CS42L42_TIP_SENSE_INV_MASK (1 << CS42L42_TIP_SENSE_INV_SHIFT) +#define CS42L42_TIP_SENSE_CTRL_SHIFT 6 +#define CS42L42_TIP_SENSE_CTRL_MASK (3 << CS42L42_TIP_SENSE_CTRL_SHIFT) + +/* + * NOTE: DETECT_MODE must be 0 while both ADC_PDN=1 and HP_PDN=1. + * Otherwise it will prevent FILT+ from charging properly. 
+ */ +#define CS42L42_MISC_DET_CTL (CS42L42_PAGE_1B + 0x74) +#define CS42L42_PDN_MIC_LVL_DET_SHIFT 0 +#define CS42L42_PDN_MIC_LVL_DET_MASK (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT) +#define CS42L42_HSBIAS_CTL_SHIFT 1 +#define CS42L42_HSBIAS_CTL_MASK (3 << CS42L42_HSBIAS_CTL_SHIFT) +#define CS42L42_DETECT_MODE_SHIFT 3 +#define CS42L42_DETECT_MODE_MASK (3 << CS42L42_DETECT_MODE_SHIFT) + +#define CS42L42_MIC_DET_CTL1 (CS42L42_PAGE_1B + 0x75) +#define CS42L42_HS_DET_LEVEL_SHIFT 0 +#define CS42L42_HS_DET_LEVEL_MASK (0x3F << CS42L42_HS_DET_LEVEL_SHIFT) +#define CS42L42_EVENT_STAT_SEL_SHIFT 6 +#define CS42L42_EVENT_STAT_SEL_MASK (1 << CS42L42_EVENT_STAT_SEL_SHIFT) +#define CS42L42_LATCH_TO_VP_SHIFT 7 +#define CS42L42_LATCH_TO_VP_MASK (1 << CS42L42_LATCH_TO_VP_SHIFT) + +#define CS42L42_MIC_DET_CTL2 (CS42L42_PAGE_1B + 0x76) +#define CS42L42_DEBOUNCE_TIME_SHIFT 5 +#define CS42L42_DEBOUNCE_TIME_MASK (0x07 << CS42L42_DEBOUNCE_TIME_SHIFT) + +#define CS42L42_DET_STATUS1 (CS42L42_PAGE_1B + 0x77) +#define CS42L42_HSBIAS_HIZ_MODE_SHIFT 6 +#define CS42L42_HSBIAS_HIZ_MODE_MASK (1 << CS42L42_HSBIAS_HIZ_MODE_SHIFT) +#define CS42L42_TIP_SENSE_SHIFT 7 +#define CS42L42_TIP_SENSE_MASK (1 << CS42L42_TIP_SENSE_SHIFT) + +#define CS42L42_DET_STATUS2 (CS42L42_PAGE_1B + 0x78) +#define CS42L42_SHORT_TRUE_SHIFT 0 +#define CS42L42_SHORT_TRUE_MASK (1 << CS42L42_SHORT_TRUE_SHIFT) +#define CS42L42_HS_TRUE_SHIFT 1 +#define CS42L42_HS_TRUE_MASK (1 << CS42L42_HS_TRUE_SHIFT) + +#define CS42L42_DET_INT1_MASK (CS42L42_PAGE_1B + 0x79) +#define CS42L42_TIP_SENSE_UNPLUG_SHIFT 5 +#define CS42L42_TIP_SENSE_UNPLUG_MASK (1 << CS42L42_TIP_SENSE_UNPLUG_SHIFT) +#define CS42L42_TIP_SENSE_PLUG_SHIFT 6 +#define CS42L42_TIP_SENSE_PLUG_MASK (1 << CS42L42_TIP_SENSE_PLUG_SHIFT) +#define CS42L42_HSBIAS_SENSE_SHIFT 7 +#define CS42L42_HSBIAS_SENSE_MASK (1 << CS42L42_HSBIAS_SENSE_SHIFT) +#define CS42L42_DET_INT_VAL1_MASK (CS42L42_TIP_SENSE_UNPLUG_MASK | \ + CS42L42_TIP_SENSE_PLUG_MASK | \ + CS42L42_HSBIAS_SENSE_MASK) + +#define CS42L42_DET_INT2_MASK (CS42L42_PAGE_1B + 0x7A) +#define CS42L42_M_SHORT_DET_SHIFT 0 +#define CS42L42_M_SHORT_DET_MASK (1 << CS42L42_M_SHORT_DET_SHIFT) +#define CS42L42_M_SHORT_RLS_SHIFT 1 +#define CS42L42_M_SHORT_RLS_MASK (1 << CS42L42_M_SHORT_RLS_SHIFT) +#define CS42L42_M_HSBIAS_HIZ_SHIFT 2 +#define CS42L42_M_HSBIAS_HIZ_MASK (1 << CS42L42_M_HSBIAS_HIZ_SHIFT) +#define CS42L42_M_DETECT_FT_SHIFT 6 +#define CS42L42_M_DETECT_FT_MASK (1 << CS42L42_M_DETECT_FT_SHIFT) +#define CS42L42_M_DETECT_TF_SHIFT 7 +#define CS42L42_M_DETECT_TF_MASK (1 << CS42L42_M_DETECT_TF_SHIFT) +#define CS42L42_DET_INT_VAL2_MASK (CS42L42_M_SHORT_DET_MASK | \ + CS42L42_M_SHORT_RLS_MASK | \ + CS42L42_M_HSBIAS_HIZ_MASK | \ + CS42L42_M_DETECT_FT_MASK | \ + CS42L42_M_DETECT_TF_MASK) + +/* Page 0x1C Headset Bias Registers */ +#define CS42L42_HS_BIAS_CTL (CS42L42_PAGE_1C + 0x03) +#define CS42L42_HSBIAS_RAMP_SHIFT 0 +#define CS42L42_HSBIAS_RAMP_MASK (3 << CS42L42_HSBIAS_RAMP_SHIFT) +#define CS42L42_HSBIAS_PD_SHIFT 4 +#define CS42L42_HSBIAS_PD_MASK (1 << CS42L42_HSBIAS_PD_SHIFT) +#define CS42L42_HSBIAS_CAPLESS_SHIFT 7 +#define CS42L42_HSBIAS_CAPLESS_MASK (1 << CS42L42_HSBIAS_CAPLESS_SHIFT) + +/* Page 0x1D ADC Registers */ +#define CS42L42_ADC_CTL (CS42L42_PAGE_1D + 0x01) +#define CS42L42_ADC_NOTCH_DIS_SHIFT 5 +#define CS42L42_ADC_FORCE_WEAK_VCM_SHIFT 4 +#define CS42L42_ADC_INV_SHIFT 2 +#define CS42L42_ADC_DIG_BOOST_SHIFT 0 + +#define CS42L42_ADC_VOLUME (CS42L42_PAGE_1D + 0x03) +#define CS42L42_ADC_VOL_SHIFT 0 + +#define CS42L42_ADC_WNF_HPF_CTL (CS42L42_PAGE_1D + 0x04) 
+#define CS42L42_ADC_WNF_CF_SHIFT 4 +#define CS42L42_ADC_WNF_EN_SHIFT 3 +#define CS42L42_ADC_HPF_CF_SHIFT 1 +#define CS42L42_ADC_HPF_EN_SHIFT 0 + +/* Page 0x1F DAC Registers */ +#define CS42L42_DAC_CTL1 (CS42L42_PAGE_1F + 0x01) +#define CS42L42_DACB_INV_SHIFT 1 +#define CS42L42_DACA_INV_SHIFT 0 + +#define CS42L42_DAC_CTL2 (CS42L42_PAGE_1F + 0x06) +#define CS42L42_HPOUT_PULLDOWN_SHIFT 4 +#define CS42L42_HPOUT_PULLDOWN_MASK (15 << CS42L42_HPOUT_PULLDOWN_SHIFT) +#define CS42L42_HPOUT_LOAD_SHIFT 3 +#define CS42L42_HPOUT_LOAD_MASK (1 << CS42L42_HPOUT_LOAD_SHIFT) +#define CS42L42_HPOUT_CLAMP_SHIFT 2 +#define CS42L42_HPOUT_CLAMP_MASK (1 << CS42L42_HPOUT_CLAMP_SHIFT) +#define CS42L42_DAC_HPF_EN_SHIFT 1 +#define CS42L42_DAC_HPF_EN_MASK (1 << CS42L42_DAC_HPF_EN_SHIFT) +#define CS42L42_DAC_MON_EN_SHIFT 0 +#define CS42L42_DAC_MON_EN_MASK (1 << CS42L42_DAC_MON_EN_SHIFT) + +/* Page 0x20 HP CTL Registers */ +#define CS42L42_HP_CTL (CS42L42_PAGE_20 + 0x01) +#define CS42L42_HP_ANA_BMUTE_SHIFT 3 +#define CS42L42_HP_ANA_BMUTE_MASK (1 << CS42L42_HP_ANA_BMUTE_SHIFT) +#define CS42L42_HP_ANA_AMUTE_SHIFT 2 +#define CS42L42_HP_ANA_AMUTE_MASK (1 << CS42L42_HP_ANA_AMUTE_SHIFT) +#define CS42L42_HP_FULL_SCALE_VOL_SHIFT 1 +#define CS42L42_HP_FULL_SCALE_VOL_MASK (1 << CS42L42_HP_FULL_SCALE_VOL_SHIFT) + +/* Page 0x21 Class H Registers */ +#define CS42L42_CLASSH_CTL (CS42L42_PAGE_21 + 0x01) + +/* Page 0x23 Mixer Volume Registers */ +#define CS42L42_MIXER_CHA_VOL (CS42L42_PAGE_23 + 0x01) +#define CS42L42_MIXER_ADC_VOL (CS42L42_PAGE_23 + 0x02) + +#define CS42L42_MIXER_CHB_VOL (CS42L42_PAGE_23 + 0x03) +#define CS42L42_MIXER_CH_VOL_SHIFT 0 +#define CS42L42_MIXER_CH_VOL_MASK (0x3f << CS42L42_MIXER_CH_VOL_SHIFT) + +/* Page 0x24 EQ Registers */ +#define CS42L42_EQ_COEF_IN0 (CS42L42_PAGE_24 + 0x01) +#define CS42L42_EQ_COEF_IN1 (CS42L42_PAGE_24 + 0x02) +#define CS42L42_EQ_COEF_IN2 (CS42L42_PAGE_24 + 0x03) +#define CS42L42_EQ_COEF_IN3 (CS42L42_PAGE_24 + 0x04) +#define CS42L42_EQ_COEF_RW (CS42L42_PAGE_24 + 0x06) +#define CS42L42_EQ_COEF_OUT0 (CS42L42_PAGE_24 + 0x07) +#define CS42L42_EQ_COEF_OUT1 (CS42L42_PAGE_24 + 0x08) +#define CS42L42_EQ_COEF_OUT2 (CS42L42_PAGE_24 + 0x09) +#define CS42L42_EQ_COEF_OUT3 (CS42L42_PAGE_24 + 0x0A) +#define CS42L42_EQ_INIT_STAT (CS42L42_PAGE_24 + 0x0B) +#define CS42L42_EQ_START_FILT (CS42L42_PAGE_24 + 0x0C) +#define CS42L42_EQ_MUTE_CTL (CS42L42_PAGE_24 + 0x0E) + +/* Page 0x25 Audio Port Registers */ +#define CS42L42_SP_RX_CH_SEL (CS42L42_PAGE_25 + 0x01) +#define CS42L42_SP_RX_CHB_SEL_SHIFT 2 +#define CS42L42_SP_RX_CHB_SEL_MASK (3 << CS42L42_SP_RX_CHB_SEL_SHIFT) + +#define CS42L42_SP_RX_ISOC_CTL (CS42L42_PAGE_25 + 0x02) +#define CS42L42_SP_RX_RSYNC_SHIFT 6 +#define CS42L42_SP_RX_RSYNC_MASK (1 << CS42L42_SP_RX_RSYNC_SHIFT) +#define CS42L42_SP_RX_NSB_POS_SHIFT 3 +#define CS42L42_SP_RX_NSB_POS_MASK (7 << CS42L42_SP_RX_NSB_POS_SHIFT) +#define CS42L42_SP_RX_NFS_NSBB_SHIFT 2 +#define CS42L42_SP_RX_NFS_NSBB_MASK (1 << CS42L42_SP_RX_NFS_NSBB_SHIFT) +#define CS42L42_SP_RX_ISOC_MODE_SHIFT 0 +#define CS42L42_SP_RX_ISOC_MODE_MASK (3 << CS42L42_SP_RX_ISOC_MODE_SHIFT) + +#define CS42L42_SP_RX_FS (CS42L42_PAGE_25 + 0x03) +#define CS42L42_SPDIF_CH_SEL (CS42L42_PAGE_25 + 0x04) +#define CS42L42_SP_TX_ISOC_CTL (CS42L42_PAGE_25 + 0x05) +#define CS42L42_SP_TX_FS (CS42L42_PAGE_25 + 0x06) +#define CS42L42_SPDIF_SW_CTL1 (CS42L42_PAGE_25 + 0x07) + +/* Page 0x26 SRC Registers */ +#define CS42L42_SRC_SDIN_FS (CS42L42_PAGE_26 + 0x01) +#define CS42L42_SRC_SDIN_FS_SHIFT 0 +#define CS42L42_SRC_SDIN_FS_MASK (0x1f <<
CS42L42_SRC_SDIN_FS_SHIFT) + +#define CS42L42_SRC_SDOUT_FS (CS42L42_PAGE_26 + 0x09) + +/* Page 0x28 S/PDIF Registers */ +#define CS42L42_SPDIF_CTL1 (CS42L42_PAGE_28 + 0x01) +#define CS42L42_SPDIF_CTL2 (CS42L42_PAGE_28 + 0x02) +#define CS42L42_SPDIF_CTL3 (CS42L42_PAGE_28 + 0x03) +#define CS42L42_SPDIF_CTL4 (CS42L42_PAGE_28 + 0x04) + +/* Page 0x29 Serial Port TX Registers */ +#define CS42L42_ASP_TX_SZ_EN (CS42L42_PAGE_29 + 0x01) +#define CS42L42_ASP_TX_EN_SHIFT 0 +#define CS42L42_ASP_TX_CH_EN (CS42L42_PAGE_29 + 0x02) +#define CS42L42_ASP_TX0_CH2_SHIFT 1 +#define CS42L42_ASP_TX0_CH1_SHIFT 0 + +#define CS42L42_ASP_TX_CH_AP_RES (CS42L42_PAGE_29 + 0x03) +#define CS42L42_ASP_TX_CH1_AP_SHIFT 7 +#define CS42L42_ASP_TX_CH1_AP_MASK (1 << CS42L42_ASP_TX_CH1_AP_SHIFT) +#define CS42L42_ASP_TX_CH2_AP_SHIFT 6 +#define CS42L42_ASP_TX_CH2_AP_MASK (1 << CS42L42_ASP_TX_CH2_AP_SHIFT) +#define CS42L42_ASP_TX_CH2_RES_SHIFT 2 +#define CS42L42_ASP_TX_CH2_RES_MASK (3 << CS42L42_ASP_TX_CH2_RES_SHIFT) +#define CS42L42_ASP_TX_CH1_RES_SHIFT 0 +#define CS42L42_ASP_TX_CH1_RES_MASK (3 << CS42L42_ASP_TX_CH1_RES_SHIFT) +#define CS42L42_ASP_TX_CH1_BIT_MSB (CS42L42_PAGE_29 + 0x04) +#define CS42L42_ASP_TX_CH1_BIT_LSB (CS42L42_PAGE_29 + 0x05) +#define CS42L42_ASP_TX_HIZ_DLY_CFG (CS42L42_PAGE_29 + 0x06) +#define CS42L42_ASP_TX_CH2_BIT_MSB (CS42L42_PAGE_29 + 0x0A) +#define CS42L42_ASP_TX_CH2_BIT_LSB (CS42L42_PAGE_29 + 0x0B) + +/* Page 0x2A Serial Port RX Registers */ +#define CS42L42_ASP_RX_DAI0_EN (CS42L42_PAGE_2A + 0x01) +#define CS42L42_ASP_RX0_CH_EN_SHIFT 2 +#define CS42L42_ASP_RX0_CH_EN_MASK (0xf << CS42L42_ASP_RX0_CH_EN_SHIFT) +#define CS42L42_ASP_RX0_CH1_SHIFT 2 +#define CS42L42_ASP_RX0_CH2_SHIFT 3 +#define CS42L42_ASP_RX0_CH3_SHIFT 4 +#define CS42L42_ASP_RX0_CH4_SHIFT 5 + +#define CS42L42_ASP_RX_DAI0_CH1_AP_RES (CS42L42_PAGE_2A + 0x02) +#define CS42L42_ASP_RX_DAI0_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x03) +#define CS42L42_ASP_RX_DAI0_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x04) +#define CS42L42_ASP_RX_DAI0_CH2_AP_RES (CS42L42_PAGE_2A + 0x05) +#define CS42L42_ASP_RX_DAI0_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x06) +#define CS42L42_ASP_RX_DAI0_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x07) +#define CS42L42_ASP_RX_DAI0_CH3_AP_RES (CS42L42_PAGE_2A + 0x08) +#define CS42L42_ASP_RX_DAI0_CH3_BIT_MSB (CS42L42_PAGE_2A + 0x09) +#define CS42L42_ASP_RX_DAI0_CH3_BIT_LSB (CS42L42_PAGE_2A + 0x0A) +#define CS42L42_ASP_RX_DAI0_CH4_AP_RES (CS42L42_PAGE_2A + 0x0B) +#define CS42L42_ASP_RX_DAI0_CH4_BIT_MSB (CS42L42_PAGE_2A + 0x0C) +#define CS42L42_ASP_RX_DAI0_CH4_BIT_LSB (CS42L42_PAGE_2A + 0x0D) +#define CS42L42_ASP_RX_DAI1_CH1_AP_RES (CS42L42_PAGE_2A + 0x0E) +#define CS42L42_ASP_RX_DAI1_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x0F) +#define CS42L42_ASP_RX_DAI1_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x10) +#define CS42L42_ASP_RX_DAI1_CH2_AP_RES (CS42L42_PAGE_2A + 0x11) +#define CS42L42_ASP_RX_DAI1_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x12) +#define CS42L42_ASP_RX_DAI1_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x13) + +#define CS42L42_ASP_RX_CH_AP_SHIFT 6 +#define CS42L42_ASP_RX_CH_AP_MASK (1 << CS42L42_ASP_RX_CH_AP_SHIFT) +#define CS42L42_ASP_RX_CH_AP_LOW 0 +#define CS42L42_ASP_RX_CH_AP_HI 1 +#define CS42L42_ASP_RX_CH_RES_SHIFT 0 +#define CS42L42_ASP_RX_CH_RES_MASK (3 << CS42L42_ASP_RX_CH_RES_SHIFT) +#define CS42L42_ASP_RX_CH_RES_32 3 +#define CS42L42_ASP_RX_CH_RES_16 1 +#define CS42L42_ASP_RX_CH_BIT_ST_SHIFT 0 +#define CS42L42_ASP_RX_CH_BIT_ST_MASK (0xff << CS42L42_ASP_RX_CH_BIT_ST_SHIFT) + +/* Page 0x30 ID Registers */ +#define CS42L42_SUB_REVID (CS42L42_PAGE_30 + 0x14) +#define CS42L42_MAX_REGISTER 
(CS42L42_PAGE_30 + 0x14) + +/* Defines for fracturing values spread across multiple registers */ +#define CS42L42_FRAC0_VAL(val) ((val) & 0x0000ff) +#define CS42L42_FRAC1_VAL(val) (((val) & 0x00ff00) >> 8) +#define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16) + +#define CS42L42_NUM_SUPPLIES 5 +#define CS42L42_BOOT_TIME_US 3000 +#define CS42L42_PLL_DIVOUT_TIME_US 800 +#define CS42L42_CLOCK_SWITCH_DELAY_US 150 +#define CS42L42_PLL_LOCK_POLL_US 250 +#define CS42L42_PLL_LOCK_TIMEOUT_US 1250 +#define CS42L42_HP_ADC_EN_TIME_US 20000 +#define CS42L42_PDN_DONE_POLL_US 1000 +#define CS42L42_PDN_DONE_TIMEOUT_US 200000 +#define CS42L42_PDN_DONE_TIME_MS 100 +#define CS42L42_FILT_DISCHARGE_TIME_MS 46 + +#endif /* __CS42L42_H */ diff --git a/include/sound/sof/ext_manifest4.h b/include/sound/sof/ext_manifest4.h new file mode 100644 index 0000000000..ec97edcbbf --- /dev/null +++ b/include/sound/sof/ext_manifest4.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * Copyright(c) 2022 Intel Corporation. All rights reserved. + */ + +/* + * The extended manifest is a place to store metadata about the firmware that + * is known at compilation time - for example, the firmware version or the + * compiler used. This information is read on the host side before firmware + * startup. This part of the output binary is not signed. + */ + +#ifndef __SOF_FIRMWARE_EXT_MANIFEST4_H__ +#define __SOF_FIRMWARE_EXT_MANIFEST4_H__ + +#include <linux/uuid.h> + +/* In ASCII $AE1 */ +#define SOF_EXT_MAN4_MAGIC_NUMBER 0x31454124 + +#define MAX_MODULE_NAME_LEN 8 +#define MAX_FW_BINARY_NAME 8 +#define DEFAULT_HASH_SHA256_LEN 32 +#define SOF_MAN4_FW_HDR_OFFSET 0x2000 +#define SOF_MAN4_FW_HDR_OFFSET_CAVS_1_5 0x284 + +/********************************************************************* + * extended manifest (struct sof_ext_manifest4_hdr) + *------------------- + * css_manifest hdr + *------------------- + * offset reserved for future + *------------------- + * fw_hdr (struct sof_man4_fw_binary_header) + *------------------- + * module_entry[0] (struct sof_man4_module) + *------------------- + * module_entry[1] + *------------------- + * ... + *------------------- + * module_entry[n] + *------------------- + * module_config[0] (struct sof_man4_module_config) + *------------------- + * module_config[1] + *------------------- + * ...
+ *------------------- + * module_config[m] + *------------------- + * FW content + *------------------- + *********************************************************************/ + +struct sof_ext_manifest4_hdr { + uint32_t id; + uint32_t len; /* length of extension manifest */ + uint16_t version_major; /* header version */ + uint16_t version_minor; + uint32_t num_module_entries; +} __packed; + +struct sof_man4_fw_binary_header { + /* This part must be unchanged to be backward compatible with SPT-LP ROM */ + uint32_t id; + uint32_t len; /* sizeof(sof_man4_fw_binary_header) in bytes */ + uint8_t name[MAX_FW_BINARY_NAME]; + uint32_t preload_page_count; /* number of pages of preloaded image */ + uint32_t fw_image_flags; + uint32_t feature_mask; + uint16_t major_version; /* Firmware version */ + uint16_t minor_version; + uint16_t hotfix_version; + uint16_t build_version; + uint32_t num_module_entries; + + /* This part may change to contain any additional data for BaseFw that is skipped by ROM */ + uint32_t hw_buf_base_addr; + uint32_t hw_buf_length; + uint32_t load_offset; /* This value is used by ROM */ +} __packed; + +struct sof_man4_segment_desc { + uint32_t flags; + uint32_t v_base_addr; + uint32_t file_offset; +} __packed; + +struct sof_man4_module { + uint32_t id; + uint8_t name[MAX_MODULE_NAME_LEN]; + guid_t uuid; + uint32_t type; + uint8_t hash[DEFAULT_HASH_SHA256_LEN]; + uint32_t entry_point; + uint16_t cfg_offset; + uint16_t cfg_count; + uint32_t affinity_mask; + uint16_t instance_max_count; + uint16_t instance_stack_size; + struct sof_man4_segment_desc segments[3]; +} __packed; + +struct sof_man4_module_config { + uint32_t par[4]; /* module parameters */ + uint32_t is_bytes; /* actual size of instance .bss (bytes) */ + uint32_t cps; /* cycles per second */ + uint32_t ibs; /* input buffer size (bytes) */ + uint32_t obs; /* output buffer size (bytes) */ + uint32_t module_flags; /* flags, reserved for future use */ + uint32_t cpc; /* cycles per single run */ + uint32_t obls; /* output block size, reserved for future use */ +} __packed; + +#endif /* __SOF_FIRMWARE_EXT_MANIFEST4_H__ */ diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h new file mode 100644 index 0000000000..a795deacc2 --- /dev/null +++ b/include/sound/sof/ipc4/header.h @@ -0,0 +1,468 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * Copyright(c) 2022 Intel Corporation. All rights reserved. + */ + +#ifndef __INCLUDE_SOUND_SOF_IPC4_HEADER_H__ +#define __INCLUDE_SOUND_SOF_IPC4_HEADER_H__ + +#include <linux/types.h> +#include <uapi/sound/sof/abi.h> + +/* maximum message size for mailbox Tx/Rx */ +#define SOF_IPC4_MSG_MAX_SIZE 4096 + +/** \addtogroup sof_uapi uAPI + * SOF uAPI specification.
+ * @{ + */ + +/** + * struct sof_ipc4_msg - Placeholder of an IPC4 message + * @header_u64: IPC4 header as a single u64 number + * @primary: Primary, mandatory part of the header + * @extension: Extended part of the header; if not used, it should be + * set to 0 + * @data_size: Size of data in bytes pointed by @data_ptr + * @data_ptr: Pointer to the optional payload of a message + */ +struct sof_ipc4_msg { + union { + u64 header_u64; + struct { + u32 primary; + u32 extension; + }; + }; + + size_t data_size; + void *data_ptr; +}; + +/** + * struct sof_ipc4_tuple - Generic type/ID and parameter tuple + * @type: type/ID + * @size: size of the @value array in bytes + * @value: value for the given type + */ +struct sof_ipc4_tuple { + uint32_t type; + uint32_t size; + uint32_t value[]; +} __packed; + +/* + * IPC4 messages have two 32-bit identifiers, made up as follows: + * + * header - msg type, msg id, msg direction ... + * extension - extra params such as msg data size in mailbox + * + * These are sent at the start of the IPC message in the mailbox. Messages + * should not be sent in the doorbell (special exceptions for firmware). + */ + +/* + * IPC4 primary header bit allocation for messages + * bit 0-23: message type specific + * bit 24-28: type: enum sof_ipc4_global_msg if target is SOF_IPC4_FW_GEN_MSG + * enum sof_ipc4_module_type if target is SOF_IPC4_MODULE_MSG + * bit 29: response - sof_ipc4_msg_dir + * bit 30: target - enum sof_ipc4_msg_target + * bit 31: reserved, unused + */ + +/* Value of target field - must fit into 1 bit */ +enum sof_ipc4_msg_target { + /* Global FW message */ + SOF_IPC4_FW_GEN_MSG, + + /* Module message */ + SOF_IPC4_MODULE_MSG +}; + +/* Value of type field - must fit into 5 bits */ +enum sof_ipc4_global_msg { + SOF_IPC4_GLB_BOOT_CONFIG, + SOF_IPC4_GLB_ROM_CONTROL, + SOF_IPC4_GLB_IPCGATEWAY_CMD, + + /* 3 .. 12: RESERVED - do not use */ + + SOF_IPC4_GLB_PERF_MEASUREMENTS_CMD = 13, + SOF_IPC4_GLB_CHAIN_DMA, + + SOF_IPC4_GLB_LOAD_MULTIPLE_MODULES, + SOF_IPC4_GLB_UNLOAD_MULTIPLE_MODULES, + + /* pipeline settings */ + SOF_IPC4_GLB_CREATE_PIPELINE, + SOF_IPC4_GLB_DELETE_PIPELINE, + SOF_IPC4_GLB_SET_PIPELINE_STATE, + SOF_IPC4_GLB_GET_PIPELINE_STATE, + SOF_IPC4_GLB_GET_PIPELINE_CONTEXT_SIZE, + SOF_IPC4_GLB_SAVE_PIPELINE, + SOF_IPC4_GLB_RESTORE_PIPELINE, + + /* Loads library (using Code Load or HD/A Host Output DMA) */ + SOF_IPC4_GLB_LOAD_LIBRARY, + + /* 25: RESERVED - do not use */ + + SOF_IPC4_GLB_INTERNAL_MESSAGE = 26, + + /* Notification (FW to SW driver) */ + SOF_IPC4_GLB_NOTIFICATION, + + /* 28 .. 31: RESERVED - do not use */ + + SOF_IPC4_GLB_TYPE_LAST, +}; + +/* Value of response field - must fit into 1 bit */ +enum sof_ipc4_msg_dir { + SOF_IPC4_MSG_REQUEST, + SOF_IPC4_MSG_REPLY, +}; + +enum sof_ipc4_pipeline_state { + SOF_IPC4_PIPE_INVALID_STATE, + SOF_IPC4_PIPE_UNINITIALIZED, + SOF_IPC4_PIPE_RESET, + SOF_IPC4_PIPE_PAUSED, + SOF_IPC4_PIPE_RUNNING, + SOF_IPC4_PIPE_EOS +}; + +/* Generic message fields (bit 24-30) */ + +/* encoded to header's msg_tgt field */ +#define SOF_IPC4_MSG_TARGET_SHIFT 30 +#define SOF_IPC4_MSG_TARGET_MASK BIT(30) +#define SOF_IPC4_MSG_TARGET(x) ((x) << SOF_IPC4_MSG_TARGET_SHIFT) +#define SOF_IPC4_MSG_IS_MODULE_MSG(x) ((x) & SOF_IPC4_MSG_TARGET_MASK ?
1 : 0) + +/* encoded to header's rsp field */ +#define SOF_IPC4_MSG_DIR_SHIFT 29 +#define SOF_IPC4_MSG_DIR_MASK BIT(29) +#define SOF_IPC4_MSG_DIR(x) ((x) << SOF_IPC4_MSG_DIR_SHIFT) + +/* encoded to header's type field */ +#define SOF_IPC4_MSG_TYPE_SHIFT 24 +#define SOF_IPC4_MSG_TYPE_MASK GENMASK(28, 24) +#define SOF_IPC4_MSG_TYPE_SET(x) (((x) << SOF_IPC4_MSG_TYPE_SHIFT) & \ + SOF_IPC4_MSG_TYPE_MASK) +#define SOF_IPC4_MSG_TYPE_GET(x) (((x) & SOF_IPC4_MSG_TYPE_MASK) >> \ + SOF_IPC4_MSG_TYPE_SHIFT) + +/* Global message type specific field definitions */ + +/* pipeline creation ipc msg */ +#define SOF_IPC4_GLB_PIPE_INSTANCE_SHIFT 16 +#define SOF_IPC4_GLB_PIPE_INSTANCE_MASK GENMASK(23, 16) +#define SOF_IPC4_GLB_PIPE_INSTANCE_ID(x) ((x) << SOF_IPC4_GLB_PIPE_INSTANCE_SHIFT) + +#define SOF_IPC4_GLB_PIPE_PRIORITY_SHIFT 11 +#define SOF_IPC4_GLB_PIPE_PRIORITY_MASK GENMASK(15, 11) +#define SOF_IPC4_GLB_PIPE_PRIORITY(x) ((x) << SOF_IPC4_GLB_PIPE_PRIORITY_SHIFT) + +#define SOF_IPC4_GLB_PIPE_MEM_SIZE_SHIFT 0 +#define SOF_IPC4_GLB_PIPE_MEM_SIZE_MASK GENMASK(10, 0) +#define SOF_IPC4_GLB_PIPE_MEM_SIZE(x) ((x) << SOF_IPC4_GLB_PIPE_MEM_SIZE_SHIFT) + +#define SOF_IPC4_GLB_PIPE_EXT_LP_SHIFT 0 +#define SOF_IPC4_GLB_PIPE_EXT_LP_MASK BIT(0) +#define SOF_IPC4_GLB_PIPE_EXT_LP(x) ((x) << SOF_IPC4_GLB_PIPE_EXT_LP_SHIFT) + +/* pipeline set state ipc msg */ +#define SOF_IPC4_GLB_PIPE_STATE_ID_SHIFT 16 +#define SOF_IPC4_GLB_PIPE_STATE_ID_MASK GENMASK(23, 16) +#define SOF_IPC4_GLB_PIPE_STATE_ID(x) ((x) << SOF_IPC4_GLB_PIPE_STATE_ID_SHIFT) + +#define SOF_IPC4_GLB_PIPE_STATE_SHIFT 0 +#define SOF_IPC4_GLB_PIPE_STATE_MASK GENMASK(15, 0) +#define SOF_IPC4_GLB_PIPE_STATE(x) ((x) << SOF_IPC4_GLB_PIPE_STATE_SHIFT) + +enum sof_ipc4_channel_config { + /* one channel only. */ + SOF_IPC4_CHANNEL_CONFIG_MONO, + /* L & R. */ + SOF_IPC4_CHANNEL_CONFIG_STEREO, + /* L, R & LFE; PCM only. */ + SOF_IPC4_CHANNEL_CONFIG_2_POINT_1, + /* L, C & R; MP3 & AAC only. */ + SOF_IPC4_CHANNEL_CONFIG_3_POINT_0, + /* L, C, R & LFE; PCM only. */ + SOF_IPC4_CHANNEL_CONFIG_3_POINT_1, + /* L, R, Ls & Rs; PCM only. */ + SOF_IPC4_CHANNEL_CONFIG_QUATRO, + /* L, C, R & Cs; MP3 & AAC only. */ + SOF_IPC4_CHANNEL_CONFIG_4_POINT_0, + /* L, C, R, Ls & Rs. */ + SOF_IPC4_CHANNEL_CONFIG_5_POINT_0, + /* L, C, R, Ls, Rs & LFE. */ + SOF_IPC4_CHANNEL_CONFIG_5_POINT_1, + /* one channel replicated in two. 
*/ + SOF_IPC4_CHANNEL_CONFIG_DUAL_MONO, + /* Stereo (L,R) in 4 slots, 1st stream: [ L, R, -, - ] */ + SOF_IPC4_CHANNEL_CONFIG_I2S_DUAL_STEREO_0, + /* Stereo (L,R) in 4 slots, 2nd stream: [ -, -, L, R ] */ + SOF_IPC4_CHANNEL_CONFIG_I2S_DUAL_STEREO_1, + /* L, C, R, Ls, Rs, LFE, LS & RS. */ + SOF_IPC4_CHANNEL_CONFIG_7_POINT_1, +}; + +enum sof_ipc4_interleaved_style { + SOF_IPC4_CHANNELS_INTERLEAVED, + SOF_IPC4_CHANNELS_NONINTERLEAVED, +}; + +enum sof_ipc4_sample_type { + SOF_IPC4_MSB_INTEGER, /* integer with Most Significant Byte first */ + SOF_IPC4_LSB_INTEGER, /* integer with Least Significant Byte first */ +}; + +struct sof_ipc4_audio_format { + uint32_t sampling_frequency; + uint32_t bit_depth; + uint32_t ch_map; + uint32_t ch_cfg; /* sof_ipc4_channel_config */ + uint32_t interleaving_style; + uint32_t fmt_cfg; /* channels_count valid_bit_depth s_type */ +} __packed __aligned(4); + +#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_SHIFT 0 +#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_MASK GENMASK(7, 0) +#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(x) \ + ((x) & SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_MASK) +#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_SHIFT 8 +#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_MASK GENMASK(15, 8) +#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(x) \ + (((x) & SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_MASK) >> \ + SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_SHIFT) +#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_SHIFT 16 +#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_MASK GENMASK(23, 16) +#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE(x) \ + (((x) & SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_MASK) >> \ + SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_SHIFT) + +/* Module message type specific field definitions */ + +enum sof_ipc4_module_type { + SOF_IPC4_MOD_INIT_INSTANCE, + SOF_IPC4_MOD_CONFIG_GET, + SOF_IPC4_MOD_CONFIG_SET, + SOF_IPC4_MOD_LARGE_CONFIG_GET, + SOF_IPC4_MOD_LARGE_CONFIG_SET, + SOF_IPC4_MOD_BIND, + SOF_IPC4_MOD_UNBIND, + SOF_IPC4_MOD_SET_DX, + SOF_IPC4_MOD_SET_D0IX, + SOF_IPC4_MOD_ENTER_MODULE_RESTORE, + SOF_IPC4_MOD_EXIT_MODULE_RESTORE, + SOF_IPC4_MOD_DELETE_INSTANCE, + + SOF_IPC4_MOD_TYPE_LAST, +}; + +struct sof_ipc4_base_module_cfg { + uint32_t cpc; /* the max count of Cycles Per Chunk processing */ + uint32_t ibs; /* input Buffer Size (in bytes) */ + uint32_t obs; /* output Buffer Size (in bytes) */ + uint32_t is_pages; /* number of physical pages used */ + struct sof_ipc4_audio_format audio_fmt; +} __packed __aligned(4); + +/* common module ipc msg */ +#define SOF_IPC4_MOD_INSTANCE_SHIFT 16 +#define SOF_IPC4_MOD_INSTANCE_MASK GENMASK(23, 16) +#define SOF_IPC4_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_INSTANCE_SHIFT) + +#define SOF_IPC4_MOD_ID_SHIFT 0 +#define SOF_IPC4_MOD_ID_MASK GENMASK(15, 0) +#define SOF_IPC4_MOD_ID(x) ((x) << SOF_IPC4_MOD_ID_SHIFT) + +/* init module ipc msg */ +#define SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT 0 +#define SOF_IPC4_MOD_EXT_PARAM_SIZE_MASK GENMASK(15, 0) +#define SOF_IPC4_MOD_EXT_PARAM_SIZE(x) ((x) << SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT) + +#define SOF_IPC4_MOD_EXT_PPL_ID_SHIFT 16 +#define SOF_IPC4_MOD_EXT_PPL_ID_MASK GENMASK(23, 16) +#define SOF_IPC4_MOD_EXT_PPL_ID(x) ((x) << SOF_IPC4_MOD_EXT_PPL_ID_SHIFT) + +#define SOF_IPC4_MOD_EXT_CORE_ID_SHIFT 24 +#define SOF_IPC4_MOD_EXT_CORE_ID_MASK GENMASK(27, 24) +#define SOF_IPC4_MOD_EXT_CORE_ID(x) ((x) << SOF_IPC4_MOD_EXT_CORE_ID_SHIFT) + +#define SOF_IPC4_MOD_EXT_DOMAIN_SHIFT 28 +#define SOF_IPC4_MOD_EXT_DOMAIN_MASK BIT(28) +#define SOF_IPC4_MOD_EXT_DOMAIN(x) ((x) <<
SOF_IPC4_MOD_EXT_DOMAIN_SHIFT) + +/* bind/unbind module ipc msg */ +#define SOF_IPC4_MOD_EXT_DST_MOD_ID_SHIFT 0 +#define SOF_IPC4_MOD_EXT_DST_MOD_ID_MASK GENMASK(15, 0) +#define SOF_IPC4_MOD_EXT_DST_MOD_ID(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_ID_SHIFT) + +#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_SHIFT 16 +#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_MASK GENMASK(23, 16) +#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_SHIFT) + +#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_SHIFT 24 +#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_MASK GENMASK(26, 24) +#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_SHIFT) + +#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_SHIFT 27 +#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_MASK GENMASK(29, 27) +#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID(x) ((x) << SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_SHIFT) + +#define MOD_ENABLE_LOG 6 +#define MOD_SYSTEM_TIME 20 + +/* set module large config */ +#define SOF_IPC4_MOD_EXT_MSG_SIZE_SHIFT 0 +#define SOF_IPC4_MOD_EXT_MSG_SIZE_MASK GENMASK(19, 0) +#define SOF_IPC4_MOD_EXT_MSG_SIZE(x) ((x) << SOF_IPC4_MOD_EXT_MSG_SIZE_SHIFT) + +#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID_SHIFT 20 +#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID_MASK GENMASK(27, 20) +#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID(x) ((x) << SOF_IPC4_MOD_EXT_MSG_PARAM_ID_SHIFT) + +#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_SHIFT 28 +#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_MASK BIT(28) +#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK(x) ((x) << SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_SHIFT) + +#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_SHIFT 29 +#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_MASK BIT(29) +#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK(x) ((x) << SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_SHIFT) + +/* Init instance messages */ +#define SOF_IPC4_MOD_INIT_BASEFW_MOD_ID 0 +#define SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID 0 + +enum sof_ipc4_base_fw_params { + SOF_IPC4_FW_PARAM_ENABLE_LOGS = 6, + SOF_IPC4_FW_PARAM_FW_CONFIG, + SOF_IPC4_FW_PARAM_HW_CONFIG_GET, + SOF_IPC4_FW_PARAM_MODULES_INFO_GET, + SOF_IPC4_FW_PARAM_LIBRARIES_INFO_GET = 16, + SOF_IPC4_FW_PARAM_SYSTEM_TIME = 20, +}; + +enum sof_ipc4_fw_config_params { + SOF_IPC4_FW_CFG_FW_VERSION, + SOF_IPC4_FW_CFG_MEMORY_RECLAIMED, + SOF_IPC4_FW_CFG_SLOW_CLOCK_FREQ_HZ, + SOF_IPC4_FW_CFG_FAST_CLOCK_FREQ_HZ, + SOF_IPC4_FW_CFG_DMA_BUFFER_CONFIG, + SOF_IPC4_FW_CFG_ALH_SUPPORT_LEVEL, + SOF_IPC4_FW_CFG_DL_MAILBOX_BYTES, + SOF_IPC4_FW_CFG_UL_MAILBOX_BYTES, + SOF_IPC4_FW_CFG_TRACE_LOG_BYTES, + SOF_IPC4_FW_CFG_MAX_PPL_COUNT, + SOF_IPC4_FW_CFG_MAX_ASTATE_COUNT, + SOF_IPC4_FW_CFG_MAX_MODULE_PIN_COUNT, + SOF_IPC4_FW_CFG_MODULES_COUNT, + SOF_IPC4_FW_CFG_MAX_MOD_INST_COUNT, + SOF_IPC4_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT, + SOF_IPC4_FW_CFG_LL_PRI_COUNT, + SOF_IPC4_FW_CFG_MAX_DP_TASKS_COUNT, + SOF_IPC4_FW_CFG_MAX_LIBS_COUNT, + SOF_IPC4_FW_CFG_SCHEDULER_CONFIG, + SOF_IPC4_FW_CFG_XTAL_FREQ_HZ, + SOF_IPC4_FW_CFG_CLOCKS_CONFIG, + SOF_IPC4_FW_CFG_RESERVED, + SOF_IPC4_FW_CFG_POWER_GATING_POLICY, + SOF_IPC4_FW_CFG_ASSERT_MODE, +}; + +struct sof_ipc4_fw_version { + uint16_t major; + uint16_t minor; + uint16_t hotfix; + uint16_t build; +} __packed; + +/* Payload data for SOF_IPC4_MOD_SET_DX */ +struct sof_ipc4_dx_state_info { + /* core(s) to apply the change */ + uint32_t core_mask; + /* core state: 0: put core_id to D3; 1: put core_id to D0 */ + uint32_t dx_mask; +} __packed __aligned(4); + +/* Reply messages */ + +/* + * IPC4 primary header bit allocation for replies + * bit 0-23: status + * bit 24-28: type: enum sof_ipc4_global_msg if target is
SOF_IPC4_FW_GEN_MSG + * enum sof_ipc4_module_type if target is SOF_IPC4_MODULE_MSG + * bit 29: response - sof_ipc4_msg_dir + * bit 30: target - enum sof_ipc4_msg_target + * bit 31: reserved, unused + */ + +#define SOF_IPC4_REPLY_STATUS GENMASK(23, 0) + +/* Notification messages */ + +/* + * IPC4 primary header bit allocation for notifications + * bit 0-15: notification type specific + * bit 16-23: enum sof_ipc4_notification_type + * bit 24-28: SOF_IPC4_GLB_NOTIFICATION + * bit 29: response - sof_ipc4_msg_dir + * bit 30: target - enum sof_ipc4_msg_target + * bit 31: reserved, unused + */ + +#define SOF_IPC4_MSG_IS_NOTIFICATION(x) (SOF_IPC4_MSG_TYPE_GET(x) == \ + SOF_IPC4_GLB_NOTIFICATION) + +#define SOF_IPC4_NOTIFICATION_TYPE_SHIFT 16 +#define SOF_IPC4_NOTIFICATION_TYPE_MASK GENMASK(23, 16) +#define SOF_IPC4_NOTIFICATION_TYPE_GET(x) (((x) & SOF_IPC4_NOTIFICATION_TYPE_MASK) >> \ + SOF_IPC4_NOTIFICATION_TYPE_SHIFT) + +/* Value of notification type field - must fit into 8 bits */ +enum sof_ipc4_notification_type { + /* Phrase detected (notification from WoV module) */ + SOF_IPC4_NOTIFY_PHRASE_DETECTED = 4, + /* Event from a resource (pipeline or module instance) */ + SOF_IPC4_NOTIFY_RESOURCE_EVENT, + /* Debug log buffer status changed */ + SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS, + /* Timestamp captured at the link */ + SOF_IPC4_NOTIFY_TIMESTAMP_CAPTURED, + /* FW has completed initialization */ + SOF_IPC4_NOTIFY_FW_READY, + /* Audio classifier result (ACA) */ + SOF_IPC4_NOTIFY_FW_AUD_CLASS_RESULT, + /* Exception caught by DSP FW */ + SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT, + /* 11 is skipped by the existing cavs firmware */ + /* Custom module notification */ + SOF_IPC4_NOTIFY_MODULE_NOTIFICATION = 12, + /* 13 is reserved - do not use */ + /* Probe notify data available */ + SOF_IPC4_NOTIFY_PROBE_DATA_AVAILABLE = 14, + /* AM module notifications */ + SOF_IPC4_NOTIFY_ASYNC_MSG_SRVC_MESSAGE, + + SOF_IPC4_NOTIFY_TYPE_LAST, +}; + +struct sof_ipc4_notify_resource_data { + uint32_t resource_type; + uint32_t resource_id; + uint32_t event_type; + uint32_t reserved; + uint32_t data[6]; +} __packed __aligned(4); + +/** @}*/ + +#endif diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h new file mode 100644 index 0000000000..d735302401 --- /dev/null +++ b/include/trace/events/intel_ifs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM intel_ifs + +#if !defined(_TRACE_IFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_IFS_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(ifs_status, + + TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status), + + TP_ARGS(cpu, activate, status), + + TP_STRUCT__entry( + __field( u64, status ) + __field( int, cpu ) + __field( u8, start ) + __field( u8, stop ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->start = activate.start; + __entry->stop = activate.stop; + __entry->status = status.data; + ), + + TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx", + __entry->cpu, + __entry->start, + __entry->stop, + __entry->status) +); + +#endif /* _TRACE_IFS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h new file mode 100644 index 0000000000..56592da930 --- /dev/null +++ b/include/trace/events/rv.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rv + +#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RV_H + +#include <linux/rv.h> +#include <linux/tracepoint.h> + +#ifdef
CONFIG_DA_MON_EVENTS_IMPLICIT +DECLARE_EVENT_CLASS(event_da_monitor, + + TP_PROTO(char *state, char *event, char *next_state, bool final_state), + + TP_ARGS(state, event, next_state, final_state), + + TP_STRUCT__entry( + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + __array( char, next_state, MAX_DA_NAME_LEN ) + __field( bool, final_state ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); + __entry->final_state = final_state; + ), + + TP_printk("%s x %s -> %s %s", + __entry->state, + __entry->event, + __entry->next_state, + __entry->final_state ? "(final)" : "") +); + +DECLARE_EVENT_CLASS(error_da_monitor, + + TP_PROTO(char *state, char *event), + + TP_ARGS(state, event), + + TP_STRUCT__entry( + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + ), + + TP_printk("event %s not expected in the state %s", + __entry->event, + __entry->state) +); + +#ifdef CONFIG_RV_MON_WIP +DEFINE_EVENT(event_da_monitor, event_wip, + TP_PROTO(char *state, char *event, char *next_state, bool final_state), + TP_ARGS(state, event, next_state, final_state)); + +DEFINE_EVENT(error_da_monitor, error_wip, + TP_PROTO(char *state, char *event), + TP_ARGS(state, event)); +#endif /* CONFIG_RV_MON_WIP */ +#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */ + +#ifdef CONFIG_DA_MON_EVENTS_ID +DECLARE_EVENT_CLASS(event_da_monitor_id, + + TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), + + TP_ARGS(id, state, event, next_state, final_state), + + TP_STRUCT__entry( + __field( int, id ) + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + __array( char, next_state, MAX_DA_NAME_LEN ) + __field( bool, final_state ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); + __entry->id = id; + __entry->final_state = final_state; + ), + + TP_printk("%d: %s x %s -> %s %s", + __entry->id, + __entry->state, + __entry->event, + __entry->next_state, + __entry->final_state ? 
"(final)" : "") +); + +DECLARE_EVENT_CLASS(error_da_monitor_id, + + TP_PROTO(int id, char *state, char *event), + + TP_ARGS(id, state, event), + + TP_STRUCT__entry( + __field( int, id ) + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + __entry->id = id; + ), + + TP_printk("%d: event %s not expected in the state %s", + __entry->id, + __entry->event, + __entry->state) +); + +#ifdef CONFIG_RV_MON_WWNR +/* id is the pid of the task */ +DEFINE_EVENT(event_da_monitor_id, event_wwnr, + TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), + TP_ARGS(id, state, event, next_state, final_state)); + +DEFINE_EVENT(error_da_monitor_id, error_wwnr, + TP_PROTO(int id, char *state, char *event), + TP_ARGS(id, state, event)); +#endif /* CONFIG_RV_MON_WWNR */ + +#endif /* CONFIG_DA_MON_EVENTS_ID */ +#endif /* _TRACE_RV_H */ + +/* This part ust be outside protection */ +#undef TRACE_INCLUDE_PATH +#include diff --git a/include/trace/events/rwmmio.h b/include/trace/events/rwmmio.h new file mode 100644 index 0000000000..de41159216 --- /dev/null +++ b/include/trace/events/rwmmio.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rwmmio + +#if !defined(_TRACE_RWMMIO_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RWMMIO_H + +#include + +DECLARE_EVENT_CLASS(rwmmio_rw_template, + + TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + + TP_ARGS(caller, val, width, addr), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, addr) + __field(u64, val) + __field(u8, width) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->val = val; + __entry->addr = (unsigned long)addr; + __entry->width = width; + ), + + TP_printk("%pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller, __entry->width, + __entry->val, __entry->addr) +); + +DEFINE_EVENT(rwmmio_rw_template, rwmmio_write, + TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + TP_ARGS(caller, val, width, addr) +); + +DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write, + TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + TP_ARGS(caller, val, width, addr) +); + +TRACE_EVENT(rwmmio_read, + + TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr), + + TP_ARGS(caller, width, addr), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, addr) + __field(u8, width) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->addr = (unsigned long)addr; + __entry->width = width; + ), + + TP_printk("%pS width=%d addr=%#lx", + (void *)__entry->caller, __entry->width, __entry->addr) +); + +TRACE_EVENT(rwmmio_post_read, + + TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr), + + TP_ARGS(caller, val, width, addr), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, addr) + __field(u64, val) + __field(u8, width) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->val = val; + __entry->addr = (unsigned long)addr; + __entry->width = width; + ), + + TP_printk("%pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller, __entry->width, + __entry->val, __entry->addr) +); + +#endif /* _TRACE_RWMMIO_H */ 
+ +#include <trace/define_trace.h> diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/thermal_pressure.h new file mode 100644 index 0000000000..b686802013 --- /dev/null +++ b/include/trace/events/thermal_pressure.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM thermal_pressure + +#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_THERMAL_PRESSURE_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(thermal_pressure_update, + TP_PROTO(int cpu, unsigned long thermal_pressure), + TP_ARGS(cpu, thermal_pressure), + + TP_STRUCT__entry( + __field(unsigned long, thermal_pressure) + __field(int, cpu) + ), + + TP_fast_assign( + __entry->thermal_pressure = thermal_pressure; + __entry->cpu = cpu; + ), + + TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure) +); +#endif /* _TRACE_THERMAL_PRESSURE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/asm-generic/termbits-common.h b/include/uapi/asm-generic/termbits-common.h new file mode 100644 index 0000000000..4a6a79f28b --- /dev/null +++ b/include/uapi/asm-generic/termbits-common.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_GENERIC_TERMBITS_COMMON_H +#define __ASM_GENERIC_TERMBITS_COMMON_H + +typedef unsigned char cc_t; +typedef unsigned int speed_t; + +/* c_iflag bits */ +#define IGNBRK 0x001 /* Ignore break condition */ +#define BRKINT 0x002 /* Signal interrupt on break */ +#define IGNPAR 0x004 /* Ignore characters with parity errors */ +#define PARMRK 0x008 /* Mark parity and framing errors */ +#define INPCK 0x010 /* Enable input parity check */ +#define ISTRIP 0x020 /* Strip 8th bit off characters */ +#define INLCR 0x040 /* Map NL to CR on input */ +#define IGNCR 0x080 /* Ignore CR */ +#define ICRNL 0x100 /* Map CR to NL on input */ +#define IXANY 0x800 /* Any character will restart after stop */ + +/* c_oflag bits */ +#define OPOST 0x01 /* Perform output processing */ +#define OCRNL 0x08 +#define ONOCR 0x10 +#define ONLRET 0x20 +#define OFILL 0x40 +#define OFDEL 0x80 + +/* c_cflag bit meaning */ +/* Common CBAUD rates */ +#define B0 0x00000000 /* hang up */ +#define B50 0x00000001 +#define B75 0x00000002 +#define B110 0x00000003 +#define B134 0x00000004 +#define B150 0x00000005 +#define B200 0x00000006 +#define B300 0x00000007 +#define B600 0x00000008 +#define B1200 0x00000009 +#define B1800 0x0000000a +#define B2400 0x0000000b +#define B4800 0x0000000c +#define B9600 0x0000000d +#define B19200 0x0000000e +#define B38400 0x0000000f +#define EXTA B19200 +#define EXTB B38400 + +#define ADDRB 0x20000000 /* address bit */ +#define CMSPAR 0x40000000 /* mark or space (stick) parity */ +#define CRTSCTS 0x80000000 /* flow control */ + +#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ + +/* tcflow() ACTION argument and TCXONC use these */ +#define TCOOFF 0 /* Suspend output */ +#define TCOON 1 /* Restart suspended output */ +#define TCIOFF 2 /* Send a STOP character */ +#define TCION 3 /* Send a START character */ + +/* tcflush() QUEUE_SELECTOR argument and TCFLSH use these */ +#define TCIFLUSH 0 /* Discard data received but not yet read */ +#define TCOFLUSH 1 /* Discard data written but not yet sent */ +#define TCIOFLUSH 2 /* Discard all pending data */ + +#endif /* __ASM_GENERIC_TERMBITS_COMMON_H */ diff --git a/include/uapi/linux/cachefiles.h b/include/uapi/linux/cachefiles.h new file mode 100644 index 0000000000..78caa73e53 --- /dev/null +++
b/include/uapi/linux/cachefiles.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_CACHEFILES_H +#define _LINUX_CACHEFILES_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +/* + * Fscache ensures that the maximum length of the cookie key is 255. The + * volume key is controlled by netfs and is generally no bigger than 255. + */ +#define CACHEFILES_MSG_MAX_SIZE 1024 + +enum cachefiles_opcode { + CACHEFILES_OP_OPEN, + CACHEFILES_OP_CLOSE, + CACHEFILES_OP_READ, +}; + +/* + * Message Header + * + * @msg_id a unique ID identifying this message + * @opcode message type, CACHEFILES_OP_* + * @len message length, including message header and following data + * @object_id a unique ID identifying a cache file + * @data message type specific payload + */ +struct cachefiles_msg { + __u32 msg_id; + __u32 opcode; + __u32 len; + __u32 object_id; + __u8 data[]; +}; + +/* + * @data contains the volume_key followed directly by the cookie_key. volume_key + * is a NUL-terminated string; @volume_key_size indicates the size of the volume + * key in bytes. cookie_key is binary data, which is netfs specific; + * @cookie_key_size indicates the size of the cookie key in bytes. + * + * @fd identifies an anon_fd referring to the cache file. + */ +struct cachefiles_open { + __u32 volume_key_size; + __u32 cookie_key_size; + __u32 fd; + __u32 flags; + __u8 data[]; +}; + +/* + * @off indicates the starting offset of the requested file range + * @len indicates the length of the requested file range + */ +struct cachefiles_read { + __u64 off; + __u64 len; +}; + +/* + * Reply for READ request + * @arg for this ioctl is the @msg_id field of the READ request. + */ +#define CACHEFILES_IOC_READ_COMPLETE _IOW(0x98, 1, int) + +#endif
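
Editor's note: the cachefiles UAPI above is easiest to follow from the daemon's side of the protocol, so a minimal user-space sketch of the request loop follows. It is not part of the patch. The device-node path /dev/cachefiles, the assumption that a cache has already been bound in on-demand mode, and the toy object_fd[] bookkeeping are all illustrative assumptions; only the structures and the CACHEFILES_IOC_READ_COMPLETE ioctl defined in the header are relied upon, and the "copen" reply to an OPEN request is left as a stub comment since its wire format is defined by the kernel documentation, not this header.

/*
 * Illustrative sketch only, not part of the patch. Assumes a cache has
 * already been bound in on-demand mode on the (assumed) device node
 * /dev/cachefiles; error handling is trimmed for brevity.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cachefiles.h>

/* Toy bookkeeping: remember the anon fd handed out for each object. */
static int object_fd[256];

static void handle_one_request(int devfd)
{
	char buf[CACHEFILES_MSG_MAX_SIZE];
	struct cachefiles_msg *msg;
	ssize_t n;

	/* Each read() on the device yields one pending request. */
	n = read(devfd, buf, sizeof(buf));
	if (n < (ssize_t)sizeof(*msg))
		return;
	msg = (struct cachefiles_msg *)buf;

	switch (msg->opcode) {
	case CACHEFILES_OP_OPEN: {
		struct cachefiles_open *load = (void *)msg->data;

		/* volume key (NUL-terminated) precedes the binary cookie key */
		printf("open object %u, volume %s\n", msg->object_id, load->data);
		object_fd[msg->object_id % 256] = load->fd;
		/* ... reply by writing a "copen" command to devfd (see kernel docs) ... */
		break;
	}
	case CACHEFILES_OP_READ: {
		struct cachefiles_read *req = (void *)msg->data;
		int fd = object_fd[msg->object_id % 256];

		/* ... fetch [req->off, req->off + req->len) and pwrite() it into fd ... */
		/* then signal completion; @arg is the msg_id, per the header comment */
		ioctl(fd, CACHEFILES_IOC_READ_COMPLETE, msg->msg_id);
		break;
	}
	case CACHEFILES_OP_CLOSE:
		close(object_fd[msg->object_id % 256]);
		break;
	}
}

Keying the completion ioctl to the per-object anon fd, rather than to the device node, is what lets a single daemon multiplex many cache objects while the kernel matches each completion back to a pending read via @msg_id.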